/*
 * osfmk/kern/ipc_tt.c
 * (apple/xnu, git blob 7d0384cf272ea27fed789306480749e068b19e43)
 */
1 /*
2 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
58 * support for mandatory and extensible security protections. This notice
59 * is included in support of clause 2.2 (b) of the Apple Public License,
60 * Version 2.0.
61 */
62 /*
63 */
64
65 /*
66 * File: ipc_tt.c
67 * Purpose:
68 * Task and thread related IPC functions.
69 */
70
71 #include <mach/mach_types.h>
72 #include <mach/boolean.h>
73 #include <mach/kern_return.h>
74 #include <mach/mach_param.h>
75 #include <mach/task_special_ports.h>
76 #include <mach/thread_special_ports.h>
77 #include <mach/thread_status.h>
78 #include <mach/exception_types.h>
79 #include <mach/memory_object_types.h>
80 #include <mach/mach_traps.h>
81 #include <mach/task_server.h>
82 #include <mach/thread_act_server.h>
83 #include <mach/mach_host_server.h>
84 #include <mach/host_priv_server.h>
85 #include <mach/vm_map_server.h>
86
87 #include <kern/kern_types.h>
88 #include <kern/host.h>
89 #include <kern/ipc_kobject.h>
90 #include <kern/ipc_tt.h>
91 #include <kern/kalloc.h>
92 #include <kern/thread.h>
93 #include <kern/misc_protos.h>
94
95 #include <vm/vm_map.h>
96 #include <vm/vm_pageout.h>
97 #include <vm/vm_protos.h>
98
99 #include <security/mac_mach_internal.h>
100
101 #if CONFIG_EMBEDDED && !SECURE_KERNEL
102 extern int cs_relax_platform_task_ports;
103 #endif
104
105 /* forward declarations */
106 task_t convert_port_to_locked_task(ipc_port_t port);
107 task_inspect_t convert_port_to_locked_task_inspect(ipc_port_t port);
108 static void ipc_port_bind_special_reply_port_locked(ipc_port_t port);
109 static kern_return_t ipc_port_unbind_special_reply_port(thread_t thread, boolean_t unbind_active_port);
110 kern_return_t task_conversion_eval(task_t caller, task_t victim);
111
/*
 *	Routine:	ipc_task_init
 *	Purpose:
 *		Initialize a task's IPC state: its IPC space, kernel
 *		(control) port, name port, and the inheritable special
 *		and exception ports.
 *
 *		If non-null, some state will be inherited from the parent.
 *		The parent must be appropriately initialized.
 *	Conditions:
 *		Nothing locked.
 */

void
ipc_task_init(
	task_t		task,
	task_t		parent)
{
	ipc_space_t space;
	ipc_port_t kport;	/* kernel (control) port */
	ipc_port_t nport;	/* name port */
	kern_return_t kr;
	int i;


	/* A fresh IPC space; allocation failure here is fatal. */
	kr = ipc_space_create(&ipc_table_entries[0], &space);
	if (kr != KERN_SUCCESS) {
		panic("ipc_task_init");
	}

	space->is_task = task;

	kport = ipc_port_alloc_kernel();
	if (kport == IP_NULL) {
		panic("ipc_task_init");
	}

	nport = ipc_port_alloc_kernel();
	if (nport == IP_NULL) {
		panic("ipc_task_init");
	}

	itk_lock_init(task);
	task->itk_self = kport;
	task->itk_nself = nport;
	task->itk_resume = IP_NULL; /* Lazily allocated on-demand */
	if (task_is_a_corpse_fork(task)) {
		/*
		 * No-senders notification would not work for a corpse:
		 * the kernel itself would hold a naked send right.
		 */
		task->itk_sself = IP_NULL;
	} else {
		task->itk_sself = ipc_port_make_send(kport);
	}
	task->itk_debug_control = IP_NULL;
	task->itk_space = space;

#if CONFIG_MACF
	/* Element 0 is never used for exceptions; give it no label. */
	task->exc_actions[0].label = NULL;
	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		mac_exc_associate_action_label(&task->exc_actions[i], mac_exc_create_label());
	}
#endif

	/* always zero-out the first (unused) array element */
	bzero(&task->exc_actions[0], sizeof(task->exc_actions[0]));

	if (parent == TASK_NULL) {
		/* No parent: start with empty exception/special ports. */
		ipc_port_t port;
		for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
			task->exc_actions[i].port = IP_NULL;
			task->exc_actions[i].flavor = 0;
			task->exc_actions[i].behavior = 0;
			task->exc_actions[i].privileged = FALSE;
		}/* for */

		/* Only the host port is provided by default. */
		kr = host_get_host_port(host_priv_self(), &port);
		assert(kr == KERN_SUCCESS);
		task->itk_host = port;

		task->itk_bootstrap = IP_NULL;
		task->itk_seatbelt = IP_NULL;
		task->itk_gssd = IP_NULL;
		task->itk_task_access = IP_NULL;

		for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
			task->itk_registered[i] = IP_NULL;
		}
	} else {
		/* Copy send rights from the (live) parent under its lock. */
		itk_lock(parent);
		assert(parent->itk_self != IP_NULL);

		/* inherit registered ports */

		for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
			task->itk_registered[i] =
			    ipc_port_copy_send(parent->itk_registered[i]);
		}

		/* inherit exception and bootstrap ports */

		for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
			task->exc_actions[i].port =
			    ipc_port_copy_send(parent->exc_actions[i].port);
			task->exc_actions[i].flavor =
			    parent->exc_actions[i].flavor;
			task->exc_actions[i].behavior =
			    parent->exc_actions[i].behavior;
			task->exc_actions[i].privileged =
			    parent->exc_actions[i].privileged;
#if CONFIG_MACF
			mac_exc_inherit_action_label(parent->exc_actions + i, task->exc_actions + i);
#endif
		}/* for */
		task->itk_host =
		    ipc_port_copy_send(parent->itk_host);

		task->itk_bootstrap =
		    ipc_port_copy_send(parent->itk_bootstrap);

		task->itk_seatbelt =
		    ipc_port_copy_send(parent->itk_seatbelt);

		task->itk_gssd =
		    ipc_port_copy_send(parent->itk_gssd);

		task->itk_task_access =
		    ipc_port_copy_send(parent->itk_task_access);

		itk_unlock(parent);
	}
}
243
244 /*
245 * Routine: ipc_task_enable
246 * Purpose:
247 * Enable a task for IPC access.
248 * Conditions:
249 * Nothing locked.
250 */
251
252 void
253 ipc_task_enable(
254 task_t task)
255 {
256 ipc_port_t kport;
257 ipc_port_t nport;
258
259 itk_lock(task);
260 kport = task->itk_self;
261 if (kport != IP_NULL) {
262 ipc_kobject_set(kport, (ipc_kobject_t) task, IKOT_TASK);
263 }
264 nport = task->itk_nself;
265 if (nport != IP_NULL) {
266 ipc_kobject_set(nport, (ipc_kobject_t) task, IKOT_TASK_NAME);
267 }
268 itk_unlock(task);
269 }
270
271 /*
272 * Routine: ipc_task_disable
273 * Purpose:
274 * Disable IPC access to a task.
275 * Conditions:
276 * Nothing locked.
277 */
278
279 void
280 ipc_task_disable(
281 task_t task)
282 {
283 ipc_port_t kport;
284 ipc_port_t nport;
285 ipc_port_t rport;
286
287 itk_lock(task);
288 kport = task->itk_self;
289 if (kport != IP_NULL) {
290 ipc_kobject_set(kport, IKO_NULL, IKOT_NONE);
291 }
292 nport = task->itk_nself;
293 if (nport != IP_NULL) {
294 ipc_kobject_set(nport, IKO_NULL, IKOT_NONE);
295 }
296
297 rport = task->itk_resume;
298 if (rport != IP_NULL) {
299 /*
300 * From this point onwards this task is no longer accepting
301 * resumptions.
302 *
303 * There are still outstanding suspensions on this task,
304 * even as it is being torn down. Disconnect the task
305 * from the rport, thereby "orphaning" the rport. The rport
306 * itself will go away only when the last suspension holder
307 * destroys his SO right to it -- when he either
308 * exits, or tries to actually use that last SO right to
309 * resume this (now non-existent) task.
310 */
311 ipc_kobject_set(rport, IKO_NULL, IKOT_NONE);
312 }
313 itk_unlock(task);
314 }
315
/*
 *	Routine:	ipc_task_terminate
 *	Purpose:
 *		Clean up and destroy a task's IPC state: release all
 *		stashed send rights and destroy the kernel ports.
 *	Conditions:
 *		Nothing locked.  The task must be suspended.
 *		(Or the current thread must be in the task.)
 */

void
ipc_task_terminate(
	task_t		task)
{
	ipc_port_t kport;	/* kernel (control) port */
	ipc_port_t nport;	/* name port */
	ipc_port_t rport;	/* resume port; may be IP_NULL (lazily allocated) */
	int i;

	itk_lock(task);
	kport = task->itk_self;

	if (kport == IP_NULL) {
		/* the task is already terminated (can this happen?) */
		itk_unlock(task);
		return;
	}
	/* Detach the ports under the lock so no new rights can be cloned. */
	task->itk_self = IP_NULL;

	nport = task->itk_nself;
	assert(nport != IP_NULL);
	task->itk_nself = IP_NULL;

	rport = task->itk_resume;
	task->itk_resume = IP_NULL;

	itk_unlock(task);

	/*
	 * Release the naked send rights.  Done unlocked: itk_self is
	 * now IP_NULL, so nothing else will touch these fields.
	 */

	if (IP_VALID(task->itk_sself)) {
		ipc_port_release_send(task->itk_sself);
	}

	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		if (IP_VALID(task->exc_actions[i].port)) {
			ipc_port_release_send(task->exc_actions[i].port);
		}
#if CONFIG_MACF
		mac_exc_free_action_label(task->exc_actions + i);
#endif
	}

	if (IP_VALID(task->itk_host)) {
		ipc_port_release_send(task->itk_host);
	}

	if (IP_VALID(task->itk_bootstrap)) {
		ipc_port_release_send(task->itk_bootstrap);
	}

	if (IP_VALID(task->itk_seatbelt)) {
		ipc_port_release_send(task->itk_seatbelt);
	}

	if (IP_VALID(task->itk_gssd)) {
		ipc_port_release_send(task->itk_gssd);
	}

	if (IP_VALID(task->itk_task_access)) {
		ipc_port_release_send(task->itk_task_access);
	}

	if (IP_VALID(task->itk_debug_control)) {
		ipc_port_release_send(task->itk_debug_control);
	}

	for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
		if (IP_VALID(task->itk_registered[i])) {
			ipc_port_release_send(task->itk_registered[i]);
		}
	}

	/* destroy the kernel ports */
	ipc_port_dealloc_kernel(kport);
	ipc_port_dealloc_kernel(nport);
	if (rport != IP_NULL) {
		ipc_port_dealloc_kernel(rport);
	}

	itk_lock_destroy(task);
}
407
/*
 *	Routine:	ipc_task_reset
 *	Purpose:
 *		Reset a task's IPC state to protect it when
 *		it enters an elevated security context.  The
 *		task name port can remain the same - since
 *		it represents no specific privilege.
 *	Conditions:
 *		Nothing locked.  The task must be suspended.
 *		(Or the current thread must be in the task.)
 */

void
ipc_task_reset(
	task_t		task)
{
	ipc_port_t old_kport, new_kport;
	ipc_port_t old_sself;
	ipc_port_t old_exc_actions[EXC_TYPES_COUNT];
	int i;

#if CONFIG_MACF
	/* Fresh label to unset credentials in existing labels. */
	struct label *unset_label = mac_exc_create_label();
#endif

	/* Allocate the replacement kernel port (with a send right)
	 * before taking the task lock. */
	new_kport = ipc_kobject_alloc_port((ipc_kobject_t)task, IKOT_TASK,
	    IPC_KOBJECT_ALLOC_MAKE_SEND);

	itk_lock(task);

	old_kport = task->itk_self;

	if (old_kport == IP_NULL) {
		/* the task is already terminated (can this happen?) */
		itk_unlock(task);
		/* Undo the speculative allocation: drop the send right,
		 * then destroy the port itself. */
		ipc_port_release_send(new_kport);
		ipc_port_dealloc_kernel(new_kport);
#if CONFIG_MACF
		mac_exc_free_label(unset_label);
#endif
		return;
	}

	old_sself = task->itk_sself;
	task->itk_sself = task->itk_self = new_kport;

	/* Set the old kport to IKOT_NONE and update the exec token while under the port lock */
	ip_lock(old_kport);
	ipc_kobject_set_atomically(old_kport, IKO_NULL, IKOT_NONE);
	task->exec_token += 1;
	ip_unlock(old_kport);

	/* Strip non-privileged exception ports; collect them for
	 * release after the lock is dropped. */
	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		old_exc_actions[i] = IP_NULL;

		/* Keep the corpse-notify port if a corpse report is pending. */
		if (i == EXC_CORPSE_NOTIFY && task_corpse_pending_report(task)) {
			continue;
		}

		if (!task->exc_actions[i].privileged) {
#if CONFIG_MACF
			mac_exc_update_action_label(task->exc_actions + i, unset_label);
#endif
			old_exc_actions[i] = task->exc_actions[i].port;
			task->exc_actions[i].port = IP_NULL;
		}
	}/* for */

	/* The debug-control port never survives a reset. */
	if (IP_VALID(task->itk_debug_control)) {
		ipc_port_release_send(task->itk_debug_control);
	}
	task->itk_debug_control = IP_NULL;

	itk_unlock(task);

#if CONFIG_MACF
	mac_exc_free_label(unset_label);
#endif

	/* release the naked send rights */

	if (IP_VALID(old_sself)) {
		ipc_port_release_send(old_sself);
	}

	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		if (IP_VALID(old_exc_actions[i])) {
			ipc_port_release_send(old_exc_actions[i]);
		}
	}/* for */

	/* destroy the kernel port */
	ipc_port_dealloc_kernel(old_kport);
}
503
504 /*
505 * Routine: ipc_thread_init
506 * Purpose:
507 * Initialize a thread's IPC state.
508 * Conditions:
509 * Nothing locked.
510 */
511
512 void
513 ipc_thread_init(
514 thread_t thread)
515 {
516 ipc_port_t kport;
517
518 kport = ipc_kobject_alloc_port((ipc_kobject_t)thread, IKOT_THREAD,
519 IPC_KOBJECT_ALLOC_MAKE_SEND);
520
521 thread->ith_sself = thread->ith_self = kport;
522 thread->ith_special_reply_port = NULL;
523 thread->exc_actions = NULL;
524
525 #if IMPORTANCE_INHERITANCE
526 thread->ith_assertions = 0;
527 #endif
528
529 ipc_kmsg_queue_init(&thread->ith_messages);
530
531 thread->ith_rpc_reply = IP_NULL;
532 }
533
534 void
535 ipc_thread_init_exc_actions(
536 thread_t thread)
537 {
538 assert(thread->exc_actions == NULL);
539
540 thread->exc_actions = kalloc(sizeof(struct exception_action) * EXC_TYPES_COUNT);
541 bzero(thread->exc_actions, sizeof(struct exception_action) * EXC_TYPES_COUNT);
542
543 #if CONFIG_MACF
544 for (size_t i = 0; i < EXC_TYPES_COUNT; ++i) {
545 mac_exc_associate_action_label(thread->exc_actions + i, mac_exc_create_label());
546 }
547 #endif
548 }
549
550 void
551 ipc_thread_destroy_exc_actions(
552 thread_t thread)
553 {
554 if (thread->exc_actions != NULL) {
555 #if CONFIG_MACF
556 for (size_t i = 0; i < EXC_TYPES_COUNT; ++i) {
557 mac_exc_free_action_label(thread->exc_actions + i);
558 }
559 #endif
560
561 kfree(thread->exc_actions,
562 sizeof(struct exception_action) * EXC_TYPES_COUNT);
563 thread->exc_actions = NULL;
564 }
565 }
566
567 void
568 ipc_thread_disable(
569 thread_t thread)
570 {
571 ipc_port_t kport = thread->ith_self;
572
573 if (kport != IP_NULL) {
574 ipc_kobject_set(kport, IKO_NULL, IKOT_NONE);
575 }
576
577 /* unbind the thread special reply port */
578 if (IP_VALID(thread->ith_special_reply_port)) {
579 ipc_port_unbind_special_reply_port(thread, TRUE);
580 }
581 }
582
/*
 *	Routine:	ipc_thread_terminate
 *	Purpose:
 *		Clean up and destroy a thread's IPC state: release the
 *		self send right and exception ports, destroy the kernel
 *		port, and tear down the RPC reply port.
 *	Conditions:
 *		Nothing locked.
 */

void
ipc_thread_terminate(
	thread_t	thread)
{
	ipc_port_t kport = thread->ith_self;

	if (kport != IP_NULL) {
		int i;

		/* Release the stashed send right on the self port. */
		if (IP_VALID(thread->ith_sself)) {
			ipc_port_release_send(thread->ith_sself);
		}

		thread->ith_sself = thread->ith_self = IP_NULL;

		/* Release the exception-port send rights and free the table. */
		if (thread->exc_actions != NULL) {
			for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
				if (IP_VALID(thread->exc_actions[i].port)) {
					ipc_port_release_send(thread->exc_actions[i].port);
				}
			}
			ipc_thread_destroy_exc_actions(thread);
		}

		ipc_port_dealloc_kernel(kport);
	}

#if IMPORTANCE_INHERITANCE
	assert(thread->ith_assertions == 0);
#endif

	/* By now no messages may be pending on the thread. */
	assert(ipc_kmsg_queue_empty(&thread->ith_messages));

	if (thread->ith_rpc_reply != IP_NULL) {
		ipc_port_dealloc_reply(thread->ith_rpc_reply);
	}

	thread->ith_rpc_reply = IP_NULL;
}
630
/*
 *	Routine:	ipc_thread_reset
 *	Purpose:
 *		Reset the IPC state for a given Mach thread when
 *		its task enters an elevated security context.
 *		Both the thread port and its exception ports have
 *		to be reset.  Its RPC reply port cannot have any
 *		rights outstanding, so it should be fine.
 *	Conditions:
 *		Nothing locked.
 */

void
ipc_thread_reset(
	thread_t	thread)
{
	ipc_port_t old_kport, new_kport;
	ipc_port_t old_sself;
	ipc_port_t old_exc_actions[EXC_TYPES_COUNT];
	boolean_t  has_old_exc_actions = FALSE;
	int		   i;

#if CONFIG_MACF
	/* Fresh label used to unset credentials in surviving labels. */
	struct label *new_label = mac_exc_create_label();
#endif

	/* Allocate the replacement kernel port (with a send right)
	 * before taking the thread mutex. */
	new_kport = ipc_kobject_alloc_port((ipc_kobject_t)thread, IKOT_THREAD,
	    IPC_KOBJECT_ALLOC_MAKE_SEND);

	thread_mtx_lock(thread);

	old_kport = thread->ith_self;
	old_sself = thread->ith_sself;

	if (old_kport == IP_NULL && thread->inspection == FALSE) {
		/* the thread is already terminated (can this happen?) */
		thread_mtx_unlock(thread);
		/* Undo the speculative allocation: drop the send right,
		 * then destroy the port itself. */
		ipc_port_release_send(new_kport);
		ipc_port_dealloc_kernel(new_kport);
#if CONFIG_MACF
		mac_exc_free_label(new_label);
#endif
		return;
	}

	thread->ith_sself = thread->ith_self = new_kport;
	if (old_kport != IP_NULL) {
		ipc_kobject_set(old_kport, IKO_NULL, IKOT_NONE);
	}

	/*
	 * Only ports that were set by root-owned processes
	 * (privileged ports) should survive
	 */
	if (thread->exc_actions != NULL) {
		has_old_exc_actions = TRUE;
		for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
			if (thread->exc_actions[i].privileged) {
				/* IP_NULL here means "nothing to release" below. */
				old_exc_actions[i] = IP_NULL;
			} else {
#if CONFIG_MACF
				mac_exc_update_action_label(thread->exc_actions + i, new_label);
#endif
				old_exc_actions[i] = thread->exc_actions[i].port;
				thread->exc_actions[i].port = IP_NULL;
			}
		}
	}

	thread_mtx_unlock(thread);

#if CONFIG_MACF
	mac_exc_free_label(new_label);
#endif

	/* release the naked send rights */

	if (IP_VALID(old_sself)) {
		ipc_port_release_send(old_sself);
	}

	if (has_old_exc_actions) {
		for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
			ipc_port_release_send(old_exc_actions[i]);
		}
	}

	/* destroy the kernel port */
	if (old_kport != IP_NULL) {
		ipc_port_dealloc_kernel(old_kport);
	}

	/* unbind the thread special reply port */
	if (IP_VALID(thread->ith_special_reply_port)) {
		ipc_port_unbind_special_reply_port(thread, TRUE);
	}
}
728
729 /*
730 * Routine: retrieve_task_self_fast
731 * Purpose:
732 * Optimized version of retrieve_task_self,
733 * that only works for the current task.
734 *
735 * Return a send right (possibly null/dead)
736 * for the task's user-visible self port.
737 * Conditions:
738 * Nothing locked.
739 */
740
741 ipc_port_t
742 retrieve_task_self_fast(
743 task_t task)
744 {
745 __assert_only ipc_port_t sright;
746 ipc_port_t port;
747
748 assert(task == current_task());
749
750 itk_lock(task);
751 assert(task->itk_self != IP_NULL);
752
753 if ((port = task->itk_sself) == task->itk_self) {
754 /* no interposing */
755 sright = ipc_port_copy_send(port);
756 assert(sright == port);
757 } else {
758 port = ipc_port_copy_send(port);
759 }
760 itk_unlock(task);
761
762 return port;
763 }
764
765 /*
766 * Routine: retrieve_thread_self_fast
767 * Purpose:
768 * Return a send right (possibly null/dead)
769 * for the thread's user-visible self port.
770 *
771 * Only works for the current thread.
772 *
773 * Conditions:
774 * Nothing locked.
775 */
776
777 ipc_port_t
778 retrieve_thread_self_fast(
779 thread_t thread)
780 {
781 __assert_only ipc_port_t sright;
782 ipc_port_t port;
783
784 assert(thread == current_thread());
785
786 thread_mtx_lock(thread);
787
788 assert(thread->ith_self != IP_NULL);
789
790 if ((port = thread->ith_sself) == thread->ith_self) {
791 /* no interposing */
792 sright = ipc_port_copy_send(port);
793 assert(sright == port);
794 } else {
795 port = ipc_port_copy_send(port);
796 }
797
798 thread_mtx_unlock(thread);
799
800 return port;
801 }
802
803 /*
804 * Routine: task_self_trap [mach trap]
805 * Purpose:
806 * Give the caller send rights for his own task port.
807 * Conditions:
808 * Nothing locked.
809 * Returns:
810 * MACH_PORT_NULL if there are any resource failures
811 * or other errors.
812 */
813
814 mach_port_name_t
815 task_self_trap(
816 __unused struct task_self_trap_args *args)
817 {
818 task_t task = current_task();
819 ipc_port_t sright;
820 mach_port_name_t name;
821
822 sright = retrieve_task_self_fast(task);
823 name = ipc_port_copyout_send(sright, task->itk_space);
824 return name;
825 }
826
827 /*
828 * Routine: thread_self_trap [mach trap]
829 * Purpose:
830 * Give the caller send rights for his own thread port.
831 * Conditions:
832 * Nothing locked.
833 * Returns:
834 * MACH_PORT_NULL if there are any resource failures
835 * or other errors.
836 */
837
838 mach_port_name_t
839 thread_self_trap(
840 __unused struct thread_self_trap_args *args)
841 {
842 thread_t thread = current_thread();
843 task_t task = thread->task;
844 ipc_port_t sright;
845 mach_port_name_t name;
846
847 sright = retrieve_thread_self_fast(thread);
848 name = ipc_port_copyout_send(sright, task->itk_space);
849 return name;
850 }
851
852 /*
853 * Routine: mach_reply_port [mach trap]
854 * Purpose:
855 * Allocate a port for the caller.
856 * Conditions:
857 * Nothing locked.
858 * Returns:
859 * MACH_PORT_NULL if there are any resource failures
860 * or other errors.
861 */
862
863 mach_port_name_t
864 mach_reply_port(
865 __unused struct mach_reply_port_args *args)
866 {
867 ipc_port_t port;
868 mach_port_name_t name;
869 kern_return_t kr;
870
871 kr = ipc_port_alloc(current_task()->itk_space, FALSE, &name, &port);
872 if (kr == KERN_SUCCESS) {
873 ip_unlock(port);
874 } else {
875 name = MACH_PORT_NULL;
876 }
877 return name;
878 }
879
/*
 *	Routine:	thread_get_special_reply_port [mach trap]
 *	Purpose:
 *		Allocate a special reply port for the calling thread,
 *		first unbinding any previously-bound one.
 *	Conditions:
 *		Nothing locked.
 *	Returns:
 *		mach_port_name_t: send right & receive right for special reply port.
 *		MACH_PORT_NULL if there are any resource failures
 *		or other errors.
 */

mach_port_name_t
thread_get_special_reply_port(
	__unused struct thread_get_special_reply_port_args *args)
{
	ipc_port_t port;
	mach_port_name_t name;
	kern_return_t kr;
	thread_t thread = current_thread();

	/* unbind the thread special reply port */
	if (IP_VALID(thread->ith_special_reply_port)) {
		kr = ipc_port_unbind_special_reply_port(thread, TRUE);
		if (kr != KERN_SUCCESS) {
			return MACH_PORT_NULL;
		}
	}

	/* On success the new port comes back locked; bind it to this
	 * thread while still holding the port lock. */
	kr = ipc_port_alloc(current_task()->itk_space, TRUE, &name, &port);
	if (kr == KERN_SUCCESS) {
		ipc_port_bind_special_reply_port_locked(port);
		ip_unlock(port);
	} else {
		name = MACH_PORT_NULL;
	}
	return name;
}
918
/*
 *	Routine:	ipc_port_bind_special_reply_port_locked
 *	Purpose:
 *		Bind the given port to current thread as a special reply port.
 *	Conditions:
 *		Port locked.  The thread must not already have a
 *		special reply port bound.
 *	Returns:
 *		None.
 */

static void
ipc_port_bind_special_reply_port_locked(
	ipc_port_t port)
{
	thread_t thread = current_thread();
	assert(thread->ith_special_reply_port == NULL);

	/* Take a port reference before publishing the pointer on the
	 * thread, so the thread's pointer is always backed by a ref. */
	ip_reference(port);
	thread->ith_special_reply_port = port;
	port->ip_specialreply = 1;
	port->ip_sync_link_state = PORT_SYNC_LINK_ANY;
	port->ip_messages.imq_srp_owner_thread = thread;

	ipc_special_reply_port_bits_reset(port);
}
944
/*
 *	Routine:	ipc_port_unbind_special_reply_port
 *	Purpose:
 *		Unbind the thread's special reply port.
 *		If the special port has threads waiting on turnstile,
 *		update its inheritor.
 *	Condition:
 *		Nothing locked.  Callers check IP_VALID on the thread's
 *		special reply port before calling (a null port here
 *		would be dereferenced — NOTE(review): verify all callers).
 *	Returns:
 *		KERN_SUCCESS on unbind; KERN_FAILURE if the port is
 *		still active and unbind_active_port is FALSE.
 */
static kern_return_t
ipc_port_unbind_special_reply_port(
	thread_t		thread,
	boolean_t		unbind_active_port)
{
	ipc_port_t special_reply_port = thread->ith_special_reply_port;

	ip_lock(special_reply_port);

	/* Return error if port active and unbind_active_port set to FALSE */
	if (unbind_active_port == FALSE && ip_active(special_reply_port)) {
		ip_unlock(special_reply_port);
		return KERN_FAILURE;
	}

	thread->ith_special_reply_port = NULL;
	/* Drops the port lock on our behalf. */
	ipc_port_adjust_special_reply_port_locked(special_reply_port, NULL,
	    IPC_PORT_ADJUST_UNLINK_THREAD, FALSE);
	/* port unlocked */

	/* Drop the reference taken at bind time. */
	ip_release(special_reply_port);
	return KERN_SUCCESS;
}
979
980 /*
981 * Routine: thread_get_special_port [kernel call]
982 * Purpose:
983 * Clones a send right for one of the thread's
984 * special ports.
985 * Conditions:
986 * Nothing locked.
987 * Returns:
988 * KERN_SUCCESS Extracted a send right.
989 * KERN_INVALID_ARGUMENT The thread is null.
990 * KERN_FAILURE The thread is dead.
991 * KERN_INVALID_ARGUMENT Invalid special port.
992 */
993
994 kern_return_t
995 thread_get_special_port(
996 thread_t thread,
997 int which,
998 ipc_port_t *portp)
999 {
1000 kern_return_t result = KERN_SUCCESS;
1001 ipc_port_t *whichp;
1002
1003 if (thread == THREAD_NULL) {
1004 return KERN_INVALID_ARGUMENT;
1005 }
1006
1007 switch (which) {
1008 case THREAD_KERNEL_PORT:
1009 whichp = &thread->ith_sself;
1010 break;
1011
1012 default:
1013 return KERN_INVALID_ARGUMENT;
1014 }
1015
1016 thread_mtx_lock(thread);
1017
1018 if (thread->active) {
1019 *portp = ipc_port_copy_send(*whichp);
1020 } else {
1021 result = KERN_FAILURE;
1022 }
1023
1024 thread_mtx_unlock(thread);
1025
1026 return result;
1027 }
1028
1029 /*
1030 * Routine: thread_set_special_port [kernel call]
1031 * Purpose:
1032 * Changes one of the thread's special ports,
1033 * setting it to the supplied send right.
1034 * Conditions:
1035 * Nothing locked. If successful, consumes
1036 * the supplied send right.
1037 * Returns:
1038 * KERN_SUCCESS Changed the special port.
1039 * KERN_INVALID_ARGUMENT The thread is null.
1040 * KERN_FAILURE The thread is dead.
1041 * KERN_INVALID_ARGUMENT Invalid special port.
1042 */
1043
1044 kern_return_t
1045 thread_set_special_port(
1046 thread_t thread,
1047 int which,
1048 ipc_port_t port)
1049 {
1050 kern_return_t result = KERN_SUCCESS;
1051 ipc_port_t *whichp, old = IP_NULL;
1052
1053 if (thread == THREAD_NULL) {
1054 return KERN_INVALID_ARGUMENT;
1055 }
1056
1057 switch (which) {
1058 case THREAD_KERNEL_PORT:
1059 whichp = &thread->ith_sself;
1060 break;
1061
1062 default:
1063 return KERN_INVALID_ARGUMENT;
1064 }
1065
1066 thread_mtx_lock(thread);
1067
1068 if (thread->active) {
1069 old = *whichp;
1070 *whichp = port;
1071 } else {
1072 result = KERN_FAILURE;
1073 }
1074
1075 thread_mtx_unlock(thread);
1076
1077 if (IP_VALID(old)) {
1078 ipc_port_release_send(old);
1079 }
1080
1081 return result;
1082 }
1083
1084 /*
1085 * Routine: task_get_special_port [kernel call]
1086 * Purpose:
1087 * Clones a send right for one of the task's
1088 * special ports.
1089 * Conditions:
1090 * Nothing locked.
1091 * Returns:
1092 * KERN_SUCCESS Extracted a send right.
1093 * KERN_INVALID_ARGUMENT The task is null.
1094 * KERN_FAILURE The task/space is dead.
1095 * KERN_INVALID_ARGUMENT Invalid special port.
1096 */
1097
1098 kern_return_t
1099 task_get_special_port(
1100 task_t task,
1101 int which,
1102 ipc_port_t *portp)
1103 {
1104 ipc_port_t port;
1105
1106 if (task == TASK_NULL) {
1107 return KERN_INVALID_ARGUMENT;
1108 }
1109
1110 itk_lock(task);
1111 if (task->itk_self == IP_NULL) {
1112 itk_unlock(task);
1113 return KERN_FAILURE;
1114 }
1115
1116 switch (which) {
1117 case TASK_KERNEL_PORT:
1118 port = ipc_port_copy_send(task->itk_sself);
1119 break;
1120
1121 case TASK_NAME_PORT:
1122 port = ipc_port_make_send(task->itk_nself);
1123 break;
1124
1125 case TASK_HOST_PORT:
1126 port = ipc_port_copy_send(task->itk_host);
1127 break;
1128
1129 case TASK_BOOTSTRAP_PORT:
1130 port = ipc_port_copy_send(task->itk_bootstrap);
1131 break;
1132
1133 case TASK_SEATBELT_PORT:
1134 port = ipc_port_copy_send(task->itk_seatbelt);
1135 break;
1136
1137 case TASK_ACCESS_PORT:
1138 port = ipc_port_copy_send(task->itk_task_access);
1139 break;
1140
1141 case TASK_DEBUG_CONTROL_PORT:
1142 port = ipc_port_copy_send(task->itk_debug_control);
1143 break;
1144
1145 default:
1146 itk_unlock(task);
1147 return KERN_INVALID_ARGUMENT;
1148 }
1149 itk_unlock(task);
1150
1151 *portp = port;
1152 return KERN_SUCCESS;
1153 }
1154
1155 /*
1156 * Routine: task_set_special_port [kernel call]
1157 * Purpose:
1158 * Changes one of the task's special ports,
1159 * setting it to the supplied send right.
1160 * Conditions:
1161 * Nothing locked. If successful, consumes
1162 * the supplied send right.
1163 * Returns:
1164 * KERN_SUCCESS Changed the special port.
1165 * KERN_INVALID_ARGUMENT The task is null.
1166 * KERN_FAILURE The task/space is dead.
1167 * KERN_INVALID_ARGUMENT Invalid special port.
1168 * KERN_NO_ACCESS Attempted overwrite of seatbelt port.
1169 */
1170
1171 kern_return_t
1172 task_set_special_port(
1173 task_t task,
1174 int which,
1175 ipc_port_t port)
1176 {
1177 ipc_port_t *whichp;
1178 ipc_port_t old;
1179
1180 if (task == TASK_NULL) {
1181 return KERN_INVALID_ARGUMENT;
1182 }
1183
1184 if (task_is_driver(current_task())) {
1185 return KERN_NO_ACCESS;
1186 }
1187
1188 switch (which) {
1189 case TASK_KERNEL_PORT:
1190 whichp = &task->itk_sself;
1191 break;
1192
1193 case TASK_HOST_PORT:
1194 whichp = &task->itk_host;
1195 break;
1196
1197 case TASK_BOOTSTRAP_PORT:
1198 whichp = &task->itk_bootstrap;
1199 break;
1200
1201 case TASK_SEATBELT_PORT:
1202 whichp = &task->itk_seatbelt;
1203 break;
1204
1205 case TASK_ACCESS_PORT:
1206 whichp = &task->itk_task_access;
1207 break;
1208
1209 case TASK_DEBUG_CONTROL_PORT:
1210 whichp = &task->itk_debug_control;
1211 break;
1212
1213 default:
1214 return KERN_INVALID_ARGUMENT;
1215 }/* switch */
1216
1217 itk_lock(task);
1218 if (task->itk_self == IP_NULL) {
1219 itk_unlock(task);
1220 return KERN_FAILURE;
1221 }
1222
1223 /* do not allow overwrite of seatbelt or task access ports */
1224 if ((TASK_SEATBELT_PORT == which || TASK_ACCESS_PORT == which)
1225 && IP_VALID(*whichp)) {
1226 itk_unlock(task);
1227 return KERN_NO_ACCESS;
1228 }
1229
1230 old = *whichp;
1231 *whichp = port;
1232 itk_unlock(task);
1233
1234 if (IP_VALID(old)) {
1235 ipc_port_release_send(old);
1236 }
1237 return KERN_SUCCESS;
1238 }
1239
1240
1241 /*
1242 * Routine: mach_ports_register [kernel call]
1243 * Purpose:
1244 * Stash a handful of port send rights in the task.
1245 * Child tasks will inherit these rights, but they
1246 * must use mach_ports_lookup to acquire them.
1247 *
1248 * The rights are supplied in a (wired) kalloc'd segment.
1249 * Rights which aren't supplied are assumed to be null.
1250 * Conditions:
1251 * Nothing locked. If successful, consumes
1252 * the supplied rights and memory.
1253 * Returns:
1254 * KERN_SUCCESS Stashed the port rights.
1255 * KERN_INVALID_ARGUMENT The task is null.
1256 * KERN_INVALID_ARGUMENT The task is dead.
1257 * KERN_INVALID_ARGUMENT The memory param is null.
1258 * KERN_INVALID_ARGUMENT Too many port rights supplied.
1259 */
1260
1261 kern_return_t
1262 mach_ports_register(
1263 task_t task,
1264 mach_port_array_t memory,
1265 mach_msg_type_number_t portsCnt)
1266 {
1267 ipc_port_t ports[TASK_PORT_REGISTER_MAX];
1268 unsigned int i;
1269
1270 if ((task == TASK_NULL) ||
1271 (portsCnt > TASK_PORT_REGISTER_MAX) ||
1272 (portsCnt && memory == NULL)) {
1273 return KERN_INVALID_ARGUMENT;
1274 }
1275
1276 /*
1277 * Pad the port rights with nulls.
1278 */
1279
1280 for (i = 0; i < portsCnt; i++) {
1281 ports[i] = memory[i];
1282 }
1283 for (; i < TASK_PORT_REGISTER_MAX; i++) {
1284 ports[i] = IP_NULL;
1285 }
1286
1287 itk_lock(task);
1288 if (task->itk_self == IP_NULL) {
1289 itk_unlock(task);
1290 return KERN_INVALID_ARGUMENT;
1291 }
1292
1293 /*
1294 * Replace the old send rights with the new.
1295 * Release the old rights after unlocking.
1296 */
1297
1298 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
1299 ipc_port_t old;
1300
1301 old = task->itk_registered[i];
1302 task->itk_registered[i] = ports[i];
1303 ports[i] = old;
1304 }
1305
1306 itk_unlock(task);
1307
1308 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
1309 if (IP_VALID(ports[i])) {
1310 ipc_port_release_send(ports[i]);
1311 }
1312 }
1313
1314 /*
1315 * Now that the operation is known to be successful,
1316 * we can free the memory.
1317 */
1318
1319 if (portsCnt != 0) {
1320 kfree(memory,
1321 (vm_size_t) (portsCnt * sizeof(mach_port_t)));
1322 }
1323
1324 return KERN_SUCCESS;
1325 }
1326
1327 /*
1328 * Routine: mach_ports_lookup [kernel call]
1329 * Purpose:
1330 * Retrieves (clones) the stashed port send rights.
1331 * Conditions:
1332 * Nothing locked. If successful, the caller gets
1333 * rights and memory.
1334 * Returns:
1335 * KERN_SUCCESS Retrieved the send rights.
1336 * KERN_INVALID_ARGUMENT The task is null.
1337 * KERN_INVALID_ARGUMENT The task is dead.
1338 * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
1339 */
1340
1341 kern_return_t
1342 mach_ports_lookup(
1343 task_t task,
1344 mach_port_array_t *portsp,
1345 mach_msg_type_number_t *portsCnt)
1346 {
1347 void *memory;
1348 vm_size_t size;
1349 ipc_port_t *ports;
1350 int i;
1351
1352 if (task == TASK_NULL) {
1353 return KERN_INVALID_ARGUMENT;
1354 }
1355
1356 size = (vm_size_t) (TASK_PORT_REGISTER_MAX * sizeof(ipc_port_t));
1357
1358 memory = kalloc(size);
1359 if (memory == 0) {
1360 return KERN_RESOURCE_SHORTAGE;
1361 }
1362
1363 itk_lock(task);
1364 if (task->itk_self == IP_NULL) {
1365 itk_unlock(task);
1366
1367 kfree(memory, size);
1368 return KERN_INVALID_ARGUMENT;
1369 }
1370
1371 ports = (ipc_port_t *) memory;
1372
1373 /*
1374 * Clone port rights. Because kalloc'd memory
1375 * is wired, we won't fault while holding the task lock.
1376 */
1377
1378 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
1379 ports[i] = ipc_port_copy_send(task->itk_registered[i]);
1380 }
1381
1382 itk_unlock(task);
1383
1384 *portsp = (mach_port_array_t) ports;
1385 *portsCnt = TASK_PORT_REGISTER_MAX;
1386 return KERN_SUCCESS;
1387 }
1388
/*
 * Security policy gate for resolving a task's control port.
 * Returns KERN_SUCCESS if `caller` may obtain `victim`'s task port,
 * KERN_INVALID_SECURITY otherwise.
 */
kern_return_t
task_conversion_eval(task_t caller, task_t victim)
{
	/*
	 * Tasks are allowed to resolve their own task ports, and the kernel is
	 * allowed to resolve anyone's task port.
	 */
	if (caller == kernel_task) {
		return KERN_SUCCESS;
	}

	if (caller == victim) {
		return KERN_SUCCESS;
	}

	/*
	 * Only the kernel can resolve the kernel's task port. We've established
	 * by this point that the caller is not kernel_task.
	 */
	if (victim == TASK_NULL || victim == kernel_task) {
		return KERN_INVALID_SECURITY;
	}

#if CONFIG_EMBEDDED
	/*
	 * On embedded platforms, only a platform binary can resolve the task port
	 * of another platform binary.
	 */
	if ((victim->t_flags & TF_PLATFORM) && !(caller->t_flags & TF_PLATFORM)) {
#if SECURE_KERNEL
		return KERN_INVALID_SECURITY;
#else
		/* Development kernels may relax the policy via boot-arg. */
		if (cs_relax_platform_task_ports) {
			return KERN_SUCCESS;
		} else {
			return KERN_INVALID_SECURITY;
		}
#endif /* SECURE_KERNEL */
	}
#endif /* CONFIG_EMBEDDED */

	return KERN_SUCCESS;
}
1432
1433 /*
1434 * Routine: convert_port_to_locked_task
1435 * Purpose:
1436 * Internal helper routine to convert from a port to a locked
1437 * task. Used by several routines that try to convert from a
1438 * task port to a reference on some task related object.
1439 * Conditions:
1440 * Nothing locked, blocking OK.
1441 */
task_t
convert_port_to_locked_task(ipc_port_t port)
{
	int try_failed_count = 0;

	/* Loop only to retry after lock contention; each pass revalidates. */
	while (IP_VALID(port)) {
		task_t ct = current_task();
		task_t task;

		ip_lock(port);
		if (!ip_active(port) || (ip_kotype(port) != IKOT_TASK)) {
			ip_unlock(port);
			return TASK_NULL;
		}
		task = (task_t) port->ip_kobject;
		assert(task != TASK_NULL);

		/* Enforce the task-port conversion policy. */
		if (task_conversion_eval(ct, task)) {
			ip_unlock(port);
			return TASK_NULL;
		}

		/*
		 * Normal lock ordering puts task_lock() before ip_lock().
		 * Attempt out-of-order locking here.
		 */
		if (task_lock_try(task)) {
			ip_unlock(port);
			/* Returned task is locked; caller must task_unlock(). */
			return task;
		}
		try_failed_count++;

		/* Contended: drop the port lock, back off, and retry. */
		ip_unlock(port);
		mutex_pause(try_failed_count);
	}
	return TASK_NULL;
}
1479
1480 /*
1481 * Routine: convert_port_to_locked_task_inspect
1482 * Purpose:
1483 * Internal helper routine to convert from a port to a locked
1484 * task inspect right. Used by internal routines that try to convert from a
1485 * task inspect port to a reference on some task related object.
1486 * Conditions:
1487 * Nothing locked, blocking OK.
1488 */
task_inspect_t
convert_port_to_locked_task_inspect(ipc_port_t port)
{
	int try_failed_count = 0;

	/* Loop only to retry after lock contention; each pass revalidates. */
	while (IP_VALID(port)) {
		task_inspect_t task;

		ip_lock(port);
		if (!ip_active(port) || (ip_kotype(port) != IKOT_TASK)) {
			ip_unlock(port);
			return TASK_INSPECT_NULL;
		}
		task = (task_inspect_t)port->ip_kobject;
		assert(task != TASK_INSPECT_NULL);
		/*
		 * Normal lock ordering puts task_lock() before ip_lock().
		 * Attempt out-of-order locking here.
		 */
		if (task_lock_try((task_t)task)) {
			ip_unlock(port);
			/* Returned task is locked; caller must task_unlock(). */
			return task;
		}
		try_failed_count++;

		/* Contended: drop the port lock, back off, and retry. */
		ip_unlock(port);
		mutex_pause(try_failed_count);
	}
	return TASK_INSPECT_NULL;
}
1519
1520 static task_t
1521 convert_port_to_task_locked(
1522 ipc_port_t port,
1523 uint32_t *exec_token)
1524 {
1525 task_t task = TASK_NULL;
1526
1527 ip_lock_held(port);
1528 require_ip_active(port);
1529
1530 if (ip_kotype(port) == IKOT_TASK) {
1531 task_t ct = current_task();
1532 task = (task_t)port->ip_kobject;
1533 assert(task != TASK_NULL);
1534
1535 if (task_conversion_eval(ct, task)) {
1536 return TASK_NULL;
1537 }
1538
1539 if (exec_token) {
1540 *exec_token = task->exec_token;
1541 }
1542 task_reference_internal(task);
1543 }
1544
1545 return task;
1546 }
1547
1548 /*
1549 * Routine: convert_port_to_task_with_exec_token
1550 * Purpose:
1551 * Convert from a port to a task and return
1552 * the exec token stored in the task.
1553 * Doesn't consume the port ref; produces a task ref,
1554 * which may be null.
1555 * Conditions:
1556 * Nothing locked.
1557 */
1558 task_t
1559 convert_port_to_task_with_exec_token(
1560 ipc_port_t port,
1561 uint32_t *exec_token)
1562 {
1563 task_t task = TASK_NULL;
1564
1565 if (IP_VALID(port)) {
1566 ip_lock(port);
1567 if (ip_active(port)) {
1568 task = convert_port_to_task_locked(port, exec_token);
1569 }
1570 ip_unlock(port);
1571 }
1572
1573 return task;
1574 }
1575
1576 /*
1577 * Routine: convert_port_to_task
1578 * Purpose:
1579 * Convert from a port to a task.
1580 * Doesn't consume the port ref; produces a task ref,
1581 * which may be null.
1582 * Conditions:
1583 * Nothing locked.
1584 */
task_t
convert_port_to_task(
	ipc_port_t              port)
{
	/* Same as the exec-token variant, with the token discarded. */
	return convert_port_to_task_with_exec_token(port, NULL);
}
1591
1592
1593 /*
1594 * Routine: convert_port_to_task_name
1595 * Purpose:
1596 * Convert from a port to a task name.
1597 * Doesn't consume the port ref; produces a task name ref,
1598 * which may be null.
1599 * Conditions:
1600 * Nothing locked.
1601 */
1602 task_name_t
1603 convert_port_to_task_name(
1604 ipc_port_t port)
1605 {
1606 task_name_t task = TASK_NULL;
1607
1608 if (IP_VALID(port)) {
1609 ip_lock(port);
1610
1611 if (ip_active(port) &&
1612 (ip_kotype(port) == IKOT_TASK ||
1613 ip_kotype(port) == IKOT_TASK_NAME)) {
1614 task = (task_name_t)port->ip_kobject;
1615 assert(task != TASK_NAME_NULL);
1616
1617 task_reference_internal(task);
1618 }
1619
1620 ip_unlock(port);
1621 }
1622
1623 return task;
1624 }
1625
1626 static task_inspect_t
1627 convert_port_to_task_inspect_locked(
1628 ipc_port_t port)
1629 {
1630 task_inspect_t task = TASK_INSPECT_NULL;
1631
1632 ip_lock_held(port);
1633 require_ip_active(port);
1634
1635 if (ip_kotype(port) == IKOT_TASK) {
1636 task = (task_inspect_t)port->ip_kobject;
1637 assert(task != TASK_INSPECT_NULL);
1638
1639 task_reference_internal(task);
1640 }
1641
1642 return task;
1643 }
1644
1645 /*
1646 * Routine: convert_port_to_task_inspect
1647 * Purpose:
1648 * Convert from a port to a task inspection right
1649 * Doesn't consume the port ref; produces a task ref,
1650 * which may be null.
1651 * Conditions:
1652 * Nothing locked.
1653 */
1654 task_inspect_t
1655 convert_port_to_task_inspect(
1656 ipc_port_t port)
1657 {
1658 task_inspect_t task = TASK_INSPECT_NULL;
1659
1660 if (IP_VALID(port)) {
1661 ip_lock(port);
1662 if (ip_active(port)) {
1663 task = convert_port_to_task_inspect_locked(port);
1664 }
1665 ip_unlock(port);
1666 }
1667
1668 return task;
1669 }
1670
1671 /*
1672 * Routine: convert_port_to_task_suspension_token
1673 * Purpose:
1674 * Convert from a port to a task suspension token.
1675 * Doesn't consume the port ref; produces a suspension token ref,
1676 * which may be null.
1677 * Conditions:
1678 * Nothing locked.
1679 */
1680 task_suspension_token_t
1681 convert_port_to_task_suspension_token(
1682 ipc_port_t port)
1683 {
1684 task_suspension_token_t task = TASK_NULL;
1685
1686 if (IP_VALID(port)) {
1687 ip_lock(port);
1688
1689 if (ip_active(port) &&
1690 ip_kotype(port) == IKOT_TASK_RESUME) {
1691 task = (task_suspension_token_t)port->ip_kobject;
1692 assert(task != TASK_NULL);
1693
1694 task_reference_internal(task);
1695 }
1696
1697 ip_unlock(port);
1698 }
1699
1700 return task;
1701 }
1702
1703 /*
1704 * Routine: convert_port_to_space
1705 * Purpose:
1706 * Convert from a port to a space.
1707 * Doesn't consume the port ref; produces a space ref,
1708 * which may be null.
1709 * Conditions:
1710 * Nothing locked.
1711 */
1712 ipc_space_t
1713 convert_port_to_space(
1714 ipc_port_t port)
1715 {
1716 ipc_space_t space;
1717 task_t task;
1718
1719 task = convert_port_to_locked_task(port);
1720
1721 if (task == TASK_NULL) {
1722 return IPC_SPACE_NULL;
1723 }
1724
1725 if (!task->active) {
1726 task_unlock(task);
1727 return IPC_SPACE_NULL;
1728 }
1729
1730 space = task->itk_space;
1731 is_reference(space);
1732 task_unlock(task);
1733 return space;
1734 }
1735
1736 /*
1737 * Routine: convert_port_to_space_inspect
1738 * Purpose:
1739 * Convert from a port to a space inspect right.
1740 * Doesn't consume the port ref; produces a space inspect ref,
1741 * which may be null.
1742 * Conditions:
1743 * Nothing locked.
1744 */
1745 ipc_space_inspect_t
1746 convert_port_to_space_inspect(
1747 ipc_port_t port)
1748 {
1749 ipc_space_inspect_t space;
1750 task_inspect_t task;
1751
1752 task = convert_port_to_locked_task_inspect(port);
1753
1754 if (task == TASK_INSPECT_NULL) {
1755 return IPC_SPACE_INSPECT_NULL;
1756 }
1757
1758 if (!task->active) {
1759 task_unlock(task);
1760 return IPC_SPACE_INSPECT_NULL;
1761 }
1762
1763 space = (ipc_space_inspect_t)task->itk_space;
1764 is_reference((ipc_space_t)space);
1765 task_unlock((task_t)task);
1766 return space;
1767 }
1768
1769 /*
1770 * Routine: convert_port_to_map
1771 * Purpose:
1772 * Convert from a port to a map.
1773 * Doesn't consume the port ref; produces a map ref,
1774 * which may be null.
1775 * Conditions:
1776 * Nothing locked.
1777 */
1778
1779 vm_map_t
1780 convert_port_to_map(
1781 ipc_port_t port)
1782 {
1783 task_t task;
1784 vm_map_t map;
1785
1786 task = convert_port_to_locked_task(port);
1787
1788 if (task == TASK_NULL) {
1789 return VM_MAP_NULL;
1790 }
1791
1792 if (!task->active) {
1793 task_unlock(task);
1794 return VM_MAP_NULL;
1795 }
1796
1797 map = task->map;
1798 vm_map_reference_swap(map);
1799 task_unlock(task);
1800 return map;
1801 }
1802
1803
1804 /*
1805 * Routine: convert_port_to_thread
1806 * Purpose:
1807 * Convert from a port to a thread.
1808 * Doesn't consume the port ref; produces an thread ref,
1809 * which may be null.
1810 * Conditions:
1811 * Nothing locked.
1812 */
1813
/*
 * Convert a locked, active thread kobject port into a thread reference,
 * applying the caller-supplied option restrictions.  Returns THREAD_NULL
 * (no reference taken) when the port is not a thread port or a
 * restriction fails.
 */
static thread_t
convert_port_to_thread_locked(
	ipc_port_t		 port,
	port_to_thread_options_t options)
{
	thread_t thread = THREAD_NULL;

	ip_lock_held(port);
	require_ip_active(port);

	if (ip_kotype(port) == IKOT_THREAD) {
		thread = (thread_t)port->ip_kobject;
		assert(thread != THREAD_NULL);

		/* Optionally refuse to resolve the calling thread itself. */
		if (options & PORT_TO_THREAD_NOT_CURRENT_THREAD) {
			if (thread == current_thread()) {
				return THREAD_NULL;
			}
		}

		/* Optionally restrict resolution to threads of the caller's task. */
		if (options & PORT_TO_THREAD_IN_CURRENT_TASK) {
			if (thread->task != current_task()) {
				return THREAD_NULL;
			}
		} else {
			/* Use task conversion rules for thread control conversions */
			if (task_conversion_eval(current_task(), thread->task) != KERN_SUCCESS) {
				return THREAD_NULL;
			}
		}

		/* All checks passed: take the reference we hand back. */
		thread_reference_internal(thread);
	}

	return thread;
}
1850
1851 thread_t
1852 convert_port_to_thread(
1853 ipc_port_t port)
1854 {
1855 thread_t thread = THREAD_NULL;
1856
1857 if (IP_VALID(port)) {
1858 ip_lock(port);
1859 if (ip_active(port)) {
1860 thread = convert_port_to_thread_locked(port, PORT_TO_THREAD_NONE);
1861 }
1862 ip_unlock(port);
1863 }
1864
1865 return thread;
1866 }
1867
1868 /*
1869 * Routine: convert_port_to_thread_inspect
1870 * Purpose:
1871 * Convert from a port to a thread inspection right
1872 * Doesn't consume the port ref; produces a thread ref,
1873 * which may be null.
1874 * Conditions:
1875 * Nothing locked.
1876 */
1877 thread_inspect_t
1878 convert_port_to_thread_inspect(
1879 ipc_port_t port)
1880 {
1881 thread_inspect_t thread = THREAD_INSPECT_NULL;
1882
1883 if (IP_VALID(port)) {
1884 ip_lock(port);
1885
1886 if (ip_active(port) &&
1887 ip_kotype(port) == IKOT_THREAD) {
1888 thread = (thread_inspect_t)port->ip_kobject;
1889 assert(thread != THREAD_INSPECT_NULL);
1890 thread_reference_internal((thread_t)thread);
1891 }
1892 ip_unlock(port);
1893 }
1894
1895 return thread;
1896 }
1897
1898 /*
1899 * Routine: convert_thread_inspect_to_port
1900 * Purpose:
1901 * Convert from a thread inspect reference to a port.
1902 * Consumes a thread ref;
1903 * As we never export thread inspect ports, always
1904 * creates a NULL port.
1905 * Conditions:
1906 * Nothing locked.
1907 */
1908
ipc_port_t
convert_thread_inspect_to_port(thread_inspect_t thread)
{
	/* Inspect ports are never exported: drop the ref, return no port. */
	thread_deallocate(thread);
	return IP_NULL;
}
1915
1916
1917 /*
1918 * Routine: port_name_to_thread
1919 * Purpose:
1920 * Convert from a port name to an thread reference
1921 * A name of MACH_PORT_NULL is valid for the null thread.
1922 * Conditions:
1923 * Nothing locked.
1924 */
1925 thread_t
1926 port_name_to_thread(
1927 mach_port_name_t name,
1928 port_to_thread_options_t options)
1929 {
1930 thread_t thread = THREAD_NULL;
1931 ipc_port_t kport;
1932 kern_return_t kr;
1933
1934 if (MACH_PORT_VALID(name)) {
1935 kr = ipc_port_translate_send(current_space(), name, &kport);
1936 if (kr == KERN_SUCCESS) {
1937 thread = convert_port_to_thread_locked(kport, options);
1938 ip_unlock(kport);
1939 }
1940 }
1941
1942 return thread;
1943 }
1944
1945 task_t
1946 port_name_to_task(
1947 mach_port_name_t name)
1948 {
1949 ipc_port_t kport;
1950 kern_return_t kr;
1951 task_t task = TASK_NULL;
1952
1953 if (MACH_PORT_VALID(name)) {
1954 kr = ipc_port_translate_send(current_space(), name, &kport);
1955 if (kr == KERN_SUCCESS) {
1956 task = convert_port_to_task_locked(kport, NULL);
1957 ip_unlock(kport);
1958 }
1959 }
1960 return task;
1961 }
1962
1963 task_inspect_t
1964 port_name_to_task_inspect(
1965 mach_port_name_t name)
1966 {
1967 ipc_port_t kport;
1968 kern_return_t kr;
1969 task_inspect_t ti = TASK_INSPECT_NULL;
1970
1971 if (MACH_PORT_VALID(name)) {
1972 kr = ipc_port_translate_send(current_space(), name, &kport);
1973 if (kr == KERN_SUCCESS) {
1974 ti = convert_port_to_task_inspect_locked(kport);
1975 ip_unlock(kport);
1976 }
1977 }
1978 return ti;
1979 }
1980
1981 /*
1982 * Routine: port_name_to_host
1983 * Purpose:
1984 * Convert from a port name to a host pointer.
1985 * NOTE: This does _not_ return a +1 reference to the host_t
1986 * Conditions:
1987 * Nothing locked.
1988 */
1989 host_t
1990 port_name_to_host(
1991 mach_port_name_t name)
1992 {
1993 host_t host = HOST_NULL;
1994 kern_return_t kr;
1995 ipc_port_t port;
1996
1997 if (MACH_PORT_VALID(name)) {
1998 kr = ipc_port_translate_send(current_space(), name, &port);
1999 if (kr == KERN_SUCCESS) {
2000 host = convert_port_to_host(port);
2001 ip_unlock(port);
2002 }
2003 }
2004 return host;
2005 }
2006
2007 /*
2008 * Routine: convert_task_to_port
2009 * Purpose:
2010 * Convert from a task to a port.
2011 * Consumes a task ref; produces a naked send right
2012 * which may be invalid.
2013 * Conditions:
2014 * Nothing locked.
2015 */
2016
2017 ipc_port_t
2018 convert_task_to_port(
2019 task_t task)
2020 {
2021 ipc_port_t port;
2022
2023 itk_lock(task);
2024
2025 if (task->itk_self != IP_NULL) {
2026 port = ipc_port_make_send(task->itk_self);
2027 } else {
2028 port = IP_NULL;
2029 }
2030
2031 itk_unlock(task);
2032
2033 task_deallocate(task);
2034 return port;
2035 }
2036
2037 /*
2038 * Routine: convert_task_inspect_to_port
2039 * Purpose:
2040 * Convert from a task inspect reference to a port.
2041 * Consumes a task ref;
2042 * As we never export task inspect ports, always
2043 * creates a NULL port.
2044 * Conditions:
2045 * Nothing locked.
2046 */
ipc_port_t
convert_task_inspect_to_port(
	task_inspect_t task)
{
	/* Inspect ports are never exported: drop the ref, return no port. */
	task_deallocate(task);

	return IP_NULL;
}
2055
2056 /*
2057 * Routine: convert_task_suspend_token_to_port
2058 * Purpose:
2059 * Convert from a task suspension token to a port.
2060 * Consumes a task suspension token ref; produces a naked send-once right
2061 * which may be invalid.
2062 * Conditions:
2063 * Nothing locked.
2064 */
ipc_port_t
convert_task_suspension_token_to_port(
	task_suspension_token_t task)
{
	ipc_port_t port;

	task_lock(task);
	if (task->active) {
		/* Lazily create the resume port on first use, under the task lock. */
		if (task->itk_resume == IP_NULL) {
			task->itk_resume = ipc_kobject_alloc_port((ipc_kobject_t) task,
			    IKOT_TASK_RESUME, IPC_KOBJECT_ALLOC_NONE);
		}

		/*
		 * Create a send-once right for each instance of a direct user-called
		 * task_suspend2 call. Each time one of these send-once rights is abandoned,
		 * the notification handler will resume the target task.
		 */
		port = ipc_port_make_sonce(task->itk_resume);
		assert(IP_VALID(port));
	} else {
		/* Inactive task: no right can be produced. */
		port = IP_NULL;
	}

	task_unlock(task);
	/* Consume the suspension-token reference in all cases. */
	task_suspension_token_deallocate(task);

	return port;
}
2094
2095
2096 /*
2097 * Routine: convert_task_name_to_port
2098 * Purpose:
2099 * Convert from a task name ref to a port.
2100 * Consumes a task name ref; produces a naked send right
2101 * which may be invalid.
2102 * Conditions:
2103 * Nothing locked.
2104 */
2105
2106 ipc_port_t
2107 convert_task_name_to_port(
2108 task_name_t task_name)
2109 {
2110 ipc_port_t port;
2111
2112 itk_lock(task_name);
2113 if (task_name->itk_nself != IP_NULL) {
2114 port = ipc_port_make_send(task_name->itk_nself);
2115 } else {
2116 port = IP_NULL;
2117 }
2118 itk_unlock(task_name);
2119
2120 task_name_deallocate(task_name);
2121 return port;
2122 }
2123
2124 /*
2125 * Routine: convert_thread_to_port
2126 * Purpose:
2127 * Convert from a thread to a port.
2128 * Consumes an thread ref; produces a naked send right
2129 * which may be invalid.
2130 * Conditions:
2131 * Nothing locked.
2132 */
2133
2134 ipc_port_t
2135 convert_thread_to_port(
2136 thread_t thread)
2137 {
2138 ipc_port_t port;
2139
2140 thread_mtx_lock(thread);
2141
2142 if (thread->ith_self != IP_NULL) {
2143 port = ipc_port_make_send(thread->ith_self);
2144 } else {
2145 port = IP_NULL;
2146 }
2147
2148 thread_mtx_unlock(thread);
2149
2150 thread_deallocate(thread);
2151
2152 return port;
2153 }
2154
2155 /*
2156 * Routine: space_deallocate
2157 * Purpose:
2158 * Deallocate a space ref produced by convert_port_to_space.
2159 * Conditions:
2160 * Nothing locked.
2161 */
2162
void
space_deallocate(
	ipc_space_t space)
{
	/* IS_NULL is tolerated so callers need not check first. */
	if (space != IS_NULL) {
		is_release(space);
	}
}
2171
2172 /*
2173 * Routine: space_inspect_deallocate
2174 * Purpose:
2175 * Deallocate a space inspect ref produced by convert_port_to_space_inspect.
2176 * Conditions:
2177 * Nothing locked.
2178 */
2179
void
space_inspect_deallocate(
	ipc_space_inspect_t space)
{
	/* IS_INSPECT_NULL is tolerated so callers need not check first. */
	if (space != IS_INSPECT_NULL) {
		is_release((ipc_space_t)space);
	}
}
2188
2189 /*
2190 * Routine: thread/task_set_exception_ports [kernel call]
2191 * Purpose:
2192 * Sets the thread/task exception port, flavor and
2193 * behavior for the exception types specified by the mask.
2194 * There will be one send right per exception per valid
2195 * port.
2196 * Conditions:
2197 * Nothing locked. If successful, consumes
2198 * the supplied send right.
2199 * Returns:
2200 * KERN_SUCCESS Changed the special port.
2201 * KERN_INVALID_ARGUMENT The thread is null,
2202 * Illegal mask bit set.
2203 * Illegal exception behavior
2204 * KERN_FAILURE The thread is dead.
2205 */
2206
2207 kern_return_t
2208 thread_set_exception_ports(
2209 thread_t thread,
2210 exception_mask_t exception_mask,
2211 ipc_port_t new_port,
2212 exception_behavior_t new_behavior,
2213 thread_state_flavor_t new_flavor)
2214 {
2215 ipc_port_t old_port[EXC_TYPES_COUNT];
2216 boolean_t privileged = current_task()->sec_token.val[0] == 0;
2217 register int i;
2218
2219 #if CONFIG_MACF
2220 struct label *new_label;
2221 #endif
2222
2223 if (thread == THREAD_NULL) {
2224 return KERN_INVALID_ARGUMENT;
2225 }
2226
2227 if (exception_mask & ~EXC_MASK_VALID) {
2228 return KERN_INVALID_ARGUMENT;
2229 }
2230
2231 if (IP_VALID(new_port)) {
2232 switch (new_behavior & ~MACH_EXCEPTION_MASK) {
2233 case EXCEPTION_DEFAULT:
2234 case EXCEPTION_STATE:
2235 case EXCEPTION_STATE_IDENTITY:
2236 break;
2237
2238 default:
2239 return KERN_INVALID_ARGUMENT;
2240 }
2241 }
2242
2243 /*
2244 * Check the validity of the thread_state_flavor by calling the
2245 * VALID_THREAD_STATE_FLAVOR architecture dependent macro defined in
2246 * osfmk/mach/ARCHITECTURE/thread_status.h
2247 */
2248 if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor)) {
2249 return KERN_INVALID_ARGUMENT;
2250 }
2251
2252 #if CONFIG_MACF
2253 new_label = mac_exc_create_label_for_current_proc();
2254 #endif
2255
2256 thread_mtx_lock(thread);
2257
2258 if (!thread->active) {
2259 thread_mtx_unlock(thread);
2260
2261 return KERN_FAILURE;
2262 }
2263
2264 if (thread->exc_actions == NULL) {
2265 ipc_thread_init_exc_actions(thread);
2266 }
2267 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
2268 if ((exception_mask & (1 << i))
2269 #if CONFIG_MACF
2270 && mac_exc_update_action_label(&thread->exc_actions[i], new_label) == 0
2271 #endif
2272 ) {
2273 old_port[i] = thread->exc_actions[i].port;
2274 thread->exc_actions[i].port = ipc_port_copy_send(new_port);
2275 thread->exc_actions[i].behavior = new_behavior;
2276 thread->exc_actions[i].flavor = new_flavor;
2277 thread->exc_actions[i].privileged = privileged;
2278 } else {
2279 old_port[i] = IP_NULL;
2280 }
2281 }
2282
2283 thread_mtx_unlock(thread);
2284
2285 #if CONFIG_MACF
2286 mac_exc_free_label(new_label);
2287 #endif
2288
2289 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
2290 if (IP_VALID(old_port[i])) {
2291 ipc_port_release_send(old_port[i]);
2292 }
2293 }
2294
2295 if (IP_VALID(new_port)) { /* consume send right */
2296 ipc_port_release_send(new_port);
2297 }
2298
2299 return KERN_SUCCESS;
2300 }
2301
2302 kern_return_t
2303 task_set_exception_ports(
2304 task_t task,
2305 exception_mask_t exception_mask,
2306 ipc_port_t new_port,
2307 exception_behavior_t new_behavior,
2308 thread_state_flavor_t new_flavor)
2309 {
2310 ipc_port_t old_port[EXC_TYPES_COUNT];
2311 boolean_t privileged = current_task()->sec_token.val[0] == 0;
2312 register int i;
2313
2314 #if CONFIG_MACF
2315 struct label *new_label;
2316 #endif
2317
2318 if (task == TASK_NULL) {
2319 return KERN_INVALID_ARGUMENT;
2320 }
2321
2322 if (exception_mask & ~EXC_MASK_VALID) {
2323 return KERN_INVALID_ARGUMENT;
2324 }
2325
2326 if (IP_VALID(new_port)) {
2327 switch (new_behavior & ~MACH_EXCEPTION_MASK) {
2328 case EXCEPTION_DEFAULT:
2329 case EXCEPTION_STATE:
2330 case EXCEPTION_STATE_IDENTITY:
2331 break;
2332
2333 default:
2334 return KERN_INVALID_ARGUMENT;
2335 }
2336 }
2337
2338 /*
2339 * Check the validity of the thread_state_flavor by calling the
2340 * VALID_THREAD_STATE_FLAVOR architecture dependent macro defined in
2341 * osfmk/mach/ARCHITECTURE/thread_status.h
2342 */
2343 if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor)) {
2344 return KERN_INVALID_ARGUMENT;
2345 }
2346
2347 #if CONFIG_MACF
2348 new_label = mac_exc_create_label_for_current_proc();
2349 #endif
2350
2351 itk_lock(task);
2352
2353 if (task->itk_self == IP_NULL) {
2354 itk_unlock(task);
2355
2356 return KERN_FAILURE;
2357 }
2358
2359 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
2360 if ((exception_mask & (1 << i))
2361 #if CONFIG_MACF
2362 && mac_exc_update_action_label(&task->exc_actions[i], new_label) == 0
2363 #endif
2364 ) {
2365 old_port[i] = task->exc_actions[i].port;
2366 task->exc_actions[i].port =
2367 ipc_port_copy_send(new_port);
2368 task->exc_actions[i].behavior = new_behavior;
2369 task->exc_actions[i].flavor = new_flavor;
2370 task->exc_actions[i].privileged = privileged;
2371 } else {
2372 old_port[i] = IP_NULL;
2373 }
2374 }
2375
2376 itk_unlock(task);
2377
2378 #if CONFIG_MACF
2379 mac_exc_free_label(new_label);
2380 #endif
2381
2382 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
2383 if (IP_VALID(old_port[i])) {
2384 ipc_port_release_send(old_port[i]);
2385 }
2386 }
2387
2388 if (IP_VALID(new_port)) { /* consume send right */
2389 ipc_port_release_send(new_port);
2390 }
2391
2392 return KERN_SUCCESS;
2393 }
2394
2395 /*
2396 * Routine: thread/task_swap_exception_ports [kernel call]
2397 * Purpose:
2398 * Sets the thread/task exception port, flavor and
2399 * behavior for the exception types specified by the
2400 * mask.
2401 *
2402 * The old ports, behavior and flavors are returned
2403 * Count specifies the array sizes on input and
2404 * the number of returned ports etc. on output. The
2405 * arrays must be large enough to hold all the returned
 * data, MIG returns an error otherwise. The masks
2407 * array specifies the corresponding exception type(s).
2408 *
2409 * Conditions:
2410 * Nothing locked. If successful, consumes
2411 * the supplied send right.
2412 *
 * Returns up to [in] CountCnt elements.
2414 * Returns:
2415 * KERN_SUCCESS Changed the special port.
2416 * KERN_INVALID_ARGUMENT The thread is null,
2417 * Illegal mask bit set.
2418 * Illegal exception behavior
2419 * KERN_FAILURE The thread is dead.
2420 */
2421
kern_return_t
thread_swap_exception_ports(
	thread_t			thread,
	exception_mask_t		exception_mask,
	ipc_port_t			new_port,
	exception_behavior_t		new_behavior,
	thread_state_flavor_t		new_flavor,
	exception_mask_array_t		masks,
	mach_msg_type_number_t		*CountCnt,
	exception_port_array_t		ports,
	exception_behavior_array_t	behaviors,
	thread_state_flavor_array_t	flavors)
{
	ipc_port_t old_port[EXC_TYPES_COUNT];
	boolean_t privileged = current_task()->sec_token.val[0] == 0;
	unsigned int i, j, count;

#if CONFIG_MACF
	struct label *new_label;
#endif

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	if (exception_mask & ~EXC_MASK_VALID) {
		return KERN_INVALID_ARGUMENT;
	}

	/* New behavior must be one of the recognized kinds (MACH_EXCEPTION bits aside). */
	if (IP_VALID(new_port)) {
		switch (new_behavior & ~MACH_EXCEPTION_MASK) {
		case EXCEPTION_DEFAULT:
		case EXCEPTION_STATE:
		case EXCEPTION_STATE_IDENTITY:
			break;

		default:
			return KERN_INVALID_ARGUMENT;
		}
	}

	/* Architecture-dependent flavor validation (thread_status.h macro). */
	if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor)) {
		return KERN_INVALID_ARGUMENT;
	}

#if CONFIG_MACF
	new_label = mac_exc_create_label_for_current_proc();
#endif

	thread_mtx_lock(thread);

	if (!thread->active) {
		thread_mtx_unlock(thread);

		return KERN_FAILURE;
	}

	/* Exception actions are allocated lazily on first use. */
	if (thread->exc_actions == NULL) {
		ipc_thread_init_exc_actions(thread);
	}

	assert(EXC_TYPES_COUNT > FIRST_EXCEPTION);
	/* Stop early once the caller's output arrays (*CountCnt) are full. */
	for (count = 0, i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT && count < *CountCnt; ++i) {
		if ((exception_mask & (1 << i))
#if CONFIG_MACF
		    && mac_exc_update_action_label(&thread->exc_actions[i], new_label) == 0
#endif
		    ) {
			for (j = 0; j < count; ++j) {
				/*
				 * search for an identical entry, if found
				 * set corresponding mask for this exception.
				 */
				if (thread->exc_actions[i].port == ports[j] &&
				    thread->exc_actions[i].behavior == behaviors[j] &&
				    thread->exc_actions[i].flavor == flavors[j]) {
					masks[j] |= (1 << i);
					break;
				}
			}

			/* No identical entry found: emit a new output slot. */
			if (j == count) {
				masks[j] = (1 << i);
				ports[j] = ipc_port_copy_send(thread->exc_actions[i].port);

				behaviors[j] = thread->exc_actions[i].behavior;
				flavors[j] = thread->exc_actions[i].flavor;
				++count;
			}

			/* Stash the displaced right; release it after unlocking. */
			old_port[i] = thread->exc_actions[i].port;
			thread->exc_actions[i].port = ipc_port_copy_send(new_port);
			thread->exc_actions[i].behavior = new_behavior;
			thread->exc_actions[i].flavor = new_flavor;
			thread->exc_actions[i].privileged = privileged;
		} else {
			old_port[i] = IP_NULL;
		}
	}

	thread_mtx_unlock(thread);

#if CONFIG_MACF
	mac_exc_free_label(new_label);
#endif

	/*
	 * Walk back down from the last index visited.  NOTE(review): i is
	 * unsigned, so this relies on FIRST_EXCEPTION >= 1 to terminate
	 * before --i wraps — appears to hold, but confirm against exception.h.
	 */
	while (--i >= FIRST_EXCEPTION) {
		if (IP_VALID(old_port[i])) {
			ipc_port_release_send(old_port[i]);
		}
	}

	if (IP_VALID(new_port)) { /* consume send right */
		ipc_port_release_send(new_port);
	}

	*CountCnt = count;

	return KERN_SUCCESS;
}
2542
/*
 * Swap the task's exception ports for the types selected in exception_mask:
 * return the previous (port, behavior, flavor) settings, de-duplicated, in
 * masks/ports/behaviors/flavors, and install new_port with new_behavior /
 * new_flavor in their place.  On success the caller receives one send right
 * per returned port, and the send right passed in as new_port is consumed.
 */
kern_return_t
task_swap_exception_ports(
	task_t task,
	exception_mask_t exception_mask,
	ipc_port_t new_port,
	exception_behavior_t new_behavior,
	thread_state_flavor_t new_flavor,
	exception_mask_array_t masks,
	mach_msg_type_number_t *CountCnt,
	exception_port_array_t ports,
	exception_behavior_array_t behaviors,
	thread_state_flavor_array_t flavors)
{
	/* Displaced ports, one slot per exception type; released after unlock. */
	ipc_port_t old_port[EXC_TYPES_COUNT];
	/* Caller with security token 0 installs handlers marked privileged. */
	boolean_t privileged = current_task()->sec_token.val[0] == 0;
	unsigned int i, j, count;

#if CONFIG_MACF
	struct label *new_label;
#endif

	if (task == TASK_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	if (exception_mask & ~EXC_MASK_VALID) {
		return KERN_INVALID_ARGUMENT;
	}

	/* Behavior is only validated when a real port is being installed. */
	if (IP_VALID(new_port)) {
		switch (new_behavior & ~MACH_EXCEPTION_MASK) {
		case EXCEPTION_DEFAULT:
		case EXCEPTION_STATE:
		case EXCEPTION_STATE_IDENTITY:
			break;

		default:
			return KERN_INVALID_ARGUMENT;
		}
	}

	if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor)) {
		return KERN_INVALID_ARGUMENT;
	}

#if CONFIG_MACF
	/* Allocate the MAC label before taking the itk lock (may block). */
	new_label = mac_exc_create_label_for_current_proc();
#endif

	itk_lock(task);

	if (task->itk_self == IP_NULL) {
		/* Task has terminated. */
		itk_unlock(task);

		return KERN_FAILURE;
	}

	/*
	 * Walk the exception types; stop early once the caller-supplied
	 * output arrays (*CountCnt entries) are full.
	 */
	assert(EXC_TYPES_COUNT > FIRST_EXCEPTION);
	for (count = 0, i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT && count < *CountCnt; ++i) {
		if ((exception_mask & (1 << i))
#if CONFIG_MACF
		    /* MAC may veto the swap for this exception type. */
		    && mac_exc_update_action_label(&task->exc_actions[i], new_label) == 0
#endif
		    ) {
			for (j = 0; j < count; j++) {
				/*
				 * search for an identical entry, if found
				 * set corresponding mask for this exception.
				 */
				if (task->exc_actions[i].port == ports[j] &&
				    task->exc_actions[i].behavior == behaviors[j] &&
				    task->exc_actions[i].flavor == flavors[j]) {
					masks[j] |= (1 << i);
					break;
				}
			}

			/* j == count only when the search above found no match:
			 * start a new de-duplicated output entry. */
			if (j == count) {
				masks[j] = (1 << i);
				ports[j] = ipc_port_copy_send(task->exc_actions[i].port);
				behaviors[j] = task->exc_actions[i].behavior;
				flavors[j] = task->exc_actions[i].flavor;
				++count;
			}

			/* Install the new action; stash the old port so its send
			 * right can be released once the lock is dropped. */
			old_port[i] = task->exc_actions[i].port;

			task->exc_actions[i].port = ipc_port_copy_send(new_port);
			task->exc_actions[i].behavior = new_behavior;
			task->exc_actions[i].flavor = new_flavor;
			task->exc_actions[i].privileged = privileged;
		} else {
			old_port[i] = IP_NULL;
		}
	}

	itk_unlock(task);

#if CONFIG_MACF
	mac_exc_free_label(new_label);
#endif

	/*
	 * Release the displaced send rights outside the lock.  Only slots
	 * [FIRST_EXCEPTION, i) were written; i is where the loop stopped.
	 */
	while (--i >= FIRST_EXCEPTION) {
		if (IP_VALID(old_port[i])) {
			ipc_port_release_send(old_port[i]);
		}
	}

	if (IP_VALID(new_port)) { /* consume send right */
		ipc_port_release_send(new_port);
	}

	*CountCnt = count;

	return KERN_SUCCESS;
}
2659
2660 /*
2661 * Routine: thread/task_get_exception_ports [kernel call]
2662 * Purpose:
2663 * Clones a send right for each of the thread/task's exception
2664 * ports specified in the mask and returns the behaviour
2665 * and flavor of said port.
2666 *
2667  *	Returns up to [in] CountCnt elements.
2668 *
2669 * Conditions:
2670 * Nothing locked.
2671 * Returns:
2672 * KERN_SUCCESS Extracted a send right.
2673 * KERN_INVALID_ARGUMENT The thread is null,
2674 * Invalid special port,
2675 * Illegal mask bit set.
2676 * KERN_FAILURE The thread is dead.
2677 */
2678
2679 kern_return_t
2680 thread_get_exception_ports(
2681 thread_t thread,
2682 exception_mask_t exception_mask,
2683 exception_mask_array_t masks,
2684 mach_msg_type_number_t *CountCnt,
2685 exception_port_array_t ports,
2686 exception_behavior_array_t behaviors,
2687 thread_state_flavor_array_t flavors)
2688 {
2689 unsigned int i, j, count;
2690
2691 if (thread == THREAD_NULL) {
2692 return KERN_INVALID_ARGUMENT;
2693 }
2694
2695 if (exception_mask & ~EXC_MASK_VALID) {
2696 return KERN_INVALID_ARGUMENT;
2697 }
2698
2699 thread_mtx_lock(thread);
2700
2701 if (!thread->active) {
2702 thread_mtx_unlock(thread);
2703
2704 return KERN_FAILURE;
2705 }
2706
2707 count = 0;
2708
2709 if (thread->exc_actions == NULL) {
2710 goto done;
2711 }
2712
2713 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
2714 if (exception_mask & (1 << i)) {
2715 for (j = 0; j < count; ++j) {
2716 /*
2717 * search for an identical entry, if found
2718 * set corresponding mask for this exception.
2719 */
2720 if (thread->exc_actions[i].port == ports[j] &&
2721 thread->exc_actions[i].behavior == behaviors[j] &&
2722 thread->exc_actions[i].flavor == flavors[j]) {
2723 masks[j] |= (1 << i);
2724 break;
2725 }
2726 }
2727
2728 if (j == count) {
2729 masks[j] = (1 << i);
2730 ports[j] = ipc_port_copy_send(thread->exc_actions[i].port);
2731 behaviors[j] = thread->exc_actions[i].behavior;
2732 flavors[j] = thread->exc_actions[i].flavor;
2733 ++count;
2734 if (count >= *CountCnt) {
2735 break;
2736 }
2737 }
2738 }
2739 }
2740
2741 done:
2742 thread_mtx_unlock(thread);
2743
2744 *CountCnt = count;
2745
2746 return KERN_SUCCESS;
2747 }
2748
2749 kern_return_t
2750 task_get_exception_ports(
2751 task_t task,
2752 exception_mask_t exception_mask,
2753 exception_mask_array_t masks,
2754 mach_msg_type_number_t *CountCnt,
2755 exception_port_array_t ports,
2756 exception_behavior_array_t behaviors,
2757 thread_state_flavor_array_t flavors)
2758 {
2759 unsigned int i, j, count;
2760
2761 if (task == TASK_NULL) {
2762 return KERN_INVALID_ARGUMENT;
2763 }
2764
2765 if (exception_mask & ~EXC_MASK_VALID) {
2766 return KERN_INVALID_ARGUMENT;
2767 }
2768
2769 itk_lock(task);
2770
2771 if (task->itk_self == IP_NULL) {
2772 itk_unlock(task);
2773
2774 return KERN_FAILURE;
2775 }
2776
2777 count = 0;
2778
2779 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
2780 if (exception_mask & (1 << i)) {
2781 for (j = 0; j < count; ++j) {
2782 /*
2783 * search for an identical entry, if found
2784 * set corresponding mask for this exception.
2785 */
2786 if (task->exc_actions[i].port == ports[j] &&
2787 task->exc_actions[i].behavior == behaviors[j] &&
2788 task->exc_actions[i].flavor == flavors[j]) {
2789 masks[j] |= (1 << i);
2790 break;
2791 }
2792 }
2793
2794 if (j == count) {
2795 masks[j] = (1 << i);
2796 ports[j] = ipc_port_copy_send(task->exc_actions[i].port);
2797 behaviors[j] = task->exc_actions[i].behavior;
2798 flavors[j] = task->exc_actions[i].flavor;
2799 ++count;
2800 if (count > *CountCnt) {
2801 break;
2802 }
2803 }
2804 }
2805 }
2806
2807 itk_unlock(task);
2808
2809 *CountCnt = count;
2810
2811 return KERN_SUCCESS;
2812 }