]> git.saurik.com Git - apple/xnu.git/blob - osfmk/kern/ipc_tt.c
8af9d9cb20d09eb1541f90a14d130efb1773e02a
[apple/xnu.git] / osfmk / kern / ipc_tt.c
1 /*
2 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
58 * support for mandatory and extensible security protections. This notice
59 * is included in support of clause 2.2 (b) of the Apple Public License,
60 * Version 2.0.
61 */
62 /*
63 */
64
65 /*
66 * File: ipc_tt.c
67 * Purpose:
68 * Task and thread related IPC functions.
69 */
70
71 #include <mach/mach_types.h>
72 #include <mach/boolean.h>
73 #include <mach/kern_return.h>
74 #include <mach/mach_param.h>
75 #include <mach/task_special_ports.h>
76 #include <mach/thread_special_ports.h>
77 #include <mach/thread_status.h>
78 #include <mach/exception_types.h>
79 #include <mach/memory_object_types.h>
80 #include <mach/mach_traps.h>
81 #include <mach/task_server.h>
82 #include <mach/thread_act_server.h>
83 #include <mach/mach_host_server.h>
84 #include <mach/host_priv_server.h>
85 #include <mach/vm_map_server.h>
86
87 #include <kern/kern_types.h>
88 #include <kern/host.h>
89 #include <kern/ipc_kobject.h>
90 #include <kern/ipc_tt.h>
91 #include <kern/kalloc.h>
92 #include <kern/thread.h>
93 #include <kern/misc_protos.h>
94
95 #include <vm/vm_map.h>
96 #include <vm/vm_pageout.h>
97 #include <vm/vm_protos.h>
98
99 #include <security/mac_mach_internal.h>
100
101 #if CONFIG_EMBEDDED && !SECURE_KERNEL
102 extern int cs_relax_platform_task_ports;
103 #endif
104
105 /* forward declarations */
106 task_t convert_port_to_locked_task(ipc_port_t port);
107 task_inspect_t convert_port_to_locked_task_inspect(ipc_port_t port);
108 static void ipc_port_bind_special_reply_port_locked(ipc_port_t port);
109 static kern_return_t ipc_port_unbind_special_reply_port(thread_t thread, boolean_t unbind_active_port);
110 kern_return_t task_conversion_eval(task_t caller, task_t victim);
111
112 /*
113 * Routine: ipc_task_init
114 * Purpose:
115 * Initialize a task's IPC state.
116 *
117 * If non-null, some state will be inherited from the parent.
118 * The parent must be appropriately initialized.
119 * Conditions:
120 * Nothing locked.
121 */
122
void
ipc_task_init(
	task_t          task,
	task_t          parent)
{
	ipc_space_t space;
	ipc_port_t kport;
	ipc_port_t nport;
	kern_return_t kr;
	int i;


	/* Create the task's IPC space; failure here is unrecoverable. */
	kr = ipc_space_create(&ipc_table_entries[0], &space);
	if (kr != KERN_SUCCESS) {
		panic("ipc_task_init");
	}

	space->is_task = task;

	/* kport: the task's kernel (control) port; nport: its name port. */
	kport = ipc_port_alloc_kernel();
	if (kport == IP_NULL) {
		panic("ipc_task_init");
	}

	nport = ipc_port_alloc_kernel();
	if (nport == IP_NULL) {
		panic("ipc_task_init");
	}

	itk_lock_init(task);
	task->itk_self = kport;
	task->itk_nself = nport;
	task->itk_resume = IP_NULL; /* Lazily allocated on-demand */
	if (task_is_a_corpse_fork(task)) {
		/*
		 * No sender's notification for corpse would not
		 * work with a naked send right in kernel.
		 */
		task->itk_sself = IP_NULL;
	} else {
		/* itk_sself holds a naked send right for the task's self port */
		task->itk_sself = ipc_port_make_send(kport);
	}
	task->itk_debug_control = IP_NULL;
	task->itk_space = space;

#if CONFIG_MACF
	/* Element 0 is unused; give it a NULL label, real labels start at FIRST_EXCEPTION. */
	task->exc_actions[0].label = NULL;
	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		mac_exc_associate_action_label(&task->exc_actions[i], mac_exc_create_label());
	}
#endif

	/* always zero-out the first (unused) array element */
	bzero(&task->exc_actions[0], sizeof(task->exc_actions[0]));

	if (parent == TASK_NULL) {
		/* No parent: start with empty exception actions and registered ports. */
		ipc_port_t port;
		for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
			task->exc_actions[i].port = IP_NULL;
			task->exc_actions[i].flavor = 0;
			task->exc_actions[i].behavior = 0;
			task->exc_actions[i].privileged = FALSE;
		}/* for */

		kr = host_get_host_port(host_priv_self(), &port);
		assert(kr == KERN_SUCCESS);
		task->itk_host = port;

		task->itk_bootstrap = IP_NULL;
		task->itk_seatbelt = IP_NULL;
		task->itk_gssd = IP_NULL;
		task->itk_task_access = IP_NULL;

		for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
			task->itk_registered[i] = IP_NULL;
		}
	} else {
		/* Copy send rights from the parent while holding its itk lock. */
		itk_lock(parent);
		assert(parent->itk_self != IP_NULL);

		/* inherit registered ports */

		for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
			task->itk_registered[i] =
			    ipc_port_copy_send(parent->itk_registered[i]);
		}

		/* inherit exception and bootstrap ports */

		for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
			task->exc_actions[i].port =
			    ipc_port_copy_send(parent->exc_actions[i].port);
			task->exc_actions[i].flavor =
			    parent->exc_actions[i].flavor;
			task->exc_actions[i].behavior =
			    parent->exc_actions[i].behavior;
			task->exc_actions[i].privileged =
			    parent->exc_actions[i].privileged;
#if CONFIG_MACF
			mac_exc_inherit_action_label(parent->exc_actions + i, task->exc_actions + i);
#endif
		}/* for */
		task->itk_host =
		    ipc_port_copy_send(parent->itk_host);

		task->itk_bootstrap =
		    ipc_port_copy_send(parent->itk_bootstrap);

		task->itk_seatbelt =
		    ipc_port_copy_send(parent->itk_seatbelt);

		task->itk_gssd =
		    ipc_port_copy_send(parent->itk_gssd);

		task->itk_task_access =
		    ipc_port_copy_send(parent->itk_task_access);

		itk_unlock(parent);
	}
}
243
244 /*
245 * Routine: ipc_task_enable
246 * Purpose:
247 * Enable a task for IPC access.
248 * Conditions:
249 * Nothing locked.
250 */
251
252 void
253 ipc_task_enable(
254 task_t task)
255 {
256 ipc_port_t kport;
257 ipc_port_t nport;
258
259 itk_lock(task);
260 kport = task->itk_self;
261 if (kport != IP_NULL) {
262 ipc_kobject_set(kport, (ipc_kobject_t) task, IKOT_TASK);
263 }
264 nport = task->itk_nself;
265 if (nport != IP_NULL) {
266 ipc_kobject_set(nport, (ipc_kobject_t) task, IKOT_TASK_NAME);
267 }
268 itk_unlock(task);
269 }
270
271 /*
272 * Routine: ipc_task_disable
273 * Purpose:
274 * Disable IPC access to a task.
275 * Conditions:
276 * Nothing locked.
277 */
278
void
ipc_task_disable(
	task_t task)
{
	ipc_port_t kport;
	ipc_port_t nport;
	ipc_port_t rport;

	itk_lock(task);
	kport = task->itk_self;
	if (kport != IP_NULL) {
		/* detach the task from its kernel port; the port stops naming it */
		ipc_kobject_set(kport, IKO_NULL, IKOT_NONE);
	}
	nport = task->itk_nself;
	if (nport != IP_NULL) {
		/* likewise for the name port */
		ipc_kobject_set(nport, IKO_NULL, IKOT_NONE);
	}

	rport = task->itk_resume;
	if (rport != IP_NULL) {
		/*
		 * From this point onwards this task is no longer accepting
		 * resumptions.
		 *
		 * There are still outstanding suspensions on this task,
		 * even as it is being torn down. Disconnect the task
		 * from the rport, thereby "orphaning" the rport. The rport
		 * itself will go away only when the last suspension holder
		 * destroys his SO right to it -- when he either
		 * exits, or tries to actually use that last SO right to
		 * resume this (now non-existent) task.
		 */
		ipc_kobject_set(rport, IKO_NULL, IKOT_NONE);
	}
	itk_unlock(task);
}
315
316 /*
317 * Routine: ipc_task_terminate
318 * Purpose:
319 * Clean up and destroy a task's IPC state.
320 * Conditions:
321 * Nothing locked. The task must be suspended.
322 * (Or the current thread must be in the task.)
323 */
324
void
ipc_task_terminate(
	task_t          task)
{
	ipc_port_t kport;
	ipc_port_t nport;
	ipc_port_t rport;
	int i;

	/* Snapshot and clear the kernel ports under the itk lock. */
	itk_lock(task);
	kport = task->itk_self;

	if (kport == IP_NULL) {
		/* the task is already terminated (can this happen?) */
		itk_unlock(task);
		return;
	}
	task->itk_self = IP_NULL;

	nport = task->itk_nself;
	assert(nport != IP_NULL);
	task->itk_nself = IP_NULL;

	rport = task->itk_resume;
	task->itk_resume = IP_NULL;

	itk_unlock(task);

	/*
	 * release the naked send rights
	 * (done unlocked; itk_self is now IP_NULL so no one else resets these)
	 */

	if (IP_VALID(task->itk_sself)) {
		ipc_port_release_send(task->itk_sself);
	}

	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		if (IP_VALID(task->exc_actions[i].port)) {
			ipc_port_release_send(task->exc_actions[i].port);
		}
#if CONFIG_MACF
		mac_exc_free_action_label(task->exc_actions + i);
#endif
	}

	if (IP_VALID(task->itk_host)) {
		ipc_port_release_send(task->itk_host);
	}

	if (IP_VALID(task->itk_bootstrap)) {
		ipc_port_release_send(task->itk_bootstrap);
	}

	if (IP_VALID(task->itk_seatbelt)) {
		ipc_port_release_send(task->itk_seatbelt);
	}

	if (IP_VALID(task->itk_gssd)) {
		ipc_port_release_send(task->itk_gssd);
	}

	if (IP_VALID(task->itk_task_access)) {
		ipc_port_release_send(task->itk_task_access);
	}

	if (IP_VALID(task->itk_debug_control)) {
		ipc_port_release_send(task->itk_debug_control);
	}

	for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
		if (IP_VALID(task->itk_registered[i])) {
			ipc_port_release_send(task->itk_registered[i]);
		}
	}

	/* destroy the kernel ports (receive rights) */
	ipc_port_dealloc_kernel(kport);
	ipc_port_dealloc_kernel(nport);
	if (rport != IP_NULL) {
		ipc_port_dealloc_kernel(rport);
	}

	itk_lock_destroy(task);
}
407
408 /*
409 * Routine: ipc_task_reset
410 * Purpose:
411 * Reset a task's IPC state to protect it when
412 * it enters an elevated security context. The
413 * task name port can remain the same - since
414 * it represents no specific privilege.
415 * Conditions:
416 * Nothing locked. The task must be suspended.
417 * (Or the current thread must be in the task.)
418 */
419
void
ipc_task_reset(
	task_t          task)
{
	ipc_port_t old_kport, new_kport;
	ipc_port_t old_sself;
	ipc_port_t old_exc_actions[EXC_TYPES_COUNT];
	int i;

#if CONFIG_MACF
	/* Fresh label to unset credentials in existing labels. */
	struct label *unset_label = mac_exc_create_label();
#endif

	/* Allocate the replacement kernel port (with a naked send right) up front. */
	new_kport = ipc_kobject_alloc_port((ipc_kobject_t)task, IKOT_TASK,
	    IPC_KOBJECT_ALLOC_MAKE_SEND);

	itk_lock(task);

	old_kport = task->itk_self;

	if (old_kport == IP_NULL) {
		/* the task is already terminated (can this happen?) */
		itk_unlock(task);
		/* undo the speculative allocation: drop send right, then receive right */
		ipc_port_release_send(new_kport);
		ipc_port_dealloc_kernel(new_kport);
#if CONFIG_MACF
		mac_exc_free_label(unset_label);
#endif
		return;
	}

	old_sself = task->itk_sself;
	task->itk_sself = task->itk_self = new_kport;

	/* Set the old kport to IKOT_NONE and update the exec token while under the port lock */
	ip_lock(old_kport);
	ipc_kobject_set_atomically(old_kport, IKO_NULL, IKOT_NONE);
	task->exec_token += 1;
	ip_unlock(old_kport);

	/* Strip non-privileged exception ports; privileged ones survive the reset. */
	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		old_exc_actions[i] = IP_NULL;

		if (i == EXC_CORPSE_NOTIFY && task_corpse_pending_report(task)) {
			/* keep the corpse-notify port while a report is pending */
			continue;
		}

		if (!task->exc_actions[i].privileged) {
#if CONFIG_MACF
			mac_exc_update_action_label(task->exc_actions + i, unset_label);
#endif
			old_exc_actions[i] = task->exc_actions[i].port;
			task->exc_actions[i].port = IP_NULL;
		}
	}/* for */

	if (IP_VALID(task->itk_debug_control)) {
		ipc_port_release_send(task->itk_debug_control);
	}
	task->itk_debug_control = IP_NULL;

	itk_unlock(task);

#if CONFIG_MACF
	mac_exc_free_label(unset_label);
#endif

	/* release the naked send rights (after dropping the itk lock) */

	if (IP_VALID(old_sself)) {
		ipc_port_release_send(old_sself);
	}

	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		if (IP_VALID(old_exc_actions[i])) {
			ipc_port_release_send(old_exc_actions[i]);
		}
	}/* for */

	/* destroy the kernel port */
	ipc_port_dealloc_kernel(old_kport);
}
503
504 /*
505 * Routine: ipc_thread_init
506 * Purpose:
507 * Initialize a thread's IPC state.
508 * Conditions:
509 * Nothing locked.
510 */
511
512 void
513 ipc_thread_init(
514 thread_t thread)
515 {
516 ipc_port_t kport;
517
518 kport = ipc_kobject_alloc_port((ipc_kobject_t)thread, IKOT_THREAD,
519 IPC_KOBJECT_ALLOC_MAKE_SEND);
520
521 thread->ith_sself = thread->ith_self = kport;
522 thread->ith_special_reply_port = NULL;
523 thread->exc_actions = NULL;
524
525 #if IMPORTANCE_INHERITANCE
526 thread->ith_assertions = 0;
527 #endif
528
529 ipc_kmsg_queue_init(&thread->ith_messages);
530
531 thread->ith_rpc_reply = IP_NULL;
532 }
533
/*
 * Lazily allocate and zero a thread's per-thread exception action table
 * (one slot per exception type), attaching fresh MAC labels when MACF
 * is configured. Caller must ensure no table exists yet.
 */
void
ipc_thread_init_exc_actions(
	thread_t thread)
{
	assert(thread->exc_actions == NULL);

	/*
	 * NOTE(review): the kalloc() result is used unchecked by the bzero
	 * below — presumably kalloc cannot fail (or panics) for this size;
	 * confirm against the kalloc contract.
	 */
	thread->exc_actions = kalloc(sizeof(struct exception_action) * EXC_TYPES_COUNT);
	bzero(thread->exc_actions, sizeof(struct exception_action) * EXC_TYPES_COUNT);

#if CONFIG_MACF
	for (size_t i = 0; i < EXC_TYPES_COUNT; ++i) {
		mac_exc_associate_action_label(thread->exc_actions + i, mac_exc_create_label());
	}
#endif
}
549
550 void
551 ipc_thread_destroy_exc_actions(
552 thread_t thread)
553 {
554 if (thread->exc_actions != NULL) {
555 #if CONFIG_MACF
556 for (size_t i = 0; i < EXC_TYPES_COUNT; ++i) {
557 mac_exc_free_action_label(thread->exc_actions + i);
558 }
559 #endif
560
561 kfree(thread->exc_actions,
562 sizeof(struct exception_action) * EXC_TYPES_COUNT);
563 thread->exc_actions = NULL;
564 }
565 }
566
567 void
568 ipc_thread_disable(
569 thread_t thread)
570 {
571 ipc_port_t kport = thread->ith_self;
572
573 if (kport != IP_NULL) {
574 ipc_kobject_set(kport, IKO_NULL, IKOT_NONE);
575 }
576
577 /* unbind the thread special reply port */
578 if (IP_VALID(thread->ith_special_reply_port)) {
579 ipc_port_unbind_special_reply_port(thread, TRUE);
580 }
581 }
582
583 /*
584 * Routine: ipc_thread_terminate
585 * Purpose:
586 * Clean up and destroy a thread's IPC state.
587 * Conditions:
588 * Nothing locked.
589 */
590
void
ipc_thread_terminate(
	thread_t        thread)
{
	ipc_port_t kport = thread->ith_self;

	if (kport != IP_NULL) {
		int i;

		/* drop the user-visible self send right, if interposed or naked */
		if (IP_VALID(thread->ith_sself)) {
			ipc_port_release_send(thread->ith_sself);
		}

		thread->ith_sself = thread->ith_self = IP_NULL;

		/* release exception-port send rights, then free the table */
		if (thread->exc_actions != NULL) {
			for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
				if (IP_VALID(thread->exc_actions[i].port)) {
					ipc_port_release_send(thread->exc_actions[i].port);
				}
			}
			ipc_thread_destroy_exc_actions(thread);
		}

		/* destroy the kernel port (receive right) */
		ipc_port_dealloc_kernel(kport);
	}

#if IMPORTANCE_INHERITANCE
	assert(thread->ith_assertions == 0);
#endif

	/* no messages may remain queued for a terminating thread */
	assert(ipc_kmsg_queue_empty(&thread->ith_messages));

	if (thread->ith_rpc_reply != IP_NULL) {
		ipc_port_dealloc_reply(thread->ith_rpc_reply);
	}

	thread->ith_rpc_reply = IP_NULL;
}
630
631 /*
632 * Routine: ipc_thread_reset
633 * Purpose:
634 * Reset the IPC state for a given Mach thread when
635 * its task enters an elevated security context.
636 * Both the thread port and its exception ports have
637 * to be reset. Its RPC reply port cannot have any
638 * rights outstanding, so it should be fine.
639 * Conditions:
640 * Nothing locked.
641 */
642
void
ipc_thread_reset(
	thread_t        thread)
{
	ipc_port_t old_kport, new_kport;
	ipc_port_t old_sself;
	ipc_port_t old_exc_actions[EXC_TYPES_COUNT];
	boolean_t  has_old_exc_actions = FALSE;
	int i;

#if CONFIG_MACF
	struct label *new_label = mac_exc_create_label();
#endif

	/* Allocate the replacement kernel port (with a naked send right) up front. */
	new_kport = ipc_kobject_alloc_port((ipc_kobject_t)thread, IKOT_THREAD,
	    IPC_KOBJECT_ALLOC_MAKE_SEND);

	thread_mtx_lock(thread);

	old_kport = thread->ith_self;
	old_sself = thread->ith_sself;

	if (old_kport == IP_NULL && thread->inspection == FALSE) {
		/* the thread is already terminated (can this happen?) */
		thread_mtx_unlock(thread);
		/* undo the speculative allocation: drop send right, then receive right */
		ipc_port_release_send(new_kport);
		ipc_port_dealloc_kernel(new_kport);
#if CONFIG_MACF
		mac_exc_free_label(new_label);
#endif
		return;
	}

	thread->ith_sself = thread->ith_self = new_kport;
	if (old_kport != IP_NULL) {
		ipc_kobject_set(old_kport, IKO_NULL, IKOT_NONE);
	}

	/*
	 * Only ports that were set by root-owned processes
	 * (privileged ports) should survive
	 */
	if (thread->exc_actions != NULL) {
		has_old_exc_actions = TRUE;
		for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
			if (thread->exc_actions[i].privileged) {
				old_exc_actions[i] = IP_NULL;
			} else {
#if CONFIG_MACF
				mac_exc_update_action_label(thread->exc_actions + i, new_label);
#endif
				old_exc_actions[i] = thread->exc_actions[i].port;
				thread->exc_actions[i].port = IP_NULL;
			}
		}
	}

	thread_mtx_unlock(thread);

#if CONFIG_MACF
	mac_exc_free_label(new_label);
#endif

	/* release the naked send rights (after dropping the thread mutex) */

	if (IP_VALID(old_sself)) {
		ipc_port_release_send(old_sself);
	}

	if (has_old_exc_actions) {
		for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
			ipc_port_release_send(old_exc_actions[i]);
		}
	}

	/* destroy the kernel port */
	if (old_kport != IP_NULL) {
		ipc_port_dealloc_kernel(old_kport);
	}

	/* unbind the thread special reply port */
	if (IP_VALID(thread->ith_special_reply_port)) {
		ipc_port_unbind_special_reply_port(thread, TRUE);
	}
}
728
729 /*
730 * Routine: retrieve_task_self_fast
731 * Purpose:
732 * Optimized version of retrieve_task_self,
733 * that only works for the current task.
734 *
735 * Return a send right (possibly null/dead)
736 * for the task's user-visible self port.
737 * Conditions:
738 * Nothing locked.
739 */
740
741 ipc_port_t
742 retrieve_task_self_fast(
743 task_t task)
744 {
745 __assert_only ipc_port_t sright;
746 ipc_port_t port;
747
748 assert(task == current_task());
749
750 itk_lock(task);
751 assert(task->itk_self != IP_NULL);
752
753 if ((port = task->itk_sself) == task->itk_self) {
754 /* no interposing */
755 sright = ipc_port_copy_send(port);
756 assert(sright == port);
757 } else {
758 port = ipc_port_copy_send(port);
759 }
760 itk_unlock(task);
761
762 return port;
763 }
764
765 /*
766 * Routine: retrieve_thread_self_fast
767 * Purpose:
768 * Return a send right (possibly null/dead)
769 * for the thread's user-visible self port.
770 *
771 * Only works for the current thread.
772 *
773 * Conditions:
774 * Nothing locked.
775 */
776
777 ipc_port_t
778 retrieve_thread_self_fast(
779 thread_t thread)
780 {
781 __assert_only ipc_port_t sright;
782 ipc_port_t port;
783
784 assert(thread == current_thread());
785
786 thread_mtx_lock(thread);
787
788 assert(thread->ith_self != IP_NULL);
789
790 if ((port = thread->ith_sself) == thread->ith_self) {
791 /* no interposing */
792 sright = ipc_port_copy_send(port);
793 assert(sright == port);
794 } else {
795 port = ipc_port_copy_send(port);
796 }
797
798 thread_mtx_unlock(thread);
799
800 return port;
801 }
802
803 /*
804 * Routine: task_self_trap [mach trap]
805 * Purpose:
806 * Give the caller send rights for his own task port.
807 * Conditions:
808 * Nothing locked.
809 * Returns:
810 * MACH_PORT_NULL if there are any resource failures
811 * or other errors.
812 */
813
814 mach_port_name_t
815 task_self_trap(
816 __unused struct task_self_trap_args *args)
817 {
818 task_t task = current_task();
819 ipc_port_t sright;
820 mach_port_name_t name;
821
822 sright = retrieve_task_self_fast(task);
823 name = ipc_port_copyout_send(sright, task->itk_space);
824 return name;
825 }
826
827 /*
828 * Routine: thread_self_trap [mach trap]
829 * Purpose:
830 * Give the caller send rights for his own thread port.
831 * Conditions:
832 * Nothing locked.
833 * Returns:
834 * MACH_PORT_NULL if there are any resource failures
835 * or other errors.
836 */
837
838 mach_port_name_t
839 thread_self_trap(
840 __unused struct thread_self_trap_args *args)
841 {
842 thread_t thread = current_thread();
843 task_t task = thread->task;
844 ipc_port_t sright;
845 mach_port_name_t name;
846
847 sright = retrieve_thread_self_fast(thread);
848 name = ipc_port_copyout_send(sright, task->itk_space);
849 return name;
850 }
851
852 /*
853 * Routine: mach_reply_port [mach trap]
854 * Purpose:
855 * Allocate a port for the caller.
856 * Conditions:
857 * Nothing locked.
858 * Returns:
859 * MACH_PORT_NULL if there are any resource failures
860 * or other errors.
861 */
862
863 mach_port_name_t
864 mach_reply_port(
865 __unused struct mach_reply_port_args *args)
866 {
867 ipc_port_t port;
868 mach_port_name_t name;
869 kern_return_t kr;
870
871 kr = ipc_port_alloc(current_task()->itk_space, IPC_PORT_INIT_MESSAGE_QUEUE,
872 &name, &port);
873 if (kr == KERN_SUCCESS) {
874 ip_unlock(port);
875 } else {
876 name = MACH_PORT_NULL;
877 }
878 return name;
879 }
880
881 /*
882 * Routine: thread_get_special_reply_port [mach trap]
883 * Purpose:
884 * Allocate a special reply port for the calling thread.
885 * Conditions:
886 * Nothing locked.
887 * Returns:
888 * mach_port_name_t: send right & receive right for special reply port.
889 * MACH_PORT_NULL if there are any resource failures
890 * or other errors.
891 */
892
mach_port_name_t
thread_get_special_reply_port(
	__unused struct thread_get_special_reply_port_args *args)
{
	ipc_port_t port;
	mach_port_name_t name;
	kern_return_t kr;
	thread_t thread = current_thread();
	/* new port carries a send right and is marked as a special reply port */
	ipc_port_init_flags_t flags = IPC_PORT_INIT_MESSAGE_QUEUE |
	    IPC_PORT_INIT_MAKE_SEND_RIGHT | IPC_PORT_INIT_SPECIAL_REPLY;

	/* unbind the thread special reply port (only one may be bound at a time) */
	if (IP_VALID(thread->ith_special_reply_port)) {
		kr = ipc_port_unbind_special_reply_port(thread, TRUE);
		if (kr != KERN_SUCCESS) {
			return MACH_PORT_NULL;
		}
	}

	kr = ipc_port_alloc(current_task()->itk_space, flags, &name, &port);
	if (kr == KERN_SUCCESS) {
		/* port comes back locked from ipc_port_alloc; bind then unlock */
		ipc_port_bind_special_reply_port_locked(port);
		ip_unlock(port);
	} else {
		name = MACH_PORT_NULL;
	}
	return name;
}
921
922 /*
923 * Routine: ipc_port_bind_special_reply_port_locked
924 * Purpose:
925 * Bind the given port to current thread as a special reply port.
926 * Conditions:
927 * Port locked.
928 * Returns:
929 * None.
930 */
931
932 static void
933 ipc_port_bind_special_reply_port_locked(
934 ipc_port_t port)
935 {
936 thread_t thread = current_thread();
937 assert(thread->ith_special_reply_port == NULL);
938 assert(port->ip_specialreply);
939 assert(port->ip_sync_link_state == PORT_SYNC_LINK_ANY);
940
941 ip_reference(port);
942 thread->ith_special_reply_port = port;
943 port->ip_messages.imq_srp_owner_thread = thread;
944
945 ipc_special_reply_port_bits_reset(port);
946 }
947
948 /*
949 * Routine: ipc_port_unbind_special_reply_port
950 * Purpose:
951 * Unbind the thread's special reply port.
952 * If the special port has threads waiting on turnstile,
 *	If the special port has threads waiting on turnstile,
 *	update its inheritor.
954 * Condition:
955 * Nothing locked.
956 * Returns:
957 * None.
958 */
static kern_return_t
ipc_port_unbind_special_reply_port(
	thread_t thread,
	boolean_t unbind_active_port)
{
	ipc_port_t special_reply_port = thread->ith_special_reply_port;

	ip_lock(special_reply_port);

	/* Return error if port active and unbind_active_port set to FALSE */
	if (unbind_active_port == FALSE && ip_active(special_reply_port)) {
		ip_unlock(special_reply_port);
		return KERN_FAILURE;
	}

	/* sever the thread<->port link; the adjust call unlocks the port */
	thread->ith_special_reply_port = NULL;
	ipc_port_adjust_special_reply_port_locked(special_reply_port, NULL,
	    IPC_PORT_ADJUST_UNLINK_THREAD, FALSE);
	/* port unlocked */

	/* drop the reference taken when the port was bound */
	ip_release(special_reply_port);
	return KERN_SUCCESS;
}
982
983 /*
984 * Routine: thread_get_special_port [kernel call]
985 * Purpose:
986 * Clones a send right for one of the thread's
987 * special ports.
988 * Conditions:
989 * Nothing locked.
990 * Returns:
991 * KERN_SUCCESS Extracted a send right.
992 * KERN_INVALID_ARGUMENT The thread is null.
993 * KERN_FAILURE The thread is dead.
994 * KERN_INVALID_ARGUMENT Invalid special port.
995 */
996
997 kern_return_t
998 thread_get_special_port(
999 thread_t thread,
1000 int which,
1001 ipc_port_t *portp)
1002 {
1003 kern_return_t result = KERN_SUCCESS;
1004 ipc_port_t *whichp;
1005
1006 if (thread == THREAD_NULL) {
1007 return KERN_INVALID_ARGUMENT;
1008 }
1009
1010 switch (which) {
1011 case THREAD_KERNEL_PORT:
1012 whichp = &thread->ith_sself;
1013 break;
1014
1015 default:
1016 return KERN_INVALID_ARGUMENT;
1017 }
1018
1019 thread_mtx_lock(thread);
1020
1021 if (thread->active) {
1022 *portp = ipc_port_copy_send(*whichp);
1023 } else {
1024 result = KERN_FAILURE;
1025 }
1026
1027 thread_mtx_unlock(thread);
1028
1029 return result;
1030 }
1031
1032 /*
1033 * Routine: thread_set_special_port [kernel call]
1034 * Purpose:
1035 * Changes one of the thread's special ports,
1036 * setting it to the supplied send right.
1037 * Conditions:
1038 * Nothing locked. If successful, consumes
1039 * the supplied send right.
1040 * Returns:
1041 * KERN_SUCCESS Changed the special port.
1042 * KERN_INVALID_ARGUMENT The thread is null.
1043 * KERN_FAILURE The thread is dead.
1044 * KERN_INVALID_ARGUMENT Invalid special port.
1045 */
1046
1047 kern_return_t
1048 thread_set_special_port(
1049 thread_t thread,
1050 int which,
1051 ipc_port_t port)
1052 {
1053 kern_return_t result = KERN_SUCCESS;
1054 ipc_port_t *whichp, old = IP_NULL;
1055
1056 if (thread == THREAD_NULL) {
1057 return KERN_INVALID_ARGUMENT;
1058 }
1059
1060 switch (which) {
1061 case THREAD_KERNEL_PORT:
1062 whichp = &thread->ith_sself;
1063 break;
1064
1065 default:
1066 return KERN_INVALID_ARGUMENT;
1067 }
1068
1069 thread_mtx_lock(thread);
1070
1071 if (thread->active) {
1072 old = *whichp;
1073 *whichp = port;
1074 } else {
1075 result = KERN_FAILURE;
1076 }
1077
1078 thread_mtx_unlock(thread);
1079
1080 if (IP_VALID(old)) {
1081 ipc_port_release_send(old);
1082 }
1083
1084 return result;
1085 }
1086
1087 /*
1088 * Routine: task_get_special_port [kernel call]
1089 * Purpose:
1090 * Clones a send right for one of the task's
1091 * special ports.
1092 * Conditions:
1093 * Nothing locked.
1094 * Returns:
1095 * KERN_SUCCESS Extracted a send right.
1096 * KERN_INVALID_ARGUMENT The task is null.
1097 * KERN_FAILURE The task/space is dead.
1098 * KERN_INVALID_ARGUMENT Invalid special port.
1099 */
1100
1101 kern_return_t
1102 task_get_special_port(
1103 task_t task,
1104 int which,
1105 ipc_port_t *portp)
1106 {
1107 ipc_port_t port;
1108
1109 if (task == TASK_NULL) {
1110 return KERN_INVALID_ARGUMENT;
1111 }
1112
1113 itk_lock(task);
1114 if (task->itk_self == IP_NULL) {
1115 itk_unlock(task);
1116 return KERN_FAILURE;
1117 }
1118
1119 switch (which) {
1120 case TASK_KERNEL_PORT:
1121 port = ipc_port_copy_send(task->itk_sself);
1122 break;
1123
1124 case TASK_NAME_PORT:
1125 port = ipc_port_make_send(task->itk_nself);
1126 break;
1127
1128 case TASK_HOST_PORT:
1129 port = ipc_port_copy_send(task->itk_host);
1130 break;
1131
1132 case TASK_BOOTSTRAP_PORT:
1133 port = ipc_port_copy_send(task->itk_bootstrap);
1134 break;
1135
1136 case TASK_SEATBELT_PORT:
1137 port = ipc_port_copy_send(task->itk_seatbelt);
1138 break;
1139
1140 case TASK_ACCESS_PORT:
1141 port = ipc_port_copy_send(task->itk_task_access);
1142 break;
1143
1144 case TASK_DEBUG_CONTROL_PORT:
1145 port = ipc_port_copy_send(task->itk_debug_control);
1146 break;
1147
1148 default:
1149 itk_unlock(task);
1150 return KERN_INVALID_ARGUMENT;
1151 }
1152 itk_unlock(task);
1153
1154 *portp = port;
1155 return KERN_SUCCESS;
1156 }
1157
1158 /*
1159 * Routine: task_set_special_port [kernel call]
1160 * Purpose:
1161 * Changes one of the task's special ports,
1162 * setting it to the supplied send right.
1163 * Conditions:
1164 * Nothing locked. If successful, consumes
1165 * the supplied send right.
1166 * Returns:
1167 * KERN_SUCCESS Changed the special port.
1168 * KERN_INVALID_ARGUMENT The task is null.
1169 * KERN_FAILURE The task/space is dead.
1170 * KERN_INVALID_ARGUMENT Invalid special port.
1171 * KERN_NO_ACCESS Attempted overwrite of seatbelt port.
1172 */
1173
1174 kern_return_t
1175 task_set_special_port(
1176 task_t task,
1177 int which,
1178 ipc_port_t port)
1179 {
1180 ipc_port_t *whichp;
1181 ipc_port_t old;
1182
1183 if (task == TASK_NULL) {
1184 return KERN_INVALID_ARGUMENT;
1185 }
1186
1187 if (task_is_driver(current_task())) {
1188 return KERN_NO_ACCESS;
1189 }
1190
1191 switch (which) {
1192 case TASK_KERNEL_PORT:
1193 whichp = &task->itk_sself;
1194 break;
1195
1196 case TASK_HOST_PORT:
1197 whichp = &task->itk_host;
1198 break;
1199
1200 case TASK_BOOTSTRAP_PORT:
1201 whichp = &task->itk_bootstrap;
1202 break;
1203
1204 case TASK_SEATBELT_PORT:
1205 whichp = &task->itk_seatbelt;
1206 break;
1207
1208 case TASK_ACCESS_PORT:
1209 whichp = &task->itk_task_access;
1210 break;
1211
1212 case TASK_DEBUG_CONTROL_PORT:
1213 whichp = &task->itk_debug_control;
1214 break;
1215
1216 default:
1217 return KERN_INVALID_ARGUMENT;
1218 }/* switch */
1219
1220 itk_lock(task);
1221 if (task->itk_self == IP_NULL) {
1222 itk_unlock(task);
1223 return KERN_FAILURE;
1224 }
1225
1226 /* do not allow overwrite of seatbelt or task access ports */
1227 if ((TASK_SEATBELT_PORT == which || TASK_ACCESS_PORT == which)
1228 && IP_VALID(*whichp)) {
1229 itk_unlock(task);
1230 return KERN_NO_ACCESS;
1231 }
1232
1233 old = *whichp;
1234 *whichp = port;
1235 itk_unlock(task);
1236
1237 if (IP_VALID(old)) {
1238 ipc_port_release_send(old);
1239 }
1240 return KERN_SUCCESS;
1241 }
1242
1243
1244 /*
1245 * Routine: mach_ports_register [kernel call]
1246 * Purpose:
1247 * Stash a handful of port send rights in the task.
1248 * Child tasks will inherit these rights, but they
1249 * must use mach_ports_lookup to acquire them.
1250 *
1251 * The rights are supplied in a (wired) kalloc'd segment.
1252 * Rights which aren't supplied are assumed to be null.
1253 * Conditions:
1254 * Nothing locked. If successful, consumes
1255 * the supplied rights and memory.
1256 * Returns:
1257 * KERN_SUCCESS Stashed the port rights.
1258 * KERN_INVALID_ARGUMENT The task is null.
1259 * KERN_INVALID_ARGUMENT The task is dead.
1260 * KERN_INVALID_ARGUMENT The memory param is null.
1261 * KERN_INVALID_ARGUMENT Too many port rights supplied.
1262 */
1263
kern_return_t
mach_ports_register(
	task_t                  task,
	mach_port_array_t       memory,
	mach_msg_type_number_t  portsCnt)
{
	ipc_port_t ports[TASK_PORT_REGISTER_MAX];
	unsigned int i;

	if ((task == TASK_NULL) ||
	    (portsCnt > TASK_PORT_REGISTER_MAX) ||
	    (portsCnt && memory == NULL)) {
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 * Pad the port rights with nulls.
	 */

	for (i = 0; i < portsCnt; i++) {
		ports[i] = memory[i];
	}
	for (; i < TASK_PORT_REGISTER_MAX; i++) {
		ports[i] = IP_NULL;
	}

	itk_lock(task);
	/* itk_self goes IP_NULL once the task's IPC state is torn down */
	if (task->itk_self == IP_NULL) {
		itk_unlock(task);
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 * Replace the old send rights with the new.
	 * Release the old rights after unlocking.
	 */

	for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
		ipc_port_t old;

		old = task->itk_registered[i];
		task->itk_registered[i] = ports[i];
		ports[i] = old; /* reuse the stack array to stage old rights */
	}

	itk_unlock(task);

	for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
		if (IP_VALID(ports[i])) {
			ipc_port_release_send(ports[i]);
		}
	}

	/*
	 * Now that the operation is known to be successful,
	 * we can free the memory.
	 */

	if (portsCnt != 0) {
		kfree(memory,
		    (vm_size_t) (portsCnt * sizeof(mach_port_t)));
	}

	return KERN_SUCCESS;
}
1329
1330 /*
1331 * Routine: mach_ports_lookup [kernel call]
1332 * Purpose:
1333 * Retrieves (clones) the stashed port send rights.
1334 * Conditions:
1335 * Nothing locked. If successful, the caller gets
1336 * rights and memory.
1337 * Returns:
1338 * KERN_SUCCESS Retrieved the send rights.
1339 * KERN_INVALID_ARGUMENT The task is null.
1340 * KERN_INVALID_ARGUMENT The task is dead.
1341 * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
1342 */
1343
kern_return_t
mach_ports_lookup(
	task_t                  task,
	mach_port_array_t       *portsp,
	mach_msg_type_number_t  *portsCnt)
{
	void *memory;
	vm_size_t size;
	ipc_port_t *ports;
	int i;

	if (task == TASK_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	size = (vm_size_t) (TASK_PORT_REGISTER_MAX * sizeof(ipc_port_t));

	/* allocate before taking the itk lock so we never block holding it */
	memory = kalloc(size);
	if (memory == 0) {
		return KERN_RESOURCE_SHORTAGE;
	}

	itk_lock(task);
	/* task's IPC state already torn down: undo the allocation */
	if (task->itk_self == IP_NULL) {
		itk_unlock(task);

		kfree(memory, size);
		return KERN_INVALID_ARGUMENT;
	}

	ports = (ipc_port_t *) memory;

	/*
	 * Clone port rights. Because kalloc'd memory
	 * is wired, we won't fault while holding the task lock.
	 */

	for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
		ports[i] = ipc_port_copy_send(task->itk_registered[i]);
	}

	itk_unlock(task);

	/* caller now owns both the cloned rights and the array */
	*portsp = (mach_port_array_t) ports;
	*portsCnt = TASK_PORT_REGISTER_MAX;
	return KERN_SUCCESS;
}
1391
1392 extern zone_t task_zone;
1393
/*
 * Security policy gate for converting a task port into a task reference.
 * Returns KERN_SUCCESS when 'caller' may resolve 'victim''s task port,
 * KERN_INVALID_SECURITY otherwise.
 */
kern_return_t
task_conversion_eval(task_t caller, task_t victim)
{
	/*
	 * Tasks are allowed to resolve their own task ports, and the kernel is
	 * allowed to resolve anyone's task port.
	 */
	if (caller == kernel_task) {
		return KERN_SUCCESS;
	}

	if (caller == victim) {
		return KERN_SUCCESS;
	}

	/*
	 * Only the kernel can resolve the kernel's task port. We've established
	 * by this point that the caller is not kernel_task.
	 */
	if (victim == TASK_NULL || victim == kernel_task) {
		return KERN_INVALID_SECURITY;
	}

	/* validate that 'victim' genuinely came from task_zone
	 * (defense against forged task pointers) */
	zone_require(victim, task_zone);

#if CONFIG_EMBEDDED
	/*
	 * On embedded platforms, only a platform binary can resolve the task port
	 * of another platform binary.
	 */
	if ((victim->t_flags & TF_PLATFORM) && !(caller->t_flags & TF_PLATFORM)) {
#if SECURE_KERNEL
		return KERN_INVALID_SECURITY;
#else
		/* development kernels can relax this via boot-arg */
		if (cs_relax_platform_task_ports) {
			return KERN_SUCCESS;
		} else {
			return KERN_INVALID_SECURITY;
		}
#endif /* SECURE_KERNEL */
	}
#endif /* CONFIG_EMBEDDED */

	return KERN_SUCCESS;
}
1439
1440 /*
1441 * Routine: convert_port_to_locked_task
1442 * Purpose:
1443 * Internal helper routine to convert from a port to a locked
1444 * task. Used by several routines that try to convert from a
1445 * task port to a reference on some task related object.
1446 * Conditions:
1447 * Nothing locked, blocking OK.
1448 */
task_t
convert_port_to_locked_task(ipc_port_t port)
{
	int try_failed_count = 0;

	while (IP_VALID(port)) {
		task_t ct = current_task();
		task_t task;

		ip_lock(port);
		if (!ip_active(port) || (ip_kotype(port) != IKOT_TASK)) {
			ip_unlock(port);
			return TASK_NULL;
		}
		task = (task_t) port->ip_kobject;
		assert(task != TASK_NULL);

		/* apply the task-port conversion security policy */
		if (task_conversion_eval(ct, task)) {
			ip_unlock(port);
			return TASK_NULL;
		}

		/*
		 * Normal lock ordering puts task_lock() before ip_lock().
		 * Attempt out-of-order locking here.
		 */
		if (task_lock_try(task)) {
			/* success: return with the task locked, port unlocked */
			ip_unlock(port);
			return task;
		}
		try_failed_count++;

		/* back off and retry; the port is revalidated each iteration */
		ip_unlock(port);
		mutex_pause(try_failed_count);
	}
	return TASK_NULL;
}
1486
1487 /*
1488 * Routine: convert_port_to_locked_task_inspect
1489 * Purpose:
1490 * Internal helper routine to convert from a port to a locked
1491 * task inspect right. Used by internal routines that try to convert from a
1492 * task inspect port to a reference on some task related object.
1493 * Conditions:
1494 * Nothing locked, blocking OK.
1495 */
task_inspect_t
convert_port_to_locked_task_inspect(ipc_port_t port)
{
	int try_failed_count = 0;

	while (IP_VALID(port)) {
		task_inspect_t task;

		ip_lock(port);
		if (!ip_active(port) || (ip_kotype(port) != IKOT_TASK)) {
			ip_unlock(port);
			return TASK_INSPECT_NULL;
		}
		task = (task_inspect_t)port->ip_kobject;
		assert(task != TASK_INSPECT_NULL);
		/*
		 * NOTE(review): unlike convert_port_to_locked_task, no
		 * task_conversion_eval() gate is applied here — inspect
		 * rights are weaker; confirm this is intentional upstream.
		 *
		 * Normal lock ordering puts task_lock() before ip_lock().
		 * Attempt out-of-order locking here.
		 */
		if (task_lock_try((task_t)task)) {
			/* success: task returned locked, port unlocked */
			ip_unlock(port);
			return task;
		}
		try_failed_count++;

		/* back off and retry; port is revalidated each iteration */
		ip_unlock(port);
		mutex_pause(try_failed_count);
	}
	return TASK_INSPECT_NULL;
}
1526
/*
 * Convert a locked, active IKOT_TASK port to a task reference,
 * optionally copying out the task's exec token.
 * Caller holds the port lock; produces a +1 task ref or TASK_NULL.
 */
static task_t
convert_port_to_task_locked(
	ipc_port_t              port,
	uint32_t                *exec_token)
{
	task_t          task = TASK_NULL;

	ip_lock_held(port);
	require_ip_active(port);

	if (ip_kotype(port) == IKOT_TASK) {
		task_t ct = current_task();
		task = (task_t)port->ip_kobject;
		assert(task != TASK_NULL);

		/* enforce the task-port conversion security policy */
		if (task_conversion_eval(ct, task)) {
			return TASK_NULL;
		}

		if (exec_token) {
			*exec_token = task->exec_token;
		}
		task_reference_internal(task);
	}

	return task;
}
1554
1555 /*
1556 * Routine: convert_port_to_task_with_exec_token
1557 * Purpose:
1558 * Convert from a port to a task and return
1559 * the exec token stored in the task.
1560 * Doesn't consume the port ref; produces a task ref,
1561 * which may be null.
1562 * Conditions:
1563 * Nothing locked.
1564 */
1565 task_t
1566 convert_port_to_task_with_exec_token(
1567 ipc_port_t port,
1568 uint32_t *exec_token)
1569 {
1570 task_t task = TASK_NULL;
1571
1572 if (IP_VALID(port)) {
1573 ip_lock(port);
1574 if (ip_active(port)) {
1575 task = convert_port_to_task_locked(port, exec_token);
1576 }
1577 ip_unlock(port);
1578 }
1579
1580 return task;
1581 }
1582
1583 /*
1584 * Routine: convert_port_to_task
1585 * Purpose:
1586 * Convert from a port to a task.
1587 * Doesn't consume the port ref; produces a task ref,
1588 * which may be null.
1589 * Conditions:
1590 * Nothing locked.
1591 */
task_t
convert_port_to_task(
	ipc_port_t port)
{
	/* thin wrapper: same conversion, exec token discarded */
	return convert_port_to_task_with_exec_token(port, NULL);
}
1598
1599
1600 /*
1601 * Routine: convert_port_to_task_name
1602 * Purpose:
1603 * Convert from a port to a task name.
1604 * Doesn't consume the port ref; produces a task name ref,
1605 * which may be null.
1606 * Conditions:
1607 * Nothing locked.
1608 */
1609 task_name_t
1610 convert_port_to_task_name(
1611 ipc_port_t port)
1612 {
1613 task_name_t task = TASK_NULL;
1614
1615 if (IP_VALID(port)) {
1616 ip_lock(port);
1617
1618 if (ip_active(port) &&
1619 (ip_kotype(port) == IKOT_TASK ||
1620 ip_kotype(port) == IKOT_TASK_NAME)) {
1621 task = (task_name_t)port->ip_kobject;
1622 assert(task != TASK_NAME_NULL);
1623
1624 task_reference_internal(task);
1625 }
1626
1627 ip_unlock(port);
1628 }
1629
1630 return task;
1631 }
1632
/*
 * Convert a locked, active IKOT_TASK port to a +1 task inspect reference.
 * Caller holds the port lock.
 * NOTE(review): no task_conversion_eval() gate here, unlike
 * convert_port_to_task_locked — inspect rights are weaker; confirm intent.
 */
static task_inspect_t
convert_port_to_task_inspect_locked(
	ipc_port_t port)
{
	task_inspect_t task = TASK_INSPECT_NULL;

	ip_lock_held(port);
	require_ip_active(port);

	if (ip_kotype(port) == IKOT_TASK) {
		task = (task_inspect_t)port->ip_kobject;
		assert(task != TASK_INSPECT_NULL);

		task_reference_internal(task);
	}

	return task;
}
1651
1652 /*
1653 * Routine: convert_port_to_task_inspect
1654 * Purpose:
1655 * Convert from a port to a task inspection right
1656 * Doesn't consume the port ref; produces a task ref,
1657 * which may be null.
1658 * Conditions:
1659 * Nothing locked.
1660 */
1661 task_inspect_t
1662 convert_port_to_task_inspect(
1663 ipc_port_t port)
1664 {
1665 task_inspect_t task = TASK_INSPECT_NULL;
1666
1667 if (IP_VALID(port)) {
1668 ip_lock(port);
1669 if (ip_active(port)) {
1670 task = convert_port_to_task_inspect_locked(port);
1671 }
1672 ip_unlock(port);
1673 }
1674
1675 return task;
1676 }
1677
1678 /*
1679 * Routine: convert_port_to_task_suspension_token
1680 * Purpose:
1681 * Convert from a port to a task suspension token.
1682 * Doesn't consume the port ref; produces a suspension token ref,
1683 * which may be null.
1684 * Conditions:
1685 * Nothing locked.
1686 */
1687 task_suspension_token_t
1688 convert_port_to_task_suspension_token(
1689 ipc_port_t port)
1690 {
1691 task_suspension_token_t task = TASK_NULL;
1692
1693 if (IP_VALID(port)) {
1694 ip_lock(port);
1695
1696 if (ip_active(port) &&
1697 ip_kotype(port) == IKOT_TASK_RESUME) {
1698 task = (task_suspension_token_t)port->ip_kobject;
1699 assert(task != TASK_NULL);
1700
1701 task_reference_internal(task);
1702 }
1703
1704 ip_unlock(port);
1705 }
1706
1707 return task;
1708 }
1709
1710 /*
1711 * Routine: convert_port_to_space
1712 * Purpose:
1713 * Convert from a port to a space.
1714 * Doesn't consume the port ref; produces a space ref,
1715 * which may be null.
1716 * Conditions:
1717 * Nothing locked.
1718 */
1719 ipc_space_t
1720 convert_port_to_space(
1721 ipc_port_t port)
1722 {
1723 ipc_space_t space;
1724 task_t task;
1725
1726 task = convert_port_to_locked_task(port);
1727
1728 if (task == TASK_NULL) {
1729 return IPC_SPACE_NULL;
1730 }
1731
1732 if (!task->active) {
1733 task_unlock(task);
1734 return IPC_SPACE_NULL;
1735 }
1736
1737 space = task->itk_space;
1738 is_reference(space);
1739 task_unlock(task);
1740 return space;
1741 }
1742
1743 /*
1744 * Routine: convert_port_to_space_inspect
1745 * Purpose:
1746 * Convert from a port to a space inspect right.
1747 * Doesn't consume the port ref; produces a space inspect ref,
1748 * which may be null.
1749 * Conditions:
1750 * Nothing locked.
1751 */
1752 ipc_space_inspect_t
1753 convert_port_to_space_inspect(
1754 ipc_port_t port)
1755 {
1756 ipc_space_inspect_t space;
1757 task_inspect_t task;
1758
1759 task = convert_port_to_locked_task_inspect(port);
1760
1761 if (task == TASK_INSPECT_NULL) {
1762 return IPC_SPACE_INSPECT_NULL;
1763 }
1764
1765 if (!task->active) {
1766 task_unlock(task);
1767 return IPC_SPACE_INSPECT_NULL;
1768 }
1769
1770 space = (ipc_space_inspect_t)task->itk_space;
1771 is_reference((ipc_space_t)space);
1772 task_unlock((task_t)task);
1773 return space;
1774 }
1775
1776 /*
1777 * Routine: convert_port_to_map
1778 * Purpose:
1779 * Convert from a port to a map.
1780 * Doesn't consume the port ref; produces a map ref,
1781 * which may be null.
1782 * Conditions:
1783 * Nothing locked.
1784 */
1785
1786 vm_map_t
1787 convert_port_to_map(
1788 ipc_port_t port)
1789 {
1790 task_t task;
1791 vm_map_t map;
1792
1793 task = convert_port_to_locked_task(port);
1794
1795 if (task == TASK_NULL) {
1796 return VM_MAP_NULL;
1797 }
1798
1799 if (!task->active) {
1800 task_unlock(task);
1801 return VM_MAP_NULL;
1802 }
1803
1804 map = task->map;
1805 vm_map_reference_swap(map);
1806 task_unlock(task);
1807 return map;
1808 }
1809
1810
1811 /*
1812 * Routine: convert_port_to_thread
1813 * Purpose:
1814 * Convert from a port to a thread.
1815 * Doesn't consume the port ref; produces an thread ref,
1816 * which may be null.
1817 * Conditions:
1818 * Nothing locked.
1819 */
1820
/*
 * Convert a locked, active IKOT_THREAD port to a +1 thread reference,
 * subject to the restrictions requested in 'options'.
 * Caller holds the port lock.
 */
static thread_t
convert_port_to_thread_locked(
	ipc_port_t               port,
	port_to_thread_options_t options)
{
	thread_t thread = THREAD_NULL;

	ip_lock_held(port);
	require_ip_active(port);

	if (ip_kotype(port) == IKOT_THREAD) {
		thread = (thread_t)port->ip_kobject;
		assert(thread != THREAD_NULL);

		/* caller asked to exclude the calling thread itself */
		if (options & PORT_TO_THREAD_NOT_CURRENT_THREAD) {
			if (thread == current_thread()) {
				return THREAD_NULL;
			}
		}

		/* caller asked to restrict to threads of the calling task */
		if (options & PORT_TO_THREAD_IN_CURRENT_TASK) {
			if (thread->task != current_task()) {
				return THREAD_NULL;
			}
		} else {
			/* Use task conversion rules for thread control conversions */
			if (task_conversion_eval(current_task(), thread->task) != KERN_SUCCESS) {
				return THREAD_NULL;
			}
		}

		thread_reference_internal(thread);
	}

	return thread;
}
1857
1858 thread_t
1859 convert_port_to_thread(
1860 ipc_port_t port)
1861 {
1862 thread_t thread = THREAD_NULL;
1863
1864 if (IP_VALID(port)) {
1865 ip_lock(port);
1866 if (ip_active(port)) {
1867 thread = convert_port_to_thread_locked(port, PORT_TO_THREAD_NONE);
1868 }
1869 ip_unlock(port);
1870 }
1871
1872 return thread;
1873 }
1874
1875 /*
1876 * Routine: convert_port_to_thread_inspect
1877 * Purpose:
1878 * Convert from a port to a thread inspection right
1879 * Doesn't consume the port ref; produces a thread ref,
1880 * which may be null.
1881 * Conditions:
1882 * Nothing locked.
1883 */
1884 thread_inspect_t
1885 convert_port_to_thread_inspect(
1886 ipc_port_t port)
1887 {
1888 thread_inspect_t thread = THREAD_INSPECT_NULL;
1889
1890 if (IP_VALID(port)) {
1891 ip_lock(port);
1892
1893 if (ip_active(port) &&
1894 ip_kotype(port) == IKOT_THREAD) {
1895 thread = (thread_inspect_t)port->ip_kobject;
1896 assert(thread != THREAD_INSPECT_NULL);
1897 thread_reference_internal((thread_t)thread);
1898 }
1899 ip_unlock(port);
1900 }
1901
1902 return thread;
1903 }
1904
1905 /*
1906 * Routine: convert_thread_inspect_to_port
1907 * Purpose:
1908 * Convert from a thread inspect reference to a port.
1909 * Consumes a thread ref;
1910 * As we never export thread inspect ports, always
1911 * creates a NULL port.
1912 * Conditions:
1913 * Nothing locked.
1914 */
1915
ipc_port_t
convert_thread_inspect_to_port(thread_inspect_t thread)
{
	/* thread inspect ports are never exported: consume the ref, return null */
	thread_deallocate(thread);
	return IP_NULL;
}
1922
1923
1924 /*
1925 * Routine: port_name_to_thread
1926 * Purpose:
1927 * Convert from a port name to an thread reference
1928 * A name of MACH_PORT_NULL is valid for the null thread.
1929 * Conditions:
1930 * Nothing locked.
1931 */
1932 thread_t
1933 port_name_to_thread(
1934 mach_port_name_t name,
1935 port_to_thread_options_t options)
1936 {
1937 thread_t thread = THREAD_NULL;
1938 ipc_port_t kport;
1939 kern_return_t kr;
1940
1941 if (MACH_PORT_VALID(name)) {
1942 kr = ipc_port_translate_send(current_space(), name, &kport);
1943 if (kr == KERN_SUCCESS) {
1944 thread = convert_port_to_thread_locked(kport, options);
1945 ip_unlock(kport);
1946 }
1947 }
1948
1949 return thread;
1950 }
1951
1952 task_t
1953 port_name_to_task(
1954 mach_port_name_t name)
1955 {
1956 ipc_port_t kport;
1957 kern_return_t kr;
1958 task_t task = TASK_NULL;
1959
1960 if (MACH_PORT_VALID(name)) {
1961 kr = ipc_port_translate_send(current_space(), name, &kport);
1962 if (kr == KERN_SUCCESS) {
1963 task = convert_port_to_task_locked(kport, NULL);
1964 ip_unlock(kport);
1965 }
1966 }
1967 return task;
1968 }
1969
1970 task_inspect_t
1971 port_name_to_task_inspect(
1972 mach_port_name_t name)
1973 {
1974 ipc_port_t kport;
1975 kern_return_t kr;
1976 task_inspect_t ti = TASK_INSPECT_NULL;
1977
1978 if (MACH_PORT_VALID(name)) {
1979 kr = ipc_port_translate_send(current_space(), name, &kport);
1980 if (kr == KERN_SUCCESS) {
1981 ti = convert_port_to_task_inspect_locked(kport);
1982 ip_unlock(kport);
1983 }
1984 }
1985 return ti;
1986 }
1987
1988 /*
1989 * Routine: port_name_to_host
1990 * Purpose:
1991 * Convert from a port name to a host pointer.
1992 * NOTE: This does _not_ return a +1 reference to the host_t
1993 * Conditions:
1994 * Nothing locked.
1995 */
1996 host_t
1997 port_name_to_host(
1998 mach_port_name_t name)
1999 {
2000 host_t host = HOST_NULL;
2001 kern_return_t kr;
2002 ipc_port_t port;
2003
2004 if (MACH_PORT_VALID(name)) {
2005 kr = ipc_port_translate_send(current_space(), name, &port);
2006 if (kr == KERN_SUCCESS) {
2007 host = convert_port_to_host(port);
2008 ip_unlock(port);
2009 }
2010 }
2011 return host;
2012 }
2013
2014 /*
2015 * Routine: convert_task_to_port
2016 * Purpose:
2017 * Convert from a task to a port.
2018 * Consumes a task ref; produces a naked send right
2019 * which may be invalid.
2020 * Conditions:
2021 * Nothing locked.
2022 */
2023
2024 ipc_port_t
2025 convert_task_to_port(
2026 task_t task)
2027 {
2028 ipc_port_t port;
2029
2030 itk_lock(task);
2031
2032 if (task->itk_self != IP_NULL) {
2033 port = ipc_port_make_send(task->itk_self);
2034 } else {
2035 port = IP_NULL;
2036 }
2037
2038 itk_unlock(task);
2039
2040 task_deallocate(task);
2041 return port;
2042 }
2043
2044 /*
2045 * Routine: convert_task_inspect_to_port
2046 * Purpose:
2047 * Convert from a task inspect reference to a port.
2048 * Consumes a task ref;
2049 * As we never export task inspect ports, always
2050 * creates a NULL port.
2051 * Conditions:
2052 * Nothing locked.
2053 */
ipc_port_t
convert_task_inspect_to_port(
	task_inspect_t task)
{
	/* task inspect ports are never exported: consume the ref, return null */
	task_deallocate(task);

	return IP_NULL;
}
2062
2063 /*
2064 * Routine: convert_task_suspend_token_to_port
2065 * Purpose:
2066 * Convert from a task suspension token to a port.
2067 * Consumes a task suspension token ref; produces a naked send-once right
2068 * which may be invalid.
2069 * Conditions:
2070 * Nothing locked.
2071 */
ipc_port_t
convert_task_suspension_token_to_port(
	task_suspension_token_t task)
{
	ipc_port_t port;

	task_lock(task);
	if (task->active) {
		/* lazily create the resume kobject port on first use */
		if (task->itk_resume == IP_NULL) {
			task->itk_resume = ipc_kobject_alloc_port((ipc_kobject_t) task,
			    IKOT_TASK_RESUME, IPC_KOBJECT_ALLOC_NONE);
		}

		/*
		 * Create a send-once right for each instance of a direct user-called
		 * task_suspend2 call. Each time one of these send-once rights is abandoned,
		 * the notification handler will resume the target task.
		 */
		port = ipc_port_make_sonce(task->itk_resume);
		assert(IP_VALID(port));
	} else {
		port = IP_NULL;
	}

	task_unlock(task);
	/* consumes the caller's suspension-token reference */
	task_suspension_token_deallocate(task);

	return port;
}
2101
2102
2103 /*
2104 * Routine: convert_task_name_to_port
2105 * Purpose:
2106 * Convert from a task name ref to a port.
2107 * Consumes a task name ref; produces a naked send right
2108 * which may be invalid.
2109 * Conditions:
2110 * Nothing locked.
2111 */
2112
2113 ipc_port_t
2114 convert_task_name_to_port(
2115 task_name_t task_name)
2116 {
2117 ipc_port_t port;
2118
2119 itk_lock(task_name);
2120 if (task_name->itk_nself != IP_NULL) {
2121 port = ipc_port_make_send(task_name->itk_nself);
2122 } else {
2123 port = IP_NULL;
2124 }
2125 itk_unlock(task_name);
2126
2127 task_name_deallocate(task_name);
2128 return port;
2129 }
2130
2131 /*
2132 * Routine: convert_thread_to_port
2133 * Purpose:
2134 * Convert from a thread to a port.
2135 * Consumes an thread ref; produces a naked send right
2136 * which may be invalid.
2137 * Conditions:
2138 * Nothing locked.
2139 */
2140
2141 ipc_port_t
2142 convert_thread_to_port(
2143 thread_t thread)
2144 {
2145 ipc_port_t port;
2146
2147 thread_mtx_lock(thread);
2148
2149 if (thread->ith_self != IP_NULL) {
2150 port = ipc_port_make_send(thread->ith_self);
2151 } else {
2152 port = IP_NULL;
2153 }
2154
2155 thread_mtx_unlock(thread);
2156
2157 thread_deallocate(thread);
2158
2159 return port;
2160 }
2161
2162 /*
2163 * Routine: space_deallocate
2164 * Purpose:
2165 * Deallocate a space ref produced by convert_port_to_space.
2166 * Conditions:
2167 * Nothing locked.
2168 */
2169
void
space_deallocate(
	ipc_space_t space)
{
	/* drop the ref produced by convert_port_to_space; null is a no-op */
	if (space != IS_NULL) {
		is_release(space);
	}
}
2178
2179 /*
2180 * Routine: space_inspect_deallocate
2181 * Purpose:
2182 * Deallocate a space inspect ref produced by convert_port_to_space_inspect.
2183 * Conditions:
2184 * Nothing locked.
2185 */
2186
void
space_inspect_deallocate(
	ipc_space_inspect_t space)
{
	/* drop the ref produced by convert_port_to_space_inspect; null is a no-op */
	if (space != IS_INSPECT_NULL) {
		is_release((ipc_space_t)space);
	}
}
2195
2196 /*
2197 * Routine: thread/task_set_exception_ports [kernel call]
2198 * Purpose:
2199 * Sets the thread/task exception port, flavor and
2200 * behavior for the exception types specified by the mask.
2201 * There will be one send right per exception per valid
2202 * port.
2203 * Conditions:
2204 * Nothing locked. If successful, consumes
2205 * the supplied send right.
2206 * Returns:
2207 * KERN_SUCCESS Changed the special port.
2208 * KERN_INVALID_ARGUMENT The thread is null,
2209 * Illegal mask bit set.
2210 * Illegal exception behavior
2211 * KERN_FAILURE The thread is dead.
2212 */
2213
2214 kern_return_t
2215 thread_set_exception_ports(
2216 thread_t thread,
2217 exception_mask_t exception_mask,
2218 ipc_port_t new_port,
2219 exception_behavior_t new_behavior,
2220 thread_state_flavor_t new_flavor)
2221 {
2222 ipc_port_t old_port[EXC_TYPES_COUNT];
2223 boolean_t privileged = current_task()->sec_token.val[0] == 0;
2224 register int i;
2225
2226 #if CONFIG_MACF
2227 struct label *new_label;
2228 #endif
2229
2230 if (thread == THREAD_NULL) {
2231 return KERN_INVALID_ARGUMENT;
2232 }
2233
2234 if (exception_mask & ~EXC_MASK_VALID) {
2235 return KERN_INVALID_ARGUMENT;
2236 }
2237
2238 if (IP_VALID(new_port)) {
2239 switch (new_behavior & ~MACH_EXCEPTION_MASK) {
2240 case EXCEPTION_DEFAULT:
2241 case EXCEPTION_STATE:
2242 case EXCEPTION_STATE_IDENTITY:
2243 break;
2244
2245 default:
2246 return KERN_INVALID_ARGUMENT;
2247 }
2248 }
2249
2250 /*
2251 * Check the validity of the thread_state_flavor by calling the
2252 * VALID_THREAD_STATE_FLAVOR architecture dependent macro defined in
2253 * osfmk/mach/ARCHITECTURE/thread_status.h
2254 */
2255 if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor)) {
2256 return KERN_INVALID_ARGUMENT;
2257 }
2258
2259 #if CONFIG_MACF
2260 new_label = mac_exc_create_label_for_current_proc();
2261 #endif
2262
2263 thread_mtx_lock(thread);
2264
2265 if (!thread->active) {
2266 thread_mtx_unlock(thread);
2267
2268 return KERN_FAILURE;
2269 }
2270
2271 if (thread->exc_actions == NULL) {
2272 ipc_thread_init_exc_actions(thread);
2273 }
2274 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
2275 if ((exception_mask & (1 << i))
2276 #if CONFIG_MACF
2277 && mac_exc_update_action_label(&thread->exc_actions[i], new_label) == 0
2278 #endif
2279 ) {
2280 old_port[i] = thread->exc_actions[i].port;
2281 thread->exc_actions[i].port = ipc_port_copy_send(new_port);
2282 thread->exc_actions[i].behavior = new_behavior;
2283 thread->exc_actions[i].flavor = new_flavor;
2284 thread->exc_actions[i].privileged = privileged;
2285 } else {
2286 old_port[i] = IP_NULL;
2287 }
2288 }
2289
2290 thread_mtx_unlock(thread);
2291
2292 #if CONFIG_MACF
2293 mac_exc_free_label(new_label);
2294 #endif
2295
2296 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
2297 if (IP_VALID(old_port[i])) {
2298 ipc_port_release_send(old_port[i]);
2299 }
2300 }
2301
2302 if (IP_VALID(new_port)) { /* consume send right */
2303 ipc_port_release_send(new_port);
2304 }
2305
2306 return KERN_SUCCESS;
2307 }
2308
2309 kern_return_t
2310 task_set_exception_ports(
2311 task_t task,
2312 exception_mask_t exception_mask,
2313 ipc_port_t new_port,
2314 exception_behavior_t new_behavior,
2315 thread_state_flavor_t new_flavor)
2316 {
2317 ipc_port_t old_port[EXC_TYPES_COUNT];
2318 boolean_t privileged = current_task()->sec_token.val[0] == 0;
2319 register int i;
2320
2321 #if CONFIG_MACF
2322 struct label *new_label;
2323 #endif
2324
2325 if (task == TASK_NULL) {
2326 return KERN_INVALID_ARGUMENT;
2327 }
2328
2329 if (exception_mask & ~EXC_MASK_VALID) {
2330 return KERN_INVALID_ARGUMENT;
2331 }
2332
2333 if (IP_VALID(new_port)) {
2334 switch (new_behavior & ~MACH_EXCEPTION_MASK) {
2335 case EXCEPTION_DEFAULT:
2336 case EXCEPTION_STATE:
2337 case EXCEPTION_STATE_IDENTITY:
2338 break;
2339
2340 default:
2341 return KERN_INVALID_ARGUMENT;
2342 }
2343 }
2344
2345 /*
2346 * Check the validity of the thread_state_flavor by calling the
2347 * VALID_THREAD_STATE_FLAVOR architecture dependent macro defined in
2348 * osfmk/mach/ARCHITECTURE/thread_status.h
2349 */
2350 if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor)) {
2351 return KERN_INVALID_ARGUMENT;
2352 }
2353
2354 #if CONFIG_MACF
2355 new_label = mac_exc_create_label_for_current_proc();
2356 #endif
2357
2358 itk_lock(task);
2359
2360 if (task->itk_self == IP_NULL) {
2361 itk_unlock(task);
2362
2363 return KERN_FAILURE;
2364 }
2365
2366 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
2367 if ((exception_mask & (1 << i))
2368 #if CONFIG_MACF
2369 && mac_exc_update_action_label(&task->exc_actions[i], new_label) == 0
2370 #endif
2371 ) {
2372 old_port[i] = task->exc_actions[i].port;
2373 task->exc_actions[i].port =
2374 ipc_port_copy_send(new_port);
2375 task->exc_actions[i].behavior = new_behavior;
2376 task->exc_actions[i].flavor = new_flavor;
2377 task->exc_actions[i].privileged = privileged;
2378 } else {
2379 old_port[i] = IP_NULL;
2380 }
2381 }
2382
2383 itk_unlock(task);
2384
2385 #if CONFIG_MACF
2386 mac_exc_free_label(new_label);
2387 #endif
2388
2389 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
2390 if (IP_VALID(old_port[i])) {
2391 ipc_port_release_send(old_port[i]);
2392 }
2393 }
2394
2395 if (IP_VALID(new_port)) { /* consume send right */
2396 ipc_port_release_send(new_port);
2397 }
2398
2399 return KERN_SUCCESS;
2400 }
2401
2402 /*
2403 * Routine: thread/task_swap_exception_ports [kernel call]
2404 * Purpose:
2405 * Sets the thread/task exception port, flavor and
2406 * behavior for the exception types specified by the
2407 * mask.
2408 *
2409 * The old ports, behavior and flavors are returned
2410 * Count specifies the array sizes on input and
2411 * the number of returned ports etc. on output. The
2412 * arrays must be large enough to hold all the returned
 * data, MIG returns an error otherwise. The masks
2414 * array specifies the corresponding exception type(s).
2415 *
2416 * Conditions:
2417 * Nothing locked. If successful, consumes
2418 * the supplied send right.
2419 *
 * Returns up to [in] CountCnt elements.
2421 * Returns:
2422 * KERN_SUCCESS Changed the special port.
2423 * KERN_INVALID_ARGUMENT The thread is null,
2424 * Illegal mask bit set.
2425 * Illegal exception behavior
2426 * KERN_FAILURE The thread is dead.
2427 */
2428
2429 kern_return_t
2430 thread_swap_exception_ports(
2431 thread_t thread,
2432 exception_mask_t exception_mask,
2433 ipc_port_t new_port,
2434 exception_behavior_t new_behavior,
2435 thread_state_flavor_t new_flavor,
2436 exception_mask_array_t masks,
2437 mach_msg_type_number_t *CountCnt,
2438 exception_port_array_t ports,
2439 exception_behavior_array_t behaviors,
2440 thread_state_flavor_array_t flavors)
2441 {
2442 ipc_port_t old_port[EXC_TYPES_COUNT];
2443 boolean_t privileged = current_task()->sec_token.val[0] == 0;
2444 unsigned int i, j, count;
2445
2446 #if CONFIG_MACF
2447 struct label *new_label;
2448 #endif
2449
2450 if (thread == THREAD_NULL) {
2451 return KERN_INVALID_ARGUMENT;
2452 }
2453
2454 if (exception_mask & ~EXC_MASK_VALID) {
2455 return KERN_INVALID_ARGUMENT;
2456 }
2457
2458 if (IP_VALID(new_port)) {
2459 switch (new_behavior & ~MACH_EXCEPTION_MASK) {
2460 case EXCEPTION_DEFAULT:
2461 case EXCEPTION_STATE:
2462 case EXCEPTION_STATE_IDENTITY:
2463 break;
2464
2465 default:
2466 return KERN_INVALID_ARGUMENT;
2467 }
2468 }
2469
2470 if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor)) {
2471 return KERN_INVALID_ARGUMENT;
2472 }
2473
2474 #if CONFIG_MACF
2475 new_label = mac_exc_create_label_for_current_proc();
2476 #endif
2477
2478 thread_mtx_lock(thread);
2479
2480 if (!thread->active) {
2481 thread_mtx_unlock(thread);
2482
2483 return KERN_FAILURE;
2484 }
2485
2486 if (thread->exc_actions == NULL) {
2487 ipc_thread_init_exc_actions(thread);
2488 }
2489
2490 assert(EXC_TYPES_COUNT > FIRST_EXCEPTION);
2491 for (count = 0, i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT && count < *CountCnt; ++i) {
2492 if ((exception_mask & (1 << i))
2493 #if CONFIG_MACF
2494 && mac_exc_update_action_label(&thread->exc_actions[i], new_label) == 0
2495 #endif
2496 ) {
2497 for (j = 0; j < count; ++j) {
2498 /*
2499 * search for an identical entry, if found
2500 * set corresponding mask for this exception.
2501 */
2502 if (thread->exc_actions[i].port == ports[j] &&
2503 thread->exc_actions[i].behavior == behaviors[j] &&
2504 thread->exc_actions[i].flavor == flavors[j]) {
2505 masks[j] |= (1 << i);
2506 break;
2507 }
2508 }
2509
2510 if (j == count) {
2511 masks[j] = (1 << i);
2512 ports[j] = ipc_port_copy_send(thread->exc_actions[i].port);
2513
2514 behaviors[j] = thread->exc_actions[i].behavior;
2515 flavors[j] = thread->exc_actions[i].flavor;
2516 ++count;
2517 }
2518
2519 old_port[i] = thread->exc_actions[i].port;
2520 thread->exc_actions[i].port = ipc_port_copy_send(new_port);
2521 thread->exc_actions[i].behavior = new_behavior;
2522 thread->exc_actions[i].flavor = new_flavor;
2523 thread->exc_actions[i].privileged = privileged;
2524 } else {
2525 old_port[i] = IP_NULL;
2526 }
2527 }
2528
2529 thread_mtx_unlock(thread);
2530
2531 #if CONFIG_MACF
2532 mac_exc_free_label(new_label);
2533 #endif
2534
2535 while (--i >= FIRST_EXCEPTION) {
2536 if (IP_VALID(old_port[i])) {
2537 ipc_port_release_send(old_port[i]);
2538 }
2539 }
2540
2541 if (IP_VALID(new_port)) { /* consume send right */
2542 ipc_port_release_send(new_port);
2543 }
2544
2545 *CountCnt = count;
2546
2547 return KERN_SUCCESS;
2548 }
2549
2550 kern_return_t
2551 task_swap_exception_ports(
2552 task_t task,
2553 exception_mask_t exception_mask,
2554 ipc_port_t new_port,
2555 exception_behavior_t new_behavior,
2556 thread_state_flavor_t new_flavor,
2557 exception_mask_array_t masks,
2558 mach_msg_type_number_t *CountCnt,
2559 exception_port_array_t ports,
2560 exception_behavior_array_t behaviors,
2561 thread_state_flavor_array_t flavors)
2562 {
2563 ipc_port_t old_port[EXC_TYPES_COUNT];
2564 boolean_t privileged = current_task()->sec_token.val[0] == 0;
2565 unsigned int i, j, count;
2566
2567 #if CONFIG_MACF
2568 struct label *new_label;
2569 #endif
2570
2571 if (task == TASK_NULL) {
2572 return KERN_INVALID_ARGUMENT;
2573 }
2574
2575 if (exception_mask & ~EXC_MASK_VALID) {
2576 return KERN_INVALID_ARGUMENT;
2577 }
2578
2579 if (IP_VALID(new_port)) {
2580 switch (new_behavior & ~MACH_EXCEPTION_MASK) {
2581 case EXCEPTION_DEFAULT:
2582 case EXCEPTION_STATE:
2583 case EXCEPTION_STATE_IDENTITY:
2584 break;
2585
2586 default:
2587 return KERN_INVALID_ARGUMENT;
2588 }
2589 }
2590
2591 if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor)) {
2592 return KERN_INVALID_ARGUMENT;
2593 }
2594
2595 #if CONFIG_MACF
2596 new_label = mac_exc_create_label_for_current_proc();
2597 #endif
2598
2599 itk_lock(task);
2600
2601 if (task->itk_self == IP_NULL) {
2602 itk_unlock(task);
2603
2604 return KERN_FAILURE;
2605 }
2606
2607 assert(EXC_TYPES_COUNT > FIRST_EXCEPTION);
2608 for (count = 0, i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT && count < *CountCnt; ++i) {
2609 if ((exception_mask & (1 << i))
2610 #if CONFIG_MACF
2611 && mac_exc_update_action_label(&task->exc_actions[i], new_label) == 0
2612 #endif
2613 ) {
2614 for (j = 0; j < count; j++) {
2615 /*
2616 * search for an identical entry, if found
2617 * set corresponding mask for this exception.
2618 */
2619 if (task->exc_actions[i].port == ports[j] &&
2620 task->exc_actions[i].behavior == behaviors[j] &&
2621 task->exc_actions[i].flavor == flavors[j]) {
2622 masks[j] |= (1 << i);
2623 break;
2624 }
2625 }
2626
2627 if (j == count) {
2628 masks[j] = (1 << i);
2629 ports[j] = ipc_port_copy_send(task->exc_actions[i].port);
2630 behaviors[j] = task->exc_actions[i].behavior;
2631 flavors[j] = task->exc_actions[i].flavor;
2632 ++count;
2633 }
2634
2635 old_port[i] = task->exc_actions[i].port;
2636
2637 task->exc_actions[i].port = ipc_port_copy_send(new_port);
2638 task->exc_actions[i].behavior = new_behavior;
2639 task->exc_actions[i].flavor = new_flavor;
2640 task->exc_actions[i].privileged = privileged;
2641 } else {
2642 old_port[i] = IP_NULL;
2643 }
2644 }
2645
2646 itk_unlock(task);
2647
2648 #if CONFIG_MACF
2649 mac_exc_free_label(new_label);
2650 #endif
2651
2652 while (--i >= FIRST_EXCEPTION) {
2653 if (IP_VALID(old_port[i])) {
2654 ipc_port_release_send(old_port[i]);
2655 }
2656 }
2657
2658 if (IP_VALID(new_port)) { /* consume send right */
2659 ipc_port_release_send(new_port);
2660 }
2661
2662 *CountCnt = count;
2663
2664 return KERN_SUCCESS;
2665 }
2666
2667 /*
2668 * Routine: thread/task_get_exception_ports [kernel call]
2669 * Purpose:
2670 * Clones a send right for each of the thread/task's exception
2671 * ports specified in the mask and returns the behaviour
2672 * and flavor of said port.
2673 *
2674  *		Returns up to [in] CountCnt elements.
2675 *
2676 * Conditions:
2677 * Nothing locked.
2678 * Returns:
2679 * KERN_SUCCESS Extracted a send right.
2680 * KERN_INVALID_ARGUMENT The thread is null,
2681 * Invalid special port,
2682 * Illegal mask bit set.
2683 * KERN_FAILURE The thread is dead.
2684 */
2685
2686 kern_return_t
2687 thread_get_exception_ports(
2688 thread_t thread,
2689 exception_mask_t exception_mask,
2690 exception_mask_array_t masks,
2691 mach_msg_type_number_t *CountCnt,
2692 exception_port_array_t ports,
2693 exception_behavior_array_t behaviors,
2694 thread_state_flavor_array_t flavors)
2695 {
2696 unsigned int i, j, count;
2697
2698 if (thread == THREAD_NULL) {
2699 return KERN_INVALID_ARGUMENT;
2700 }
2701
2702 if (exception_mask & ~EXC_MASK_VALID) {
2703 return KERN_INVALID_ARGUMENT;
2704 }
2705
2706 thread_mtx_lock(thread);
2707
2708 if (!thread->active) {
2709 thread_mtx_unlock(thread);
2710
2711 return KERN_FAILURE;
2712 }
2713
2714 count = 0;
2715
2716 if (thread->exc_actions == NULL) {
2717 goto done;
2718 }
2719
2720 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
2721 if (exception_mask & (1 << i)) {
2722 for (j = 0; j < count; ++j) {
2723 /*
2724 * search for an identical entry, if found
2725 * set corresponding mask for this exception.
2726 */
2727 if (thread->exc_actions[i].port == ports[j] &&
2728 thread->exc_actions[i].behavior == behaviors[j] &&
2729 thread->exc_actions[i].flavor == flavors[j]) {
2730 masks[j] |= (1 << i);
2731 break;
2732 }
2733 }
2734
2735 if (j == count) {
2736 masks[j] = (1 << i);
2737 ports[j] = ipc_port_copy_send(thread->exc_actions[i].port);
2738 behaviors[j] = thread->exc_actions[i].behavior;
2739 flavors[j] = thread->exc_actions[i].flavor;
2740 ++count;
2741 if (count >= *CountCnt) {
2742 break;
2743 }
2744 }
2745 }
2746 }
2747
2748 done:
2749 thread_mtx_unlock(thread);
2750
2751 *CountCnt = count;
2752
2753 return KERN_SUCCESS;
2754 }
2755
2756 kern_return_t
2757 task_get_exception_ports(
2758 task_t task,
2759 exception_mask_t exception_mask,
2760 exception_mask_array_t masks,
2761 mach_msg_type_number_t *CountCnt,
2762 exception_port_array_t ports,
2763 exception_behavior_array_t behaviors,
2764 thread_state_flavor_array_t flavors)
2765 {
2766 unsigned int i, j, count;
2767
2768 if (task == TASK_NULL) {
2769 return KERN_INVALID_ARGUMENT;
2770 }
2771
2772 if (exception_mask & ~EXC_MASK_VALID) {
2773 return KERN_INVALID_ARGUMENT;
2774 }
2775
2776 itk_lock(task);
2777
2778 if (task->itk_self == IP_NULL) {
2779 itk_unlock(task);
2780
2781 return KERN_FAILURE;
2782 }
2783
2784 count = 0;
2785
2786 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
2787 if (exception_mask & (1 << i)) {
2788 for (j = 0; j < count; ++j) {
2789 /*
2790 * search for an identical entry, if found
2791 * set corresponding mask for this exception.
2792 */
2793 if (task->exc_actions[i].port == ports[j] &&
2794 task->exc_actions[i].behavior == behaviors[j] &&
2795 task->exc_actions[i].flavor == flavors[j]) {
2796 masks[j] |= (1 << i);
2797 break;
2798 }
2799 }
2800
2801 if (j == count) {
2802 masks[j] = (1 << i);
2803 ports[j] = ipc_port_copy_send(task->exc_actions[i].port);
2804 behaviors[j] = task->exc_actions[i].behavior;
2805 flavors[j] = task->exc_actions[i].flavor;
2806 ++count;
2807 if (count > *CountCnt) {
2808 break;
2809 }
2810 }
2811 }
2812 }
2813
2814 itk_unlock(task);
2815
2816 *CountCnt = count;
2817
2818 return KERN_SUCCESS;
2819 }