]> git.saurik.com Git - apple/xnu.git/blob - osfmk/kern/ipc_tt.c
xnu-4903.270.47.tar.gz
[apple/xnu.git] / osfmk / kern / ipc_tt.c
1 /*
2 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
58 * support for mandatory and extensible security protections. This notice
59 * is included in support of clause 2.2 (b) of the Apple Public License,
60 * Version 2.0.
61 */
62 /*
63 */
64
65 /*
66 * File: ipc_tt.c
67 * Purpose:
68 * Task and thread related IPC functions.
69 */
70
71 #include <mach/mach_types.h>
72 #include <mach/boolean.h>
73 #include <mach/kern_return.h>
74 #include <mach/mach_param.h>
75 #include <mach/task_special_ports.h>
76 #include <mach/thread_special_ports.h>
77 #include <mach/thread_status.h>
78 #include <mach/exception_types.h>
79 #include <mach/memory_object_types.h>
80 #include <mach/mach_traps.h>
81 #include <mach/task_server.h>
82 #include <mach/thread_act_server.h>
83 #include <mach/mach_host_server.h>
84 #include <mach/host_priv_server.h>
85 #include <mach/vm_map_server.h>
86
87 #include <kern/kern_types.h>
88 #include <kern/host.h>
89 #include <kern/ipc_kobject.h>
90 #include <kern/ipc_tt.h>
91 #include <kern/kalloc.h>
92 #include <kern/thread.h>
93 #include <kern/misc_protos.h>
94
95 #include <vm/vm_map.h>
96 #include <vm/vm_pageout.h>
97 #include <vm/vm_protos.h>
98
99 #include <security/mac_mach_internal.h>
100
101 #if CONFIG_EMBEDDED && !SECURE_KERNEL
102 extern int cs_relax_platform_task_ports;
103 #endif
104
105 /* forward declarations */
106 task_t convert_port_to_locked_task(ipc_port_t port);
107 task_inspect_t convert_port_to_locked_task_inspect(ipc_port_t port);
108 static void ipc_port_bind_special_reply_port_locked(ipc_port_t port);
109 static kern_return_t ipc_port_unbind_special_reply_port(thread_t thread, boolean_t unbind_active_port);
110 kern_return_t task_conversion_eval(task_t caller, task_t victim);
111
112 /*
113 * Routine: ipc_task_init
114 * Purpose:
115 * Initialize a task's IPC state.
116 *
117 * If non-null, some state will be inherited from the parent.
118 * The parent must be appropriately initialized.
119 * Conditions:
120 * Nothing locked.
121 */
122
void
ipc_task_init(
	task_t task,
	task_t parent)
{
	ipc_space_t space;
	ipc_port_t kport;       /* task kernel (control) port */
	ipc_port_t nport;       /* task name port (no control rights) */
	kern_return_t kr;
	int i;


	/* Create the task's port-name space, seeded with the smallest
	 * entry-table size; it grows on demand. */
	kr = ipc_space_create(&ipc_table_entries[0], &space);
	if (kr != KERN_SUCCESS) {
		panic("ipc_task_init");
	}

	space->is_task = task;

	/* Allocate the kernel (control) port ... */
	kport = ipc_port_alloc_kernel();
	if (kport == IP_NULL) {
		panic("ipc_task_init");
	}

	/* ... and the task name port. */
	nport = ipc_port_alloc_kernel();
	if (nport == IP_NULL) {
		panic("ipc_task_init");
	}

	itk_lock_init(task);
	task->itk_self = kport;
	task->itk_nself = nport;
	task->itk_resume = IP_NULL; /* Lazily allocated on-demand */
	if (task_is_a_corpse_fork(task)) {
		/*
		 * No sender's notification for corpse would not
		 * work with a naked send right in kernel.
		 */
		task->itk_sself = IP_NULL;
	} else {
		/* User-visible self port starts as a naked send right to
		 * the kernel port (no interposing yet). */
		task->itk_sself = ipc_port_make_send(kport);
	}
	task->itk_debug_control = IP_NULL;
	task->itk_space = space;

#if CONFIG_MACF
	/* Slot 0 of exc_actions is unused; the bzero below wipes it
	 * (label included), so only FIRST_EXCEPTION.. get real labels. */
	task->exc_actions[0].label = NULL;
	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		mac_exc_associate_action_label(&task->exc_actions[i], mac_exc_create_label());
	}
#endif

	/* always zero-out the first (unused) array element */
	bzero(&task->exc_actions[0], sizeof(task->exc_actions[0]));

	if (parent == TASK_NULL) {
		/* First task: nothing to inherit; exception actions start
		 * empty and the host port comes from host_priv_self(). */
		ipc_port_t port;
		for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
			task->exc_actions[i].port = IP_NULL;
			task->exc_actions[i].flavor = 0;
			task->exc_actions[i].behavior = 0;
			task->exc_actions[i].privileged = FALSE;
		}/* for */

		kr = host_get_host_port(host_priv_self(), &port);
		assert(kr == KERN_SUCCESS);
		task->itk_host = port;

		task->itk_bootstrap = IP_NULL;
		task->itk_seatbelt = IP_NULL;
		task->itk_gssd = IP_NULL;
		task->itk_task_access = IP_NULL;

		for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
			task->itk_registered[i] = IP_NULL;
		}
	} else {
		/* Child task: copy send rights for the inheritable special
		 * ports from the parent, under the parent's itk lock. */
		itk_lock(parent);
		assert(parent->itk_self != IP_NULL);

		/* inherit registered ports */

		for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
			task->itk_registered[i] =
			    ipc_port_copy_send(parent->itk_registered[i]);
		}

		/* inherit exception and bootstrap ports */

		for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
			task->exc_actions[i].port =
			    ipc_port_copy_send(parent->exc_actions[i].port);
			task->exc_actions[i].flavor =
			    parent->exc_actions[i].flavor;
			task->exc_actions[i].behavior =
			    parent->exc_actions[i].behavior;
			task->exc_actions[i].privileged =
			    parent->exc_actions[i].privileged;
#if CONFIG_MACF
			mac_exc_inherit_action_label(parent->exc_actions + i, task->exc_actions + i);
#endif
		}/* for */
		task->itk_host =
		    ipc_port_copy_send(parent->itk_host);

		task->itk_bootstrap =
		    ipc_port_copy_send(parent->itk_bootstrap);

		task->itk_seatbelt =
		    ipc_port_copy_send(parent->itk_seatbelt);

		task->itk_gssd =
		    ipc_port_copy_send(parent->itk_gssd);

		task->itk_task_access =
		    ipc_port_copy_send(parent->itk_task_access);

		itk_unlock(parent);
	}
}
243
244 /*
245 * Routine: ipc_task_enable
246 * Purpose:
247 * Enable a task for IPC access.
248 * Conditions:
249 * Nothing locked.
250 */
251
252 void
253 ipc_task_enable(
254 task_t task)
255 {
256 ipc_port_t kport;
257 ipc_port_t nport;
258
259 itk_lock(task);
260 kport = task->itk_self;
261 if (kport != IP_NULL) {
262 ipc_kobject_set(kport, (ipc_kobject_t) task, IKOT_TASK);
263 }
264 nport = task->itk_nself;
265 if (nport != IP_NULL) {
266 ipc_kobject_set(nport, (ipc_kobject_t) task, IKOT_TASK_NAME);
267 }
268 itk_unlock(task);
269 }
270
271 /*
272 * Routine: ipc_task_disable
273 * Purpose:
274 * Disable IPC access to a task.
275 * Conditions:
276 * Nothing locked.
277 */
278
279 void
280 ipc_task_disable(
281 task_t task)
282 {
283 ipc_port_t kport;
284 ipc_port_t nport;
285 ipc_port_t rport;
286
287 itk_lock(task);
288 kport = task->itk_self;
289 if (kport != IP_NULL) {
290 ipc_kobject_set(kport, IKO_NULL, IKOT_NONE);
291 }
292 nport = task->itk_nself;
293 if (nport != IP_NULL) {
294 ipc_kobject_set(nport, IKO_NULL, IKOT_NONE);
295 }
296
297 rport = task->itk_resume;
298 if (rport != IP_NULL) {
299 /*
300 * From this point onwards this task is no longer accepting
301 * resumptions.
302 *
303 * There are still outstanding suspensions on this task,
304 * even as it is being torn down. Disconnect the task
305 * from the rport, thereby "orphaning" the rport. The rport
306 * itself will go away only when the last suspension holder
307 * destroys his SO right to it -- when he either
308 * exits, or tries to actually use that last SO right to
309 * resume this (now non-existent) task.
310 */
311 ipc_kobject_set(rport, IKO_NULL, IKOT_NONE);
312 }
313 itk_unlock(task);
314 }
315
316 /*
317 * Routine: ipc_task_terminate
318 * Purpose:
319 * Clean up and destroy a task's IPC state.
320 * Conditions:
321 * Nothing locked. The task must be suspended.
322 * (Or the current thread must be in the task.)
323 */
324
void
ipc_task_terminate(
	task_t task)
{
	ipc_port_t kport;       /* kernel port, deallocated at the end */
	ipc_port_t nport;       /* name port, deallocated at the end */
	ipc_port_t rport;       /* lazily-allocated resume port, if any */
	int i;

	itk_lock(task);
	kport = task->itk_self;

	if (kport == IP_NULL) {
		/* the task is already terminated (can this happen?) */
		itk_unlock(task);
		return;
	}
	/* Clearing itk_self marks the task's IPC state terminated, so a
	 * racing caller takes the early-return path above. */
	task->itk_self = IP_NULL;

	nport = task->itk_nself;
	assert(nport != IP_NULL);
	task->itk_nself = IP_NULL;

	rport = task->itk_resume;
	task->itk_resume = IP_NULL;

	itk_unlock(task);

	/* release the naked send rights */
	/* NOTE(review): the itk_* fields below are read after the itk lock
	 * is dropped; presumably safe because itk_self == IP_NULL now keeps
	 * other paths from mutating them -- confirm against other itk users. */

	if (IP_VALID(task->itk_sself)) {
		ipc_port_release_send(task->itk_sself);
	}

	/* drop exception-action send rights and free their MAC labels */
	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		if (IP_VALID(task->exc_actions[i].port)) {
			ipc_port_release_send(task->exc_actions[i].port);
		}
#if CONFIG_MACF
		mac_exc_free_action_label(task->exc_actions + i);
#endif
	}

	if (IP_VALID(task->itk_host)) {
		ipc_port_release_send(task->itk_host);
	}

	if (IP_VALID(task->itk_bootstrap)) {
		ipc_port_release_send(task->itk_bootstrap);
	}

	if (IP_VALID(task->itk_seatbelt)) {
		ipc_port_release_send(task->itk_seatbelt);
	}

	if (IP_VALID(task->itk_gssd)) {
		ipc_port_release_send(task->itk_gssd);
	}

	if (IP_VALID(task->itk_task_access)) {
		ipc_port_release_send(task->itk_task_access);
	}

	if (IP_VALID(task->itk_debug_control)) {
		ipc_port_release_send(task->itk_debug_control);
	}

	for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
		if (IP_VALID(task->itk_registered[i])) {
			ipc_port_release_send(task->itk_registered[i]);
		}
	}

	/* destroy the kernel ports */
	ipc_port_dealloc_kernel(kport);
	ipc_port_dealloc_kernel(nport);
	if (rport != IP_NULL) {
		ipc_port_dealloc_kernel(rport);
	}

	itk_lock_destroy(task);
}
407
408 /*
409 * Routine: ipc_task_reset
410 * Purpose:
411 * Reset a task's IPC state to protect it when
412 * it enters an elevated security context. The
413 * task name port can remain the same - since
414 * it represents no specific privilege.
415 * Conditions:
416 * Nothing locked. The task must be suspended.
417 * (Or the current thread must be in the task.)
418 */
419
void
ipc_task_reset(
	task_t task)
{
	ipc_port_t old_kport, new_kport;
	ipc_port_t old_sself;
	/* Ports stripped under the lock; released after it is dropped. */
	ipc_port_t old_exc_actions[EXC_TYPES_COUNT];
	int i;

#if CONFIG_MACF
	/* Fresh label to unset credentials in existing labels. */
	struct label *unset_label = mac_exc_create_label();
#endif

	/* Allocate the replacement kernel port before taking any locks. */
	new_kport = ipc_port_alloc_kernel();
	if (new_kport == IP_NULL) {
		panic("ipc_task_reset");
	}

	itk_lock(task);

	old_kport = task->itk_self;

	if (old_kport == IP_NULL) {
		/* the task is already terminated (can this happen?) */
		itk_unlock(task);
		ipc_port_dealloc_kernel(new_kport);
#if CONFIG_MACF
		mac_exc_free_label(unset_label);
#endif
		return;
	}

	/* Swap in the new kernel port; outstanding send rights to the
	 * old one no longer confer control over this task. */
	task->itk_self = new_kport;
	old_sself = task->itk_sself;
	task->itk_sself = ipc_port_make_send(new_kport);

	/* Set the old kport to IKOT_NONE and update the exec token while under the port lock */
	ip_lock(old_kport);
	ipc_kobject_set_atomically(old_kport, IKO_NULL, IKOT_NONE);
	task->exec_token += 1;
	ip_unlock(old_kport);

	ipc_kobject_set(new_kport, (ipc_kobject_t) task, IKOT_TASK);

	/* Strip non-privileged exception ports; a pending corpse report
	 * keeps the EXC_CORPSE_NOTIFY port in place. */
	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		old_exc_actions[i] = IP_NULL;

		if (i == EXC_CORPSE_NOTIFY && task_corpse_pending_report(task)) {
			continue;
		}

		if (!task->exc_actions[i].privileged) {
#if CONFIG_MACF
			mac_exc_update_action_label(task->exc_actions + i, unset_label);
#endif
			old_exc_actions[i] = task->exc_actions[i].port;
			task->exc_actions[i].port = IP_NULL;
		}
	}/* for */

	if (IP_VALID(task->itk_debug_control)) {
		ipc_port_release_send(task->itk_debug_control);
	}
	task->itk_debug_control = IP_NULL;

	itk_unlock(task);

#if CONFIG_MACF
	mac_exc_free_label(unset_label);
#endif

	/* release the naked send rights */

	if (IP_VALID(old_sself)) {
		ipc_port_release_send(old_sself);
	}

	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		if (IP_VALID(old_exc_actions[i])) {
			ipc_port_release_send(old_exc_actions[i]);
		}
	}/* for */

	/* destroy the kernel port */
	ipc_port_dealloc_kernel(old_kport);
}
507
508 /*
509 * Routine: ipc_thread_init
510 * Purpose:
511 * Initialize a thread's IPC state.
512 * Conditions:
513 * Nothing locked.
514 */
515
516 void
517 ipc_thread_init(
518 thread_t thread)
519 {
520 ipc_port_t kport;
521
522 kport = ipc_port_alloc_kernel();
523 if (kport == IP_NULL) {
524 panic("ipc_thread_init");
525 }
526
527 thread->ith_self = kport;
528 thread->ith_sself = ipc_port_make_send(kport);
529 thread->ith_special_reply_port = NULL;
530 thread->exc_actions = NULL;
531
532 ipc_kobject_set(kport, (ipc_kobject_t)thread, IKOT_THREAD);
533
534 #if IMPORTANCE_INHERITANCE
535 thread->ith_assertions = 0;
536 #endif
537
538 ipc_kmsg_queue_init(&thread->ith_messages);
539
540 thread->ith_rpc_reply = IP_NULL;
541 }
542
543 void
544 ipc_thread_init_exc_actions(
545 thread_t thread)
546 {
547 assert(thread->exc_actions == NULL);
548
549 thread->exc_actions = kalloc(sizeof(struct exception_action) * EXC_TYPES_COUNT);
550 bzero(thread->exc_actions, sizeof(struct exception_action) * EXC_TYPES_COUNT);
551
552 #if CONFIG_MACF
553 for (size_t i = 0; i < EXC_TYPES_COUNT; ++i) {
554 mac_exc_associate_action_label(thread->exc_actions + i, mac_exc_create_label());
555 }
556 #endif
557 }
558
559 void
560 ipc_thread_destroy_exc_actions(
561 thread_t thread)
562 {
563 if (thread->exc_actions != NULL) {
564 #if CONFIG_MACF
565 for (size_t i = 0; i < EXC_TYPES_COUNT; ++i) {
566 mac_exc_free_action_label(thread->exc_actions + i);
567 }
568 #endif
569
570 kfree(thread->exc_actions,
571 sizeof(struct exception_action) * EXC_TYPES_COUNT);
572 thread->exc_actions = NULL;
573 }
574 }
575
576 void
577 ipc_thread_disable(
578 thread_t thread)
579 {
580 ipc_port_t kport = thread->ith_self;
581
582 if (kport != IP_NULL) {
583 ipc_kobject_set(kport, IKO_NULL, IKOT_NONE);
584 }
585 }
586
587 /*
588 * Routine: ipc_thread_terminate
589 * Purpose:
590 * Clean up and destroy a thread's IPC state.
591 * Conditions:
592 * Nothing locked.
593 */
594
void
ipc_thread_terminate(
	thread_t thread)
{
	ipc_port_t kport = thread->ith_self;

	if (kport != IP_NULL) {
		int i;

		/* drop the naked send right backing the user-visible
		 * self port, if any */
		if (IP_VALID(thread->ith_sself)) {
			ipc_port_release_send(thread->ith_sself);
		}

		/* clearing ith_self marks the thread's IPC state dead */
		thread->ith_sself = thread->ith_self = IP_NULL;

		/* release exception-port send rights, then free the
		 * lazily-allocated action table itself */
		if (thread->exc_actions != NULL) {
			for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
				if (IP_VALID(thread->exc_actions[i].port)) {
					ipc_port_release_send(thread->exc_actions[i].port);
				}
			}
			ipc_thread_destroy_exc_actions(thread);
		}

		/* destroy the kernel port (receive right) */
		ipc_port_dealloc_kernel(kport);
	}

#if IMPORTANCE_INHERITANCE
	assert(thread->ith_assertions == 0);
#endif

	/* unbind the thread special reply port */
	if (IP_VALID(thread->ith_special_reply_port)) {
		ipc_port_unbind_special_reply_port(thread, TRUE);
	}

	assert(ipc_kmsg_queue_empty(&thread->ith_messages));

	if (thread->ith_rpc_reply != IP_NULL) {
		ipc_port_dealloc_reply(thread->ith_rpc_reply);
	}

	thread->ith_rpc_reply = IP_NULL;
}
639
640 /*
641 * Routine: ipc_thread_reset
642 * Purpose:
643 * Reset the IPC state for a given Mach thread when
644 * its task enters an elevated security context.
645 * Both the thread port and its exception ports have
646 * to be reset. Its RPC reply port cannot have any
647 * rights outstanding, so it should be fine.
648 * Conditions:
649 * Nothing locked.
650 */
651
652 void
653 ipc_thread_reset(
654 thread_t thread)
655 {
656 ipc_port_t old_kport, new_kport;
657 ipc_port_t old_sself;
658 ipc_port_t old_exc_actions[EXC_TYPES_COUNT];
659 boolean_t has_old_exc_actions = FALSE;
660 int i;
661
662 #if CONFIG_MACF
663 struct label *new_label = mac_exc_create_label();
664 #endif
665
666 new_kport = ipc_port_alloc_kernel();
667 if (new_kport == IP_NULL) {
668 panic("ipc_task_reset");
669 }
670
671 thread_mtx_lock(thread);
672
673 old_kport = thread->ith_self;
674
675 if (old_kport == IP_NULL && thread->inspection == FALSE) {
676 /* the is already terminated (can this happen?) */
677 thread_mtx_unlock(thread);
678 ipc_port_dealloc_kernel(new_kport);
679 #if CONFIG_MACF
680 mac_exc_free_label(new_label);
681 #endif
682 return;
683 }
684
685 thread->ith_self = new_kport;
686 old_sself = thread->ith_sself;
687 thread->ith_sself = ipc_port_make_send(new_kport);
688 if (old_kport != IP_NULL) {
689 ipc_kobject_set(old_kport, IKO_NULL, IKOT_NONE);
690 }
691 ipc_kobject_set(new_kport, (ipc_kobject_t) thread, IKOT_THREAD);
692
693 /*
694 * Only ports that were set by root-owned processes
695 * (privileged ports) should survive
696 */
697 if (thread->exc_actions != NULL) {
698 has_old_exc_actions = TRUE;
699 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
700 if (thread->exc_actions[i].privileged) {
701 old_exc_actions[i] = IP_NULL;
702 } else {
703 #if CONFIG_MACF
704 mac_exc_update_action_label(thread->exc_actions + i, new_label);
705 #endif
706 old_exc_actions[i] = thread->exc_actions[i].port;
707 thread->exc_actions[i].port = IP_NULL;
708 }
709 }
710 }
711
712 thread_mtx_unlock(thread);
713
714 #if CONFIG_MACF
715 mac_exc_free_label(new_label);
716 #endif
717
718 /* release the naked send rights */
719
720 if (IP_VALID(old_sself)) {
721 ipc_port_release_send(old_sself);
722 }
723
724 if (has_old_exc_actions) {
725 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
726 ipc_port_release_send(old_exc_actions[i]);
727 }
728 }
729
730 /* destroy the kernel port */
731 if (old_kport != IP_NULL) {
732 ipc_port_dealloc_kernel(old_kport);
733 }
734
735 /* unbind the thread special reply port */
736 if (IP_VALID(thread->ith_special_reply_port)) {
737 ipc_port_unbind_special_reply_port(thread, TRUE);
738 }
739 }
740
741 /*
742 * Routine: retrieve_task_self_fast
743 * Purpose:
744 * Optimized version of retrieve_task_self,
745 * that only works for the current task.
746 *
747 * Return a send right (possibly null/dead)
748 * for the task's user-visible self port.
749 * Conditions:
750 * Nothing locked.
751 */
752
ipc_port_t
retrieve_task_self_fast(
	task_t task)
{
	ipc_port_t port;

	assert(task == current_task());

	itk_lock(task);
	assert(task->itk_self != IP_NULL);

	if ((port = task->itk_sself) == task->itk_self) {
		/* no interposing */

		/* Fast path: mint a send right on the kernel port by
		 * bumping the ref and send-right counts directly under
		 * the port lock. */
		ip_lock(port);
		assert(ip_active(port));
		ip_reference(port);
		port->ip_srights++;
		ip_unlock(port);
	} else {
		/* Someone interposed on the self port; copy whatever
		 * right is registered (possibly null or dead). */
		port = ipc_port_copy_send(port);
	}
	itk_unlock(task);

	return port;
}
779
780 /*
781 * Routine: retrieve_thread_self_fast
782 * Purpose:
783 * Return a send right (possibly null/dead)
784 * for the thread's user-visible self port.
785 *
786 * Only works for the current thread.
787 *
788 * Conditions:
789 * Nothing locked.
790 */
791
ipc_port_t
retrieve_thread_self_fast(
	thread_t thread)
{
	ipc_port_t port;

	assert(thread == current_thread());

	thread_mtx_lock(thread);

	assert(thread->ith_self != IP_NULL);

	if ((port = thread->ith_sself) == thread->ith_self) {
		/* no interposing */

		/* Fast path: mint a send right on the kernel port by
		 * bumping the ref and send-right counts directly under
		 * the port lock. */
		ip_lock(port);
		assert(ip_active(port));
		ip_reference(port);
		port->ip_srights++;
		ip_unlock(port);
	} else {
		/* Someone interposed on the self port; copy whatever
		 * right is registered (possibly null or dead). */
		port = ipc_port_copy_send(port);
	}

	thread_mtx_unlock(thread);

	return port;
}
820
821 /*
822 * Routine: task_self_trap [mach trap]
823 * Purpose:
824 * Give the caller send rights for his own task port.
825 * Conditions:
826 * Nothing locked.
827 * Returns:
828 * MACH_PORT_NULL if there are any resource failures
829 * or other errors.
830 */
831
832 mach_port_name_t
833 task_self_trap(
834 __unused struct task_self_trap_args *args)
835 {
836 task_t task = current_task();
837 ipc_port_t sright;
838 mach_port_name_t name;
839
840 sright = retrieve_task_self_fast(task);
841 name = ipc_port_copyout_send(sright, task->itk_space);
842 return name;
843 }
844
845 /*
846 * Routine: thread_self_trap [mach trap]
847 * Purpose:
848 * Give the caller send rights for his own thread port.
849 * Conditions:
850 * Nothing locked.
851 * Returns:
852 * MACH_PORT_NULL if there are any resource failures
853 * or other errors.
854 */
855
856 mach_port_name_t
857 thread_self_trap(
858 __unused struct thread_self_trap_args *args)
859 {
860 thread_t thread = current_thread();
861 task_t task = thread->task;
862 ipc_port_t sright;
863 mach_port_name_t name;
864
865 sright = retrieve_thread_self_fast(thread);
866 name = ipc_port_copyout_send(sright, task->itk_space);
867 return name;
868 }
869
870 /*
871 * Routine: mach_reply_port [mach trap]
872 * Purpose:
873 * Allocate a port for the caller.
874 * Conditions:
875 * Nothing locked.
876 * Returns:
877 * MACH_PORT_NULL if there are any resource failures
878 * or other errors.
879 */
880
881 mach_port_name_t
882 mach_reply_port(
883 __unused struct mach_reply_port_args *args)
884 {
885 ipc_port_t port;
886 mach_port_name_t name;
887 kern_return_t kr;
888
889 kr = ipc_port_alloc(current_task()->itk_space, &name, &port);
890 if (kr == KERN_SUCCESS) {
891 ip_unlock(port);
892 } else {
893 name = MACH_PORT_NULL;
894 }
895 return name;
896 }
897
898 /*
899 * Routine: thread_get_special_reply_port [mach trap]
900 * Purpose:
901 * Allocate a special reply port for the calling thread.
902 * Conditions:
903 * Nothing locked.
904 * Returns:
905 * mach_port_name_t: send right & receive right for special reply port.
906 * MACH_PORT_NULL if there are any resource failures
907 * or other errors.
908 */
909
mach_port_name_t
thread_get_special_reply_port(
	__unused struct thread_get_special_reply_port_args *args)
{
	ipc_port_t port;
	mach_port_name_t name;
	mach_port_name_t send_name;
	kern_return_t kr;
	thread_t thread = current_thread();

	/* unbind the thread special reply port */
	if (IP_VALID(thread->ith_special_reply_port)) {
		kr = ipc_port_unbind_special_reply_port(thread, TRUE);
		if (kr != KERN_SUCCESS) {
			return MACH_PORT_NULL;
		}
	}

	/* Allocate a receive right; on success the port comes back locked
	 * and is bound to this thread while still locked. */
	kr = ipc_port_alloc(current_task()->itk_space, &name, &port);
	if (kr == KERN_SUCCESS) {
		ipc_port_bind_special_reply_port_locked(port);

		/* Make a send right and insert it in the space at specified name */
		ipc_port_make_send_locked(port);
		ip_unlock(port);
		send_name = ipc_port_copyout_name_send(port, current_task()->itk_space, name);
		/*
		 * If insertion of send right failed, userland is doing something bad, error out.
		 * The space was marked inactive or the receive right just inserted above at the
		 * given name was moved, in either case do not try to deallocate the receive right.
		 */
		if (send_name == MACH_PORT_NULL || send_name == MACH_PORT_DEAD) {
			if (IP_VALID(thread->ith_special_reply_port)) {
				ipc_port_unbind_special_reply_port(thread, TRUE);
			}
			name = MACH_PORT_NULL;
		}
	} else {
		name = MACH_PORT_NULL;
	}
	return name;
}
952
953 /*
954 * Routine: ipc_port_bind_special_reply_port_locked
955 * Purpose:
956 * Bind the given port to current thread as a special reply port.
957 * Conditions:
958 * Port locked.
959 * Returns:
960 * None.
961 */
962
static void
ipc_port_bind_special_reply_port_locked(
	ipc_port_t port)
{
	thread_t thread = current_thread();
	/* A thread has at most one special reply port; the caller must
	 * have unbound any previous one. */
	assert(thread->ith_special_reply_port == NULL);

	/* ith_special_reply_port holds its own port reference. */
	ip_reference(port);
	thread->ith_special_reply_port = port;
	port->ip_specialreply = 1;
	port->ip_sync_link_state = PORT_SYNC_LINK_ANY;

	reset_ip_srp_bits(port);
}
977
978 /*
979 * Routine: ipc_port_unbind_special_reply_port
980 * Purpose:
981 * Unbind the thread's special reply port.
982 * If the special port has threads waiting on turnstile,
983 * update it's inheritor.
984 * Condition:
985 * Nothing locked.
986 * Returns:
987 * None.
988 */
static kern_return_t
ipc_port_unbind_special_reply_port(
	thread_t thread,
	boolean_t unbind_active_port)
{
	ipc_port_t special_reply_port = thread->ith_special_reply_port;

	ip_lock(special_reply_port);

	/* Return error if port active and unbind_active_port set to FALSE */
	if (unbind_active_port == FALSE && ip_active(special_reply_port)) {
		ip_unlock(special_reply_port);
		return KERN_FAILURE;
	}

	thread->ith_special_reply_port = NULL;
	/* Clear the special-reply state; this call consumes the lock. */
	ipc_port_adjust_special_reply_port_locked(special_reply_port, NULL,
	    IPC_PORT_ADJUST_SR_CLEAR_SPECIAL_REPLY, FALSE);
	/* port unlocked */

	/* drop the reference taken when the port was bound */
	ip_release(special_reply_port);
	return KERN_SUCCESS;
}
1012
1013 /*
1014 * Routine: thread_get_special_port [kernel call]
1015 * Purpose:
1016 * Clones a send right for one of the thread's
1017 * special ports.
1018 * Conditions:
1019 * Nothing locked.
1020 * Returns:
1021 * KERN_SUCCESS Extracted a send right.
1022 * KERN_INVALID_ARGUMENT The thread is null.
1023 * KERN_FAILURE The thread is dead.
1024 * KERN_INVALID_ARGUMENT Invalid special port.
1025 */
1026
1027 kern_return_t
1028 thread_get_special_port(
1029 thread_t thread,
1030 int which,
1031 ipc_port_t *portp)
1032 {
1033 kern_return_t result = KERN_SUCCESS;
1034 ipc_port_t *whichp;
1035
1036 if (thread == THREAD_NULL) {
1037 return KERN_INVALID_ARGUMENT;
1038 }
1039
1040 switch (which) {
1041 case THREAD_KERNEL_PORT:
1042 whichp = &thread->ith_sself;
1043 break;
1044
1045 default:
1046 return KERN_INVALID_ARGUMENT;
1047 }
1048
1049 thread_mtx_lock(thread);
1050
1051 if (thread->active) {
1052 *portp = ipc_port_copy_send(*whichp);
1053 } else {
1054 result = KERN_FAILURE;
1055 }
1056
1057 thread_mtx_unlock(thread);
1058
1059 return result;
1060 }
1061
1062 /*
1063 * Routine: thread_set_special_port [kernel call]
1064 * Purpose:
1065 * Changes one of the thread's special ports,
1066 * setting it to the supplied send right.
1067 * Conditions:
1068 * Nothing locked. If successful, consumes
1069 * the supplied send right.
1070 * Returns:
1071 * KERN_SUCCESS Changed the special port.
1072 * KERN_INVALID_ARGUMENT The thread is null.
1073 * KERN_FAILURE The thread is dead.
1074 * KERN_INVALID_ARGUMENT Invalid special port.
1075 */
1076
1077 kern_return_t
1078 thread_set_special_port(
1079 thread_t thread,
1080 int which,
1081 ipc_port_t port)
1082 {
1083 kern_return_t result = KERN_SUCCESS;
1084 ipc_port_t *whichp, old = IP_NULL;
1085
1086 if (thread == THREAD_NULL) {
1087 return KERN_INVALID_ARGUMENT;
1088 }
1089
1090 switch (which) {
1091 case THREAD_KERNEL_PORT:
1092 whichp = &thread->ith_sself;
1093 break;
1094
1095 default:
1096 return KERN_INVALID_ARGUMENT;
1097 }
1098
1099 thread_mtx_lock(thread);
1100
1101 if (thread->active) {
1102 old = *whichp;
1103 *whichp = port;
1104 } else {
1105 result = KERN_FAILURE;
1106 }
1107
1108 thread_mtx_unlock(thread);
1109
1110 if (IP_VALID(old)) {
1111 ipc_port_release_send(old);
1112 }
1113
1114 return result;
1115 }
1116
1117 /*
1118 * Routine: task_get_special_port [kernel call]
1119 * Purpose:
1120 * Clones a send right for one of the task's
1121 * special ports.
1122 * Conditions:
1123 * Nothing locked.
1124 * Returns:
1125 * KERN_SUCCESS Extracted a send right.
1126 * KERN_INVALID_ARGUMENT The task is null.
1127 * KERN_FAILURE The task/space is dead.
1128 * KERN_INVALID_ARGUMENT Invalid special port.
1129 */
1130
kern_return_t
task_get_special_port(
	task_t task,
	int which,
	ipc_port_t *portp)
{
	ipc_port_t port;

	if (task == TASK_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	itk_lock(task);
	if (task->itk_self == IP_NULL) {
		/* task's IPC state already terminated */
		itk_unlock(task);
		return KERN_FAILURE;
	}

	switch (which) {
	case TASK_KERNEL_PORT:
		port = ipc_port_copy_send(task->itk_sself);
		break;

	case TASK_NAME_PORT:
		/* name port: make_send mints a new right off the kernel
		 * port itself rather than copying a stashed right */
		port = ipc_port_make_send(task->itk_nself);
		break;

	case TASK_HOST_PORT:
		port = ipc_port_copy_send(task->itk_host);
		break;

	case TASK_BOOTSTRAP_PORT:
		port = ipc_port_copy_send(task->itk_bootstrap);
		break;

	case TASK_SEATBELT_PORT:
		port = ipc_port_copy_send(task->itk_seatbelt);
		break;

	case TASK_ACCESS_PORT:
		port = ipc_port_copy_send(task->itk_task_access);
		break;

	case TASK_DEBUG_CONTROL_PORT:
		port = ipc_port_copy_send(task->itk_debug_control);
		break;

	default:
		itk_unlock(task);
		return KERN_INVALID_ARGUMENT;
	}
	itk_unlock(task);

	*portp = port;
	return KERN_SUCCESS;
}
1187
1188 /*
1189 * Routine: task_set_special_port [kernel call]
1190 * Purpose:
1191 * Changes one of the task's special ports,
1192 * setting it to the supplied send right.
1193 * Conditions:
1194 * Nothing locked. If successful, consumes
1195 * the supplied send right.
1196 * Returns:
1197 * KERN_SUCCESS Changed the special port.
1198 * KERN_INVALID_ARGUMENT The task is null.
1199 * KERN_FAILURE The task/space is dead.
1200 * KERN_INVALID_ARGUMENT Invalid special port.
1201 * KERN_NO_ACCESS Attempted overwrite of seatbelt port.
1202 */
1203
kern_return_t
task_set_special_port(
	task_t task,
	int which,
	ipc_port_t port)
{
	ipc_port_t *whichp;     /* slot in the task selected by 'which' */
	ipc_port_t old;         /* displaced right, released after unlock */

	if (task == TASK_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	switch (which) {
	case TASK_KERNEL_PORT:
		whichp = &task->itk_sself;
		break;

	case TASK_HOST_PORT:
		whichp = &task->itk_host;
		break;

	case TASK_BOOTSTRAP_PORT:
		whichp = &task->itk_bootstrap;
		break;

	case TASK_SEATBELT_PORT:
		whichp = &task->itk_seatbelt;
		break;

	case TASK_ACCESS_PORT:
		whichp = &task->itk_task_access;
		break;

	case TASK_DEBUG_CONTROL_PORT:
		whichp = &task->itk_debug_control;
		break;

	default:
		return KERN_INVALID_ARGUMENT;
	}/* switch */

	itk_lock(task);
	if (task->itk_self == IP_NULL) {
		/* task's IPC state already terminated */
		itk_unlock(task);
		return KERN_FAILURE;
	}

	/* do not allow overwrite of seatbelt or task access ports */
	if ((TASK_SEATBELT_PORT == which || TASK_ACCESS_PORT == which)
	    && IP_VALID(*whichp)) {
		itk_unlock(task);
		return KERN_NO_ACCESS;
	}

	old = *whichp;
	*whichp = port;
	itk_unlock(task);

	/* release displaced right outside the lock */
	if (IP_VALID(old)) {
		ipc_port_release_send(old);
	}
	return KERN_SUCCESS;
}
1268
1269
/*
 *	Routine:	mach_ports_register [kernel call]
 *	Purpose:
 *		Stash a handful of port send rights in the task.
 *		Child tasks will inherit these rights, but they
 *		must use mach_ports_lookup to acquire them.
 *
 *		The rights are supplied in a (wired) kalloc'd segment.
 *		Rights which aren't supplied are assumed to be null.
 *	Conditions:
 *		Nothing locked.  If successful, consumes
 *		the supplied rights and memory.
 *	Returns:
 *		KERN_SUCCESS		Stashed the port rights.
 *		KERN_INVALID_ARGUMENT	The task is null.
 *		KERN_INVALID_ARGUMENT	The task is dead.
 *		KERN_INVALID_ARGUMENT	The memory param is null.
 *		KERN_INVALID_ARGUMENT	Too many port rights supplied.
 */

kern_return_t
mach_ports_register(
	task_t task,
	mach_port_array_t memory,
	mach_msg_type_number_t portsCnt)
{
	ipc_port_t ports[TASK_PORT_REGISTER_MAX];	/* on-stack staging copy */
	unsigned int i;

	if ((task == TASK_NULL) ||
	    (portsCnt > TASK_PORT_REGISTER_MAX) ||
	    (portsCnt && memory == NULL)) {
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 * Pad the port rights with nulls.
	 */

	for (i = 0; i < portsCnt; i++) {
		ports[i] = memory[i];
	}
	for (; i < TASK_PORT_REGISTER_MAX; i++) {
		ports[i] = IP_NULL;
	}

	itk_lock(task);
	/* itk_self == IP_NULL indicates the task's IPC state is gone. */
	if (task->itk_self == IP_NULL) {
		itk_unlock(task);
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 * Replace the old send rights with the new.
	 * Release the old rights after unlocking.
	 */

	for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
		ipc_port_t old;

		old = task->itk_registered[i];
		task->itk_registered[i] = ports[i];
		/* Reuse the staging array to carry the displaced rights out. */
		ports[i] = old;
	}

	itk_unlock(task);

	/* Drop displaced rights now that no lock is held. */
	for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
		if (IP_VALID(ports[i])) {
			ipc_port_release_send(ports[i]);
		}
	}

	/*
	 * Now that the operation is known to be successful,
	 * we can free the memory.
	 */

	if (portsCnt != 0) {
		kfree(memory,
		    (vm_size_t) (portsCnt * sizeof(mach_port_t)));
	}

	return KERN_SUCCESS;
}
1355
/*
 *	Routine:	mach_ports_lookup [kernel call]
 *	Purpose:
 *		Retrieves (clones) the stashed port send rights.
 *	Conditions:
 *		Nothing locked.  If successful, the caller gets
 *		rights and memory (ownership of both transfers to
 *		the caller).
 *	Returns:
 *		KERN_SUCCESS		Retrieved the send rights.
 *		KERN_INVALID_ARGUMENT	The task is null.
 *		KERN_INVALID_ARGUMENT	The task is dead.
 *		KERN_RESOURCE_SHORTAGE	Couldn't allocate memory.
 */

kern_return_t
mach_ports_lookup(
	task_t task,
	mach_port_array_t *portsp,
	mach_msg_type_number_t *portsCnt)
{
	void *memory;
	vm_size_t size;
	ipc_port_t *ports;
	int i;

	if (task == TASK_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	size = (vm_size_t) (TASK_PORT_REGISTER_MAX * sizeof(ipc_port_t));

	/* Allocate the result buffer before taking the task lock. */
	memory = kalloc(size);
	if (memory == 0) {
		return KERN_RESOURCE_SHORTAGE;
	}

	itk_lock(task);
	/* Dead task: free the buffer we just allocated and bail. */
	if (task->itk_self == IP_NULL) {
		itk_unlock(task);

		kfree(memory, size);
		return KERN_INVALID_ARGUMENT;
	}

	ports = (ipc_port_t *) memory;

	/*
	 * Clone port rights.  Because kalloc'd memory
	 * is wired, we won't fault while holding the task lock.
	 */

	for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
		ports[i] = ipc_port_copy_send(task->itk_registered[i]);
	}

	itk_unlock(task);

	/* Caller now owns the buffer and the cloned rights. */
	*portsp = (mach_port_array_t) ports;
	*portsCnt = TASK_PORT_REGISTER_MAX;
	return KERN_SUCCESS;
}
1417
1418 kern_return_t
1419 task_conversion_eval(task_t caller, task_t victim)
1420 {
1421 /*
1422 * Tasks are allowed to resolve their own task ports, and the kernel is
1423 * allowed to resolve anyone's task port.
1424 */
1425 if (caller == kernel_task) {
1426 return KERN_SUCCESS;
1427 }
1428
1429 if (caller == victim) {
1430 return KERN_SUCCESS;
1431 }
1432
1433 /*
1434 * Only the kernel can can resolve the kernel's task port. We've established
1435 * by this point that the caller is not kernel_task.
1436 */
1437 if (victim == TASK_NULL || victim == kernel_task) {
1438 return KERN_INVALID_SECURITY;
1439 }
1440
1441 #if CONFIG_EMBEDDED
1442 /*
1443 * On embedded platforms, only a platform binary can resolve the task port
1444 * of another platform binary.
1445 */
1446 if ((victim->t_flags & TF_PLATFORM) && !(caller->t_flags & TF_PLATFORM)) {
1447 #if SECURE_KERNEL
1448 return KERN_INVALID_SECURITY;
1449 #else
1450 if (cs_relax_platform_task_ports) {
1451 return KERN_SUCCESS;
1452 } else {
1453 return KERN_INVALID_SECURITY;
1454 }
1455 #endif /* SECURE_KERNEL */
1456 }
1457 #endif /* CONFIG_EMBEDDED */
1458
1459 return KERN_SUCCESS;
1460 }
1461
/*
 *	Routine:	convert_port_to_locked_task
 *	Purpose:
 *		Internal helper routine to convert from a port to a locked
 *		task.  Used by several routines that try to convert from a
 *		task port to a reference on some task related object.
 *	Conditions:
 *		Nothing locked, blocking OK.  On success the returned
 *		task is locked; the caller must task_unlock() it.
 */
task_t
convert_port_to_locked_task(ipc_port_t port)
{
	int try_failed_count = 0;

	while (IP_VALID(port)) {
		task_t ct = current_task();
		task_t task;

		ip_lock(port);
		/* The port must be live and name a task kobject. */
		if (!ip_active(port) || (ip_kotype(port) != IKOT_TASK)) {
			ip_unlock(port);
			return TASK_NULL;
		}
		task = (task_t) port->ip_kobject;
		assert(task != TASK_NULL);

		/* Enforce the task-port conversion security policy. */
		if (task_conversion_eval(ct, task)) {
			ip_unlock(port);
			return TASK_NULL;
		}

		/*
		 * Normal lock ordering puts task_lock() before ip_lock().
		 * Attempt out-of-order locking here.
		 */
		if (task_lock_try(task)) {
			ip_unlock(port);
			return task;
		}
		try_failed_count++;

		/* Contention: drop the port lock, back off, retry. */
		ip_unlock(port);
		mutex_pause(try_failed_count);
	}
	return TASK_NULL;
}
1508
/*
 *	Routine:	convert_port_to_locked_task_inspect
 *	Purpose:
 *		Internal helper routine to convert from a port to a locked
 *		task inspect right. Used by internal routines that try to convert from a
 *		task inspect port to a reference on some task related object.
 *	Conditions:
 *		Nothing locked, blocking OK.  On success the returned
 *		task is locked; the caller must task_unlock() it.
 *		NOTE: unlike convert_port_to_locked_task, no
 *		task_conversion_eval() policy check is applied here.
 */
task_inspect_t
convert_port_to_locked_task_inspect(ipc_port_t port)
{
	int try_failed_count = 0;

	while (IP_VALID(port)) {
		task_inspect_t task;

		ip_lock(port);
		/* The port must be live and name a task kobject. */
		if (!ip_active(port) || (ip_kotype(port) != IKOT_TASK)) {
			ip_unlock(port);
			return TASK_INSPECT_NULL;
		}
		task = (task_inspect_t)port->ip_kobject;
		assert(task != TASK_INSPECT_NULL);
		/*
		 * Normal lock ordering puts task_lock() before ip_lock().
		 * Attempt out-of-order locking here.
		 */
		if (task_lock_try((task_t)task)) {
			ip_unlock(port);
			return task;
		}
		try_failed_count++;

		/* Contention: drop the port lock, back off, retry. */
		ip_unlock(port);
		mutex_pause(try_failed_count);
	}
	return TASK_INSPECT_NULL;
}
1548
1549
1550 /*
1551 * Routine: convert_port_to_task
1552 * Purpose:
1553 * Convert from a port to a task.
1554 * Doesn't consume the port ref; produces a task ref,
1555 * which may be null.
1556 * Conditions:
1557 * Nothing locked.
1558 */
1559 task_t
1560 convert_port_to_task(
1561 ipc_port_t port)
1562 {
1563 return convert_port_to_task_with_exec_token(port, NULL);
1564 }
1565
/*
 *	Routine:	convert_port_to_task_with_exec_token
 *	Purpose:
 *		Convert from a port to a task and return
 *		the exec token stored in the task.
 *		Doesn't consume the port ref; produces a task ref,
 *		which may be null.
 *	Conditions:
 *		Nothing locked.
 */
task_t
convert_port_to_task_with_exec_token(
	ipc_port_t port,
	uint32_t *exec_token)
{
	task_t task = TASK_NULL;

	if (IP_VALID(port)) {
		ip_lock(port);

		if (ip_active(port) &&
		    ip_kotype(port) == IKOT_TASK) {
			task_t ct = current_task();
			task = (task_t)port->ip_kobject;
			assert(task != TASK_NULL);

			/* Policy check; on denial return without a task ref. */
			if (task_conversion_eval(ct, task)) {
				ip_unlock(port);
				return TASK_NULL;
			}

			/* exec_token is optional; filled only on success. */
			if (exec_token) {
				*exec_token = task->exec_token;
			}
			task_reference_internal(task);
		}

		ip_unlock(port);
	}

	return task;
}
1608
1609 /*
1610 * Routine: convert_port_to_task_name
1611 * Purpose:
1612 * Convert from a port to a task name.
1613 * Doesn't consume the port ref; produces a task name ref,
1614 * which may be null.
1615 * Conditions:
1616 * Nothing locked.
1617 */
1618 task_name_t
1619 convert_port_to_task_name(
1620 ipc_port_t port)
1621 {
1622 task_name_t task = TASK_NULL;
1623
1624 if (IP_VALID(port)) {
1625 ip_lock(port);
1626
1627 if (ip_active(port) &&
1628 (ip_kotype(port) == IKOT_TASK ||
1629 ip_kotype(port) == IKOT_TASK_NAME)) {
1630 task = (task_name_t)port->ip_kobject;
1631 assert(task != TASK_NAME_NULL);
1632
1633 task_reference_internal(task);
1634 }
1635
1636 ip_unlock(port);
1637 }
1638
1639 return task;
1640 }
1641
1642 /*
1643 * Routine: convert_port_to_task_inspect
1644 * Purpose:
1645 * Convert from a port to a task inspection right
1646 * Doesn't consume the port ref; produces a task ref,
1647 * which may be null.
1648 * Conditions:
1649 * Nothing locked.
1650 */
1651 task_inspect_t
1652 convert_port_to_task_inspect(
1653 ipc_port_t port)
1654 {
1655 task_inspect_t task = TASK_INSPECT_NULL;
1656
1657 if (IP_VALID(port)) {
1658 ip_lock(port);
1659
1660 if (ip_active(port) &&
1661 ip_kotype(port) == IKOT_TASK) {
1662 task = (task_inspect_t)port->ip_kobject;
1663 assert(task != TASK_INSPECT_NULL);
1664
1665 task_reference_internal(task);
1666 }
1667
1668 ip_unlock(port);
1669 }
1670
1671 return task;
1672 }
1673
1674 /*
1675 * Routine: convert_port_to_task_suspension_token
1676 * Purpose:
1677 * Convert from a port to a task suspension token.
1678 * Doesn't consume the port ref; produces a suspension token ref,
1679 * which may be null.
1680 * Conditions:
1681 * Nothing locked.
1682 */
1683 task_suspension_token_t
1684 convert_port_to_task_suspension_token(
1685 ipc_port_t port)
1686 {
1687 task_suspension_token_t task = TASK_NULL;
1688
1689 if (IP_VALID(port)) {
1690 ip_lock(port);
1691
1692 if (ip_active(port) &&
1693 ip_kotype(port) == IKOT_TASK_RESUME) {
1694 task = (task_suspension_token_t)port->ip_kobject;
1695 assert(task != TASK_NULL);
1696
1697 task_reference_internal(task);
1698 }
1699
1700 ip_unlock(port);
1701 }
1702
1703 return task;
1704 }
1705
1706 /*
1707 * Routine: convert_port_to_space
1708 * Purpose:
1709 * Convert from a port to a space.
1710 * Doesn't consume the port ref; produces a space ref,
1711 * which may be null.
1712 * Conditions:
1713 * Nothing locked.
1714 */
1715 ipc_space_t
1716 convert_port_to_space(
1717 ipc_port_t port)
1718 {
1719 ipc_space_t space;
1720 task_t task;
1721
1722 task = convert_port_to_locked_task(port);
1723
1724 if (task == TASK_NULL) {
1725 return IPC_SPACE_NULL;
1726 }
1727
1728 if (!task->active) {
1729 task_unlock(task);
1730 return IPC_SPACE_NULL;
1731 }
1732
1733 space = task->itk_space;
1734 is_reference(space);
1735 task_unlock(task);
1736 return space;
1737 }
1738
1739 /*
1740 * Routine: convert_port_to_space_inspect
1741 * Purpose:
1742 * Convert from a port to a space inspect right.
1743 * Doesn't consume the port ref; produces a space inspect ref,
1744 * which may be null.
1745 * Conditions:
1746 * Nothing locked.
1747 */
1748 ipc_space_inspect_t
1749 convert_port_to_space_inspect(
1750 ipc_port_t port)
1751 {
1752 ipc_space_inspect_t space;
1753 task_inspect_t task;
1754
1755 task = convert_port_to_locked_task_inspect(port);
1756
1757 if (task == TASK_INSPECT_NULL) {
1758 return IPC_SPACE_INSPECT_NULL;
1759 }
1760
1761 if (!task->active) {
1762 task_unlock(task);
1763 return IPC_SPACE_INSPECT_NULL;
1764 }
1765
1766 space = (ipc_space_inspect_t)task->itk_space;
1767 is_reference((ipc_space_t)space);
1768 task_unlock((task_t)task);
1769 return space;
1770 }
1771
1772 /*
1773 * Routine: convert_port_to_map
1774 * Purpose:
1775 * Convert from a port to a map.
1776 * Doesn't consume the port ref; produces a map ref,
1777 * which may be null.
1778 * Conditions:
1779 * Nothing locked.
1780 */
1781
1782 vm_map_t
1783 convert_port_to_map(
1784 ipc_port_t port)
1785 {
1786 task_t task;
1787 vm_map_t map;
1788
1789 task = convert_port_to_locked_task(port);
1790
1791 if (task == TASK_NULL) {
1792 return VM_MAP_NULL;
1793 }
1794
1795 if (!task->active) {
1796 task_unlock(task);
1797 return VM_MAP_NULL;
1798 }
1799
1800 map = task->map;
1801 vm_map_reference_swap(map);
1802 task_unlock(task);
1803 return map;
1804 }
1805
1806
1807 /*
1808 * Routine: convert_port_to_thread
1809 * Purpose:
1810 * Convert from a port to a thread.
1811 * Doesn't consume the port ref; produces an thread ref,
1812 * which may be null.
1813 * Conditions:
1814 * Nothing locked.
1815 */
1816
1817 thread_t
1818 convert_port_to_thread(
1819 ipc_port_t port)
1820 {
1821 thread_t thread = THREAD_NULL;
1822
1823 if (IP_VALID(port)) {
1824 ip_lock(port);
1825
1826 if (ip_active(port) &&
1827 ip_kotype(port) == IKOT_THREAD) {
1828 thread = (thread_t)port->ip_kobject;
1829 assert(thread != THREAD_NULL);
1830
1831 /* Use task conversion rules for thread control conversions */
1832 if (task_conversion_eval(current_task(), thread->task) != KERN_SUCCESS) {
1833 ip_unlock(port);
1834 return THREAD_NULL;
1835 }
1836
1837 thread_reference_internal(thread);
1838 }
1839
1840 ip_unlock(port);
1841 }
1842
1843 return thread;
1844 }
1845
1846 /*
1847 * Routine: convert_port_to_thread_inspect
1848 * Purpose:
1849 * Convert from a port to a thread inspection right
1850 * Doesn't consume the port ref; produces a thread ref,
1851 * which may be null.
1852 * Conditions:
1853 * Nothing locked.
1854 */
1855 thread_inspect_t
1856 convert_port_to_thread_inspect(
1857 ipc_port_t port)
1858 {
1859 thread_inspect_t thread = THREAD_INSPECT_NULL;
1860
1861 if (IP_VALID(port)) {
1862 ip_lock(port);
1863
1864 if (ip_active(port) &&
1865 ip_kotype(port) == IKOT_THREAD) {
1866 thread = (thread_inspect_t)port->ip_kobject;
1867 assert(thread != THREAD_INSPECT_NULL);
1868 thread_reference_internal((thread_t)thread);
1869 }
1870 ip_unlock(port);
1871 }
1872
1873 return thread;
1874 }
1875
1876 /*
1877 * Routine: convert_thread_inspect_to_port
1878 * Purpose:
1879 * Convert from a thread inspect reference to a port.
1880 * Consumes a thread ref;
1881 * As we never export thread inspect ports, always
1882 * creates a NULL port.
1883 * Conditions:
1884 * Nothing locked.
1885 */
1886
1887 ipc_port_t
1888 convert_thread_inspect_to_port(thread_inspect_t thread)
1889 {
1890 thread_deallocate(thread);
1891 return IP_NULL;
1892 }
1893
1894
1895 /*
1896 * Routine: port_name_to_thread
1897 * Purpose:
1898 * Convert from a port name to an thread reference
1899 * A name of MACH_PORT_NULL is valid for the null thread.
1900 * Conditions:
1901 * Nothing locked.
1902 *
1903 * TODO: Could this be faster if it were ipc_port_translate_send based, like thread_switch?
1904 * We could avoid extra lock/unlock and extra ref operations on the port.
1905 */
1906 thread_t
1907 port_name_to_thread(
1908 mach_port_name_t name)
1909 {
1910 thread_t thread = THREAD_NULL;
1911 ipc_port_t kport;
1912
1913 if (MACH_PORT_VALID(name)) {
1914 if (ipc_object_copyin(current_space(), name,
1915 MACH_MSG_TYPE_COPY_SEND,
1916 (ipc_object_t *)&kport) != KERN_SUCCESS) {
1917 return THREAD_NULL;
1918 }
1919
1920 thread = convert_port_to_thread(kport);
1921
1922 if (IP_VALID(kport)) {
1923 ipc_port_release_send(kport);
1924 }
1925 }
1926
1927 return thread;
1928 }
1929
1930 task_t
1931 port_name_to_task(
1932 mach_port_name_t name)
1933 {
1934 ipc_port_t kern_port;
1935 kern_return_t kr;
1936 task_t task = TASK_NULL;
1937
1938 if (MACH_PORT_VALID(name)) {
1939 kr = ipc_object_copyin(current_space(), name,
1940 MACH_MSG_TYPE_COPY_SEND,
1941 (ipc_object_t *) &kern_port);
1942 if (kr != KERN_SUCCESS) {
1943 return TASK_NULL;
1944 }
1945
1946 task = convert_port_to_task(kern_port);
1947
1948 if (IP_VALID(kern_port)) {
1949 ipc_port_release_send(kern_port);
1950 }
1951 }
1952 return task;
1953 }
1954
1955 task_inspect_t
1956 port_name_to_task_inspect(
1957 mach_port_name_t name)
1958 {
1959 ipc_port_t kern_port;
1960 kern_return_t kr;
1961 task_inspect_t ti = TASK_INSPECT_NULL;
1962
1963 if (MACH_PORT_VALID(name)) {
1964 kr = ipc_object_copyin(current_space(), name,
1965 MACH_MSG_TYPE_COPY_SEND,
1966 (ipc_object_t *)&kern_port);
1967 if (kr != KERN_SUCCESS) {
1968 return TASK_NULL;
1969 }
1970
1971 ti = convert_port_to_task_inspect(kern_port);
1972
1973 if (IP_VALID(kern_port)) {
1974 ipc_port_release_send(kern_port);
1975 }
1976 }
1977 return ti;
1978 }
1979
1980 /*
1981 * Routine: port_name_to_host
1982 * Purpose:
1983 * Convert from a port name to a host pointer.
1984 * NOTE: This does _not_ return a +1 reference to the host_t
1985 * Conditions:
1986 * Nothing locked.
1987 */
1988 host_t
1989 port_name_to_host(
1990 mach_port_name_t name)
1991 {
1992 host_t host = HOST_NULL;
1993 kern_return_t kr;
1994 ipc_port_t port;
1995
1996 if (MACH_PORT_VALID(name)) {
1997 kr = ipc_port_translate_send(current_space(), name, &port);
1998 if (kr == KERN_SUCCESS) {
1999 host = convert_port_to_host(port);
2000 ip_unlock(port);
2001 }
2002 }
2003 return host;
2004 }
2005
2006 /*
2007 * Routine: convert_task_to_port
2008 * Purpose:
2009 * Convert from a task to a port.
2010 * Consumes a task ref; produces a naked send right
2011 * which may be invalid.
2012 * Conditions:
2013 * Nothing locked.
2014 */
2015
2016 ipc_port_t
2017 convert_task_to_port(
2018 task_t task)
2019 {
2020 ipc_port_t port;
2021
2022 itk_lock(task);
2023
2024 if (task->itk_self != IP_NULL) {
2025 port = ipc_port_make_send(task->itk_self);
2026 } else {
2027 port = IP_NULL;
2028 }
2029
2030 itk_unlock(task);
2031
2032 task_deallocate(task);
2033 return port;
2034 }
2035
2036 /*
2037 * Routine: convert_task_inspect_to_port
2038 * Purpose:
2039 * Convert from a task inspect reference to a port.
2040 * Consumes a task ref;
2041 * As we never export task inspect ports, always
2042 * creates a NULL port.
2043 * Conditions:
2044 * Nothing locked.
2045 */
2046 ipc_port_t
2047 convert_task_inspect_to_port(
2048 task_inspect_t task)
2049 {
2050 task_deallocate(task);
2051
2052 return IP_NULL;
2053 }
2054
/*
 *	Routine:	convert_task_suspend_token_to_port
 *	Purpose:
 *		Convert from a task suspension token to a port.
 *		Consumes a task suspension token ref; produces a naked send-once right
 *		which may be invalid.
 *	Conditions:
 *		Nothing locked.
 */
ipc_port_t
convert_task_suspension_token_to_port(
	task_suspension_token_t task)
{
	ipc_port_t port;

	task_lock(task);
	if (task->active) {
		/* Lazily create the resume port on first use. */
		if (task->itk_resume == IP_NULL) {
			task->itk_resume = ipc_port_alloc_kernel();
			if (!IP_VALID(task->itk_resume)) {
				panic("failed to create resume port");
			}

			ipc_kobject_set(task->itk_resume, (ipc_kobject_t) task, IKOT_TASK_RESUME);
		}

		/*
		 * Create a send-once right for each instance of a direct user-called
		 * task_suspend2 call. Each time one of these send-once rights is abandoned,
		 * the notification handler will resume the target task.
		 */
		port = ipc_port_make_sonce(task->itk_resume);
		assert(IP_VALID(port));
	} else {
		/* Inactive task: no right is produced. */
		port = IP_NULL;
	}

	task_unlock(task);
	/* Consume the caller's suspension-token reference. */
	task_suspension_token_deallocate(task);

	return port;
}
2097
2098
2099 /*
2100 * Routine: convert_task_name_to_port
2101 * Purpose:
2102 * Convert from a task name ref to a port.
2103 * Consumes a task name ref; produces a naked send right
2104 * which may be invalid.
2105 * Conditions:
2106 * Nothing locked.
2107 */
2108
2109 ipc_port_t
2110 convert_task_name_to_port(
2111 task_name_t task_name)
2112 {
2113 ipc_port_t port;
2114
2115 itk_lock(task_name);
2116 if (task_name->itk_nself != IP_NULL) {
2117 port = ipc_port_make_send(task_name->itk_nself);
2118 } else {
2119 port = IP_NULL;
2120 }
2121 itk_unlock(task_name);
2122
2123 task_name_deallocate(task_name);
2124 return port;
2125 }
2126
2127 /*
2128 * Routine: convert_thread_to_port
2129 * Purpose:
2130 * Convert from a thread to a port.
2131 * Consumes an thread ref; produces a naked send right
2132 * which may be invalid.
2133 * Conditions:
2134 * Nothing locked.
2135 */
2136
2137 ipc_port_t
2138 convert_thread_to_port(
2139 thread_t thread)
2140 {
2141 ipc_port_t port;
2142
2143 thread_mtx_lock(thread);
2144
2145 if (thread->ith_self != IP_NULL) {
2146 port = ipc_port_make_send(thread->ith_self);
2147 } else {
2148 port = IP_NULL;
2149 }
2150
2151 thread_mtx_unlock(thread);
2152
2153 thread_deallocate(thread);
2154
2155 return port;
2156 }
2157
2158 /*
2159 * Routine: space_deallocate
2160 * Purpose:
2161 * Deallocate a space ref produced by convert_port_to_space.
2162 * Conditions:
2163 * Nothing locked.
2164 */
2165
2166 void
2167 space_deallocate(
2168 ipc_space_t space)
2169 {
2170 if (space != IS_NULL) {
2171 is_release(space);
2172 }
2173 }
2174
2175 /*
2176 * Routine: space_inspect_deallocate
2177 * Purpose:
2178 * Deallocate a space inspect ref produced by convert_port_to_space_inspect.
2179 * Conditions:
2180 * Nothing locked.
2181 */
2182
2183 void
2184 space_inspect_deallocate(
2185 ipc_space_inspect_t space)
2186 {
2187 if (space != IS_INSPECT_NULL) {
2188 is_release((ipc_space_t)space);
2189 }
2190 }
2191
/*
 *	Routine:	thread/task_set_exception_ports [kernel call]
 *	Purpose:
 *			Sets the thread/task exception port, flavor and
 *			behavior for the exception types specified by the mask.
 *			There will be one send right per exception per valid
 *			port.
 *	Conditions:
 *		Nothing locked.  If successful, consumes
 *		the supplied send right.
 *	Returns:
 *		KERN_SUCCESS		Changed the special port.
 *		KERN_INVALID_ARGUMENT	The thread is null,
 *					Illegal mask bit set.
 *					Illegal exception behavior
 *		KERN_FAILURE		The thread is dead.
 */

kern_return_t
thread_set_exception_ports(
	thread_t thread,
	exception_mask_t exception_mask,
	ipc_port_t new_port,
	exception_behavior_t new_behavior,
	thread_state_flavor_t new_flavor)
{
	ipc_port_t old_port[EXC_TYPES_COUNT];	/* displaced rights, released after unlock */
	/* privileged if the calling task's security token is root's */
	boolean_t privileged = current_task()->sec_token.val[0] == 0;
	register int i;

#if CONFIG_MACF
	struct label *new_label;
#endif

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	if (exception_mask & ~EXC_MASK_VALID) {
		return KERN_INVALID_ARGUMENT;
	}

	/* A valid port must carry a recognized behavior. */
	if (IP_VALID(new_port)) {
		switch (new_behavior & ~MACH_EXCEPTION_CODES) {
		case EXCEPTION_DEFAULT:
		case EXCEPTION_STATE:
		case EXCEPTION_STATE_IDENTITY:
			break;

		default:
			return KERN_INVALID_ARGUMENT;
		}
	}

	/*
	 * Check the validity of the thread_state_flavor by calling the
	 * VALID_THREAD_STATE_FLAVOR architecture dependent macro defined in
	 * osfmk/mach/ARCHITECTURE/thread_status.h
	 */
	if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor)) {
		return KERN_INVALID_ARGUMENT;
	}

#if CONFIG_MACF
	/* MAC label of the caller, consulted per exception type below. */
	new_label = mac_exc_create_label_for_current_proc();
#endif

	thread_mtx_lock(thread);

	if (!thread->active) {
		thread_mtx_unlock(thread);

		return KERN_FAILURE;
	}

	/* Per-thread exception actions are allocated lazily. */
	if (thread->exc_actions == NULL) {
		ipc_thread_init_exc_actions(thread);
	}
	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
		if ((exception_mask & (1 << i))
#if CONFIG_MACF
		    && mac_exc_update_action_label(&thread->exc_actions[i], new_label) == 0
#endif
		    ) {
			/* One cloned send right per selected exception type;
			 * remember the displaced right for later release. */
			old_port[i] = thread->exc_actions[i].port;
			thread->exc_actions[i].port = ipc_port_copy_send(new_port);
			thread->exc_actions[i].behavior = new_behavior;
			thread->exc_actions[i].flavor = new_flavor;
			thread->exc_actions[i].privileged = privileged;
		} else {
			old_port[i] = IP_NULL;
		}
	}

	thread_mtx_unlock(thread);

#if CONFIG_MACF
	mac_exc_free_label(new_label);
#endif

	/* Release displaced rights outside the thread mutex. */
	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
		if (IP_VALID(old_port[i])) {
			ipc_port_release_send(old_port[i]);
		}
	}

	if (IP_VALID(new_port)) {        /* consume send right */
		ipc_port_release_send(new_port);
	}

	return KERN_SUCCESS;
}
2304
/*
 *	Task-level counterpart of thread_set_exception_ports: installs
 *	one cloned send right per selected exception type in the task's
 *	exception action table.  Consumes the supplied send right on
 *	success; displaced rights are released after the task is unlocked.
 */
kern_return_t
task_set_exception_ports(
	task_t task,
	exception_mask_t exception_mask,
	ipc_port_t new_port,
	exception_behavior_t new_behavior,
	thread_state_flavor_t new_flavor)
{
	ipc_port_t old_port[EXC_TYPES_COUNT];	/* displaced rights, released after unlock */
	/* privileged if the calling task's security token is root's */
	boolean_t privileged = current_task()->sec_token.val[0] == 0;
	register int i;

#if CONFIG_MACF
	struct label *new_label;
#endif

	if (task == TASK_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	if (exception_mask & ~EXC_MASK_VALID) {
		return KERN_INVALID_ARGUMENT;
	}

	/* A valid port must carry a recognized behavior. */
	if (IP_VALID(new_port)) {
		switch (new_behavior & ~MACH_EXCEPTION_CODES) {
		case EXCEPTION_DEFAULT:
		case EXCEPTION_STATE:
		case EXCEPTION_STATE_IDENTITY:
			break;

		default:
			return KERN_INVALID_ARGUMENT;
		}
	}

	/*
	 * Check the validity of the thread_state_flavor by calling the
	 * VALID_THREAD_STATE_FLAVOR architecture dependent macro defined in
	 * osfmk/mach/ARCHITECTURE/thread_status.h
	 */
	if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor)) {
		return KERN_INVALID_ARGUMENT;
	}

#if CONFIG_MACF
	/* MAC label of the caller, consulted per exception type below. */
	new_label = mac_exc_create_label_for_current_proc();
#endif

	itk_lock(task);

	/* itk_self == IP_NULL indicates the task's IPC state is gone. */
	if (task->itk_self == IP_NULL) {
		itk_unlock(task);

		return KERN_FAILURE;
	}

	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
		if ((exception_mask & (1 << i))
#if CONFIG_MACF
		    && mac_exc_update_action_label(&task->exc_actions[i], new_label) == 0
#endif
		    ) {
			/* One cloned send right per selected exception type;
			 * remember the displaced right for later release. */
			old_port[i] = task->exc_actions[i].port;
			task->exc_actions[i].port =
			    ipc_port_copy_send(new_port);
			task->exc_actions[i].behavior = new_behavior;
			task->exc_actions[i].flavor = new_flavor;
			task->exc_actions[i].privileged = privileged;
		} else {
			old_port[i] = IP_NULL;
		}
	}

	itk_unlock(task);

#if CONFIG_MACF
	mac_exc_free_label(new_label);
#endif

	/* Release displaced rights outside the itk lock. */
	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
		if (IP_VALID(old_port[i])) {
			ipc_port_release_send(old_port[i]);
		}
	}

	if (IP_VALID(new_port)) {        /* consume send right */
		ipc_port_release_send(new_port);
	}

	return KERN_SUCCESS;
}
2397
/*
 *	Routine:	thread/task_swap_exception_ports [kernel call]
 *	Purpose:
 *			Sets the thread/task exception port, flavor and
 *			behavior for the exception types specified by the
 *			mask.
 *
 *			The old ports, behavior and flavors are returned
 *			Count specifies the array sizes on input and
 *			the number of returned ports etc. on output.  The
 *			arrays must be large enough to hold all the returned
 *			data, MIG returns an error otherwise.  The masks
 *			array specifies the corresponding exception type(s).
 *
 *	Conditions:
 *		Nothing locked.  If successful, consumes
 *		the supplied send right.
 *
 *		Returns up to [in] CountCnt elements.
 *	Returns:
 *		KERN_SUCCESS		Changed the special port.
 *		KERN_INVALID_ARGUMENT	The thread is null,
 *					Illegal mask bit set.
 *					Illegal exception behavior
 *		KERN_FAILURE		The thread is dead.
 */

kern_return_t
thread_swap_exception_ports(
	thread_t thread,
	exception_mask_t exception_mask,
	ipc_port_t new_port,
	exception_behavior_t new_behavior,
	thread_state_flavor_t new_flavor,
	exception_mask_array_t masks,
	mach_msg_type_number_t *CountCnt,
	exception_port_array_t ports,
	exception_behavior_array_t behaviors,
	thread_state_flavor_array_t flavors)
{
	ipc_port_t old_port[EXC_TYPES_COUNT];	/* displaced rights, released after unlock */
	/* privileged if the calling task's security token is root's */
	boolean_t privileged = current_task()->sec_token.val[0] == 0;
	unsigned int i, j, count;

#if CONFIG_MACF
	struct label *new_label;
#endif

	if (thread == THREAD_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	if (exception_mask & ~EXC_MASK_VALID) {
		return KERN_INVALID_ARGUMENT;
	}

	/* A valid port must carry a recognized behavior. */
	if (IP_VALID(new_port)) {
		switch (new_behavior & ~MACH_EXCEPTION_CODES) {
		case EXCEPTION_DEFAULT:
		case EXCEPTION_STATE:
		case EXCEPTION_STATE_IDENTITY:
			break;

		default:
			return KERN_INVALID_ARGUMENT;
		}
	}

	if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor)) {
		return KERN_INVALID_ARGUMENT;
	}

#if CONFIG_MACF
	/* MAC label of the caller, consulted per exception type below. */
	new_label = mac_exc_create_label_for_current_proc();
#endif

	thread_mtx_lock(thread);

	if (!thread->active) {
		thread_mtx_unlock(thread);

		return KERN_FAILURE;
	}

	/* Per-thread exception actions are allocated lazily. */
	if (thread->exc_actions == NULL) {
		ipc_thread_init_exc_actions(thread);
	}

	assert(EXC_TYPES_COUNT > FIRST_EXCEPTION);
	for (count = 0, i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT && count < *CountCnt; ++i) {
		if ((exception_mask & (1 << i))
#if CONFIG_MACF
		    && mac_exc_update_action_label(&thread->exc_actions[i], new_label) == 0
#endif
		    ) {
			for (j = 0; j < count; ++j) {
				/*
				 * search for an identical entry, if found
				 * set corresponding mask for this exception.
				 */
				if (thread->exc_actions[i].port == ports[j] &&
				    thread->exc_actions[i].behavior == behaviors[j] &&
				    thread->exc_actions[i].flavor == flavors[j]) {
					masks[j] |= (1 << i);
					break;
				}
			}

			/* No identical entry found: append a new one. */
			if (j == count) {
				masks[j] = (1 << i);
				ports[j] = ipc_port_copy_send(thread->exc_actions[i].port);

				behaviors[j] = thread->exc_actions[i].behavior;
				flavors[j] = thread->exc_actions[i].flavor;
				++count;
			}

			/* Install the new action; remember the displaced right. */
			old_port[i] = thread->exc_actions[i].port;
			thread->exc_actions[i].port = ipc_port_copy_send(new_port);
			thread->exc_actions[i].behavior = new_behavior;
			thread->exc_actions[i].flavor = new_flavor;
			thread->exc_actions[i].privileged = privileged;
		} else {
			old_port[i] = IP_NULL;
		}
	}

	thread_mtx_unlock(thread);

#if CONFIG_MACF
	mac_exc_free_label(new_label);
#endif

	/* Walk back over the examined entries (i is one past the last
	 * index touched by the loop above) releasing displaced rights. */
	while (--i >= FIRST_EXCEPTION) {
		if (IP_VALID(old_port[i])) {
			ipc_port_release_send(old_port[i]);
		}
	}

	if (IP_VALID(new_port)) {        /* consume send right */
		ipc_port_release_send(new_port);
	}

	*CountCnt = count;

	return KERN_SUCCESS;
}
2545
/*
 *	Routine:	task_swap_exception_ports [kernel call]
 *	Purpose:
 *		Sets the task's exception ports for the exceptions named in
 *		exception_mask to new_port (with new_behavior / new_flavor),
 *		returning the displaced (port, behavior, flavor) entries
 *		through the caller-supplied arrays.  Identical displaced
 *		entries are coalesced into one returned slot whose mask
 *		accumulates every exception bit that shared the entry.
 *	Conditions:
 *		Nothing locked.  Consumes a send right on new_port.
 */
kern_return_t
task_swap_exception_ports(
	task_t task,
	exception_mask_t exception_mask,
	ipc_port_t new_port,
	exception_behavior_t new_behavior,
	thread_state_flavor_t new_flavor,
	exception_mask_array_t masks,
	mach_msg_type_number_t *CountCnt,
	exception_port_array_t ports,
	exception_behavior_array_t behaviors,
	thread_state_flavor_array_t flavors)
{
	ipc_port_t old_port[EXC_TYPES_COUNT];
	/* handler is "privileged" iff the installing task's security token is root's */
	boolean_t privileged = current_task()->sec_token.val[0] == 0;
	unsigned int i, j, count;

#if CONFIG_MACF
	struct label *new_label;
#endif

	if (task == TASK_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	if (exception_mask & ~EXC_MASK_VALID) {
		return KERN_INVALID_ARGUMENT;
	}

	/* only validate behavior/flavor when a real port is being installed */
	if (IP_VALID(new_port)) {
		switch (new_behavior & ~MACH_EXCEPTION_CODES) {
		case EXCEPTION_DEFAULT:
		case EXCEPTION_STATE:
		case EXCEPTION_STATE_IDENTITY:
			break;

		default:
			return KERN_INVALID_ARGUMENT;
		}
	}

	if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor)) {
		return KERN_INVALID_ARGUMENT;
	}

#if CONFIG_MACF
	/* allocate the MAC label before taking the itk lock */
	new_label = mac_exc_create_label_for_current_proc();
#endif

	itk_lock(task);

	/* itk_self == IP_NULL indicates the task has been terminated */
	if (task->itk_self == IP_NULL) {
		itk_unlock(task);

		return KERN_FAILURE;
	}

	assert(EXC_TYPES_COUNT > FIRST_EXCEPTION);
	for (count = 0, i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT && count < *CountCnt; ++i) {
		if ((exception_mask & (1 << i))
#if CONFIG_MACF
		    && mac_exc_update_action_label(&task->exc_actions[i], new_label) == 0
#endif
		    ) {
			for (j = 0; j < count; j++) {
				/*
				 * search for an identical entry, if found
				 * set corresponding mask for this exception.
				 */
				if (task->exc_actions[i].port == ports[j] &&
				    task->exc_actions[i].behavior == behaviors[j] &&
				    task->exc_actions[i].flavor == flavors[j]) {
					masks[j] |= (1 << i);
					break;
				}
			}

			/* no identical entry: open a new output slot
			 * (j == count < *CountCnt by the loop condition) */
			if (j == count) {
				masks[j] = (1 << i);
				ports[j] = ipc_port_copy_send(task->exc_actions[i].port);
				behaviors[j] = task->exc_actions[i].behavior;
				flavors[j] = task->exc_actions[i].flavor;
				++count;
			}

			/* stash the displaced port; its send right is
			 * released only after the lock is dropped */
			old_port[i] = task->exc_actions[i].port;

			task->exc_actions[i].port = ipc_port_copy_send(new_port);
			task->exc_actions[i].behavior = new_behavior;
			task->exc_actions[i].flavor = new_flavor;
			task->exc_actions[i].privileged = privileged;
		} else {
			old_port[i] = IP_NULL;
		}
	}

	itk_unlock(task);

#if CONFIG_MACF
	mac_exc_free_label(new_label);
#endif

	/*
	 * Release displaced send rights outside the lock.  Only indices
	 * below the loop's final i were written, so walking i back down
	 * covers exactly the initialized entries.  NOTE(review): relies
	 * on FIRST_EXCEPTION > 0 so the unsigned decrement terminates
	 * before wrapping — true for Mach (FIRST_EXCEPTION is 1).
	 */
	while (--i >= FIRST_EXCEPTION) {
		if (IP_VALID(old_port[i])) {
			ipc_port_release_send(old_port[i]);
		}
	}

	if (IP_VALID(new_port)) { /* consume send right */
		ipc_port_release_send(new_port);
	}

	/* report how many distinct displaced entries were returned */
	*CountCnt = count;

	return KERN_SUCCESS;
}
2662
2663 /*
2664 * Routine: thread/task_get_exception_ports [kernel call]
2665 * Purpose:
2666 * Clones a send right for each of the thread/task's exception
2667 * ports specified in the mask and returns the behaviour
2668 * and flavor of said port.
2669 *
 * Returns up to [in] CountCnt elements.
2671 *
2672 * Conditions:
2673 * Nothing locked.
2674 * Returns:
2675 * KERN_SUCCESS Extracted a send right.
2676 * KERN_INVALID_ARGUMENT The thread is null,
2677 * Invalid special port,
2678 * Illegal mask bit set.
2679 * KERN_FAILURE The thread is dead.
2680 */
2681
2682 kern_return_t
2683 thread_get_exception_ports(
2684 thread_t thread,
2685 exception_mask_t exception_mask,
2686 exception_mask_array_t masks,
2687 mach_msg_type_number_t *CountCnt,
2688 exception_port_array_t ports,
2689 exception_behavior_array_t behaviors,
2690 thread_state_flavor_array_t flavors)
2691 {
2692 unsigned int i, j, count;
2693
2694 if (thread == THREAD_NULL) {
2695 return KERN_INVALID_ARGUMENT;
2696 }
2697
2698 if (exception_mask & ~EXC_MASK_VALID) {
2699 return KERN_INVALID_ARGUMENT;
2700 }
2701
2702 thread_mtx_lock(thread);
2703
2704 if (!thread->active) {
2705 thread_mtx_unlock(thread);
2706
2707 return KERN_FAILURE;
2708 }
2709
2710 count = 0;
2711
2712 if (thread->exc_actions == NULL) {
2713 goto done;
2714 }
2715
2716 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
2717 if (exception_mask & (1 << i)) {
2718 for (j = 0; j < count; ++j) {
2719 /*
2720 * search for an identical entry, if found
2721 * set corresponding mask for this exception.
2722 */
2723 if (thread->exc_actions[i].port == ports[j] &&
2724 thread->exc_actions[i].behavior == behaviors[j] &&
2725 thread->exc_actions[i].flavor == flavors[j]) {
2726 masks[j] |= (1 << i);
2727 break;
2728 }
2729 }
2730
2731 if (j == count) {
2732 masks[j] = (1 << i);
2733 ports[j] = ipc_port_copy_send(thread->exc_actions[i].port);
2734 behaviors[j] = thread->exc_actions[i].behavior;
2735 flavors[j] = thread->exc_actions[i].flavor;
2736 ++count;
2737 if (count >= *CountCnt) {
2738 break;
2739 }
2740 }
2741 }
2742 }
2743
2744 done:
2745 thread_mtx_unlock(thread);
2746
2747 *CountCnt = count;
2748
2749 return KERN_SUCCESS;
2750 }
2751
2752 kern_return_t
2753 task_get_exception_ports(
2754 task_t task,
2755 exception_mask_t exception_mask,
2756 exception_mask_array_t masks,
2757 mach_msg_type_number_t *CountCnt,
2758 exception_port_array_t ports,
2759 exception_behavior_array_t behaviors,
2760 thread_state_flavor_array_t flavors)
2761 {
2762 unsigned int i, j, count;
2763
2764 if (task == TASK_NULL) {
2765 return KERN_INVALID_ARGUMENT;
2766 }
2767
2768 if (exception_mask & ~EXC_MASK_VALID) {
2769 return KERN_INVALID_ARGUMENT;
2770 }
2771
2772 itk_lock(task);
2773
2774 if (task->itk_self == IP_NULL) {
2775 itk_unlock(task);
2776
2777 return KERN_FAILURE;
2778 }
2779
2780 count = 0;
2781
2782 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
2783 if (exception_mask & (1 << i)) {
2784 for (j = 0; j < count; ++j) {
2785 /*
2786 * search for an identical entry, if found
2787 * set corresponding mask for this exception.
2788 */
2789 if (task->exc_actions[i].port == ports[j] &&
2790 task->exc_actions[i].behavior == behaviors[j] &&
2791 task->exc_actions[i].flavor == flavors[j]) {
2792 masks[j] |= (1 << i);
2793 break;
2794 }
2795 }
2796
2797 if (j == count) {
2798 masks[j] = (1 << i);
2799 ports[j] = ipc_port_copy_send(task->exc_actions[i].port);
2800 behaviors[j] = task->exc_actions[i].behavior;
2801 flavors[j] = task->exc_actions[i].flavor;
2802 ++count;
2803 if (count > *CountCnt) {
2804 break;
2805 }
2806 }
2807 }
2808 }
2809
2810 itk_unlock(task);
2811
2812 *CountCnt = count;
2813
2814 return KERN_SUCCESS;
2815 }