]> git.saurik.com Git - apple/xnu.git/blob - osfmk/kern/ipc_tt.c
xnu-4903.241.1.tar.gz
[apple/xnu.git] / osfmk / kern / ipc_tt.c
1 /*
2 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
58 * support for mandatory and extensible security protections. This notice
59 * is included in support of clause 2.2 (b) of the Apple Public License,
60 * Version 2.0.
61 */
62 /*
63 */
64
65 /*
66 * File: ipc_tt.c
67 * Purpose:
68 * Task and thread related IPC functions.
69 */
70
71 #include <mach/mach_types.h>
72 #include <mach/boolean.h>
73 #include <mach/kern_return.h>
74 #include <mach/mach_param.h>
75 #include <mach/task_special_ports.h>
76 #include <mach/thread_special_ports.h>
77 #include <mach/thread_status.h>
78 #include <mach/exception_types.h>
79 #include <mach/memory_object_types.h>
80 #include <mach/mach_traps.h>
81 #include <mach/task_server.h>
82 #include <mach/thread_act_server.h>
83 #include <mach/mach_host_server.h>
84 #include <mach/host_priv_server.h>
85 #include <mach/vm_map_server.h>
86
87 #include <kern/kern_types.h>
88 #include <kern/host.h>
89 #include <kern/ipc_kobject.h>
90 #include <kern/ipc_tt.h>
91 #include <kern/kalloc.h>
92 #include <kern/thread.h>
93 #include <kern/misc_protos.h>
94
95 #include <vm/vm_map.h>
96 #include <vm/vm_pageout.h>
97 #include <vm/vm_protos.h>
98
99 #include <security/mac_mach_internal.h>
100
101 #if CONFIG_EMBEDDED && !SECURE_KERNEL
102 extern int cs_relax_platform_task_ports;
103 #endif
104
105 /* forward declarations */
106 task_t convert_port_to_locked_task(ipc_port_t port);
107 task_inspect_t convert_port_to_locked_task_inspect(ipc_port_t port);
108 static void ipc_port_bind_special_reply_port_locked(ipc_port_t port);
109 static kern_return_t ipc_port_unbind_special_reply_port(thread_t thread, boolean_t unbind_active_port);
110 kern_return_t task_conversion_eval(task_t caller, task_t victim);
111
/*
 *	Routine:	ipc_task_init
 *	Purpose:
 *		Initialize a task's IPC state.
 *
 *		If non-null, some state will be inherited from the parent.
 *		The parent must be appropriately initialized.
 *	Conditions:
 *		Nothing locked.
 */

void
ipc_task_init(
	task_t		task,
	task_t		parent)
{
	ipc_space_t space;
	ipc_port_t kport;	/* task's kernel (control) port */
	ipc_port_t nport;	/* task's name port */
	kern_return_t kr;
	int i;


	/* A fresh IPC space for the task; failure here is unrecoverable. */
	kr = ipc_space_create(&ipc_table_entries[0], &space);
	if (kr != KERN_SUCCESS)
		panic("ipc_task_init");

	space->is_task = task;

	kport = ipc_port_alloc_kernel();
	if (kport == IP_NULL)
		panic("ipc_task_init");

	nport = ipc_port_alloc_kernel();
	if (nport == IP_NULL)
		panic("ipc_task_init");

	itk_lock_init(task);
	task->itk_self = kport;
	task->itk_nself = nport;
	task->itk_resume = IP_NULL; /* Lazily allocated on-demand */
	if (task_is_a_corpse_fork(task)) {
		/*
		 * A no-senders notification for a corpse would not
		 * work with a naked send right held in the kernel,
		 * so a corpse fork gets no self send right.
		 */
		task->itk_sself = IP_NULL;
	} else {
		task->itk_sself = ipc_port_make_send(kport);
	}
	task->itk_debug_control = IP_NULL;
	task->itk_space = space;

#if CONFIG_MACF
	/* Slot 0 is unused and gets no label; label the real exception slots. */
	task->exc_actions[0].label = NULL;
	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		mac_exc_associate_action_label(&task->exc_actions[i], mac_exc_create_label());
	}
#endif

	/* always zero-out the first (unused) array element */

	bzero(&task->exc_actions[0], sizeof(task->exc_actions[0]));
	if (parent == TASK_NULL) {
		ipc_port_t port;

		/* No parent: start with empty exception actions. */
		for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
			task->exc_actions[i].port = IP_NULL;
			task->exc_actions[i].flavor = 0;
			task->exc_actions[i].behavior = 0;
			task->exc_actions[i].privileged = FALSE;
		}/* for */

		kr = host_get_host_port(host_priv_self(), &port);
		assert(kr == KERN_SUCCESS);
		task->itk_host = port;

		task->itk_bootstrap = IP_NULL;
		task->itk_seatbelt = IP_NULL;
		task->itk_gssd = IP_NULL;
		task->itk_task_access = IP_NULL;

		for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
			task->itk_registered[i] = IP_NULL;
	} else {
		/* Copy the parent's special/registered ports while it is locked. */
		itk_lock(parent);
		assert(parent->itk_self != IP_NULL);

		/* inherit registered ports */

		for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
			task->itk_registered[i] =
				ipc_port_copy_send(parent->itk_registered[i]);

		/* inherit exception and bootstrap ports */

		for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
			task->exc_actions[i].port =
				ipc_port_copy_send(parent->exc_actions[i].port);
			task->exc_actions[i].flavor =
				parent->exc_actions[i].flavor;
			task->exc_actions[i].behavior =
				parent->exc_actions[i].behavior;
			task->exc_actions[i].privileged =
				parent->exc_actions[i].privileged;
#if CONFIG_MACF
			mac_exc_inherit_action_label(parent->exc_actions + i, task->exc_actions + i);
#endif
		}/* for */
		task->itk_host =
			ipc_port_copy_send(parent->itk_host);

		task->itk_bootstrap =
			ipc_port_copy_send(parent->itk_bootstrap);

		task->itk_seatbelt =
			ipc_port_copy_send(parent->itk_seatbelt);

		task->itk_gssd =
			ipc_port_copy_send(parent->itk_gssd);

		task->itk_task_access =
			ipc_port_copy_send(parent->itk_task_access);

		itk_unlock(parent);
	}
}
239
240 /*
241 * Routine: ipc_task_enable
242 * Purpose:
243 * Enable a task for IPC access.
244 * Conditions:
245 * Nothing locked.
246 */
247
248 void
249 ipc_task_enable(
250 task_t task)
251 {
252 ipc_port_t kport;
253 ipc_port_t nport;
254
255 itk_lock(task);
256 kport = task->itk_self;
257 if (kport != IP_NULL)
258 ipc_kobject_set(kport, (ipc_kobject_t) task, IKOT_TASK);
259 nport = task->itk_nself;
260 if (nport != IP_NULL)
261 ipc_kobject_set(nport, (ipc_kobject_t) task, IKOT_TASK_NAME);
262 itk_unlock(task);
263 }
264
265 /*
266 * Routine: ipc_task_disable
267 * Purpose:
268 * Disable IPC access to a task.
269 * Conditions:
270 * Nothing locked.
271 */
272
273 void
274 ipc_task_disable(
275 task_t task)
276 {
277 ipc_port_t kport;
278 ipc_port_t nport;
279 ipc_port_t rport;
280
281 itk_lock(task);
282 kport = task->itk_self;
283 if (kport != IP_NULL)
284 ipc_kobject_set(kport, IKO_NULL, IKOT_NONE);
285 nport = task->itk_nself;
286 if (nport != IP_NULL)
287 ipc_kobject_set(nport, IKO_NULL, IKOT_NONE);
288
289 rport = task->itk_resume;
290 if (rport != IP_NULL) {
291 /*
292 * From this point onwards this task is no longer accepting
293 * resumptions.
294 *
295 * There are still outstanding suspensions on this task,
296 * even as it is being torn down. Disconnect the task
297 * from the rport, thereby "orphaning" the rport. The rport
298 * itself will go away only when the last suspension holder
299 * destroys his SO right to it -- when he either
300 * exits, or tries to actually use that last SO right to
301 * resume this (now non-existent) task.
302 */
303 ipc_kobject_set(rport, IKO_NULL, IKOT_NONE);
304 }
305 itk_unlock(task);
306 }
307
/*
 *	Routine:	ipc_task_terminate
 *	Purpose:
 *		Clean up and destroy a task's IPC state.
 *	Conditions:
 *		Nothing locked.  The task must be suspended.
 *		(Or the current thread must be in the task.)
 */

void
ipc_task_terminate(
	task_t		task)
{
	ipc_port_t kport;	/* task's kernel (control) port */
	ipc_port_t nport;	/* task's name port */
	ipc_port_t rport;	/* task's resume port; may be IP_NULL */
	int i;

	itk_lock(task);
	kport = task->itk_self;

	if (kport == IP_NULL) {
		/* the task is already terminated (can this happen?) */
		itk_unlock(task);
		return;
	}
	/*
	 * Null out the port fields under the lock so that any
	 * concurrent observer sees the task as terminated; the
	 * rights themselves are released after unlocking.
	 */
	task->itk_self = IP_NULL;

	nport = task->itk_nself;
	assert(nport != IP_NULL);
	task->itk_nself = IP_NULL;

	rport = task->itk_resume;
	task->itk_resume = IP_NULL;

	itk_unlock(task);

	/* release the naked send rights */

	if (IP_VALID(task->itk_sself))
		ipc_port_release_send(task->itk_sself);

	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		if (IP_VALID(task->exc_actions[i].port)) {
			ipc_port_release_send(task->exc_actions[i].port);
		}
#if CONFIG_MACF
		/* free the MACF label attached to each exception action */
		mac_exc_free_action_label(task->exc_actions + i);
#endif
	}

	if (IP_VALID(task->itk_host))
		ipc_port_release_send(task->itk_host);

	if (IP_VALID(task->itk_bootstrap))
		ipc_port_release_send(task->itk_bootstrap);

	if (IP_VALID(task->itk_seatbelt))
		ipc_port_release_send(task->itk_seatbelt);

	if (IP_VALID(task->itk_gssd))
		ipc_port_release_send(task->itk_gssd);

	if (IP_VALID(task->itk_task_access))
		ipc_port_release_send(task->itk_task_access);

	if (IP_VALID(task->itk_debug_control))
		ipc_port_release_send(task->itk_debug_control);

	for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
		if (IP_VALID(task->itk_registered[i]))
			ipc_port_release_send(task->itk_registered[i]);

	/* destroy the kernel ports */
	ipc_port_dealloc_kernel(kport);
	ipc_port_dealloc_kernel(nport);
	if (rport != IP_NULL)
		ipc_port_dealloc_kernel(rport);

	itk_lock_destroy(task);
}
389
/*
 *	Routine:	ipc_task_reset
 *	Purpose:
 *		Reset a task's IPC state to protect it when
 *		it enters an elevated security context.  The
 *		task name port can remain the same - since
 *		it represents no specific privilege.
 *	Conditions:
 *		Nothing locked.  The task must be suspended.
 *		(Or the current thread must be in the task.)
 */

void
ipc_task_reset(
	task_t		task)
{
	ipc_port_t old_kport, new_kport;
	ipc_port_t old_sself;
	/* unprivileged exception ports collected for release after unlock */
	ipc_port_t old_exc_actions[EXC_TYPES_COUNT];
	int i;

#if CONFIG_MACF
	/* Fresh label to unset credentials in existing labels. */
	struct label *unset_label = mac_exc_create_label();
#endif

	/* allocate the replacement kernel port before taking any locks */
	new_kport = ipc_port_alloc_kernel();
	if (new_kport == IP_NULL)
		panic("ipc_task_reset");

	itk_lock(task);

	old_kport = task->itk_self;

	if (old_kport == IP_NULL) {
		/* the task is already terminated (can this happen?) */
		itk_unlock(task);
		ipc_port_dealloc_kernel(new_kport);
#if CONFIG_MACF
		mac_exc_free_label(unset_label);
#endif
		return;
	}

	task->itk_self = new_kport;
	old_sself = task->itk_sself;
	task->itk_sself = ipc_port_make_send(new_kport);

	/* Set the old kport to IKOT_NONE and update the exec token while under the port lock */
	ip_lock(old_kport);
	ipc_kobject_set_atomically(old_kport, IKO_NULL, IKOT_NONE);
	task->exec_token += 1;
	ip_unlock(old_kport);

	ipc_kobject_set(new_kport, (ipc_kobject_t) task, IKOT_TASK);

	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		old_exc_actions[i] = IP_NULL;

		/* keep the corpse-notify port while a corpse report is pending */
		if (i == EXC_CORPSE_NOTIFY && task_corpse_pending_report(task)) {
			continue;
		}

		/* only ports set by privileged processes survive the reset */
		if (!task->exc_actions[i].privileged) {
#if CONFIG_MACF
			mac_exc_update_action_label(task->exc_actions + i, unset_label);
#endif
			old_exc_actions[i] = task->exc_actions[i].port;
			task->exc_actions[i].port = IP_NULL;
		}
	}/* for */

	if (IP_VALID(task->itk_debug_control)) {
		ipc_port_release_send(task->itk_debug_control);
	}
	task->itk_debug_control = IP_NULL;

	itk_unlock(task);

#if CONFIG_MACF
	mac_exc_free_label(unset_label);
#endif

	/* release the naked send rights */

	if (IP_VALID(old_sself))
		ipc_port_release_send(old_sself);

	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		if (IP_VALID(old_exc_actions[i])) {
			ipc_port_release_send(old_exc_actions[i]);
		}
	}/* for */

	/* destroy the kernel port */
	ipc_port_dealloc_kernel(old_kport);
}
487
488 /*
489 * Routine: ipc_thread_init
490 * Purpose:
491 * Initialize a thread's IPC state.
492 * Conditions:
493 * Nothing locked.
494 */
495
496 void
497 ipc_thread_init(
498 thread_t thread)
499 {
500 ipc_port_t kport;
501
502 kport = ipc_port_alloc_kernel();
503 if (kport == IP_NULL)
504 panic("ipc_thread_init");
505
506 thread->ith_self = kport;
507 thread->ith_sself = ipc_port_make_send(kport);
508 thread->ith_special_reply_port = NULL;
509 thread->exc_actions = NULL;
510
511 ipc_kobject_set(kport, (ipc_kobject_t)thread, IKOT_THREAD);
512
513 #if IMPORTANCE_INHERITANCE
514 thread->ith_assertions = 0;
515 #endif
516
517 ipc_kmsg_queue_init(&thread->ith_messages);
518
519 thread->ith_rpc_reply = IP_NULL;
520 }
521
522 void
523 ipc_thread_init_exc_actions(
524 thread_t thread)
525 {
526 assert(thread->exc_actions == NULL);
527
528 thread->exc_actions = kalloc(sizeof(struct exception_action) * EXC_TYPES_COUNT);
529 bzero(thread->exc_actions, sizeof(struct exception_action) * EXC_TYPES_COUNT);
530
531 #if CONFIG_MACF
532 for (size_t i = 0; i < EXC_TYPES_COUNT; ++i) {
533 mac_exc_associate_action_label(thread->exc_actions + i, mac_exc_create_label());
534 }
535 #endif
536 }
537
538 void
539 ipc_thread_destroy_exc_actions(
540 thread_t thread)
541 {
542 if (thread->exc_actions != NULL) {
543 #if CONFIG_MACF
544 for (size_t i = 0; i < EXC_TYPES_COUNT; ++i) {
545 mac_exc_free_action_label(thread->exc_actions + i);
546 }
547 #endif
548
549 kfree(thread->exc_actions,
550 sizeof(struct exception_action) * EXC_TYPES_COUNT);
551 thread->exc_actions = NULL;
552 }
553 }
554
555 void
556 ipc_thread_disable(
557 thread_t thread)
558 {
559 ipc_port_t kport = thread->ith_self;
560
561 if (kport != IP_NULL)
562 ipc_kobject_set(kport, IKO_NULL, IKOT_NONE);
563 }
564
/*
 *	Routine:	ipc_thread_terminate
 *	Purpose:
 *		Clean up and destroy a thread's IPC state.
 *	Conditions:
 *		Nothing locked.
 */

void
ipc_thread_terminate(
	thread_t	thread)
{
	ipc_port_t	kport = thread->ith_self;

	if (kport != IP_NULL) {
		int			i;

		/* release the thread's send right to itself */
		if (IP_VALID(thread->ith_sself))
			ipc_port_release_send(thread->ith_sself);

		thread->ith_sself = thread->ith_self = IP_NULL;

		/* release per-thread exception ports and free the table */
		if (thread->exc_actions != NULL) {
			for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
				if (IP_VALID(thread->exc_actions[i].port))
					ipc_port_release_send(thread->exc_actions[i].port);
			}
			ipc_thread_destroy_exc_actions(thread);
		}

		/* destroy the thread's kernel port last */
		ipc_port_dealloc_kernel(kport);
	}

#if IMPORTANCE_INHERITANCE
	assert(thread->ith_assertions == 0);
#endif

	/* unbind the thread special reply port */
	if (IP_VALID(thread->ith_special_reply_port)) {
		ipc_port_unbind_special_reply_port(thread, TRUE);
	}

	assert(ipc_kmsg_queue_empty(&thread->ith_messages));

	if (thread->ith_rpc_reply != IP_NULL)
		ipc_port_dealloc_reply(thread->ith_rpc_reply);

	thread->ith_rpc_reply = IP_NULL;
}
614
/*
 *	Routine:	ipc_thread_reset
 *	Purpose:
 *		Reset the IPC state for a given Mach thread when
 *		its task enters an elevated security context.
 *		Both the thread port and its exception ports have
 *		to be reset.  Its RPC reply port cannot have any
 *		rights outstanding, so it should be fine.
 *	Conditions:
 *		Nothing locked.
 */

void
ipc_thread_reset(
	thread_t	thread)
{
	ipc_port_t old_kport, new_kport;
	ipc_port_t old_sself;
	/* unprivileged exception ports collected for release after unlock */
	ipc_port_t old_exc_actions[EXC_TYPES_COUNT];
	boolean_t  has_old_exc_actions = FALSE;
	int		   i;

#if CONFIG_MACF
	struct label *new_label = mac_exc_create_label();
#endif

	/* allocate the replacement kernel port before taking any locks */
	new_kport = ipc_port_alloc_kernel();
	if (new_kport == IP_NULL)
		panic("ipc_task_reset");

	thread_mtx_lock(thread);

	old_kport = thread->ith_self;

	if (old_kport == IP_NULL && thread->inspection == FALSE) {
		/* the thread is already terminated (can this happen?) */
		thread_mtx_unlock(thread);
		ipc_port_dealloc_kernel(new_kport);
#if CONFIG_MACF
		mac_exc_free_label(new_label);
#endif
		return;
	}

	thread->ith_self = new_kport;
	old_sself = thread->ith_sself;
	thread->ith_sself = ipc_port_make_send(new_kport);
	if (old_kport != IP_NULL) {
		ipc_kobject_set(old_kport, IKO_NULL, IKOT_NONE);
	}
	ipc_kobject_set(new_kport, (ipc_kobject_t) thread, IKOT_THREAD);

	/*
	 * Only ports that were set by root-owned processes
	 * (privileged ports) should survive
	 */
	if (thread->exc_actions != NULL) {
		has_old_exc_actions = TRUE;
		for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
			if (thread->exc_actions[i].privileged) {
				/* privileged slot survives; nothing to release */
				old_exc_actions[i] = IP_NULL;
			} else {
#if CONFIG_MACF
				mac_exc_update_action_label(thread->exc_actions + i, new_label);
#endif
				old_exc_actions[i] = thread->exc_actions[i].port;
				thread->exc_actions[i].port = IP_NULL;
			}
		}
	}

	thread_mtx_unlock(thread);

#if CONFIG_MACF
	mac_exc_free_label(new_label);
#endif

	/* release the naked send rights */

	if (IP_VALID(old_sself))
		ipc_port_release_send(old_sself);

	if (has_old_exc_actions) {
		/*
		 * NOTE(review): privileged slots were left IP_NULL above;
		 * this relies on ipc_port_release_send() tolerating IP_NULL.
		 */
		for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
			ipc_port_release_send(old_exc_actions[i]);
		}
	}

	/* destroy the kernel port */
	if (old_kport != IP_NULL) {
		ipc_port_dealloc_kernel(old_kport);
	}

	/* unbind the thread special reply port */
	if (IP_VALID(thread->ith_special_reply_port)) {
		ipc_port_unbind_special_reply_port(thread, TRUE);
	}
}
713
714 /*
715 * Routine: retrieve_task_self_fast
716 * Purpose:
717 * Optimized version of retrieve_task_self,
718 * that only works for the current task.
719 *
720 * Return a send right (possibly null/dead)
721 * for the task's user-visible self port.
722 * Conditions:
723 * Nothing locked.
724 */
725
726 ipc_port_t
727 retrieve_task_self_fast(
728 task_t task)
729 {
730 ipc_port_t port;
731
732 assert(task == current_task());
733
734 itk_lock(task);
735 assert(task->itk_self != IP_NULL);
736
737 if ((port = task->itk_sself) == task->itk_self) {
738 /* no interposing */
739
740 ip_lock(port);
741 assert(ip_active(port));
742 ip_reference(port);
743 port->ip_srights++;
744 ip_unlock(port);
745 } else
746 port = ipc_port_copy_send(port);
747 itk_unlock(task);
748
749 return port;
750 }
751
752 /*
753 * Routine: retrieve_thread_self_fast
754 * Purpose:
755 * Return a send right (possibly null/dead)
756 * for the thread's user-visible self port.
757 *
758 * Only works for the current thread.
759 *
760 * Conditions:
761 * Nothing locked.
762 */
763
764 ipc_port_t
765 retrieve_thread_self_fast(
766 thread_t thread)
767 {
768 ipc_port_t port;
769
770 assert(thread == current_thread());
771
772 thread_mtx_lock(thread);
773
774 assert(thread->ith_self != IP_NULL);
775
776 if ((port = thread->ith_sself) == thread->ith_self) {
777 /* no interposing */
778
779 ip_lock(port);
780 assert(ip_active(port));
781 ip_reference(port);
782 port->ip_srights++;
783 ip_unlock(port);
784 }
785 else
786 port = ipc_port_copy_send(port);
787
788 thread_mtx_unlock(thread);
789
790 return port;
791 }
792
793 /*
794 * Routine: task_self_trap [mach trap]
795 * Purpose:
796 * Give the caller send rights for his own task port.
797 * Conditions:
798 * Nothing locked.
799 * Returns:
800 * MACH_PORT_NULL if there are any resource failures
801 * or other errors.
802 */
803
804 mach_port_name_t
805 task_self_trap(
806 __unused struct task_self_trap_args *args)
807 {
808 task_t task = current_task();
809 ipc_port_t sright;
810 mach_port_name_t name;
811
812 sright = retrieve_task_self_fast(task);
813 name = ipc_port_copyout_send(sright, task->itk_space);
814 return name;
815 }
816
817 /*
818 * Routine: thread_self_trap [mach trap]
819 * Purpose:
820 * Give the caller send rights for his own thread port.
821 * Conditions:
822 * Nothing locked.
823 * Returns:
824 * MACH_PORT_NULL if there are any resource failures
825 * or other errors.
826 */
827
828 mach_port_name_t
829 thread_self_trap(
830 __unused struct thread_self_trap_args *args)
831 {
832 thread_t thread = current_thread();
833 task_t task = thread->task;
834 ipc_port_t sright;
835 mach_port_name_t name;
836
837 sright = retrieve_thread_self_fast(thread);
838 name = ipc_port_copyout_send(sright, task->itk_space);
839 return name;
840
841 }
842
843 /*
844 * Routine: mach_reply_port [mach trap]
845 * Purpose:
846 * Allocate a port for the caller.
847 * Conditions:
848 * Nothing locked.
849 * Returns:
850 * MACH_PORT_NULL if there are any resource failures
851 * or other errors.
852 */
853
854 mach_port_name_t
855 mach_reply_port(
856 __unused struct mach_reply_port_args *args)
857 {
858 ipc_port_t port;
859 mach_port_name_t name;
860 kern_return_t kr;
861
862 kr = ipc_port_alloc(current_task()->itk_space, &name, &port);
863 if (kr == KERN_SUCCESS)
864 ip_unlock(port);
865 else
866 name = MACH_PORT_NULL;
867 return name;
868 }
869
/*
 *	Routine:	thread_get_special_reply_port [mach trap]
 *	Purpose:
 *		Allocate a special reply port for the calling thread.
 *	Conditions:
 *		Nothing locked.
 *	Returns:
 *		mach_port_name_t: send right & receive right for special reply port.
 *		MACH_PORT_NULL if there are any resource failures
 *		or other errors.
 */

mach_port_name_t
thread_get_special_reply_port(
	__unused struct thread_get_special_reply_port_args *args)
{
	ipc_port_t port;
	mach_port_name_t name;
	mach_port_name_t send_name;
	kern_return_t kr;
	thread_t thread = current_thread();

	/* unbind the thread special reply port */
	if (IP_VALID(thread->ith_special_reply_port)) {
		kr = ipc_port_unbind_special_reply_port(thread, TRUE);
		if (kr != KERN_SUCCESS) {
			return MACH_PORT_NULL;
		}
	}

	/* on success, ipc_port_alloc returns the new port locked */
	kr = ipc_port_alloc(current_task()->itk_space, &name, &port);
	if (kr == KERN_SUCCESS) {
		ipc_port_bind_special_reply_port_locked(port);

		/* Make a send right and insert it in the space at specified name */
		ipc_port_make_send_locked(port);
		ip_unlock(port);
		send_name = ipc_port_copyout_name_send(port, current_task()->itk_space, name);
		/*
		 * If insertion of send right failed, userland is doing something bad, error out.
		 * The space was marked inactive or the receive right just inserted above at the
		 * given name was moved, in either case do not try to deallocate the receive right.
		 */
		if (send_name == MACH_PORT_NULL || send_name == MACH_PORT_DEAD) {
			if (IP_VALID(thread->ith_special_reply_port)) {
				ipc_port_unbind_special_reply_port(thread, TRUE);
			}
			name = MACH_PORT_NULL;
		}
	} else {
		name = MACH_PORT_NULL;
	}
	return name;
}
924
925 /*
926 * Routine: ipc_port_bind_special_reply_port_locked
927 * Purpose:
928 * Bind the given port to current thread as a special reply port.
929 * Conditions:
930 * Port locked.
931 * Returns:
932 * None.
933 */
934
935 static void
936 ipc_port_bind_special_reply_port_locked(
937 ipc_port_t port)
938 {
939 thread_t thread = current_thread();
940 assert(thread->ith_special_reply_port == NULL);
941
942 ip_reference(port);
943 thread->ith_special_reply_port = port;
944 port->ip_specialreply = 1;
945 port->ip_sync_link_state = PORT_SYNC_LINK_ANY;
946
947 reset_ip_srp_bits(port);
948 }
949
/*
 *	Routine:	ipc_port_unbind_special_reply_port
 *	Purpose:
 *		Unbind the thread's special reply port.
 *		If the special port has threads waiting on turnstile,
 *		update it's inheritor.
 *	Condition:
 *		Nothing locked.  Callers must have checked that
 *		thread->ith_special_reply_port is valid; there is
 *		no NULL guard here.
 *	Returns:
 *		KERN_SUCCESS on unbind, KERN_FAILURE if the port is
 *		still active and unbind_active_port is FALSE.
 */
static kern_return_t
ipc_port_unbind_special_reply_port(
	thread_t	thread,
	boolean_t	unbind_active_port)
{
	ipc_port_t special_reply_port = thread->ith_special_reply_port;

	ip_lock(special_reply_port);

	/* Return error if port active and unbind_active_port set to FALSE */
	if (unbind_active_port == FALSE && ip_active(special_reply_port)) {
		ip_unlock(special_reply_port);
		return KERN_FAILURE;
	}

	thread->ith_special_reply_port = NULL;
	ipc_port_adjust_special_reply_port_locked(special_reply_port, NULL,
		IPC_PORT_ADJUST_SR_CLEAR_SPECIAL_REPLY, FALSE);
	/* port unlocked */

	/* drop the reference taken when the port was bound */
	ip_release(special_reply_port);
	return KERN_SUCCESS;
}
984
985 /*
986 * Routine: thread_get_special_port [kernel call]
987 * Purpose:
988 * Clones a send right for one of the thread's
989 * special ports.
990 * Conditions:
991 * Nothing locked.
992 * Returns:
993 * KERN_SUCCESS Extracted a send right.
994 * KERN_INVALID_ARGUMENT The thread is null.
995 * KERN_FAILURE The thread is dead.
996 * KERN_INVALID_ARGUMENT Invalid special port.
997 */
998
999 kern_return_t
1000 thread_get_special_port(
1001 thread_t thread,
1002 int which,
1003 ipc_port_t *portp)
1004 {
1005 kern_return_t result = KERN_SUCCESS;
1006 ipc_port_t *whichp;
1007
1008 if (thread == THREAD_NULL)
1009 return (KERN_INVALID_ARGUMENT);
1010
1011 switch (which) {
1012
1013 case THREAD_KERNEL_PORT:
1014 whichp = &thread->ith_sself;
1015 break;
1016
1017 default:
1018 return (KERN_INVALID_ARGUMENT);
1019 }
1020
1021 thread_mtx_lock(thread);
1022
1023 if (thread->active)
1024 *portp = ipc_port_copy_send(*whichp);
1025 else
1026 result = KERN_FAILURE;
1027
1028 thread_mtx_unlock(thread);
1029
1030 return (result);
1031 }
1032
1033 /*
1034 * Routine: thread_set_special_port [kernel call]
1035 * Purpose:
1036 * Changes one of the thread's special ports,
1037 * setting it to the supplied send right.
1038 * Conditions:
1039 * Nothing locked. If successful, consumes
1040 * the supplied send right.
1041 * Returns:
1042 * KERN_SUCCESS Changed the special port.
1043 * KERN_INVALID_ARGUMENT The thread is null.
1044 * KERN_FAILURE The thread is dead.
1045 * KERN_INVALID_ARGUMENT Invalid special port.
1046 */
1047
1048 kern_return_t
1049 thread_set_special_port(
1050 thread_t thread,
1051 int which,
1052 ipc_port_t port)
1053 {
1054 kern_return_t result = KERN_SUCCESS;
1055 ipc_port_t *whichp, old = IP_NULL;
1056
1057 if (thread == THREAD_NULL)
1058 return (KERN_INVALID_ARGUMENT);
1059
1060 switch (which) {
1061
1062 case THREAD_KERNEL_PORT:
1063 whichp = &thread->ith_sself;
1064 break;
1065
1066 default:
1067 return (KERN_INVALID_ARGUMENT);
1068 }
1069
1070 thread_mtx_lock(thread);
1071
1072 if (thread->active) {
1073 old = *whichp;
1074 *whichp = port;
1075 }
1076 else
1077 result = KERN_FAILURE;
1078
1079 thread_mtx_unlock(thread);
1080
1081 if (IP_VALID(old))
1082 ipc_port_release_send(old);
1083
1084 return (result);
1085 }
1086
1087 /*
1088 * Routine: task_get_special_port [kernel call]
1089 * Purpose:
1090 * Clones a send right for one of the task's
1091 * special ports.
1092 * Conditions:
1093 * Nothing locked.
1094 * Returns:
1095 * KERN_SUCCESS Extracted a send right.
1096 * KERN_INVALID_ARGUMENT The task is null.
1097 * KERN_FAILURE The task/space is dead.
1098 * KERN_INVALID_ARGUMENT Invalid special port.
1099 */
1100
1101 kern_return_t
1102 task_get_special_port(
1103 task_t task,
1104 int which,
1105 ipc_port_t *portp)
1106 {
1107 ipc_port_t port;
1108
1109 if (task == TASK_NULL)
1110 return KERN_INVALID_ARGUMENT;
1111
1112 itk_lock(task);
1113 if (task->itk_self == IP_NULL) {
1114 itk_unlock(task);
1115 return KERN_FAILURE;
1116 }
1117
1118 switch (which) {
1119 case TASK_KERNEL_PORT:
1120 port = ipc_port_copy_send(task->itk_sself);
1121 break;
1122
1123 case TASK_NAME_PORT:
1124 port = ipc_port_make_send(task->itk_nself);
1125 break;
1126
1127 case TASK_HOST_PORT:
1128 port = ipc_port_copy_send(task->itk_host);
1129 break;
1130
1131 case TASK_BOOTSTRAP_PORT:
1132 port = ipc_port_copy_send(task->itk_bootstrap);
1133 break;
1134
1135 case TASK_SEATBELT_PORT:
1136 port = ipc_port_copy_send(task->itk_seatbelt);
1137 break;
1138
1139 case TASK_ACCESS_PORT:
1140 port = ipc_port_copy_send(task->itk_task_access);
1141 break;
1142
1143 case TASK_DEBUG_CONTROL_PORT:
1144 port = ipc_port_copy_send(task->itk_debug_control);
1145 break;
1146
1147 default:
1148 itk_unlock(task);
1149 return KERN_INVALID_ARGUMENT;
1150 }
1151 itk_unlock(task);
1152
1153 *portp = port;
1154 return KERN_SUCCESS;
1155 }
1156
1157 /*
1158 * Routine: task_set_special_port [kernel call]
1159 * Purpose:
1160 * Changes one of the task's special ports,
1161 * setting it to the supplied send right.
1162 * Conditions:
1163 * Nothing locked. If successful, consumes
1164 * the supplied send right.
1165 * Returns:
1166 * KERN_SUCCESS Changed the special port.
1167 * KERN_INVALID_ARGUMENT The task is null.
1168 * KERN_FAILURE The task/space is dead.
1169 * KERN_INVALID_ARGUMENT Invalid special port.
1170 * KERN_NO_ACCESS Attempted overwrite of seatbelt port.
1171 */
1172
kern_return_t
task_set_special_port(
	task_t task,
	int which,
	ipc_port_t port)
{
	ipc_port_t *whichp;	/* address of the itk_* slot selected by `which` */
	ipc_port_t old;		/* displaced right; released after dropping the lock */

	if (task == TASK_NULL)
		return KERN_INVALID_ARGUMENT;

	/* Map the special-port selector onto its slot in the task structure. */
	switch (which) {
	case TASK_KERNEL_PORT:
		whichp = &task->itk_sself;
		break;

	case TASK_HOST_PORT:
		whichp = &task->itk_host;
		break;

	case TASK_BOOTSTRAP_PORT:
		whichp = &task->itk_bootstrap;
		break;

	case TASK_SEATBELT_PORT:
		whichp = &task->itk_seatbelt;
		break;

	case TASK_ACCESS_PORT:
		whichp = &task->itk_task_access;
		break;

	case TASK_DEBUG_CONTROL_PORT:
		whichp = &task->itk_debug_control;
		break;

	default:
		return KERN_INVALID_ARGUMENT;
	}/* switch */

	itk_lock(task);
	/* itk_self is nulled when the task's IPC state is torn down. */
	if (task->itk_self == IP_NULL) {
		itk_unlock(task);
		return KERN_FAILURE;
	}

	/* do not allow overwrite of seatbelt or task access ports */
	if ((TASK_SEATBELT_PORT == which || TASK_ACCESS_PORT == which)
		&& IP_VALID(*whichp)) {
		itk_unlock(task);
		return KERN_NO_ACCESS;
	}

	/* Swap in the new right while locked; release the old one unlocked. */
	old = *whichp;
	*whichp = port;
	itk_unlock(task);

	if (IP_VALID(old))
		ipc_port_release_send(old);
	return KERN_SUCCESS;
}
1235
1236
1237 /*
1238 * Routine: mach_ports_register [kernel call]
1239 * Purpose:
1240 * Stash a handful of port send rights in the task.
1241 * Child tasks will inherit these rights, but they
1242 * must use mach_ports_lookup to acquire them.
1243 *
1244 * The rights are supplied in a (wired) kalloc'd segment.
1245 * Rights which aren't supplied are assumed to be null.
1246 * Conditions:
1247 * Nothing locked. If successful, consumes
1248 * the supplied rights and memory.
1249 * Returns:
1250 * KERN_SUCCESS Stashed the port rights.
1251 * KERN_INVALID_ARGUMENT The task is null.
1252 * KERN_INVALID_ARGUMENT The task is dead.
1253 * KERN_INVALID_ARGUMENT The memory param is null.
1254 * KERN_INVALID_ARGUMENT Too many port rights supplied.
1255 */
1256
kern_return_t
mach_ports_register(
	task_t			task,
	mach_port_array_t	memory,
	mach_msg_type_number_t	portsCnt)
{
	ipc_port_t ports[TASK_PORT_REGISTER_MAX];	/* on-stack staging copy */
	unsigned int i;

	if ((task == TASK_NULL) ||
	    (portsCnt > TASK_PORT_REGISTER_MAX) ||
	    (portsCnt && memory == NULL))
		return KERN_INVALID_ARGUMENT;

	/*
	 * Pad the port rights with nulls.
	 */

	for (i = 0; i < portsCnt; i++)
		ports[i] = memory[i];
	for (; i < TASK_PORT_REGISTER_MAX; i++)
		ports[i] = IP_NULL;

	itk_lock(task);
	/* itk_self is nulled when the task's IPC state is torn down. */
	if (task->itk_self == IP_NULL) {
		itk_unlock(task);
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 * Replace the old send rights with the new.
	 * Release the old rights after unlocking.
	 */

	for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
		ipc_port_t old;

		old = task->itk_registered[i];
		task->itk_registered[i] = ports[i];
		ports[i] = old;	/* reuse the staging array to hold displaced rights */
	}

	itk_unlock(task);

	for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
		if (IP_VALID(ports[i]))
			ipc_port_release_send(ports[i]);

	/*
	 * Now that the operation is known to be successful,
	 * we can free the memory.
	 */

	/* NOTE(review): freed size uses sizeof(mach_port_t); assumed to match
	 * the MIG-side allocation of the in-kernel port array — confirm. */
	if (portsCnt != 0)
		kfree(memory,
		      (vm_size_t) (portsCnt * sizeof(mach_port_t)));

	return KERN_SUCCESS;
}
1316
1317 /*
1318 * Routine: mach_ports_lookup [kernel call]
1319 * Purpose:
1320 * Retrieves (clones) the stashed port send rights.
1321 * Conditions:
1322 * Nothing locked. If successful, the caller gets
1323 * rights and memory.
1324 * Returns:
1325 * KERN_SUCCESS Retrieved the send rights.
1326 * KERN_INVALID_ARGUMENT The task is null.
1327 * KERN_INVALID_ARGUMENT The task is dead.
1328 * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
1329 */
1330
kern_return_t
mach_ports_lookup(
	task_t			task,
	mach_port_array_t	*portsp,
	mach_msg_type_number_t	*portsCnt)
{
	void  *memory;
	vm_size_t size;
	ipc_port_t *ports;
	int i;

	if (task == TASK_NULL)
		return KERN_INVALID_ARGUMENT;

	/* Always returns the full register array; absent slots are IP_NULL. */
	size = (vm_size_t) (TASK_PORT_REGISTER_MAX * sizeof(ipc_port_t));

	/* Allocate before taking the lock so we never block while holding it. */
	memory = kalloc(size);
	if (memory == 0)
		return KERN_RESOURCE_SHORTAGE;

	itk_lock(task);
	if (task->itk_self == IP_NULL) {
		/* Task's IPC state already torn down. */
		itk_unlock(task);

		kfree(memory, size);
		return KERN_INVALID_ARGUMENT;
	}

	ports = (ipc_port_t *) memory;

	/*
	 * Clone port rights. Because kalloc'd memory
	 * is wired, we won't fault while holding the task lock.
	 */

	for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
		ports[i] = ipc_port_copy_send(task->itk_registered[i]);

	itk_unlock(task);

	/* Ownership of the wired array transfers to the caller. */
	*portsp = (mach_port_array_t) ports;
	*portsCnt = TASK_PORT_REGISTER_MAX;
	return KERN_SUCCESS;
}
1375
1376 kern_return_t
1377 task_conversion_eval(task_t caller, task_t victim)
1378 {
1379 /*
1380 * Tasks are allowed to resolve their own task ports, and the kernel is
1381 * allowed to resolve anyone's task port.
1382 */
1383 if (caller == kernel_task) {
1384 return KERN_SUCCESS;
1385 }
1386
1387 if (caller == victim) {
1388 return KERN_SUCCESS;
1389 }
1390
1391 /*
1392 * Only the kernel can can resolve the kernel's task port. We've established
1393 * by this point that the caller is not kernel_task.
1394 */
1395 if (victim == TASK_NULL || victim == kernel_task) {
1396 return KERN_INVALID_SECURITY;
1397 }
1398
1399 #if CONFIG_EMBEDDED
1400 /*
1401 * On embedded platforms, only a platform binary can resolve the task port
1402 * of another platform binary.
1403 */
1404 if ((victim->t_flags & TF_PLATFORM) && !(caller->t_flags & TF_PLATFORM)) {
1405 #if SECURE_KERNEL
1406 return KERN_INVALID_SECURITY;
1407 #else
1408 if (cs_relax_platform_task_ports) {
1409 return KERN_SUCCESS;
1410 } else {
1411 return KERN_INVALID_SECURITY;
1412 }
1413 #endif /* SECURE_KERNEL */
1414 }
1415 #endif /* CONFIG_EMBEDDED */
1416
1417 return KERN_SUCCESS;
1418 }
1419
1420 /*
1421 * Routine: convert_port_to_locked_task
1422 * Purpose:
1423 * Internal helper routine to convert from a port to a locked
1424 * task. Used by several routines that try to convert from a
1425 * task port to a reference on some task related object.
1426 * Conditions:
1427 * Nothing locked, blocking OK.
1428 */
task_t
convert_port_to_locked_task(ipc_port_t port)
{
	int try_failed_count = 0;	/* backoff argument for mutex_pause() */

	while (IP_VALID(port)) {
		task_t ct = current_task();
		task_t task;

		ip_lock(port);
		if (!ip_active(port) || (ip_kotype(port) != IKOT_TASK)) {
			ip_unlock(port);
			return TASK_NULL;
		}
		task = (task_t) port->ip_kobject;
		assert(task != TASK_NULL);

		/* Enforce the task-port conversion policy (task_conversion_eval). */
		if (task_conversion_eval(ct, task)) {
			ip_unlock(port);
			return TASK_NULL;
		}

		/*
		 * Normal lock ordering puts task_lock() before ip_lock().
		 * Attempt out-of-order locking here.
		 */
		if (task_lock_try(task)) {
			ip_unlock(port);
			return(task);
		}
		try_failed_count++;

		/* Lost the race: drop the port lock, back off, and retry. */
		ip_unlock(port);
		mutex_pause(try_failed_count);
	}
	return TASK_NULL;
}
1466
1467 /*
1468 * Routine: convert_port_to_locked_task_inspect
1469 * Purpose:
1470 * Internal helper routine to convert from a port to a locked
1471 * task inspect right. Used by internal routines that try to convert from a
1472 * task inspect port to a reference on some task related object.
1473 * Conditions:
1474 * Nothing locked, blocking OK.
1475 */
task_inspect_t
convert_port_to_locked_task_inspect(ipc_port_t port)
{
	int try_failed_count = 0;	/* backoff argument for mutex_pause() */

	while (IP_VALID(port)) {
		task_inspect_t task;

		ip_lock(port);
		if (!ip_active(port) || (ip_kotype(port) != IKOT_TASK)) {
			ip_unlock(port);
			return TASK_INSPECT_NULL;
		}
		task = (task_inspect_t)port->ip_kobject;
		assert(task != TASK_INSPECT_NULL);
		/*
		 * NOTE(review): unlike convert_port_to_locked_task, no
		 * task_conversion_eval() check is applied here — presumably
		 * because inspect rights are weaker; confirm intentional.
		 */
		/*
		 * Normal lock ordering puts task_lock() before ip_lock().
		 * Attempt out-of-order locking here.
		 */
		if (task_lock_try((task_t)task)) {
			ip_unlock(port);
			return task;
		}
		try_failed_count++;

		/* Lost the race: drop the port lock, back off, and retry. */
		ip_unlock(port);
		mutex_pause(try_failed_count);
	}
	return TASK_INSPECT_NULL;
}
1506
1507
1508 /*
1509 * Routine: convert_port_to_task
1510 * Purpose:
1511 * Convert from a port to a task.
1512 * Doesn't consume the port ref; produces a task ref,
1513 * which may be null.
1514 * Conditions:
1515 * Nothing locked.
1516 */
1517 task_t
1518 convert_port_to_task(
1519 ipc_port_t port)
1520 {
1521 return convert_port_to_task_with_exec_token(port, NULL);
1522 }
1523
1524 /*
1525 * Routine: convert_port_to_task_with_exec_token
1526 * Purpose:
1527 * Convert from a port to a task and return
1528 * the exec token stored in the task.
1529 * Doesn't consume the port ref; produces a task ref,
1530 * which may be null.
1531 * Conditions:
1532 * Nothing locked.
1533 */
task_t
convert_port_to_task_with_exec_token(
	ipc_port_t		port,
	uint32_t		*exec_token)
{
	task_t		task = TASK_NULL;

	if (IP_VALID(port)) {
		ip_lock(port);

		if (	ip_active(port)				&&
				ip_kotype(port) == IKOT_TASK	) {
			task_t ct = current_task();
			task = (task_t)port->ip_kobject;
			assert(task != TASK_NULL);

			/* Policy check happens before any task ref is taken. */
			if (task_conversion_eval(ct, task)) {
				ip_unlock(port);
				return TASK_NULL;
			}

			/* Optionally report task->exec_token to the caller
			 * (presumably an exec generation counter — confirm). */
			if (exec_token) {
				*exec_token = task->exec_token;
			}
			/* Ref is taken while the port lock pins the kobject. */
			task_reference_internal(task);
		}

		ip_unlock(port);
	}

	return (task);
}
1566
1567 /*
1568 * Routine: convert_port_to_task_name
1569 * Purpose:
1570 * Convert from a port to a task name.
1571 * Doesn't consume the port ref; produces a task name ref,
1572 * which may be null.
1573 * Conditions:
1574 * Nothing locked.
1575 */
1576 task_name_t
1577 convert_port_to_task_name(
1578 ipc_port_t port)
1579 {
1580 task_name_t task = TASK_NULL;
1581
1582 if (IP_VALID(port)) {
1583 ip_lock(port);
1584
1585 if ( ip_active(port) &&
1586 (ip_kotype(port) == IKOT_TASK ||
1587 ip_kotype(port) == IKOT_TASK_NAME)) {
1588 task = (task_name_t)port->ip_kobject;
1589 assert(task != TASK_NAME_NULL);
1590
1591 task_reference_internal(task);
1592 }
1593
1594 ip_unlock(port);
1595 }
1596
1597 return (task);
1598 }
1599
1600 /*
1601 * Routine: convert_port_to_task_inspect
1602 * Purpose:
1603 * Convert from a port to a task inspection right
1604 * Doesn't consume the port ref; produces a task ref,
1605 * which may be null.
1606 * Conditions:
1607 * Nothing locked.
1608 */
1609 task_inspect_t
1610 convert_port_to_task_inspect(
1611 ipc_port_t port)
1612 {
1613 task_inspect_t task = TASK_INSPECT_NULL;
1614
1615 if (IP_VALID(port)) {
1616 ip_lock(port);
1617
1618 if (ip_active(port) &&
1619 ip_kotype(port) == IKOT_TASK) {
1620 task = (task_inspect_t)port->ip_kobject;
1621 assert(task != TASK_INSPECT_NULL);
1622
1623 task_reference_internal(task);
1624 }
1625
1626 ip_unlock(port);
1627 }
1628
1629 return (task);
1630 }
1631
1632 /*
1633 * Routine: convert_port_to_task_suspension_token
1634 * Purpose:
1635 * Convert from a port to a task suspension token.
1636 * Doesn't consume the port ref; produces a suspension token ref,
1637 * which may be null.
1638 * Conditions:
1639 * Nothing locked.
1640 */
1641 task_suspension_token_t
1642 convert_port_to_task_suspension_token(
1643 ipc_port_t port)
1644 {
1645 task_suspension_token_t task = TASK_NULL;
1646
1647 if (IP_VALID(port)) {
1648 ip_lock(port);
1649
1650 if ( ip_active(port) &&
1651 ip_kotype(port) == IKOT_TASK_RESUME) {
1652 task = (task_suspension_token_t)port->ip_kobject;
1653 assert(task != TASK_NULL);
1654
1655 task_reference_internal(task);
1656 }
1657
1658 ip_unlock(port);
1659 }
1660
1661 return (task);
1662 }
1663
1664 /*
1665 * Routine: convert_port_to_space
1666 * Purpose:
1667 * Convert from a port to a space.
1668 * Doesn't consume the port ref; produces a space ref,
1669 * which may be null.
1670 * Conditions:
1671 * Nothing locked.
1672 */
1673 ipc_space_t
1674 convert_port_to_space(
1675 ipc_port_t port)
1676 {
1677 ipc_space_t space;
1678 task_t task;
1679
1680 task = convert_port_to_locked_task(port);
1681
1682 if (task == TASK_NULL)
1683 return IPC_SPACE_NULL;
1684
1685 if (!task->active) {
1686 task_unlock(task);
1687 return IPC_SPACE_NULL;
1688 }
1689
1690 space = task->itk_space;
1691 is_reference(space);
1692 task_unlock(task);
1693 return (space);
1694 }
1695
1696 /*
1697 * Routine: convert_port_to_space_inspect
1698 * Purpose:
1699 * Convert from a port to a space inspect right.
1700 * Doesn't consume the port ref; produces a space inspect ref,
1701 * which may be null.
1702 * Conditions:
1703 * Nothing locked.
1704 */
1705 ipc_space_inspect_t
1706 convert_port_to_space_inspect(
1707 ipc_port_t port)
1708 {
1709 ipc_space_inspect_t space;
1710 task_inspect_t task;
1711
1712 task = convert_port_to_locked_task_inspect(port);
1713
1714 if (task == TASK_INSPECT_NULL)
1715 return IPC_SPACE_INSPECT_NULL;
1716
1717 if (!task->active) {
1718 task_unlock(task);
1719 return IPC_SPACE_INSPECT_NULL;
1720 }
1721
1722 space = (ipc_space_inspect_t)task->itk_space;
1723 is_reference((ipc_space_t)space);
1724 task_unlock((task_t)task);
1725 return space;
1726 }
1727
1728 /*
1729 * Routine: convert_port_to_map
1730 * Purpose:
1731 * Convert from a port to a map.
1732 * Doesn't consume the port ref; produces a map ref,
1733 * which may be null.
1734 * Conditions:
1735 * Nothing locked.
1736 */
1737
1738 vm_map_t
1739 convert_port_to_map(
1740 ipc_port_t port)
1741 {
1742 task_t task;
1743 vm_map_t map;
1744
1745 task = convert_port_to_locked_task(port);
1746
1747 if (task == TASK_NULL)
1748 return VM_MAP_NULL;
1749
1750 if (!task->active) {
1751 task_unlock(task);
1752 return VM_MAP_NULL;
1753 }
1754
1755 map = task->map;
1756 vm_map_reference_swap(map);
1757 task_unlock(task);
1758 return map;
1759 }
1760
1761
1762 /*
1763 * Routine: convert_port_to_thread
1764 * Purpose:
1765 * Convert from a port to a thread.
1766 * Doesn't consume the port ref; produces an thread ref,
1767 * which may be null.
1768 * Conditions:
1769 * Nothing locked.
1770 */
1771
1772 thread_t
1773 convert_port_to_thread(
1774 ipc_port_t port)
1775 {
1776 thread_t thread = THREAD_NULL;
1777
1778 if (IP_VALID(port)) {
1779 ip_lock(port);
1780
1781 if (ip_active(port) &&
1782 ip_kotype(port) == IKOT_THREAD) {
1783 thread = (thread_t)port->ip_kobject;
1784 assert(thread != THREAD_NULL);
1785
1786 /* Use task conversion rules for thread control conversions */
1787 if (task_conversion_eval(current_task(), thread->task) != KERN_SUCCESS) {
1788 ip_unlock(port);
1789 return THREAD_NULL;
1790 }
1791
1792 thread_reference_internal(thread);
1793 }
1794
1795 ip_unlock(port);
1796 }
1797
1798 return (thread);
1799 }
1800
1801 /*
1802 * Routine: convert_port_to_thread_inspect
1803 * Purpose:
1804 * Convert from a port to a thread inspection right
1805 * Doesn't consume the port ref; produces a thread ref,
1806 * which may be null.
1807 * Conditions:
1808 * Nothing locked.
1809 */
1810 thread_inspect_t
1811 convert_port_to_thread_inspect(
1812 ipc_port_t port)
1813 {
1814 thread_inspect_t thread = THREAD_INSPECT_NULL;
1815
1816 if (IP_VALID(port)) {
1817 ip_lock(port);
1818
1819 if (ip_active(port) &&
1820 ip_kotype(port) == IKOT_THREAD) {
1821 thread = (thread_inspect_t)port->ip_kobject;
1822 assert(thread != THREAD_INSPECT_NULL);
1823 thread_reference_internal((thread_t)thread);
1824 }
1825 ip_unlock(port);
1826 }
1827
1828 return thread;
1829 }
1830
1831 /*
1832 * Routine: convert_thread_inspect_to_port
1833 * Purpose:
1834 * Convert from a thread inspect reference to a port.
1835 * Consumes a thread ref;
1836 * As we never export thread inspect ports, always
1837 * creates a NULL port.
1838 * Conditions:
1839 * Nothing locked.
1840 */
1841
1842 ipc_port_t
1843 convert_thread_inspect_to_port(thread_inspect_t thread)
1844 {
1845 thread_deallocate(thread);
1846 return IP_NULL;
1847 }
1848
1849
1850 /*
1851 * Routine: port_name_to_thread
1852 * Purpose:
1853 * Convert from a port name to an thread reference
1854 * A name of MACH_PORT_NULL is valid for the null thread.
1855 * Conditions:
1856 * Nothing locked.
1857 *
1858 * TODO: Could this be faster if it were ipc_port_translate_send based, like thread_switch?
1859 * We could avoid extra lock/unlock and extra ref operations on the port.
1860 */
1861 thread_t
1862 port_name_to_thread(
1863 mach_port_name_t name)
1864 {
1865 thread_t thread = THREAD_NULL;
1866 ipc_port_t kport;
1867
1868 if (MACH_PORT_VALID(name)) {
1869 if (ipc_object_copyin(current_space(), name,
1870 MACH_MSG_TYPE_COPY_SEND,
1871 (ipc_object_t *)&kport) != KERN_SUCCESS)
1872 return (THREAD_NULL);
1873
1874 thread = convert_port_to_thread(kport);
1875
1876 if (IP_VALID(kport))
1877 ipc_port_release_send(kport);
1878 }
1879
1880 return (thread);
1881 }
1882
1883 task_t
1884 port_name_to_task(
1885 mach_port_name_t name)
1886 {
1887 ipc_port_t kern_port;
1888 kern_return_t kr;
1889 task_t task = TASK_NULL;
1890
1891 if (MACH_PORT_VALID(name)) {
1892 kr = ipc_object_copyin(current_space(), name,
1893 MACH_MSG_TYPE_COPY_SEND,
1894 (ipc_object_t *) &kern_port);
1895 if (kr != KERN_SUCCESS)
1896 return TASK_NULL;
1897
1898 task = convert_port_to_task(kern_port);
1899
1900 if (IP_VALID(kern_port))
1901 ipc_port_release_send(kern_port);
1902 }
1903 return task;
1904 }
1905
1906 task_inspect_t
1907 port_name_to_task_inspect(
1908 mach_port_name_t name)
1909 {
1910 ipc_port_t kern_port;
1911 kern_return_t kr;
1912 task_inspect_t ti = TASK_INSPECT_NULL;
1913
1914 if (MACH_PORT_VALID(name)) {
1915 kr = ipc_object_copyin(current_space(), name,
1916 MACH_MSG_TYPE_COPY_SEND,
1917 (ipc_object_t *)&kern_port);
1918 if (kr != KERN_SUCCESS)
1919 return TASK_NULL;
1920
1921 ti = convert_port_to_task_inspect(kern_port);
1922
1923 if (IP_VALID(kern_port))
1924 ipc_port_release_send(kern_port);
1925 }
1926 return ti;
1927 }
1928
1929 /*
1930 * Routine: port_name_to_host
1931 * Purpose:
1932 * Convert from a port name to a host pointer.
1933 * NOTE: This does _not_ return a +1 reference to the host_t
1934 * Conditions:
1935 * Nothing locked.
1936 */
1937 host_t
1938 port_name_to_host(
1939 mach_port_name_t name)
1940 {
1941
1942 host_t host = HOST_NULL;
1943 kern_return_t kr;
1944 ipc_port_t port;
1945
1946 if (MACH_PORT_VALID(name)) {
1947 kr = ipc_port_translate_send(current_space(), name, &port);
1948 if (kr == KERN_SUCCESS) {
1949 host = convert_port_to_host(port);
1950 ip_unlock(port);
1951 }
1952 }
1953 return host;
1954 }
1955
1956 /*
1957 * Routine: convert_task_to_port
1958 * Purpose:
1959 * Convert from a task to a port.
1960 * Consumes a task ref; produces a naked send right
1961 * which may be invalid.
1962 * Conditions:
1963 * Nothing locked.
1964 */
1965
1966 ipc_port_t
1967 convert_task_to_port(
1968 task_t task)
1969 {
1970 ipc_port_t port;
1971
1972 itk_lock(task);
1973
1974 if (task->itk_self != IP_NULL)
1975 port = ipc_port_make_send(task->itk_self);
1976 else
1977 port = IP_NULL;
1978
1979 itk_unlock(task);
1980
1981 task_deallocate(task);
1982 return port;
1983 }
1984
1985 /*
1986 * Routine: convert_task_inspect_to_port
1987 * Purpose:
1988 * Convert from a task inspect reference to a port.
1989 * Consumes a task ref;
1990 * As we never export task inspect ports, always
1991 * creates a NULL port.
1992 * Conditions:
1993 * Nothing locked.
1994 */
1995 ipc_port_t
1996 convert_task_inspect_to_port(
1997 task_inspect_t task)
1998 {
1999 task_deallocate(task);
2000
2001 return IP_NULL;
2002 }
2003
2004 /*
2005 * Routine: convert_task_suspend_token_to_port
2006 * Purpose:
2007 * Convert from a task suspension token to a port.
2008 * Consumes a task suspension token ref; produces a naked send-once right
2009 * which may be invalid.
2010 * Conditions:
2011 * Nothing locked.
2012 */
ipc_port_t
convert_task_suspension_token_to_port(
	task_suspension_token_t		task)
{
	ipc_port_t port;

	task_lock(task);
	if (task->active) {
		/* Lazily create the shared resume kobject port on first use. */
		if (task->itk_resume == IP_NULL) {
			task->itk_resume = ipc_port_alloc_kernel();
			if (!IP_VALID(task->itk_resume)) {
				panic("failed to create resume port");
			}

			ipc_kobject_set(task->itk_resume, (ipc_kobject_t) task, IKOT_TASK_RESUME);
		}

		/*
		 * Create a send-once right for each instance of a direct user-called
		 * task_suspend2 call. Each time one of these send-once rights is abandoned,
		 * the notification handler will resume the target task.
		 */
		port = ipc_port_make_sonce(task->itk_resume);
		assert(IP_VALID(port));
	} else {
		/* Dead task: no resume right to hand out. */
		port = IP_NULL;
	}

	task_unlock(task);
	/* Consumes the caller's suspension-token reference in all cases. */
	task_suspension_token_deallocate(task);

	return port;
}
2046
2047
2048 /*
2049 * Routine: convert_task_name_to_port
2050 * Purpose:
2051 * Convert from a task name ref to a port.
2052 * Consumes a task name ref; produces a naked send right
2053 * which may be invalid.
2054 * Conditions:
2055 * Nothing locked.
2056 */
2057
2058 ipc_port_t
2059 convert_task_name_to_port(
2060 task_name_t task_name)
2061 {
2062 ipc_port_t port;
2063
2064 itk_lock(task_name);
2065 if (task_name->itk_nself != IP_NULL)
2066 port = ipc_port_make_send(task_name->itk_nself);
2067 else
2068 port = IP_NULL;
2069 itk_unlock(task_name);
2070
2071 task_name_deallocate(task_name);
2072 return port;
2073 }
2074
2075 /*
2076 * Routine: convert_thread_to_port
2077 * Purpose:
2078 * Convert from a thread to a port.
2079 * Consumes an thread ref; produces a naked send right
2080 * which may be invalid.
2081 * Conditions:
2082 * Nothing locked.
2083 */
2084
2085 ipc_port_t
2086 convert_thread_to_port(
2087 thread_t thread)
2088 {
2089 ipc_port_t port;
2090
2091 thread_mtx_lock(thread);
2092
2093 if (thread->ith_self != IP_NULL)
2094 port = ipc_port_make_send(thread->ith_self);
2095 else
2096 port = IP_NULL;
2097
2098 thread_mtx_unlock(thread);
2099
2100 thread_deallocate(thread);
2101
2102 return (port);
2103 }
2104
2105 /*
2106 * Routine: space_deallocate
2107 * Purpose:
2108 * Deallocate a space ref produced by convert_port_to_space.
2109 * Conditions:
2110 * Nothing locked.
2111 */
2112
2113 void
2114 space_deallocate(
2115 ipc_space_t space)
2116 {
2117 if (space != IS_NULL)
2118 is_release(space);
2119 }
2120
2121 /*
2122 * Routine: space_inspect_deallocate
2123 * Purpose:
2124 * Deallocate a space inspect ref produced by convert_port_to_space_inspect.
2125 * Conditions:
2126 * Nothing locked.
2127 */
2128
2129 void
2130 space_inspect_deallocate(
2131 ipc_space_inspect_t space)
2132 {
2133 if (space != IS_INSPECT_NULL)
2134 is_release((ipc_space_t)space);
2135 }
2136
2137 /*
2138 * Routine: thread/task_set_exception_ports [kernel call]
2139 * Purpose:
2140 * Sets the thread/task exception port, flavor and
2141 * behavior for the exception types specified by the mask.
2142 * There will be one send right per exception per valid
2143 * port.
2144 * Conditions:
2145 * Nothing locked. If successful, consumes
2146 * the supplied send right.
2147 * Returns:
2148 * KERN_SUCCESS Changed the special port.
2149 * KERN_INVALID_ARGUMENT The thread is null,
2150 * Illegal mask bit set.
2151 * Illegal exception behavior
2152 * KERN_FAILURE The thread is dead.
2153 */
2154
kern_return_t
thread_set_exception_ports(
	thread_t		thread,
	exception_mask_t	exception_mask,
	ipc_port_t		new_port,
	exception_behavior_t	new_behavior,
	thread_state_flavor_t	new_flavor)
{
	ipc_port_t old_port[EXC_TYPES_COUNT];	/* displaced rights, released after unlock */
	/* privileged iff the caller's security token leads with 0 (root). */
	boolean_t privileged = current_task()->sec_token.val[0] == 0;
	register int i;

#if CONFIG_MACF
	struct label *new_label;
#endif

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (exception_mask & ~EXC_MASK_VALID)
		return (KERN_INVALID_ARGUMENT);

	/* A valid port must carry a recognized behavior (modulo MACH_EXCEPTION_CODES). */
	if (IP_VALID(new_port)) {
		switch (new_behavior & ~MACH_EXCEPTION_CODES) {

		case EXCEPTION_DEFAULT:
		case EXCEPTION_STATE:
		case EXCEPTION_STATE_IDENTITY:
			break;

		default:
			return (KERN_INVALID_ARGUMENT);
		}
	}

	/*
	 * Check the validity of the thread_state_flavor by calling the
	 * VALID_THREAD_STATE_FLAVOR architecture dependent macro defined in
	 * osfmk/mach/ARCHITECTURE/thread_status.h
	 */
	if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor))
		return (KERN_INVALID_ARGUMENT);

#if CONFIG_MACF
	/* Label allocated before taking the thread mutex; freed after unlock. */
	new_label = mac_exc_create_label_for_current_proc();
#endif

	thread_mtx_lock(thread);

	if (!thread->active) {
		thread_mtx_unlock(thread);

		return (KERN_FAILURE);
	}

	/* exc_actions is allocated lazily on first use. */
	if (thread->exc_actions == NULL) {
		ipc_thread_init_exc_actions(thread);
	}
	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
		if ((exception_mask & (1 << i))
#if CONFIG_MACF
			&& mac_exc_update_action_label(&thread->exc_actions[i], new_label) == 0
#endif
			) {
			/* Stash the old right; install a copy of the new one. */
			old_port[i] = thread->exc_actions[i].port;
			thread->exc_actions[i].port = ipc_port_copy_send(new_port);
			thread->exc_actions[i].behavior = new_behavior;
			thread->exc_actions[i].flavor = new_flavor;
			thread->exc_actions[i].privileged = privileged;
		}
		else
			old_port[i] = IP_NULL;
	}

	thread_mtx_unlock(thread);

#if CONFIG_MACF
	mac_exc_free_label(new_label);
#endif

	/* Release displaced rights only after dropping the thread mutex. */
	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i)
		if (IP_VALID(old_port[i]))
			ipc_port_release_send(old_port[i]);

	if (IP_VALID(new_port))		 /* consume send right */
		ipc_port_release_send(new_port);

	return (KERN_SUCCESS);
}
2244
kern_return_t
task_set_exception_ports(
	task_t			task,
	exception_mask_t	exception_mask,
	ipc_port_t		new_port,
	exception_behavior_t	new_behavior,
	thread_state_flavor_t	new_flavor)
{
	ipc_port_t old_port[EXC_TYPES_COUNT];	/* displaced rights, released after unlock */
	/* privileged iff the caller's security token leads with 0 (root). */
	boolean_t privileged = current_task()->sec_token.val[0] == 0;
	register int i;

#if CONFIG_MACF
	struct label *new_label;
#endif

	if (task == TASK_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (exception_mask & ~EXC_MASK_VALID)
		return (KERN_INVALID_ARGUMENT);

	/* A valid port must carry a recognized behavior (modulo MACH_EXCEPTION_CODES). */
	if (IP_VALID(new_port)) {
		switch (new_behavior & ~MACH_EXCEPTION_CODES) {

		case EXCEPTION_DEFAULT:
		case EXCEPTION_STATE:
		case EXCEPTION_STATE_IDENTITY:
			break;

		default:
			return (KERN_INVALID_ARGUMENT);
		}
	}

	/*
	 * Check the validity of the thread_state_flavor by calling the
	 * VALID_THREAD_STATE_FLAVOR architecture dependent macro defined in
	 * osfmk/mach/ARCHITECTURE/thread_status.h
	 */
	if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor))
		return (KERN_INVALID_ARGUMENT);

#if CONFIG_MACF
	/* Label allocated before taking the itk lock; freed after unlock. */
	new_label = mac_exc_create_label_for_current_proc();
#endif

	itk_lock(task);

	/* itk_self is nulled when the task's IPC state is torn down. */
	if (task->itk_self == IP_NULL) {
		itk_unlock(task);

		return (KERN_FAILURE);
	}

	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
		if ((exception_mask & (1 << i))
#if CONFIG_MACF
			&& mac_exc_update_action_label(&task->exc_actions[i], new_label) == 0
#endif
			) {
			/* Stash the old right; install a copy of the new one. */
			old_port[i] = task->exc_actions[i].port;
			task->exc_actions[i].port =
				ipc_port_copy_send(new_port);
			task->exc_actions[i].behavior = new_behavior;
			task->exc_actions[i].flavor = new_flavor;
			task->exc_actions[i].privileged = privileged;
		}
		else
			old_port[i] = IP_NULL;
	}

	itk_unlock(task);

#if CONFIG_MACF
	mac_exc_free_label(new_label);
#endif

	/* Release displaced rights only after dropping the itk lock. */
	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i)
		if (IP_VALID(old_port[i]))
			ipc_port_release_send(old_port[i]);

	if (IP_VALID(new_port))		 /* consume send right */
		ipc_port_release_send(new_port);

	return (KERN_SUCCESS);
}
2332
2333 /*
2334 * Routine: thread/task_swap_exception_ports [kernel call]
2335 * Purpose:
2336 * Sets the thread/task exception port, flavor and
2337 * behavior for the exception types specified by the
2338 * mask.
2339 *
2340 * The old ports, behavior and flavors are returned
2341 * Count specifies the array sizes on input and
2342 * the number of returned ports etc. on output. The
2343 * arrays must be large enough to hold all the returned
2344 * data, MIG returnes an error otherwise. The masks
2345 * array specifies the corresponding exception type(s).
2346 *
2347 * Conditions:
2348 * Nothing locked. If successful, consumes
2349 * the supplied send right.
2350 *
2351 * Returns upto [in} CountCnt elements.
2352 * Returns:
2353 * KERN_SUCCESS Changed the special port.
2354 * KERN_INVALID_ARGUMENT The thread is null,
2355 * Illegal mask bit set.
2356 * Illegal exception behavior
2357 * KERN_FAILURE The thread is dead.
2358 */
2359
kern_return_t
thread_swap_exception_ports(
	thread_t			thread,
	exception_mask_t		exception_mask,
	ipc_port_t			new_port,
	exception_behavior_t		new_behavior,
	thread_state_flavor_t		new_flavor,
	exception_mask_array_t		masks,
	mach_msg_type_number_t		*CountCnt,
	exception_port_array_t		ports,
	exception_behavior_array_t	behaviors,
	thread_state_flavor_array_t	flavors)
{
	ipc_port_t old_port[EXC_TYPES_COUNT];	/* displaced rights, released after unlock */
	/* privileged iff the caller's security token leads with 0 (root). */
	boolean_t privileged = current_task()->sec_token.val[0] == 0;
	unsigned int i, j, count;

#if CONFIG_MACF
	struct label *new_label;
#endif

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (exception_mask & ~EXC_MASK_VALID)
		return (KERN_INVALID_ARGUMENT);

	/* A valid port must carry a recognized behavior (modulo MACH_EXCEPTION_CODES). */
	if (IP_VALID(new_port)) {
		switch (new_behavior & ~MACH_EXCEPTION_CODES) {

		case EXCEPTION_DEFAULT:
		case EXCEPTION_STATE:
		case EXCEPTION_STATE_IDENTITY:
			break;

		default:
			return (KERN_INVALID_ARGUMENT);
		}
	}

	if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor))
		return (KERN_INVALID_ARGUMENT);

#if CONFIG_MACF
	/* Label allocated before taking the thread mutex; freed after unlock. */
	new_label = mac_exc_create_label_for_current_proc();
#endif

	thread_mtx_lock(thread);

	if (!thread->active) {
		thread_mtx_unlock(thread);

		return (KERN_FAILURE);
	}

	/* exc_actions is allocated lazily on first use. */
	if (thread->exc_actions == NULL) {
		ipc_thread_init_exc_actions(thread);
	}

	assert(EXC_TYPES_COUNT > FIRST_EXCEPTION);
	/*
	 * Walk the selected exceptions, coalescing identical
	 * (port, behavior, flavor) triples into one output entry by
	 * OR-ing their masks; output is capped at *CountCnt entries.
	 */
	for (count = 0, i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT && count < *CountCnt; ++i) {
		if ((exception_mask & (1 << i))
#if CONFIG_MACF
			&& mac_exc_update_action_label(&thread->exc_actions[i], new_label) == 0
#endif
			) {
			for (j = 0; j < count; ++j) {
				/*
				 * search for an identical entry, if found
				 * set corresponding mask for this exception.
				 */
				if (	thread->exc_actions[i].port == ports[j]			&&
					thread->exc_actions[i].behavior == behaviors[j]		&&
					thread->exc_actions[i].flavor == flavors[j]		) {
					masks[j] |= (1 << i);
					break;
				}
			}

			/* No match found: emit a new entry (j == count < *CountCnt here). */
			if (j == count) {
				masks[j] = (1 << i);
				ports[j] = ipc_port_copy_send(thread->exc_actions[i].port);

				behaviors[j] = thread->exc_actions[i].behavior;
				flavors[j] = thread->exc_actions[i].flavor;
				++count;
			}

			/* Stash the old right; install a copy of the new one. */
			old_port[i] = thread->exc_actions[i].port;
			thread->exc_actions[i].port = ipc_port_copy_send(new_port);
			thread->exc_actions[i].behavior = new_behavior;
			thread->exc_actions[i].flavor = new_flavor;
			thread->exc_actions[i].privileged = privileged;
		}
		else
			old_port[i] = IP_NULL;
	}

	thread_mtx_unlock(thread);

#if CONFIG_MACF
	mac_exc_free_label(new_label);
#endif

	/*
	 * Release displaced rights, counting down from the last index the
	 * loop reached (entries above it were never written).
	 * NOTE(review): `i` is unsigned, so `--i` would wrap were
	 * FIRST_EXCEPTION 0 — relies on FIRST_EXCEPTION >= 1; confirm.
	 */
	while (--i >= FIRST_EXCEPTION) {
		if (IP_VALID(old_port[i]))
			ipc_port_release_send(old_port[i]);
	}

	if (IP_VALID(new_port))		 /* consume send right */
		ipc_port_release_send(new_port);

	*CountCnt = count;

	return (KERN_SUCCESS);
}
2476
/*
 *	Routine:	task_swap_exception_ports [kernel call]
 *	Purpose:
 *		Replaces the task exception port, behavior and flavor for
 *		every exception type selected in exception_mask, returning
 *		the previous settings deduplicated into at most *CountCnt
 *		(mask, port, behavior, flavor) out tuples.
 *	Conditions:
 *		Nothing locked.  Consumes a send right on new_port.
 *	Returns:
 *		KERN_SUCCESS		Ports swapped; *CountCnt updated.
 *		KERN_INVALID_ARGUMENT	Null task, illegal mask bit,
 *					invalid behavior or flavor.
 *		KERN_FAILURE		The task has no self port.
 */
kern_return_t
task_swap_exception_ports(
	task_t					task,
	exception_mask_t		exception_mask,
	ipc_port_t				new_port,
	exception_behavior_t	new_behavior,
	thread_state_flavor_t	new_flavor,
	exception_mask_array_t	masks,
	mach_msg_type_number_t	*CountCnt,
	exception_port_array_t	ports,
	exception_behavior_array_t	behaviors,
	thread_state_flavor_array_t	flavors)
{
	ipc_port_t		old_port[EXC_TYPES_COUNT];
	/* handlers installed by a sec-token-0 (root/kernel) caller are privileged */
	boolean_t privileged = current_task()->sec_token.val[0] == 0;
	unsigned int	i, j, count;

#if CONFIG_MACF
	struct label *new_label;
#endif

	if (task == TASK_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (exception_mask & ~EXC_MASK_VALID)
		return (KERN_INVALID_ARGUMENT);

	if (IP_VALID(new_port)) {
		/* MACH_EXCEPTION_CODES is a modifier bit, not a behavior */
		switch (new_behavior & ~MACH_EXCEPTION_CODES) {

		case EXCEPTION_DEFAULT:
		case EXCEPTION_STATE:
		case EXCEPTION_STATE_IDENTITY:
			break;

		default:
			return (KERN_INVALID_ARGUMENT);
		}
	}

	if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor))
		return (KERN_INVALID_ARGUMENT);

#if CONFIG_MACF
	new_label = mac_exc_create_label_for_current_proc();
#endif

	itk_lock(task);

	if (task->itk_self == IP_NULL) {
		/* no self port: presumably the task has terminated — fail */
		itk_unlock(task);

		return (KERN_FAILURE);
	}

	assert(EXC_TYPES_COUNT > FIRST_EXCEPTION);
	/*
	 * Stop early once the caller-supplied out arrays (*CountCnt
	 * entries) are full; later masked exceptions are then left
	 * unswapped.
	 */
	for (count = 0, i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT && count < *CountCnt; ++i) {
		if ((exception_mask & (1 << i))
#if CONFIG_MACF
		    && mac_exc_update_action_label(&task->exc_actions[i], new_label) == 0
#endif
			) {
			for (j = 0; j < count; j++) {
				/*
				 * search for an identical entry, if found
				 * set corresponding mask for this exception.
				 */
				if ( task->exc_actions[i].port == ports[j] &&
				     task->exc_actions[i].behavior == behaviors[j] &&
				     task->exc_actions[i].flavor == flavors[j] ) {
					masks[j] |= (1 << i);
					break;
				}
			}

			if (j == count) {
				/* no identical entry: start a new out tuple for this old setting */
				masks[j] = (1 << i);
				ports[j] = ipc_port_copy_send(task->exc_actions[i].port);
				behaviors[j] = task->exc_actions[i].behavior;
				flavors[j] = task->exc_actions[i].flavor;
				++count;
			}

			/* stash the displaced port; its send right is released after unlock */
			old_port[i] = task->exc_actions[i].port;

			task->exc_actions[i].port = ipc_port_copy_send(new_port);
			task->exc_actions[i].behavior = new_behavior;
			task->exc_actions[i].flavor = new_flavor;
			task->exc_actions[i].privileged = privileged;
		}
		else
			old_port[i] = IP_NULL;
	}

	itk_unlock(task);

#if CONFIG_MACF
	mac_exc_free_label(new_label);
#endif

	/* now that the task lock is dropped, release the displaced send rights */
	while (--i >= FIRST_EXCEPTION) {
		if (IP_VALID(old_port[i]))
			ipc_port_release_send(old_port[i]);
	}

	if (IP_VALID(new_port))		 /* consume send right */
		ipc_port_release_send(new_port);

	*CountCnt = count;

	return (KERN_SUCCESS);
}
2589
2590 /*
2591 * Routine: thread/task_get_exception_ports [kernel call]
2592 * Purpose:
2593 * Clones a send right for each of the thread/task's exception
2594 * ports specified in the mask and returns the behaviour
2595 * and flavor of said port.
2596 *
 *	Returns up to [in] CountCnt elements.
2598 *
2599 * Conditions:
2600 * Nothing locked.
2601 * Returns:
2602 * KERN_SUCCESS Extracted a send right.
2603 * KERN_INVALID_ARGUMENT The thread is null,
2604 * Invalid special port,
2605 * Illegal mask bit set.
2606 * KERN_FAILURE The thread is dead.
2607 */
2608
2609 kern_return_t
2610 thread_get_exception_ports(
2611 thread_t thread,
2612 exception_mask_t exception_mask,
2613 exception_mask_array_t masks,
2614 mach_msg_type_number_t *CountCnt,
2615 exception_port_array_t ports,
2616 exception_behavior_array_t behaviors,
2617 thread_state_flavor_array_t flavors)
2618 {
2619 unsigned int i, j, count;
2620
2621 if (thread == THREAD_NULL)
2622 return (KERN_INVALID_ARGUMENT);
2623
2624 if (exception_mask & ~EXC_MASK_VALID)
2625 return (KERN_INVALID_ARGUMENT);
2626
2627 thread_mtx_lock(thread);
2628
2629 if (!thread->active) {
2630 thread_mtx_unlock(thread);
2631
2632 return (KERN_FAILURE);
2633 }
2634
2635 count = 0;
2636
2637 if (thread->exc_actions == NULL) {
2638 goto done;
2639 }
2640
2641 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
2642 if (exception_mask & (1 << i)) {
2643 for (j = 0; j < count; ++j) {
2644 /*
2645 * search for an identical entry, if found
2646 * set corresponding mask for this exception.
2647 */
2648 if ( thread->exc_actions[i].port == ports[j] &&
2649 thread->exc_actions[i].behavior ==behaviors[j] &&
2650 thread->exc_actions[i].flavor == flavors[j] ) {
2651 masks[j] |= (1 << i);
2652 break;
2653 }
2654 }
2655
2656 if (j == count) {
2657 masks[j] = (1 << i);
2658 ports[j] = ipc_port_copy_send(thread->exc_actions[i].port);
2659 behaviors[j] = thread->exc_actions[i].behavior;
2660 flavors[j] = thread->exc_actions[i].flavor;
2661 ++count;
2662 if (count >= *CountCnt)
2663 break;
2664 }
2665 }
2666 }
2667
2668 done:
2669 thread_mtx_unlock(thread);
2670
2671 *CountCnt = count;
2672
2673 return (KERN_SUCCESS);
2674 }
2675
2676 kern_return_t
2677 task_get_exception_ports(
2678 task_t task,
2679 exception_mask_t exception_mask,
2680 exception_mask_array_t masks,
2681 mach_msg_type_number_t *CountCnt,
2682 exception_port_array_t ports,
2683 exception_behavior_array_t behaviors,
2684 thread_state_flavor_array_t flavors)
2685 {
2686 unsigned int i, j, count;
2687
2688 if (task == TASK_NULL)
2689 return (KERN_INVALID_ARGUMENT);
2690
2691 if (exception_mask & ~EXC_MASK_VALID)
2692 return (KERN_INVALID_ARGUMENT);
2693
2694 itk_lock(task);
2695
2696 if (task->itk_self == IP_NULL) {
2697 itk_unlock(task);
2698
2699 return (KERN_FAILURE);
2700 }
2701
2702 count = 0;
2703
2704 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
2705 if (exception_mask & (1 << i)) {
2706 for (j = 0; j < count; ++j) {
2707 /*
2708 * search for an identical entry, if found
2709 * set corresponding mask for this exception.
2710 */
2711 if ( task->exc_actions[i].port == ports[j] &&
2712 task->exc_actions[i].behavior == behaviors[j] &&
2713 task->exc_actions[i].flavor == flavors[j] ) {
2714 masks[j] |= (1 << i);
2715 break;
2716 }
2717 }
2718
2719 if (j == count) {
2720 masks[j] = (1 << i);
2721 ports[j] = ipc_port_copy_send(task->exc_actions[i].port);
2722 behaviors[j] = task->exc_actions[i].behavior;
2723 flavors[j] = task->exc_actions[i].flavor;
2724 ++count;
2725 if (count > *CountCnt)
2726 break;
2727 }
2728 }
2729 }
2730
2731 itk_unlock(task);
2732
2733 *CountCnt = count;
2734
2735 return (KERN_SUCCESS);
2736 }