]> git.saurik.com Git - apple/xnu.git/blob - osfmk/kern/ipc_tt.c
xnu-4570.51.1.tar.gz
[apple/xnu.git] / osfmk / kern / ipc_tt.c
1 /*
2 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
58 * support for mandatory and extensible security protections. This notice
59 * is included in support of clause 2.2 (b) of the Apple Public License,
60 * Version 2.0.
61 */
62 /*
63 */
64
65 /*
66 * File: ipc_tt.c
67 * Purpose:
68 * Task and thread related IPC functions.
69 */
70
71 #include <mach/mach_types.h>
72 #include <mach/boolean.h>
73 #include <mach/kern_return.h>
74 #include <mach/mach_param.h>
75 #include <mach/task_special_ports.h>
76 #include <mach/thread_special_ports.h>
77 #include <mach/thread_status.h>
78 #include <mach/exception_types.h>
79 #include <mach/memory_object_types.h>
80 #include <mach/mach_traps.h>
81 #include <mach/task_server.h>
82 #include <mach/thread_act_server.h>
83 #include <mach/mach_host_server.h>
84 #include <mach/host_priv_server.h>
85 #include <mach/vm_map_server.h>
86
87 #include <kern/kern_types.h>
88 #include <kern/host.h>
89 #include <kern/ipc_kobject.h>
90 #include <kern/ipc_tt.h>
91 #include <kern/kalloc.h>
92 #include <kern/thread.h>
93 #include <kern/misc_protos.h>
94
95 #include <vm/vm_map.h>
96 #include <vm/vm_pageout.h>
97 #include <vm/vm_protos.h>
98
99 #include <security/mac_mach_internal.h>
100
101 #if CONFIG_EMBEDDED && !SECURE_KERNEL
102 extern int cs_relax_platform_task_ports;
103 #endif
104
105 /* forward declarations */
106 task_t convert_port_to_locked_task(ipc_port_t port);
107 task_inspect_t convert_port_to_locked_task_inspect(ipc_port_t port);
108 static void ipc_port_bind_special_reply_port_locked(ipc_port_t port);
109 static kern_return_t ipc_port_unbind_special_reply_port(thread_t thread, boolean_t unbind_active_port);
110 kern_return_t task_conversion_eval(task_t caller, task_t victim);
111
112 /*
113 * Routine: ipc_task_init
114 * Purpose:
115 * Initialize a task's IPC state.
116 *
117 * If non-null, some state will be inherited from the parent.
118 * The parent must be appropriately initialized.
119 * Conditions:
120 * Nothing locked.
121 */
122
void
ipc_task_init(
	task_t		task,
	task_t		parent)
{
	ipc_space_t space;
	ipc_port_t kport;
	ipc_port_t nport;
	kern_return_t kr;
	int i;

	/* Create the task's IPC space; failure to set up basic task IPC
	 * state is fatal, matching the other allocations below. */
	kr = ipc_space_create(&ipc_table_entries[0], &space);
	if (kr != KERN_SUCCESS)
		panic("ipc_task_init");

	space->is_task = task;

	/* kport: the task's kernel (control) port */
	kport = ipc_port_alloc_kernel();
	if (kport == IP_NULL)
		panic("ipc_task_init");

	/* nport: the task's name port (carries no control privilege) */
	nport = ipc_port_alloc_kernel();
	if (nport == IP_NULL)
		panic("ipc_task_init");

	itk_lock_init(task);
	task->itk_self = kport;
	task->itk_nself = nport;
	task->itk_resume = IP_NULL; /* Lazily allocated on-demand */
	if (task_is_a_corpse_fork(task)) {
		/*
		 * A no-senders notification for a corpse would not
		 * work with a naked send right held in the kernel,
		 * so a corpse fork gets no itk_sself send right.
		 */
		task->itk_sself = IP_NULL;
	} else {
		task->itk_sself = ipc_port_make_send(kport);
	}
	task->itk_debug_control = IP_NULL;
	task->itk_space = space;

#if CONFIG_MACF
	/* Give every exception action slot a fresh MAC label before any
	 * ports are installed in them. */
	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		mac_exc_associate_action_label(&task->exc_actions[i], mac_exc_create_label());
	}
#endif

	if (parent == TASK_NULL) {
		/* No parent: start with empty exception/registered ports and
		 * a fresh host port. */
		ipc_port_t port;

		for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
			task->exc_actions[i].port = IP_NULL;
		}/* for */

		kr = host_get_host_port(host_priv_self(), &port);
		assert(kr == KERN_SUCCESS);
		task->itk_host = port;

		task->itk_bootstrap = IP_NULL;
		task->itk_seatbelt = IP_NULL;
		task->itk_gssd = IP_NULL;
		task->itk_task_access = IP_NULL;

		for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
			task->itk_registered[i] = IP_NULL;
	} else {
		/* Inherit send rights from the parent, copied under the
		 * parent's itk lock so they cannot change mid-copy. */
		itk_lock(parent);
		assert(parent->itk_self != IP_NULL);

		/* inherit registered ports */

		for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
			task->itk_registered[i] =
				ipc_port_copy_send(parent->itk_registered[i]);

		/* inherit exception and bootstrap ports */

		for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
			task->exc_actions[i].port =
				ipc_port_copy_send(parent->exc_actions[i].port);
			task->exc_actions[i].flavor =
				parent->exc_actions[i].flavor;
			task->exc_actions[i].behavior =
				parent->exc_actions[i].behavior;
			task->exc_actions[i].privileged =
				parent->exc_actions[i].privileged;
#if CONFIG_MACF
			mac_exc_inherit_action_label(parent->exc_actions + i, task->exc_actions + i);
#endif
		}/* for */
		task->itk_host =
			ipc_port_copy_send(parent->itk_host);

		task->itk_bootstrap =
			ipc_port_copy_send(parent->itk_bootstrap);

		task->itk_seatbelt =
			ipc_port_copy_send(parent->itk_seatbelt);

		task->itk_gssd =
			ipc_port_copy_send(parent->itk_gssd);

		task->itk_task_access =
			ipc_port_copy_send(parent->itk_task_access);

		itk_unlock(parent);
	}
}
232
233 /*
234 * Routine: ipc_task_enable
235 * Purpose:
236 * Enable a task for IPC access.
237 * Conditions:
238 * Nothing locked.
239 */
240
241 void
242 ipc_task_enable(
243 task_t task)
244 {
245 ipc_port_t kport;
246 ipc_port_t nport;
247
248 itk_lock(task);
249 kport = task->itk_self;
250 if (kport != IP_NULL)
251 ipc_kobject_set(kport, (ipc_kobject_t) task, IKOT_TASK);
252 nport = task->itk_nself;
253 if (nport != IP_NULL)
254 ipc_kobject_set(nport, (ipc_kobject_t) task, IKOT_TASK_NAME);
255 itk_unlock(task);
256 }
257
258 /*
259 * Routine: ipc_task_disable
260 * Purpose:
261 * Disable IPC access to a task.
262 * Conditions:
263 * Nothing locked.
264 */
265
266 void
267 ipc_task_disable(
268 task_t task)
269 {
270 ipc_port_t kport;
271 ipc_port_t nport;
272 ipc_port_t rport;
273
274 itk_lock(task);
275 kport = task->itk_self;
276 if (kport != IP_NULL)
277 ipc_kobject_set(kport, IKO_NULL, IKOT_NONE);
278 nport = task->itk_nself;
279 if (nport != IP_NULL)
280 ipc_kobject_set(nport, IKO_NULL, IKOT_NONE);
281
282 rport = task->itk_resume;
283 if (rport != IP_NULL) {
284 /*
285 * From this point onwards this task is no longer accepting
286 * resumptions.
287 *
288 * There are still outstanding suspensions on this task,
289 * even as it is being torn down. Disconnect the task
290 * from the rport, thereby "orphaning" the rport. The rport
291 * itself will go away only when the last suspension holder
292 * destroys his SO right to it -- when he either
293 * exits, or tries to actually use that last SO right to
294 * resume this (now non-existent) task.
295 */
296 ipc_kobject_set(rport, IKO_NULL, IKOT_NONE);
297 }
298 itk_unlock(task);
299 }
300
301 /*
302 * Routine: ipc_task_terminate
303 * Purpose:
304 * Clean up and destroy a task's IPC state.
305 * Conditions:
306 * Nothing locked. The task must be suspended.
307 * (Or the current thread must be in the task.)
308 */
309
void
ipc_task_terminate(
	task_t		task)
{
	ipc_port_t kport;
	ipc_port_t nport;
	ipc_port_t rport;
	int i;

	itk_lock(task);
	kport = task->itk_self;

	if (kport == IP_NULL) {
		/* the task is already terminated (can this happen?) */
		itk_unlock(task);
		return;
	}
	/* Nulling itk_self marks the task as terminated; subsequent
	 * lookups under the itk lock will see IP_NULL and fail. */
	task->itk_self = IP_NULL;

	nport = task->itk_nself;
	assert(nport != IP_NULL);
	task->itk_nself = IP_NULL;

	rport = task->itk_resume;
	task->itk_resume = IP_NULL;

	itk_unlock(task);

	/* release the naked send rights */
	/* NOTE(review): the fields below are read after dropping the itk
	 * lock -- presumably safe because the task is already marked
	 * terminated above and no one else mutates them now. */

	if (IP_VALID(task->itk_sself))
		ipc_port_release_send(task->itk_sself);

	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		if (IP_VALID(task->exc_actions[i].port)) {
			ipc_port_release_send(task->exc_actions[i].port);
		}
#if CONFIG_MACF
		mac_exc_free_action_label(task->exc_actions + i);
#endif
	}

	if (IP_VALID(task->itk_host))
		ipc_port_release_send(task->itk_host);

	if (IP_VALID(task->itk_bootstrap))
		ipc_port_release_send(task->itk_bootstrap);

	if (IP_VALID(task->itk_seatbelt))
		ipc_port_release_send(task->itk_seatbelt);

	if (IP_VALID(task->itk_gssd))
		ipc_port_release_send(task->itk_gssd);

	if (IP_VALID(task->itk_task_access))
		ipc_port_release_send(task->itk_task_access);

	if (IP_VALID(task->itk_debug_control))
		ipc_port_release_send(task->itk_debug_control);

	for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
		if (IP_VALID(task->itk_registered[i]))
			ipc_port_release_send(task->itk_registered[i]);

	/* destroy the kernel ports */
	ipc_port_dealloc_kernel(kport);
	ipc_port_dealloc_kernel(nport);
	if (rport != IP_NULL)
		ipc_port_dealloc_kernel(rport);

	itk_lock_destroy(task);
}
382
383 /*
384 * Routine: ipc_task_reset
385 * Purpose:
386 * Reset a task's IPC state to protect it when
387 * it enters an elevated security context. The
388 * task name port can remain the same - since
389 * it represents no specific privilege.
390 * Conditions:
391 * Nothing locked. The task must be suspended.
392 * (Or the current thread must be in the task.)
393 */
394
void
ipc_task_reset(
	task_t		task)
{
	ipc_port_t old_kport, new_kport;
	ipc_port_t old_sself;
	ipc_port_t old_exc_actions[EXC_TYPES_COUNT];
	int i;

#if CONFIG_MACF
	/* Fresh label to unset credentials in existing labels. */
	struct label *unset_label = mac_exc_create_label();
#endif

	/* Allocate the replacement kernel port before taking the itk
	 * lock, so no allocation happens while the lock is held. */
	new_kport = ipc_port_alloc_kernel();
	if (new_kport == IP_NULL)
		panic("ipc_task_reset");

	itk_lock(task);

	old_kport = task->itk_self;

	if (old_kport == IP_NULL) {
		/* the task is already terminated (can this happen?) */
		itk_unlock(task);
		ipc_port_dealloc_kernel(new_kport);
#if CONFIG_MACF
		mac_exc_free_label(unset_label);
#endif
		return;
	}

	/* Swap in the new control port and a fresh naked send right;
	 * old rights are released after the lock is dropped. */
	task->itk_self = new_kport;
	old_sself = task->itk_sself;
	task->itk_sself = ipc_port_make_send(new_kport);

	/* Set the old kport to IKOT_NONE and update the exec token while under the port lock */
	ip_lock(old_kport);
	ipc_kobject_set_atomically(old_kport, IKO_NULL, IKOT_NONE);
	task->exec_token += 1;
	ip_unlock(old_kport);

	ipc_kobject_set(new_kport, (ipc_kobject_t) task, IKOT_TASK);

	/* Drop non-privileged exception ports; privileged ones (and a
	 * pending corpse-notify port) survive the reset. */
	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		old_exc_actions[i] = IP_NULL;

		if (i == EXC_CORPSE_NOTIFY && task_corpse_pending_report(task)) {
			continue;
		}

		if (!task->exc_actions[i].privileged) {
#if CONFIG_MACF
			mac_exc_update_action_label(task->exc_actions + i, unset_label);
#endif
			old_exc_actions[i] = task->exc_actions[i].port;
			task->exc_actions[i].port = IP_NULL;
		}
	}/* for */

	if (IP_VALID(task->itk_debug_control)) {
		ipc_port_release_send(task->itk_debug_control);
	}
	task->itk_debug_control = IP_NULL;

	itk_unlock(task);

#if CONFIG_MACF
	mac_exc_free_label(unset_label);
#endif

	/* release the naked send rights */

	if (IP_VALID(old_sself))
		ipc_port_release_send(old_sself);

	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		if (IP_VALID(old_exc_actions[i])) {
			ipc_port_release_send(old_exc_actions[i]);
		}
	}/* for */

	/* destroy the kernel port */
	ipc_port_dealloc_kernel(old_kport);
}
480
481 /*
482 * Routine: ipc_thread_init
483 * Purpose:
484 * Initialize a thread's IPC state.
485 * Conditions:
486 * Nothing locked.
487 */
488
489 void
490 ipc_thread_init(
491 thread_t thread)
492 {
493 ipc_port_t kport;
494
495 kport = ipc_port_alloc_kernel();
496 if (kport == IP_NULL)
497 panic("ipc_thread_init");
498
499 thread->ith_self = kport;
500 thread->ith_sself = ipc_port_make_send(kport);
501 thread->ith_special_reply_port = NULL;
502 thread->exc_actions = NULL;
503
504 ipc_kobject_set(kport, (ipc_kobject_t)thread, IKOT_THREAD);
505
506 #if IMPORTANCE_INHERITANCE
507 thread->ith_assertions = 0;
508 #endif
509
510 ipc_kmsg_queue_init(&thread->ith_messages);
511
512 thread->ith_rpc_reply = IP_NULL;
513 }
514
515 void
516 ipc_thread_init_exc_actions(
517 thread_t thread)
518 {
519 assert(thread->exc_actions == NULL);
520
521 thread->exc_actions = kalloc(sizeof(struct exception_action) * EXC_TYPES_COUNT);
522 bzero(thread->exc_actions, sizeof(struct exception_action) * EXC_TYPES_COUNT);
523
524 #if CONFIG_MACF
525 for (size_t i = 0; i < EXC_TYPES_COUNT; ++i) {
526 mac_exc_associate_action_label(thread->exc_actions + i, mac_exc_create_label());
527 }
528 #endif
529 }
530
531 void
532 ipc_thread_destroy_exc_actions(
533 thread_t thread)
534 {
535 if (thread->exc_actions != NULL) {
536 #if CONFIG_MACF
537 for (size_t i = 0; i < EXC_TYPES_COUNT; ++i) {
538 mac_exc_free_action_label(thread->exc_actions + i);
539 }
540 #endif
541
542 kfree(thread->exc_actions,
543 sizeof(struct exception_action) * EXC_TYPES_COUNT);
544 thread->exc_actions = NULL;
545 }
546 }
547
548 void
549 ipc_thread_disable(
550 thread_t thread)
551 {
552 ipc_port_t kport = thread->ith_self;
553
554 if (kport != IP_NULL)
555 ipc_kobject_set(kport, IKO_NULL, IKOT_NONE);
556 }
557
558 /*
559 * Routine: ipc_thread_terminate
560 * Purpose:
561 * Clean up and destroy a thread's IPC state.
562 * Conditions:
563 * Nothing locked.
564 */
565
void
ipc_thread_terminate(
	thread_t	thread)
{
	ipc_port_t kport = thread->ith_self;

	if (kport != IP_NULL) {
		int i;

		/* release the thread's naked send right on itself */
		if (IP_VALID(thread->ith_sself))
			ipc_port_release_send(thread->ith_sself);

		thread->ith_sself = thread->ith_self = IP_NULL;

		/* release every exception-port send right, then the
		 * action table itself */
		if (thread->exc_actions != NULL) {
			for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
				if (IP_VALID(thread->exc_actions[i].port))
					ipc_port_release_send(thread->exc_actions[i].port);
			}
			ipc_thread_destroy_exc_actions(thread);
		}

		ipc_port_dealloc_kernel(kport);
	}

#if IMPORTANCE_INHERITANCE
	assert(thread->ith_assertions == 0);
#endif

	/* unbind the thread special reply port */
	if (IP_VALID(thread->ith_special_reply_port)) {
		ipc_port_unbind_special_reply_port(thread, TRUE);
	}

	assert(ipc_kmsg_queue_empty(&thread->ith_messages));

	if (thread->ith_rpc_reply != IP_NULL)
		ipc_port_dealloc_reply(thread->ith_rpc_reply);

	thread->ith_rpc_reply = IP_NULL;
}
607
608 /*
609 * Routine: ipc_thread_reset
610 * Purpose:
611 * Reset the IPC state for a given Mach thread when
612 * its task enters an elevated security context.
613 * Both the thread port and its exception ports have
614 * to be reset. Its RPC reply port cannot have any
615 * rights outstanding, so it should be fine.
616 * Conditions:
617 * Nothing locked.
618 */
619
620 void
621 ipc_thread_reset(
622 thread_t thread)
623 {
624 ipc_port_t old_kport, new_kport;
625 ipc_port_t old_sself;
626 ipc_port_t old_exc_actions[EXC_TYPES_COUNT];
627 boolean_t has_old_exc_actions = FALSE;
628 int i;
629
630 #if CONFIG_MACF
631 struct label *new_label = mac_exc_create_label();
632 #endif
633
634 new_kport = ipc_port_alloc_kernel();
635 if (new_kport == IP_NULL)
636 panic("ipc_task_reset");
637
638 thread_mtx_lock(thread);
639
640 old_kport = thread->ith_self;
641
642 if (old_kport == IP_NULL && thread->inspection == FALSE) {
643 /* the is already terminated (can this happen?) */
644 thread_mtx_unlock(thread);
645 ipc_port_dealloc_kernel(new_kport);
646 #if CONFIG_MACF
647 mac_exc_free_label(new_label);
648 #endif
649 return;
650 }
651
652 thread->ith_self = new_kport;
653 old_sself = thread->ith_sself;
654 thread->ith_sself = ipc_port_make_send(new_kport);
655 if (old_kport != IP_NULL) {
656 ipc_kobject_set(old_kport, IKO_NULL, IKOT_NONE);
657 }
658 ipc_kobject_set(new_kport, (ipc_kobject_t) thread, IKOT_THREAD);
659
660 /*
661 * Only ports that were set by root-owned processes
662 * (privileged ports) should survive
663 */
664 if (thread->exc_actions != NULL) {
665 has_old_exc_actions = TRUE;
666 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
667 if (thread->exc_actions[i].privileged) {
668 old_exc_actions[i] = IP_NULL;
669 } else {
670 #if CONFIG_MACF
671 mac_exc_update_action_label(thread->exc_actions + i, new_label);
672 #endif
673 old_exc_actions[i] = thread->exc_actions[i].port;
674 thread->exc_actions[i].port = IP_NULL;
675 }
676 }
677 }
678
679 thread_mtx_unlock(thread);
680
681 #if CONFIG_MACF
682 mac_exc_free_label(new_label);
683 #endif
684
685 /* release the naked send rights */
686
687 if (IP_VALID(old_sself))
688 ipc_port_release_send(old_sself);
689
690 if (has_old_exc_actions) {
691 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
692 ipc_port_release_send(old_exc_actions[i]);
693 }
694 }
695
696 /* destroy the kernel port */
697 if (old_kport != IP_NULL) {
698 ipc_port_dealloc_kernel(old_kport);
699 }
700
701 /* unbind the thread special reply port */
702 if (IP_VALID(thread->ith_special_reply_port)) {
703 ipc_port_unbind_special_reply_port(thread, TRUE);
704 }
705 }
706
707 /*
708 * Routine: retrieve_task_self_fast
709 * Purpose:
710 * Optimized version of retrieve_task_self,
711 * that only works for the current task.
712 *
713 * Return a send right (possibly null/dead)
714 * for the task's user-visible self port.
715 * Conditions:
716 * Nothing locked.
717 */
718
719 ipc_port_t
720 retrieve_task_self_fast(
721 task_t task)
722 {
723 ipc_port_t port;
724
725 assert(task == current_task());
726
727 itk_lock(task);
728 assert(task->itk_self != IP_NULL);
729
730 if ((port = task->itk_sself) == task->itk_self) {
731 /* no interposing */
732
733 ip_lock(port);
734 assert(ip_active(port));
735 ip_reference(port);
736 port->ip_srights++;
737 ip_unlock(port);
738 } else
739 port = ipc_port_copy_send(port);
740 itk_unlock(task);
741
742 return port;
743 }
744
745 /*
746 * Routine: retrieve_thread_self_fast
747 * Purpose:
748 * Return a send right (possibly null/dead)
749 * for the thread's user-visible self port.
750 *
751 * Only works for the current thread.
752 *
753 * Conditions:
754 * Nothing locked.
755 */
756
757 ipc_port_t
758 retrieve_thread_self_fast(
759 thread_t thread)
760 {
761 ipc_port_t port;
762
763 assert(thread == current_thread());
764
765 thread_mtx_lock(thread);
766
767 assert(thread->ith_self != IP_NULL);
768
769 if ((port = thread->ith_sself) == thread->ith_self) {
770 /* no interposing */
771
772 ip_lock(port);
773 assert(ip_active(port));
774 ip_reference(port);
775 port->ip_srights++;
776 ip_unlock(port);
777 }
778 else
779 port = ipc_port_copy_send(port);
780
781 thread_mtx_unlock(thread);
782
783 return port;
784 }
785
786 /*
787 * Routine: task_self_trap [mach trap]
788 * Purpose:
789 * Give the caller send rights for his own task port.
790 * Conditions:
791 * Nothing locked.
792 * Returns:
793 * MACH_PORT_NULL if there are any resource failures
794 * or other errors.
795 */
796
797 mach_port_name_t
798 task_self_trap(
799 __unused struct task_self_trap_args *args)
800 {
801 task_t task = current_task();
802 ipc_port_t sright;
803 mach_port_name_t name;
804
805 sright = retrieve_task_self_fast(task);
806 name = ipc_port_copyout_send(sright, task->itk_space);
807 return name;
808 }
809
810 /*
811 * Routine: thread_self_trap [mach trap]
812 * Purpose:
813 * Give the caller send rights for his own thread port.
814 * Conditions:
815 * Nothing locked.
816 * Returns:
817 * MACH_PORT_NULL if there are any resource failures
818 * or other errors.
819 */
820
821 mach_port_name_t
822 thread_self_trap(
823 __unused struct thread_self_trap_args *args)
824 {
825 thread_t thread = current_thread();
826 task_t task = thread->task;
827 ipc_port_t sright;
828 mach_port_name_t name;
829
830 sright = retrieve_thread_self_fast(thread);
831 name = ipc_port_copyout_send(sright, task->itk_space);
832 return name;
833
834 }
835
836 /*
837 * Routine: mach_reply_port [mach trap]
838 * Purpose:
839 * Allocate a port for the caller.
840 * Conditions:
841 * Nothing locked.
842 * Returns:
843 * MACH_PORT_NULL if there are any resource failures
844 * or other errors.
845 */
846
847 mach_port_name_t
848 mach_reply_port(
849 __unused struct mach_reply_port_args *args)
850 {
851 ipc_port_t port;
852 mach_port_name_t name;
853 kern_return_t kr;
854
855 kr = ipc_port_alloc(current_task()->itk_space, &name, &port);
856 if (kr == KERN_SUCCESS)
857 ip_unlock(port);
858 else
859 name = MACH_PORT_NULL;
860 return name;
861 }
862
863 /*
864 * Routine: thread_get_special_reply_port [mach trap]
865 * Purpose:
866 * Allocate a special reply port for the calling thread.
867 * Conditions:
868 * Nothing locked.
869 * Returns:
870 * MACH_PORT_NULL if there are any resource failures
871 * or other errors.
872 */
873
mach_port_name_t
thread_get_special_reply_port(
	__unused struct thread_get_special_reply_port_args *args)
{
	ipc_port_t port;
	mach_port_name_t name;
	kern_return_t kr;
	thread_t thread = current_thread();

	/* unbind the thread special reply port (a thread has at most
	 * one; any previous binding is dropped first) */
	if (IP_VALID(thread->ith_special_reply_port)) {
		kr = ipc_port_unbind_special_reply_port(thread, TRUE);
		if (kr != KERN_SUCCESS) {
			return MACH_PORT_NULL;
		}
	}

	/* allocate a fresh port in the caller's space and bind it while
	 * it is still locked from ipc_port_alloc */
	kr = ipc_port_alloc(current_task()->itk_space, &name, &port);
	if (kr == KERN_SUCCESS) {
		ipc_port_bind_special_reply_port_locked(port);
		ip_unlock(port);
	} else {
		name = MACH_PORT_NULL;
	}
	return name;
}
900
901 /*
902 * Routine: ipc_port_bind_special_reply_port_locked
903 * Purpose:
904 * Bind the given port to current thread as a special reply port.
905 * Conditions:
906 * Port locked.
907 * Returns:
908 * None.
909 */
910
911 static void
912 ipc_port_bind_special_reply_port_locked(
913 ipc_port_t port)
914 {
915 thread_t thread = current_thread();
916 assert(thread->ith_special_reply_port == NULL);
917
918 ip_reference(port);
919 thread->ith_special_reply_port = port;
920 port->ip_specialreply = 1;
921 port->ip_link_sync_qos = 1;
922 }
923
924 /*
925 * Routine: ipc_port_unbind_special_reply_port
926 * Purpose:
927 * Unbind the thread's special reply port.
 *		If the special port is linked to a port, adjust its sync qos delta.
929 * Condition:
930 * Nothing locked.
931 * Returns:
932 * None.
933 */
static kern_return_t
ipc_port_unbind_special_reply_port(
	thread_t		thread,
	boolean_t		unbind_active_port)
{
	/* NOTE(review): callers are expected to have checked
	 * IP_VALID(thread->ith_special_reply_port) before calling;
	 * ip_lock below assumes the port is valid. */
	ipc_port_t special_reply_port = thread->ith_special_reply_port;

	ip_lock(special_reply_port);

	/* Return error if port active and unbind_active_port set to FALSE */
	if (unbind_active_port == FALSE && ip_active(special_reply_port)) {
		ip_unlock(special_reply_port);
		return KERN_FAILURE;
	}

	thread->ith_special_reply_port = NULL;
	ipc_port_unlink_special_reply_port_locked(special_reply_port, NULL,
		IPC_PORT_UNLINK_SR_CLEAR_SPECIAL_REPLY);
	/* port unlocked */

	/* drop the reference taken when the port was bound */
	ip_release(special_reply_port);
	return KERN_SUCCESS;
}
957
958 /*
959 * Routine: thread_get_special_port [kernel call]
960 * Purpose:
961 * Clones a send right for one of the thread's
962 * special ports.
963 * Conditions:
964 * Nothing locked.
965 * Returns:
966 * KERN_SUCCESS Extracted a send right.
967 * KERN_INVALID_ARGUMENT The thread is null.
968 * KERN_FAILURE The thread is dead.
969 * KERN_INVALID_ARGUMENT Invalid special port.
970 */
971
972 kern_return_t
973 thread_get_special_port(
974 thread_t thread,
975 int which,
976 ipc_port_t *portp)
977 {
978 kern_return_t result = KERN_SUCCESS;
979 ipc_port_t *whichp;
980
981 if (thread == THREAD_NULL)
982 return (KERN_INVALID_ARGUMENT);
983
984 switch (which) {
985
986 case THREAD_KERNEL_PORT:
987 whichp = &thread->ith_sself;
988 break;
989
990 default:
991 return (KERN_INVALID_ARGUMENT);
992 }
993
994 thread_mtx_lock(thread);
995
996 if (thread->active)
997 *portp = ipc_port_copy_send(*whichp);
998 else
999 result = KERN_FAILURE;
1000
1001 thread_mtx_unlock(thread);
1002
1003 return (result);
1004 }
1005
1006 /*
1007 * Routine: thread_set_special_port [kernel call]
1008 * Purpose:
1009 * Changes one of the thread's special ports,
1010 * setting it to the supplied send right.
1011 * Conditions:
1012 * Nothing locked. If successful, consumes
1013 * the supplied send right.
1014 * Returns:
1015 * KERN_SUCCESS Changed the special port.
1016 * KERN_INVALID_ARGUMENT The thread is null.
1017 * KERN_FAILURE The thread is dead.
1018 * KERN_INVALID_ARGUMENT Invalid special port.
1019 */
1020
1021 kern_return_t
1022 thread_set_special_port(
1023 thread_t thread,
1024 int which,
1025 ipc_port_t port)
1026 {
1027 kern_return_t result = KERN_SUCCESS;
1028 ipc_port_t *whichp, old = IP_NULL;
1029
1030 if (thread == THREAD_NULL)
1031 return (KERN_INVALID_ARGUMENT);
1032
1033 switch (which) {
1034
1035 case THREAD_KERNEL_PORT:
1036 whichp = &thread->ith_sself;
1037 break;
1038
1039 default:
1040 return (KERN_INVALID_ARGUMENT);
1041 }
1042
1043 thread_mtx_lock(thread);
1044
1045 if (thread->active) {
1046 old = *whichp;
1047 *whichp = port;
1048 }
1049 else
1050 result = KERN_FAILURE;
1051
1052 thread_mtx_unlock(thread);
1053
1054 if (IP_VALID(old))
1055 ipc_port_release_send(old);
1056
1057 return (result);
1058 }
1059
1060 /*
1061 * Routine: task_get_special_port [kernel call]
1062 * Purpose:
1063 * Clones a send right for one of the task's
1064 * special ports.
1065 * Conditions:
1066 * Nothing locked.
1067 * Returns:
1068 * KERN_SUCCESS Extracted a send right.
1069 * KERN_INVALID_ARGUMENT The task is null.
1070 * KERN_FAILURE The task/space is dead.
1071 * KERN_INVALID_ARGUMENT Invalid special port.
1072 */
1073
1074 kern_return_t
1075 task_get_special_port(
1076 task_t task,
1077 int which,
1078 ipc_port_t *portp)
1079 {
1080 ipc_port_t port;
1081
1082 if (task == TASK_NULL)
1083 return KERN_INVALID_ARGUMENT;
1084
1085 itk_lock(task);
1086 if (task->itk_self == IP_NULL) {
1087 itk_unlock(task);
1088 return KERN_FAILURE;
1089 }
1090
1091 switch (which) {
1092 case TASK_KERNEL_PORT:
1093 port = ipc_port_copy_send(task->itk_sself);
1094 break;
1095
1096 case TASK_NAME_PORT:
1097 port = ipc_port_make_send(task->itk_nself);
1098 break;
1099
1100 case TASK_HOST_PORT:
1101 port = ipc_port_copy_send(task->itk_host);
1102 break;
1103
1104 case TASK_BOOTSTRAP_PORT:
1105 port = ipc_port_copy_send(task->itk_bootstrap);
1106 break;
1107
1108 case TASK_SEATBELT_PORT:
1109 port = ipc_port_copy_send(task->itk_seatbelt);
1110 break;
1111
1112 case TASK_ACCESS_PORT:
1113 port = ipc_port_copy_send(task->itk_task_access);
1114 break;
1115
1116 case TASK_DEBUG_CONTROL_PORT:
1117 port = ipc_port_copy_send(task->itk_debug_control);
1118 break;
1119
1120 default:
1121 itk_unlock(task);
1122 return KERN_INVALID_ARGUMENT;
1123 }
1124 itk_unlock(task);
1125
1126 *portp = port;
1127 return KERN_SUCCESS;
1128 }
1129
1130 /*
1131 * Routine: task_set_special_port [kernel call]
1132 * Purpose:
1133 * Changes one of the task's special ports,
1134 * setting it to the supplied send right.
1135 * Conditions:
1136 * Nothing locked. If successful, consumes
1137 * the supplied send right.
1138 * Returns:
1139 * KERN_SUCCESS Changed the special port.
1140 * KERN_INVALID_ARGUMENT The task is null.
1141 * KERN_FAILURE The task/space is dead.
1142 * KERN_INVALID_ARGUMENT Invalid special port.
1143 * KERN_NO_ACCESS Attempted overwrite of seatbelt port.
1144 */
1145
1146 kern_return_t
1147 task_set_special_port(
1148 task_t task,
1149 int which,
1150 ipc_port_t port)
1151 {
1152 ipc_port_t *whichp;
1153 ipc_port_t old;
1154
1155 if (task == TASK_NULL)
1156 return KERN_INVALID_ARGUMENT;
1157
1158 switch (which) {
1159 case TASK_KERNEL_PORT:
1160 whichp = &task->itk_sself;
1161 break;
1162
1163 case TASK_HOST_PORT:
1164 whichp = &task->itk_host;
1165 break;
1166
1167 case TASK_BOOTSTRAP_PORT:
1168 whichp = &task->itk_bootstrap;
1169 break;
1170
1171 case TASK_SEATBELT_PORT:
1172 whichp = &task->itk_seatbelt;
1173 break;
1174
1175 case TASK_ACCESS_PORT:
1176 whichp = &task->itk_task_access;
1177 break;
1178
1179 case TASK_DEBUG_CONTROL_PORT:
1180 whichp = &task->itk_debug_control;
1181 break;
1182
1183 default:
1184 return KERN_INVALID_ARGUMENT;
1185 }/* switch */
1186
1187 itk_lock(task);
1188 if (task->itk_self == IP_NULL) {
1189 itk_unlock(task);
1190 return KERN_FAILURE;
1191 }
1192
1193 /* do not allow overwrite of seatbelt or task access ports */
1194 if ((TASK_SEATBELT_PORT == which || TASK_ACCESS_PORT == which)
1195 && IP_VALID(*whichp)) {
1196 itk_unlock(task);
1197 return KERN_NO_ACCESS;
1198 }
1199
1200 old = *whichp;
1201 *whichp = port;
1202 itk_unlock(task);
1203
1204 if (IP_VALID(old))
1205 ipc_port_release_send(old);
1206 return KERN_SUCCESS;
1207 }
1208
1209
1210 /*
1211 * Routine: mach_ports_register [kernel call]
1212 * Purpose:
1213 * Stash a handful of port send rights in the task.
1214 * Child tasks will inherit these rights, but they
1215 * must use mach_ports_lookup to acquire them.
1216 *
1217 * The rights are supplied in a (wired) kalloc'd segment.
1218 * Rights which aren't supplied are assumed to be null.
1219 * Conditions:
1220 * Nothing locked. If successful, consumes
1221 * the supplied rights and memory.
1222 * Returns:
1223 * KERN_SUCCESS Stashed the port rights.
1224 * KERN_INVALID_ARGUMENT The task is null.
1225 * KERN_INVALID_ARGUMENT The task is dead.
1226 * KERN_INVALID_ARGUMENT The memory param is null.
1227 * KERN_INVALID_ARGUMENT Too many port rights supplied.
1228 */
1229
kern_return_t
mach_ports_register(
	task_t			task,
	mach_port_array_t	memory,
	mach_msg_type_number_t	portsCnt)
{
	ipc_port_t ports[TASK_PORT_REGISTER_MAX];
	unsigned int i;

	if ((task == TASK_NULL) ||
	    (portsCnt > TASK_PORT_REGISTER_MAX) ||
	    (portsCnt && memory == NULL))
		return KERN_INVALID_ARGUMENT;

	/*
	 *	Pad the port rights with nulls.
	 */

	for (i = 0; i < portsCnt; i++)
		ports[i] = memory[i];
	for (; i < TASK_PORT_REGISTER_MAX; i++)
		ports[i] = IP_NULL;

	itk_lock(task);
	if (task->itk_self == IP_NULL) {
		/* task has terminated */
		itk_unlock(task);
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 *	Replace the old send rights with the new.
	 *	Release the old rights after unlocking.
	 */

	for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
		ipc_port_t old;

		old = task->itk_registered[i];
		task->itk_registered[i] = ports[i];
		/* reuse the local array to carry the displaced rights
		 * out of the locked region */
		ports[i] = old;
	}

	itk_unlock(task);

	for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
		if (IP_VALID(ports[i]))
			ipc_port_release_send(ports[i]);

	/*
	 *	Now that the operation is known to be successful,
	 *	we can free the memory.
	 */

	if (portsCnt != 0)
		kfree(memory,
		      (vm_size_t) (portsCnt * sizeof(mach_port_t)));

	return KERN_SUCCESS;
}
1289
1290 /*
1291 * Routine: mach_ports_lookup [kernel call]
1292 * Purpose:
1293 * Retrieves (clones) the stashed port send rights.
1294 * Conditions:
1295 * Nothing locked. If successful, the caller gets
1296 * rights and memory.
1297 * Returns:
1298 * KERN_SUCCESS Retrieved the send rights.
1299 * KERN_INVALID_ARGUMENT The task is null.
1300 * KERN_INVALID_ARGUMENT The task is dead.
1301 * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
1302 */
1303
1304 kern_return_t
1305 mach_ports_lookup(
1306 task_t task,
1307 mach_port_array_t *portsp,
1308 mach_msg_type_number_t *portsCnt)
1309 {
1310 void *memory;
1311 vm_size_t size;
1312 ipc_port_t *ports;
1313 int i;
1314
1315 if (task == TASK_NULL)
1316 return KERN_INVALID_ARGUMENT;
1317
1318 size = (vm_size_t) (TASK_PORT_REGISTER_MAX * sizeof(ipc_port_t));
1319
1320 memory = kalloc(size);
1321 if (memory == 0)
1322 return KERN_RESOURCE_SHORTAGE;
1323
1324 itk_lock(task);
1325 if (task->itk_self == IP_NULL) {
1326 itk_unlock(task);
1327
1328 kfree(memory, size);
1329 return KERN_INVALID_ARGUMENT;
1330 }
1331
1332 ports = (ipc_port_t *) memory;
1333
1334 /*
1335 * Clone port rights. Because kalloc'd memory
1336 * is wired, we won't fault while holding the task lock.
1337 */
1338
1339 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
1340 ports[i] = ipc_port_copy_send(task->itk_registered[i]);
1341
1342 itk_unlock(task);
1343
1344 *portsp = (mach_port_array_t) ports;
1345 *portsCnt = TASK_PORT_REGISTER_MAX;
1346 return KERN_SUCCESS;
1347 }
1348
/*
 * Routine:	task_conversion_eval
 * Purpose:
 *	Policy check: decide whether "caller" may convert a port
 *	naming "victim" into a full task reference.
 * Returns:
 *	KERN_SUCCESS		conversion permitted.
 *	KERN_INVALID_SECURITY	conversion denied.
 * Conditions:
 *	Nothing locked.
 */
kern_return_t
task_conversion_eval(task_t caller, task_t victim)
{
	/*
	 * Tasks are allowed to resolve their own task ports, and the kernel is
	 * allowed to resolve anyone's task port.
	 */
	if (caller == kernel_task) {
		return KERN_SUCCESS;
	}

	if (caller == victim) {
		return KERN_SUCCESS;
	}

	/*
	 * Only the kernel can resolve the kernel's task port. We've established
	 * by this point that the caller is not kernel_task.
	 */
	if (victim == kernel_task) {
		return KERN_INVALID_SECURITY;
	}

#if CONFIG_EMBEDDED
	/*
	 * On embedded platforms, only a platform binary can resolve the task port
	 * of another platform binary.
	 */
	if ((victim->t_flags & TF_PLATFORM) && !(caller->t_flags & TF_PLATFORM)) {
#if SECURE_KERNEL
		return KERN_INVALID_SECURITY;
#else
		/* non-secure kernels may relax this via boot-arg/sysctl */
		if (cs_relax_platform_task_ports) {
			return KERN_SUCCESS;
		} else {
			return KERN_INVALID_SECURITY;
		}
#endif /* SECURE_KERNEL */
	}
#endif /* CONFIG_EMBEDDED */

	return KERN_SUCCESS;
}
1392
1393 /*
1394 * Routine: convert_port_to_locked_task
1395 * Purpose:
1396 * Internal helper routine to convert from a port to a locked
1397 * task. Used by several routines that try to convert from a
1398 * task port to a reference on some task related object.
1399 * Conditions:
1400 * Nothing locked, blocking OK.
1401 */
task_t
convert_port_to_locked_task(ipc_port_t port)
{
	int try_failed_count = 0;

	while (IP_VALID(port)) {
		task_t ct = current_task();
		task_t task;

		ip_lock(port);
		/* port must still be live and actually name a task */
		if (!ip_active(port) || (ip_kotype(port) != IKOT_TASK)) {
			ip_unlock(port);
			return TASK_NULL;
		}
		task = (task_t) port->ip_kobject;
		assert(task != TASK_NULL);

		/* enforce the task-port conversion security policy */
		if (task_conversion_eval(ct, task)) {
			ip_unlock(port);
			return TASK_NULL;
		}

		/*
		 * Normal lock ordering puts task_lock() before ip_lock().
		 * Attempt out-of-order locking here.
		 * (On failure, drop the port lock, back off, and retry
		 * the whole validation from scratch.)
		 */
		if (task_lock_try(task)) {
			ip_unlock(port);
			return(task);
		}
		try_failed_count++;

		ip_unlock(port);
		mutex_pause(try_failed_count);
	}
	return TASK_NULL;
}
1439
1440 /*
1441 * Routine: convert_port_to_locked_task_inspect
1442 * Purpose:
1443 * Internal helper routine to convert from a port to a locked
1444 * task inspect right. Used by internal routines that try to convert from a
1445 * task inspect port to a reference on some task related object.
1446 * Conditions:
1447 * Nothing locked, blocking OK.
1448 */
task_inspect_t
convert_port_to_locked_task_inspect(ipc_port_t port)
{
	int try_failed_count = 0;

	while (IP_VALID(port)) {
		task_inspect_t task;

		ip_lock(port);
		/* port must still be live and actually name a task */
		if (!ip_active(port) || (ip_kotype(port) != IKOT_TASK)) {
			ip_unlock(port);
			return TASK_INSPECT_NULL;
		}
		task = (task_inspect_t)port->ip_kobject;
		assert(task != TASK_INSPECT_NULL);
		/*
		 * Normal lock ordering puts task_lock() before ip_lock().
		 * Attempt out-of-order locking here.
		 * (No task_conversion_eval() here: inspect rights carry
		 * weaker privileges than full task ports.)
		 */
		if (task_lock_try((task_t)task)) {
			ip_unlock(port);
			return task;
		}
		try_failed_count++;

		ip_unlock(port);
		mutex_pause(try_failed_count);
	}
	return TASK_INSPECT_NULL;
}
1479
1480
1481 /*
1482 * Routine: convert_port_to_task
1483 * Purpose:
1484 * Convert from a port to a task.
1485 * Doesn't consume the port ref; produces a task ref,
1486 * which may be null.
1487 * Conditions:
1488 * Nothing locked.
1489 */
task_t
convert_port_to_task(
	ipc_port_t		port)
{
	/* Thin wrapper: same conversion, exec token discarded. */
	return convert_port_to_task_with_exec_token(port, NULL);
}
1496
1497 /*
1498 * Routine: convert_port_to_task_with_exec_token
1499 * Purpose:
1500 * Convert from a port to a task and return
1501 * the exec token stored in the task.
1502 * Doesn't consume the port ref; produces a task ref,
1503 * which may be null.
1504 * Conditions:
1505 * Nothing locked.
1506 */
1507 task_t
1508 convert_port_to_task_with_exec_token(
1509 ipc_port_t port,
1510 uint32_t *exec_token)
1511 {
1512 task_t task = TASK_NULL;
1513
1514 if (IP_VALID(port)) {
1515 ip_lock(port);
1516
1517 if ( ip_active(port) &&
1518 ip_kotype(port) == IKOT_TASK ) {
1519 task_t ct = current_task();
1520 task = (task_t)port->ip_kobject;
1521 assert(task != TASK_NULL);
1522
1523 if (task_conversion_eval(ct, task)) {
1524 ip_unlock(port);
1525 return TASK_NULL;
1526 }
1527
1528 if (exec_token) {
1529 *exec_token = task->exec_token;
1530 }
1531 task_reference_internal(task);
1532 }
1533
1534 ip_unlock(port);
1535 }
1536
1537 return (task);
1538 }
1539
1540 /*
1541 * Routine: convert_port_to_task_name
1542 * Purpose:
1543 * Convert from a port to a task name.
1544 * Doesn't consume the port ref; produces a task name ref,
1545 * which may be null.
1546 * Conditions:
1547 * Nothing locked.
1548 */
1549 task_name_t
1550 convert_port_to_task_name(
1551 ipc_port_t port)
1552 {
1553 task_name_t task = TASK_NULL;
1554
1555 if (IP_VALID(port)) {
1556 ip_lock(port);
1557
1558 if ( ip_active(port) &&
1559 (ip_kotype(port) == IKOT_TASK ||
1560 ip_kotype(port) == IKOT_TASK_NAME)) {
1561 task = (task_name_t)port->ip_kobject;
1562 assert(task != TASK_NAME_NULL);
1563
1564 task_reference_internal(task);
1565 }
1566
1567 ip_unlock(port);
1568 }
1569
1570 return (task);
1571 }
1572
1573 /*
1574 * Routine: convert_port_to_task_inspect
1575 * Purpose:
1576 * Convert from a port to a task inspection right
1577 * Doesn't consume the port ref; produces a task ref,
1578 * which may be null.
1579 * Conditions:
1580 * Nothing locked.
1581 */
1582 task_inspect_t
1583 convert_port_to_task_inspect(
1584 ipc_port_t port)
1585 {
1586 task_inspect_t task = TASK_INSPECT_NULL;
1587
1588 if (IP_VALID(port)) {
1589 ip_lock(port);
1590
1591 if (ip_active(port) &&
1592 ip_kotype(port) == IKOT_TASK) {
1593 task = (task_inspect_t)port->ip_kobject;
1594 assert(task != TASK_INSPECT_NULL);
1595
1596 task_reference_internal(task);
1597 }
1598
1599 ip_unlock(port);
1600 }
1601
1602 return (task);
1603 }
1604
1605 /*
1606 * Routine: convert_port_to_task_suspension_token
1607 * Purpose:
1608 * Convert from a port to a task suspension token.
1609 * Doesn't consume the port ref; produces a suspension token ref,
1610 * which may be null.
1611 * Conditions:
1612 * Nothing locked.
1613 */
1614 task_suspension_token_t
1615 convert_port_to_task_suspension_token(
1616 ipc_port_t port)
1617 {
1618 task_suspension_token_t task = TASK_NULL;
1619
1620 if (IP_VALID(port)) {
1621 ip_lock(port);
1622
1623 if ( ip_active(port) &&
1624 ip_kotype(port) == IKOT_TASK_RESUME) {
1625 task = (task_suspension_token_t)port->ip_kobject;
1626 assert(task != TASK_NULL);
1627
1628 task_reference_internal(task);
1629 }
1630
1631 ip_unlock(port);
1632 }
1633
1634 return (task);
1635 }
1636
1637 /*
1638 * Routine: convert_port_to_space
1639 * Purpose:
1640 * Convert from a port to a space.
1641 * Doesn't consume the port ref; produces a space ref,
1642 * which may be null.
1643 * Conditions:
1644 * Nothing locked.
1645 */
1646 ipc_space_t
1647 convert_port_to_space(
1648 ipc_port_t port)
1649 {
1650 ipc_space_t space;
1651 task_t task;
1652
1653 task = convert_port_to_locked_task(port);
1654
1655 if (task == TASK_NULL)
1656 return IPC_SPACE_NULL;
1657
1658 if (!task->active) {
1659 task_unlock(task);
1660 return IPC_SPACE_NULL;
1661 }
1662
1663 space = task->itk_space;
1664 is_reference(space);
1665 task_unlock(task);
1666 return (space);
1667 }
1668
1669 /*
1670 * Routine: convert_port_to_space_inspect
1671 * Purpose:
1672 * Convert from a port to a space inspect right.
1673 * Doesn't consume the port ref; produces a space inspect ref,
1674 * which may be null.
1675 * Conditions:
1676 * Nothing locked.
1677 */
1678 ipc_space_inspect_t
1679 convert_port_to_space_inspect(
1680 ipc_port_t port)
1681 {
1682 ipc_space_inspect_t space;
1683 task_inspect_t task;
1684
1685 task = convert_port_to_locked_task_inspect(port);
1686
1687 if (task == TASK_INSPECT_NULL)
1688 return IPC_SPACE_INSPECT_NULL;
1689
1690 if (!task->active) {
1691 task_unlock(task);
1692 return IPC_SPACE_INSPECT_NULL;
1693 }
1694
1695 space = (ipc_space_inspect_t)task->itk_space;
1696 is_reference((ipc_space_t)space);
1697 task_unlock((task_t)task);
1698 return space;
1699 }
1700
1701 /*
1702 * Routine: convert_port_to_map
1703 * Purpose:
1704 * Convert from a port to a map.
1705 * Doesn't consume the port ref; produces a map ref,
1706 * which may be null.
1707 * Conditions:
1708 * Nothing locked.
1709 */
1710
1711 vm_map_t
1712 convert_port_to_map(
1713 ipc_port_t port)
1714 {
1715 task_t task;
1716 vm_map_t map;
1717
1718 task = convert_port_to_locked_task(port);
1719
1720 if (task == TASK_NULL)
1721 return VM_MAP_NULL;
1722
1723 if (!task->active) {
1724 task_unlock(task);
1725 return VM_MAP_NULL;
1726 }
1727
1728 map = task->map;
1729 vm_map_reference_swap(map);
1730 task_unlock(task);
1731 return map;
1732 }
1733
1734
1735 /*
1736 * Routine: convert_port_to_thread
1737 * Purpose:
1738 * Convert from a port to a thread.
1739 * Doesn't consume the port ref; produces an thread ref,
1740 * which may be null.
1741 * Conditions:
1742 * Nothing locked.
1743 */
1744
1745 thread_t
1746 convert_port_to_thread(
1747 ipc_port_t port)
1748 {
1749 thread_t thread = THREAD_NULL;
1750
1751 if (IP_VALID(port)) {
1752 ip_lock(port);
1753
1754 if ( ip_active(port) &&
1755 ip_kotype(port) == IKOT_THREAD ) {
1756 thread = (thread_t)port->ip_kobject;
1757 assert(thread != THREAD_NULL);
1758 if (thread->task && thread->task == kernel_task &&
1759 current_task() != kernel_task) {
1760 ip_unlock(port);
1761 return THREAD_NULL;
1762 }
1763
1764 thread_reference_internal(thread);
1765 }
1766
1767 ip_unlock(port);
1768 }
1769
1770 return (thread);
1771 }
1772
1773 /*
1774 * Routine: convert_port_to_thread_inspect
1775 * Purpose:
1776 * Convert from a port to a thread inspection right
1777 * Doesn't consume the port ref; produces a thread ref,
1778 * which may be null.
1779 * Conditions:
1780 * Nothing locked.
1781 */
1782 thread_inspect_t
1783 convert_port_to_thread_inspect(
1784 ipc_port_t port)
1785 {
1786 thread_inspect_t thread = THREAD_INSPECT_NULL;
1787
1788 if (IP_VALID(port)) {
1789 ip_lock(port);
1790
1791 if (ip_active(port) &&
1792 ip_kotype(port) == IKOT_THREAD) {
1793 thread = (thread_inspect_t)port->ip_kobject;
1794 assert(thread != THREAD_INSPECT_NULL);
1795 thread_reference_internal((thread_t)thread);
1796 }
1797 ip_unlock(port);
1798 }
1799
1800 return thread;
1801 }
1802
1803 /*
1804 * Routine: convert_thread_inspect_to_port
1805 * Purpose:
1806 * Convert from a thread inspect reference to a port.
1807 * Consumes a thread ref;
1808 * As we never export thread inspect ports, always
1809 * creates a NULL port.
1810 * Conditions:
1811 * Nothing locked.
1812 */
1813
ipc_port_t
convert_thread_inspect_to_port(thread_inspect_t thread)
{
	/* Inspect ports are never exported: consume the ref, return no port. */
	thread_deallocate(thread);
	return IP_NULL;
}
1820
1821
1822 /*
1823 * Routine: port_name_to_thread
1824 * Purpose:
1825 * Convert from a port name to an thread reference
1826 * A name of MACH_PORT_NULL is valid for the null thread.
1827 * Conditions:
1828 * Nothing locked.
1829 *
1830 * TODO: Could this be faster if it were ipc_port_translate_send based, like thread_switch?
1831 * We could avoid extra lock/unlock and extra ref operations on the port.
1832 */
1833 thread_t
1834 port_name_to_thread(
1835 mach_port_name_t name)
1836 {
1837 thread_t thread = THREAD_NULL;
1838 ipc_port_t kport;
1839
1840 if (MACH_PORT_VALID(name)) {
1841 if (ipc_object_copyin(current_space(), name,
1842 MACH_MSG_TYPE_COPY_SEND,
1843 (ipc_object_t *)&kport) != KERN_SUCCESS)
1844 return (THREAD_NULL);
1845
1846 thread = convert_port_to_thread(kport);
1847
1848 if (IP_VALID(kport))
1849 ipc_port_release_send(kport);
1850 }
1851
1852 return (thread);
1853 }
1854
1855 task_t
1856 port_name_to_task(
1857 mach_port_name_t name)
1858 {
1859 ipc_port_t kern_port;
1860 kern_return_t kr;
1861 task_t task = TASK_NULL;
1862
1863 if (MACH_PORT_VALID(name)) {
1864 kr = ipc_object_copyin(current_space(), name,
1865 MACH_MSG_TYPE_COPY_SEND,
1866 (ipc_object_t *) &kern_port);
1867 if (kr != KERN_SUCCESS)
1868 return TASK_NULL;
1869
1870 task = convert_port_to_task(kern_port);
1871
1872 if (IP_VALID(kern_port))
1873 ipc_port_release_send(kern_port);
1874 }
1875 return task;
1876 }
1877
1878 task_inspect_t
1879 port_name_to_task_inspect(
1880 mach_port_name_t name)
1881 {
1882 ipc_port_t kern_port;
1883 kern_return_t kr;
1884 task_inspect_t ti = TASK_INSPECT_NULL;
1885
1886 if (MACH_PORT_VALID(name)) {
1887 kr = ipc_object_copyin(current_space(), name,
1888 MACH_MSG_TYPE_COPY_SEND,
1889 (ipc_object_t *)&kern_port);
1890 if (kr != KERN_SUCCESS)
1891 return TASK_NULL;
1892
1893 ti = convert_port_to_task_inspect(kern_port);
1894
1895 if (IP_VALID(kern_port))
1896 ipc_port_release_send(kern_port);
1897 }
1898 return ti;
1899 }
1900
1901 /*
1902 * Routine: port_name_to_host
1903 * Purpose:
1904 * Convert from a port name to a host pointer.
1905 * NOTE: This does _not_ return a +1 reference to the host_t
1906 * Conditions:
1907 * Nothing locked.
1908 */
1909 host_t
1910 port_name_to_host(
1911 mach_port_name_t name)
1912 {
1913
1914 host_t host = HOST_NULL;
1915 kern_return_t kr;
1916 ipc_port_t port;
1917
1918 if (MACH_PORT_VALID(name)) {
1919 kr = ipc_port_translate_send(current_space(), name, &port);
1920 if (kr == KERN_SUCCESS) {
1921 host = convert_port_to_host(port);
1922 ip_unlock(port);
1923 }
1924 }
1925 return host;
1926 }
1927
1928 /*
1929 * Routine: convert_task_to_port
1930 * Purpose:
1931 * Convert from a task to a port.
1932 * Consumes a task ref; produces a naked send right
1933 * which may be invalid.
1934 * Conditions:
1935 * Nothing locked.
1936 */
1937
1938 ipc_port_t
1939 convert_task_to_port(
1940 task_t task)
1941 {
1942 ipc_port_t port;
1943
1944 itk_lock(task);
1945
1946 if (task->itk_self != IP_NULL)
1947 port = ipc_port_make_send(task->itk_self);
1948 else
1949 port = IP_NULL;
1950
1951 itk_unlock(task);
1952
1953 task_deallocate(task);
1954 return port;
1955 }
1956
1957 /*
1958 * Routine: convert_task_inspect_to_port
1959 * Purpose:
1960 * Convert from a task inspect reference to a port.
1961 * Consumes a task ref;
1962 * As we never export task inspect ports, always
1963 * creates a NULL port.
1964 * Conditions:
1965 * Nothing locked.
1966 */
ipc_port_t
convert_task_inspect_to_port(
	task_inspect_t		task)
{
	/* Inspect ports are never exported: consume the ref, return no port. */
	task_deallocate(task);

	return IP_NULL;
}
1975
1976 /*
1977 * Routine: convert_task_suspend_token_to_port
1978 * Purpose:
1979 * Convert from a task suspension token to a port.
1980 * Consumes a task suspension token ref; produces a naked send-once right
1981 * which may be invalid.
1982 * Conditions:
1983 * Nothing locked.
1984 */
ipc_port_t
convert_task_suspension_token_to_port(
	task_suspension_token_t		task)
{
	ipc_port_t port;

	task_lock(task);
	if (task->active) {
		/* lazily create the resume port on first use */
		if (task->itk_resume == IP_NULL) {
			task->itk_resume = ipc_port_alloc_kernel();
			if (!IP_VALID(task->itk_resume)) {
				panic("failed to create resume port");
			}

			ipc_kobject_set(task->itk_resume, (ipc_kobject_t) task, IKOT_TASK_RESUME);
		}

		/*
		 * Create a send-once right for each instance of a direct user-called
		 * task_suspend2 call. Each time one of these send-once rights is abandoned,
		 * the notification handler will resume the target task.
		 */
		port = ipc_port_make_sonce(task->itk_resume);
		assert(IP_VALID(port));
	} else {
		/* terminated task: no right to hand out */
		port = IP_NULL;
	}

	task_unlock(task);
	task_suspension_token_deallocate(task);		/* consume caller's token ref */

	return port;
}
2018
2019
2020 /*
2021 * Routine: convert_task_name_to_port
2022 * Purpose:
2023 * Convert from a task name ref to a port.
2024 * Consumes a task name ref; produces a naked send right
2025 * which may be invalid.
2026 * Conditions:
2027 * Nothing locked.
2028 */
2029
2030 ipc_port_t
2031 convert_task_name_to_port(
2032 task_name_t task_name)
2033 {
2034 ipc_port_t port;
2035
2036 itk_lock(task_name);
2037 if (task_name->itk_nself != IP_NULL)
2038 port = ipc_port_make_send(task_name->itk_nself);
2039 else
2040 port = IP_NULL;
2041 itk_unlock(task_name);
2042
2043 task_name_deallocate(task_name);
2044 return port;
2045 }
2046
2047 /*
2048 * Routine: convert_thread_to_port
2049 * Purpose:
2050 * Convert from a thread to a port.
2051 * Consumes an thread ref; produces a naked send right
2052 * which may be invalid.
2053 * Conditions:
2054 * Nothing locked.
2055 */
2056
2057 ipc_port_t
2058 convert_thread_to_port(
2059 thread_t thread)
2060 {
2061 ipc_port_t port;
2062
2063 thread_mtx_lock(thread);
2064
2065 if (thread->ith_self != IP_NULL)
2066 port = ipc_port_make_send(thread->ith_self);
2067 else
2068 port = IP_NULL;
2069
2070 thread_mtx_unlock(thread);
2071
2072 thread_deallocate(thread);
2073
2074 return (port);
2075 }
2076
2077 /*
2078 * Routine: space_deallocate
2079 * Purpose:
2080 * Deallocate a space ref produced by convert_port_to_space.
2081 * Conditions:
2082 * Nothing locked.
2083 */
2084
2085 void
2086 space_deallocate(
2087 ipc_space_t space)
2088 {
2089 if (space != IS_NULL)
2090 is_release(space);
2091 }
2092
2093 /*
2094 * Routine: space_inspect_deallocate
2095 * Purpose:
2096 * Deallocate a space inspect ref produced by convert_port_to_space_inspect.
2097 * Conditions:
2098 * Nothing locked.
2099 */
2100
2101 void
2102 space_inspect_deallocate(
2103 ipc_space_inspect_t space)
2104 {
2105 if (space != IS_INSPECT_NULL)
2106 is_release((ipc_space_t)space);
2107 }
2108
2109 /*
2110 * Routine: thread/task_set_exception_ports [kernel call]
2111 * Purpose:
2112 * Sets the thread/task exception port, flavor and
2113 * behavior for the exception types specified by the mask.
2114 * There will be one send right per exception per valid
2115 * port.
2116 * Conditions:
2117 * Nothing locked. If successful, consumes
2118 * the supplied send right.
2119 * Returns:
2120 * KERN_SUCCESS Changed the special port.
2121 * KERN_INVALID_ARGUMENT The thread is null,
2122 * Illegal mask bit set.
2123 * Illegal exception behavior
2124 * KERN_FAILURE The thread is dead.
2125 */
2126
2127 kern_return_t
2128 thread_set_exception_ports(
2129 thread_t thread,
2130 exception_mask_t exception_mask,
2131 ipc_port_t new_port,
2132 exception_behavior_t new_behavior,
2133 thread_state_flavor_t new_flavor)
2134 {
2135 ipc_port_t old_port[EXC_TYPES_COUNT];
2136 boolean_t privileged = current_task()->sec_token.val[0] == 0;
2137 register int i;
2138
2139 #if CONFIG_MACF
2140 struct label *new_label;
2141 #endif
2142
2143 if (thread == THREAD_NULL)
2144 return (KERN_INVALID_ARGUMENT);
2145
2146 if (exception_mask & ~EXC_MASK_VALID)
2147 return (KERN_INVALID_ARGUMENT);
2148
2149 if (IP_VALID(new_port)) {
2150 switch (new_behavior & ~MACH_EXCEPTION_CODES) {
2151
2152 case EXCEPTION_DEFAULT:
2153 case EXCEPTION_STATE:
2154 case EXCEPTION_STATE_IDENTITY:
2155 break;
2156
2157 default:
2158 return (KERN_INVALID_ARGUMENT);
2159 }
2160 }
2161
2162 /*
2163 * Check the validity of the thread_state_flavor by calling the
2164 * VALID_THREAD_STATE_FLAVOR architecture dependent macro defined in
2165 * osfmk/mach/ARCHITECTURE/thread_status.h
2166 */
2167 if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor))
2168 return (KERN_INVALID_ARGUMENT);
2169
2170 #if CONFIG_MACF
2171 new_label = mac_exc_create_label_for_current_proc();
2172 #endif
2173
2174 thread_mtx_lock(thread);
2175
2176 if (!thread->active) {
2177 thread_mtx_unlock(thread);
2178
2179 return (KERN_FAILURE);
2180 }
2181
2182 if (thread->exc_actions == NULL) {
2183 ipc_thread_init_exc_actions(thread);
2184 }
2185 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
2186 if ((exception_mask & (1 << i))
2187 #if CONFIG_MACF
2188 && mac_exc_update_action_label(&thread->exc_actions[i], new_label) == 0
2189 #endif
2190 ) {
2191 old_port[i] = thread->exc_actions[i].port;
2192 thread->exc_actions[i].port = ipc_port_copy_send(new_port);
2193 thread->exc_actions[i].behavior = new_behavior;
2194 thread->exc_actions[i].flavor = new_flavor;
2195 thread->exc_actions[i].privileged = privileged;
2196 }
2197 else
2198 old_port[i] = IP_NULL;
2199 }
2200
2201 thread_mtx_unlock(thread);
2202
2203 #if CONFIG_MACF
2204 mac_exc_free_label(new_label);
2205 #endif
2206
2207 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i)
2208 if (IP_VALID(old_port[i]))
2209 ipc_port_release_send(old_port[i]);
2210
2211 if (IP_VALID(new_port)) /* consume send right */
2212 ipc_port_release_send(new_port);
2213
2214 return (KERN_SUCCESS);
2215 }
2216
2217 kern_return_t
2218 task_set_exception_ports(
2219 task_t task,
2220 exception_mask_t exception_mask,
2221 ipc_port_t new_port,
2222 exception_behavior_t new_behavior,
2223 thread_state_flavor_t new_flavor)
2224 {
2225 ipc_port_t old_port[EXC_TYPES_COUNT];
2226 boolean_t privileged = current_task()->sec_token.val[0] == 0;
2227 register int i;
2228
2229 #if CONFIG_MACF
2230 struct label *new_label;
2231 #endif
2232
2233 if (task == TASK_NULL)
2234 return (KERN_INVALID_ARGUMENT);
2235
2236 if (exception_mask & ~EXC_MASK_VALID)
2237 return (KERN_INVALID_ARGUMENT);
2238
2239 if (IP_VALID(new_port)) {
2240 switch (new_behavior & ~MACH_EXCEPTION_CODES) {
2241
2242 case EXCEPTION_DEFAULT:
2243 case EXCEPTION_STATE:
2244 case EXCEPTION_STATE_IDENTITY:
2245 break;
2246
2247 default:
2248 return (KERN_INVALID_ARGUMENT);
2249 }
2250 }
2251
2252 /*
2253 * Check the validity of the thread_state_flavor by calling the
2254 * VALID_THREAD_STATE_FLAVOR architecture dependent macro defined in
2255 * osfmk/mach/ARCHITECTURE/thread_status.h
2256 */
2257 if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor))
2258 return (KERN_INVALID_ARGUMENT);
2259
2260 #if CONFIG_MACF
2261 new_label = mac_exc_create_label_for_current_proc();
2262 #endif
2263
2264 itk_lock(task);
2265
2266 if (task->itk_self == IP_NULL) {
2267 itk_unlock(task);
2268
2269 return (KERN_FAILURE);
2270 }
2271
2272 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
2273 if ((exception_mask & (1 << i))
2274 #if CONFIG_MACF
2275 && mac_exc_update_action_label(&task->exc_actions[i], new_label) == 0
2276 #endif
2277 ) {
2278 old_port[i] = task->exc_actions[i].port;
2279 task->exc_actions[i].port =
2280 ipc_port_copy_send(new_port);
2281 task->exc_actions[i].behavior = new_behavior;
2282 task->exc_actions[i].flavor = new_flavor;
2283 task->exc_actions[i].privileged = privileged;
2284 }
2285 else
2286 old_port[i] = IP_NULL;
2287 }
2288
2289 itk_unlock(task);
2290
2291 #if CONFIG_MACF
2292 mac_exc_free_label(new_label);
2293 #endif
2294
2295 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i)
2296 if (IP_VALID(old_port[i]))
2297 ipc_port_release_send(old_port[i]);
2298
2299 if (IP_VALID(new_port)) /* consume send right */
2300 ipc_port_release_send(new_port);
2301
2302 return (KERN_SUCCESS);
2303 }
2304
2305 /*
2306 * Routine: thread/task_swap_exception_ports [kernel call]
2307 * Purpose:
2308 * Sets the thread/task exception port, flavor and
2309 * behavior for the exception types specified by the
2310 * mask.
2311 *
2312 * The old ports, behavior and flavors are returned
2313 * Count specifies the array sizes on input and
2314 * the number of returned ports etc. on output. The
2315 * arrays must be large enough to hold all the returned
2316 * data, MIG returns an error otherwise. The masks
2317 * array specifies the corresponding exception type(s).
2318 *
2319 * Conditions:
2320 * Nothing locked. If successful, consumes
2321 * the supplied send right.
2322 *
2323 * Returns up to [in] CountCnt elements.
2324 * Returns:
2325 * KERN_SUCCESS Changed the special port.
2326 * KERN_INVALID_ARGUMENT The thread is null,
2327 * Illegal mask bit set.
2328 * Illegal exception behavior
2329 * KERN_FAILURE The thread is dead.
2330 */
2331
kern_return_t
thread_swap_exception_ports(
	thread_t				thread,
	exception_mask_t		exception_mask,
	ipc_port_t				new_port,
	exception_behavior_t	new_behavior,
	thread_state_flavor_t	new_flavor,
	exception_mask_array_t	masks,
	mach_msg_type_number_t	*CountCnt,
	exception_port_array_t	ports,
	exception_behavior_array_t	behaviors,
	thread_state_flavor_array_t	flavors)
{
	ipc_port_t		old_port[EXC_TYPES_COUNT];
	boolean_t privileged = current_task()->sec_token.val[0] == 0;
	unsigned int	i, j, count;

#if CONFIG_MACF
	struct label *new_label;
#endif

	if (thread == THREAD_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (exception_mask & ~EXC_MASK_VALID)
		return (KERN_INVALID_ARGUMENT);

	/* only a known behavior may accompany a valid port */
	if (IP_VALID(new_port)) {
		switch (new_behavior & ~MACH_EXCEPTION_CODES) {

		case EXCEPTION_DEFAULT:
		case EXCEPTION_STATE:
		case EXCEPTION_STATE_IDENTITY:
			break;

		default:
			return (KERN_INVALID_ARGUMENT);
		}
	}

	if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor))
		return (KERN_INVALID_ARGUMENT);

#if CONFIG_MACF
	new_label = mac_exc_create_label_for_current_proc();
#endif

	thread_mtx_lock(thread);

	if (!thread->active) {
		thread_mtx_unlock(thread);

		return (KERN_FAILURE);
	}

	/* exception actions are allocated lazily */
	if (thread->exc_actions == NULL) {
		ipc_thread_init_exc_actions(thread);
	}

	assert(EXC_TYPES_COUNT > FIRST_EXCEPTION);
	/*
	 * The loop may exit early when the caller's output arrays fill
	 * (count == *CountCnt); old_port[] slots past that i are never
	 * written — and never read by the release loop below.
	 */
	for (count = 0, i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT && count < *CountCnt; ++i) {
		if ((exception_mask & (1 << i))
#if CONFIG_MACF
			&& mac_exc_update_action_label(&thread->exc_actions[i], new_label) == 0
#endif
			) {
			for (j = 0; j < count; ++j) {
				/*
				 * search for an identical entry, if found
				 * set corresponding mask for this exception.
				 */
				if (	thread->exc_actions[i].port == ports[j]			&&
						thread->exc_actions[i].behavior == behaviors[j]	&&
						thread->exc_actions[i].flavor == flavors[j]			) {
					masks[j] |= (1 << i);
					break;
				}
			}

			/* no identical entry: append a new output slot */
			if (j == count) {
				masks[j] = (1 << i);
				ports[j] = ipc_port_copy_send(thread->exc_actions[i].port);

				behaviors[j] = thread->exc_actions[i].behavior;
				flavors[j] = thread->exc_actions[i].flavor;
				++count;
			}

			/* install the new action, remembering the displaced port */
			old_port[i] = thread->exc_actions[i].port;
			thread->exc_actions[i].port = ipc_port_copy_send(new_port);
			thread->exc_actions[i].behavior = new_behavior;
			thread->exc_actions[i].flavor = new_flavor;
			thread->exc_actions[i].privileged = privileged;
		}
		else
			old_port[i] = IP_NULL;
	}

	thread_mtx_unlock(thread);

#if CONFIG_MACF
	mac_exc_free_label(new_label);
#endif

	/*
	 * Release displaced rights, walking back down from the last i
	 * the loop above visited.  NOTE(review): i is unsigned, so this
	 * relies on FIRST_EXCEPTION >= 1 to terminate — appears to hold,
	 * but confirm against the exception_types header.
	 */
	while (--i >= FIRST_EXCEPTION) {
		if (IP_VALID(old_port[i]))
			ipc_port_release_send(old_port[i]);
	}

	if (IP_VALID(new_port))		 /* consume send right */
		ipc_port_release_send(new_port);

	*CountCnt = count;

	return (KERN_SUCCESS);
}
2448
kern_return_t
task_swap_exception_ports(
	task_t					task,
	exception_mask_t		exception_mask,
	ipc_port_t				new_port,
	exception_behavior_t	new_behavior,
	thread_state_flavor_t	new_flavor,
	exception_mask_array_t	masks,
	mach_msg_type_number_t	*CountCnt,
	exception_port_array_t	ports,
	exception_behavior_array_t	behaviors,
	thread_state_flavor_array_t	flavors)
{
	ipc_port_t		old_port[EXC_TYPES_COUNT];
	boolean_t privileged = current_task()->sec_token.val[0] == 0;
	unsigned int	i, j, count;

#if CONFIG_MACF
	struct label *new_label;
#endif

	if (task == TASK_NULL)
		return (KERN_INVALID_ARGUMENT);

	if (exception_mask & ~EXC_MASK_VALID)
		return (KERN_INVALID_ARGUMENT);

	/* only a known behavior may accompany a valid port */
	if (IP_VALID(new_port)) {
		switch (new_behavior & ~MACH_EXCEPTION_CODES) {

		case EXCEPTION_DEFAULT:
		case EXCEPTION_STATE:
		case EXCEPTION_STATE_IDENTITY:
			break;

		default:
			return (KERN_INVALID_ARGUMENT);
		}
	}

	if (new_flavor != 0 && !VALID_THREAD_STATE_FLAVOR(new_flavor))
		return (KERN_INVALID_ARGUMENT);

#if CONFIG_MACF
	new_label = mac_exc_create_label_for_current_proc();
#endif

	itk_lock(task);

	/* itk_self == IP_NULL means the task's IPC state was torn down */
	if (task->itk_self == IP_NULL) {
		itk_unlock(task);

		return (KERN_FAILURE);
	}

	assert(EXC_TYPES_COUNT > FIRST_EXCEPTION);
	/*
	 * The loop may exit early when the caller's output arrays fill
	 * (count == *CountCnt); old_port[] slots past that i are never
	 * written — and never read by the release loop below.
	 */
	for (count = 0, i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT && count < *CountCnt; ++i) {
		if ((exception_mask & (1 << i))
#if CONFIG_MACF
			&& mac_exc_update_action_label(&task->exc_actions[i], new_label) == 0
#endif
			) {
			for (j = 0; j < count; j++) {
				/*
				 * search for an identical entry, if found
				 * set corresponding mask for this exception.
				 */
				if (	task->exc_actions[i].port == ports[j]			&&
						task->exc_actions[i].behavior == behaviors[j]	&&
						task->exc_actions[i].flavor == flavors[j]			) {
					masks[j] |= (1 << i);
					break;
				}
			}

			/* no identical entry: append a new output slot */
			if (j == count) {
				masks[j] = (1 << i);
				ports[j] = ipc_port_copy_send(task->exc_actions[i].port);
				behaviors[j] = task->exc_actions[i].behavior;
				flavors[j] = task->exc_actions[i].flavor;
				++count;
			}

			/* install the new action, remembering the displaced port */
			old_port[i] = task->exc_actions[i].port;

			task->exc_actions[i].port =	ipc_port_copy_send(new_port);
			task->exc_actions[i].behavior = new_behavior;
			task->exc_actions[i].flavor = new_flavor;
			task->exc_actions[i].privileged = privileged;
		}
		else
			old_port[i] = IP_NULL;
	}

	itk_unlock(task);

#if CONFIG_MACF
	mac_exc_free_label(new_label);
#endif

	/*
	 * Release displaced rights, walking back down from the last i
	 * the loop above visited.  NOTE(review): i is unsigned, so this
	 * relies on FIRST_EXCEPTION >= 1 to terminate — appears to hold,
	 * but confirm against the exception_types header.
	 */
	while (--i >= FIRST_EXCEPTION) {
		if (IP_VALID(old_port[i]))
			ipc_port_release_send(old_port[i]);
	}

	if (IP_VALID(new_port))		 /* consume send right */
		ipc_port_release_send(new_port);

	*CountCnt = count;

	return (KERN_SUCCESS);
}
2561
2562 /*
2563 * Routine: thread/task_get_exception_ports [kernel call]
2564 * Purpose:
2565 * Clones a send right for each of the thread/task's exception
2566 * ports specified in the mask and returns the behaviour
2567 * and flavor of said port.
2568 *
2569  *	Returns up to [in] CountCnt elements.
2570 *
2571 * Conditions:
2572 * Nothing locked.
2573 * Returns:
2574 * KERN_SUCCESS Extracted a send right.
2575 * KERN_INVALID_ARGUMENT The thread is null,
2576 * Invalid special port,
2577 * Illegal mask bit set.
2578 * KERN_FAILURE The thread is dead.
2579 */
2580
2581 kern_return_t
2582 thread_get_exception_ports(
2583 thread_t thread,
2584 exception_mask_t exception_mask,
2585 exception_mask_array_t masks,
2586 mach_msg_type_number_t *CountCnt,
2587 exception_port_array_t ports,
2588 exception_behavior_array_t behaviors,
2589 thread_state_flavor_array_t flavors)
2590 {
2591 unsigned int i, j, count;
2592
2593 if (thread == THREAD_NULL)
2594 return (KERN_INVALID_ARGUMENT);
2595
2596 if (exception_mask & ~EXC_MASK_VALID)
2597 return (KERN_INVALID_ARGUMENT);
2598
2599 thread_mtx_lock(thread);
2600
2601 if (!thread->active) {
2602 thread_mtx_unlock(thread);
2603
2604 return (KERN_FAILURE);
2605 }
2606
2607 count = 0;
2608
2609 if (thread->exc_actions == NULL) {
2610 goto done;
2611 }
2612
2613 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
2614 if (exception_mask & (1 << i)) {
2615 for (j = 0; j < count; ++j) {
2616 /*
2617 * search for an identical entry, if found
2618 * set corresponding mask for this exception.
2619 */
2620 if ( thread->exc_actions[i].port == ports[j] &&
2621 thread->exc_actions[i].behavior ==behaviors[j] &&
2622 thread->exc_actions[i].flavor == flavors[j] ) {
2623 masks[j] |= (1 << i);
2624 break;
2625 }
2626 }
2627
2628 if (j == count) {
2629 masks[j] = (1 << i);
2630 ports[j] = ipc_port_copy_send(thread->exc_actions[i].port);
2631 behaviors[j] = thread->exc_actions[i].behavior;
2632 flavors[j] = thread->exc_actions[i].flavor;
2633 ++count;
2634 if (count >= *CountCnt)
2635 break;
2636 }
2637 }
2638 }
2639
2640 done:
2641 thread_mtx_unlock(thread);
2642
2643 *CountCnt = count;
2644
2645 return (KERN_SUCCESS);
2646 }
2647
2648 kern_return_t
2649 task_get_exception_ports(
2650 task_t task,
2651 exception_mask_t exception_mask,
2652 exception_mask_array_t masks,
2653 mach_msg_type_number_t *CountCnt,
2654 exception_port_array_t ports,
2655 exception_behavior_array_t behaviors,
2656 thread_state_flavor_array_t flavors)
2657 {
2658 unsigned int i, j, count;
2659
2660 if (task == TASK_NULL)
2661 return (KERN_INVALID_ARGUMENT);
2662
2663 if (exception_mask & ~EXC_MASK_VALID)
2664 return (KERN_INVALID_ARGUMENT);
2665
2666 itk_lock(task);
2667
2668 if (task->itk_self == IP_NULL) {
2669 itk_unlock(task);
2670
2671 return (KERN_FAILURE);
2672 }
2673
2674 count = 0;
2675
2676 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
2677 if (exception_mask & (1 << i)) {
2678 for (j = 0; j < count; ++j) {
2679 /*
2680 * search for an identical entry, if found
2681 * set corresponding mask for this exception.
2682 */
2683 if ( task->exc_actions[i].port == ports[j] &&
2684 task->exc_actions[i].behavior == behaviors[j] &&
2685 task->exc_actions[i].flavor == flavors[j] ) {
2686 masks[j] |= (1 << i);
2687 break;
2688 }
2689 }
2690
2691 if (j == count) {
2692 masks[j] = (1 << i);
2693 ports[j] = ipc_port_copy_send(task->exc_actions[i].port);
2694 behaviors[j] = task->exc_actions[i].behavior;
2695 flavors[j] = task->exc_actions[i].flavor;
2696 ++count;
2697 if (count > *CountCnt)
2698 break;
2699 }
2700 }
2701 }
2702
2703 itk_unlock(task);
2704
2705 *CountCnt = count;
2706
2707 return (KERN_SUCCESS);
2708 }