[apple/xnu.git] osfmk/kern/ipc_tt.c (xnu-2422.115.4)
1 /*
2 * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
58 * support for mandatory and extensible security protections. This notice
59 * is included in support of clause 2.2 (b) of the Apple Public License,
60 * Version 2.0.
61 */
62 /*
63 */
64
65 /*
66 * File: ipc_tt.c
67 * Purpose:
68 * Task and thread related IPC functions.
69 */
70
71 #include <mach/mach_types.h>
72 #include <mach/boolean.h>
73 #include <mach/kern_return.h>
74 #include <mach/mach_param.h>
75 #include <mach/task_special_ports.h>
76 #include <mach/thread_special_ports.h>
77 #include <mach/thread_status.h>
78 #include <mach/exception_types.h>
79 #include <mach/memory_object_types.h>
80 #include <mach/mach_traps.h>
81 #include <mach/task_server.h>
82 #include <mach/thread_act_server.h>
83 #include <mach/mach_host_server.h>
84 #include <mach/host_priv_server.h>
85 #include <mach/vm_map_server.h>
86
87 #include <kern/kern_types.h>
88 #include <kern/host.h>
89 #include <kern/ipc_kobject.h>
90 #include <kern/ipc_tt.h>
91 #include <kern/kalloc.h>
92 #include <kern/thread.h>
93 #include <kern/misc_protos.h>
94
95 #include <vm/vm_map.h>
96 #include <vm/vm_pageout.h>
97 #include <vm/vm_protos.h>
98
99 #include <security/mac_mach_internal.h>
100
101 /* forward declarations */
102 task_t convert_port_to_locked_task(ipc_port_t port);
103
104
105 /*
106 * Routine: ipc_task_init
107 * Purpose:
108 * Initialize a task's IPC state.
109 *
110 * If the parent is non-null, some state is inherited from it.
111 * The parent must be appropriately initialized.
112 * Conditions:
113 * Nothing locked.
114 */
115
116 void
117 ipc_task_init(
118 task_t task,
119 task_t parent)
120 {
121 ipc_space_t space;
122 ipc_port_t kport;
123 ipc_port_t nport;
124 kern_return_t kr;
125 int i;
126
127
128 kr = ipc_space_create(&ipc_table_entries[0], &space);
129 if (kr != KERN_SUCCESS)
130 panic("ipc_task_init");
131
132 space->is_task = task;
133
134 kport = ipc_port_alloc_kernel();
135 if (kport == IP_NULL)
136 panic("ipc_task_init");
137
138 nport = ipc_port_alloc_kernel();
139 if (nport == IP_NULL)
140 panic("ipc_task_init");
141
142 itk_lock_init(task);
143 task->itk_self = kport;
144 task->itk_nself = nport;
145 task->itk_resume = IP_NULL; /* Lazily allocated on-demand */
146 task->itk_sself = ipc_port_make_send(kport);
147 task->itk_space = space;
148
149 #if CONFIG_MACF_MACH
150 if (parent)
151 mac_task_label_associate(parent, task, &parent->maclabel,
152 &task->maclabel, &kport->ip_label);
153 else
154 mac_task_label_associate_kernel(task, &task->maclabel, &kport->ip_label);
155 #endif
156
157 if (parent == TASK_NULL) {
158 ipc_port_t port;
159
160 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
161 task->exc_actions[i].port = IP_NULL;
162 }/* for */
163
164 kr = host_get_host_port(host_priv_self(), &port);
165 assert(kr == KERN_SUCCESS);
166 task->itk_host = port;
167
168 task->itk_bootstrap = IP_NULL;
169 task->itk_seatbelt = IP_NULL;
170 task->itk_gssd = IP_NULL;
171 task->itk_task_access = IP_NULL;
172
173 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
174 task->itk_registered[i] = IP_NULL;
175 } else {
176 itk_lock(parent);
177 assert(parent->itk_self != IP_NULL);
178
179 /* inherit registered ports */
180
181 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
182 task->itk_registered[i] =
183 ipc_port_copy_send(parent->itk_registered[i]);
184
185 /* inherit exception and bootstrap ports */
186
187 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
188 task->exc_actions[i].port =
189 ipc_port_copy_send(parent->exc_actions[i].port);
190 task->exc_actions[i].flavor =
191 parent->exc_actions[i].flavor;
192 task->exc_actions[i].behavior =
193 parent->exc_actions[i].behavior;
194 task->exc_actions[i].privileged =
195 parent->exc_actions[i].privileged;
196 }/* for */
197 task->itk_host =
198 ipc_port_copy_send(parent->itk_host);
199
200 task->itk_bootstrap =
201 ipc_port_copy_send(parent->itk_bootstrap);
202
203 task->itk_seatbelt =
204 ipc_port_copy_send(parent->itk_seatbelt);
205
206 task->itk_gssd =
207 ipc_port_copy_send(parent->itk_gssd);
208
209 task->itk_task_access =
210 ipc_port_copy_send(parent->itk_task_access);
211
212 itk_unlock(parent);
213 }
214 }
215
216 /*
217 * Routine: ipc_task_enable
218 * Purpose:
219 * Enable a task for IPC access.
220 * Conditions:
221 * Nothing locked.
222 */
223
224 void
225 ipc_task_enable(
226 task_t task)
227 {
228 ipc_port_t kport;
229 ipc_port_t nport;
230
231 itk_lock(task);
232 kport = task->itk_self;
233 if (kport != IP_NULL)
234 ipc_kobject_set(kport, (ipc_kobject_t) task, IKOT_TASK);
235 nport = task->itk_nself;
236 if (nport != IP_NULL)
237 ipc_kobject_set(nport, (ipc_kobject_t) task, IKOT_TASK_NAME);
238 itk_unlock(task);
239 }
240
241 /*
242 * Routine: ipc_task_disable
243 * Purpose:
244 * Disable IPC access to a task.
245 * Conditions:
246 * Nothing locked.
247 */
248
249 void
250 ipc_task_disable(
251 task_t task)
252 {
253 ipc_port_t kport;
254 ipc_port_t nport;
255 ipc_port_t rport;
256
257 itk_lock(task);
258 kport = task->itk_self;
259 if (kport != IP_NULL)
260 ipc_kobject_set(kport, IKO_NULL, IKOT_NONE);
261 nport = task->itk_nself;
262 if (nport != IP_NULL)
263 ipc_kobject_set(nport, IKO_NULL, IKOT_NONE);
264
265 rport = task->itk_resume;
266 if (rport != IP_NULL) {
267 /*
268 * From this point onwards this task is no longer accepting
269 * resumptions.
270 *
271 * There are still outstanding suspensions on this task,
272 * even as it is being torn down. Disconnect the task
273 * from the rport, thereby "orphaning" the rport. The rport
274 * itself will go away only when the last suspension holder
275 * destroys its send-once right to it -- when the holder either
276 * exits, or tries to actually use that last send-once right to
277 * resume this (now non-existent) task.
278 */
279 ipc_kobject_set(rport, IKO_NULL, IKOT_NONE);
280 }
281 itk_unlock(task);
282 }
283
284 /*
285 * Routine: ipc_task_terminate
286 * Purpose:
287 * Clean up and destroy a task's IPC state.
288 * Conditions:
289 * Nothing locked. The task must be suspended.
290 * (Or the current thread must be in the task.)
291 */
292
293 void
294 ipc_task_terminate(
295 task_t task)
296 {
297 ipc_port_t kport;
298 ipc_port_t nport;
299 ipc_port_t rport;
300 int i;
301
302 itk_lock(task);
303 kport = task->itk_self;
304
305 if (kport == IP_NULL) {
306 /* the task is already terminated (can this happen?) */
307 itk_unlock(task);
308 return;
309 }
310 task->itk_self = IP_NULL;
311
312 nport = task->itk_nself;
313 assert(nport != IP_NULL);
314 task->itk_nself = IP_NULL;
315
316 rport = task->itk_resume;
317 task->itk_resume = IP_NULL;
318
319 itk_unlock(task);
320
321 /* release the naked send rights */
322
323 if (IP_VALID(task->itk_sself))
324 ipc_port_release_send(task->itk_sself);
325
326 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
327 if (IP_VALID(task->exc_actions[i].port)) {
328 ipc_port_release_send(task->exc_actions[i].port);
329 }
330 }
331
332 if (IP_VALID(task->itk_host))
333 ipc_port_release_send(task->itk_host);
334
335 if (IP_VALID(task->itk_bootstrap))
336 ipc_port_release_send(task->itk_bootstrap);
337
338 if (IP_VALID(task->itk_seatbelt))
339 ipc_port_release_send(task->itk_seatbelt);
340
341 if (IP_VALID(task->itk_gssd))
342 ipc_port_release_send(task->itk_gssd);
343
344 if (IP_VALID(task->itk_task_access))
345 ipc_port_release_send(task->itk_task_access);
346
347 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
348 if (IP_VALID(task->itk_registered[i]))
349 ipc_port_release_send(task->itk_registered[i]);
350
351 /* destroy the kernel ports */
352 ipc_port_dealloc_kernel(kport);
353 ipc_port_dealloc_kernel(nport);
354 if (rport != IP_NULL)
355 ipc_port_dealloc_kernel(rport);
356
357 itk_lock_destroy(task);
358 }
359
360 /*
361 * Routine: ipc_task_reset
362 * Purpose:
363 * Reset a task's IPC state to protect it when
364 * it enters an elevated security context. The
365 * task name port can remain the same - since
366 * it represents no specific privilege.
367 * Conditions:
368 * Nothing locked. The task must be suspended.
369 * (Or the current thread must be in the task.)
370 */
371
372 void
373 ipc_task_reset(
374 task_t task)
375 {
376 ipc_port_t old_kport, new_kport;
377 ipc_port_t old_sself;
378 ipc_port_t old_exc_actions[EXC_TYPES_COUNT];
379 int i;
380
381 new_kport = ipc_port_alloc_kernel();
382 if (new_kport == IP_NULL)
383 panic("ipc_task_reset");
384
385 itk_lock(task);
386
387 old_kport = task->itk_self;
388
389 if (old_kport == IP_NULL) {
390 /* the task is already terminated (can this happen?) */
391 itk_unlock(task);
392 ipc_port_dealloc_kernel(new_kport);
393 return;
394 }
395
396 task->itk_self = new_kport;
397 old_sself = task->itk_sself;
398 task->itk_sself = ipc_port_make_send(new_kport);
399 ipc_kobject_set(old_kport, IKO_NULL, IKOT_NONE);
400 ipc_kobject_set(new_kport, (ipc_kobject_t) task, IKOT_TASK);
401
402 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
403 if (!task->exc_actions[i].privileged) {
404 old_exc_actions[i] = task->exc_actions[i].port;
405 task->exc_actions[i].port = IP_NULL;
406 } else {
407 old_exc_actions[i] = IP_NULL;
408 }
409 }/* for */
410
411 itk_unlock(task);
412
413 /* release the naked send rights */
414
415 if (IP_VALID(old_sself))
416 ipc_port_release_send(old_sself);
417
418 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
419 if (IP_VALID(old_exc_actions[i])) {
420 ipc_port_release_send(old_exc_actions[i]);
421 }
422 }/* for */
423
424 /* destroy the kernel port */
425 ipc_port_dealloc_kernel(old_kport);
426 }
427
428 /*
429 * Routine: ipc_thread_init
430 * Purpose:
431 * Initialize a thread's IPC state.
432 * Conditions:
433 * Nothing locked.
434 */
435
436 void
437 ipc_thread_init(
438 thread_t thread)
439 {
440 ipc_port_t kport;
441
442 kport = ipc_port_alloc_kernel();
443 if (kport == IP_NULL)
444 panic("ipc_thread_init");
445
446 thread->ith_self = kport;
447 thread->ith_sself = ipc_port_make_send(kport);
448 thread->exc_actions = NULL;
449
450 ipc_kobject_set(kport, (ipc_kobject_t)thread, IKOT_THREAD);
451
452 #if IMPORTANCE_INHERITANCE
453 thread->ith_assertions = 0;
454 #endif
455
456 ipc_kmsg_queue_init(&thread->ith_messages);
457
458 thread->ith_rpc_reply = IP_NULL;
459 }
460
461 void
462 ipc_thread_init_exc_actions(
463 thread_t thread)
464 {
465 assert(thread->exc_actions == NULL);
466
467 thread->exc_actions = kalloc(sizeof(struct exception_action) * EXC_TYPES_COUNT);
468 bzero(thread->exc_actions, sizeof(struct exception_action) * EXC_TYPES_COUNT);
469 }
470
471 void
472 ipc_thread_destroy_exc_actions(
473 thread_t thread)
474 {
475 if (thread->exc_actions != NULL) {
476 kfree(thread->exc_actions,
477 sizeof(struct exception_action) * EXC_TYPES_COUNT);
478 thread->exc_actions = NULL;
479 }
480 }
481
482 void
483 ipc_thread_disable(
484 thread_t thread)
485 {
486 ipc_port_t kport = thread->ith_self;
487
488 if (kport != IP_NULL)
489 ipc_kobject_set(kport, IKO_NULL, IKOT_NONE);
490 }
491
492 /*
493 * Routine: ipc_thread_terminate
494 * Purpose:
495 * Clean up and destroy a thread's IPC state.
496 * Conditions:
497 * Nothing locked.
498 */
499
500 void
501 ipc_thread_terminate(
502 thread_t thread)
503 {
504 ipc_port_t kport = thread->ith_self;
505
506 if (kport != IP_NULL) {
507 int i;
508
509 if (IP_VALID(thread->ith_sself))
510 ipc_port_release_send(thread->ith_sself);
511
512 thread->ith_sself = thread->ith_self = IP_NULL;
513
514 if (thread->exc_actions != NULL) {
515 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
516 if (IP_VALID(thread->exc_actions[i].port))
517 ipc_port_release_send(thread->exc_actions[i].port);
518 }
519 ipc_thread_destroy_exc_actions(thread);
520 }
521
522 ipc_port_dealloc_kernel(kport);
523 }
524
525 #if IMPORTANCE_INHERITANCE
526 assert(thread->ith_assertions == 0);
527 #endif
528
529 assert(ipc_kmsg_queue_empty(&thread->ith_messages));
530
531 if (thread->ith_rpc_reply != IP_NULL)
532 ipc_port_dealloc_reply(thread->ith_rpc_reply);
533
534 thread->ith_rpc_reply = IP_NULL;
535 }
536
537 /*
538 * Routine: ipc_thread_reset
539 * Purpose:
540 * Reset the IPC state for a given Mach thread when
541 * its task enters an elevated security context.
542 * Both the thread port and its exception ports have
543 * to be reset. Its RPC reply port cannot have any
544 * rights outstanding, so it should be fine.
545 * Conditions:
546 * Nothing locked.
547 */
548
549 void
550 ipc_thread_reset(
551 thread_t thread)
552 {
553 ipc_port_t old_kport, new_kport;
554 ipc_port_t old_sself;
555 ipc_port_t old_exc_actions[EXC_TYPES_COUNT];
556 boolean_t has_old_exc_actions = FALSE;
557 int i;
558
559 new_kport = ipc_port_alloc_kernel();
560 if (new_kport == IP_NULL)
561 panic("ipc_task_reset");
562
563 thread_mtx_lock(thread);
564
565 old_kport = thread->ith_self;
566
567 if (old_kport == IP_NULL) {
568 /* the thread is already terminated (can this happen?) */
569 thread_mtx_unlock(thread);
570 ipc_port_dealloc_kernel(new_kport);
571 return;
572 }
573
574 thread->ith_self = new_kport;
575 old_sself = thread->ith_sself;
576 thread->ith_sself = ipc_port_make_send(new_kport);
577 ipc_kobject_set(old_kport, IKO_NULL, IKOT_NONE);
578 ipc_kobject_set(new_kport, (ipc_kobject_t) thread, IKOT_THREAD);
579
580 /*
581 * Only ports that were set by root-owned processes
582 * (privileged ports) should survive
583 */
584 if (thread->exc_actions != NULL) {
585 has_old_exc_actions = TRUE;
586 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
587 if (thread->exc_actions[i].privileged) {
588 old_exc_actions[i] = IP_NULL;
589 } else {
590 old_exc_actions[i] = thread->exc_actions[i].port;
591 thread->exc_actions[i].port = IP_NULL;
592 }
593 }
594 }
595
596 thread_mtx_unlock(thread);
597
598 /* release the naked send rights */
599
600 if (IP_VALID(old_sself))
601 ipc_port_release_send(old_sself);
602
603 if (has_old_exc_actions) {
604 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
605 ipc_port_release_send(old_exc_actions[i]);
606 }
607 }
608
609 /* destroy the kernel port */
610 ipc_port_dealloc_kernel(old_kport);
611 }
612
613 /*
614 * Routine: retrieve_task_self_fast
615 * Purpose:
616 * Optimized version of retrieve_task_self
617 * that only works for the current task.
618 *
619 * Return a send right (possibly null/dead)
620 * for the task's user-visible self port.
621 * Conditions:
622 * Nothing locked.
623 */
624
625 ipc_port_t
626 retrieve_task_self_fast(
627 register task_t task)
628 {
629 register ipc_port_t port;
630
631 assert(task == current_task());
632
633 itk_lock(task);
634 assert(task->itk_self != IP_NULL);
635
636 if ((port = task->itk_sself) == task->itk_self) {
637 /* no interposing */
638
639 ip_lock(port);
640 assert(ip_active(port));
641 ip_reference(port);
642 port->ip_srights++;
643 ip_unlock(port);
644 } else
645 port = ipc_port_copy_send(port);
646 itk_unlock(task);
647
648 return port;
649 }
650
651 /*
652 * Routine: retrieve_thread_self_fast
653 * Purpose:
654 * Return a send right (possibly null/dead)
655 * for the thread's user-visible self port.
656 *
657 * Only works for the current thread.
658 *
659 * Conditions:
660 * Nothing locked.
661 */
662
663 ipc_port_t
664 retrieve_thread_self_fast(
665 thread_t thread)
666 {
667 register ipc_port_t port;
668
669 assert(thread == current_thread());
670
671 thread_mtx_lock(thread);
672
673 assert(thread->ith_self != IP_NULL);
674
675 if ((port = thread->ith_sself) == thread->ith_self) {
676 /* no interposing */
677
678 ip_lock(port);
679 assert(ip_active(port));
680 ip_reference(port);
681 port->ip_srights++;
682 ip_unlock(port);
683 }
684 else
685 port = ipc_port_copy_send(port);
686
687 thread_mtx_unlock(thread);
688
689 return port;
690 }
691
692 /*
693 * Routine: task_self_trap [mach trap]
694 * Purpose:
695 * Give the caller send rights for his own task port.
696 * Conditions:
697 * Nothing locked.
698 * Returns:
699 * MACH_PORT_NULL if there are any resource failures
700 * or other errors.
701 */
702
703 mach_port_name_t
704 task_self_trap(
705 __unused struct task_self_trap_args *args)
706 {
707 task_t task = current_task();
708 ipc_port_t sright;
709 mach_port_name_t name;
710
711 sright = retrieve_task_self_fast(task);
712 name = ipc_port_copyout_send(sright, task->itk_space);
713 return name;
714 }
715
716 /*
717 * Routine: thread_self_trap [mach trap]
718 * Purpose:
719 * Give the caller send rights for his own thread port.
720 * Conditions:
721 * Nothing locked.
722 * Returns:
723 * MACH_PORT_NULL if there are any resource failures
724 * or other errors.
725 */
726
727 mach_port_name_t
728 thread_self_trap(
729 __unused struct thread_self_trap_args *args)
730 {
731 thread_t thread = current_thread();
732 task_t task = thread->task;
733 ipc_port_t sright;
734 mach_port_name_t name;
735
736 sright = retrieve_thread_self_fast(thread);
737 name = ipc_port_copyout_send(sright, task->itk_space);
738 return name;
739
740 }
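
/*
 * Illustrative user-space sketch (not part of this file): the two traps
 * above back mach_task_self() and mach_thread_self(). In user space
 * mach_task_self() typically expands to a cached port name, while
 * mach_thread_self() mints a fresh send right that the caller owns and
 * should eventually deallocate. Assumes only <mach/mach.h>.
 *
 *	#include <mach/mach.h>
 *
 *	void self_ports_example(void)
 *	{
 *		mach_port_t task = mach_task_self();
 *		mach_port_t thread = mach_thread_self();
 *
 *		// ... use the task and thread ports ...
 *
 *		// Drop the extra send right minted by thread_self_trap.
 *		mach_port_deallocate(task, thread);
 *	}
 */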
741
742 /*
743 * Routine: mach_reply_port [mach trap]
744 * Purpose:
745 * Allocate a port for the caller.
746 * Conditions:
747 * Nothing locked.
748 * Returns:
749 * MACH_PORT_NULL if there are any resource failures
750 * or other errors.
751 */
752
753 mach_port_name_t
754 mach_reply_port(
755 __unused struct mach_reply_port_args *args)
756 {
757 ipc_port_t port;
758 mach_port_name_t name;
759 kern_return_t kr;
760
761 kr = ipc_port_alloc(current_task()->itk_space, &name, &port);
762 if (kr == KERN_SUCCESS)
763 ip_unlock(port);
764 else
765 name = MACH_PORT_NULL;
766 return name;
767 }
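
/*
 * Illustrative user-space sketch (not part of this file): mach_reply_port()
 * is the trap implemented above; it returns a fresh receive right in the
 * caller's IPC space, commonly used as the reply port of an RPC. A minimal
 * use, assuming only <mach/mach.h>:
 *
 *	#include <mach/mach.h>
 *
 *	void reply_port_example(void)
 *	{
 *		mach_port_name_t reply = mach_reply_port();
 *
 *		if (reply == MACH_PORT_NULL)
 *			return;		// resource failure
 *
 *		// ... send a request carrying MACH_MSG_TYPE_MAKE_SEND_ONCE
 *		//     on 'reply', then mach_msg() receive on it ...
 *
 *		mach_port_mod_refs(mach_task_self(), reply,
 *		    MACH_PORT_RIGHT_RECEIVE, -1);
 *	}
 */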
768
769 /*
770 * Routine: thread_get_special_port [kernel call]
771 * Purpose:
772 * Clones a send right for one of the thread's
773 * special ports.
774 * Conditions:
775 * Nothing locked.
776 * Returns:
777 * KERN_SUCCESS Extracted a send right.
778 * KERN_INVALID_ARGUMENT The thread is null.
779 * KERN_FAILURE The thread is dead.
780 * KERN_INVALID_ARGUMENT Invalid special port.
781 */
782
783 kern_return_t
784 thread_get_special_port(
785 thread_t thread,
786 int which,
787 ipc_port_t *portp)
788 {
789 kern_return_t result = KERN_SUCCESS;
790 ipc_port_t *whichp;
791
792 if (thread == THREAD_NULL)
793 return (KERN_INVALID_ARGUMENT);
794
795 switch (which) {
796
797 case THREAD_KERNEL_PORT:
798 whichp = &thread->ith_sself;
799 break;
800
801 default:
802 return (KERN_INVALID_ARGUMENT);
803 }
804
805 thread_mtx_lock(thread);
806
807 if (thread->active)
808 *portp = ipc_port_copy_send(*whichp);
809 else
810 result = KERN_FAILURE;
811
812 thread_mtx_unlock(thread);
813
814 return (result);
815 }
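
/*
 * Illustrative user-space sketch (not part of this file): from user space,
 * thread_get_special_port() with THREAD_KERNEL_PORT clones whatever right
 * is currently installed as the thread's self port. Hypothetical helper,
 * assuming only <mach/mach.h>:
 *
 *	#include <mach/mach.h>
 *
 *	kern_return_t get_thread_kernel_port(thread_act_t thread,
 *	    mach_port_t *out)
 *	{
 *		return thread_get_special_port(thread, THREAD_KERNEL_PORT, out);
 *	}
 */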
816
817 /*
818 * Routine: thread_set_special_port [kernel call]
819 * Purpose:
820 * Changes one of the thread's special ports,
821 * setting it to the supplied send right.
822 * Conditions:
823 * Nothing locked. If successful, consumes
824 * the supplied send right.
825 * Returns:
826 * KERN_SUCCESS Changed the special port.
827 * KERN_INVALID_ARGUMENT The thread is null.
828 * KERN_FAILURE The thread is dead.
829 * KERN_INVALID_ARGUMENT Invalid special port.
830 */
831
832 kern_return_t
833 thread_set_special_port(
834 thread_t thread,
835 int which,
836 ipc_port_t port)
837 {
838 kern_return_t result = KERN_SUCCESS;
839 ipc_port_t *whichp, old = IP_NULL;
840
841 if (thread == THREAD_NULL)
842 return (KERN_INVALID_ARGUMENT);
843
844 switch (which) {
845
846 case THREAD_KERNEL_PORT:
847 whichp = &thread->ith_sself;
848 break;
849
850 default:
851 return (KERN_INVALID_ARGUMENT);
852 }
853
854 thread_mtx_lock(thread);
855
856 if (thread->active) {
857 old = *whichp;
858 *whichp = port;
859 }
860 else
861 result = KERN_FAILURE;
862
863 thread_mtx_unlock(thread);
864
865 if (IP_VALID(old))
866 ipc_port_release_send(old);
867
868 return (result);
869 }
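
/*
 * Illustrative user-space sketch (not part of this file): as documented
 * above, thread_set_special_port() consumes the supplied send right on
 * success, so the caller should not deallocate it afterwards unless the
 * call fails. Hypothetical helper, assuming only <mach/mach.h>:
 *
 *	#include <mach/mach.h>
 *
 *	kern_return_t interpose_thread_port(thread_act_t thread,
 *	    mach_port_t new_self)
 *	{
 *		// On success the right moves to the thread; on failure the
 *		// caller still owns it.
 *		return thread_set_special_port(thread, THREAD_KERNEL_PORT,
 *		    new_self);
 *	}
 */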
870
871 /*
872 * Routine: task_get_special_port [kernel call]
873 * Purpose:
874 * Clones a send right for one of the task's
875 * special ports.
876 * Conditions:
877 * Nothing locked.
878 * Returns:
879 * KERN_SUCCESS Extracted a send right.
880 * KERN_INVALID_ARGUMENT The task is null.
881 * KERN_FAILURE The task/space is dead.
882 * KERN_INVALID_ARGUMENT Invalid special port.
883 */
884
885 kern_return_t
886 task_get_special_port(
887 task_t task,
888 int which,
889 ipc_port_t *portp)
890 {
891 ipc_port_t port;
892
893 if (task == TASK_NULL)
894 return KERN_INVALID_ARGUMENT;
895
896 itk_lock(task);
897 if (task->itk_self == IP_NULL) {
898 itk_unlock(task);
899 return KERN_FAILURE;
900 }
901
902 switch (which) {
903 case TASK_KERNEL_PORT:
904 port = ipc_port_copy_send(task->itk_sself);
905 break;
906
907 case TASK_NAME_PORT:
908 port = ipc_port_make_send(task->itk_nself);
909 break;
910
911 case TASK_HOST_PORT:
912 port = ipc_port_copy_send(task->itk_host);
913 break;
914
915 case TASK_BOOTSTRAP_PORT:
916 port = ipc_port_copy_send(task->itk_bootstrap);
917 break;
918
919 case TASK_SEATBELT_PORT:
920 port = ipc_port_copy_send(task->itk_seatbelt);
921 break;
922
923 case TASK_ACCESS_PORT:
924 port = ipc_port_copy_send(task->itk_task_access);
925 break;
926
927 default:
928 itk_unlock(task);
929 return KERN_INVALID_ARGUMENT;
930 }
931 itk_unlock(task);
932
933 *portp = port;
934 return KERN_SUCCESS;
935 }
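
/*
 * Illustrative user-space sketch (not part of this file): fetching the
 * bootstrap port of the current task, which lands in the TASK_BOOTSTRAP_PORT
 * case of task_get_special_port() above. Hypothetical helper, assuming only
 * <mach/mach.h>:
 *
 *	#include <mach/mach.h>
 *
 *	kern_return_t get_bootstrap_port_example(mach_port_t *bootstrap)
 *	{
 *		return task_get_special_port(mach_task_self(),
 *		    TASK_BOOTSTRAP_PORT, bootstrap);
 *	}
 */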
936
937 /*
938 * Routine: task_set_special_port [kernel call]
939 * Purpose:
940 * Changes one of the task's special ports,
941 * setting it to the supplied send right.
942 * Conditions:
943 * Nothing locked. If successful, consumes
944 * the supplied send right.
945 * Returns:
946 * KERN_SUCCESS Changed the special port.
947 * KERN_INVALID_ARGUMENT The task is null.
948 * KERN_FAILURE The task/space is dead.
949 * KERN_INVALID_ARGUMENT Invalid special port.
950 * KERN_NO_ACCESS Attempted overwrite of seatbelt or task access port.
951 */
952
953 kern_return_t
954 task_set_special_port(
955 task_t task,
956 int which,
957 ipc_port_t port)
958 {
959 ipc_port_t *whichp;
960 ipc_port_t old;
961
962 if (task == TASK_NULL)
963 return KERN_INVALID_ARGUMENT;
964
965 switch (which) {
966 case TASK_KERNEL_PORT:
967 whichp = &task->itk_sself;
968 break;
969
970 case TASK_HOST_PORT:
971 whichp = &task->itk_host;
972 break;
973
974 case TASK_BOOTSTRAP_PORT:
975 whichp = &task->itk_bootstrap;
976 break;
977
978 case TASK_SEATBELT_PORT:
979 whichp = &task->itk_seatbelt;
980 break;
981
982 case TASK_ACCESS_PORT:
983 whichp = &task->itk_task_access;
984 break;
985
986 default:
987 return KERN_INVALID_ARGUMENT;
988 }/* switch */
989
990 itk_lock(task);
991 if (task->itk_self == IP_NULL) {
992 itk_unlock(task);
993 return KERN_FAILURE;
994 }
995
996 /* do not allow overwrite of seatbelt or task access ports */
997 if ((TASK_SEATBELT_PORT == which || TASK_ACCESS_PORT == which)
998 && IP_VALID(*whichp)) {
999 itk_unlock(task);
1000 return KERN_NO_ACCESS;
1001 }
1002
1003 #if CONFIG_MACF_MACH
1004 if (mac_task_check_service(current_task(), task, "set_special_port")) {
1005 itk_unlock(task);
1006 return KERN_NO_ACCESS;
1007 }
1008 #endif
1009
1010 old = *whichp;
1011 *whichp = port;
1012 itk_unlock(task);
1013
1014 if (IP_VALID(old))
1015 ipc_port_release_send(old);
1016 return KERN_SUCCESS;
1017 }
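
/*
 * Illustrative user-space sketch (not part of this file): replacing the
 * current task's bootstrap port. As documented above, the call consumes the
 * supplied send right on success, and the seatbelt and task access ports
 * cannot be overwritten once valid (KERN_NO_ACCESS). Hypothetical helper,
 * assuming only <mach/mach.h>:
 *
 *	#include <mach/mach.h>
 *
 *	kern_return_t set_bootstrap_port_example(mach_port_t new_bootstrap)
 *	{
 *		return task_set_special_port(mach_task_self(),
 *		    TASK_BOOTSTRAP_PORT, new_bootstrap);
 *	}
 */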
1018
1019
1020 /*
1021 * Routine: mach_ports_register [kernel call]
1022 * Purpose:
1023 * Stash a handful of port send rights in the task.
1024 * Child tasks will inherit these rights, but they
1025 * must use mach_ports_lookup to acquire them.
1026 *
1027 * The rights are supplied in a (wired) kalloc'd segment.
1028 * Rights which aren't supplied are assumed to be null.
1029 * Conditions:
1030 * Nothing locked. If successful, consumes
1031 * the supplied rights and memory.
1032 * Returns:
1033 * KERN_SUCCESS Stashed the port rights.
1034 * KERN_INVALID_ARGUMENT The task is null.
1035 * KERN_INVALID_ARGUMENT The task is dead.
1036 * KERN_INVALID_ARGUMENT The memory param is null.
1037 * KERN_INVALID_ARGUMENT Too many port rights supplied.
1038 */
1039
1040 kern_return_t
1041 mach_ports_register(
1042 task_t task,
1043 mach_port_array_t memory,
1044 mach_msg_type_number_t portsCnt)
1045 {
1046 ipc_port_t ports[TASK_PORT_REGISTER_MAX];
1047 unsigned int i;
1048
1049 if ((task == TASK_NULL) ||
1050 (portsCnt > TASK_PORT_REGISTER_MAX) ||
1051 (portsCnt && memory == NULL))
1052 return KERN_INVALID_ARGUMENT;
1053
1054 /*
1055 * Pad the port rights with nulls.
1056 */
1057
1058 for (i = 0; i < portsCnt; i++)
1059 ports[i] = memory[i];
1060 for (; i < TASK_PORT_REGISTER_MAX; i++)
1061 ports[i] = IP_NULL;
1062
1063 itk_lock(task);
1064 if (task->itk_self == IP_NULL) {
1065 itk_unlock(task);
1066 return KERN_INVALID_ARGUMENT;
1067 }
1068
1069 /*
1070 * Replace the old send rights with the new.
1071 * Release the old rights after unlocking.
1072 */
1073
1074 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
1075 ipc_port_t old;
1076
1077 old = task->itk_registered[i];
1078 task->itk_registered[i] = ports[i];
1079 ports[i] = old;
1080 }
1081
1082 itk_unlock(task);
1083
1084 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
1085 if (IP_VALID(ports[i]))
1086 ipc_port_release_send(ports[i]);
1087
1088 /*
1089 * Now that the operation is known to be successful,
1090 * we can free the memory.
1091 */
1092
1093 if (portsCnt != 0)
1094 kfree(memory,
1095 (vm_size_t) (portsCnt * sizeof(mach_port_t)));
1096
1097 return KERN_SUCCESS;
1098 }
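
/*
 * Illustrative user-space sketch (not part of this file): stashing up to
 * TASK_PORT_REGISTER_MAX send rights in the task so that child tasks can
 * recover them with mach_ports_lookup(). Hypothetical helper, assuming only
 * <mach/mach.h>:
 *
 *	#include <mach/mach.h>
 *
 *	kern_return_t register_ports_example(mach_port_t p0, mach_port_t p1)
 *	{
 *		mach_port_t ports[2] = { p0, p1 };
 *
 *		// Registers send rights that children will inherit.
 *		return mach_ports_register(mach_task_self(), ports, 2);
 *	}
 */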
1099
1100 /*
1101 * Routine: mach_ports_lookup [kernel call]
1102 * Purpose:
1103 * Retrieves (clones) the stashed port send rights.
1104 * Conditions:
1105 * Nothing locked. If successful, the caller gets
1106 * rights and memory.
1107 * Returns:
1108 * KERN_SUCCESS Retrieved the send rights.
1109 * KERN_INVALID_ARGUMENT The task is null.
1110 * KERN_INVALID_ARGUMENT The task is dead.
1111 * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
1112 */
1113
1114 kern_return_t
1115 mach_ports_lookup(
1116 task_t task,
1117 mach_port_array_t *portsp,
1118 mach_msg_type_number_t *portsCnt)
1119 {
1120 void *memory;
1121 vm_size_t size;
1122 ipc_port_t *ports;
1123 int i;
1124
1125 if (task == TASK_NULL)
1126 return KERN_INVALID_ARGUMENT;
1127
1128 size = (vm_size_t) (TASK_PORT_REGISTER_MAX * sizeof(ipc_port_t));
1129
1130 memory = kalloc(size);
1131 if (memory == 0)
1132 return KERN_RESOURCE_SHORTAGE;
1133
1134 itk_lock(task);
1135 if (task->itk_self == IP_NULL) {
1136 itk_unlock(task);
1137
1138 kfree(memory, size);
1139 return KERN_INVALID_ARGUMENT;
1140 }
1141
1142 ports = (ipc_port_t *) memory;
1143
1144 /*
1145 * Clone port rights. Because kalloc'd memory
1146 * is wired, we won't fault while holding the task lock.
1147 */
1148
1149 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
1150 ports[i] = ipc_port_copy_send(task->itk_registered[i]);
1151
1152 itk_unlock(task);
1153
1154 *portsp = (mach_port_array_t) ports;
1155 *portsCnt = TASK_PORT_REGISTER_MAX;
1156 return KERN_SUCCESS;
1157 }
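
/*
 * Illustrative user-space sketch (not part of this file): retrieving the
 * registered rights. The returned array arrives as out-of-line memory in
 * the caller's address space and should be deallocated when done.
 * Hypothetical helper, assuming only <mach/mach.h>:
 *
 *	#include <mach/mach.h>
 *
 *	void lookup_ports_example(void)
 *	{
 *		mach_port_array_t ports = NULL;
 *		mach_msg_type_number_t count = 0;
 *
 *		if (mach_ports_lookup(mach_task_self(), &ports, &count)
 *		    != KERN_SUCCESS)
 *			return;
 *
 *		// ... ports[0..count-1] hold send rights (possibly null) ...
 *
 *		vm_deallocate(mach_task_self(), (vm_address_t)ports,
 *		    count * sizeof (ports[0]));
 *	}
 */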
1158
1159 /*
1160 * Routine: convert_port_to_locked_task
1161 * Purpose:
1162 * Internal helper routine to convert from a port to a locked
1163 * task. Used by several routines that try to convert from a
1164 * task port to a reference on some task related object.
1165 * Conditions:
1166 * Nothing locked, blocking OK.
1167 */
1168 task_t
1169 convert_port_to_locked_task(ipc_port_t port)
1170 {
1171 int try_failed_count = 0;
1172
1173 while (IP_VALID(port)) {
1174 task_t task;
1175
1176 ip_lock(port);
1177 if (!ip_active(port) || (ip_kotype(port) != IKOT_TASK)) {
1178 ip_unlock(port);
1179 return TASK_NULL;
1180 }
1181 task = (task_t) port->ip_kobject;
1182 assert(task != TASK_NULL);
1183
1184 /*
1185 * Normal lock ordering puts task_lock() before ip_lock().
1186 * Attempt out-of-order locking here.
1187 */
1188 if (task_lock_try(task)) {
1189 ip_unlock(port);
1190 return(task);
1191 }
1192 try_failed_count++;
1193
1194 ip_unlock(port);
1195 mutex_pause(try_failed_count);
1196 }
1197 return TASK_NULL;
1198 }
1199
1200 /*
1201 * Routine: convert_port_to_task
1202 * Purpose:
1203 * Convert from a port to a task.
1204 * Doesn't consume the port ref; produces a task ref,
1205 * which may be null.
1206 * Conditions:
1207 * Nothing locked.
1208 */
1209 task_t
1210 convert_port_to_task(
1211 ipc_port_t port)
1212 {
1213 task_t task = TASK_NULL;
1214
1215 if (IP_VALID(port)) {
1216 ip_lock(port);
1217
1218 if ( ip_active(port) &&
1219 ip_kotype(port) == IKOT_TASK ) {
1220 task = (task_t)port->ip_kobject;
1221 assert(task != TASK_NULL);
1222
1223 task_reference_internal(task);
1224 }
1225
1226 ip_unlock(port);
1227 }
1228
1229 return (task);
1230 }
1231
1232 /*
1233 * Routine: convert_port_to_task_name
1234 * Purpose:
1235 * Convert from a port to a task name.
1236 * Doesn't consume the port ref; produces a task name ref,
1237 * which may be null.
1238 * Conditions:
1239 * Nothing locked.
1240 */
1241 task_name_t
1242 convert_port_to_task_name(
1243 ipc_port_t port)
1244 {
1245 task_name_t task = TASK_NULL;
1246
1247 if (IP_VALID(port)) {
1248 ip_lock(port);
1249
1250 if ( ip_active(port) &&
1251 (ip_kotype(port) == IKOT_TASK ||
1252 ip_kotype(port) == IKOT_TASK_NAME)) {
1253 task = (task_name_t)port->ip_kobject;
1254 assert(task != TASK_NAME_NULL);
1255
1256 task_reference_internal(task);
1257 }
1258
1259 ip_unlock(port);
1260 }
1261
1262 return (task);
1263 }
1264
1265 /*
1266 * Routine: convert_port_to_task_suspension_token
1267 * Purpose:
1268 * Convert from a port to a task suspension token.
1269 * Doesn't consume the port ref; produces a suspension token ref,
1270 * which may be null.
1271 * Conditions:
1272 * Nothing locked.
1273 */
1274 task_suspension_token_t
1275 convert_port_to_task_suspension_token(
1276 ipc_port_t port)
1277 {
1278 task_suspension_token_t task = TASK_NULL;
1279
1280 if (IP_VALID(port)) {
1281 ip_lock(port);
1282
1283 if ( ip_active(port) &&
1284 ip_kotype(port) == IKOT_TASK_RESUME) {
1285 task = (task_suspension_token_t)port->ip_kobject;
1286 assert(task != TASK_NULL);
1287
1288 task_reference_internal(task);
1289 }
1290
1291 ip_unlock(port);
1292 }
1293
1294 return (task);
1295 }
1296
1297 /*
1298 * Routine: convert_port_to_space
1299 * Purpose:
1300 * Convert from a port to a space.
1301 * Doesn't consume the port ref; produces a space ref,
1302 * which may be null.
1303 * Conditions:
1304 * Nothing locked.
1305 */
1306 ipc_space_t
1307 convert_port_to_space(
1308 ipc_port_t port)
1309 {
1310 ipc_space_t space;
1311 task_t task;
1312
1313 task = convert_port_to_locked_task(port);
1314
1315 if (task == TASK_NULL)
1316 return IPC_SPACE_NULL;
1317
1318 if (!task->active) {
1319 task_unlock(task);
1320 return IPC_SPACE_NULL;
1321 }
1322
1323 space = task->itk_space;
1324 is_reference(space);
1325 task_unlock(task);
1326 return (space);
1327 }
1328
1329 /*
1330 * Routine: convert_port_to_map
1331 * Purpose:
1332 * Convert from a port to a map.
1333 * Doesn't consume the port ref; produces a map ref,
1334 * which may be null.
1335 * Conditions:
1336 * Nothing locked.
1337 */
1338
1339 vm_map_t
1340 convert_port_to_map(
1341 ipc_port_t port)
1342 {
1343 task_t task;
1344 vm_map_t map;
1345
1346 task = convert_port_to_locked_task(port);
1347
1348 if (task == TASK_NULL)
1349 return VM_MAP_NULL;
1350
1351 if (!task->active) {
1352 task_unlock(task);
1353 return VM_MAP_NULL;
1354 }
1355
1356 map = task->map;
1357 vm_map_reference_swap(map);
1358 task_unlock(task);
1359 return map;
1360 }
1361
1362
1363 /*
1364 * Routine: convert_port_to_thread
1365 * Purpose:
1366 * Convert from a port to a thread.
1367 * Doesn't consume the port ref; produces a thread ref,
1368 * which may be null.
1369 * Conditions:
1370 * Nothing locked.
1371 */
1372
1373 thread_t
1374 convert_port_to_thread(
1375 ipc_port_t port)
1376 {
1377 thread_t thread = THREAD_NULL;
1378
1379 if (IP_VALID(port)) {
1380 ip_lock(port);
1381
1382 if ( ip_active(port) &&
1383 ip_kotype(port) == IKOT_THREAD ) {
1384 thread = (thread_t)port->ip_kobject;
1385 assert(thread != THREAD_NULL);
1386
1387 thread_reference_internal(thread);
1388 }
1389
1390 ip_unlock(port);
1391 }
1392
1393 return (thread);
1394 }
1395
1396 /*
1397 * Routine: port_name_to_thread
1398 * Purpose:
1399 * Convert from a port name to a thread reference.
1400 * A name of MACH_PORT_NULL is valid for the null thread.
1401 * Conditions:
1402 * Nothing locked.
1403 */
1404 thread_t
1405 port_name_to_thread(
1406 mach_port_name_t name)
1407 {
1408 thread_t thread = THREAD_NULL;
1409 ipc_port_t kport;
1410
1411 if (MACH_PORT_VALID(name)) {
1412 if (ipc_object_copyin(current_space(), name,
1413 MACH_MSG_TYPE_COPY_SEND,
1414 (ipc_object_t *)&kport) != KERN_SUCCESS)
1415 return (THREAD_NULL);
1416
1417 thread = convert_port_to_thread(kport);
1418
1419 if (IP_VALID(kport))
1420 ipc_port_release_send(kport);
1421 }
1422
1423 return (thread);
1424 }
1425
1426 task_t
1427 port_name_to_task(
1428 mach_port_name_t name)
1429 {
1430 ipc_port_t kern_port;
1431 kern_return_t kr;
1432 task_t task = TASK_NULL;
1433
1434 if (MACH_PORT_VALID(name)) {
1435 kr = ipc_object_copyin(current_space(), name,
1436 MACH_MSG_TYPE_COPY_SEND,
1437 (ipc_object_t *) &kern_port);
1438 if (kr != KERN_SUCCESS)
1439 return TASK_NULL;
1440
1441 task = convert_port_to_task(kern_port);
1442
1443 if (IP_VALID(kern_port))
1444 ipc_port_release_send(kern_port);
1445 }
1446 return task;
1447 }
1448
1449 /*
1450 * Routine: convert_task_to_port
1451 * Purpose:
1452 * Convert from a task to a port.
1453 * Consumes a task ref; produces a naked send right
1454 * which may be invalid.
1455 * Conditions:
1456 * Nothing locked.
1457 */
1458
1459 ipc_port_t
1460 convert_task_to_port(
1461 task_t task)
1462 {
1463 ipc_port_t port;
1464
1465 itk_lock(task);
1466 if (task->itk_self != IP_NULL)
1467 port = ipc_port_make_send(task->itk_self);
1468 else
1469 port = IP_NULL;
1470 itk_unlock(task);
1471
1472 task_deallocate(task);
1473 return port;
1474 }
1475
1476 /*
1477 * Routine: convert_task_suspend_token_to_port
1478 * Purpose:
1479 * Convert from a task suspension token to a port.
1480 * Consumes a task suspension token ref; produces a naked send-once right
1481 * which may be invalid.
1482 * Conditions:
1483 * Nothing locked.
1484 */
1485 ipc_port_t
1486 convert_task_suspension_token_to_port(
1487 task_suspension_token_t task)
1488 {
1489 ipc_port_t port;
1490
1491 task_lock(task);
1492 if (task->active) {
1493 if (task->itk_resume == IP_NULL) {
1494 task->itk_resume = ipc_port_alloc_kernel();
1495 if (!IP_VALID(task->itk_resume)) {
1496 panic("failed to create resume port");
1497 }
1498
1499 ipc_kobject_set(task->itk_resume, (ipc_kobject_t) task, IKOT_TASK_RESUME);
1500 }
1501
1502 /*
1503 * Create a send-once right for each instance of a direct user-called
1504 * task_suspend2 call. Each time one of these send-once rights is abandoned,
1505 * the notification handler will resume the target task.
1506 */
1507 port = ipc_port_make_sonce(task->itk_resume);
1508 assert(IP_VALID(port));
1509 } else {
1510 port = IP_NULL;
1511 }
1512
1513 task_unlock(task);
1514 task_suspension_token_deallocate(task);
1515
1516 return port;
1517 }
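
/*
 * Illustrative user-space sketch (not part of this file): the send-once
 * rights minted above back task_suspend2()/task_resume2(). Each successful
 * task_suspend2() returns a suspension token; resuming with it (or
 * abandoning it, which fires the send-once notification) drops that one
 * suspension. Hypothetical helper, assuming only <mach/mach.h>:
 *
 *	#include <mach/mach.h>
 *
 *	kern_return_t suspend_briefly_example(task_t task)
 *	{
 *		task_suspension_token_t token = MACH_PORT_NULL;
 *		kern_return_t kr;
 *
 *		kr = task_suspend2(task, &token);
 *		if (kr != KERN_SUCCESS)
 *			return kr;
 *
 *		// ... task is suspended; inspect it here ...
 *
 *		return task_resume2(token);	// consumes the token
 *	}
 */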
1518
1519
1520 /*
1521 * Routine: convert_task_name_to_port
1522 * Purpose:
1523 * Convert from a task name ref to a port.
1524 * Consumes a task name ref; produces a naked send right
1525 * which may be invalid.
1526 * Conditions:
1527 * Nothing locked.
1528 */
1529
1530 ipc_port_t
1531 convert_task_name_to_port(
1532 task_name_t task_name)
1533 {
1534 ipc_port_t port;
1535
1536 itk_lock(task_name);
1537 if (task_name->itk_nself != IP_NULL)
1538 port = ipc_port_make_send(task_name->itk_nself);
1539 else
1540 port = IP_NULL;
1541 itk_unlock(task_name);
1542
1543 task_name_deallocate(task_name);
1544 return port;
1545 }
1546
1547 /*
1548 * Routine: convert_thread_to_port
1549 * Purpose:
1550 * Convert from a thread to a port.
1551 * Consumes a thread ref; produces a naked send right
1552 * which may be invalid.
1553 * Conditions:
1554 * Nothing locked.
1555 */
1556
1557 ipc_port_t
1558 convert_thread_to_port(
1559 thread_t thread)
1560 {
1561 ipc_port_t port;
1562
1563 thread_mtx_lock(thread);
1564
1565 if (thread->ith_self != IP_NULL)
1566 port = ipc_port_make_send(thread->ith_self);
1567 else
1568 port = IP_NULL;
1569
1570 thread_mtx_unlock(thread);
1571
1572 thread_deallocate(thread);
1573
1574 return (port);
1575 }
1576
1577 /*
1578 * Routine: space_deallocate
1579 * Purpose:
1580 * Deallocate a space ref produced by convert_port_to_space.
1581 * Conditions:
1582 * Nothing locked.
1583 */
1584
1585 void
1586 space_deallocate(
1587 ipc_space_t space)
1588 {
1589 if (space != IS_NULL)
1590 is_release(space);
1591 }
1592
1593 /*
1594 * Routine: thread/task_set_exception_ports [kernel call]
1595 * Purpose:
1596 * Sets the thread/task exception port, flavor and
1597 * behavior for the exception types specified by the mask.
1598 * There will be one send right per exception per valid
1599 * port.
1600 * Conditions:
1601 * Nothing locked. If successful, consumes
1602 * the supplied send right.
1603 * Returns:
1604 * KERN_SUCCESS Changed the special port.
1605 * KERN_INVALID_ARGUMENT The thread is null,
1606 * Illegal mask bit set.
1607 * Illegal exception behavior
1608 * KERN_FAILURE The thread is dead.
1609 */
1610
1611 kern_return_t
1612 thread_set_exception_ports(
1613 thread_t thread,
1614 exception_mask_t exception_mask,
1615 ipc_port_t new_port,
1616 exception_behavior_t new_behavior,
1617 thread_state_flavor_t new_flavor)
1618 {
1619 ipc_port_t old_port[EXC_TYPES_COUNT];
1620 boolean_t privileged = current_task()->sec_token.val[0] == 0;
1621 register int i;
1622
1623 if (thread == THREAD_NULL)
1624 return (KERN_INVALID_ARGUMENT);
1625
1626 if (exception_mask & ~EXC_MASK_VALID)
1627 return (KERN_INVALID_ARGUMENT);
1628
1629 if (IP_VALID(new_port)) {
1630 switch (new_behavior & ~MACH_EXCEPTION_CODES) {
1631
1632 case EXCEPTION_DEFAULT:
1633 case EXCEPTION_STATE:
1634 case EXCEPTION_STATE_IDENTITY:
1635 break;
1636
1637 default:
1638 return (KERN_INVALID_ARGUMENT);
1639 }
1640 }
1641
1642 /*
1643 * Check the validity of the thread_state_flavor by calling the
1644 * VALID_THREAD_STATE_FLAVOR architecture dependent macro defined in
1645 * osfmk/mach/ARCHITECTURE/thread_status.h
1646 */
1647 if (!VALID_THREAD_STATE_FLAVOR(new_flavor))
1648 return (KERN_INVALID_ARGUMENT);
1649
1650 thread_mtx_lock(thread);
1651
1652 if (!thread->active) {
1653 thread_mtx_unlock(thread);
1654
1655 return (KERN_FAILURE);
1656 }
1657
1658 if (thread->exc_actions == NULL) {
1659 ipc_thread_init_exc_actions(thread);
1660 }
1661 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
1662 if (exception_mask & (1 << i)) {
1663 old_port[i] = thread->exc_actions[i].port;
1664 thread->exc_actions[i].port = ipc_port_copy_send(new_port);
1665 thread->exc_actions[i].behavior = new_behavior;
1666 thread->exc_actions[i].flavor = new_flavor;
1667 thread->exc_actions[i].privileged = privileged;
1668 }
1669 else
1670 old_port[i] = IP_NULL;
1671 }
1672
1673 thread_mtx_unlock(thread);
1674
1675 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i)
1676 if (IP_VALID(old_port[i]))
1677 ipc_port_release_send(old_port[i]);
1678
1679 if (IP_VALID(new_port)) /* consume send right */
1680 ipc_port_release_send(new_port);
1681
1682 return (KERN_SUCCESS);
1683 }
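
/*
 * Illustrative user-space sketch (not part of this file): directing
 * EXC_BAD_ACCESS for the current thread at a port the caller owns, using
 * the EXCEPTION_DEFAULT behavior validated above. Hypothetical helper,
 * assuming only <mach/mach.h>:
 *
 *	#include <mach/mach.h>
 *
 *	kern_return_t catch_bad_access_example(mach_port_t exc_port)
 *	{
 *		thread_act_t thread = mach_thread_self();
 *		kern_return_t kr;
 *
 *		kr = thread_set_exception_ports(thread, EXC_MASK_BAD_ACCESS,
 *		    exc_port, EXCEPTION_DEFAULT, THREAD_STATE_NONE);
 *
 *		mach_port_deallocate(mach_task_self(), thread);
 *		return kr;
 *	}
 */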
1684
1685 kern_return_t
1686 task_set_exception_ports(
1687 task_t task,
1688 exception_mask_t exception_mask,
1689 ipc_port_t new_port,
1690 exception_behavior_t new_behavior,
1691 thread_state_flavor_t new_flavor)
1692 {
1693 ipc_port_t old_port[EXC_TYPES_COUNT];
1694 boolean_t privileged = current_task()->sec_token.val[0] == 0;
1695 register int i;
1696
1697 if (task == TASK_NULL)
1698 return (KERN_INVALID_ARGUMENT);
1699
1700 if (exception_mask & ~EXC_MASK_VALID)
1701 return (KERN_INVALID_ARGUMENT);
1702
1703 if (IP_VALID(new_port)) {
1704 switch (new_behavior & ~MACH_EXCEPTION_CODES) {
1705
1706 case EXCEPTION_DEFAULT:
1707 case EXCEPTION_STATE:
1708 case EXCEPTION_STATE_IDENTITY:
1709 break;
1710
1711 default:
1712 return (KERN_INVALID_ARGUMENT);
1713 }
1714 }
1715
1716 itk_lock(task);
1717
1718 if (task->itk_self == IP_NULL) {
1719 itk_unlock(task);
1720
1721 return (KERN_FAILURE);
1722 }
1723
1724 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
1725 if (exception_mask & (1 << i)) {
1726 old_port[i] = task->exc_actions[i].port;
1727 task->exc_actions[i].port =
1728 ipc_port_copy_send(new_port);
1729 task->exc_actions[i].behavior = new_behavior;
1730 task->exc_actions[i].flavor = new_flavor;
1731 task->exc_actions[i].privileged = privileged;
1732 }
1733 else
1734 old_port[i] = IP_NULL;
1735 }
1736
1737 itk_unlock(task);
1738
1739 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i)
1740 if (IP_VALID(old_port[i]))
1741 ipc_port_release_send(old_port[i]);
1742
1743 if (IP_VALID(new_port)) /* consume send right */
1744 ipc_port_release_send(new_port);
1745
1746 return (KERN_SUCCESS);
1747 }
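
/*
 * Illustrative user-space sketch (not part of this file): installing a
 * task-wide exception port for all exception types, requesting 64-bit
 * codes via MACH_EXCEPTION_CODES. Hypothetical helper, assuming only
 * <mach/mach.h>:
 *
 *	#include <mach/mach.h>
 *
 *	kern_return_t install_task_exc_port_example(mach_port_t exc_port)
 *	{
 *		return task_set_exception_ports(mach_task_self(),
 *		    EXC_MASK_ALL,
 *		    exc_port,
 *		    EXCEPTION_DEFAULT | MACH_EXCEPTION_CODES,
 *		    THREAD_STATE_NONE);
 *	}
 */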
1748
1749 /*
1750 * Routine: thread/task_swap_exception_ports [kernel call]
1751 * Purpose:
1752 * Sets the thread/task exception port, flavor and
1753 * behavior for the exception types specified by the
1754 * mask.
1755 *
1756 * The old ports, behavior and flavors are returned
1757 * Count specifies the array sizes on input and
1758 * the number of returned ports etc. on output. The
1759 * arrays must be large enough to hold all the returned
1760 * data, MIG returnes an error otherwise. The masks
1761 * array specifies the corresponding exception type(s).
1762 *
1763 * Conditions:
1764 * Nothing locked. If successful, consumes
1765 * the supplied send right.
1766 *
1767 * Returns up to [in] CountCnt elements.
1768 * Returns:
1769 * KERN_SUCCESS Changed the special port.
1770 * KERN_INVALID_ARGUMENT The thread is null,
1771 * Illegal mask bit set.
1772 * Illegal exception behavior
1773 * KERN_FAILURE The thread is dead.
1774 */
1775
1776 kern_return_t
1777 thread_swap_exception_ports(
1778 thread_t thread,
1779 exception_mask_t exception_mask,
1780 ipc_port_t new_port,
1781 exception_behavior_t new_behavior,
1782 thread_state_flavor_t new_flavor,
1783 exception_mask_array_t masks,
1784 mach_msg_type_number_t *CountCnt,
1785 exception_port_array_t ports,
1786 exception_behavior_array_t behaviors,
1787 thread_state_flavor_array_t flavors)
1788 {
1789 ipc_port_t old_port[EXC_TYPES_COUNT];
1790 boolean_t privileged = current_task()->sec_token.val[0] == 0;
1791 unsigned int i, j, count;
1792
1793 if (thread == THREAD_NULL)
1794 return (KERN_INVALID_ARGUMENT);
1795
1796 if (exception_mask & ~EXC_MASK_VALID)
1797 return (KERN_INVALID_ARGUMENT);
1798
1799 if (IP_VALID(new_port)) {
1800 switch (new_behavior & ~MACH_EXCEPTION_CODES) {
1801
1802 case EXCEPTION_DEFAULT:
1803 case EXCEPTION_STATE:
1804 case EXCEPTION_STATE_IDENTITY:
1805 break;
1806
1807 default:
1808 return (KERN_INVALID_ARGUMENT);
1809 }
1810 }
1811
1812 thread_mtx_lock(thread);
1813
1814 if (!thread->active) {
1815 thread_mtx_unlock(thread);
1816
1817 return (KERN_FAILURE);
1818 }
1819
1820 if (thread->exc_actions == NULL) {
1821 ipc_thread_init_exc_actions(thread);
1822 }
1823
1824 assert(EXC_TYPES_COUNT > FIRST_EXCEPTION);
1825 for (count = 0, i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT && count < *CountCnt; ++i) {
1826 if (exception_mask & (1 << i)) {
1827 for (j = 0; j < count; ++j) {
1828 /*
1829 * Search for an identical entry; if found,
1830 * set the corresponding mask for this exception.
1831 */
1832 if ( thread->exc_actions[i].port == ports[j] &&
1833 thread->exc_actions[i].behavior == behaviors[j] &&
1834 thread->exc_actions[i].flavor == flavors[j] ) {
1835 masks[j] |= (1 << i);
1836 break;
1837 }
1838 }
1839
1840 if (j == count) {
1841 masks[j] = (1 << i);
1842 ports[j] = ipc_port_copy_send(thread->exc_actions[i].port);
1843
1844 behaviors[j] = thread->exc_actions[i].behavior;
1845 flavors[j] = thread->exc_actions[i].flavor;
1846 ++count;
1847 }
1848
1849 old_port[i] = thread->exc_actions[i].port;
1850 thread->exc_actions[i].port = ipc_port_copy_send(new_port);
1851 thread->exc_actions[i].behavior = new_behavior;
1852 thread->exc_actions[i].flavor = new_flavor;
1853 thread->exc_actions[i].privileged = privileged;
1854 }
1855 else
1856 old_port[i] = IP_NULL;
1857 }
1858
1859 thread_mtx_unlock(thread);
1860
1861 while (--i >= FIRST_EXCEPTION) {
1862 if (IP_VALID(old_port[i]))
1863 ipc_port_release_send(old_port[i]);
1864 }
1865
1866 if (IP_VALID(new_port)) /* consume send right */
1867 ipc_port_release_send(new_port);
1868
1869 *CountCnt = count;
1870
1871 return (KERN_SUCCESS);
1872 }
1873
1874 kern_return_t
1875 task_swap_exception_ports(
1876 task_t task,
1877 exception_mask_t exception_mask,
1878 ipc_port_t new_port,
1879 exception_behavior_t new_behavior,
1880 thread_state_flavor_t new_flavor,
1881 exception_mask_array_t masks,
1882 mach_msg_type_number_t *CountCnt,
1883 exception_port_array_t ports,
1884 exception_behavior_array_t behaviors,
1885 thread_state_flavor_array_t flavors)
1886 {
1887 ipc_port_t old_port[EXC_TYPES_COUNT];
1888 boolean_t privileged = current_task()->sec_token.val[0] == 0;
1889 unsigned int i, j, count;
1890
1891 if (task == TASK_NULL)
1892 return (KERN_INVALID_ARGUMENT);
1893
1894 if (exception_mask & ~EXC_MASK_VALID)
1895 return (KERN_INVALID_ARGUMENT);
1896
1897 if (IP_VALID(new_port)) {
1898 switch (new_behavior & ~MACH_EXCEPTION_CODES) {
1899
1900 case EXCEPTION_DEFAULT:
1901 case EXCEPTION_STATE:
1902 case EXCEPTION_STATE_IDENTITY:
1903 break;
1904
1905 default:
1906 return (KERN_INVALID_ARGUMENT);
1907 }
1908 }
1909
1910 itk_lock(task);
1911
1912 if (task->itk_self == IP_NULL) {
1913 itk_unlock(task);
1914
1915 return (KERN_FAILURE);
1916 }
1917
1918 assert(EXC_TYPES_COUNT > FIRST_EXCEPTION);
1919 for (count = 0, i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT && count < *CountCnt; ++i) {
1920 if (exception_mask & (1 << i)) {
1921 for (j = 0; j < count; j++) {
1922 /*
1923 * Search for an identical entry; if found,
1924 * set the corresponding mask for this exception.
1925 */
1926 if ( task->exc_actions[i].port == ports[j] &&
1927 task->exc_actions[i].behavior == behaviors[j] &&
1928 task->exc_actions[i].flavor == flavors[j] ) {
1929 masks[j] |= (1 << i);
1930 break;
1931 }
1932 }
1933
1934 if (j == count) {
1935 masks[j] = (1 << i);
1936 ports[j] = ipc_port_copy_send(task->exc_actions[i].port);
1937 behaviors[j] = task->exc_actions[i].behavior;
1938 flavors[j] = task->exc_actions[i].flavor;
1939 ++count;
1940 }
1941
1942 old_port[i] = task->exc_actions[i].port;
1943
1944 task->exc_actions[i].port = ipc_port_copy_send(new_port);
1945 task->exc_actions[i].behavior = new_behavior;
1946 task->exc_actions[i].flavor = new_flavor;
1947 task->exc_actions[i].privileged = privileged;
1948 }
1949 else
1950 old_port[i] = IP_NULL;
1951 }
1952
1953 itk_unlock(task);
1954
1955 while (--i >= FIRST_EXCEPTION) {
1956 if (IP_VALID(old_port[i]))
1957 ipc_port_release_send(old_port[i]);
1958 }
1959
1960 if (IP_VALID(new_port)) /* consume send right */
1961 ipc_port_release_send(new_port);
1962
1963 *CountCnt = count;
1964
1965 return (KERN_SUCCESS);
1966 }
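
/*
 * Illustrative user-space sketch (not part of this file): atomically
 * installing a new task exception port while capturing the previous
 * registrations so they can be restored or forwarded to later. Array sizes
 * follow the [in] CountCnt convention described above. Hypothetical helper,
 * assuming only <mach/mach.h>:
 *
 *	#include <mach/mach.h>
 *
 *	kern_return_t swap_task_exc_ports_example(mach_port_t new_port)
 *	{
 *		exception_mask_t masks[EXC_TYPES_COUNT];
 *		mach_port_t old_ports[EXC_TYPES_COUNT];
 *		exception_behavior_t behaviors[EXC_TYPES_COUNT];
 *		thread_state_flavor_t flavors[EXC_TYPES_COUNT];
 *		mach_msg_type_number_t count = EXC_TYPES_COUNT;
 *
 *		return task_swap_exception_ports(mach_task_self(),
 *		    EXC_MASK_ALL, new_port, EXCEPTION_DEFAULT,
 *		    THREAD_STATE_NONE, masks, &count, old_ports,
 *		    behaviors, flavors);
 *	}
 */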
1967
1968 /*
1969 * Routine: thread/task_get_exception_ports [kernel call]
1970 * Purpose:
1971 * Clones a send right for each of the thread/task's exception
1972 * ports specified in the mask and returns the behavior
1973 * and flavor of said port.
1974 *
1975 * Returns up to [in] CountCnt elements.
1976 *
1977 * Conditions:
1978 * Nothing locked.
1979 * Returns:
1980 * KERN_SUCCESS Extracted a send right.
1981 * KERN_INVALID_ARGUMENT The thread is null,
1982 * Invalid special port,
1983 * Illegal mask bit set.
1984 * KERN_FAILURE The thread is dead.
1985 */
1986
1987 kern_return_t
1988 thread_get_exception_ports(
1989 thread_t thread,
1990 exception_mask_t exception_mask,
1991 exception_mask_array_t masks,
1992 mach_msg_type_number_t *CountCnt,
1993 exception_port_array_t ports,
1994 exception_behavior_array_t behaviors,
1995 thread_state_flavor_array_t flavors)
1996 {
1997 unsigned int i, j, count;
1998
1999 if (thread == THREAD_NULL)
2000 return (KERN_INVALID_ARGUMENT);
2001
2002 if (exception_mask & ~EXC_MASK_VALID)
2003 return (KERN_INVALID_ARGUMENT);
2004
2005 thread_mtx_lock(thread);
2006
2007 if (!thread->active) {
2008 thread_mtx_unlock(thread);
2009
2010 return (KERN_FAILURE);
2011 }
2012
2013 count = 0;
2014
2015 if (thread->exc_actions == NULL) {
2016 goto done;
2017 }
2018
2019 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
2020 if (exception_mask & (1 << i)) {
2021 for (j = 0; j < count; ++j) {
2022 /*
2023 * Search for an identical entry; if found,
2024 * set the corresponding mask for this exception.
2025 */
2026 if ( thread->exc_actions[i].port == ports[j] &&
2027 thread->exc_actions[i].behavior == behaviors[j] &&
2028 thread->exc_actions[i].flavor == flavors[j] ) {
2029 masks[j] |= (1 << i);
2030 break;
2031 }
2032 }
2033
2034 if (j == count) {
2035 masks[j] = (1 << i);
2036 ports[j] = ipc_port_copy_send(thread->exc_actions[i].port);
2037 behaviors[j] = thread->exc_actions[i].behavior;
2038 flavors[j] = thread->exc_actions[i].flavor;
2039 ++count;
2040 if (count >= *CountCnt)
2041 break;
2042 }
2043 }
2044 }
2045
2046 done:
2047 thread_mtx_unlock(thread);
2048
2049 *CountCnt = count;
2050
2051 return (KERN_SUCCESS);
2052 }
2053
2054 kern_return_t
2055 task_get_exception_ports(
2056 task_t task,
2057 exception_mask_t exception_mask,
2058 exception_mask_array_t masks,
2059 mach_msg_type_number_t *CountCnt,
2060 exception_port_array_t ports,
2061 exception_behavior_array_t behaviors,
2062 thread_state_flavor_array_t flavors)
2063 {
2064 unsigned int i, j, count;
2065
2066 if (task == TASK_NULL)
2067 return (KERN_INVALID_ARGUMENT);
2068
2069 if (exception_mask & ~EXC_MASK_VALID)
2070 return (KERN_INVALID_ARGUMENT);
2071
2072 itk_lock(task);
2073
2074 if (task->itk_self == IP_NULL) {
2075 itk_unlock(task);
2076
2077 return (KERN_FAILURE);
2078 }
2079
2080 count = 0;
2081
2082 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
2083 if (exception_mask & (1 << i)) {
2084 for (j = 0; j < count; ++j) {
2085 /*
2086 * Search for an identical entry; if found,
2087 * set the corresponding mask for this exception.
2088 */
2089 if ( task->exc_actions[i].port == ports[j] &&
2090 task->exc_actions[i].behavior == behaviors[j] &&
2091 task->exc_actions[i].flavor == flavors[j] ) {
2092 masks[j] |= (1 << i);
2093 break;
2094 }
2095 }
2096
2097 if (j == count) {
2098 masks[j] = (1 << i);
2099 ports[j] = ipc_port_copy_send(task->exc_actions[i].port);
2100 behaviors[j] = task->exc_actions[i].behavior;
2101 flavors[j] = task->exc_actions[i].flavor;
2102 ++count;
2103 if (count >= *CountCnt)
2104 break;
2105 }
2106 }
2107 }
2108
2109 itk_unlock(task);
2110
2111 *CountCnt = count;
2112
2113 return (KERN_SUCCESS);
2114 }
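
/*
 * Illustrative user-space sketch (not part of this file): reading back the
 * current task-wide exception port registrations. On return, count says how
 * many distinct (port, behavior, flavor) tuples were found, and masks[j]
 * says which exception types share tuple j. Hypothetical helper, assuming
 * only <mach/mach.h>:
 *
 *	#include <mach/mach.h>
 *
 *	kern_return_t dump_task_exc_ports_example(void)
 *	{
 *		exception_mask_t masks[EXC_TYPES_COUNT];
 *		mach_port_t ports[EXC_TYPES_COUNT];
 *		exception_behavior_t behaviors[EXC_TYPES_COUNT];
 *		thread_state_flavor_t flavors[EXC_TYPES_COUNT];
 *		mach_msg_type_number_t count = EXC_TYPES_COUNT;
 *		kern_return_t kr;
 *
 *		kr = task_get_exception_ports(mach_task_self(), EXC_MASK_ALL,
 *		    masks, &count, ports, behaviors, flavors);
 *		if (kr != KERN_SUCCESS)
 *			return kr;
 *
 *		// ... inspect the 'count' returned tuples, then release the
 *		//     cloned send rights in ports[0..count-1] ...
 *
 *		return KERN_SUCCESS;
 *	}
 */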