1 /*
2 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
58 * support for mandatory and extensible security protections. This notice
59 * is included in support of clause 2.2 (b) of the Apple Public License,
60 * Version 2.0.
61 */
62 /*
63 */
64
65 /*
66 * File: ipc_tt.c
67 * Purpose:
68 * Task and thread related IPC functions.
69 */
70
71 #include <mach/mach_types.h>
72 #include <mach/boolean.h>
73 #include <mach/kern_return.h>
74 #include <mach/mach_param.h>
75 #include <mach/task_special_ports.h>
76 #include <mach/thread_special_ports.h>
77 #include <mach/thread_status.h>
78 #include <mach/exception_types.h>
79 #include <mach/memory_object_types.h>
80 #include <mach/mach_traps.h>
81 #include <mach/task_server.h>
82 #include <mach/thread_act_server.h>
83 #include <mach/mach_host_server.h>
84 #include <mach/host_priv_server.h>
85 #include <mach/vm_map_server.h>
86
87 #include <kern/kern_types.h>
88 #include <kern/host.h>
89 #include <kern/ipc_kobject.h>
90 #include <kern/ipc_tt.h>
91 #include <kern/kalloc.h>
92 #include <kern/thread.h>
93 #include <kern/misc_protos.h>
94
95 #include <vm/vm_map.h>
96 #include <vm/vm_pageout.h>
97 #include <vm/vm_protos.h>
98
99 #include <security/mac_mach_internal.h>
100
101 /* forward declarations */
102 task_t convert_port_to_locked_task(ipc_port_t port);
103
104
105 /*
106 * Routine: ipc_task_init
107 * Purpose:
108 * Initialize a task's IPC state.
109 *
110 * If the parent is non-null, some state will be inherited from it.
111 * The parent must be appropriately initialized.
112 * Conditions:
113 * Nothing locked.
114 */
115
116 void
117 ipc_task_init(
118 task_t task,
119 task_t parent)
120 {
121 ipc_space_t space;
122 ipc_port_t kport;
123 ipc_port_t nport;
124 kern_return_t kr;
125 int i;
126
127
128 kr = ipc_space_create(&ipc_table_entries[0], &space);
129 if (kr != KERN_SUCCESS)
130 panic("ipc_task_init");
131
132 space->is_task = task;
133
134 kport = ipc_port_alloc_kernel();
135 if (kport == IP_NULL)
136 panic("ipc_task_init");
137
138 nport = ipc_port_alloc_kernel();
139 if (nport == IP_NULL)
140 panic("ipc_task_init");
141
142 itk_lock_init(task);
143 task->itk_self = kport;
144 task->itk_nself = nport;
145 task->itk_sself = ipc_port_make_send(kport);
146 task->itk_space = space;
147 space->is_fast = FALSE;
148
149 #if CONFIG_MACF_MACH
150 if (parent)
151 mac_task_label_associate(parent, task, &parent->maclabel,
152 &task->maclabel, &kport->ip_label);
153 else
154 mac_task_label_associate_kernel(task, &task->maclabel, &kport->ip_label);
155 #endif
156
157 if (parent == TASK_NULL) {
158 ipc_port_t port;
159
160 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
161 task->exc_actions[i].port = IP_NULL;
162 }/* for */
163
164 kr = host_get_host_port(host_priv_self(), &port);
165 assert(kr == KERN_SUCCESS);
166 task->itk_host = port;
167
168 task->itk_bootstrap = IP_NULL;
169 task->itk_seatbelt = IP_NULL;
170 task->itk_gssd = IP_NULL;
171 task->itk_task_access = IP_NULL;
172
173 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
174 task->itk_registered[i] = IP_NULL;
175 } else {
176 itk_lock(parent);
177 assert(parent->itk_self != IP_NULL);
178
179 /* inherit registered ports */
180
181 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
182 task->itk_registered[i] =
183 ipc_port_copy_send(parent->itk_registered[i]);
184
185 /* inherit exception and bootstrap ports */
186
187 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
188 task->exc_actions[i].port =
189 ipc_port_copy_send(parent->exc_actions[i].port);
190 task->exc_actions[i].flavor =
191 parent->exc_actions[i].flavor;
192 task->exc_actions[i].behavior =
193 parent->exc_actions[i].behavior;
194 task->exc_actions[i].privileged =
195 parent->exc_actions[i].privileged;
196 }/* for */
197 task->itk_host =
198 ipc_port_copy_send(parent->itk_host);
199
200 task->itk_bootstrap =
201 ipc_port_copy_send(parent->itk_bootstrap);
202
203 task->itk_seatbelt =
204 ipc_port_copy_send(parent->itk_seatbelt);
205
206 task->itk_gssd =
207 ipc_port_copy_send(parent->itk_gssd);
208
209 task->itk_task_access =
210 ipc_port_copy_send(parent->itk_task_access);
211
212 itk_unlock(parent);
213 }
214 }
215
216 /*
217 * Routine: ipc_task_enable
218 * Purpose:
219 * Enable a task for IPC access.
220 * Conditions:
221 * Nothing locked.
222 */
223
224 void
225 ipc_task_enable(
226 task_t task)
227 {
228 ipc_port_t kport;
229 ipc_port_t nport;
230
231 itk_lock(task);
232 kport = task->itk_self;
233 if (kport != IP_NULL)
234 ipc_kobject_set(kport, (ipc_kobject_t) task, IKOT_TASK);
235 nport = task->itk_nself;
236 if (nport != IP_NULL)
237 ipc_kobject_set(nport, (ipc_kobject_t) task, IKOT_TASK_NAME);
238 itk_unlock(task);
239 }
240
241 /*
242 * Routine: ipc_task_disable
243 * Purpose:
244 * Disable IPC access to a task.
245 * Conditions:
246 * Nothing locked.
247 */
248
249 void
250 ipc_task_disable(
251 task_t task)
252 {
253 ipc_port_t kport;
254 ipc_port_t nport;
255
256 itk_lock(task);
257 kport = task->itk_self;
258 if (kport != IP_NULL)
259 ipc_kobject_set(kport, IKO_NULL, IKOT_NONE);
260 nport = task->itk_nself;
261 if (nport != IP_NULL)
262 ipc_kobject_set(nport, IKO_NULL, IKOT_NONE);
263 itk_unlock(task);
264 }
265
266 /*
267 * Routine: ipc_task_terminate
268 * Purpose:
269 * Clean up and destroy a task's IPC state.
270 * Conditions:
271 * Nothing locked. The task must be suspended.
272 * (Or the current thread must be in the task.)
273 */
274
275 void
276 ipc_task_terminate(
277 task_t task)
278 {
279 ipc_port_t kport;
280 ipc_port_t nport;
281 int i;
282
283 itk_lock(task);
284 kport = task->itk_self;
285
286 if (kport == IP_NULL) {
287 /* the task is already terminated (can this happen?) */
288 itk_unlock(task);
289 return;
290 }
291 task->itk_self = IP_NULL;
292
293 nport = task->itk_nself;
294 assert(nport != IP_NULL);
295 task->itk_nself = IP_NULL;
296
297 itk_unlock(task);
298
299 /* release the naked send rights */
300
301 if (IP_VALID(task->itk_sself))
302 ipc_port_release_send(task->itk_sself);
303
304 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
305 if (IP_VALID(task->exc_actions[i].port)) {
306 ipc_port_release_send(task->exc_actions[i].port);
307 }
308 }
309
310 if (IP_VALID(task->itk_host))
311 ipc_port_release_send(task->itk_host);
312
313 if (IP_VALID(task->itk_bootstrap))
314 ipc_port_release_send(task->itk_bootstrap);
315
316 if (IP_VALID(task->itk_seatbelt))
317 ipc_port_release_send(task->itk_seatbelt);
318
319 if (IP_VALID(task->itk_gssd))
320 ipc_port_release_send(task->itk_gssd);
321
322 if (IP_VALID(task->itk_task_access))
323 ipc_port_release_send(task->itk_task_access);
324
325 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
326 if (IP_VALID(task->itk_registered[i]))
327 ipc_port_release_send(task->itk_registered[i]);
328
329 ipc_port_release_send(task->wired_ledger_port);
330 ipc_port_release_send(task->paged_ledger_port);
331
332 /* destroy the kernel ports */
333 ipc_port_dealloc_kernel(kport);
334 ipc_port_dealloc_kernel(nport);
335
336 itk_lock_destroy(task);
337 }
338
339 /*
340 * Routine: ipc_task_reset
341 * Purpose:
342 * Reset a task's IPC state to protect it when
343 * it enters an elevated security context. The
344 * task name port can remain the same - since
345 * it represents no specific privilege.
346 * Conditions:
347 * Nothing locked. The task must be suspended.
348 * (Or the current thread must be in the task.)
349 */
350
351 void
352 ipc_task_reset(
353 task_t task)
354 {
355 ipc_port_t old_kport, new_kport;
356 ipc_port_t old_sself;
357 ipc_port_t old_exc_actions[EXC_TYPES_COUNT];
358 int i;
359
360 new_kport = ipc_port_alloc_kernel();
361 if (new_kport == IP_NULL)
362 panic("ipc_task_reset");
363
364 itk_lock(task);
365
366 old_kport = task->itk_self;
367
368 if (old_kport == IP_NULL) {
369 /* the task is already terminated (can this happen?) */
370 itk_unlock(task);
371 ipc_port_dealloc_kernel(new_kport);
372 return;
373 }
374
375 task->itk_self = new_kport;
376 old_sself = task->itk_sself;
377 task->itk_sself = ipc_port_make_send(new_kport);
378 ipc_kobject_set(old_kport, IKO_NULL, IKOT_NONE);
379 ipc_kobject_set(new_kport, (ipc_kobject_t) task, IKOT_TASK);
380
381 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
382 if (!task->exc_actions[i].privileged) {
383 old_exc_actions[i] = task->exc_actions[i].port;
384 task->exc_actions[i].port = IP_NULL;
385 } else {
386 old_exc_actions[i] = IP_NULL;
387 }
388 }/* for */
389
390 itk_unlock(task);
391
392 /* release the naked send rights */
393
394 if (IP_VALID(old_sself))
395 ipc_port_release_send(old_sself);
396
397 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
398 if (IP_VALID(old_exc_actions[i])) {
399 ipc_port_release_send(old_exc_actions[i]);
400 }
401 }/* for */
402
403 /* destroy the kernel port */
404 ipc_port_dealloc_kernel(old_kport);
405 }
406
407 /*
408 * Routine: ipc_thread_init
409 * Purpose:
410 * Initialize a thread's IPC state.
411 * Conditions:
412 * Nothing locked.
413 */
414
415 void
416 ipc_thread_init(
417 thread_t thread)
418 {
419 ipc_port_t kport;
420 int i;
421
422 kport = ipc_port_alloc_kernel();
423 if (kport == IP_NULL)
424 panic("ipc_thread_init");
425
426 thread->ith_self = kport;
427 thread->ith_sself = ipc_port_make_send(kport);
428
429 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i)
430 thread->exc_actions[i].port = IP_NULL;
431
432 ipc_kobject_set(kport, (ipc_kobject_t)thread, IKOT_THREAD);
433
434 ipc_kmsg_queue_init(&thread->ith_messages);
435
436 thread->ith_rpc_reply = IP_NULL;
437 }
438
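/*
 * Routine: ipc_thread_disable
 * Purpose:
 * Disable IPC access to a thread by clearing the
 * kobject binding of its kernel port.
 */
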
439 void
440 ipc_thread_disable(
441 thread_t thread)
442 {
443 ipc_port_t kport = thread->ith_self;
444
445 if (kport != IP_NULL)
446 ipc_kobject_set(kport, IKO_NULL, IKOT_NONE);
447 }
448
449 /*
450 * Routine: ipc_thread_terminate
451 * Purpose:
452 * Clean up and destroy a thread's IPC state.
453 * Conditions:
454 * Nothing locked.
455 */
456
457 void
458 ipc_thread_terminate(
459 thread_t thread)
460 {
461 ipc_port_t kport = thread->ith_self;
462
463 if (kport != IP_NULL) {
464 int i;
465
466 if (IP_VALID(thread->ith_sself))
467 ipc_port_release_send(thread->ith_sself);
468
469 thread->ith_sself = thread->ith_self = IP_NULL;
470
471 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
472 if (IP_VALID(thread->exc_actions[i].port))
473 ipc_port_release_send(thread->exc_actions[i].port);
474 }
475
476 ipc_port_dealloc_kernel(kport);
477 }
478
479 assert(ipc_kmsg_queue_empty(&thread->ith_messages));
480
481 if (thread->ith_rpc_reply != IP_NULL)
482 ipc_port_dealloc_reply(thread->ith_rpc_reply);
483
484 thread->ith_rpc_reply = IP_NULL;
485 }
486
487 /*
488 * Routine: ipc_thread_reset
489 * Purpose:
490 * Reset the IPC state for a given Mach thread when
491 * its task enters an elevated security context.
492 * Both the thread port and its exception ports have
493 * to be reset. Its RPC reply port cannot have any
494 * rights outstanding, so it should be fine.
495 * Conditions:
496 * Nothing locked.
497 */
498
499 void
500 ipc_thread_reset(
501 thread_t thread)
502 {
503 ipc_port_t old_kport, new_kport;
504 ipc_port_t old_sself;
505 ipc_port_t old_exc_actions[EXC_TYPES_COUNT];
506 int i;
507
508 new_kport = ipc_port_alloc_kernel();
509 if (new_kport == IP_NULL)
510 panic("ipc_thread_reset");
511
512 thread_mtx_lock(thread);
513
514 old_kport = thread->ith_self;
515
516 if (old_kport == IP_NULL) {
517 /* the thread is already terminated (can this happen?) */
518 thread_mtx_unlock(thread);
519 ipc_port_dealloc_kernel(new_kport);
520 return;
521 }
522
523 thread->ith_self = new_kport;
524 old_sself = thread->ith_sself;
525 thread->ith_sself = ipc_port_make_send(new_kport);
526 ipc_kobject_set(old_kport, IKO_NULL, IKOT_NONE);
527 ipc_kobject_set(new_kport, (ipc_kobject_t) thread, IKOT_THREAD);
528
529 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
530 if (!thread->exc_actions[i].privileged) {
531 old_exc_actions[i] = thread->exc_actions[i].port;
532 thread->exc_actions[i].port = IP_NULL;
533 } else {
534 old_exc_actions[i] = IP_NULL;
535 }
536 }/* for */
537
538 thread_mtx_unlock(thread);
539
540 /* release the naked send rights */
541
542 if (IP_VALID(old_sself))
543 ipc_port_release_send(old_sself);
544
545 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
546 if (IP_VALID(old_exc_actions[i])) {
547 ipc_port_release_send(old_exc_actions[i]);
548 }
549 }/* for */
550
551 /* destroy the kernel port */
552 ipc_port_dealloc_kernel(old_kport);
553 }
554
555 /*
556 * Routine: retrieve_task_self_fast
557 * Purpose:
558 * Optimized version of retrieve_task_self,
559 * that only works for the current task.
560 *
561 * Return a send right (possibly null/dead)
562 * for the task's user-visible self port.
563 * Conditions:
564 * Nothing locked.
565 */
566
567 ipc_port_t
568 retrieve_task_self_fast(
569 register task_t task)
570 {
571 register ipc_port_t port;
572
573 assert(task == current_task());
574
575 itk_lock(task);
576 assert(task->itk_self != IP_NULL);
577
578 if ((port = task->itk_sself) == task->itk_self) {
579 /* no interposing */
580
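/* copy the send right inline; equivalent to ipc_port_copy_send() since the port is known to be active */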
581 ip_lock(port);
582 assert(ip_active(port));
583 ip_reference(port);
584 port->ip_srights++;
585 ip_unlock(port);
586 } else
587 port = ipc_port_copy_send(port);
588 itk_unlock(task);
589
590 return port;
591 }
592
593 /*
594 * Routine: retrieve_thread_self_fast
595 * Purpose:
596 * Return a send right (possibly null/dead)
597 * for the thread's user-visible self port.
598 *
599 * Only works for the current thread.
600 *
601 * Conditions:
602 * Nothing locked.
603 */
604
605 ipc_port_t
606 retrieve_thread_self_fast(
607 thread_t thread)
608 {
609 register ipc_port_t port;
610
611 assert(thread == current_thread());
612
613 thread_mtx_lock(thread);
614
615 assert(thread->ith_self != IP_NULL);
616
617 if ((port = thread->ith_sself) == thread->ith_self) {
618 /* no interposing */
619
620 ip_lock(port);
621 assert(ip_active(port));
622 ip_reference(port);
623 port->ip_srights++;
624 ip_unlock(port);
625 }
626 else
627 port = ipc_port_copy_send(port);
628
629 thread_mtx_unlock(thread);
630
631 return port;
632 }
633
634 /*
635 * Routine: task_self_trap [mach trap]
636 * Purpose:
637 * Give the caller send rights for his own task port.
638 * Conditions:
639 * Nothing locked.
640 * Returns:
641 * MACH_PORT_NULL if there are any resource failures
642 * or other errors.
643 */
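
/*
 * In user space this trap typically backs mach_task_self(), whose
 * result libsyscall caches at startup.
 */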
644
645 mach_port_name_t
646 task_self_trap(
647 __unused struct task_self_trap_args *args)
648 {
649 task_t task = current_task();
650 ipc_port_t sright;
651 mach_port_name_t name;
652
653 sright = retrieve_task_self_fast(task);
654 name = ipc_port_copyout_send(sright, task->itk_space);
655 return name;
656 }
657
658 /*
659 * Routine: thread_self_trap [mach trap]
660 * Purpose:
661 * Give the caller send rights for his own thread port.
662 * Conditions:
663 * Nothing locked.
664 * Returns:
665 * MACH_PORT_NULL if there are any resource failures
666 * or other errors.
667 */
668
669 mach_port_name_t
670 thread_self_trap(
671 __unused struct thread_self_trap_args *args)
672 {
673 thread_t thread = current_thread();
674 task_t task = thread->task;
675 ipc_port_t sright;
676 mach_port_name_t name;
677
678 sright = retrieve_thread_self_fast(thread);
679 name = ipc_port_copyout_send(sright, task->itk_space);
680 return name;
681
682 }
683
684 /*
685 * Routine: mach_reply_port [mach trap]
686 * Purpose:
687 * Allocate a port for the caller.
688 * Conditions:
689 * Nothing locked.
690 * Returns:
691 * MACH_PORT_NULL if there are any resource failures
692 * or other errors.
693 */
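
/*
 * Illustrative user-space usage (a sketch, not part of this file):
 *
 *	mach_port_t reply = mach_reply_port();
 *	... use reply as the reply port of a message ...
 *	mach_port_mod_refs(mach_task_self(), reply,
 *	    MACH_PORT_RIGHT_RECEIVE, -1);
 */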
694
695 mach_port_name_t
696 mach_reply_port(
697 __unused struct mach_reply_port_args *args)
698 {
699 ipc_port_t port;
700 mach_port_name_t name;
701 kern_return_t kr;
702
703 kr = ipc_port_alloc(current_task()->itk_space, &name, &port);
704 if (kr == KERN_SUCCESS)
705 ip_unlock(port);
706 else
707 name = MACH_PORT_NULL;
708 return name;
709 }
710
711 /*
712 * Routine: thread_get_special_port [kernel call]
713 * Purpose:
714 * Clones a send right for one of the thread's
715 * special ports.
716 * Conditions:
717 * Nothing locked.
718 * Returns:
719 * KERN_SUCCESS Extracted a send right.
720 * KERN_INVALID_ARGUMENT The thread is null.
721 * KERN_FAILURE The thread is dead.
722 * KERN_INVALID_ARGUMENT Invalid special port.
723 */
724
725 kern_return_t
726 thread_get_special_port(
727 thread_t thread,
728 int which,
729 ipc_port_t *portp)
730 {
731 kern_return_t result = KERN_SUCCESS;
732 ipc_port_t *whichp;
733
734 if (thread == THREAD_NULL)
735 return (KERN_INVALID_ARGUMENT);
736
737 switch (which) {
738
739 case THREAD_KERNEL_PORT:
740 whichp = &thread->ith_sself;
741 break;
742
743 default:
744 return (KERN_INVALID_ARGUMENT);
745 }
746
747 thread_mtx_lock(thread);
748
749 if (thread->active)
750 *portp = ipc_port_copy_send(*whichp);
751 else
752 result = KERN_FAILURE;
753
754 thread_mtx_unlock(thread);
755
756 return (result);
757 }
758
759 /*
760 * Routine: thread_set_special_port [kernel call]
761 * Purpose:
762 * Changes one of the thread's special ports,
763 * setting it to the supplied send right.
764 * Conditions:
765 * Nothing locked. If successful, consumes
766 * the supplied send right.
767 * Returns:
768 * KERN_SUCCESS Changed the special port.
769 * KERN_INVALID_ARGUMENT The thread is null.
770 * KERN_FAILURE The thread is dead.
771 * KERN_INVALID_ARGUMENT Invalid special port.
772 */
773
774 kern_return_t
775 thread_set_special_port(
776 thread_t thread,
777 int which,
778 ipc_port_t port)
779 {
780 kern_return_t result = KERN_SUCCESS;
781 ipc_port_t *whichp, old = IP_NULL;
782
783 if (thread == THREAD_NULL)
784 return (KERN_INVALID_ARGUMENT);
785
786 switch (which) {
787
788 case THREAD_KERNEL_PORT:
789 whichp = &thread->ith_sself;
790 break;
791
792 default:
793 return (KERN_INVALID_ARGUMENT);
794 }
795
796 thread_mtx_lock(thread);
797
798 if (thread->active) {
799 old = *whichp;
800 *whichp = port;
801 }
802 else
803 result = KERN_FAILURE;
804
805 thread_mtx_unlock(thread);
806
807 if (IP_VALID(old))
808 ipc_port_release_send(old);
809
810 return (result);
811 }
812
813 /*
814 * Routine: task_get_special_port [kernel call]
815 * Purpose:
816 * Clones a send right for one of the task's
817 * special ports.
818 * Conditions:
819 * Nothing locked.
820 * Returns:
821 * KERN_SUCCESS Extracted a send right.
822 * KERN_INVALID_ARGUMENT The task is null.
823 * KERN_FAILURE The task/space is dead.
824 * KERN_INVALID_ARGUMENT Invalid special port.
825 */
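
/*
 * Illustrative usage (a sketch): fetch the caller's bootstrap port.
 *
 *	mach_port_t bootstrap;
 *	task_get_special_port(mach_task_self(), TASK_BOOTSTRAP_PORT,
 *	    &bootstrap);
 */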
826
827 kern_return_t
828 task_get_special_port(
829 task_t task,
830 int which,
831 ipc_port_t *portp)
832 {
833 ipc_port_t port;
834
835 if (task == TASK_NULL)
836 return KERN_INVALID_ARGUMENT;
837
838 itk_lock(task);
839 if (task->itk_self == IP_NULL) {
840 itk_unlock(task);
841 return KERN_FAILURE;
842 }
843
844 switch (which) {
845 case TASK_KERNEL_PORT:
846 port = ipc_port_copy_send(task->itk_sself);
847 break;
848
849 case TASK_NAME_PORT:
850 port = ipc_port_make_send(task->itk_nself);
851 break;
852
853 case TASK_HOST_PORT:
854 port = ipc_port_copy_send(task->itk_host);
855 break;
856
857 case TASK_BOOTSTRAP_PORT:
858 port = ipc_port_copy_send(task->itk_bootstrap);
859 break;
860
861 case TASK_WIRED_LEDGER_PORT:
862 port = ipc_port_copy_send(task->wired_ledger_port);
863 break;
864
865 case TASK_PAGED_LEDGER_PORT:
866 port = ipc_port_copy_send(task->paged_ledger_port);
867 break;
868
869 case TASK_SEATBELT_PORT:
870 port = ipc_port_copy_send(task->itk_seatbelt);
871 break;
872
873 case TASK_GSSD_PORT:
874 port = ipc_port_copy_send(task->itk_gssd);
875 break;
876
877 case TASK_ACCESS_PORT:
878 port = ipc_port_copy_send(task->itk_task_access);
879 break;
880
881 default:
882 itk_unlock(task);
883 return KERN_INVALID_ARGUMENT;
884 }
885 itk_unlock(task);
886
887 *portp = port;
888 return KERN_SUCCESS;
889 }
890
891 /*
892 * Routine: task_set_special_port [kernel call]
893 * Purpose:
894 * Changes one of the task's special ports,
895 * setting it to the supplied send right.
896 * Conditions:
897 * Nothing locked. If successful, consumes
898 * the supplied send right.
899 * Returns:
900 * KERN_SUCCESS Changed the special port.
901 * KERN_INVALID_ARGUMENT The task is null.
902 * KERN_FAILURE The task/space is dead.
903 * KERN_INVALID_ARGUMENT Invalid special port.
904 * KERN_NO_ACCESS Attempted overwrite of seatbelt port.
905 */
906
907 kern_return_t
908 task_set_special_port(
909 task_t task,
910 int which,
911 ipc_port_t port)
912 {
913 ipc_port_t *whichp;
914 ipc_port_t old;
915
916 if (task == TASK_NULL)
917 return KERN_INVALID_ARGUMENT;
918
919 switch (which) {
920 case TASK_KERNEL_PORT:
921 whichp = &task->itk_sself;
922 break;
923
924 case TASK_HOST_PORT:
925 whichp = &task->itk_host;
926 break;
927
928 case TASK_BOOTSTRAP_PORT:
929 whichp = &task->itk_bootstrap;
930 break;
931
932 case TASK_WIRED_LEDGER_PORT:
933 whichp = &task->wired_ledger_port;
934 break;
935
936 case TASK_PAGED_LEDGER_PORT:
937 whichp = &task->paged_ledger_port;
938 break;
939
940 case TASK_SEATBELT_PORT:
941 whichp = &task->itk_seatbelt;
942 break;
943
944 case TASK_GSSD_PORT:
945 whichp = &task->itk_gssd;
946 break;
947
948 case TASK_ACCESS_PORT:
949 whichp = &task->itk_task_access;
950 break;
951
952 default:
953 return KERN_INVALID_ARGUMENT;
954 }/* switch */
955
956 itk_lock(task);
957 if (task->itk_self == IP_NULL) {
958 itk_unlock(task);
959 return KERN_FAILURE;
960 }
961
962 /* do not allow overwrite of seatbelt or task access ports */
963 if ((TASK_SEATBELT_PORT == which || TASK_ACCESS_PORT == which)
964 && IP_VALID(*whichp)) {
965 itk_unlock(task);
966 return KERN_NO_ACCESS;
967 }
968
969 #if CONFIG_MACF_MACH
970 if (mac_task_check_service(current_task(), task, "set_special_port")) {
971 itk_unlock(task);
972 return KERN_NO_ACCESS;
973 }
974 #endif
975
976 old = *whichp;
977 *whichp = port;
978 itk_unlock(task);
979
980 if (IP_VALID(old))
981 ipc_port_release_send(old);
982 return KERN_SUCCESS;
983 }
984
985
986 /*
987 * Routine: mach_ports_register [kernel call]
988 * Purpose:
989 * Stash a handful of port send rights in the task.
990 * Child tasks will inherit these rights, but they
991 * must use mach_ports_lookup to acquire them.
992 *
993 * The rights are supplied in a (wired) kalloc'd segment.
994 * Rights which aren't supplied are assumed to be null.
995 * Conditions:
996 * Nothing locked. If successful, consumes
997 * the supplied rights and memory.
998 * Returns:
999 * KERN_SUCCESS Stashed the port rights.
1000 * KERN_INVALID_ARGUMENT The task is null.
1001 * KERN_INVALID_ARGUMENT The task is dead.
1002 * KERN_INVALID_ARGUMENT Too many port rights supplied.
1003 */
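
/*
 * Illustrative user-space sequence (a sketch; "some_send_right" is only
 * a placeholder name):
 *
 *	mach_port_t set[1] = { some_send_right };
 *	mach_ports_register(mach_task_self(), set, 1);
 *
 *	... later, typically in a child task ...
 *
 *	mach_port_array_t ports;
 *	mach_msg_type_number_t count;
 *	mach_ports_lookup(mach_task_self(), &ports, &count);
 */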
1004
1005 kern_return_t
1006 mach_ports_register(
1007 task_t task,
1008 mach_port_array_t memory,
1009 mach_msg_type_number_t portsCnt)
1010 {
1011 ipc_port_t ports[TASK_PORT_REGISTER_MAX];
1012 unsigned int i;
1013
1014 if ((task == TASK_NULL) ||
1015 (portsCnt > TASK_PORT_REGISTER_MAX))
1016 return KERN_INVALID_ARGUMENT;
1017
1018 /*
1019 * Pad the port rights with nulls.
1020 */
1021
1022 for (i = 0; i < portsCnt; i++)
1023 ports[i] = memory[i];
1024 for (; i < TASK_PORT_REGISTER_MAX; i++)
1025 ports[i] = IP_NULL;
1026
1027 itk_lock(task);
1028 if (task->itk_self == IP_NULL) {
1029 itk_unlock(task);
1030 return KERN_INVALID_ARGUMENT;
1031 }
1032
1033 /*
1034 * Replace the old send rights with the new.
1035 * Release the old rights after unlocking.
1036 */
1037
1038 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
1039 ipc_port_t old;
1040
1041 old = task->itk_registered[i];
1042 task->itk_registered[i] = ports[i];
1043 ports[i] = old;
1044 }
1045
1046 itk_unlock(task);
1047
1048 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
1049 if (IP_VALID(ports[i]))
1050 ipc_port_release_send(ports[i]);
1051
1052 /*
1053 * Now that the operation is known to be successful,
1054 * we can free the memory.
1055 */
1056
1057 if (portsCnt != 0)
1058 kfree(memory,
1059 (vm_size_t) (portsCnt * sizeof(mach_port_t)));
1060
1061 return KERN_SUCCESS;
1062 }
1063
1064 /*
1065 * Routine: mach_ports_lookup [kernel call]
1066 * Purpose:
1067 * Retrieves (clones) the stashed port send rights.
1068 * Conditions:
1069 * Nothing locked. If successful, the caller gets
1070 * rights and memory.
1071 * Returns:
1072 * KERN_SUCCESS Retrieved the send rights.
1073 * KERN_INVALID_ARGUMENT The task is null.
1074 * KERN_INVALID_ARGUMENT The task is dead.
1075 * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
1076 */
1077
1078 kern_return_t
1079 mach_ports_lookup(
1080 task_t task,
1081 mach_port_array_t *portsp,
1082 mach_msg_type_number_t *portsCnt)
1083 {
1084 void *memory;
1085 vm_size_t size;
1086 ipc_port_t *ports;
1087 int i;
1088
1089 if (task == TASK_NULL)
1090 return KERN_INVALID_ARGUMENT;
1091
1092 size = (vm_size_t) (TASK_PORT_REGISTER_MAX * sizeof(ipc_port_t));
1093
1094 memory = kalloc(size);
1095 if (memory == 0)
1096 return KERN_RESOURCE_SHORTAGE;
1097
1098 itk_lock(task);
1099 if (task->itk_self == IP_NULL) {
1100 itk_unlock(task);
1101
1102 kfree(memory, size);
1103 return KERN_INVALID_ARGUMENT;
1104 }
1105
1106 ports = (ipc_port_t *) memory;
1107
1108 /*
1109 * Clone port rights. Because kalloc'd memory
1110 * is wired, we won't fault while holding the task lock.
1111 */
1112
1113 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
1114 ports[i] = ipc_port_copy_send(task->itk_registered[i]);
1115
1116 itk_unlock(task);
1117
1118 *portsp = (mach_port_array_t) ports;
1119 *portsCnt = TASK_PORT_REGISTER_MAX;
1120 return KERN_SUCCESS;
1121 }
1122
1123 /*
1124 * Routine: convert_port_to_locked_task
1125 * Purpose:
1126 * Internal helper routine to convert from a port to a locked
1127 * task. Used by several routines that try to convert from a
1128 * task port to a reference on some task related object.
1129 * Conditions:
1130 * Nothing locked, blocking OK.
1131 */
1132 task_t
1133 convert_port_to_locked_task(ipc_port_t port)
1134 {
1135 int try_failed_count = 0;
1136
1137 while (IP_VALID(port)) {
1138 task_t task;
1139
1140 ip_lock(port);
1141 if (!ip_active(port) || (ip_kotype(port) != IKOT_TASK)) {
1142 ip_unlock(port);
1143 return TASK_NULL;
1144 }
1145 task = (task_t) port->ip_kobject;
1146 assert(task != TASK_NULL);
1147
1148 /*
1149 * Normal lock ordering puts task_lock() before ip_lock().
1150 * Attempt out-of-order locking here.
1151 */
1152 if (task_lock_try(task)) {
1153 ip_unlock(port);
1154 return(task);
1155 }
1156 try_failed_count++;
1157
1158 ip_unlock(port);
1159 mutex_pause(try_failed_count);
1160 }
1161 return TASK_NULL;
1162 }
1163
1164 /*
1165 * Routine: convert_port_to_task
1166 * Purpose:
1167 * Convert from a port to a task.
1168 * Doesn't consume the port ref; produces a task ref,
1169 * which may be null.
1170 * Conditions:
1171 * Nothing locked.
1172 */
1173 task_t
1174 convert_port_to_task(
1175 ipc_port_t port)
1176 {
1177 task_t task = TASK_NULL;
1178
1179 if (IP_VALID(port)) {
1180 ip_lock(port);
1181
1182 if ( ip_active(port) &&
1183 ip_kotype(port) == IKOT_TASK ) {
1184 task = (task_t)port->ip_kobject;
1185 assert(task != TASK_NULL);
1186
1187 task_reference_internal(task);
1188 }
1189
1190 ip_unlock(port);
1191 }
1192
1193 return (task);
1194 }
1195
1196 /*
1197 * Routine: convert_port_to_task_name
1198 * Purpose:
1199 * Convert from a port to a task name.
1200 * Doesn't consume the port ref; produces a task name ref,
1201 * which may be null.
1202 * Conditions:
1203 * Nothing locked.
1204 */
1205 task_name_t
1206 convert_port_to_task_name(
1207 ipc_port_t port)
1208 {
1209 task_name_t task = TASK_NULL;
1210
1211 if (IP_VALID(port)) {
1212 ip_lock(port);
1213
1214 if ( ip_active(port) &&
1215 (ip_kotype(port) == IKOT_TASK ||
1216 ip_kotype(port) == IKOT_TASK_NAME)) {
1217 task = (task_name_t)port->ip_kobject;
1218 assert(task != TASK_NAME_NULL);
1219
1220 task_reference_internal(task);
1221 }
1222
1223 ip_unlock(port);
1224 }
1225
1226 return (task);
1227 }
1228
1229 /*
1230 * Routine: convert_port_to_space
1231 * Purpose:
1232 * Convert from a port to a space.
1233 * Doesn't consume the port ref; produces a space ref,
1234 * which may be null.
1235 * Conditions:
1236 * Nothing locked.
1237 */
1238 ipc_space_t
1239 convert_port_to_space(
1240 ipc_port_t port)
1241 {
1242 ipc_space_t space;
1243 task_t task;
1244
1245 task = convert_port_to_locked_task(port);
1246
1247 if (task == TASK_NULL)
1248 return IPC_SPACE_NULL;
1249
1250 if (!task->active) {
1251 task_unlock(task);
1252 return IPC_SPACE_NULL;
1253 }
1254
1255 space = task->itk_space;
1256 is_reference(space);
1257 task_unlock(task);
1258 return (space);
1259 }
1260
1261 /*
1262 * Routine: convert_port_to_map
1263 * Purpose:
1264 * Convert from a port to a map.
1265 * Doesn't consume the port ref; produces a map ref,
1266 * which may be null.
1267 * Conditions:
1268 * Nothing locked.
1269 */
1270
1271 vm_map_t
1272 convert_port_to_map(
1273 ipc_port_t port)
1274 {
1275 task_t task;
1276 vm_map_t map;
1277
1278 task = convert_port_to_locked_task(port);
1279
1280 if (task == TASK_NULL)
1281 return VM_MAP_NULL;
1282
1283 if (!task->active) {
1284 task_unlock(task);
1285 return VM_MAP_NULL;
1286 }
1287
1288 map = task->map;
1289 vm_map_reference_swap(map);
1290 task_unlock(task);
1291 return map;
1292 }
1293
1294
1295 /*
1296 * Routine: convert_port_to_thread
1297 * Purpose:
1298 * Convert from a port to a thread.
1299 * Doesn't consume the port ref; produces a thread ref,
1300 * which may be null.
1301 * Conditions:
1302 * Nothing locked.
1303 */
1304
1305 thread_t
1306 convert_port_to_thread(
1307 ipc_port_t port)
1308 {
1309 thread_t thread = THREAD_NULL;
1310
1311 if (IP_VALID(port)) {
1312 ip_lock(port);
1313
1314 if ( ip_active(port) &&
1315 ip_kotype(port) == IKOT_THREAD ) {
1316 thread = (thread_t)port->ip_kobject;
1317 assert(thread != THREAD_NULL);
1318
1319 thread_reference_internal(thread);
1320 }
1321
1322 ip_unlock(port);
1323 }
1324
1325 return (thread);
1326 }
1327
1328 /*
1329 * Routine: port_name_to_thread
1330 * Purpose:
1331 * Convert from a port name to a thread reference.
1332 * A name of MACH_PORT_NULL is valid for the null thread.
1333 * Conditions:
1334 * Nothing locked.
1335 */
1336 thread_t
1337 port_name_to_thread(
1338 mach_port_name_t name)
1339 {
1340 thread_t thread = THREAD_NULL;
1341 ipc_port_t kport;
1342
1343 if (MACH_PORT_VALID(name)) {
1344 if (ipc_object_copyin(current_space(), name,
1345 MACH_MSG_TYPE_COPY_SEND,
1346 (ipc_object_t *)&kport) != KERN_SUCCESS)
1347 return (THREAD_NULL);
1348
1349 thread = convert_port_to_thread(kport);
1350
1351 if (IP_VALID(kport))
1352 ipc_port_release_send(kport);
1353 }
1354
1355 return (thread);
1356 }
1357
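/*
 * Routine: port_name_to_task
 * Purpose:
 * Convert from a port name to a task reference.
 * A name of MACH_PORT_NULL is valid for the null task.
 * Conditions:
 * Nothing locked.
 */
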
1358 task_t
1359 port_name_to_task(
1360 mach_port_name_t name)
1361 {
1362 ipc_port_t kern_port;
1363 kern_return_t kr;
1364 task_t task = TASK_NULL;
1365
1366 if (MACH_PORT_VALID(name)) {
1367 kr = ipc_object_copyin(current_space(), name,
1368 MACH_MSG_TYPE_COPY_SEND,
1369 (ipc_object_t *) &kern_port);
1370 if (kr != KERN_SUCCESS)
1371 return TASK_NULL;
1372
1373 task = convert_port_to_task(kern_port);
1374
1375 if (IP_VALID(kern_port))
1376 ipc_port_release_send(kern_port);
1377 }
1378 return task;
1379 }
1380
1381 /*
1382 * Routine: convert_task_to_port
1383 * Purpose:
1384 * Convert from a task to a port.
1385 * Consumes a task ref; produces a naked send right
1386 * which may be invalid.
1387 * Conditions:
1388 * Nothing locked.
1389 */
1390
1391 ipc_port_t
1392 convert_task_to_port(
1393 task_t task)
1394 {
1395 ipc_port_t port;
1396
1397 itk_lock(task);
1398 if (task->itk_self != IP_NULL)
1399 port = ipc_port_make_send(task->itk_self);
1400 else
1401 port = IP_NULL;
1402 itk_unlock(task);
1403
1404 task_deallocate(task);
1405 return port;
1406 }
1407
1408 /*
1409 * Routine: convert_task_name_to_port
1410 * Purpose:
1411 * Convert from a task name ref to a port.
1412 * Consumes a task name ref; produces a naked send right
1413 * which may be invalid.
1414 * Conditions:
1415 * Nothing locked.
1416 */
1417
1418 ipc_port_t
1419 convert_task_name_to_port(
1420 task_name_t task_name)
1421 {
1422 ipc_port_t port;
1423
1424 itk_lock(task_name);
1425 if (task_name->itk_nself != IP_NULL)
1426 port = ipc_port_make_send(task_name->itk_nself);
1427 else
1428 port = IP_NULL;
1429 itk_unlock(task_name);
1430
1431 task_name_deallocate(task_name);
1432 return port;
1433 }
1434
1435 /*
1436 * Routine: convert_thread_to_port
1437 * Purpose:
1438 * Convert from a thread to a port.
1439 * Consumes a thread ref; produces a naked send right
1440 * which may be invalid.
1441 * Conditions:
1442 * Nothing locked.
1443 */
1444
1445 ipc_port_t
1446 convert_thread_to_port(
1447 thread_t thread)
1448 {
1449 ipc_port_t port;
1450
1451 thread_mtx_lock(thread);
1452
1453 if (thread->ith_self != IP_NULL)
1454 port = ipc_port_make_send(thread->ith_self);
1455 else
1456 port = IP_NULL;
1457
1458 thread_mtx_unlock(thread);
1459
1460 thread_deallocate(thread);
1461
1462 return (port);
1463 }
1464
1465 /*
1466 * Routine: space_deallocate
1467 * Purpose:
1468 * Deallocate a space ref produced by convert_port_to_space.
1469 * Conditions:
1470 * Nothing locked.
1471 */
1472
1473 void
1474 space_deallocate(
1475 ipc_space_t space)
1476 {
1477 if (space != IS_NULL)
1478 is_release(space);
1479 }
1480
1481 /*
1482 * Routine: thread/task_set_exception_ports [kernel call]
1483 * Purpose:
1484 * Sets the thread/task exception port, flavor and
1485 * behavior for the exception types specified by the mask.
1486 * There will be one send right per exception per valid
1487 * port.
1488 * Conditions:
1489 * Nothing locked. If successful, consumes
1490 * the supplied send right.
1491 * Returns:
1492 * KERN_SUCCESS Changed the special port.
1493 * KERN_INVALID_ARGUMENT The thread is null,
1494 * Illegal mask bit set.
1495 * Illegal exception behavior.
1496 * KERN_FAILURE The thread is dead.
1497 */
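
/*
 * Illustrative usage (a sketch; exc_port stands for a previously
 * allocated port for which the caller holds a send right):
 *
 *	thread_set_exception_ports(mach_thread_self(),
 *	    EXC_MASK_BAD_ACCESS, exc_port,
 *	    EXCEPTION_DEFAULT | MACH_EXCEPTION_CODES,
 *	    MACHINE_THREAD_STATE);
 */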
1498
1499 kern_return_t
1500 thread_set_exception_ports(
1501 thread_t thread,
1502 exception_mask_t exception_mask,
1503 ipc_port_t new_port,
1504 exception_behavior_t new_behavior,
1505 thread_state_flavor_t new_flavor)
1506 {
1507 ipc_port_t old_port[EXC_TYPES_COUNT];
1508 boolean_t privileged = current_task()->sec_token.val[0] == 0;
1509 register int i;
1510
1511 if (thread == THREAD_NULL)
1512 return (KERN_INVALID_ARGUMENT);
1513
1514 if (exception_mask & ~EXC_MASK_VALID)
1515 return (KERN_INVALID_ARGUMENT);
1516
1517 if (IP_VALID(new_port)) {
1518 switch (new_behavior & ~MACH_EXCEPTION_CODES) {
1519
1520 case EXCEPTION_DEFAULT:
1521 case EXCEPTION_STATE:
1522 case EXCEPTION_STATE_IDENTITY:
1523 break;
1524
1525 default:
1526 return (KERN_INVALID_ARGUMENT);
1527 }
1528 }
1529
1530 /*
1531 * Check the validity of the thread_state_flavor by calling the
1532 * VALID_THREAD_STATE_FLAVOR architecture dependent macro defined in
1533 * osfmk/mach/ARCHITECTURE/thread_status.h
1534 */
1535 if (!VALID_THREAD_STATE_FLAVOR(new_flavor))
1536 return (KERN_INVALID_ARGUMENT);
1537
1538 thread_mtx_lock(thread);
1539
1540 if (!thread->active) {
1541 thread_mtx_unlock(thread);
1542
1543 return (KERN_FAILURE);
1544 }
1545
1546 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
1547 if (exception_mask & (1 << i)) {
1548 old_port[i] = thread->exc_actions[i].port;
1549 thread->exc_actions[i].port = ipc_port_copy_send(new_port);
1550 thread->exc_actions[i].behavior = new_behavior;
1551 thread->exc_actions[i].flavor = new_flavor;
1552 thread->exc_actions[i].privileged = privileged;
1553 }
1554 else
1555 old_port[i] = IP_NULL;
1556 }
1557
1558 thread_mtx_unlock(thread);
1559
1560 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i)
1561 if (IP_VALID(old_port[i]))
1562 ipc_port_release_send(old_port[i]);
1563
1564 if (IP_VALID(new_port)) /* consume send right */
1565 ipc_port_release_send(new_port);
1566
1567 return (KERN_SUCCESS);
1568 }
1569
1570 kern_return_t
1571 task_set_exception_ports(
1572 task_t task,
1573 exception_mask_t exception_mask,
1574 ipc_port_t new_port,
1575 exception_behavior_t new_behavior,
1576 thread_state_flavor_t new_flavor)
1577 {
1578 ipc_port_t old_port[EXC_TYPES_COUNT];
1579 boolean_t privileged = current_task()->sec_token.val[0] == 0;
1580 register int i;
1581
1582 if (task == TASK_NULL)
1583 return (KERN_INVALID_ARGUMENT);
1584
1585 if (exception_mask & ~EXC_MASK_VALID)
1586 return (KERN_INVALID_ARGUMENT);
1587
1588 if (IP_VALID(new_port)) {
1589 switch (new_behavior & ~MACH_EXCEPTION_CODES) {
1590
1591 case EXCEPTION_DEFAULT:
1592 case EXCEPTION_STATE:
1593 case EXCEPTION_STATE_IDENTITY:
1594 break;
1595
1596 default:
1597 return (KERN_INVALID_ARGUMENT);
1598 }
1599 }
1600
1601 itk_lock(task);
1602
1603 if (task->itk_self == IP_NULL) {
1604 itk_unlock(task);
1605
1606 return (KERN_FAILURE);
1607 }
1608
1609 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
1610 if (exception_mask & (1 << i)) {
1611 old_port[i] = task->exc_actions[i].port;
1612 task->exc_actions[i].port =
1613 ipc_port_copy_send(new_port);
1614 task->exc_actions[i].behavior = new_behavior;
1615 task->exc_actions[i].flavor = new_flavor;
1616 task->exc_actions[i].privileged = privileged;
1617 }
1618 else
1619 old_port[i] = IP_NULL;
1620 }
1621
1622 itk_unlock(task);
1623
1624 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i)
1625 if (IP_VALID(old_port[i]))
1626 ipc_port_release_send(old_port[i]);
1627
1628 if (IP_VALID(new_port)) /* consume send right */
1629 ipc_port_release_send(new_port);
1630
1631 return (KERN_SUCCESS);
1632 }
1633
1634 /*
1635 * Routine: thread/task_swap_exception_ports [kernel call]
1636 * Purpose:
1637 * Sets the thread/task exception port, flavor and
1638 * behavior for the exception types specified by the
1639 * mask.
1640 *
1641 * The old ports, behaviors and flavors are returned.
1642 * Count specifies the array sizes on input and
1643 * the number of returned ports etc. on output. The
1644 * arrays must be large enough to hold all the returned
1645 * data; MIG returns an error otherwise. The masks
1646 * array specifies the corresponding exception type(s).
1647 *
1648 * Conditions:
1649 * Nothing locked. If successful, consumes
1650 * the supplied send right.
1651 *
1652 * Returns up to [in] CountCnt elements.
1653 * Returns:
1654 * KERN_SUCCESS Changed the special port.
1655 * KERN_INVALID_ARGUMENT The thread is null,
1656 * Illegal mask bit set.
1657 * Illegal exception behavior.
1658 * KERN_FAILURE The thread is dead.
1659 */
1660
1661 kern_return_t
1662 thread_swap_exception_ports(
1663 thread_t thread,
1664 exception_mask_t exception_mask,
1665 ipc_port_t new_port,
1666 exception_behavior_t new_behavior,
1667 thread_state_flavor_t new_flavor,
1668 exception_mask_array_t masks,
1669 mach_msg_type_number_t *CountCnt,
1670 exception_port_array_t ports,
1671 exception_behavior_array_t behaviors,
1672 thread_state_flavor_array_t flavors)
1673 {
1674 ipc_port_t old_port[EXC_TYPES_COUNT];
1675 boolean_t privileged = current_task()->sec_token.val[0] == 0;
1676 unsigned int i, j, count;
1677
1678 if (thread == THREAD_NULL)
1679 return (KERN_INVALID_ARGUMENT);
1680
1681 if (exception_mask & ~EXC_MASK_VALID)
1682 return (KERN_INVALID_ARGUMENT);
1683
1684 if (IP_VALID(new_port)) {
1685 switch (new_behavior & ~MACH_EXCEPTION_CODES) {
1686
1687 case EXCEPTION_DEFAULT:
1688 case EXCEPTION_STATE:
1689 case EXCEPTION_STATE_IDENTITY:
1690 break;
1691
1692 default:
1693 return (KERN_INVALID_ARGUMENT);
1694 }
1695 }
1696
1697 thread_mtx_lock(thread);
1698
1699 if (!thread->active) {
1700 thread_mtx_unlock(thread);
1701
1702 return (KERN_FAILURE);
1703 }
1704
1705 count = 0;
1706
1707 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
1708 if (exception_mask & (1 << i)) {
1709 for (j = 0; j < count; ++j) {
1710 /*
1711 * search for an identical entry, if found
1712 * set corresponding mask for this exception.
1713 */
1714 if ( thread->exc_actions[i].port == ports[j] &&
1715 thread->exc_actions[i].behavior == behaviors[j] &&
1716 thread->exc_actions[i].flavor == flavors[j] ) {
1717 masks[j] |= (1 << i);
1718 break;
1719 }
1720 }
1721
1722 if (j == count) {
1723 masks[j] = (1 << i);
1724 ports[j] = ipc_port_copy_send(thread->exc_actions[i].port);
1725
1726 behaviors[j] = thread->exc_actions[i].behavior;
1727 flavors[j] = thread->exc_actions[i].flavor;
1728 ++count;
1729 }
1730
1731 old_port[i] = thread->exc_actions[i].port;
1732 thread->exc_actions[i].port = ipc_port_copy_send(new_port);
1733 thread->exc_actions[i].behavior = new_behavior;
1734 thread->exc_actions[i].flavor = new_flavor;
1735 thread->exc_actions[i].privileged = privileged;
1736 if (count > *CountCnt)
1737 break;
1738 }
1739 else
1740 old_port[i] = IP_NULL;
1741 }
1742
1743 thread_mtx_unlock(thread);
1744
1745 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i)
1746 if (IP_VALID(old_port[i]))
1747 ipc_port_release_send(old_port[i]);
1748
1749 if (IP_VALID(new_port)) /* consume send right */
1750 ipc_port_release_send(new_port);
1751
1752 *CountCnt = count;
1753
1754 return (KERN_SUCCESS);
1755 }
1756
1757 kern_return_t
1758 task_swap_exception_ports(
1759 task_t task,
1760 exception_mask_t exception_mask,
1761 ipc_port_t new_port,
1762 exception_behavior_t new_behavior,
1763 thread_state_flavor_t new_flavor,
1764 exception_mask_array_t masks,
1765 mach_msg_type_number_t *CountCnt,
1766 exception_port_array_t ports,
1767 exception_behavior_array_t behaviors,
1768 thread_state_flavor_array_t flavors)
1769 {
1770 ipc_port_t old_port[EXC_TYPES_COUNT];
1771 boolean_t privileged = current_task()->sec_token.val[0] == 0;
1772 unsigned int i, j, count;
1773
1774 if (task == TASK_NULL)
1775 return (KERN_INVALID_ARGUMENT);
1776
1777 if (exception_mask & ~EXC_MASK_VALID)
1778 return (KERN_INVALID_ARGUMENT);
1779
1780 if (IP_VALID(new_port)) {
1781 switch (new_behavior & ~MACH_EXCEPTION_CODES) {
1782
1783 case EXCEPTION_DEFAULT:
1784 case EXCEPTION_STATE:
1785 case EXCEPTION_STATE_IDENTITY:
1786 break;
1787
1788 default:
1789 return (KERN_INVALID_ARGUMENT);
1790 }
1791 }
1792
1793 itk_lock(task);
1794
1795 if (task->itk_self == IP_NULL) {
1796 itk_unlock(task);
1797
1798 return (KERN_FAILURE);
1799 }
1800
1801 count = 0;
1802
1803 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
1804 if (exception_mask & (1 << i)) {
1805 for (j = 0; j < count; j++) {
1806 /*
1807 * search for an identical entry, if found
1808 * set corresponding mask for this exception.
1809 */
1810 if ( task->exc_actions[i].port == ports[j] &&
1811 task->exc_actions[i].behavior == behaviors[j] &&
1812 task->exc_actions[i].flavor == flavors[j] ) {
1813 masks[j] |= (1 << i);
1814 break;
1815 }
1816 }
1817
1818 if (j == count) {
1819 masks[j] = (1 << i);
1820 ports[j] = ipc_port_copy_send(task->exc_actions[i].port);
1821 behaviors[j] = task->exc_actions[i].behavior;
1822 flavors[j] = task->exc_actions[i].flavor;
1823 ++count;
1824 }
1825
1826 old_port[i] = task->exc_actions[i].port;
1827 task->exc_actions[i].port = ipc_port_copy_send(new_port);
1828 task->exc_actions[i].behavior = new_behavior;
1829 task->exc_actions[i].flavor = new_flavor;
1830 task->exc_actions[i].privileged = privileged;
1831 if (count > *CountCnt)
1832 break;
1833 }
1834 else
1835 old_port[i] = IP_NULL;
1836 }
1837
1838 itk_unlock(task);
1839
1840 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++)
1841 if (IP_VALID(old_port[i]))
1842 ipc_port_release_send(old_port[i]);
1843
1844 if (IP_VALID(new_port)) /* consume send right */
1845 ipc_port_release_send(new_port);
1846
1847 *CountCnt = count;
1848
1849 return (KERN_SUCCESS);
1850 }
1851
1852 /*
1853 * Routine: thread/task_get_exception_ports [kernel call]
1854 * Purpose:
1855 * Clones a send right for each of the thread/task's exception
1856 * ports specified in the mask and returns the behavior
1857 * and flavor of said port.
1858 *
1859 * Returns up to [in] CountCnt elements.
1860 *
1861 * Conditions:
1862 * Nothing locked.
1863 * Returns:
1864 * KERN_SUCCESS Extracted a send right.
1865 * KERN_INVALID_ARGUMENT The thread is null,
1866 * Invalid special port,
1867 * Illegal mask bit set.
1868 * KERN_FAILURE The thread is dead.
1869 */
1870
1871 kern_return_t
1872 thread_get_exception_ports(
1873 thread_t thread,
1874 exception_mask_t exception_mask,
1875 exception_mask_array_t masks,
1876 mach_msg_type_number_t *CountCnt,
1877 exception_port_array_t ports,
1878 exception_behavior_array_t behaviors,
1879 thread_state_flavor_array_t flavors)
1880 {
1881 unsigned int i, j, count;
1882
1883 if (thread == THREAD_NULL)
1884 return (KERN_INVALID_ARGUMENT);
1885
1886 if (exception_mask & ~EXC_MASK_VALID)
1887 return (KERN_INVALID_ARGUMENT);
1888
1889 thread_mtx_lock(thread);
1890
1891 if (!thread->active) {
1892 thread_mtx_unlock(thread);
1893
1894 return (KERN_FAILURE);
1895 }
1896
1897 count = 0;
1898
1899 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
1900 if (exception_mask & (1 << i)) {
1901 for (j = 0; j < count; ++j) {
1902 /*
1903 * search for an identical entry, if found
1904 * set corresponding mask for this exception.
1905 */
1906 if ( thread->exc_actions[i].port == ports[j] &&
1907 thread->exc_actions[i].behavior == behaviors[j] &&
1908 thread->exc_actions[i].flavor == flavors[j] ) {
1909 masks[j] |= (1 << i);
1910 break;
1911 }
1912 }
1913
1914 if (j == count) {
1915 masks[j] = (1 << i);
1916 ports[j] = ipc_port_copy_send(thread->exc_actions[i].port);
1917 behaviors[j] = thread->exc_actions[i].behavior;
1918 flavors[j] = thread->exc_actions[i].flavor;
1919 ++count;
1920 if (count >= *CountCnt)
1921 break;
1922 }
1923 }
1924 }
1925
1926 thread_mtx_unlock(thread);
1927
1928 *CountCnt = count;
1929
1930 return (KERN_SUCCESS);
1931 }
1932
1933 kern_return_t
1934 task_get_exception_ports(
1935 task_t task,
1936 exception_mask_t exception_mask,
1937 exception_mask_array_t masks,
1938 mach_msg_type_number_t *CountCnt,
1939 exception_port_array_t ports,
1940 exception_behavior_array_t behaviors,
1941 thread_state_flavor_array_t flavors)
1942 {
1943 unsigned int i, j, count;
1944
1945 if (task == TASK_NULL)
1946 return (KERN_INVALID_ARGUMENT);
1947
1948 if (exception_mask & ~EXC_MASK_VALID)
1949 return (KERN_INVALID_ARGUMENT);
1950
1951 itk_lock(task);
1952
1953 if (task->itk_self == IP_NULL) {
1954 itk_unlock(task);
1955
1956 return (KERN_FAILURE);
1957 }
1958
1959 count = 0;
1960
1961 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
1962 if (exception_mask & (1 << i)) {
1963 for (j = 0; j < count; ++j) {
1964 /*
1965 * search for an identical entry, if found
1966 * set corresponding mask for this exception.
1967 */
1968 if ( task->exc_actions[i].port == ports[j] &&
1969 task->exc_actions[i].behavior == behaviors[j] &&
1970 task->exc_actions[i].flavor == flavors[j] ) {
1971 masks[j] |= (1 << i);
1972 break;
1973 }
1974 }
1975
1976 if (j == count) {
1977 masks[j] = (1 << i);
1978 ports[j] = ipc_port_copy_send(task->exc_actions[i].port);
1979 behaviors[j] = task->exc_actions[i].behavior;
1980 flavors[j] = task->exc_actions[i].flavor;
1981 ++count;
1982 if (count >= *CountCnt)
1983 break;
1984 }
1985 }
1986 }
1987
1988 itk_unlock(task);
1989
1990 *CountCnt = count;
1991
1992 return (KERN_SUCCESS);
1993 }