1 /*
2 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
58 * support for mandatory and extensible security protections. This notice
59 * is included in support of clause 2.2 (b) of the Apple Public License,
60 * Version 2.0.
61 */
62 /*
63 */
64
65 /*
66 * File: ipc_tt.c
67 * Purpose:
68 * Task and thread related IPC functions.
69 */
70
71 #include <mach/mach_types.h>
72 #include <mach/boolean.h>
73 #include <mach/kern_return.h>
74 #include <mach/mach_param.h>
75 #include <mach/task_special_ports.h>
76 #include <mach/thread_special_ports.h>
77 #include <mach/thread_status.h>
78 #include <mach/exception_types.h>
79 #include <mach/memory_object_types.h>
80 #include <mach/mach_traps.h>
81 #include <mach/task_server.h>
82 #include <mach/thread_act_server.h>
83 #include <mach/mach_host_server.h>
84 #include <mach/host_priv_server.h>
85 #include <mach/vm_map_server.h>
86
87 #include <kern/kern_types.h>
88 #include <kern/host.h>
89 #include <kern/ipc_kobject.h>
90 #include <kern/ipc_tt.h>
91 #include <kern/kalloc.h>
92 #include <kern/thread.h>
93 #include <kern/misc_protos.h>
94
95 #include <vm/vm_map.h>
96 #include <vm/vm_pageout.h>
97 #include <vm/vm_protos.h>
98
99 #include <security/mac_mach_internal.h>
100
101 /* forward declarations */
102 task_t convert_port_to_locked_task(ipc_port_t port);
103
104
105 /*
106 * Routine: ipc_task_init
107 * Purpose:
108 * Initialize a task's IPC state.
109 *
110 * If non-null, some state will be inherited from the parent.
111 * The parent must be appropriately initialized.
112 * Conditions:
113 * Nothing locked.
114 */
115
116 void
117 ipc_task_init(
118 task_t task,
119 task_t parent)
120 {
121 ipc_space_t space;
122 ipc_port_t kport;
123 ipc_port_t nport;
124 kern_return_t kr;
125 int i;
126
127
128 kr = ipc_space_create(&ipc_table_entries[0], &space);
129 if (kr != KERN_SUCCESS)
130 panic("ipc_task_init");
131
132 space->is_task = task;
133
134 kport = ipc_port_alloc_kernel();
135 if (kport == IP_NULL)
136 panic("ipc_task_init");
137
138 nport = ipc_port_alloc_kernel();
139 if (nport == IP_NULL)
140 panic("ipc_task_init");
141
142 itk_lock_init(task);
143 task->itk_self = kport;
144 task->itk_nself = nport;
145 task->itk_sself = ipc_port_make_send(kport);
146 task->itk_space = space;
147 space->is_fast = FALSE;
148
149 #if CONFIG_MACF_MACH
150 if (parent)
151 mac_task_label_associate(parent, task, &parent->maclabel,
152 &task->maclabel, &kport->ip_label);
153 else
154 mac_task_label_associate_kernel(task, &task->maclabel, &kport->ip_label);
155 #endif
156
157 if (parent == TASK_NULL) {
158 ipc_port_t port;
159
160 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
161 task->exc_actions[i].port = IP_NULL;
162 }/* for */
163
164 kr = host_get_host_port(host_priv_self(), &port);
165 assert(kr == KERN_SUCCESS);
166 task->itk_host = port;
167
168 task->itk_bootstrap = IP_NULL;
169 task->itk_seatbelt = IP_NULL;
170 task->itk_gssd = IP_NULL;
171 task->itk_automountd = IP_NULL;
172 task->itk_task_access = IP_NULL;
173
174 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
175 task->itk_registered[i] = IP_NULL;
176 } else {
177 itk_lock(parent);
178 assert(parent->itk_self != IP_NULL);
179
180 /* inherit registered ports */
181
182 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
183 task->itk_registered[i] =
184 ipc_port_copy_send(parent->itk_registered[i]);
185
186 /* inherit exception and bootstrap ports */
187
188 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
189 task->exc_actions[i].port =
190 ipc_port_copy_send(parent->exc_actions[i].port);
191 task->exc_actions[i].flavor =
192 parent->exc_actions[i].flavor;
193 task->exc_actions[i].behavior =
194 parent->exc_actions[i].behavior;
195 task->exc_actions[i].privileged =
196 parent->exc_actions[i].privileged;
197 }/* for */
198 task->itk_host =
199 ipc_port_copy_send(parent->itk_host);
200
201 task->itk_bootstrap =
202 ipc_port_copy_send(parent->itk_bootstrap);
203
204 task->itk_seatbelt =
205 ipc_port_copy_send(parent->itk_seatbelt);
206
207 task->itk_gssd =
208 ipc_port_copy_send(parent->itk_gssd);
209
210 task->itk_automountd =
211 ipc_port_copy_send(parent->itk_automountd);
212
213 task->itk_task_access =
214 ipc_port_copy_send(parent->itk_task_access);
215
216 itk_unlock(parent);
217 }
218 }
219
220 /*
221 * Routine: ipc_task_enable
222 * Purpose:
223 * Enable a task for IPC access.
224 * Conditions:
225 * Nothing locked.
226 */
227
228 void
229 ipc_task_enable(
230 task_t task)
231 {
232 ipc_port_t kport;
233 ipc_port_t nport;
234
235 itk_lock(task);
236 kport = task->itk_self;
237 if (kport != IP_NULL)
238 ipc_kobject_set(kport, (ipc_kobject_t) task, IKOT_TASK);
239 nport = task->itk_nself;
240 if (nport != IP_NULL)
241 ipc_kobject_set(nport, (ipc_kobject_t) task, IKOT_TASK_NAME);
242 itk_unlock(task);
243 }
244
245 /*
246 * Routine: ipc_task_disable
247 * Purpose:
248 * Disable IPC access to a task.
249 * Conditions:
250 * Nothing locked.
251 */
252
253 void
254 ipc_task_disable(
255 task_t task)
256 {
257 ipc_port_t kport;
258 ipc_port_t nport;
259
260 itk_lock(task);
261 kport = task->itk_self;
262 if (kport != IP_NULL)
263 ipc_kobject_set(kport, IKO_NULL, IKOT_NONE);
264 nport = task->itk_nself;
265 if (nport != IP_NULL)
266 ipc_kobject_set(nport, IKO_NULL, IKOT_NONE);
267 itk_unlock(task);
268 }
269
270 /*
271 * Routine: ipc_task_terminate
272 * Purpose:
273 * Clean up and destroy a task's IPC state.
274 * Conditions:
275 * Nothing locked. The task must be suspended.
276 * (Or the current thread must be in the task.)
277 */
278
279 void
280 ipc_task_terminate(
281 task_t task)
282 {
283 ipc_port_t kport;
284 ipc_port_t nport;
285 int i;
286
287 itk_lock(task);
288 kport = task->itk_self;
289
290 if (kport == IP_NULL) {
291 /* the task is already terminated (can this happen?) */
292 itk_unlock(task);
293 return;
294 }
295 task->itk_self = IP_NULL;
296
297 nport = task->itk_nself;
298 assert(nport != IP_NULL);
299 task->itk_nself = IP_NULL;
300
301 itk_unlock(task);
302
303 /* release the naked send rights */
304
305 if (IP_VALID(task->itk_sself))
306 ipc_port_release_send(task->itk_sself);
307
308 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
309 if (IP_VALID(task->exc_actions[i].port)) {
310 ipc_port_release_send(task->exc_actions[i].port);
311 }
312 }
313
314 if (IP_VALID(task->itk_host))
315 ipc_port_release_send(task->itk_host);
316
317 if (IP_VALID(task->itk_bootstrap))
318 ipc_port_release_send(task->itk_bootstrap);
319
320 if (IP_VALID(task->itk_seatbelt))
321 ipc_port_release_send(task->itk_seatbelt);
322
323 if (IP_VALID(task->itk_gssd))
324 ipc_port_release_send(task->itk_gssd);
325
326 if (IP_VALID(task->itk_automountd))
327 ipc_port_release_send(task->itk_automountd);
328
329 if (IP_VALID(task->itk_task_access))
330 ipc_port_release_send(task->itk_task_access);
331
332 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
333 if (IP_VALID(task->itk_registered[i]))
334 ipc_port_release_send(task->itk_registered[i]);
335
336 ipc_port_release_send(task->wired_ledger_port);
337 ipc_port_release_send(task->paged_ledger_port);
338
339 /* destroy the kernel ports */
340 ipc_port_dealloc_kernel(kport);
341 ipc_port_dealloc_kernel(nport);
342 }
343
344 /*
345 * Routine: ipc_task_reset
346 * Purpose:
347 * Reset a task's IPC state to protect it when
348 * it enters an elevated security context. The
349 * task name port can remain the same - since
350 * it represents no specific privilege.
351 * Conditions:
352 * Nothing locked. The task must be suspended.
353 * (Or the current thread must be in the task.)
354 */
355
356 void
357 ipc_task_reset(
358 task_t task)
359 {
360 ipc_port_t old_kport, new_kport;
361 ipc_port_t old_sself;
362 ipc_port_t old_exc_actions[EXC_TYPES_COUNT];
363 int i;
364
365 new_kport = ipc_port_alloc_kernel();
366 if (new_kport == IP_NULL)
367 panic("ipc_task_reset");
368
369 itk_lock(task);
370
371 old_kport = task->itk_self;
372
373 if (old_kport == IP_NULL) {
374 /* the task is already terminated (can this happen?) */
375 itk_unlock(task);
376 ipc_port_dealloc_kernel(new_kport);
377 return;
378 }
379
380 task->itk_self = new_kport;
381 old_sself = task->itk_sself;
382 task->itk_sself = ipc_port_make_send(new_kport);
383 ipc_kobject_set(old_kport, IKO_NULL, IKOT_NONE);
384 ipc_kobject_set(new_kport, (ipc_kobject_t) task, IKOT_TASK);
385
386 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
387 if (!task->exc_actions[i].privileged) {
388 old_exc_actions[i] = task->exc_actions[i].port;
389 task->exc_actions[i].port = IP_NULL;
390 } else {
391 old_exc_actions[i] = IP_NULL;
392 }
393 }/* for */
394
395 itk_unlock(task);
396
397 /* release the naked send rights */
398
399 if (IP_VALID(old_sself))
400 ipc_port_release_send(old_sself);
401
402 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
403 if (IP_VALID(old_exc_actions[i])) {
404 ipc_port_release_send(old_exc_actions[i]);
405 }
406 }/* for */
407
408 /* destroy the kernel port */
409 ipc_port_dealloc_kernel(old_kport);
410 }
411
412 /*
413 * Routine: ipc_thread_init
414 * Purpose:
415 * Initialize a thread's IPC state.
416 * Conditions:
417 * Nothing locked.
418 */
419
420 void
421 ipc_thread_init(
422 thread_t thread)
423 {
424 ipc_port_t kport;
425 int i;
426
427 kport = ipc_port_alloc_kernel();
428 if (kport == IP_NULL)
429 panic("ipc_thread_init");
430
431 thread->ith_self = kport;
432 thread->ith_sself = ipc_port_make_send(kport);
433
434 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i)
435 thread->exc_actions[i].port = IP_NULL;
436
437 ipc_kobject_set(kport, (ipc_kobject_t)thread, IKOT_THREAD);
438
439 ipc_kmsg_queue_init(&thread->ith_messages);
440
441 thread->ith_rpc_reply = IP_NULL;
442 }
443
444 void
445 ipc_thread_disable(
446 thread_t thread)
447 {
448 ipc_port_t kport = thread->ith_self;
449
450 if (kport != IP_NULL)
451 ipc_kobject_set(kport, IKO_NULL, IKOT_NONE);
452 }
453
454 /*
455 * Routine: ipc_thread_terminate
456 * Purpose:
457 * Clean up and destroy a thread's IPC state.
458 * Conditions:
459 * Nothing locked.
460 */
461
462 void
463 ipc_thread_terminate(
464 thread_t thread)
465 {
466 ipc_port_t kport = thread->ith_self;
467
468 if (kport != IP_NULL) {
469 int i;
470
471 if (IP_VALID(thread->ith_sself))
472 ipc_port_release_send(thread->ith_sself);
473
474 thread->ith_sself = thread->ith_self = IP_NULL;
475
476 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
477 if (IP_VALID(thread->exc_actions[i].port))
478 ipc_port_release_send(thread->exc_actions[i].port);
479 }
480
481 ipc_port_dealloc_kernel(kport);
482 }
483
484 assert(ipc_kmsg_queue_empty(&thread->ith_messages));
485
486 if (thread->ith_rpc_reply != IP_NULL)
487 ipc_port_dealloc_reply(thread->ith_rpc_reply);
488
489 thread->ith_rpc_reply = IP_NULL;
490 }
491
492 /*
493 * Routine: ipc_thread_reset
494 * Purpose:
495 * Reset the IPC state for a given Mach thread when
496 * its task enters an elevated security context.
497 * Both the thread port and its exception ports have
498 * to be reset. Its RPC reply port cannot have any
499 * rights outstanding, so it should be fine.
500 * Conditions:
501 * Nothing locked.
502 */
503
504 void
505 ipc_thread_reset(
506 thread_t thread)
507 {
508 ipc_port_t old_kport, new_kport;
509 ipc_port_t old_sself;
510 ipc_port_t old_exc_actions[EXC_TYPES_COUNT];
511 int i;
512
513 new_kport = ipc_port_alloc_kernel();
514 if (new_kport == IP_NULL)
515 panic("ipc_task_reset");
516
517 thread_mtx_lock(thread);
518
519 old_kport = thread->ith_self;
520
521 if (old_kport == IP_NULL) {
522 /* the thread is already terminated (can this happen?) */
523 thread_mtx_unlock(thread);
524 ipc_port_dealloc_kernel(new_kport);
525 return;
526 }
527
528 thread->ith_self = new_kport;
529 old_sself = thread->ith_sself;
530 thread->ith_sself = ipc_port_make_send(new_kport);
531 ipc_kobject_set(old_kport, IKO_NULL, IKOT_NONE);
532 ipc_kobject_set(new_kport, (ipc_kobject_t) thread, IKOT_THREAD);
533
534 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
535 if (!thread->exc_actions[i].privileged) {
536 old_exc_actions[i] = thread->exc_actions[i].port;
537 thread->exc_actions[i].port = IP_NULL;
538 } else {
539 old_exc_actions[i] = IP_NULL;
540 }
541 }/* for */
542
543 thread_mtx_unlock(thread);
544
545 /* release the naked send rights */
546
547 if (IP_VALID(old_sself))
548 ipc_port_release_send(old_sself);
549
550 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
551 if (IP_VALID(old_exc_actions[i])) {
552 ipc_port_release_send(old_exc_actions[i]);
553 }
554 }/* for */
555
556 /* destroy the kernel port */
557 ipc_port_dealloc_kernel(old_kport);
558 }
559
560 /*
561 * Routine: retrieve_task_self_fast
562 * Purpose:
563 * Optimized version of retrieve_task_self,
564 * that only works for the current task.
565 *
566 * Return a send right (possibly null/dead)
567 * for the task's user-visible self port.
568 * Conditions:
569 * Nothing locked.
570 */
571
572 ipc_port_t
573 retrieve_task_self_fast(
574 register task_t task)
575 {
576 register ipc_port_t port;
577
578 assert(task == current_task());
579
580 itk_lock(task);
581 assert(task->itk_self != IP_NULL);
582
583 if ((port = task->itk_sself) == task->itk_self) {
584 /* no interposing */
585
586 ip_lock(port);
587 assert(ip_active(port));
588 ip_reference(port);
589 port->ip_srights++;
590 ip_unlock(port);
591 } else
592 port = ipc_port_copy_send(port);
593 itk_unlock(task);
594
595 return port;
596 }
597
598 /*
599 * Routine: retrieve_thread_self_fast
600 * Purpose:
601 * Return a send right (possibly null/dead)
602 * for the thread's user-visible self port.
603 *
604 * Only works for the current thread.
605 *
606 * Conditions:
607 * Nothing locked.
608 */
609
610 ipc_port_t
611 retrieve_thread_self_fast(
612 thread_t thread)
613 {
614 register ipc_port_t port;
615
616 assert(thread == current_thread());
617
618 thread_mtx_lock(thread);
619
620 assert(thread->ith_self != IP_NULL);
621
622 if ((port = thread->ith_sself) == thread->ith_self) {
623 /* no interposing */
624
625 ip_lock(port);
626 assert(ip_active(port));
627 ip_reference(port);
628 port->ip_srights++;
629 ip_unlock(port);
630 }
631 else
632 port = ipc_port_copy_send(port);
633
634 thread_mtx_unlock(thread);
635
636 return port;
637 }
638
639 /*
640 * Routine: task_self_trap [mach trap]
641 * Purpose:
642 * Give the caller send rights for his own task port.
643 * Conditions:
644 * Nothing locked.
645 * Returns:
646 * MACH_PORT_NULL if there are any resource failures
647 * or other errors.
648 */
649
650 mach_port_name_t
651 task_self_trap(
652 __unused struct task_self_trap_args *args)
653 {
654 task_t task = current_task();
655 ipc_port_t sright;
656 mach_port_name_t name;
657
658 sright = retrieve_task_self_fast(task);
659 name = ipc_port_copyout_send(sright, task->itk_space);
660 return name;
661 }
662
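/*
 * Usage sketch (user space), assuming <mach/mach.h>: this trap supplies the
 * port name that libSystem caches at startup and hands back from
 * mach_task_self().  The cached name is shared, so callers do not
 * deallocate it.
 */
#include <mach/mach.h>

static void
example_task_self(void)
{
	mach_port_t task = mach_task_self();	/* send right name for our task's self port */

	/* ... pass `task` to task_info(), vm_allocate(), etc. ... */
	(void)task;
}
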
663 /*
664 * Routine: thread_self_trap [mach trap]
665 * Purpose:
666 * Give the caller send rights for his own thread port.
667 * Conditions:
668 * Nothing locked.
669 * Returns:
670 * MACH_PORT_NULL if there are any resource failures
671 * or other errors.
672 */
673
674 mach_port_name_t
675 thread_self_trap(
676 __unused struct thread_self_trap_args *args)
677 {
678 thread_t thread = current_thread();
679 task_t task = thread->task;
680 ipc_port_t sright;
681 mach_port_name_t name;
682
683 sright = retrieve_thread_self_fast(thread);
684 name = ipc_port_copyout_send(sright, task->itk_space);
685 return name;
686
687 }
688
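/*
 * Usage sketch (user space), assuming <mach/mach.h>: thread_self_trap() backs
 * mach_thread_self().  Unlike the cached task self port, every call hands the
 * caller a right it owns, so deallocate it when finished.
 */
#include <mach/mach.h>

static void
example_thread_self(void)
{
	mach_port_t thread = mach_thread_self();

	/* ... use the port, e.g. with thread_info() ... */

	mach_port_deallocate(mach_task_self(), thread);
}
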
689 /*
690 * Routine: mach_reply_port [mach trap]
691 * Purpose:
692 * Allocate a port for the caller.
693 * Conditions:
694 * Nothing locked.
695 * Returns:
696 * MACH_PORT_NULL if there are any resource failures
697 * or other errors.
698 */
699
700 mach_port_name_t
701 mach_reply_port(
702 __unused struct mach_reply_port_args *args)
703 {
704 ipc_port_t port;
705 mach_port_name_t name;
706 kern_return_t kr;
707
708 kr = ipc_port_alloc(current_task()->itk_space, &name, &port);
709 if (kr == KERN_SUCCESS)
710 ip_unlock(port);
711 else
712 name = MACH_PORT_NULL;
713 return name;
714 }
715
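/*
 * Usage sketch (user space), assuming <mach/mach.h>: mach_reply_port()
 * allocates a bare receive right in the caller's space, typically used as
 * the reply port of an RPC (MIG stubs normally obtain theirs through
 * mig_get_reply_port() rather than calling the trap directly).  The caller
 * owns the receive right and destroys it when done.
 */
#include <mach/mach.h>

static void
example_reply_port(void)
{
	mach_port_t reply = mach_reply_port();

	if (reply == MACH_PORT_NULL)
		return;		/* resource failure */

	/* ... name `reply` as msgh_local_port in a request, then mach_msg() receive ... */

	mach_port_mod_refs(mach_task_self(), reply, MACH_PORT_RIGHT_RECEIVE, -1);
}
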
716 /*
717 * Routine: thread_get_special_port [kernel call]
718 * Purpose:
719 * Clones a send right for one of the thread's
720 * special ports.
721 * Conditions:
722 * Nothing locked.
723 * Returns:
724 * KERN_SUCCESS Extracted a send right.
725 * KERN_INVALID_ARGUMENT The thread is null.
726 * KERN_FAILURE The thread is dead.
727 * KERN_INVALID_ARGUMENT Invalid special port.
728 */
729
730 kern_return_t
731 thread_get_special_port(
732 thread_t thread,
733 int which,
734 ipc_port_t *portp)
735 {
736 kern_return_t result = KERN_SUCCESS;
737 ipc_port_t *whichp;
738
739 if (thread == THREAD_NULL)
740 return (KERN_INVALID_ARGUMENT);
741
742 switch (which) {
743
744 case THREAD_KERNEL_PORT:
745 whichp = &thread->ith_sself;
746 break;
747
748 default:
749 return (KERN_INVALID_ARGUMENT);
750 }
751
752 thread_mtx_lock(thread);
753
754 if (thread->active)
755 *portp = ipc_port_copy_send(*whichp);
756 else
757 result = KERN_FAILURE;
758
759 thread_mtx_unlock(thread);
760
761 return (result);
762 }
763
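/*
 * Usage sketch (user space), assuming <mach/mach.h>: only THREAD_KERNEL_PORT
 * is accepted, and the call clones whatever send right is currently installed
 * as the thread's self port, so the caller owns the result.
 */
#include <mach/mach.h>

static kern_return_t
example_get_thread_kernel_port(mach_port_t *out_port)
{
	mach_port_t	thread = mach_thread_self();
	kern_return_t	kr;

	kr = thread_get_special_port(thread, THREAD_KERNEL_PORT, out_port);

	mach_port_deallocate(mach_task_self(), thread);
	return kr;
}
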
764 /*
765 * Routine: thread_set_special_port [kernel call]
766 * Purpose:
767 * Changes one of the thread's special ports,
768 * setting it to the supplied send right.
769 * Conditions:
770 * Nothing locked. If successful, consumes
771 * the supplied send right.
772 * Returns:
773 * KERN_SUCCESS Changed the special port.
774 * KERN_INVALID_ARGUMENT The thread is null.
775 * KERN_FAILURE The thread is dead.
776 * KERN_INVALID_ARGUMENT Invalid special port.
777 */
778
779 kern_return_t
780 thread_set_special_port(
781 thread_t thread,
782 int which,
783 ipc_port_t port)
784 {
785 kern_return_t result = KERN_SUCCESS;
786 ipc_port_t *whichp, old = IP_NULL;
787
788 if (thread == THREAD_NULL)
789 return (KERN_INVALID_ARGUMENT);
790
791 switch (which) {
792
793 case THREAD_KERNEL_PORT:
794 whichp = &thread->ith_sself;
795 break;
796
797 default:
798 return (KERN_INVALID_ARGUMENT);
799 }
800
801 thread_mtx_lock(thread);
802
803 if (thread->active) {
804 old = *whichp;
805 *whichp = port;
806 }
807 else
808 result = KERN_FAILURE;
809
810 thread_mtx_unlock(thread);
811
812 if (IP_VALID(old))
813 ipc_port_release_send(old);
814
815 return (result);
816 }
817
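/*
 * Usage sketch (user space), assuming <mach/mach.h>: installing an
 * interposing right as the thread's kernel port.  After this succeeds,
 * thread_self_trap() above (and hence mach_thread_self() in this thread)
 * hands out the interposed right rather than the real one.
 */
#include <mach/mach.h>

static kern_return_t
example_interpose_thread_port(mach_port_t interposed)
{
	mach_port_t	thread = mach_thread_self();
	kern_return_t	kr;

	kr = thread_set_special_port(thread, THREAD_KERNEL_PORT, interposed);

	mach_port_deallocate(mach_task_self(), thread);
	return kr;
}
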
818 /*
819 * Routine: task_get_special_port [kernel call]
820 * Purpose:
821 * Clones a send right for one of the task's
822 * special ports.
823 * Conditions:
824 * Nothing locked.
825 * Returns:
826 * KERN_SUCCESS Extracted a send right.
827 * KERN_INVALID_ARGUMENT The task is null.
828 * KERN_FAILURE The task/space is dead.
829 * KERN_INVALID_ARGUMENT Invalid special port.
830 */
831
832 kern_return_t
833 task_get_special_port(
834 task_t task,
835 int which,
836 ipc_port_t *portp)
837 {
838 ipc_port_t port;
839
840 if (task == TASK_NULL)
841 return KERN_INVALID_ARGUMENT;
842
843 itk_lock(task);
844 if (task->itk_self == IP_NULL) {
845 itk_unlock(task);
846 return KERN_FAILURE;
847 }
848
849 switch (which) {
850 case TASK_KERNEL_PORT:
851 port = ipc_port_copy_send(task->itk_sself);
852 break;
853
854 case TASK_NAME_PORT:
855 port = ipc_port_make_send(task->itk_nself);
856 break;
857
858 case TASK_HOST_PORT:
859 port = ipc_port_copy_send(task->itk_host);
860 break;
861
862 case TASK_BOOTSTRAP_PORT:
863 port = ipc_port_copy_send(task->itk_bootstrap);
864 break;
865
866 case TASK_WIRED_LEDGER_PORT:
867 port = ipc_port_copy_send(task->wired_ledger_port);
868 break;
869
870 case TASK_PAGED_LEDGER_PORT:
871 port = ipc_port_copy_send(task->paged_ledger_port);
872 break;
873
874 case TASK_SEATBELT_PORT:
875 port = ipc_port_copy_send(task->itk_seatbelt);
876 break;
877
878 case TASK_GSSD_PORT:
879 port = ipc_port_copy_send(task->itk_gssd);
880 break;
881
882 case TASK_ACCESS_PORT:
883 port = ipc_port_copy_send(task->itk_task_access);
884 break;
885
886 case TASK_AUTOMOUNTD_PORT:
887 port = ipc_port_copy_send(task->itk_automountd);
888 break;
889
890 default:
891 itk_unlock(task);
892 return KERN_INVALID_ARGUMENT;
893 }
894 itk_unlock(task);
895
896 *portp = port;
897 return KERN_SUCCESS;
898 }
899
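/*
 * Usage sketch (user space), assuming <mach/mach.h>: every case above hands
 * back a right the caller owns, so deallocate it when finished.  Fetching
 * the bootstrap port is the most common use.
 */
#include <mach/mach.h>

static kern_return_t
example_get_bootstrap(mach_port_t *bootstrap)
{
	return task_get_special_port(mach_task_self(), TASK_BOOTSTRAP_PORT,
	    bootstrap);
}
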
900 /*
901 * Routine: task_set_special_port [kernel call]
902 * Purpose:
903 * Changes one of the task's special ports,
904 * setting it to the supplied send right.
905 * Conditions:
906 * Nothing locked. If successful, consumes
907 * the supplied send right.
908 * Returns:
909 * KERN_SUCCESS Changed the special port.
910 * KERN_INVALID_ARGUMENT The task is null.
911 * KERN_FAILURE The task/space is dead.
912 * KERN_INVALID_ARGUMENT Invalid special port.
913 * KERN_NO_ACCESS Attempted overwrite of seatbelt port.
914 */
915
916 kern_return_t
917 task_set_special_port(
918 task_t task,
919 int which,
920 ipc_port_t port)
921 {
922 ipc_port_t *whichp;
923 ipc_port_t old;
924
925 if (task == TASK_NULL)
926 return KERN_INVALID_ARGUMENT;
927
928 switch (which) {
929 case TASK_KERNEL_PORT:
930 whichp = &task->itk_sself;
931 break;
932
933 case TASK_HOST_PORT:
934 whichp = &task->itk_host;
935 break;
936
937 case TASK_BOOTSTRAP_PORT:
938 whichp = &task->itk_bootstrap;
939 break;
940
941 case TASK_WIRED_LEDGER_PORT:
942 whichp = &task->wired_ledger_port;
943 break;
944
945 case TASK_PAGED_LEDGER_PORT:
946 whichp = &task->paged_ledger_port;
947 break;
948
949 case TASK_SEATBELT_PORT:
950 whichp = &task->itk_seatbelt;
951 break;
952
953 case TASK_GSSD_PORT:
954 whichp = &task->itk_gssd;
955 break;
956
957 case TASK_ACCESS_PORT:
958 whichp = &task->itk_task_access;
959 break;
960
961 case TASK_AUTOMOUNTD_PORT:
962 whichp = &task->itk_automountd;
963 break;
964
965 default:
966 return KERN_INVALID_ARGUMENT;
967 }/* switch */
968
969 itk_lock(task);
970 if (task->itk_self == IP_NULL) {
971 itk_unlock(task);
972 return KERN_FAILURE;
973 }
974
975 /* do not allow overwrite of seatbelt or task access ports */
976 if ((TASK_SEATBELT_PORT == which || TASK_ACCESS_PORT == which)
977 && IP_VALID(*whichp)) {
978 itk_unlock(task);
979 return KERN_NO_ACCESS;
980 }
981
982 #if CONFIG_MACF_MACH
983 if (mac_task_check_service(current_task(), task, "set_special_port")) {
984 itk_unlock(task);
985 return KERN_NO_ACCESS;
986 }
987 #endif
988
989 old = *whichp;
990 *whichp = port;
991 itk_unlock(task);
992
993 if (IP_VALID(old))
994 ipc_port_release_send(old);
995 return KERN_SUCCESS;
996 }
997
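/*
 * Usage sketch (user space), assuming <mach/mach.h>: pointing a child task at
 * a different bootstrap (name server) port before it starts running.  As
 * enforced above, overwriting the seatbelt or task access port returns
 * KERN_NO_ACCESS once it has been set.
 */
#include <mach/mach.h>

static kern_return_t
example_set_child_bootstrap(mach_port_t child_task, mach_port_t new_bootstrap)
{
	return task_set_special_port(child_task, TASK_BOOTSTRAP_PORT,
	    new_bootstrap);
}
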
998
999 /*
1000 * Routine: mach_ports_register [kernel call]
1001 * Purpose:
1002 * Stash a handful of port send rights in the task.
1003 * Child tasks will inherit these rights, but they
1004 * must use mach_ports_lookup to acquire them.
1005 *
1006 * The rights are supplied in a (wired) kalloc'd segment.
1007 * Rights which aren't supplied are assumed to be null.
1008 * Conditions:
1009 * Nothing locked. If successful, consumes
1010 * the supplied rights and memory.
1011 * Returns:
1012 * KERN_SUCCESS Stashed the port rights.
1013 * KERN_INVALID_ARGUMENT The task is null.
1014 * KERN_INVALID_ARGUMENT The task is dead.
1015 * KERN_INVALID_ARGUMENT Too many port rights supplied.
1016 */
1017
1018 kern_return_t
1019 mach_ports_register(
1020 task_t task,
1021 mach_port_array_t memory,
1022 mach_msg_type_number_t portsCnt)
1023 {
1024 ipc_port_t ports[TASK_PORT_REGISTER_MAX];
1025 unsigned int i;
1026
1027 if ((task == TASK_NULL) ||
1028 (portsCnt > TASK_PORT_REGISTER_MAX))
1029 return KERN_INVALID_ARGUMENT;
1030
1031 /*
1032 * Pad the port rights with nulls.
1033 */
1034
1035 for (i = 0; i < portsCnt; i++)
1036 ports[i] = memory[i];
1037 for (; i < TASK_PORT_REGISTER_MAX; i++)
1038 ports[i] = IP_NULL;
1039
1040 itk_lock(task);
1041 if (task->itk_self == IP_NULL) {
1042 itk_unlock(task);
1043 return KERN_INVALID_ARGUMENT;
1044 }
1045
1046 /*
1047 * Replace the old send rights with the new.
1048 * Release the old rights after unlocking.
1049 */
1050
1051 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
1052 ipc_port_t old;
1053
1054 old = task->itk_registered[i];
1055 task->itk_registered[i] = ports[i];
1056 ports[i] = old;
1057 }
1058
1059 itk_unlock(task);
1060
1061 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
1062 if (IP_VALID(ports[i]))
1063 ipc_port_release_send(ports[i]);
1064
1065 /*
1066 * Now that the operation is known to be successful,
1067 * we can free the memory.
1068 */
1069
1070 if (portsCnt != 0)
1071 kfree(memory,
1072 (vm_size_t) (portsCnt * sizeof(mach_port_t)));
1073
1074 return KERN_SUCCESS;
1075 }
1076
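/*
 * Usage sketch (user space), assuming <mach/mach.h>: stashing a small set of
 * send rights (at most TASK_PORT_REGISTER_MAX) in the task so that children,
 * which inherit them in ipc_task_init() above, can recover them with
 * mach_ports_lookup().
 */
#include <mach/mach.h>

static kern_return_t
example_register_ports(mach_port_t service_port)
{
	mach_port_t ports[1];

	ports[0] = service_port;
	return mach_ports_register(mach_task_self(), ports, 1);
}
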
1077 /*
1078 * Routine: mach_ports_lookup [kernel call]
1079 * Purpose:
1080 * Retrieves (clones) the stashed port send rights.
1081 * Conditions:
1082 * Nothing locked. If successful, the caller gets
1083 * rights and memory.
1084 * Returns:
1085 * KERN_SUCCESS Retrieved the send rights.
1086 * KERN_INVALID_ARGUMENT The task is null.
1087 * KERN_INVALID_ARGUMENT The task is dead.
1088 * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
1089 */
1090
1091 kern_return_t
1092 mach_ports_lookup(
1093 task_t task,
1094 mach_port_array_t *portsp,
1095 mach_msg_type_number_t *portsCnt)
1096 {
1097 void *memory;
1098 vm_size_t size;
1099 ipc_port_t *ports;
1100 int i;
1101
1102 if (task == TASK_NULL)
1103 return KERN_INVALID_ARGUMENT;
1104
1105 size = (vm_size_t) (TASK_PORT_REGISTER_MAX * sizeof(ipc_port_t));
1106
1107 memory = kalloc(size);
1108 if (memory == 0)
1109 return KERN_RESOURCE_SHORTAGE;
1110
1111 itk_lock(task);
1112 if (task->itk_self == IP_NULL) {
1113 itk_unlock(task);
1114
1115 kfree(memory, size);
1116 return KERN_INVALID_ARGUMENT;
1117 }
1118
1119 ports = (ipc_port_t *) memory;
1120
1121 /*
1122 * Clone port rights. Because kalloc'd memory
1123 * is wired, we won't fault while holding the task lock.
1124 */
1125
1126 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
1127 ports[i] = ipc_port_copy_send(task->itk_registered[i]);
1128
1129 itk_unlock(task);
1130
1131 *portsp = (mach_port_array_t) ports;
1132 *portsCnt = TASK_PORT_REGISTER_MAX;
1133 return KERN_SUCCESS;
1134 }
1135
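/*
 * Usage sketch (user space), assuming <mach/mach.h>: looking the registered
 * rights back up.  The MIG stub typically returns the array as out-of-line
 * memory, so the caller releases any rights it does not keep and then
 * vm_deallocate()s the array itself.
 */
#include <mach/mach.h>

static kern_return_t
example_lookup_registered_ports(void)
{
	mach_port_array_t	ports;
	mach_msg_type_number_t	count;
	kern_return_t		kr;
	unsigned int		i;

	kr = mach_ports_lookup(mach_task_self(), &ports, &count);
	if (kr != KERN_SUCCESS)
		return kr;

	for (i = 0; i < count; i++) {
		if (MACH_PORT_VALID(ports[i]))
			mach_port_deallocate(mach_task_self(), ports[i]);
	}
	vm_deallocate(mach_task_self(), (vm_address_t)ports,
	    count * sizeof(ports[0]));
	return KERN_SUCCESS;
}
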
1136 /*
1137 * Routine: convert_port_to_locked_task
1138 * Purpose:
1139 * Internal helper routine to convert from a port to a locked
1140 * task. Used by several routines that try to convert from a
1141 * task port to a reference on some task related object.
1142 * Conditions:
1143 * Nothing locked, blocking OK.
1144 */
1145 task_t
1146 convert_port_to_locked_task(ipc_port_t port)
1147 {
1148 int try_failed_count = 0;
1149
1150 while (IP_VALID(port)) {
1151 task_t task;
1152
1153 ip_lock(port);
1154 if (!ip_active(port) || (ip_kotype(port) != IKOT_TASK)) {
1155 ip_unlock(port);
1156 return TASK_NULL;
1157 }
1158 task = (task_t) port->ip_kobject;
1159 assert(task != TASK_NULL);
1160
1161 /*
1162 * Normal lock ordering puts task_lock() before ip_lock().
1163 * Attempt out-of-order locking here.
1164 */
1165 if (task_lock_try(task)) {
1166 ip_unlock(port);
1167 return(task);
1168 }
1169 try_failed_count++;
1170
1171 ip_unlock(port);
1172 mutex_pause(try_failed_count);
1173 }
1174 return TASK_NULL;
1175 }
1176
1177 /*
1178 * Routine: convert_port_to_task
1179 * Purpose:
1180 * Convert from a port to a task.
1181 * Doesn't consume the port ref; produces a task ref,
1182 * which may be null.
1183 * Conditions:
1184 * Nothing locked.
1185 */
1186 task_t
1187 convert_port_to_task(
1188 ipc_port_t port)
1189 {
1190 task_t task = TASK_NULL;
1191
1192 if (IP_VALID(port)) {
1193 ip_lock(port);
1194
1195 if ( ip_active(port) &&
1196 ip_kotype(port) == IKOT_TASK ) {
1197 task = (task_t)port->ip_kobject;
1198 assert(task != TASK_NULL);
1199
1200 task_reference_internal(task);
1201 }
1202
1203 ip_unlock(port);
1204 }
1205
1206 return (task);
1207 }
1208
1209 /*
1210 * Routine: convert_port_to_task_name
1211 * Purpose:
1212 * Convert from a port to a task name.
1213 * Doesn't consume the port ref; produces a task name ref,
1214 * which may be null.
1215 * Conditions:
1216 * Nothing locked.
1217 */
1218 task_name_t
1219 convert_port_to_task_name(
1220 ipc_port_t port)
1221 {
1222 task_name_t task = TASK_NULL;
1223
1224 if (IP_VALID(port)) {
1225 ip_lock(port);
1226
1227 if ( ip_active(port) &&
1228 (ip_kotype(port) == IKOT_TASK ||
1229 ip_kotype(port) == IKOT_TASK_NAME)) {
1230 task = (task_name_t)port->ip_kobject;
1231 assert(task != TASK_NAME_NULL);
1232
1233 task_reference_internal(task);
1234 }
1235
1236 ip_unlock(port);
1237 }
1238
1239 return (task);
1240 }
1241
1242 /*
1243 * Routine: convert_port_to_space
1244 * Purpose:
1245 * Convert from a port to a space.
1246 * Doesn't consume the port ref; produces a space ref,
1247 * which may be null.
1248 * Conditions:
1249 * Nothing locked.
1250 */
1251 ipc_space_t
1252 convert_port_to_space(
1253 ipc_port_t port)
1254 {
1255 ipc_space_t space;
1256 task_t task;
1257
1258 task = convert_port_to_locked_task(port);
1259
1260 if (task == TASK_NULL)
1261 return IPC_SPACE_NULL;
1262
1263 if (!task->active) {
1264 task_unlock(task);
1265 return IPC_SPACE_NULL;
1266 }
1267
1268 space = task->itk_space;
1269 is_reference(space);
1270 task_unlock(task);
1271 return (space);
1272 }
1273
1274 /*
1275 * Routine: convert_port_to_map
1276 * Purpose:
1277 * Convert from a port to a map.
1278 * Doesn't consume the port ref; produces a map ref,
1279 * which may be null.
1280 * Conditions:
1281 * Nothing locked.
1282 */
1283
1284 vm_map_t
1285 convert_port_to_map(
1286 ipc_port_t port)
1287 {
1288 task_t task;
1289 vm_map_t map;
1290
1291 task = convert_port_to_locked_task(port);
1292
1293 if (task == TASK_NULL)
1294 return VM_MAP_NULL;
1295
1296 if (!task->active) {
1297 task_unlock(task);
1298 return VM_MAP_NULL;
1299 }
1300
1301 map = task->map;
1302 vm_map_reference_swap(map);
1303 task_unlock(task);
1304 return map;
1305 }
1306
1307
1308 /*
1309 * Routine: convert_port_to_thread
1310 * Purpose:
1311 * Convert from a port to a thread.
1312 * Doesn't consume the port ref; produces a thread ref,
1313 * which may be null.
1314 * Conditions:
1315 * Nothing locked.
1316 */
1317
1318 thread_t
1319 convert_port_to_thread(
1320 ipc_port_t port)
1321 {
1322 thread_t thread = THREAD_NULL;
1323
1324 if (IP_VALID(port)) {
1325 ip_lock(port);
1326
1327 if ( ip_active(port) &&
1328 ip_kotype(port) == IKOT_THREAD ) {
1329 thread = (thread_t)port->ip_kobject;
1330 assert(thread != THREAD_NULL);
1331
1332 thread_reference_internal(thread);
1333 }
1334
1335 ip_unlock(port);
1336 }
1337
1338 return (thread);
1339 }
1340
1341 /*
1342 * Routine: port_name_to_thread
1343 * Purpose:
1344 * Convert from a port name to a thread reference.
1345 * A name of MACH_PORT_NULL is valid for the null thread.
1346 * Conditions:
1347 * Nothing locked.
1348 */
1349 thread_t
1350 port_name_to_thread(
1351 mach_port_name_t name)
1352 {
1353 thread_t thread = THREAD_NULL;
1354 ipc_port_t kport;
1355
1356 if (MACH_PORT_VALID(name)) {
1357 if (ipc_object_copyin(current_space(), name,
1358 MACH_MSG_TYPE_COPY_SEND,
1359 (ipc_object_t *)&kport) != KERN_SUCCESS)
1360 return (THREAD_NULL);
1361
1362 thread = convert_port_to_thread(kport);
1363
1364 if (IP_VALID(kport))
1365 ipc_port_release_send(kport);
1366 }
1367
1368 return (thread);
1369 }
1370
1371 task_t
1372 port_name_to_task(
1373 mach_port_name_t name)
1374 {
1375 ipc_port_t kern_port;
1376 kern_return_t kr;
1377 task_t task = TASK_NULL;
1378
1379 if (MACH_PORT_VALID(name)) {
1380 kr = ipc_object_copyin(current_space(), name,
1381 MACH_MSG_TYPE_COPY_SEND,
1382 (ipc_object_t *) &kern_port);
1383 if (kr != KERN_SUCCESS)
1384 return TASK_NULL;
1385
1386 task = convert_port_to_task(kern_port);
1387
1388 if (IP_VALID(kern_port))
1389 ipc_port_release_send(kern_port);
1390 }
1391 return task;
1392 }
1393
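/*
 * Usage sketch (kernel side, relying on the headers already included in this
 * file): a hypothetical routine that accepts a task port name from the
 * current space.  port_name_to_task() copies in a send right, converts it,
 * and releases the copied right itself, so the caller's only obligation is a
 * matching task_deallocate() on the returned reference.
 */
static kern_return_t
example_with_task_name(mach_port_name_t name)
{
	task_t task = port_name_to_task(name);

	if (task == TASK_NULL)
		return KERN_INVALID_ARGUMENT;

	/* ... operate on the task reference ... */

	task_deallocate(task);
	return KERN_SUCCESS;
}
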
1394 /*
1395 * Routine: convert_task_to_port
1396 * Purpose:
1397 * Convert from a task to a port.
1398 * Consumes a task ref; produces a naked send right
1399 * which may be invalid.
1400 * Conditions:
1401 * Nothing locked.
1402 */
1403
1404 ipc_port_t
1405 convert_task_to_port(
1406 task_t task)
1407 {
1408 ipc_port_t port;
1409
1410 itk_lock(task);
1411 if (task->itk_self != IP_NULL)
1412 port = ipc_port_make_send(task->itk_self);
1413 else
1414 port = IP_NULL;
1415 itk_unlock(task);
1416
1417 task_deallocate(task);
1418 return port;
1419 }
1420
1421 /*
1422 * Routine: convert_task_name_to_port
1423 * Purpose:
1424 * Convert from a task name ref to a port.
1425 * Consumes a task name ref; produces a naked send right
1426 * which may be invalid.
1427 * Conditions:
1428 * Nothing locked.
1429 */
1430
1431 ipc_port_t
1432 convert_task_name_to_port(
1433 task_name_t task_name)
1434 {
1435 ipc_port_t port;
1436
1437 itk_lock(task_name);
1438 if (task_name->itk_nself != IP_NULL)
1439 port = ipc_port_make_send(task_name->itk_nself);
1440 else
1441 port = IP_NULL;
1442 itk_unlock(task_name);
1443
1444 task_name_deallocate(task_name);
1445 return port;
1446 }
1447
1448 /*
1449 * Routine: convert_thread_to_port
1450 * Purpose:
1451 * Convert from a thread to a port.
1452 * Consumes a thread ref; produces a naked send right
1453 * which may be invalid.
1454 * Conditions:
1455 * Nothing locked.
1456 */
1457
1458 ipc_port_t
1459 convert_thread_to_port(
1460 thread_t thread)
1461 {
1462 ipc_port_t port;
1463
1464 thread_mtx_lock(thread);
1465
1466 if (thread->ith_self != IP_NULL)
1467 port = ipc_port_make_send(thread->ith_self);
1468 else
1469 port = IP_NULL;
1470
1471 thread_mtx_unlock(thread);
1472
1473 thread_deallocate(thread);
1474
1475 return (port);
1476 }
1477
1478 /*
1479 * Routine: space_deallocate
1480 * Purpose:
1481 * Deallocate a space ref produced by convert_port_to_space.
1482 * Conditions:
1483 * Nothing locked.
1484 */
1485
1486 void
1487 space_deallocate(
1488 ipc_space_t space)
1489 {
1490 if (space != IS_NULL)
1491 is_release(space);
1492 }
1493
1494 /*
1495 * Routine: thread/task_set_exception_ports [kernel call]
1496 * Purpose:
1497 * Sets the thread/task exception port, flavor and
1498 * behavior for the exception types specified by the mask.
1499 * There will be one send right per exception per valid
1500 * port.
1501 * Conditions:
1502 * Nothing locked. If successful, consumes
1503 * the supplied send right.
1504 * Returns:
1505 * KERN_SUCCESS Changed the special port.
1506 * KERN_INVALID_ARGUMENT The thread is null,
1507 * Illegal mask bit set.
1508 * Illegal exception behavior
1509 * KERN_FAILURE The thread is dead.
1510 */
1511
1512 kern_return_t
1513 thread_set_exception_ports(
1514 thread_t thread,
1515 exception_mask_t exception_mask,
1516 ipc_port_t new_port,
1517 exception_behavior_t new_behavior,
1518 thread_state_flavor_t new_flavor)
1519 {
1520 ipc_port_t old_port[EXC_TYPES_COUNT];
1521 boolean_t privileged = current_task()->sec_token.val[0] == 0;
1522 register int i;
1523
1524 if (thread == THREAD_NULL)
1525 return (KERN_INVALID_ARGUMENT);
1526
1527 if (exception_mask & ~EXC_MASK_ALL)
1528 return (KERN_INVALID_ARGUMENT);
1529
1530 if (IP_VALID(new_port)) {
1531 switch (new_behavior & ~MACH_EXCEPTION_CODES) {
1532
1533 case EXCEPTION_DEFAULT:
1534 case EXCEPTION_STATE:
1535 case EXCEPTION_STATE_IDENTITY:
1536 break;
1537
1538 default:
1539 return (KERN_INVALID_ARGUMENT);
1540 }
1541 }
1542
1543 /*
1544 * Check the validity of the thread_state_flavor by calling the
1545 * VALID_THREAD_STATE_FLAVOR architecture dependent macro defined in
1546 * osfmk/mach/ARCHITECTURE/thread_status.h
1547 */
1548 if (!VALID_THREAD_STATE_FLAVOR(new_flavor))
1549 return (KERN_INVALID_ARGUMENT);
1550
1551 thread_mtx_lock(thread);
1552
1553 if (!thread->active) {
1554 thread_mtx_unlock(thread);
1555
1556 return (KERN_FAILURE);
1557 }
1558
1559 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
1560 if (exception_mask & (1 << i)) {
1561 old_port[i] = thread->exc_actions[i].port;
1562 thread->exc_actions[i].port = ipc_port_copy_send(new_port);
1563 thread->exc_actions[i].behavior = new_behavior;
1564 thread->exc_actions[i].flavor = new_flavor;
1565 thread->exc_actions[i].privileged = privileged;
1566 }
1567 else
1568 old_port[i] = IP_NULL;
1569 }
1570
1571 thread_mtx_unlock(thread);
1572
1573 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i)
1574 if (IP_VALID(old_port[i]))
1575 ipc_port_release_send(old_port[i]);
1576
1577 if (IP_VALID(new_port)) /* consume send right */
1578 ipc_port_release_send(new_port);
1579
1580 return (KERN_SUCCESS);
1581 }
1582
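/*
 * Usage sketch (user space), assuming <mach/mach.h>: directing one exception
 * type on the current thread to a freshly made handler port.  The pairing of
 * EXCEPTION_DEFAULT with THREAD_STATE_NONE is the minimal behavior/flavor
 * combination; the caller keeps the receive right and services exception
 * messages on it (e.g. in a mach_msg() loop).
 */
#include <mach/mach.h>

static kern_return_t
example_install_thread_handler(mach_port_t *out_exc_port)
{
	mach_port_t	thread = mach_thread_self();
	mach_port_t	exc_port = MACH_PORT_NULL;
	kern_return_t	kr;

	kr = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE,
	    &exc_port);
	if (kr == KERN_SUCCESS)
		kr = mach_port_insert_right(mach_task_self(), exc_port,
		    exc_port, MACH_MSG_TYPE_MAKE_SEND);
	if (kr == KERN_SUCCESS)
		kr = thread_set_exception_ports(thread, EXC_MASK_BAD_ACCESS,
		    exc_port, EXCEPTION_DEFAULT, THREAD_STATE_NONE);

	if (kr == KERN_SUCCESS)
		*out_exc_port = exc_port;	/* caller services this port */
	else if (exc_port != MACH_PORT_NULL)
		mach_port_destroy(mach_task_self(), exc_port);

	mach_port_deallocate(mach_task_self(), thread);
	return kr;
}
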
1583 kern_return_t
1584 task_set_exception_ports(
1585 task_t task,
1586 exception_mask_t exception_mask,
1587 ipc_port_t new_port,
1588 exception_behavior_t new_behavior,
1589 thread_state_flavor_t new_flavor)
1590 {
1591 ipc_port_t old_port[EXC_TYPES_COUNT];
1592 boolean_t privileged = current_task()->sec_token.val[0] == 0;
1593 register int i;
1594
1595 if (task == TASK_NULL)
1596 return (KERN_INVALID_ARGUMENT);
1597
1598 if (exception_mask & ~EXC_MASK_ALL)
1599 return (KERN_INVALID_ARGUMENT);
1600
1601 if (IP_VALID(new_port)) {
1602 switch (new_behavior & ~MACH_EXCEPTION_CODES) {
1603
1604 case EXCEPTION_DEFAULT:
1605 case EXCEPTION_STATE:
1606 case EXCEPTION_STATE_IDENTITY:
1607 break;
1608
1609 default:
1610 return (KERN_INVALID_ARGUMENT);
1611 }
1612 }
1613
1614 itk_lock(task);
1615
1616 if (task->itk_self == IP_NULL) {
1617 itk_unlock(task);
1618
1619 return (KERN_FAILURE);
1620 }
1621
1622 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
1623 if (exception_mask & (1 << i)) {
1624 old_port[i] = task->exc_actions[i].port;
1625 task->exc_actions[i].port =
1626 ipc_port_copy_send(new_port);
1627 task->exc_actions[i].behavior = new_behavior;
1628 task->exc_actions[i].flavor = new_flavor;
1629 task->exc_actions[i].privileged = privileged;
1630 }
1631 else
1632 old_port[i] = IP_NULL;
1633 }
1634
1635 itk_unlock(task);
1636
1637 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i)
1638 if (IP_VALID(old_port[i]))
1639 ipc_port_release_send(old_port[i]);
1640
1641 if (IP_VALID(new_port)) /* consume send right */
1642 ipc_port_release_send(new_port);
1643
1644 return (KERN_SUCCESS);
1645 }
1646
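/*
 * Usage sketch (user space), assuming <mach/mach.h> and a handler port
 * prepared as in the thread-level sketch above: routing several exception
 * types for the whole task to one handler.
 */
#include <mach/mach.h>

static kern_return_t
example_install_task_handler(mach_port_t exc_port)
{
	return task_set_exception_ports(mach_task_self(),
	    EXC_MASK_BAD_ACCESS | EXC_MASK_BAD_INSTRUCTION | EXC_MASK_ARITHMETIC,
	    exc_port, EXCEPTION_DEFAULT, THREAD_STATE_NONE);
}
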
1647 /*
1648 * Routine: thread/task_swap_exception_ports [kernel call]
1649 * Purpose:
1650 * Sets the thread/task exception port, flavor and
1651 * behavior for the exception types specified by the
1652 * mask.
1653 *
1654 * The old ports, behaviors and flavors are returned.
1655 * Count specifies the array sizes on input and
1656 * the number of returned ports etc. on output. The
1657 * arrays must be large enough to hold all the returned
1658 * data; MIG returns an error otherwise. The masks
1659 * array specifies the corresponding exception type(s).
1660 *
1661 * Conditions:
1662 * Nothing locked. If successful, consumes
1663 * the supplied send right.
1664 *
1665 * Returns up to [in] CountCnt elements.
1666 * Returns:
1667 * KERN_SUCCESS Changed the special port.
1668 * KERN_INVALID_ARGUMENT The thread is null,
1669 * Illegal mask bit set.
1670 * Illegal exception behavior.
1671 * KERN_FAILURE The thread is dead.
1672 */
1673
1674 kern_return_t
1675 thread_swap_exception_ports(
1676 thread_t thread,
1677 exception_mask_t exception_mask,
1678 ipc_port_t new_port,
1679 exception_behavior_t new_behavior,
1680 thread_state_flavor_t new_flavor,
1681 exception_mask_array_t masks,
1682 mach_msg_type_number_t *CountCnt,
1683 exception_port_array_t ports,
1684 exception_behavior_array_t behaviors,
1685 thread_state_flavor_array_t flavors)
1686 {
1687 ipc_port_t old_port[EXC_TYPES_COUNT];
1688 boolean_t privileged = current_task()->sec_token.val[0] == 0;
1689 unsigned int i, j, count;
1690
1691 if (thread == THREAD_NULL)
1692 return (KERN_INVALID_ARGUMENT);
1693
1694 if (exception_mask & ~EXC_MASK_ALL)
1695 return (KERN_INVALID_ARGUMENT);
1696
1697 if (IP_VALID(new_port)) {
1698 switch (new_behavior & ~MACH_EXCEPTION_CODES) {
1699
1700 case EXCEPTION_DEFAULT:
1701 case EXCEPTION_STATE:
1702 case EXCEPTION_STATE_IDENTITY:
1703 break;
1704
1705 default:
1706 return (KERN_INVALID_ARGUMENT);
1707 }
1708 }
1709
1710 thread_mtx_lock(thread);
1711
1712 if (!thread->active) {
1713 thread_mtx_unlock(thread);
1714
1715 return (KERN_FAILURE);
1716 }
1717
1718 count = 0;
1719
1720 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
1721 if (exception_mask & (1 << i)) {
1722 for (j = 0; j < count; ++j) {
1723 /*
1724 * search for an identical entry, if found
1725 * set corresponding mask for this exception.
1726 */
1727 if ( thread->exc_actions[i].port == ports[j] &&
1728 thread->exc_actions[i].behavior == behaviors[j] &&
1729 thread->exc_actions[i].flavor == flavors[j] ) {
1730 masks[j] |= (1 << i);
1731 break;
1732 }
1733 }
1734
1735 if (j == count) {
1736 masks[j] = (1 << i);
1737 ports[j] = ipc_port_copy_send(thread->exc_actions[i].port);
1738
1739 behaviors[j] = thread->exc_actions[i].behavior;
1740 flavors[j] = thread->exc_actions[i].flavor;
1741 ++count;
1742 }
1743
1744 old_port[i] = thread->exc_actions[i].port;
1745 thread->exc_actions[i].port = ipc_port_copy_send(new_port);
1746 thread->exc_actions[i].behavior = new_behavior;
1747 thread->exc_actions[i].flavor = new_flavor;
1748 thread->exc_actions[i].privileged = privileged;
1749 if (count > *CountCnt)
1750 break;
1751 }
1752 else
1753 old_port[i] = IP_NULL;
1754 }
1755
1756 thread_mtx_unlock(thread);
1757
1758 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i)
1759 if (IP_VALID(old_port[i]))
1760 ipc_port_release_send(old_port[i]);
1761
1762 if (IP_VALID(new_port)) /* consume send right */
1763 ipc_port_release_send(new_port);
1764
1765 *CountCnt = count;
1766
1767 return (KERN_SUCCESS);
1768 }
1769
1770 kern_return_t
1771 task_swap_exception_ports(
1772 task_t task,
1773 exception_mask_t exception_mask,
1774 ipc_port_t new_port,
1775 exception_behavior_t new_behavior,
1776 thread_state_flavor_t new_flavor,
1777 exception_mask_array_t masks,
1778 mach_msg_type_number_t *CountCnt,
1779 exception_port_array_t ports,
1780 exception_behavior_array_t behaviors,
1781 thread_state_flavor_array_t flavors)
1782 {
1783 ipc_port_t old_port[EXC_TYPES_COUNT];
1784 boolean_t privileged = current_task()->sec_token.val[0] == 0;
1785 unsigned int i, j, count;
1786
1787 if (task == TASK_NULL)
1788 return (KERN_INVALID_ARGUMENT);
1789
1790 if (exception_mask & ~EXC_MASK_ALL)
1791 return (KERN_INVALID_ARGUMENT);
1792
1793 if (IP_VALID(new_port)) {
1794 switch (new_behavior & ~MACH_EXCEPTION_CODES) {
1795
1796 case EXCEPTION_DEFAULT:
1797 case EXCEPTION_STATE:
1798 case EXCEPTION_STATE_IDENTITY:
1799 break;
1800
1801 default:
1802 return (KERN_INVALID_ARGUMENT);
1803 }
1804 }
1805
1806 itk_lock(task);
1807
1808 if (task->itk_self == IP_NULL) {
1809 itk_unlock(task);
1810
1811 return (KERN_FAILURE);
1812 }
1813
1814 count = 0;
1815
1816 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
1817 if (exception_mask & (1 << i)) {
1818 for (j = 0; j < count; j++) {
1819 /*
1820 * search for an identical entry, if found
1821 * set corresponding mask for this exception.
1822 */
1823 if ( task->exc_actions[i].port == ports[j] &&
1824 task->exc_actions[i].behavior == behaviors[j] &&
1825 task->exc_actions[i].flavor == flavors[j] ) {
1826 masks[j] |= (1 << i);
1827 break;
1828 }
1829 }
1830
1831 if (j == count) {
1832 masks[j] = (1 << i);
1833 ports[j] = ipc_port_copy_send(task->exc_actions[i].port);
1834 behaviors[j] = task->exc_actions[i].behavior;
1835 flavors[j] = task->exc_actions[i].flavor;
1836 ++count;
1837 }
1838
1839 old_port[i] = task->exc_actions[i].port;
1840 task->exc_actions[i].port = ipc_port_copy_send(new_port);
1841 task->exc_actions[i].behavior = new_behavior;
1842 task->exc_actions[i].flavor = new_flavor;
1843 task->exc_actions[i].privileged = privileged;
1844 if (count > *CountCnt)
1845 break;
1846 }
1847 else
1848 old_port[i] = IP_NULL;
1849 }
1850
1851 itk_unlock(task);
1852
1853 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++)
1854 if (IP_VALID(old_port[i]))
1855 ipc_port_release_send(old_port[i]);
1856
1857 if (IP_VALID(new_port)) /* consume send right */
1858 ipc_port_release_send(new_port);
1859
1860 *CountCnt = count;
1861
1862 return (KERN_SUCCESS);
1863 }
1864
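/*
 * Usage sketch (user space), assuming <mach/mach.h>: installing a new task
 * handler while capturing the previous handlers in one call, so they can be
 * restored or forwarded to later.  The count argument is in/out: array
 * capacity on input, number of (coalesced) entries on output.
 */
#include <mach/mach.h>

static kern_return_t
example_swap_task_handlers(
	mach_port_t		new_port,
	exception_mask_t	saved_masks[EXC_TYPES_COUNT],
	mach_port_t		saved_ports[EXC_TYPES_COUNT],
	exception_behavior_t	saved_behaviors[EXC_TYPES_COUNT],
	thread_state_flavor_t	saved_flavors[EXC_TYPES_COUNT],
	mach_msg_type_number_t	*saved_count)
{
	*saved_count = EXC_TYPES_COUNT;		/* capacity on input */
	return task_swap_exception_ports(mach_task_self(), EXC_MASK_ALL,
	    new_port, EXCEPTION_DEFAULT, THREAD_STATE_NONE,
	    saved_masks, saved_count, saved_ports, saved_behaviors,
	    saved_flavors);
}
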
1865 /*
1866 * Routine: thread/task_get_exception_ports [kernel call]
1867 * Purpose:
1868 * Clones a send right for each of the thread/task's exception
1869 * ports specified in the mask and returns the behavior
1870 * and flavor of said port.
1871 *
1872 * Returns up to [in] CountCnt elements.
1873 *
1874 * Conditions:
1875 * Nothing locked.
1876 * Returns:
1877 * KERN_SUCCESS Extracted a send right.
1878 * KERN_INVALID_ARGUMENT The thread is null,
1879 * Invalid special port,
1880 * Illegal mask bit set.
1881 * KERN_FAILURE The thread is dead.
1882 */
1883
1884 kern_return_t
1885 thread_get_exception_ports(
1886 thread_t thread,
1887 exception_mask_t exception_mask,
1888 exception_mask_array_t masks,
1889 mach_msg_type_number_t *CountCnt,
1890 exception_port_array_t ports,
1891 exception_behavior_array_t behaviors,
1892 thread_state_flavor_array_t flavors)
1893 {
1894 unsigned int i, j, count;
1895
1896 if (thread == THREAD_NULL)
1897 return (KERN_INVALID_ARGUMENT);
1898
1899 if (exception_mask & ~EXC_MASK_ALL)
1900 return (KERN_INVALID_ARGUMENT);
1901
1902 thread_mtx_lock(thread);
1903
1904 if (!thread->active) {
1905 thread_mtx_unlock(thread);
1906
1907 return (KERN_FAILURE);
1908 }
1909
1910 count = 0;
1911
1912 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
1913 if (exception_mask & (1 << i)) {
1914 for (j = 0; j < count; ++j) {
1915 /*
1916 * search for an identical entry, if found
1917 * set corresponding mask for this exception.
1918 */
1919 if ( thread->exc_actions[i].port == ports[j] &&
1920 thread->exc_actions[i].behavior ==behaviors[j] &&
1921 thread->exc_actions[i].flavor == flavors[j] ) {
1922 masks[j] |= (1 << i);
1923 break;
1924 }
1925 }
1926
1927 if (j == count) {
1928 masks[j] = (1 << i);
1929 ports[j] = ipc_port_copy_send(thread->exc_actions[i].port);
1930 behaviors[j] = thread->exc_actions[i].behavior;
1931 flavors[j] = thread->exc_actions[i].flavor;
1932 ++count;
1933 if (count >= *CountCnt)
1934 break;
1935 }
1936 }
1937 }
1938
1939 thread_mtx_unlock(thread);
1940
1941 *CountCnt = count;
1942
1943 return (KERN_SUCCESS);
1944 }
1945
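/*
 * Usage sketch (user space), assuming <mach/mach.h>: reading back the current
 * thread-level handlers, e.g. before installing a replacement.  Identical
 * entries are coalesced, so the output count can be smaller than the number
 * of exception types queried, and each returned port is a cloned send right
 * the caller must release.
 */
#include <mach/mach.h>

static kern_return_t
example_read_thread_handlers(void)
{
	exception_mask_t	masks[EXC_TYPES_COUNT];
	mach_port_t		ports[EXC_TYPES_COUNT];
	exception_behavior_t	behaviors[EXC_TYPES_COUNT];
	thread_state_flavor_t	flavors[EXC_TYPES_COUNT];
	mach_msg_type_number_t	count = EXC_TYPES_COUNT;
	mach_port_t		thread = mach_thread_self();
	kern_return_t		kr;
	unsigned int		i;

	kr = thread_get_exception_ports(thread, EXC_MASK_ALL,
	    masks, &count, ports, behaviors, flavors);

	if (kr == KERN_SUCCESS) {
		for (i = 0; i < count; i++) {
			if (MACH_PORT_VALID(ports[i]))
				mach_port_deallocate(mach_task_self(), ports[i]);
		}
	}
	mach_port_deallocate(mach_task_self(), thread);
	return kr;
}
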
1946 kern_return_t
1947 task_get_exception_ports(
1948 task_t task,
1949 exception_mask_t exception_mask,
1950 exception_mask_array_t masks,
1951 mach_msg_type_number_t *CountCnt,
1952 exception_port_array_t ports,
1953 exception_behavior_array_t behaviors,
1954 thread_state_flavor_array_t flavors)
1955 {
1956 unsigned int i, j, count;
1957
1958 if (task == TASK_NULL)
1959 return (KERN_INVALID_ARGUMENT);
1960
1961 if (exception_mask & ~EXC_MASK_ALL)
1962 return (KERN_INVALID_ARGUMENT);
1963
1964 itk_lock(task);
1965
1966 if (task->itk_self == IP_NULL) {
1967 itk_unlock(task);
1968
1969 return (KERN_FAILURE);
1970 }
1971
1972 count = 0;
1973
1974 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
1975 if (exception_mask & (1 << i)) {
1976 for (j = 0; j < count; ++j) {
1977 /*
1978 * search for an identical entry, if found
1979 * set corresponding mask for this exception.
1980 */
1981 if ( task->exc_actions[i].port == ports[j] &&
1982 task->exc_actions[i].behavior == behaviors[j] &&
1983 task->exc_actions[i].flavor == flavors[j] ) {
1984 masks[j] |= (1 << i);
1985 break;
1986 }
1987 }
1988
1989 if (j == count) {
1990 masks[j] = (1 << i);
1991 ports[j] = ipc_port_copy_send(task->exc_actions[i].port);
1992 behaviors[j] = task->exc_actions[i].behavior;
1993 flavors[j] = task->exc_actions[i].flavor;
1994 ++count;
1995 if (count >= *CountCnt)
1996 break;
1997 }
1998 }
1999 }
2000
2001 itk_unlock(task);
2002
2003 *CountCnt = count;
2004
2005 return (KERN_SUCCESS);
2006 }