[apple/xnu.git] osfmk/kern/ipc_tt.c (blob 24f41f7545d84cb8b1ac1bb1b692a20f37a4d40d)
1 /*
2 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 /*
23 * @OSF_COPYRIGHT@
24 */
25 /*
26 * Mach Operating System
27 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
28 * All Rights Reserved.
29 *
30 * Permission to use, copy, modify and distribute this software and its
31 * documentation is hereby granted, provided that both the copyright
32 * notice and this permission notice appear in all copies of the
33 * software, derivative works or modified versions, and any portions
34 * thereof, and that both notices appear in supporting documentation.
35 *
36 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
37 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
38 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
39 *
40 * Carnegie Mellon requests users of this software to return to
41 *
42 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
43 * School of Computer Science
44 * Carnegie Mellon University
45 * Pittsburgh PA 15213-3890
46 *
47 * any improvements or extensions that they make and grant Carnegie Mellon
48 * the rights to redistribute these changes.
49 */
50 /*
51 */
52
53 /*
54 * File: ipc_tt.c
55 * Purpose:
56 * Task and thread related IPC functions.
57 */
58
59 #include <mach/mach_types.h>
60 #include <mach/boolean.h>
61 #include <mach/kern_return.h>
62 #include <mach/mach_param.h>
63 #include <mach/task_special_ports.h>
64 #include <mach/thread_special_ports.h>
65 #include <mach/thread_status.h>
66 #include <mach/exception_types.h>
67 #include <mach/memory_object_types.h>
68 #include <mach/mach_traps.h>
69 #include <mach/task_server.h>
70 #include <mach/thread_act_server.h>
71 #include <mach/mach_host_server.h>
72 #include <mach/host_priv_server.h>
73 #include <mach/vm_map_server.h>
74
75 #include <kern/kern_types.h>
76 #include <kern/host.h>
77 #include <kern/ipc_kobject.h>
78 #include <kern/ipc_tt.h>
79 #include <kern/kalloc.h>
80 #include <kern/thread.h>
81 #include <kern/misc_protos.h>
82
83 #include <vm/vm_map.h>
84 #include <vm/vm_pageout.h>
85 #include <vm/vm_shared_memory_server.h>
86 #include <vm/vm_protos.h>
87
88 /* forward declarations */
89 task_t convert_port_to_locked_task(ipc_port_t port);
90
91
92 /*
93 * Routine: ipc_task_init
94 * Purpose:
95 * Initialize a task's IPC state.
96 *
97 * If the parent is non-null, some state will be inherited from it.
98 * The parent must be appropriately initialized.
99 * Conditions:
100 * Nothing locked.
101 */
102
103 void
104 ipc_task_init(
105 task_t task,
106 task_t parent)
107 {
108 ipc_space_t space;
109 ipc_port_t kport;
110 kern_return_t kr;
111 int i;
112
113
114 kr = ipc_space_create(&ipc_table_entries[0], &space);
115 if (kr != KERN_SUCCESS)
116 panic("ipc_task_init");
117
118
119 kport = ipc_port_alloc_kernel();
120 if (kport == IP_NULL)
121 panic("ipc_task_init");
122
123 itk_lock_init(task);
124 task->itk_self = kport;
125 task->itk_sself = ipc_port_make_send(kport);
126 task->itk_space = space;
127 space->is_fast = FALSE;
128
129 if (parent == TASK_NULL) {
130 ipc_port_t port;
131
132 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
133 task->exc_actions[i].port = IP_NULL;
134 }/* for */
135
136 kr = host_get_host_port(host_priv_self(), &port);
137 assert(kr == KERN_SUCCESS);
138 task->itk_host = port;
139
140 task->itk_bootstrap = IP_NULL;
141
142 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
143 task->itk_registered[i] = IP_NULL;
144 } else {
145 itk_lock(parent);
146 assert(parent->itk_self != IP_NULL);
147
148 /* inherit registered ports */
149
150 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
151 task->itk_registered[i] =
152 ipc_port_copy_send(parent->itk_registered[i]);
153
154 /* inherit exception and bootstrap ports */
155
156 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
157 task->exc_actions[i].port =
158 ipc_port_copy_send(parent->exc_actions[i].port);
159 task->exc_actions[i].flavor =
160 parent->exc_actions[i].flavor;
161 task->exc_actions[i].behavior =
162 parent->exc_actions[i].behavior;
163 task->exc_actions[i].privileged =
164 parent->exc_actions[i].privileged;
165 }/* for */
166 task->itk_host =
167 ipc_port_copy_send(parent->itk_host);
168
169 task->itk_bootstrap =
170 ipc_port_copy_send(parent->itk_bootstrap);
171
172 itk_unlock(parent);
173 }
174 }
175
176 /*
177 * Routine: ipc_task_enable
178 * Purpose:
179 * Enable a task for IPC access.
180 * Conditions:
181 * Nothing locked.
182 */
183
184 void
185 ipc_task_enable(
186 task_t task)
187 {
188 ipc_port_t kport;
189
190 itk_lock(task);
191 kport = task->itk_self;
192 if (kport != IP_NULL)
193 ipc_kobject_set(kport, (ipc_kobject_t) task, IKOT_TASK);
194 itk_unlock(task);
195 }
196
197 /*
198 * Routine: ipc_task_disable
199 * Purpose:
200 * Disable IPC access to a task.
201 * Conditions:
202 * Nothing locked.
203 */
204
205 void
206 ipc_task_disable(
207 task_t task)
208 {
209 ipc_port_t kport;
210
211 itk_lock(task);
212 kport = task->itk_self;
213 if (kport != IP_NULL)
214 ipc_kobject_set(kport, IKO_NULL, IKOT_NONE);
215 itk_unlock(task);
216 }
217
218 /*
219 * Routine: ipc_task_terminate
220 * Purpose:
221 * Clean up and destroy a task's IPC state.
222 * Conditions:
223 * Nothing locked. The task must be suspended.
224 * (Or the current thread must be in the task.)
225 */
226
227 void
228 ipc_task_terminate(
229 task_t task)
230 {
231 ipc_port_t kport;
232 int i;
233
234 itk_lock(task);
235 kport = task->itk_self;
236
237 if (kport == IP_NULL) {
238 /* the task is already terminated (can this happen?) */
239 itk_unlock(task);
240 return;
241 }
242
243 task->itk_self = IP_NULL;
244 itk_unlock(task);
245
246 /* release the naked send rights */
247
248 if (IP_VALID(task->itk_sself))
249 ipc_port_release_send(task->itk_sself);
250
251 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
252 if (IP_VALID(task->exc_actions[i].port)) {
253 ipc_port_release_send(task->exc_actions[i].port);
254 }
255 }
256
257 if (IP_VALID(task->itk_host))
258 ipc_port_release_send(task->itk_host);
259
260 if (IP_VALID(task->itk_bootstrap))
261 ipc_port_release_send(task->itk_bootstrap);
262
263 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
264 if (IP_VALID(task->itk_registered[i]))
265 ipc_port_release_send(task->itk_registered[i]);
266
267 ipc_port_release_send(task->wired_ledger_port);
268 ipc_port_release_send(task->paged_ledger_port);
269
270 /* destroy the kernel port */
271 ipc_port_dealloc_kernel(kport);
272 }
273
274 /*
275 * Routine: ipc_task_reset
276 * Purpose:
277 * Reset a task's IPC state to protect it when
278 * it enters an elevated security context.
279 * Conditions:
280 * Nothing locked. The task must be suspended.
281 * (Or the current thread must be in the task.)
282 */
283
284 void
285 ipc_task_reset(
286 task_t task)
287 {
288 ipc_port_t old_kport, new_kport;
289 ipc_port_t old_sself;
290 ipc_port_t old_exc_actions[EXC_TYPES_COUNT];
291 int i;
292
293 new_kport = ipc_port_alloc_kernel();
294 if (new_kport == IP_NULL)
295 panic("ipc_task_reset");
296
297 itk_lock(task);
298
299 old_kport = task->itk_self;
300
301 if (old_kport == IP_NULL) {
302 /* the task is already terminated (can this happen?) */
303 itk_unlock(task);
304 ipc_port_dealloc_kernel(new_kport);
305 return;
306 }
307
308 task->itk_self = new_kport;
309 old_sself = task->itk_sself;
310 task->itk_sself = ipc_port_make_send(new_kport);
311 ipc_kobject_set(old_kport, IKO_NULL, IKOT_NONE);
312 ipc_kobject_set(new_kport, (ipc_kobject_t) task, IKOT_TASK);
313
314 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
315 if (!task->exc_actions[i].privileged) {
316 old_exc_actions[i] = task->exc_actions[i].port;
317 task->exc_actions[i].port = IP_NULL;
318 } else {
319 old_exc_actions[i] = IP_NULL;
320 }
321 }/* for */
322
323 itk_unlock(task);
324
325 /* release the naked send rights */
326
327 if (IP_VALID(old_sself))
328 ipc_port_release_send(old_sself);
329
330 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
331 if (IP_VALID(old_exc_actions[i])) {
332 ipc_port_release_send(old_exc_actions[i]);
333 }
334 }/* for */
335
336 /* destroy the kernel port */
337 ipc_port_dealloc_kernel(old_kport);
338 }
339
340 /*
341 * Routine: ipc_thread_init
342 * Purpose:
343 * Initialize a thread's IPC state.
344 * Conditions:
345 * Nothing locked.
346 */
347
348 void
349 ipc_thread_init(
350 thread_t thread)
351 {
352 ipc_port_t kport;
353 int i;
354
355 kport = ipc_port_alloc_kernel();
356 if (kport == IP_NULL)
357 panic("ipc_thread_init");
358
359 thread->ith_self = kport;
360 thread->ith_sself = ipc_port_make_send(kport);
361
362 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i)
363 thread->exc_actions[i].port = IP_NULL;
364
365 ipc_kobject_set(kport, (ipc_kobject_t)thread, IKOT_THREAD);
366
367 ipc_kmsg_queue_init(&thread->ith_messages);
368
369 thread->ith_rpc_reply = IP_NULL;
370 }
371
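/*
 * Routine: ipc_thread_disable
 * Purpose:
 * Disable IPC access to a thread by dissociating its
 * kernel port from the thread (cf. ipc_task_disable).
 */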
372 void
373 ipc_thread_disable(
374 thread_t thread)
375 {
376 ipc_port_t kport = thread->ith_self;
377
378 if (kport != IP_NULL)
379 ipc_kobject_set(kport, IKO_NULL, IKOT_NONE);
380 }
381
382 /*
383 * Routine: ipc_thread_terminate
384 * Purpose:
385 * Clean up and destroy a thread's IPC state.
386 * Conditions:
387 * Nothing locked.
388 */
389
390 void
391 ipc_thread_terminate(
392 thread_t thread)
393 {
394 ipc_port_t kport = thread->ith_self;
395
396 if (kport != IP_NULL) {
397 int i;
398
399 if (IP_VALID(thread->ith_sself))
400 ipc_port_release_send(thread->ith_sself);
401
402 thread->ith_sself = thread->ith_self = IP_NULL;
403
404 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
405 if (IP_VALID(thread->exc_actions[i].port))
406 ipc_port_release_send(thread->exc_actions[i].port);
407 }
408
409 ipc_port_dealloc_kernel(kport);
410 }
411
412 assert(ipc_kmsg_queue_empty(&thread->ith_messages));
413
414 if (thread->ith_rpc_reply != IP_NULL)
415 ipc_port_dealloc_reply(thread->ith_rpc_reply);
416
417 thread->ith_rpc_reply = IP_NULL;
418 }
419
420 /*
421 * Routine: ipc_thread_reset
422 * Purpose:
423 * Reset the IPC state for a given Mach thread when
424 * its task enters an elevated security context.
425 * Both the thread port and its exception ports have
426 * to be reset. Its RPC reply port cannot have any
427 * rights outstanding, so it should be fine.
428 * Conditions:
429 * Nothing locked.
430 */
431
432 void
433 ipc_thread_reset(
434 thread_t thread)
435 {
436 ipc_port_t old_kport, new_kport;
437 ipc_port_t old_sself;
438 ipc_port_t old_exc_actions[EXC_TYPES_COUNT];
439 int i;
440
441 new_kport = ipc_port_alloc_kernel();
442 if (new_kport == IP_NULL)
443 panic("ipc_thread_reset");
444
445 thread_mtx_lock(thread);
446
447 old_kport = thread->ith_self;
448
449 if (old_kport == IP_NULL) {
450 /* the thread is already terminated (can this happen?) */
451 thread_mtx_unlock(thread);
452 ipc_port_dealloc_kernel(new_kport);
453 return;
454 }
455
456 thread->ith_self = new_kport;
457 old_sself = thread->ith_sself;
458 thread->ith_sself = ipc_port_make_send(new_kport);
459 ipc_kobject_set(old_kport, IKO_NULL, IKOT_NONE);
460 ipc_kobject_set(new_kport, (ipc_kobject_t) thread, IKOT_THREAD);
461
462 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
463 if (!thread->exc_actions[i].privileged) {
464 old_exc_actions[i] = thread->exc_actions[i].port;
465 thread->exc_actions[i].port = IP_NULL;
466 } else {
467 old_exc_actions[i] = IP_NULL;
468 }
469 }/* for */
470
471 thread_mtx_unlock(thread);
472
473 /* release the naked send rights */
474
475 if (IP_VALID(old_sself))
476 ipc_port_release_send(old_sself);
477
478 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
479 if (IP_VALID(old_exc_actions[i])) {
480 ipc_port_release_send(old_exc_actions[i]);
481 }
482 }/* for */
483
484 /* destroy the kernel port */
485 ipc_port_dealloc_kernel(old_kport);
486 }
487
488 /*
489 * Routine: retrieve_task_self_fast
490 * Purpose:
491 * Optimized version of retrieve_task_self,
492 * that only works for the current task.
493 *
494 * Return a send right (possibly null/dead)
495 * for the task's user-visible self port.
496 * Conditions:
497 * Nothing locked.
498 */
499
500 ipc_port_t
501 retrieve_task_self_fast(
502 register task_t task)
503 {
504 register ipc_port_t port;
505
506 assert(task == current_task());
507
508 itk_lock(task);
509 assert(task->itk_self != IP_NULL);
510
511 if ((port = task->itk_sself) == task->itk_self) {
512 /* no interposing */
513
514 ip_lock(port);
515 assert(ip_active(port));
516 ip_reference(port);
517 port->ip_srights++;
518 ip_unlock(port);
519 } else
520 port = ipc_port_copy_send(port);
521 itk_unlock(task);
522
523 return port;
524 }
525
526 /*
527 * Routine: retrieve_thread_self_fast
528 * Purpose:
529 * Return a send right (possibly null/dead)
530 * for the thread's user-visible self port.
531 *
532 * Only works for the current thread.
533 *
534 * Conditions:
535 * Nothing locked.
536 */
537
538 ipc_port_t
539 retrieve_thread_self_fast(
540 thread_t thread)
541 {
542 register ipc_port_t port;
543
544 assert(thread == current_thread());
545
546 thread_mtx_lock(thread);
547
548 assert(thread->ith_self != IP_NULL);
549
550 if ((port = thread->ith_sself) == thread->ith_self) {
551 /* no interposing */
552
553 ip_lock(port);
554 assert(ip_active(port));
555 ip_reference(port);
556 port->ip_srights++;
557 ip_unlock(port);
558 }
559 else
560 port = ipc_port_copy_send(port);
561
562 thread_mtx_unlock(thread);
563
564 return port;
565 }
566
567 /*
568 * Routine: task_self_trap [mach trap]
569 * Purpose:
570 * Give the caller send rights for his own task port.
571 * Conditions:
572 * Nothing locked.
573 * Returns:
574 * MACH_PORT_NULL if there are any resource failures
575 * or other errors.
576 */
577
578 mach_port_name_t
579 task_self_trap(
580 __unused struct task_self_trap_args *args)
581 {
582 task_t task = current_task();
583 ipc_port_t sright;
584 mach_port_name_t name;
585
586 sright = retrieve_task_self_fast(task);
587 name = ipc_port_copyout_send(sright, task->itk_space);
588 return name;
589 }
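/*
 * [Editorial usage sketch, not part of the original source.]  User code
 * normally reaches this trap through mach_task_self(); a minimal sketch,
 * assuming the standard userland Mach headers:
 *
 *      mach_port_t self = mach_task_self();
 *      task_basic_info_data_t info;
 *      mach_msg_type_number_t count = TASK_BASIC_INFO_COUNT;
 *      kern_return_t kr = task_info(self, TASK_BASIC_INFO,
 *          (task_info_t)&info, &count);
 */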
590
591 /*
592 * Routine: thread_self_trap [mach trap]
593 * Purpose:
594 * Give the caller send rights for his own thread port.
595 * Conditions:
596 * Nothing locked.
597 * Returns:
598 * MACH_PORT_NULL if there are any resource failures
599 * or other errors.
600 */
601
602 mach_port_name_t
603 thread_self_trap(
604 __unused struct thread_self_trap_args *args)
605 {
606 thread_t thread = current_thread();
607 task_t task = thread->task;
608 ipc_port_t sright;
609 mach_port_name_t name;
610
611 sright = retrieve_thread_self_fast(thread);
612 name = ipc_port_copyout_send(sright, task->itk_space);
613 return name;
614
615 }
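/*
 * [Editorial usage sketch, not part of the original source.]  User code
 * reaches this trap through mach_thread_self().  Each call returns a
 * send-right reference that the caller is expected to deallocate when
 * finished with it; a minimal sketch, assuming the userland headers:
 *
 *      mach_port_t thread = mach_thread_self();
 *      ... use the right, e.g. in thread_info() ...
 *      mach_port_deallocate(mach_task_self(), thread);
 */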
616
617 /*
618 * Routine: mach_reply_port [mach trap]
619 * Purpose:
620 * Allocate a port for the caller.
621 * Conditions:
622 * Nothing locked.
623 * Returns:
624 * MACH_PORT_NULL if there are any resource failures
625 * or other errors.
626 */
627
628 mach_port_name_t
629 mach_reply_port(
630 __unused struct mach_reply_port_args *args)
631 {
632 ipc_port_t port;
633 mach_port_name_t name;
634 kern_return_t kr;
635
636 kr = ipc_port_alloc(current_task()->itk_space, &name, &port);
637 if (kr == KERN_SUCCESS)
638 ip_unlock(port);
639 else
640 name = MACH_PORT_NULL;
641 return name;
642 }
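/*
 * [Editorial usage sketch, not part of the original source.]  User code
 * typically uses this trap (directly, or via mig_get_reply_port()) to
 * obtain a receive right for RPC replies; a minimal sketch:
 *
 *      mach_port_t reply = mach_reply_port();
 *      ...
 *      pass `reply` as msgh_local_port with MACH_MSG_TYPE_MAKE_SEND_ONCE
 *      in an outgoing request, then mach_msg() receive the reply on it.
 */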
643
644 /*
645 * Routine: thread_get_special_port [kernel call]
646 * Purpose:
647 * Clones a send right for one of the thread's
648 * special ports.
649 * Conditions:
650 * Nothing locked.
651 * Returns:
652 * KERN_SUCCESS Extracted a send right.
653 * KERN_INVALID_ARGUMENT The thread is null.
654 * KERN_FAILURE The thread is dead.
655 * KERN_INVALID_ARGUMENT Invalid special port.
656 */
657
658 kern_return_t
659 thread_get_special_port(
660 thread_t thread,
661 int which,
662 ipc_port_t *portp)
663 {
664 kern_return_t result = KERN_SUCCESS;
665 ipc_port_t *whichp;
666
667 if (thread == THREAD_NULL)
668 return (KERN_INVALID_ARGUMENT);
669
670 switch (which) {
671
672 case THREAD_KERNEL_PORT:
673 whichp = &thread->ith_sself;
674 break;
675
676 default:
677 return (KERN_INVALID_ARGUMENT);
678 }
679
680 thread_mtx_lock(thread);
681
682 if (thread->active)
683 *portp = ipc_port_copy_send(*whichp);
684 else
685 result = KERN_FAILURE;
686
687 thread_mtx_unlock(thread);
688
689 return (result);
690 }
691
692 /*
693 * Routine: thread_set_special_port [kernel call]
694 * Purpose:
695 * Changes one of the thread's special ports,
696 * setting it to the supplied send right.
697 * Conditions:
698 * Nothing locked. If successful, consumes
699 * the supplied send right.
700 * Returns:
701 * KERN_SUCCESS Changed the special port.
702 * KERN_INVALID_ARGUMENT The thread is null.
703 * KERN_FAILURE The thread is dead.
704 * KERN_INVALID_ARGUMENT Invalid special port.
705 */
706
707 kern_return_t
708 thread_set_special_port(
709 thread_t thread,
710 int which,
711 ipc_port_t port)
712 {
713 kern_return_t result = KERN_SUCCESS;
714 ipc_port_t *whichp, old = IP_NULL;
715
716 if (thread == THREAD_NULL)
717 return (KERN_INVALID_ARGUMENT);
718
719 switch (which) {
720
721 case THREAD_KERNEL_PORT:
722 whichp = &thread->ith_sself;
723 break;
724
725 default:
726 return (KERN_INVALID_ARGUMENT);
727 }
728
729 thread_mtx_lock(thread);
730
731 if (thread->active) {
732 old = *whichp;
733 *whichp = port;
734 }
735 else
736 result = KERN_FAILURE;
737
738 thread_mtx_unlock(thread);
739
740 if (IP_VALID(old))
741 ipc_port_release_send(old);
742
743 return (result);
744 }
745
746 /*
747 * Routine: task_get_special_port [kernel call]
748 * Purpose:
749 * Clones a send right for one of the task's
750 * special ports.
751 * Conditions:
752 * Nothing locked.
753 * Returns:
754 * KERN_SUCCESS Extracted a send right.
755 * KERN_INVALID_ARGUMENT The task is null.
756 * KERN_FAILURE The task/space is dead.
757 * KERN_INVALID_ARGUMENT Invalid special port.
758 */
759
760 kern_return_t
761 task_get_special_port(
762 task_t task,
763 int which,
764 ipc_port_t *portp)
765 {
766 ipc_port_t *whichp;
767 ipc_port_t port;
768
769 if (task == TASK_NULL)
770 return KERN_INVALID_ARGUMENT;
771
772 switch (which) {
773 case TASK_KERNEL_PORT:
774 whichp = &task->itk_sself;
775 break;
776
777 case TASK_HOST_PORT:
778 whichp = &task->itk_host;
779 break;
780
781 case TASK_BOOTSTRAP_PORT:
782 whichp = &task->itk_bootstrap;
783 break;
784
785 case TASK_WIRED_LEDGER_PORT:
786 whichp = &task->wired_ledger_port;
787 break;
788
789 case TASK_PAGED_LEDGER_PORT:
790 whichp = &task->paged_ledger_port;
791 break;
792
793 default:
794 return KERN_INVALID_ARGUMENT;
795 }
796
797 itk_lock(task);
798 if (task->itk_self == IP_NULL) {
799 itk_unlock(task);
800 return KERN_FAILURE;
801 }
802
803 port = ipc_port_copy_send(*whichp);
804 itk_unlock(task);
805
806 *portp = port;
807 return KERN_SUCCESS;
808 }
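/*
 * [Editorial usage sketch, not part of the original source.]  A common
 * user-space use is fetching the current task's bootstrap port, which is
 * what the task_get_bootstrap_port() convenience macro expands to:
 *
 *      mach_port_t bootstrap;
 *      kern_return_t kr = task_get_special_port(mach_task_self(),
 *          TASK_BOOTSTRAP_PORT, &bootstrap);
 */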
809
810 /*
811 * Routine: task_set_special_port [kernel call]
812 * Purpose:
813 * Changes one of the task's special ports,
814 * setting it to the supplied send right.
815 * Conditions:
816 * Nothing locked. If successful, consumes
817 * the supplied send right.
818 * Returns:
819 * KERN_SUCCESS Changed the special port.
820 * KERN_INVALID_ARGUMENT The task is null.
821 * KERN_FAILURE The task/space is dead.
822 * KERN_INVALID_ARGUMENT Invalid special port.
823 */
824
825 kern_return_t
826 task_set_special_port(
827 task_t task,
828 int which,
829 ipc_port_t port)
830 {
831 ipc_port_t *whichp;
832 ipc_port_t old;
833
834 if (task == TASK_NULL)
835 return KERN_INVALID_ARGUMENT;
836
837 switch (which) {
838 case TASK_KERNEL_PORT:
839 whichp = &task->itk_sself;
840 break;
841
842 case TASK_HOST_PORT:
843 whichp = &task->itk_host;
844 break;
845
846 case TASK_BOOTSTRAP_PORT:
847 whichp = &task->itk_bootstrap;
848 break;
849
850 case TASK_WIRED_LEDGER_PORT:
851 whichp = &task->wired_ledger_port;
852 break;
853
854 case TASK_PAGED_LEDGER_PORT:
855 whichp = &task->paged_ledger_port;
856 break;
857
858 default:
859 return KERN_INVALID_ARGUMENT;
860 }/* switch */
861
862 itk_lock(task);
863 if (task->itk_self == IP_NULL) {
864 itk_unlock(task);
865 return KERN_FAILURE;
866 }
867
868 old = *whichp;
869 *whichp = port;
870 itk_unlock(task);
871
872 if (IP_VALID(old))
873 ipc_port_release_send(old);
874 return KERN_SUCCESS;
875 }
876
877
878 /*
879 * Routine: mach_ports_register [kernel call]
880 * Purpose:
881 * Stash a handful of port send rights in the task.
882 * Child tasks will inherit these rights, but they
883 * must use mach_ports_lookup to acquire them.
884 *
885 * The rights are supplied in a (wired) kalloc'd segment.
886 * Rights which aren't supplied are assumed to be null.
887 * Conditions:
888 * Nothing locked. If successful, consumes
889 * the supplied rights and memory.
890 * Returns:
891 * KERN_SUCCESS Stashed the port rights.
892 * KERN_INVALID_ARGUMENT The task is null.
893 * KERN_INVALID_ARGUMENT The task is dead.
894 * KERN_INVALID_ARGUMENT Too many port rights supplied.
895 */
896
897 kern_return_t
898 mach_ports_register(
899 task_t task,
900 mach_port_array_t memory,
901 mach_msg_type_number_t portsCnt)
902 {
903 ipc_port_t ports[TASK_PORT_REGISTER_MAX];
904 unsigned int i;
905
906 if ((task == TASK_NULL) ||
907 (portsCnt > TASK_PORT_REGISTER_MAX))
908 return KERN_INVALID_ARGUMENT;
909
910 /*
911 * Pad the port rights with nulls.
912 */
913
914 for (i = 0; i < portsCnt; i++)
915 ports[i] = memory[i];
916 for (; i < TASK_PORT_REGISTER_MAX; i++)
917 ports[i] = IP_NULL;
918
919 itk_lock(task);
920 if (task->itk_self == IP_NULL) {
921 itk_unlock(task);
922 return KERN_INVALID_ARGUMENT;
923 }
924
925 /*
926 * Replace the old send rights with the new.
927 * Release the old rights after unlocking.
928 */
929
930 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
931 ipc_port_t old;
932
933 old = task->itk_registered[i];
934 task->itk_registered[i] = ports[i];
935 ports[i] = old;
936 }
937
938 itk_unlock(task);
939
940 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
941 if (IP_VALID(ports[i]))
942 ipc_port_release_send(ports[i]);
943
944 /*
945 * Now that the operation is known to be successful,
946 * we can free the memory.
947 */
948
949 if (portsCnt != 0)
950 kfree(memory,
951 (vm_size_t) (portsCnt * sizeof(mach_port_t)));
952
953 return KERN_SUCCESS;
954 }
955
956 /*
957 * Routine: mach_ports_lookup [kernel call]
958 * Purpose:
959 * Retrieves (clones) the stashed port send rights.
960 * Conditions:
961 * Nothing locked. If successful, the caller gets
962 * rights and memory.
963 * Returns:
964 * KERN_SUCCESS Retrieved the send rights.
965 * KERN_INVALID_ARGUMENT The task is null.
966 * KERN_INVALID_ARGUMENT The task is dead.
967 * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
968 */
969
970 kern_return_t
971 mach_ports_lookup(
972 task_t task,
973 mach_port_array_t *portsp,
974 mach_msg_type_number_t *portsCnt)
975 {
976 void *memory;
977 vm_size_t size;
978 ipc_port_t *ports;
979 int i;
980
981 if (task == TASK_NULL)
982 return KERN_INVALID_ARGUMENT;
983
984 size = (vm_size_t) (TASK_PORT_REGISTER_MAX * sizeof(ipc_port_t));
985
986 memory = kalloc(size);
987 if (memory == 0)
988 return KERN_RESOURCE_SHORTAGE;
989
990 itk_lock(task);
991 if (task->itk_self == IP_NULL) {
992 itk_unlock(task);
993
994 kfree(memory, size);
995 return KERN_INVALID_ARGUMENT;
996 }
997
998 ports = (ipc_port_t *) memory;
999
1000 /*
1001 * Clone port rights. Because kalloc'd memory
1002 * is wired, we won't fault while holding the task lock.
1003 */
1004
1005 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
1006 ports[i] = ipc_port_copy_send(task->itk_registered[i]);
1007
1008 itk_unlock(task);
1009
1010 *portsp = (mach_port_array_t) ports;
1011 *portsCnt = TASK_PORT_REGISTER_MAX;
1012 return KERN_SUCCESS;
1013 }
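/*
 * [Editorial usage sketch, not part of the original source.]  The
 * register/lookup pair stashes up to TASK_PORT_REGISTER_MAX send rights
 * that child tasks can retrieve; a sketch, where `some_port` is a
 * hypothetical send right held by the caller:
 *
 *      mach_port_t init_ports[1] = { some_port };
 *      mach_ports_register(mach_task_self(), init_ports, 1);
 *
 *      later, possibly in a child task:
 *      mach_port_array_t ports;
 *      mach_msg_type_number_t count;
 *      mach_ports_lookup(mach_task_self(), &ports, &count);
 */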
1014
1015 /*
1016 * Routine: convert_port_to_locked_task
1017 * Purpose:
1018 * Internal helper routine to convert from a port to a locked
1019 * task. Used by several routines that try to convert from a
1020 * task port to a reference on some task related object.
1021 * Conditions:
1022 * Nothing locked, blocking OK.
1023 */
1024 task_t
1025 convert_port_to_locked_task(ipc_port_t port)
1026 {
1027 while (IP_VALID(port)) {
1028 task_t task;
1029
1030 ip_lock(port);
1031 if (!ip_active(port) || (ip_kotype(port) != IKOT_TASK)) {
1032 ip_unlock(port);
1033 return TASK_NULL;
1034 }
1035 task = (task_t) port->ip_kobject;
1036 assert(task != TASK_NULL);
1037
1038 /*
1039 * Normal lock ordering puts task_lock() before ip_lock().
1040 * Attempt out-of-order locking here.
1041 */
1042 if (task_lock_try(task)) {
1043 ip_unlock(port);
1044 return(task);
1045 }
1046
1047 ip_unlock(port);
1048 mutex_pause();
1049 }
1050 return TASK_NULL;
1051 }
1052
1053 /*
1054 * Routine: convert_port_to_task
1055 * Purpose:
1056 * Convert from a port to a task.
1057 * Doesn't consume the port ref; produces a task ref,
1058 * which may be null.
1059 * Conditions:
1060 * Nothing locked.
1061 */
1062 task_t
1063 convert_port_to_task(
1064 ipc_port_t port)
1065 {
1066 task_t task = TASK_NULL;
1067
1068 if (IP_VALID(port)) {
1069 ip_lock(port);
1070
1071 if ( ip_active(port) &&
1072 ip_kotype(port) == IKOT_TASK ) {
1073 task = (task_t)port->ip_kobject;
1074 assert(task != TASK_NULL);
1075
1076 task_reference_internal(task);
1077 }
1078
1079 ip_unlock(port);
1080 }
1081
1082 return (task);
1083 }
1084
1085 /*
1086 * Routine: convert_port_to_space
1087 * Purpose:
1088 * Convert from a port to a space.
1089 * Doesn't consume the port ref; produces a space ref,
1090 * which may be null.
1091 * Conditions:
1092 * Nothing locked.
1093 */
1094 ipc_space_t
1095 convert_port_to_space(
1096 ipc_port_t port)
1097 {
1098 ipc_space_t space;
1099 task_t task;
1100
1101 task = convert_port_to_locked_task(port);
1102
1103 if (task == TASK_NULL)
1104 return IPC_SPACE_NULL;
1105
1106 if (!task->active) {
1107 task_unlock(task);
1108 return IPC_SPACE_NULL;
1109 }
1110
1111 space = task->itk_space;
1112 is_reference(space);
1113 task_unlock(task);
1114 return (space);
1115 }
1116
1117 /*
1118 * Routine: convert_port_to_map
1119 * Purpose:
1120 * Convert from a port to a map.
1121 * Doesn't consume the port ref; produces a map ref,
1122 * which may be null.
1123 * Conditions:
1124 * Nothing locked.
1125 */
1126
1127 vm_map_t
1128 convert_port_to_map(
1129 ipc_port_t port)
1130 {
1131 task_t task;
1132 vm_map_t map;
1133
1134 task = convert_port_to_locked_task(port);
1135
1136 if (task == TASK_NULL)
1137 return VM_MAP_NULL;
1138
1139 if (!task->active) {
1140 task_unlock(task);
1141 return VM_MAP_NULL;
1142 }
1143
1144 map = task->map;
1145 vm_map_reference_swap(map);
1146 task_unlock(task);
1147 return map;
1148 }
1149
1150
1151 /*
1152 * Routine: convert_port_to_thread
1153 * Purpose:
1154 * Convert from a port to a thread.
1155 * Doesn't consume the port ref; produces a thread ref,
1156 * which may be null.
1157 * Conditions:
1158 * Nothing locked.
1159 */
1160
1161 thread_t
1162 convert_port_to_thread(
1163 ipc_port_t port)
1164 {
1165 thread_t thread = THREAD_NULL;
1166
1167 if (IP_VALID(port)) {
1168 ip_lock(port);
1169
1170 if ( ip_active(port) &&
1171 ip_kotype(port) == IKOT_THREAD ) {
1172 thread = (thread_t)port->ip_kobject;
1173 assert(thread != THREAD_NULL);
1174
1175 thread_reference_internal(thread);
1176 }
1177
1178 ip_unlock(port);
1179 }
1180
1181 return (thread);
1182 }
1183
1184 /*
1185 * Routine: port_name_to_thread
1186 * Purpose:
1187 * Convert from a port name to a thread reference.
1188 * A name of MACH_PORT_NULL is valid for the null thread.
1189 * Conditions:
1190 * Nothing locked.
1191 */
1192 thread_t
1193 port_name_to_thread(
1194 mach_port_name_t name)
1195 {
1196 thread_t thread = THREAD_NULL;
1197 ipc_port_t kport;
1198
1199 if (MACH_PORT_VALID(name)) {
1200 if (ipc_object_copyin(current_space(), name,
1201 MACH_MSG_TYPE_COPY_SEND,
1202 (ipc_object_t *)&kport) != KERN_SUCCESS)
1203 return (THREAD_NULL);
1204
1205 thread = convert_port_to_thread(kport);
1206
1207 if (IP_VALID(kport))
1208 ipc_port_release_send(kport);
1209 }
1210
1211 return (thread);
1212 }
1213
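/*
 * Routine: port_name_to_task
 * Purpose:
 * Convert from a port name in the current space to a task
 * reference. A name of MACH_PORT_NULL is valid for the null task.
 * Conditions:
 * Nothing locked.
 */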
1214 task_t
1215 port_name_to_task(
1216 mach_port_name_t name)
1217 {
1218 ipc_port_t kern_port;
1219 kern_return_t kr;
1220 task_t task = TASK_NULL;
1221
1222 if (MACH_PORT_VALID(name)) {
1223 kr = ipc_object_copyin(current_space(), name,
1224 MACH_MSG_TYPE_COPY_SEND,
1225 (ipc_object_t *) &kern_port);
1226 if (kr != KERN_SUCCESS)
1227 return TASK_NULL;
1228
1229 task = convert_port_to_task(kern_port);
1230
1231 if (IP_VALID(kern_port))
1232 ipc_port_release_send(kern_port);
1233 }
1234 return task;
1235 }
1236
1237 /*
1238 * Routine: convert_task_to_port
1239 * Purpose:
1240 * Convert from a task to a port.
1241 * Consumes a task ref; produces a naked send right
1242 * which may be invalid.
1243 * Conditions:
1244 * Nothing locked.
1245 */
1246
1247 ipc_port_t
1248 convert_task_to_port(
1249 task_t task)
1250 {
1251 ipc_port_t port;
1252
1253 itk_lock(task);
1254 if (task->itk_self != IP_NULL)
1255 port = ipc_port_make_send(task->itk_self);
1256 else
1257 port = IP_NULL;
1258 itk_unlock(task);
1259
1260 task_deallocate(task);
1261 return port;
1262 }
1263
1264 /*
1265 * Routine: convert_thread_to_port
1266 * Purpose:
1267 * Convert from a thread to a port.
1268 * Consumes a thread ref; produces a naked send right
1269 * which may be invalid.
1270 * Conditions:
1271 * Nothing locked.
1272 */
1273
1274 ipc_port_t
1275 convert_thread_to_port(
1276 thread_t thread)
1277 {
1278 ipc_port_t port;
1279
1280 thread_mtx_lock(thread);
1281
1282 if (thread->ith_self != IP_NULL)
1283 port = ipc_port_make_send(thread->ith_self);
1284 else
1285 port = IP_NULL;
1286
1287 thread_mtx_unlock(thread);
1288
1289 thread_deallocate(thread);
1290
1291 return (port);
1292 }
1293
1294 /*
1295 * Routine: space_deallocate
1296 * Purpose:
1297 * Deallocate a space ref produced by convert_port_to_space.
1298 * Conditions:
1299 * Nothing locked.
1300 */
1301
1302 void
1303 space_deallocate(
1304 ipc_space_t space)
1305 {
1306 if (space != IS_NULL)
1307 is_release(space);
1308 }
1309
1310 /*
1311 * Routine: thread/task_set_exception_ports [kernel call]
1312 * Purpose:
1313 * Sets the thread/task exception port, flavor and
1314 * behavior for the exception types specified by the mask.
1315 * There will be one send right per exception per valid
1316 * port.
1317 * Conditions:
1318 * Nothing locked. If successful, consumes
1319 * the supplied send right.
1320 * Returns:
1321 * KERN_SUCCESS Changed the special port.
1322 * KERN_INVALID_ARGUMENT The thread is null,
1323 * Illegal mask bit set.
1324 * Illegal exception behavior
1325 * KERN_FAILURE The thread is dead.
1326 */
1327
1328 kern_return_t
1329 thread_set_exception_ports(
1330 thread_t thread,
1331 exception_mask_t exception_mask,
1332 ipc_port_t new_port,
1333 exception_behavior_t new_behavior,
1334 thread_state_flavor_t new_flavor)
1335 {
1336 ipc_port_t old_port[EXC_TYPES_COUNT];
1337 boolean_t privileged = current_task()->sec_token.val[0] == 0;
1338 register int i;
1339
1340 if (thread == THREAD_NULL)
1341 return (KERN_INVALID_ARGUMENT);
1342
1343 if (exception_mask & ~EXC_MASK_ALL)
1344 return (KERN_INVALID_ARGUMENT);
1345
1346 if (IP_VALID(new_port)) {
1347 switch (new_behavior) {
1348
1349 case EXCEPTION_DEFAULT:
1350 case EXCEPTION_STATE:
1351 case EXCEPTION_STATE_IDENTITY:
1352 break;
1353
1354 default:
1355 return (KERN_INVALID_ARGUMENT);
1356 }
1357 }
1358
1359 /*
1360 * Check the validity of the thread_state_flavor by calling the
1361 * VALID_THREAD_STATE_FLAVOR architecture dependent macro defined in
1362 * osfmk/mach/ARCHITECTURE/thread_status.h
1363 */
1364 if (!VALID_THREAD_STATE_FLAVOR(new_flavor))
1365 return (KERN_INVALID_ARGUMENT);
1366
1367 thread_mtx_lock(thread);
1368
1369 if (!thread->active) {
1370 thread_mtx_unlock(thread);
1371
1372 return (KERN_FAILURE);
1373 }
1374
1375 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
1376 if (exception_mask & (1 << i)) {
1377 old_port[i] = thread->exc_actions[i].port;
1378 thread->exc_actions[i].port = ipc_port_copy_send(new_port);
1379 thread->exc_actions[i].behavior = new_behavior;
1380 thread->exc_actions[i].flavor = new_flavor;
1381 thread->exc_actions[i].privileged = privileged;
1382 }
1383 else
1384 old_port[i] = IP_NULL;
1385 }
1386
1387 thread_mtx_unlock(thread);
1388
1389 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i)
1390 if (IP_VALID(old_port[i]))
1391 ipc_port_release_send(old_port[i]);
1392
1393 if (IP_VALID(new_port)) /* consume send right */
1394 ipc_port_release_send(new_port);
1395
1396 return (KERN_SUCCESS);
1397 }
1398
1399 kern_return_t
1400 task_set_exception_ports(
1401 task_t task,
1402 exception_mask_t exception_mask,
1403 ipc_port_t new_port,
1404 exception_behavior_t new_behavior,
1405 thread_state_flavor_t new_flavor)
1406 {
1407 ipc_port_t old_port[EXC_TYPES_COUNT];
1408 boolean_t privileged = current_task()->sec_token.val[0] == 0;
1409 register int i;
1410
1411 if (task == TASK_NULL)
1412 return (KERN_INVALID_ARGUMENT);
1413
1414 if (exception_mask & ~EXC_MASK_ALL)
1415 return (KERN_INVALID_ARGUMENT);
1416
1417 if (IP_VALID(new_port)) {
1418 switch (new_behavior) {
1419
1420 case EXCEPTION_DEFAULT:
1421 case EXCEPTION_STATE:
1422 case EXCEPTION_STATE_IDENTITY:
1423 break;
1424
1425 default:
1426 return (KERN_INVALID_ARGUMENT);
1427 }
1428 }
1429
1430 itk_lock(task);
1431
1432 if (task->itk_self == IP_NULL) {
1433 itk_unlock(task);
1434
1435 return (KERN_FAILURE);
1436 }
1437
1438 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
1439 if (exception_mask & (1 << i)) {
1440 old_port[i] = task->exc_actions[i].port;
1441 task->exc_actions[i].port =
1442 ipc_port_copy_send(new_port);
1443 task->exc_actions[i].behavior = new_behavior;
1444 task->exc_actions[i].flavor = new_flavor;
1445 task->exc_actions[i].privileged = privileged;
1446 }
1447 else
1448 old_port[i] = IP_NULL;
1449 }
1450
1451 itk_unlock(task);
1452
1453 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i)
1454 if (IP_VALID(old_port[i]))
1455 ipc_port_release_send(old_port[i]);
1456
1457 if (IP_VALID(new_port)) /* consume send right */
1458 ipc_port_release_send(new_port);
1459
1460 return (KERN_SUCCESS);
1461 }
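/*
 * [Editorial usage sketch, not part of the original source.]  From user
 * space these calls install an exception handler port; for example, to
 * have EXC_BAD_ACCESS for the whole task delivered to a previously
 * allocated receive right `exc_port` (a hypothetical name) with the
 * default behavior:
 *
 *      kern_return_t kr = task_set_exception_ports(mach_task_self(),
 *          EXC_MASK_BAD_ACCESS, exc_port, EXCEPTION_DEFAULT,
 *          THREAD_STATE_NONE);
 */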
1462
1463 /*
1464 * Routine: thread/task_swap_exception_ports [kernel call]
1465 * Purpose:
1466 * Sets the thread/task exception port, flavor and
1467 * behavior for the exception types specified by the
1468 * mask.
1469 *
1470 * The old ports, behaviors and flavors are returned.
1471 * Count specifies the array sizes on input and
1472 * the number of returned ports etc. on output. The
1473 * arrays must be large enough to hold all the returned
1474 * data; MIG returns an error otherwise. The masks
1475 * array specifies the corresponding exception type(s).
1476 *
1477 * Conditions:
1478 * Nothing locked. If successful, consumes
1479 * the supplied send right.
1480 *
1481 * Returns up to [in] CountCnt elements.
1482 * Returns:
1483 * KERN_SUCCESS Changed the special port.
1484 * KERN_INVALID_ARGUMENT The thread is null,
1485 * Illegal mask bit set.
1486 * Illegal exception behavior
1487 * KERN_FAILURE The thread is dead.
1488 */
1489
1490 kern_return_t
1491 thread_swap_exception_ports(
1492 thread_t thread,
1493 exception_mask_t exception_mask,
1494 ipc_port_t new_port,
1495 exception_behavior_t new_behavior,
1496 thread_state_flavor_t new_flavor,
1497 exception_mask_array_t masks,
1498 mach_msg_type_number_t *CountCnt,
1499 exception_port_array_t ports,
1500 exception_behavior_array_t behaviors,
1501 thread_state_flavor_array_t flavors)
1502 {
1503 ipc_port_t old_port[EXC_TYPES_COUNT];
1504 boolean_t privileged = current_task()->sec_token.val[0] == 0;
1505 unsigned int i, j, count;
1506
1507 if (thread == THREAD_NULL)
1508 return (KERN_INVALID_ARGUMENT);
1509
1510 if (exception_mask & ~EXC_MASK_ALL)
1511 return (KERN_INVALID_ARGUMENT);
1512
1513 if (IP_VALID(new_port)) {
1514 switch (new_behavior) {
1515
1516 case EXCEPTION_DEFAULT:
1517 case EXCEPTION_STATE:
1518 case EXCEPTION_STATE_IDENTITY:
1519 break;
1520
1521 default:
1522 return (KERN_INVALID_ARGUMENT);
1523 }
1524 }
1525
1526 thread_mtx_lock(thread);
1527
1528 if (!thread->active) {
1529 thread_mtx_unlock(thread);
1530
1531 return (KERN_FAILURE);
1532 }
1533
1534 count = 0;
1535
1536 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
1537 if (exception_mask & (1 << i)) {
1538 for (j = 0; j < count; ++j) {
1539 /*
1540 * search for an identical entry; if found,
1541 * set the corresponding mask for this exception.
1542 */
1543 if ( thread->exc_actions[i].port == ports[j] &&
1544 thread->exc_actions[i].behavior == behaviors[j] &&
1545 thread->exc_actions[i].flavor == flavors[j] ) {
1546 masks[j] |= (1 << i);
1547 break;
1548 }
1549 }
1550
1551 if (j == count) {
1552 masks[j] = (1 << i);
1553 ports[j] = ipc_port_copy_send(thread->exc_actions[i].port);
1554
1555 behaviors[j] = thread->exc_actions[i].behavior;
1556 flavors[j] = thread->exc_actions[i].flavor;
1557 ++count;
1558 }
1559
1560 old_port[i] = thread->exc_actions[i].port;
1561 thread->exc_actions[i].port = ipc_port_copy_send(new_port);
1562 thread->exc_actions[i].behavior = new_behavior;
1563 thread->exc_actions[i].flavor = new_flavor;
1564 thread->exc_actions[i].privileged = privileged;
1565 if (count > *CountCnt)
1566 break;
1567 }
1568 else
1569 old_port[i] = IP_NULL;
1570 }
1571
1572 thread_mtx_unlock(thread);
1573
1574 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i)
1575 if (IP_VALID(old_port[i]))
1576 ipc_port_release_send(old_port[i]);
1577
1578 if (IP_VALID(new_port)) /* consume send right */
1579 ipc_port_release_send(new_port);
1580
1581 *CountCnt = count;
1582
1583 return (KERN_SUCCESS);
1584 }
1585
1586 kern_return_t
1587 task_swap_exception_ports(
1588 task_t task,
1589 exception_mask_t exception_mask,
1590 ipc_port_t new_port,
1591 exception_behavior_t new_behavior,
1592 thread_state_flavor_t new_flavor,
1593 exception_mask_array_t masks,
1594 mach_msg_type_number_t *CountCnt,
1595 exception_port_array_t ports,
1596 exception_behavior_array_t behaviors,
1597 thread_state_flavor_array_t flavors)
1598 {
1599 ipc_port_t old_port[EXC_TYPES_COUNT];
1600 boolean_t privileged = current_task()->sec_token.val[0] == 0;
1601 unsigned int i, j, count;
1602
1603 if (task == TASK_NULL)
1604 return (KERN_INVALID_ARGUMENT);
1605
1606 if (exception_mask & ~EXC_MASK_ALL)
1607 return (KERN_INVALID_ARGUMENT);
1608
1609 if (IP_VALID(new_port)) {
1610 switch (new_behavior) {
1611
1612 case EXCEPTION_DEFAULT:
1613 case EXCEPTION_STATE:
1614 case EXCEPTION_STATE_IDENTITY:
1615 break;
1616
1617 default:
1618 return (KERN_INVALID_ARGUMENT);
1619 }
1620 }
1621
1622 itk_lock(task);
1623
1624 if (task->itk_self == IP_NULL) {
1625 itk_unlock(task);
1626
1627 return (KERN_FAILURE);
1628 }
1629
1630 count = 0;
1631
1632 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
1633 if (exception_mask & (1 << i)) {
1634 for (j = 0; j < count; j++) {
1635 /*
1636 * search for an identical entry; if found,
1637 * set the corresponding mask for this exception.
1638 */
1639 if ( task->exc_actions[i].port == ports[j] &&
1640 task->exc_actions[i].behavior == behaviors[j] &&
1641 task->exc_actions[i].flavor == flavors[j] ) {
1642 masks[j] |= (1 << i);
1643 break;
1644 }
1645 }
1646
1647 if (j == count) {
1648 masks[j] = (1 << i);
1649 ports[j] = ipc_port_copy_send(task->exc_actions[i].port);
1650 behaviors[j] = task->exc_actions[i].behavior;
1651 flavors[j] = task->exc_actions[i].flavor;
1652 ++count;
1653 }
1654
1655 old_port[i] = task->exc_actions[i].port;
1656 task->exc_actions[i].port = ipc_port_copy_send(new_port);
1657 task->exc_actions[i].behavior = new_behavior;
1658 task->exc_actions[i].flavor = new_flavor;
1659 task->exc_actions[i].privileged = privileged;
1660 if (count > *CountCnt)
1661 break;
1662 }
1663 else
1664 old_port[i] = IP_NULL;
1665 }
1666
1667 itk_unlock(task);
1668
1669 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++)
1670 if (IP_VALID(old_port[i]))
1671 ipc_port_release_send(old_port[i]);
1672
1673 if (IP_VALID(new_port)) /* consume send right */
1674 ipc_port_release_send(new_port);
1675
1676 *CountCnt = count;
1677
1678 return (KERN_SUCCESS);
1679 }
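/*
 * [Editorial usage sketch, not part of the original source.]  The swap
 * variants install new exception actions and return the old ones in a
 * single call, which is the usual way to save handlers for later
 * restoration (`exc_port` is a hypothetical send right):
 *
 *      exception_mask_t masks[EXC_TYPES_COUNT];
 *      mach_port_t old_ports[EXC_TYPES_COUNT];
 *      exception_behavior_t behaviors[EXC_TYPES_COUNT];
 *      thread_state_flavor_t flavors[EXC_TYPES_COUNT];
 *      mach_msg_type_number_t count = EXC_TYPES_COUNT;
 *
 *      task_swap_exception_ports(mach_task_self(), EXC_MASK_ALL, exc_port,
 *          EXCEPTION_DEFAULT, THREAD_STATE_NONE,
 *          masks, &count, old_ports, behaviors, flavors);
 */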
1680
1681 /*
1682 * Routine: thread/task_get_exception_ports [kernel call]
1683 * Purpose:
1684 * Clones a send right for each of the thread/task's exception
1685 * ports specified in the mask and returns the behavior
1686 * and flavor of each.
1687 *
1688 * Returns up to [in] CountCnt elements.
1689 *
1690 * Conditions:
1691 * Nothing locked.
1692 * Returns:
1693 * KERN_SUCCESS Extracted a send right.
1694 * KERN_INVALID_ARGUMENT The thread is null,
1695 * Invalid special port,
1696 * Illegal mask bit set.
1697 * KERN_FAILURE The thread is dead.
1698 */
1699
1700 kern_return_t
1701 thread_get_exception_ports(
1702 thread_t thread,
1703 exception_mask_t exception_mask,
1704 exception_mask_array_t masks,
1705 mach_msg_type_number_t *CountCnt,
1706 exception_port_array_t ports,
1707 exception_behavior_array_t behaviors,
1708 thread_state_flavor_array_t flavors)
1709 {
1710 unsigned int i, j, count;
1711
1712 if (thread == THREAD_NULL)
1713 return (KERN_INVALID_ARGUMENT);
1714
1715 if (exception_mask & ~EXC_MASK_ALL)
1716 return (KERN_INVALID_ARGUMENT);
1717
1718 thread_mtx_lock(thread);
1719
1720 if (!thread->active) {
1721 thread_mtx_unlock(thread);
1722
1723 return (KERN_FAILURE);
1724 }
1725
1726 count = 0;
1727
1728 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
1729 if (exception_mask & (1 << i)) {
1730 for (j = 0; j < count; ++j) {
1731 /*
1732 * search for an identical entry; if found,
1733 * set the corresponding mask for this exception.
1734 */
1735 if ( thread->exc_actions[i].port == ports[j] &&
1736 thread->exc_actions[i].behavior == behaviors[j] &&
1737 thread->exc_actions[i].flavor == flavors[j] ) {
1738 masks[j] |= (1 << i);
1739 break;
1740 }
1741 }
1742
1743 if (j == count) {
1744 masks[j] = (1 << i);
1745 ports[j] = ipc_port_copy_send(thread->exc_actions[i].port);
1746 behaviors[j] = thread->exc_actions[i].behavior;
1747 flavors[j] = thread->exc_actions[i].flavor;
1748 ++count;
1749 if (count >= *CountCnt)
1750 break;
1751 }
1752 }
1753 }
1754
1755 thread_mtx_unlock(thread);
1756
1757 *CountCnt = count;
1758
1759 return (KERN_SUCCESS);
1760 }
1761
1762 kern_return_t
1763 task_get_exception_ports(
1764 task_t task,
1765 exception_mask_t exception_mask,
1766 exception_mask_array_t masks,
1767 mach_msg_type_number_t *CountCnt,
1768 exception_port_array_t ports,
1769 exception_behavior_array_t behaviors,
1770 thread_state_flavor_array_t flavors)
1771 {
1772 unsigned int i, j, count;
1773
1774 if (task == TASK_NULL)
1775 return (KERN_INVALID_ARGUMENT);
1776
1777 if (exception_mask & ~EXC_MASK_ALL)
1778 return (KERN_INVALID_ARGUMENT);
1779
1780 itk_lock(task);
1781
1782 if (task->itk_self == IP_NULL) {
1783 itk_unlock(task);
1784
1785 return (KERN_FAILURE);
1786 }
1787
1788 count = 0;
1789
1790 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
1791 if (exception_mask & (1 << i)) {
1792 for (j = 0; j < count; ++j) {
1793 /*
1794 * search for an identical entry; if found,
1795 * set the corresponding mask for this exception.
1796 */
1797 if ( task->exc_actions[i].port == ports[j] &&
1798 task->exc_actions[i].behavior == behaviors[j] &&
1799 task->exc_actions[i].flavor == flavors[j] ) {
1800 masks[j] |= (1 << i);
1801 break;
1802 }
1803 }
1804
1805 if (j == count) {
1806 masks[j] = (1 << i);
1807 ports[j] = ipc_port_copy_send(task->exc_actions[i].port);
1808 behaviors[j] = task->exc_actions[i].behavior;
1809 flavors[j] = task->exc_actions[i].flavor;
1810 ++count;
1811 if (count >= *CountCnt)
1812 break;
1813 }
1814 }
1815 }
1816
1817 itk_unlock(task);
1818
1819 *CountCnt = count;
1820
1821 return (KERN_SUCCESS);
1822 }
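/*
 * [Editorial usage sketch, not part of the original source.]  The get
 * variants are the read-only companions, commonly used to snapshot the
 * current handlers before installing new ones:
 *
 *      exception_mask_t masks[EXC_TYPES_COUNT];
 *      mach_port_t ports[EXC_TYPES_COUNT];
 *      exception_behavior_t behaviors[EXC_TYPES_COUNT];
 *      thread_state_flavor_t flavors[EXC_TYPES_COUNT];
 *      mach_msg_type_number_t count = EXC_TYPES_COUNT;
 *
 *      task_get_exception_ports(mach_task_self(), EXC_MASK_ALL,
 *          masks, &count, ports, behaviors, flavors);
 */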