1 /*
2 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 /*
23 * @OSF_COPYRIGHT@
24 */
25 /*
26 * Mach Operating System
27 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
28 * All Rights Reserved.
29 *
30 * Permission to use, copy, modify and distribute this software and its
31 * documentation is hereby granted, provided that both the copyright
32 * notice and this permission notice appear in all copies of the
33 * software, derivative works or modified versions, and any portions
34 * thereof, and that both notices appear in supporting documentation.
35 *
36 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
37 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
38 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
39 *
40 * Carnegie Mellon requests users of this software to return to
41 *
42 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
43 * School of Computer Science
44 * Carnegie Mellon University
45 * Pittsburgh PA 15213-3890
46 *
47 * any improvements or extensions that they make and grant Carnegie Mellon
48 * the rights to redistribute these changes.
49 */
50 /*
51 */
52
53 /*
54 * File: ipc_tt.c
55 * Purpose:
56 * Task and thread related IPC functions.
57 */
58
59 #include <mach/mach_types.h>
60 #include <mach/boolean.h>
61 #include <mach/kern_return.h>
62 #include <mach/mach_param.h>
63 #include <mach/task_special_ports.h>
64 #include <mach/thread_special_ports.h>
65 #include <mach/thread_status.h>
66 #include <mach/exception_types.h>
67 #include <mach/memory_object_types.h>
68 #include <mach/mach_traps.h>
69 #include <mach/task_server.h>
70 #include <mach/thread_act_server.h>
71 #include <mach/mach_host_server.h>
72 #include <mach/host_priv_server.h>
73 #include <mach/vm_map_server.h>
74
75 #include <kern/kern_types.h>
76 #include <kern/host.h>
77 #include <kern/ipc_kobject.h>
78 #include <kern/ipc_tt.h>
79 #include <kern/kalloc.h>
80 #include <kern/thread.h>
81 #include <kern/misc_protos.h>
82
83 #include <vm/vm_map.h>
84 #include <vm/vm_pageout.h>
85 #include <vm/vm_shared_memory_server.h>
86 #include <vm/vm_protos.h>
87
88 /* forward declarations */
89 task_t convert_port_to_locked_task(ipc_port_t port);
90
91
92 /*
93 * Routine: ipc_task_init
94 * Purpose:
95 * Initialize a task's IPC state.
96 *
97  *	If the parent is non-null, some state will be inherited from it.
98 * The parent must be appropriately initialized.
99 * Conditions:
100 * Nothing locked.
101 */
102
103 void
104 ipc_task_init(
105 task_t task,
106 task_t parent)
107 {
108 ipc_space_t space;
109 ipc_port_t kport;
110 kern_return_t kr;
111 int i;
112
113
114 kr = ipc_space_create(&ipc_table_entries[0], &space);
115 if (kr != KERN_SUCCESS)
116 panic("ipc_task_init");
117
118
119 kport = ipc_port_alloc_kernel();
120 if (kport == IP_NULL)
121 panic("ipc_task_init");
122
123 itk_lock_init(task);
124 task->itk_self = kport;
125 task->itk_sself = ipc_port_make_send(kport);
126 task->itk_space = space;
127 space->is_fast = FALSE;
128
129 if (parent == TASK_NULL) {
130 ipc_port_t port;
131
132 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
133 task->exc_actions[i].port = IP_NULL;
134 }/* for */
135
136 kr = host_get_host_port(host_priv_self(), &port);
137 assert(kr == KERN_SUCCESS);
138 task->itk_host = port;
139
140 task->itk_bootstrap = IP_NULL;
141
142 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
143 task->itk_registered[i] = IP_NULL;
144 } else {
145 itk_lock(parent);
146 assert(parent->itk_self != IP_NULL);
147
148 /* inherit registered ports */
149
150 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
151 task->itk_registered[i] =
152 ipc_port_copy_send(parent->itk_registered[i]);
153
154 /* inherit exception and bootstrap ports */
155
156 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
157 task->exc_actions[i].port =
158 ipc_port_copy_send(parent->exc_actions[i].port);
159 task->exc_actions[i].flavor =
160 parent->exc_actions[i].flavor;
161 task->exc_actions[i].behavior =
162 parent->exc_actions[i].behavior;
163 }/* for */
164 task->itk_host =
165 ipc_port_copy_send(parent->itk_host);
166
167 task->itk_bootstrap =
168 ipc_port_copy_send(parent->itk_bootstrap);
169
170 itk_unlock(parent);
171 }
172 }
173
174 /*
175 * Routine: ipc_task_enable
176 * Purpose:
177 * Enable a task for IPC access.
178 * Conditions:
179 * Nothing locked.
180 */
181
182 void
183 ipc_task_enable(
184 task_t task)
185 {
186 ipc_port_t kport;
187
188 itk_lock(task);
189 kport = task->itk_self;
190 if (kport != IP_NULL)
191 ipc_kobject_set(kport, (ipc_kobject_t) task, IKOT_TASK);
192 itk_unlock(task);
193 }
194
195 /*
196 * Routine: ipc_task_disable
197 * Purpose:
198 * Disable IPC access to a task.
199 * Conditions:
200 * Nothing locked.
201 */
202
203 void
204 ipc_task_disable(
205 task_t task)
206 {
207 ipc_port_t kport;
208
209 itk_lock(task);
210 kport = task->itk_self;
211 if (kport != IP_NULL)
212 ipc_kobject_set(kport, IKO_NULL, IKOT_NONE);
213 itk_unlock(task);
214 }
215
216 /*
217 * Routine: ipc_task_terminate
218 * Purpose:
219 * Clean up and destroy a task's IPC state.
220 * Conditions:
221 * Nothing locked. The task must be suspended.
222 * (Or the current thread must be in the task.)
223 */
224
225 void
226 ipc_task_terminate(
227 task_t task)
228 {
229 ipc_port_t kport;
230 int i;
231
232 itk_lock(task);
233 kport = task->itk_self;
234
235 if (kport == IP_NULL) {
236 /* the task is already terminated (can this happen?) */
237 itk_unlock(task);
238 return;
239 }
240
241 task->itk_self = IP_NULL;
242 itk_unlock(task);
243
244 /* release the naked send rights */
245
246 if (IP_VALID(task->itk_sself))
247 ipc_port_release_send(task->itk_sself);
248
249 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
250 if (IP_VALID(task->exc_actions[i].port)) {
251 ipc_port_release_send(task->exc_actions[i].port);
252 }
253 }
254
255 if (IP_VALID(task->itk_host))
256 ipc_port_release_send(task->itk_host);
257
258 if (IP_VALID(task->itk_bootstrap))
259 ipc_port_release_send(task->itk_bootstrap);
260
261 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
262 if (IP_VALID(task->itk_registered[i]))
263 ipc_port_release_send(task->itk_registered[i]);
264
265 ipc_port_release_send(task->wired_ledger_port);
266 ipc_port_release_send(task->paged_ledger_port);
267
268 /* destroy the kernel port */
269 ipc_port_dealloc_kernel(kport);
270 }
271
272 /*
273 * Routine: ipc_task_reset
274 * Purpose:
275 * Reset a task's IPC state to protect it when
276 * it enters an elevated security context.
277 * Conditions:
278 * Nothing locked. The task must be suspended.
279 * (Or the current thread must be in the task.)
280 */
281
282 void
283 ipc_task_reset(
284 task_t task)
285 {
286 ipc_port_t old_kport, new_kport;
287 ipc_port_t old_sself;
288 #if 0
289 ipc_port_t old_exc_actions[EXC_TYPES_COUNT];
290 int i;
291 #endif
292
293 new_kport = ipc_port_alloc_kernel();
294 if (new_kport == IP_NULL)
295 panic("ipc_task_reset");
296
297 itk_lock(task);
298
299 old_kport = task->itk_self;
300
301 if (old_kport == IP_NULL) {
302 /* the task is already terminated (can this happen?) */
303 itk_unlock(task);
304 ipc_port_dealloc_kernel(new_kport);
305 return;
306 }
307
308 task->itk_self = new_kport;
309 old_sself = task->itk_sself;
310 task->itk_sself = ipc_port_make_send(new_kport);
311 ipc_kobject_set(old_kport, IKO_NULL, IKOT_NONE);
312 ipc_kobject_set(new_kport, (ipc_kobject_t) task, IKOT_TASK);
313
314 #if 0
315 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
316 		old_exc_actions[i] = task->exc_actions[i].port;
317 task->exc_actions[i].port = IP_NULL;
318 }/* for */
319 #endif
320
321 itk_unlock(task);
322
323 /* release the naked send rights */
324
325 if (IP_VALID(old_sself))
326 ipc_port_release_send(old_sself);
327
328 #if 0
329 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
330 if (IP_VALID(old_exc_actions[i])) {
331 ipc_port_release_send(old_exc_actions[i]);
332 }
333 }/* for */
334 #endif
335
336 /* destroy the kernel port */
337 ipc_port_dealloc_kernel(old_kport);
338 }
339
340 /*
341 * Routine: ipc_thread_init
342 * Purpose:
343 * Initialize a thread's IPC state.
344 * Conditions:
345 * Nothing locked.
346 */
347
348 void
349 ipc_thread_init(
350 thread_t thread)
351 {
352 ipc_port_t kport;
353 int i;
354
355 kport = ipc_port_alloc_kernel();
356 if (kport == IP_NULL)
357 panic("ipc_thread_init");
358
359 thread->ith_self = kport;
360 thread->ith_sself = ipc_port_make_send(kport);
361
362 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i)
363 thread->exc_actions[i].port = IP_NULL;
364
365 ipc_kobject_set(kport, (ipc_kobject_t)thread, IKOT_THREAD);
366
367 ipc_kmsg_queue_init(&thread->ith_messages);
368
369 thread->ith_rpc_reply = IP_NULL;
370 }
371
372 void
373 ipc_thread_disable(
374 thread_t thread)
375 {
376 ipc_port_t kport = thread->ith_self;
377
378 if (kport != IP_NULL)
379 ipc_kobject_set(kport, IKO_NULL, IKOT_NONE);
380 }
381
382 /*
383 * Routine: ipc_thread_terminate
384 * Purpose:
385 * Clean up and destroy a thread's IPC state.
386 * Conditions:
387 * Nothing locked.
388 */
389
390 void
391 ipc_thread_terminate(
392 thread_t thread)
393 {
394 ipc_port_t kport = thread->ith_self;
395
396 if (kport != IP_NULL) {
397 int i;
398
399 if (IP_VALID(thread->ith_sself))
400 ipc_port_release_send(thread->ith_sself);
401
402 thread->ith_sself = thread->ith_self = IP_NULL;
403
404 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
405 if (IP_VALID(thread->exc_actions[i].port))
406 ipc_port_release_send(thread->exc_actions[i].port);
407 }
408
409 ipc_port_dealloc_kernel(kport);
410 }
411
412 assert(ipc_kmsg_queue_empty(&thread->ith_messages));
413
414 if (thread->ith_rpc_reply != IP_NULL)
415 ipc_port_dealloc_reply(thread->ith_rpc_reply);
416
417 thread->ith_rpc_reply = IP_NULL;
418 }
419
420 /*
421 * Routine: retrieve_task_self_fast
422 * Purpose:
423  *	Optimized version of retrieve_task_self that
424  *	only works for the current task.
425 *
426 * Return a send right (possibly null/dead)
427 * for the task's user-visible self port.
428 * Conditions:
429 * Nothing locked.
430 */
431
432 ipc_port_t
433 retrieve_task_self_fast(
434 register task_t task)
435 {
436 register ipc_port_t port;
437
438 assert(task == current_task());
439
440 itk_lock(task);
441 assert(task->itk_self != IP_NULL);
442
443 if ((port = task->itk_sself) == task->itk_self) {
444 /* no interposing */
445
446 ip_lock(port);
447 assert(ip_active(port));
448 ip_reference(port);
449 port->ip_srights++;
450 ip_unlock(port);
451 } else
452 port = ipc_port_copy_send(port);
453 itk_unlock(task);
454
455 return port;
456 }
457
458 /*
459 * Routine: retrieve_thread_self_fast
460 * Purpose:
461 * Return a send right (possibly null/dead)
462 * for the thread's user-visible self port.
463 *
464 * Only works for the current thread.
465 *
466 * Conditions:
467 * Nothing locked.
468 */
469
470 ipc_port_t
471 retrieve_thread_self_fast(
472 thread_t thread)
473 {
474 register ipc_port_t port;
475
476 assert(thread == current_thread());
477
478 thread_mtx_lock(thread);
479
480 assert(thread->ith_self != IP_NULL);
481
482 if ((port = thread->ith_sself) == thread->ith_self) {
483 /* no interposing */
484
485 ip_lock(port);
486 assert(ip_active(port));
487 ip_reference(port);
488 port->ip_srights++;
489 ip_unlock(port);
490 }
491 else
492 port = ipc_port_copy_send(port);
493
494 thread_mtx_unlock(thread);
495
496 return port;
497 }
498
499 /*
500 * Routine: task_self_trap [mach trap]
501 * Purpose:
502  *	Give the caller send rights for its own task port.
503 * Conditions:
504 * Nothing locked.
505 * Returns:
506 * MACH_PORT_NULL if there are any resource failures
507 * or other errors.
508 */
509
510 mach_port_name_t
511 task_self_trap(
512 __unused struct task_self_trap_args *args)
513 {
514 task_t task = current_task();
515 ipc_port_t sright;
516 mach_port_name_t name;
517
518 sright = retrieve_task_self_fast(task);
519 name = ipc_port_copyout_send(sright, task->itk_space);
520 return name;
521 }
522
523 /*
524 * Routine: thread_self_trap [mach trap]
525 * Purpose:
526  *	Give the caller send rights for its own thread port.
527 * Conditions:
528 * Nothing locked.
529 * Returns:
530 * MACH_PORT_NULL if there are any resource failures
531 * or other errors.
532 */
533
534 mach_port_name_t
535 thread_self_trap(
536 __unused struct thread_self_trap_args *args)
537 {
538 thread_t thread = current_thread();
539 task_t task = thread->task;
540 ipc_port_t sright;
541 mach_port_name_t name;
542
543 sright = retrieve_thread_self_fast(thread);
544 name = ipc_port_copyout_send(sright, task->itk_space);
545 return name;
546
547 }
548
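/*
 * [Illustrative user-space sketch, not part of ipc_tt.c.]  The two traps
 * above back the mach_task_self() and mach_thread_self() calls in user
 * space.  A minimal sketch of how the returned names are typically used;
 * the wrapper behavior described here is an assumption about the libc
 * side, not something taken from this file.
 */
#include <mach/mach.h>
#include <stdio.h>

static void
self_ports_example(void)
{
	mach_port_t task = mach_task_self();      /* task_self_trap */
	mach_port_t thread = mach_thread_self();  /* thread_self_trap */

	printf("task port 0x%x, thread port 0x%x\n", task, thread);

	/*
	 * mach_thread_self() hands back a send right the caller owns and
	 * should drop; the task self name is conventionally kept for the
	 * life of the process.
	 */
	mach_port_deallocate(task, thread);
}
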
549 /*
550 * Routine: mach_reply_port [mach trap]
551 * Purpose:
552 * Allocate a port for the caller.
553 * Conditions:
554 * Nothing locked.
555 * Returns:
556 * MACH_PORT_NULL if there are any resource failures
557 * or other errors.
558 */
559
560 mach_port_name_t
561 mach_reply_port(
562 __unused struct mach_reply_port_args *args)
563 {
564 ipc_port_t port;
565 mach_port_name_t name;
566 kern_return_t kr;
567
568 kr = ipc_port_alloc(current_task()->itk_space, &name, &port);
569 if (kr == KERN_SUCCESS)
570 ip_unlock(port);
571 else
572 name = MACH_PORT_NULL;
573 return name;
574 }
575
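/*
 * [Illustrative user-space sketch, not part of ipc_tt.c.]  mach_reply_port()
 * allocates a fresh receive right in the caller's space, typically used as
 * the msgh_local_port of an RPC request.  The destination port and message
 * id below are hypothetical; only the Mach calls themselves are real API.
 */
#include <mach/mach.h>

static void
reply_port_example(mach_port_t request_port)   /* hypothetical destination */
{
	mach_msg_header_t msg;
	mach_port_t reply = mach_reply_port();

	msg.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND,
	    MACH_MSG_TYPE_MAKE_SEND_ONCE);
	msg.msgh_size = sizeof(msg);
	msg.msgh_remote_port = request_port;
	msg.msgh_local_port = reply;    /* replies come back here */
	msg.msgh_id = 1000;             /* hypothetical message id */

	/* ... a mach_msg() send/receive would go here ... */

	/* destroy the receive right once the reply port is no longer needed */
	mach_port_mod_refs(mach_task_self(), reply, MACH_PORT_RIGHT_RECEIVE, -1);
}
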
576 /*
577 * Routine: thread_get_special_port [kernel call]
578 * Purpose:
579 * Clones a send right for one of the thread's
580 * special ports.
581 * Conditions:
582 * Nothing locked.
583 * Returns:
584 * KERN_SUCCESS Extracted a send right.
585 * KERN_INVALID_ARGUMENT The thread is null.
586 * KERN_FAILURE The thread is dead.
587 * KERN_INVALID_ARGUMENT Invalid special port.
588 */
589
590 kern_return_t
591 thread_get_special_port(
592 thread_t thread,
593 int which,
594 ipc_port_t *portp)
595 {
596 kern_return_t result = KERN_SUCCESS;
597 ipc_port_t *whichp;
598
599 if (thread == THREAD_NULL)
600 return (KERN_INVALID_ARGUMENT);
601
602 switch (which) {
603
604 case THREAD_KERNEL_PORT:
605 whichp = &thread->ith_sself;
606 break;
607
608 default:
609 return (KERN_INVALID_ARGUMENT);
610 }
611
612 thread_mtx_lock(thread);
613
614 if (thread->active)
615 *portp = ipc_port_copy_send(*whichp);
616 else
617 result = KERN_FAILURE;
618
619 thread_mtx_unlock(thread);
620
621 return (result);
622 }
623
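/*
 * [Illustrative user-space sketch, not part of ipc_tt.c.]  Fetching the
 * THREAD_KERNEL_PORT special port of the current thread; error handling is
 * reduced to returning the kern_return_t.
 */
#include <mach/mach.h>
#include <mach/thread_special_ports.h>

static kern_return_t
get_thread_kernel_port_example(mach_port_t *out)
{
	mach_port_t thread = mach_thread_self();
	kern_return_t kr;

	kr = thread_get_special_port(thread, THREAD_KERNEL_PORT, out);

	mach_port_deallocate(mach_task_self(), thread);
	return kr;
}
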
624 /*
625 * Routine: thread_set_special_port [kernel call]
626 * Purpose:
627 * Changes one of the thread's special ports,
628 * setting it to the supplied send right.
629 * Conditions:
630 * Nothing locked. If successful, consumes
631 * the supplied send right.
632 * Returns:
633 * KERN_SUCCESS Changed the special port.
634 * KERN_INVALID_ARGUMENT The thread is null.
635 * KERN_FAILURE The thread is dead.
636 * KERN_INVALID_ARGUMENT Invalid special port.
637 */
638
639 kern_return_t
640 thread_set_special_port(
641 thread_t thread,
642 int which,
643 ipc_port_t port)
644 {
645 kern_return_t result = KERN_SUCCESS;
646 ipc_port_t *whichp, old = IP_NULL;
647
648 if (thread == THREAD_NULL)
649 return (KERN_INVALID_ARGUMENT);
650
651 switch (which) {
652
653 case THREAD_KERNEL_PORT:
654 whichp = &thread->ith_sself;
655 break;
656
657 default:
658 return (KERN_INVALID_ARGUMENT);
659 }
660
661 thread_mtx_lock(thread);
662
663 if (thread->active) {
664 old = *whichp;
665 *whichp = port;
666 }
667 else
668 result = KERN_FAILURE;
669
670 thread_mtx_unlock(thread);
671
672 if (IP_VALID(old))
673 ipc_port_release_send(old);
674
675 return (result);
676 }
677
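/*
 * [Illustrative user-space sketch, not part of ipc_tt.c.]  Interposing on a
 * thread's self port by installing a caller-provided send right as
 * THREAD_KERNEL_PORT.  "interpose_port" is a hypothetical send right the
 * caller has prepared (e.g. for a port that forwards to a monitor).
 */
#include <mach/mach.h>
#include <mach/thread_special_ports.h>

static kern_return_t
interpose_thread_self_example(mach_port_t interpose_port)
{
	mach_port_t thread = mach_thread_self();
	kern_return_t kr;

	kr = thread_set_special_port(thread, THREAD_KERNEL_PORT, interpose_port);

	mach_port_deallocate(mach_task_self(), thread);
	return kr;
}
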
678 /*
679 * Routine: task_get_special_port [kernel call]
680 * Purpose:
681 * Clones a send right for one of the task's
682 * special ports.
683 * Conditions:
684 * Nothing locked.
685 * Returns:
686 * KERN_SUCCESS Extracted a send right.
687 * KERN_INVALID_ARGUMENT The task is null.
688 * KERN_FAILURE The task/space is dead.
689 * KERN_INVALID_ARGUMENT Invalid special port.
690 */
691
692 kern_return_t
693 task_get_special_port(
694 task_t task,
695 int which,
696 ipc_port_t *portp)
697 {
698 ipc_port_t *whichp;
699 ipc_port_t port;
700
701 if (task == TASK_NULL)
702 return KERN_INVALID_ARGUMENT;
703
704 switch (which) {
705 case TASK_KERNEL_PORT:
706 whichp = &task->itk_sself;
707 break;
708
709 case TASK_HOST_PORT:
710 whichp = &task->itk_host;
711 break;
712
713 case TASK_BOOTSTRAP_PORT:
714 whichp = &task->itk_bootstrap;
715 break;
716
717 case TASK_WIRED_LEDGER_PORT:
718 whichp = &task->wired_ledger_port;
719 break;
720
721 case TASK_PAGED_LEDGER_PORT:
722 whichp = &task->paged_ledger_port;
723 break;
724
725 default:
726 return KERN_INVALID_ARGUMENT;
727 }
728
729 itk_lock(task);
730 if (task->itk_self == IP_NULL) {
731 itk_unlock(task);
732 return KERN_FAILURE;
733 }
734
735 port = ipc_port_copy_send(*whichp);
736 itk_unlock(task);
737
738 *portp = port;
739 return KERN_SUCCESS;
740 }
741
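/*
 * [Illustrative user-space sketch, not part of ipc_tt.c.]  Reading the
 * bootstrap special port of the current task; this is the same right that
 * the task_get_bootstrap_port() convenience macro returns.
 */
#include <mach/mach.h>
#include <mach/task_special_ports.h>

static kern_return_t
get_bootstrap_example(mach_port_t *bootstrap)
{
	return task_get_special_port(mach_task_self(),
	    TASK_BOOTSTRAP_PORT, bootstrap);
}
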
742 /*
743 * Routine: task_set_special_port [kernel call]
744 * Purpose:
745 * Changes one of the task's special ports,
746 * setting it to the supplied send right.
747 * Conditions:
748 * Nothing locked. If successful, consumes
749 * the supplied send right.
750 * Returns:
751 * KERN_SUCCESS Changed the special port.
752 * KERN_INVALID_ARGUMENT The task is null.
753 * KERN_FAILURE The task/space is dead.
754 * KERN_INVALID_ARGUMENT Invalid special port.
755 */
756
757 kern_return_t
758 task_set_special_port(
759 task_t task,
760 int which,
761 ipc_port_t port)
762 {
763 ipc_port_t *whichp;
764 ipc_port_t old;
765
766 if (task == TASK_NULL)
767 return KERN_INVALID_ARGUMENT;
768
769 switch (which) {
770 case TASK_KERNEL_PORT:
771 whichp = &task->itk_sself;
772 break;
773
774 case TASK_HOST_PORT:
775 whichp = &task->itk_host;
776 break;
777
778 case TASK_BOOTSTRAP_PORT:
779 whichp = &task->itk_bootstrap;
780 break;
781
782 case TASK_WIRED_LEDGER_PORT:
783 whichp = &task->wired_ledger_port;
784 break;
785
786 case TASK_PAGED_LEDGER_PORT:
787 whichp = &task->paged_ledger_port;
788 break;
789
790 default:
791 return KERN_INVALID_ARGUMENT;
792 }/* switch */
793
794 itk_lock(task);
795 if (task->itk_self == IP_NULL) {
796 itk_unlock(task);
797 return KERN_FAILURE;
798 }
799
800 old = *whichp;
801 *whichp = port;
802 itk_unlock(task);
803
804 if (IP_VALID(old))
805 ipc_port_release_send(old);
806 return KERN_SUCCESS;
807 }
808
809
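/*
 * [Illustrative user-space sketch, not part of ipc_tt.c.]  Replacing the
 * current task's bootstrap port.  "new_bootstrap" is a hypothetical send
 * right supplied by the caller; per the comment above, the kernel releases
 * the previously installed right when the new one is stored.
 */
#include <mach/mach.h>
#include <mach/task_special_ports.h>

static kern_return_t
set_bootstrap_example(mach_port_t new_bootstrap)
{
	return task_set_special_port(mach_task_self(),
	    TASK_BOOTSTRAP_PORT, new_bootstrap);
}
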
810 /*
811 * Routine: mach_ports_register [kernel call]
812 * Purpose:
813 * Stash a handful of port send rights in the task.
814 * Child tasks will inherit these rights, but they
815 * must use mach_ports_lookup to acquire them.
816 *
817 * The rights are supplied in a (wired) kalloc'd segment.
818 * Rights which aren't supplied are assumed to be null.
819 * Conditions:
820 * Nothing locked. If successful, consumes
821 * the supplied rights and memory.
822 * Returns:
823 * KERN_SUCCESS Stashed the port rights.
824 * KERN_INVALID_ARGUMENT The task is null.
825 * KERN_INVALID_ARGUMENT The task is dead.
826 * KERN_INVALID_ARGUMENT Too many port rights supplied.
827 */
828
829 kern_return_t
830 mach_ports_register(
831 task_t task,
832 mach_port_array_t memory,
833 mach_msg_type_number_t portsCnt)
834 {
835 ipc_port_t ports[TASK_PORT_REGISTER_MAX];
836 unsigned int i;
837
838 if ((task == TASK_NULL) ||
839 (portsCnt > TASK_PORT_REGISTER_MAX))
840 return KERN_INVALID_ARGUMENT;
841
842 /*
843 * Pad the port rights with nulls.
844 */
845
846 for (i = 0; i < portsCnt; i++)
847 ports[i] = memory[i];
848 for (; i < TASK_PORT_REGISTER_MAX; i++)
849 ports[i] = IP_NULL;
850
851 itk_lock(task);
852 if (task->itk_self == IP_NULL) {
853 itk_unlock(task);
854 return KERN_INVALID_ARGUMENT;
855 }
856
857 /*
858 * Replace the old send rights with the new.
859 * Release the old rights after unlocking.
860 */
861
862 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
863 ipc_port_t old;
864
865 old = task->itk_registered[i];
866 task->itk_registered[i] = ports[i];
867 ports[i] = old;
868 }
869
870 itk_unlock(task);
871
872 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
873 if (IP_VALID(ports[i]))
874 ipc_port_release_send(ports[i]);
875
876 /*
877 * Now that the operation is known to be successful,
878 * we can free the memory.
879 */
880
881 if (portsCnt != 0)
882 kfree(memory,
883 (vm_size_t) (portsCnt * sizeof(mach_port_t)));
884
885 return KERN_SUCCESS;
886 }
887
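/*
 * [Illustrative user-space sketch, not part of ipc_tt.c.]  Stashing send
 * rights in the current task so that child tasks can recover them with
 * mach_ports_lookup().  "a" and "b" are hypothetical caller-supplied send
 * rights; unused slots are padded with MACH_PORT_NULL, mirroring the
 * padding done by the kernel routine above.
 */
#include <mach/mach.h>
#include <mach/mach_param.h>    /* TASK_PORT_REGISTER_MAX */

static kern_return_t
register_ports_example(mach_port_t a, mach_port_t b)
{
	mach_port_t ports[TASK_PORT_REGISTER_MAX] = { a, b, MACH_PORT_NULL };

	return mach_ports_register(mach_task_self(), ports,
	    TASK_PORT_REGISTER_MAX);
}
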
888 /*
889 * Routine: mach_ports_lookup [kernel call]
890 * Purpose:
891 * Retrieves (clones) the stashed port send rights.
892 * Conditions:
893 * Nothing locked. If successful, the caller gets
894 * rights and memory.
895 * Returns:
896 * KERN_SUCCESS Retrieved the send rights.
897 * KERN_INVALID_ARGUMENT The task is null.
898 * KERN_INVALID_ARGUMENT The task is dead.
899 * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
900 */
901
902 kern_return_t
903 mach_ports_lookup(
904 task_t task,
905 mach_port_array_t *portsp,
906 mach_msg_type_number_t *portsCnt)
907 {
908 void *memory;
909 vm_size_t size;
910 ipc_port_t *ports;
911 int i;
912
913 if (task == TASK_NULL)
914 return KERN_INVALID_ARGUMENT;
915
916 size = (vm_size_t) (TASK_PORT_REGISTER_MAX * sizeof(ipc_port_t));
917
918 memory = kalloc(size);
919 if (memory == 0)
920 return KERN_RESOURCE_SHORTAGE;
921
922 itk_lock(task);
923 if (task->itk_self == IP_NULL) {
924 itk_unlock(task);
925
926 kfree(memory, size);
927 return KERN_INVALID_ARGUMENT;
928 }
929
930 ports = (ipc_port_t *) memory;
931
932 /*
933 * Clone port rights. Because kalloc'd memory
934 * is wired, we won't fault while holding the task lock.
935 */
936
937 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
938 ports[i] = ipc_port_copy_send(task->itk_registered[i]);
939
940 itk_unlock(task);
941
942 *portsp = (mach_port_array_t) ports;
943 *portsCnt = TASK_PORT_REGISTER_MAX;
944 return KERN_SUCCESS;
945 }
946
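/*
 * [Illustrative user-space sketch, not part of ipc_tt.c.]  Recovering the
 * registered rights, e.g. from a child task.  MIG hands the array back as
 * out-of-line memory in the caller's address space, so it is deallocated
 * with vm_deallocate() once the names have been copied out.
 */
#include <mach/mach.h>

static void
lookup_ports_example(void)
{
	mach_port_array_t ports = NULL;
	mach_msg_type_number_t count = 0;

	if (mach_ports_lookup(mach_task_self(), &ports, &count) != KERN_SUCCESS)
		return;

	/* ... use ports[0] .. ports[count - 1] here ... */

	vm_deallocate(mach_task_self(), (vm_address_t)ports,
	    count * sizeof(ports[0]));
}
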
947 /*
948 * Routine: convert_port_to_locked_task
949 * Purpose:
950 * Internal helper routine to convert from a port to a locked
951 * task. Used by several routines that try to convert from a
952 * task port to a reference on some task related object.
953 * Conditions:
954 * Nothing locked, blocking OK.
955 */
956 task_t
957 convert_port_to_locked_task(ipc_port_t port)
958 {
959 while (IP_VALID(port)) {
960 task_t task;
961
962 ip_lock(port);
963 if (!ip_active(port) || (ip_kotype(port) != IKOT_TASK)) {
964 ip_unlock(port);
965 return TASK_NULL;
966 }
967 task = (task_t) port->ip_kobject;
968 assert(task != TASK_NULL);
969
970 /*
971 * Normal lock ordering puts task_lock() before ip_lock().
972 * Attempt out-of-order locking here.
973 */
974 if (task_lock_try(task)) {
975 ip_unlock(port);
976 return(task);
977 }
978
979 ip_unlock(port);
980 mutex_pause();
981 }
982 return TASK_NULL;
983 }
984
985 /*
986 * Routine: convert_port_to_task
987 * Purpose:
988 * Convert from a port to a task.
989 * Doesn't consume the port ref; produces a task ref,
990 * which may be null.
991 * Conditions:
992 * Nothing locked.
993 */
994 task_t
995 convert_port_to_task(
996 ipc_port_t port)
997 {
998 task_t task = TASK_NULL;
999
1000 if (IP_VALID(port)) {
1001 ip_lock(port);
1002
1003 if ( ip_active(port) &&
1004 ip_kotype(port) == IKOT_TASK ) {
1005 task = (task_t)port->ip_kobject;
1006 assert(task != TASK_NULL);
1007
1008 task_reference_internal(task);
1009 }
1010
1011 ip_unlock(port);
1012 }
1013
1014 return (task);
1015 }
1016
1017 /*
1018 * Routine: convert_port_to_space
1019 * Purpose:
1020 * Convert from a port to a space.
1021 * Doesn't consume the port ref; produces a space ref,
1022 * which may be null.
1023 * Conditions:
1024 * Nothing locked.
1025 */
1026 ipc_space_t
1027 convert_port_to_space(
1028 ipc_port_t port)
1029 {
1030 ipc_space_t space;
1031 task_t task;
1032
1033 task = convert_port_to_locked_task(port);
1034
1035 if (task == TASK_NULL)
1036 return IPC_SPACE_NULL;
1037
1038 if (!task->active) {
1039 task_unlock(task);
1040 return IPC_SPACE_NULL;
1041 }
1042
1043 space = task->itk_space;
1044 is_reference(space);
1045 task_unlock(task);
1046 return (space);
1047 }
1048
1049 /*
1050 * Routine: convert_port_to_map
1051 * Purpose:
1052 * Convert from a port to a map.
1053 * Doesn't consume the port ref; produces a map ref,
1054 * which may be null.
1055 * Conditions:
1056 * Nothing locked.
1057 */
1058
1059 vm_map_t
1060 convert_port_to_map(
1061 ipc_port_t port)
1062 {
1063 task_t task;
1064 vm_map_t map;
1065
1066 task = convert_port_to_locked_task(port);
1067
1068 if (task == TASK_NULL)
1069 return VM_MAP_NULL;
1070
1071 if (!task->active) {
1072 task_unlock(task);
1073 return VM_MAP_NULL;
1074 }
1075
1076 map = task->map;
1077 vm_map_reference_swap(map);
1078 task_unlock(task);
1079 return map;
1080 }
1081
1082
1083 /*
1084 * Routine: convert_port_to_thread
1085 * Purpose:
1086 * Convert from a port to a thread.
1087  *	Doesn't consume the port ref; produces a thread ref,
1088 * which may be null.
1089 * Conditions:
1090 * Nothing locked.
1091 */
1092
1093 thread_t
1094 convert_port_to_thread(
1095 ipc_port_t port)
1096 {
1097 thread_t thread = THREAD_NULL;
1098
1099 if (IP_VALID(port)) {
1100 ip_lock(port);
1101
1102 if ( ip_active(port) &&
1103 ip_kotype(port) == IKOT_THREAD ) {
1104 thread = (thread_t)port->ip_kobject;
1105 assert(thread != THREAD_NULL);
1106
1107 thread_reference_internal(thread);
1108 }
1109
1110 ip_unlock(port);
1111 }
1112
1113 return (thread);
1114 }
1115
1116 /*
1117 * Routine: port_name_to_thread
1118 * Purpose:
1119  *	Convert from a port name to a thread reference.
1120 * A name of MACH_PORT_NULL is valid for the null thread.
1121 * Conditions:
1122 * Nothing locked.
1123 */
1124 thread_t
1125 port_name_to_thread(
1126 mach_port_name_t name)
1127 {
1128 thread_t thread = THREAD_NULL;
1129 ipc_port_t kport;
1130
1131 if (MACH_PORT_VALID(name)) {
1132 if (ipc_object_copyin(current_space(), name,
1133 MACH_MSG_TYPE_COPY_SEND,
1134 (ipc_object_t *)&kport) != KERN_SUCCESS)
1135 return (THREAD_NULL);
1136
1137 thread = convert_port_to_thread(kport);
1138
1139 if (IP_VALID(kport))
1140 ipc_port_release_send(kport);
1141 }
1142
1143 return (thread);
1144 }
1145
1146 task_t
1147 port_name_to_task(
1148 mach_port_name_t name)
1149 {
1150 ipc_port_t kern_port;
1151 kern_return_t kr;
1152 task_t task = TASK_NULL;
1153
1154 if (MACH_PORT_VALID(name)) {
1155 kr = ipc_object_copyin(current_space(), name,
1156 MACH_MSG_TYPE_COPY_SEND,
1157 (ipc_object_t *) &kern_port);
1158 if (kr != KERN_SUCCESS)
1159 return TASK_NULL;
1160
1161 task = convert_port_to_task(kern_port);
1162
1163 if (IP_VALID(kern_port))
1164 ipc_port_release_send(kern_port);
1165 }
1166 return task;
1167 }
1168
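/*
 * [Illustrative kernel-side sketch; "example_task_op" is hypothetical and
 * not part of ipc_tt.c.]  Callers of port_name_to_task() are expected to
 * balance the task reference it produces with task_deallocate() once they
 * are done with the task.
 */
static kern_return_t
example_task_op(mach_port_name_t name)
{
	task_t task = port_name_to_task(name);

	if (task == TASK_NULL)
		return KERN_INVALID_ARGUMENT;

	/* ... operate on the task here ... */

	task_deallocate(task);
	return KERN_SUCCESS;
}
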
1169 /*
1170 * Routine: convert_task_to_port
1171 * Purpose:
1172 * Convert from a task to a port.
1173 * Consumes a task ref; produces a naked send right
1174 * which may be invalid.
1175 * Conditions:
1176 * Nothing locked.
1177 */
1178
1179 ipc_port_t
1180 convert_task_to_port(
1181 task_t task)
1182 {
1183 ipc_port_t port;
1184
1185 itk_lock(task);
1186 if (task->itk_self != IP_NULL)
1187 port = ipc_port_make_send(task->itk_self);
1188 else
1189 port = IP_NULL;
1190 itk_unlock(task);
1191
1192 task_deallocate(task);
1193 return port;
1194 }
1195
1196 /*
1197 * Routine: convert_thread_to_port
1198 * Purpose:
1199 * Convert from a thread to a port.
1200  *	Consumes a thread ref; produces a naked send right
1201 * which may be invalid.
1202 * Conditions:
1203 * Nothing locked.
1204 */
1205
1206 ipc_port_t
1207 convert_thread_to_port(
1208 thread_t thread)
1209 {
1210 ipc_port_t port;
1211
1212 thread_mtx_lock(thread);
1213
1214 if (thread->ith_self != IP_NULL)
1215 port = ipc_port_make_send(thread->ith_self);
1216 else
1217 port = IP_NULL;
1218
1219 thread_mtx_unlock(thread);
1220
1221 thread_deallocate(thread);
1222
1223 return (port);
1224 }
1225
1226 /*
1227 * Routine: space_deallocate
1228 * Purpose:
1229 * Deallocate a space ref produced by convert_port_to_space.
1230 * Conditions:
1231 * Nothing locked.
1232 */
1233
1234 void
1235 space_deallocate(
1236 ipc_space_t space)
1237 {
1238 if (space != IS_NULL)
1239 is_release(space);
1240 }
1241
1242 /*
1243 * Routine: thread/task_set_exception_ports [kernel call]
1244 * Purpose:
1245 * Sets the thread/task exception port, flavor and
1246 * behavior for the exception types specified by the mask.
1247 * There will be one send right per exception per valid
1248 * port.
1249 * Conditions:
1250 * Nothing locked. If successful, consumes
1251 * the supplied send right.
1252 * Returns:
1253 * KERN_SUCCESS Changed the special port.
1254 * KERN_INVALID_ARGUMENT The thread is null,
1255 * Illegal mask bit set.
1256 * Illegal exception behavior
1257 * KERN_FAILURE The thread is dead.
1258 */
1259
1260 kern_return_t
1261 thread_set_exception_ports(
1262 thread_t thread,
1263 exception_mask_t exception_mask,
1264 ipc_port_t new_port,
1265 exception_behavior_t new_behavior,
1266 thread_state_flavor_t new_flavor)
1267 {
1268 ipc_port_t old_port[EXC_TYPES_COUNT];
1269 register int i;
1270
1271 if (thread == THREAD_NULL)
1272 return (KERN_INVALID_ARGUMENT);
1273
1274 if (exception_mask & ~EXC_MASK_ALL)
1275 return (KERN_INVALID_ARGUMENT);
1276
1277 if (IP_VALID(new_port)) {
1278 switch (new_behavior) {
1279
1280 case EXCEPTION_DEFAULT:
1281 case EXCEPTION_STATE:
1282 case EXCEPTION_STATE_IDENTITY:
1283 break;
1284
1285 default:
1286 return (KERN_INVALID_ARGUMENT);
1287 }
1288 }
1289
1290 /*
1291 * Check the validity of the thread_state_flavor by calling the
1292 * VALID_THREAD_STATE_FLAVOR architecture dependent macro defined in
1293 * osfmk/mach/ARCHITECTURE/thread_status.h
1294 */
1295 if (!VALID_THREAD_STATE_FLAVOR(new_flavor))
1296 return (KERN_INVALID_ARGUMENT);
1297
1298 thread_mtx_lock(thread);
1299
1300 if (!thread->active) {
1301 thread_mtx_unlock(thread);
1302
1303 return (KERN_FAILURE);
1304 }
1305
1306 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
1307 if (exception_mask & (1 << i)) {
1308 old_port[i] = thread->exc_actions[i].port;
1309 thread->exc_actions[i].port = ipc_port_copy_send(new_port);
1310 thread->exc_actions[i].behavior = new_behavior;
1311 thread->exc_actions[i].flavor = new_flavor;
1312 }
1313 else
1314 old_port[i] = IP_NULL;
1315 }
1316
1317 thread_mtx_unlock(thread);
1318
1319 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i)
1320 if (IP_VALID(old_port[i]))
1321 ipc_port_release_send(old_port[i]);
1322
1323 if (IP_VALID(new_port)) /* consume send right */
1324 ipc_port_release_send(new_port);
1325
1326 return (KERN_SUCCESS);
1327 }
1328
1329 kern_return_t
1330 task_set_exception_ports(
1331 task_t task,
1332 exception_mask_t exception_mask,
1333 ipc_port_t new_port,
1334 exception_behavior_t new_behavior,
1335 thread_state_flavor_t new_flavor)
1336 {
1337 ipc_port_t old_port[EXC_TYPES_COUNT];
1338 register int i;
1339
1340 if (task == TASK_NULL)
1341 return (KERN_INVALID_ARGUMENT);
1342
1343 if (exception_mask & ~EXC_MASK_ALL)
1344 return (KERN_INVALID_ARGUMENT);
1345
1346 if (IP_VALID(new_port)) {
1347 switch (new_behavior) {
1348
1349 case EXCEPTION_DEFAULT:
1350 case EXCEPTION_STATE:
1351 case EXCEPTION_STATE_IDENTITY:
1352 break;
1353
1354 default:
1355 return (KERN_INVALID_ARGUMENT);
1356 }
1357 }
1358
1359 itk_lock(task);
1360
1361 if (task->itk_self == IP_NULL) {
1362 itk_unlock(task);
1363
1364 return (KERN_FAILURE);
1365 }
1366
1367 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
1368 if (exception_mask & (1 << i)) {
1369 old_port[i] = task->exc_actions[i].port;
1370 task->exc_actions[i].port =
1371 ipc_port_copy_send(new_port);
1372 task->exc_actions[i].behavior = new_behavior;
1373 task->exc_actions[i].flavor = new_flavor;
1374 }
1375 else
1376 old_port[i] = IP_NULL;
1377 }
1378
1379 itk_unlock(task);
1380
1381 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i)
1382 if (IP_VALID(old_port[i]))
1383 ipc_port_release_send(old_port[i]);
1384
1385 if (IP_VALID(new_port)) /* consume send right */
1386 ipc_port_release_send(new_port);
1387
1388 return (KERN_SUCCESS);
1389 }
1390
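/*
 * [Illustrative user-space sketch, not part of ipc_tt.c.]  Installing a
 * task-wide EXC_BAD_ACCESS handler: allocate a receive right, make a send
 * right for the kernel to use when it generates exception messages, and
 * register it.  The flavor is architecture dependent; MACHINE_THREAD_STATE
 * is used here as a generic placeholder.
 */
#include <mach/mach.h>

static kern_return_t
install_task_exc_handler_example(mach_port_t *out_port)
{
	mach_port_t exc_port;
	kern_return_t kr;

	kr = mach_port_allocate(mach_task_self(),
	    MACH_PORT_RIGHT_RECEIVE, &exc_port);
	if (kr != KERN_SUCCESS)
		return kr;

	kr = mach_port_insert_right(mach_task_self(), exc_port, exc_port,
	    MACH_MSG_TYPE_MAKE_SEND);
	if (kr != KERN_SUCCESS)
		return kr;

	kr = task_set_exception_ports(mach_task_self(), EXC_MASK_BAD_ACCESS,
	    exc_port, EXCEPTION_DEFAULT, MACHINE_THREAD_STATE);
	if (kr == KERN_SUCCESS)
		*out_port = exc_port;
	return kr;
}
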
1391 /*
1392 * Routine: thread/task_swap_exception_ports [kernel call]
1393 * Purpose:
1394 * Sets the thread/task exception port, flavor and
1395 * behavior for the exception types specified by the
1396 * mask.
1397 *
1398  *	The old ports, behaviors and flavors are returned.
1399 * Count specifies the array sizes on input and
1400 * the number of returned ports etc. on output. The
1401 * arrays must be large enough to hold all the returned
1402  *	data; MIG returns an error otherwise. The masks
1403 * array specifies the corresponding exception type(s).
1404 *
1405 * Conditions:
1406 * Nothing locked. If successful, consumes
1407 * the supplied send right.
1408 *
1409  *	Returns up to [in] CountCnt elements.
1410 * Returns:
1411 * KERN_SUCCESS Changed the special port.
1412 * KERN_INVALID_ARGUMENT The thread is null,
1413 * Illegal mask bit set.
1414 * Illegal exception behavior
1415 * KERN_FAILURE The thread is dead.
1416 */
1417
1418 kern_return_t
1419 thread_swap_exception_ports(
1420 thread_t thread,
1421 exception_mask_t exception_mask,
1422 ipc_port_t new_port,
1423 exception_behavior_t new_behavior,
1424 thread_state_flavor_t new_flavor,
1425 exception_mask_array_t masks,
1426 mach_msg_type_number_t *CountCnt,
1427 exception_port_array_t ports,
1428 exception_behavior_array_t behaviors,
1429 thread_state_flavor_array_t flavors)
1430 {
1431 ipc_port_t old_port[EXC_TYPES_COUNT];
1432 unsigned int i, j, count;
1433
1434 if (thread == THREAD_NULL)
1435 return (KERN_INVALID_ARGUMENT);
1436
1437 if (exception_mask & ~EXC_MASK_ALL)
1438 return (KERN_INVALID_ARGUMENT);
1439
1440 if (IP_VALID(new_port)) {
1441 switch (new_behavior) {
1442
1443 case EXCEPTION_DEFAULT:
1444 case EXCEPTION_STATE:
1445 case EXCEPTION_STATE_IDENTITY:
1446 break;
1447
1448 default:
1449 return (KERN_INVALID_ARGUMENT);
1450 }
1451 }
1452
1453 thread_mtx_lock(thread);
1454
1455 if (!thread->active) {
1456 thread_mtx_unlock(thread);
1457
1458 return (KERN_FAILURE);
1459 }
1460
1461 count = 0;
1462
1463 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
1464 if (exception_mask & (1 << i)) {
1465 for (j = 0; j < count; ++j) {
1466 /*
1467 				 * search for an identical entry; if found,
1468 				 * set the corresponding mask for this exception.
1469 */
1470 if ( thread->exc_actions[i].port == ports[j] &&
1471 thread->exc_actions[i].behavior == behaviors[j] &&
1472 thread->exc_actions[i].flavor == flavors[j] ) {
1473 masks[j] |= (1 << i);
1474 break;
1475 }
1476 }
1477
1478 if (j == count) {
1479 masks[j] = (1 << i);
1480 ports[j] = ipc_port_copy_send(thread->exc_actions[i].port);
1481
1482 behaviors[j] = thread->exc_actions[i].behavior;
1483 flavors[j] = thread->exc_actions[i].flavor;
1484 ++count;
1485 }
1486
1487 old_port[i] = thread->exc_actions[i].port;
1488 thread->exc_actions[i].port = ipc_port_copy_send(new_port);
1489 thread->exc_actions[i].behavior = new_behavior;
1490 thread->exc_actions[i].flavor = new_flavor;
1491 if (count > *CountCnt)
1492 break;
1493 }
1494 else
1495 old_port[i] = IP_NULL;
1496 }
1497
1498 thread_mtx_unlock(thread);
1499
1500 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i)
1501 if (IP_VALID(old_port[i]))
1502 ipc_port_release_send(old_port[i]);
1503
1504 if (IP_VALID(new_port)) /* consume send right */
1505 ipc_port_release_send(new_port);
1506
1507 *CountCnt = count;
1508
1509 return (KERN_SUCCESS);
1510 }
1511
1512 kern_return_t
1513 task_swap_exception_ports(
1514 task_t task,
1515 exception_mask_t exception_mask,
1516 ipc_port_t new_port,
1517 exception_behavior_t new_behavior,
1518 thread_state_flavor_t new_flavor,
1519 exception_mask_array_t masks,
1520 mach_msg_type_number_t *CountCnt,
1521 exception_port_array_t ports,
1522 exception_behavior_array_t behaviors,
1523 thread_state_flavor_array_t flavors)
1524 {
1525 ipc_port_t old_port[EXC_TYPES_COUNT];
1526 unsigned int i, j, count;
1527
1528 if (task == TASK_NULL)
1529 return (KERN_INVALID_ARGUMENT);
1530
1531 if (exception_mask & ~EXC_MASK_ALL)
1532 return (KERN_INVALID_ARGUMENT);
1533
1534 if (IP_VALID(new_port)) {
1535 switch (new_behavior) {
1536
1537 case EXCEPTION_DEFAULT:
1538 case EXCEPTION_STATE:
1539 case EXCEPTION_STATE_IDENTITY:
1540 break;
1541
1542 default:
1543 return (KERN_INVALID_ARGUMENT);
1544 }
1545 }
1546
1547 itk_lock(task);
1548
1549 if (task->itk_self == IP_NULL) {
1550 itk_unlock(task);
1551
1552 return (KERN_FAILURE);
1553 }
1554
1555 count = 0;
1556
1557 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
1558 if (exception_mask & (1 << i)) {
1559 for (j = 0; j < count; j++) {
1560 /*
1561 				 * search for an identical entry; if found,
1562 				 * set the corresponding mask for this exception.
1563 */
1564 if ( task->exc_actions[i].port == ports[j] &&
1565 task->exc_actions[i].behavior == behaviors[j] &&
1566 task->exc_actions[i].flavor == flavors[j] ) {
1567 masks[j] |= (1 << i);
1568 break;
1569 }
1570 }
1571
1572 if (j == count) {
1573 masks[j] = (1 << i);
1574 ports[j] = ipc_port_copy_send(task->exc_actions[i].port);
1575 behaviors[j] = task->exc_actions[i].behavior;
1576 flavors[j] = task->exc_actions[i].flavor;
1577 ++count;
1578 }
1579
1580 old_port[i] = task->exc_actions[i].port;
1581 task->exc_actions[i].port = ipc_port_copy_send(new_port);
1582 task->exc_actions[i].behavior = new_behavior;
1583 task->exc_actions[i].flavor = new_flavor;
1584 if (count > *CountCnt)
1585 break;
1586 }
1587 else
1588 old_port[i] = IP_NULL;
1589 }
1590
1591 itk_unlock(task);
1592
1593 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++)
1594 if (IP_VALID(old_port[i]))
1595 ipc_port_release_send(old_port[i]);
1596
1597 if (IP_VALID(new_port)) /* consume send right */
1598 ipc_port_release_send(new_port);
1599
1600 *CountCnt = count;
1601
1602 return (KERN_SUCCESS);
1603 }
1604
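/*
 * [Illustrative user-space sketch, not part of ipc_tt.c.]  Swapping in a
 * handler while capturing the previous ports, behaviors and flavors so
 * they can be restored later.  "new_exc_port" is a hypothetical send right
 * prepared by the caller; MACHINE_THREAD_STATE is a generic placeholder
 * flavor.
 */
#include <mach/mach.h>

static kern_return_t
swap_task_exc_handler_example(mach_port_t new_exc_port)
{
	exception_mask_t masks[EXC_TYPES_COUNT];
	mach_port_t old_ports[EXC_TYPES_COUNT];
	exception_behavior_t old_behaviors[EXC_TYPES_COUNT];
	thread_state_flavor_t old_flavors[EXC_TYPES_COUNT];
	mach_msg_type_number_t count = EXC_TYPES_COUNT;   /* array capacity in */

	return task_swap_exception_ports(mach_task_self(),
	    EXC_MASK_BAD_ACCESS | EXC_MASK_BAD_INSTRUCTION,
	    new_exc_port, EXCEPTION_DEFAULT, MACHINE_THREAD_STATE,
	    masks, &count, old_ports, old_behaviors, old_flavors);
}
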
1605 /*
1606 * Routine: thread/task_get_exception_ports [kernel call]
1607 * Purpose:
1608 * Clones a send right for each of the thread/task's exception
1609  *	ports specified in the mask and returns the behavior
1610  *	and flavor of each such port.
1611  *
1612  *	Returns up to [in] CountCnt elements.
1613 *
1614 * Conditions:
1615 * Nothing locked.
1616 * Returns:
1617 * KERN_SUCCESS Extracted a send right.
1618 * KERN_INVALID_ARGUMENT The thread is null,
1619 * Invalid special port,
1620 * Illegal mask bit set.
1621 * KERN_FAILURE The thread is dead.
1622 */
1623
1624 kern_return_t
1625 thread_get_exception_ports(
1626 thread_t thread,
1627 exception_mask_t exception_mask,
1628 exception_mask_array_t masks,
1629 mach_msg_type_number_t *CountCnt,
1630 exception_port_array_t ports,
1631 exception_behavior_array_t behaviors,
1632 thread_state_flavor_array_t flavors)
1633 {
1634 unsigned int i, j, count;
1635
1636 if (thread == THREAD_NULL)
1637 return (KERN_INVALID_ARGUMENT);
1638
1639 if (exception_mask & ~EXC_MASK_ALL)
1640 return (KERN_INVALID_ARGUMENT);
1641
1642 thread_mtx_lock(thread);
1643
1644 if (!thread->active) {
1645 thread_mtx_unlock(thread);
1646
1647 return (KERN_FAILURE);
1648 }
1649
1650 count = 0;
1651
1652 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
1653 if (exception_mask & (1 << i)) {
1654 for (j = 0; j < count; ++j) {
1655 /*
1656 				 * search for an identical entry; if found,
1657 				 * set the corresponding mask for this exception.
1658 */
1659 if ( thread->exc_actions[i].port == ports[j] &&
1660 					thread->exc_actions[i].behavior == behaviors[j] &&
1661 thread->exc_actions[i].flavor == flavors[j] ) {
1662 masks[j] |= (1 << i);
1663 break;
1664 }
1665 }
1666
1667 if (j == count) {
1668 masks[j] = (1 << i);
1669 ports[j] = ipc_port_copy_send(thread->exc_actions[i].port);
1670 behaviors[j] = thread->exc_actions[i].behavior;
1671 flavors[j] = thread->exc_actions[i].flavor;
1672 ++count;
1673 if (count >= *CountCnt)
1674 break;
1675 }
1676 }
1677 }
1678
1679 thread_mtx_unlock(thread);
1680
1681 *CountCnt = count;
1682
1683 return (KERN_SUCCESS);
1684 }
1685
1686 kern_return_t
1687 task_get_exception_ports(
1688 task_t task,
1689 exception_mask_t exception_mask,
1690 exception_mask_array_t masks,
1691 mach_msg_type_number_t *CountCnt,
1692 exception_port_array_t ports,
1693 exception_behavior_array_t behaviors,
1694 thread_state_flavor_array_t flavors)
1695 {
1696 unsigned int i, j, count;
1697
1698 if (task == TASK_NULL)
1699 return (KERN_INVALID_ARGUMENT);
1700
1701 if (exception_mask & ~EXC_MASK_ALL)
1702 return (KERN_INVALID_ARGUMENT);
1703
1704 itk_lock(task);
1705
1706 if (task->itk_self == IP_NULL) {
1707 itk_unlock(task);
1708
1709 return (KERN_FAILURE);
1710 }
1711
1712 count = 0;
1713
1714 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
1715 if (exception_mask & (1 << i)) {
1716 for (j = 0; j < count; ++j) {
1717 /*
1718 				 * search for an identical entry; if found,
1719 				 * set the corresponding mask for this exception.
1720 */
1721 if ( task->exc_actions[i].port == ports[j] &&
1722 task->exc_actions[i].behavior == behaviors[j] &&
1723 task->exc_actions[i].flavor == flavors[j] ) {
1724 masks[j] |= (1 << i);
1725 break;
1726 }
1727 }
1728
1729 if (j == count) {
1730 masks[j] = (1 << i);
1731 ports[j] = ipc_port_copy_send(task->exc_actions[i].port);
1732 behaviors[j] = task->exc_actions[i].behavior;
1733 flavors[j] = task->exc_actions[i].flavor;
1734 ++count;
1735 if (count > *CountCnt)
1736 break;
1737 }
1738 }
1739 }
1740
1741 itk_unlock(task);
1742
1743 *CountCnt = count;
1744
1745 return (KERN_SUCCESS);
1746 }
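
/*
 * [Illustrative user-space sketch, not part of ipc_tt.c.]  Inspecting the
 * current task-level exception handlers.  On return, masks[i], ports[i],
 * behaviors[i] and flavors[i] describe one distinct handler and count says
 * how many entries were filled in.
 */
#include <mach/mach.h>
#include <stdio.h>

static void
dump_task_exc_handlers_example(void)
{
	exception_mask_t masks[EXC_TYPES_COUNT];
	mach_port_t ports[EXC_TYPES_COUNT];
	exception_behavior_t behaviors[EXC_TYPES_COUNT];
	thread_state_flavor_t flavors[EXC_TYPES_COUNT];
	mach_msg_type_number_t count = EXC_TYPES_COUNT;   /* array capacity in */
	unsigned int i;

	if (task_get_exception_ports(mach_task_self(), EXC_MASK_ALL,
	    masks, &count, ports, behaviors, flavors) != KERN_SUCCESS)
		return;

	for (i = 0; i < count; i++)
		printf("mask 0x%x -> port 0x%x behavior %d flavor %d\n",
		    masks[i], ports[i], behaviors[i], flavors[i]);
}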