1 /*
2 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the
10 * License may not be used to create, or enable the creation or
11 * redistribution of, unlawful or unlicensed copies of an Apple operating
12 * system, or to circumvent, violate, or enable the circumvention or
13 * violation of, any terms of an Apple operating system software license
14 * agreement.
15 *
16 * Please obtain a copy of the License at
17 * http://www.opensource.apple.com/apsl/ and read it before using this
18 * file.
19 *
20 * The Original Code and all software distributed under the License are
21 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
22 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
23 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
25 * Please see the License for the specific language governing rights and
26 * limitations under the License.
27 *
28 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
29 */
30 /*
31 * @OSF_COPYRIGHT@
32 */
33 /*
34 * Mach Operating System
35 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
36 * All Rights Reserved.
37 *
38 * Permission to use, copy, modify and distribute this software and its
39 * documentation is hereby granted, provided that both the copyright
40 * notice and this permission notice appear in all copies of the
41 * software, derivative works or modified versions, and any portions
42 * thereof, and that both notices appear in supporting documentation.
43 *
44 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
45 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
46 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
47 *
48 * Carnegie Mellon requests users of this software to return to
49 *
50 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
51 * School of Computer Science
52 * Carnegie Mellon University
53 * Pittsburgh PA 15213-3890
54 *
55 * any improvements or extensions that they make and grant Carnegie Mellon
56 * the rights to redistribute these changes.
57 */
58 /*
59 */
60
61 /*
62 * File: ipc_tt.c
63 * Purpose:
64 * Task and thread related IPC functions.
65 */
66
67 #include <mach/mach_types.h>
68 #include <mach/boolean.h>
69 #include <mach/kern_return.h>
70 #include <mach/mach_param.h>
71 #include <mach/task_special_ports.h>
72 #include <mach/thread_special_ports.h>
73 #include <mach/thread_status.h>
74 #include <mach/exception_types.h>
75 #include <mach/memory_object_types.h>
76 #include <mach/mach_traps.h>
77 #include <mach/task_server.h>
78 #include <mach/thread_act_server.h>
79 #include <mach/mach_host_server.h>
80 #include <mach/host_priv_server.h>
81 #include <mach/vm_map_server.h>
82
83 #include <kern/kern_types.h>
84 #include <kern/host.h>
85 #include <kern/ipc_kobject.h>
86 #include <kern/ipc_tt.h>
87 #include <kern/kalloc.h>
88 #include <kern/thread.h>
89 #include <kern/misc_protos.h>
90
91 #include <vm/vm_map.h>
92 #include <vm/vm_pageout.h>
93 #include <vm/vm_shared_memory_server.h>
94 #include <vm/vm_protos.h>
95
96 /* forward declarations */
97 task_t convert_port_to_locked_task(ipc_port_t port);
98
99
100 /*
101 * Routine: ipc_task_init
102 * Purpose:
103 * Initialize a task's IPC state.
104 *
105 * If non-null, some state will be inherited from the parent.
106 * The parent must be appropriately initialized.
107 * Conditions:
108 * Nothing locked.
109 */
110
111 void
112 ipc_task_init(
113 task_t task,
114 task_t parent)
115 {
116 ipc_space_t space;
117 ipc_port_t kport;
118 kern_return_t kr;
119 int i;
120
121
122 kr = ipc_space_create(&ipc_table_entries[0], &space);
123 if (kr != KERN_SUCCESS)
124 panic("ipc_task_init");
125
126
127 kport = ipc_port_alloc_kernel();
128 if (kport == IP_NULL)
129 panic("ipc_task_init");
130
131 itk_lock_init(task);
132 task->itk_self = kport;
133 task->itk_sself = ipc_port_make_send(kport);
134 task->itk_space = space;
135 space->is_fast = FALSE;
136
137 if (parent == TASK_NULL) {
138 ipc_port_t port;
139
140 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
141 task->exc_actions[i].port = IP_NULL;
142 }/* for */
143
144 kr = host_get_host_port(host_priv_self(), &port);
145 assert(kr == KERN_SUCCESS);
146 task->itk_host = port;
147
148 task->itk_bootstrap = IP_NULL;
149
150 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
151 task->itk_registered[i] = IP_NULL;
152 } else {
153 itk_lock(parent);
154 assert(parent->itk_self != IP_NULL);
155
156 /* inherit registered ports */
157
158 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
159 task->itk_registered[i] =
160 ipc_port_copy_send(parent->itk_registered[i]);
161
162 /* inherit exception and bootstrap ports */
163
164 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
165 task->exc_actions[i].port =
166 ipc_port_copy_send(parent->exc_actions[i].port);
167 task->exc_actions[i].flavor =
168 parent->exc_actions[i].flavor;
169 task->exc_actions[i].behavior =
170 parent->exc_actions[i].behavior;
171 task->exc_actions[i].privileged =
172 parent->exc_actions[i].privileged;
173 }/* for */
174 task->itk_host =
175 ipc_port_copy_send(parent->itk_host);
176
177 task->itk_bootstrap =
178 ipc_port_copy_send(parent->itk_bootstrap);
179
180 itk_unlock(parent);
181 }
182 }
183
184 /*
185 * Routine: ipc_task_enable
186 * Purpose:
187 * Enable a task for IPC access.
188 * Conditions:
189 * Nothing locked.
190 */
191
192 void
193 ipc_task_enable(
194 task_t task)
195 {
196 ipc_port_t kport;
197
198 itk_lock(task);
199 kport = task->itk_self;
200 if (kport != IP_NULL)
201 ipc_kobject_set(kport, (ipc_kobject_t) task, IKOT_TASK);
202 itk_unlock(task);
203 }
204
205 /*
206 * Routine: ipc_task_disable
207 * Purpose:
208 * Disable IPC access to a task.
209 * Conditions:
210 * Nothing locked.
211 */
212
213 void
214 ipc_task_disable(
215 task_t task)
216 {
217 ipc_port_t kport;
218
219 itk_lock(task);
220 kport = task->itk_self;
221 if (kport != IP_NULL)
222 ipc_kobject_set(kport, IKO_NULL, IKOT_NONE);
223 itk_unlock(task);
224 }
225
226 /*
227 * Routine: ipc_task_terminate
228 * Purpose:
229 * Clean up and destroy a task's IPC state.
230 * Conditions:
231 * Nothing locked. The task must be suspended.
232 * (Or the current thread must be in the task.)
233 */
234
235 void
236 ipc_task_terminate(
237 task_t task)
238 {
239 ipc_port_t kport;
240 int i;
241
242 itk_lock(task);
243 kport = task->itk_self;
244
245 if (kport == IP_NULL) {
246 /* the task is already terminated (can this happen?) */
247 itk_unlock(task);
248 return;
249 }
250
251 task->itk_self = IP_NULL;
252 itk_unlock(task);
253
254 /* release the naked send rights */
255
256 if (IP_VALID(task->itk_sself))
257 ipc_port_release_send(task->itk_sself);
258
259 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
260 if (IP_VALID(task->exc_actions[i].port)) {
261 ipc_port_release_send(task->exc_actions[i].port);
262 }
263 }
264
265 if (IP_VALID(task->itk_host))
266 ipc_port_release_send(task->itk_host);
267
268 if (IP_VALID(task->itk_bootstrap))
269 ipc_port_release_send(task->itk_bootstrap);
270
271 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
272 if (IP_VALID(task->itk_registered[i]))
273 ipc_port_release_send(task->itk_registered[i]);
274
275 ipc_port_release_send(task->wired_ledger_port);
276 ipc_port_release_send(task->paged_ledger_port);
277
278 /* destroy the kernel port */
279 ipc_port_dealloc_kernel(kport);
280 }
281
282 /*
283 * Routine: ipc_task_reset
284 * Purpose:
285 * Reset a task's IPC state to protect it when
286 * it enters an elevated security context.
287 * Conditions:
288 * Nothing locked. The task must be suspended.
289 * (Or the current thread must be in the task.)
290 */
291
292 void
293 ipc_task_reset(
294 task_t task)
295 {
296 ipc_port_t old_kport, new_kport;
297 ipc_port_t old_sself;
298 ipc_port_t old_exc_actions[EXC_TYPES_COUNT];
299 int i;
300
301 new_kport = ipc_port_alloc_kernel();
302 if (new_kport == IP_NULL)
303 panic("ipc_task_reset");
304
305 itk_lock(task);
306
307 old_kport = task->itk_self;
308
309 if (old_kport == IP_NULL) {
310 /* the task is already terminated (can this happen?) */
311 itk_unlock(task);
312 ipc_port_dealloc_kernel(new_kport);
313 return;
314 }
315
316 task->itk_self = new_kport;
317 old_sself = task->itk_sself;
318 task->itk_sself = ipc_port_make_send(new_kport);
319 ipc_kobject_set(old_kport, IKO_NULL, IKOT_NONE);
320 ipc_kobject_set(new_kport, (ipc_kobject_t) task, IKOT_TASK);
321
322 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
323 if (!task->exc_actions[i].privileged) {
324 old_exc_actions[i] = task->exc_actions[i].port;
325 task->exc_actions[i].port = IP_NULL;
326 } else {
327 old_exc_actions[i] = IP_NULL;
328 }
329 }/* for */
330
331 itk_unlock(task);
332
333 /* release the naked send rights */
334
335 if (IP_VALID(old_sself))
336 ipc_port_release_send(old_sself);
337
338 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
339 if (IP_VALID(old_exc_actions[i])) {
340 ipc_port_release_send(old_exc_actions[i]);
341 }
342 }/* for */
343
344 /* destroy the kernel port */
345 ipc_port_dealloc_kernel(old_kport);
346 }
347
348 /*
349 * Routine: ipc_thread_init
350 * Purpose:
351 * Initialize a thread's IPC state.
352 * Conditions:
353 * Nothing locked.
354 */
355
356 void
357 ipc_thread_init(
358 thread_t thread)
359 {
360 ipc_port_t kport;
361 int i;
362
363 kport = ipc_port_alloc_kernel();
364 if (kport == IP_NULL)
365 panic("ipc_thread_init");
366
367 thread->ith_self = kport;
368 thread->ith_sself = ipc_port_make_send(kport);
369
370 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i)
371 thread->exc_actions[i].port = IP_NULL;
372
373 ipc_kobject_set(kport, (ipc_kobject_t)thread, IKOT_THREAD);
374
375 ipc_kmsg_queue_init(&thread->ith_messages);
376
377 thread->ith_rpc_reply = IP_NULL;
378 }
379
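/*
 *	Routine:	ipc_thread_disable
 *	Purpose:
 *		Disable IPC access to a thread by dissociating its
 *		kernel port from the thread (cf. ipc_task_disable).
 */
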
380 void
381 ipc_thread_disable(
382 thread_t thread)
383 {
384 ipc_port_t kport = thread->ith_self;
385
386 if (kport != IP_NULL)
387 ipc_kobject_set(kport, IKO_NULL, IKOT_NONE);
388 }
389
390 /*
391 * Routine: ipc_thread_terminate
392 * Purpose:
393 * Clean up and destroy a thread's IPC state.
394 * Conditions:
395 * Nothing locked.
396 */
397
398 void
399 ipc_thread_terminate(
400 thread_t thread)
401 {
402 ipc_port_t kport = thread->ith_self;
403
404 if (kport != IP_NULL) {
405 int i;
406
407 if (IP_VALID(thread->ith_sself))
408 ipc_port_release_send(thread->ith_sself);
409
410 thread->ith_sself = thread->ith_self = IP_NULL;
411
412 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
413 if (IP_VALID(thread->exc_actions[i].port))
414 ipc_port_release_send(thread->exc_actions[i].port);
415 }
416
417 ipc_port_dealloc_kernel(kport);
418 }
419
420 assert(ipc_kmsg_queue_empty(&thread->ith_messages));
421
422 if (thread->ith_rpc_reply != IP_NULL)
423 ipc_port_dealloc_reply(thread->ith_rpc_reply);
424
425 thread->ith_rpc_reply = IP_NULL;
426 }
427
428 /*
429 * Routine: retrieve_task_self_fast
430 * Purpose:
431  *      Optimized version of retrieve_task_self
432  *      that only works for the current task.
433 *
434 * Return a send right (possibly null/dead)
435 * for the task's user-visible self port.
436 * Conditions:
437 * Nothing locked.
438 */
439
440 ipc_port_t
441 retrieve_task_self_fast(
442 register task_t task)
443 {
444 register ipc_port_t port;
445
446 assert(task == current_task());
447
448 itk_lock(task);
449 assert(task->itk_self != IP_NULL);
450
451 if ((port = task->itk_sself) == task->itk_self) {
452 /* no interposing */
453
454 ip_lock(port);
455 assert(ip_active(port));
456 ip_reference(port);
457 port->ip_srights++;
458 ip_unlock(port);
459 } else
460 port = ipc_port_copy_send(port);
461 itk_unlock(task);
462
463 return port;
464 }
465
466 /*
467 * Routine: retrieve_thread_self_fast
468 * Purpose:
469 * Return a send right (possibly null/dead)
470 * for the thread's user-visible self port.
471 *
472 * Only works for the current thread.
473 *
474 * Conditions:
475 * Nothing locked.
476 */
477
478 ipc_port_t
479 retrieve_thread_self_fast(
480 thread_t thread)
481 {
482 register ipc_port_t port;
483
484 assert(thread == current_thread());
485
486 thread_mtx_lock(thread);
487
488 assert(thread->ith_self != IP_NULL);
489
490 if ((port = thread->ith_sself) == thread->ith_self) {
491 /* no interposing */
492
493 ip_lock(port);
494 assert(ip_active(port));
495 ip_reference(port);
496 port->ip_srights++;
497 ip_unlock(port);
498 }
499 else
500 port = ipc_port_copy_send(port);
501
502 thread_mtx_unlock(thread);
503
504 return port;
505 }
506
507 /*
508 * Routine: task_self_trap [mach trap]
509 * Purpose:
510 * Give the caller send rights for his own task port.
511 * Conditions:
512 * Nothing locked.
513 * Returns:
514 * MACH_PORT_NULL if there are any resource failures
515 * or other errors.
516 */
517
518 mach_port_name_t
519 task_self_trap(
520 __unused struct task_self_trap_args *args)
521 {
522 task_t task = current_task();
523 ipc_port_t sright;
524 mach_port_name_t name;
525
526 sright = retrieve_task_self_fast(task);
527 name = ipc_port_copyout_send(sright, task->itk_space);
528 return name;
529 }
530
531 /*
532 * Routine: thread_self_trap [mach trap]
533 * Purpose:
534 * Give the caller send rights for his own thread port.
535 * Conditions:
536 * Nothing locked.
537 * Returns:
538 * MACH_PORT_NULL if there are any resource failures
539 * or other errors.
540 */
541
542 mach_port_name_t
543 thread_self_trap(
544 __unused struct thread_self_trap_args *args)
545 {
546 thread_t thread = current_thread();
547 task_t task = thread->task;
548 ipc_port_t sright;
549 mach_port_name_t name;
550
551 sright = retrieve_thread_self_fast(thread);
552 name = ipc_port_copyout_send(sright, task->itk_space);
553 return name;
554
555 }
556
557 /*
558 * Routine: mach_reply_port [mach trap]
559 * Purpose:
560 * Allocate a port for the caller.
561 * Conditions:
562 * Nothing locked.
563 * Returns:
564 * MACH_PORT_NULL if there are any resource failures
565 * or other errors.
566 */
567
568 mach_port_name_t
569 mach_reply_port(
570 __unused struct mach_reply_port_args *args)
571 {
572 ipc_port_t port;
573 mach_port_name_t name;
574 kern_return_t kr;
575
576 kr = ipc_port_alloc(current_task()->itk_space, &name, &port);
577 if (kr == KERN_SUCCESS)
578 ip_unlock(port);
579 else
580 name = MACH_PORT_NULL;
581 return name;
582 }
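
/*
 *	The three traps above back the familiar user-space calls: roughly,
 *	task_self_trap() underlies mach_task_self(), thread_self_trap()
 *	underlies mach_thread_self(), and mach_reply_port() is exported
 *	directly.  A minimal user-space sketch (illustrative only, assuming
 *	<mach/mach.h> and the usual libsyscall wrappers):
 *
 *		mach_port_t task  = mach_task_self();	// the caller's task port
 *		mach_port_t self  = mach_thread_self();	// new send right; deallocate when done
 *		mach_port_t reply = mach_reply_port();	// fresh receive right for RPC replies
 *
 *		mach_port_deallocate(mach_task_self(), self);
 *		mach_port_mod_refs(mach_task_self(), reply,
 *		    MACH_PORT_RIGHT_RECEIVE, -1);
 */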
583
584 /*
585 * Routine: thread_get_special_port [kernel call]
586 * Purpose:
587 * Clones a send right for one of the thread's
588 * special ports.
589 * Conditions:
590 * Nothing locked.
591 * Returns:
592 * KERN_SUCCESS Extracted a send right.
593 * KERN_INVALID_ARGUMENT The thread is null.
594 * KERN_FAILURE The thread is dead.
595 * KERN_INVALID_ARGUMENT Invalid special port.
596 */
597
598 kern_return_t
599 thread_get_special_port(
600 thread_t thread,
601 int which,
602 ipc_port_t *portp)
603 {
604 kern_return_t result = KERN_SUCCESS;
605 ipc_port_t *whichp;
606
607 if (thread == THREAD_NULL)
608 return (KERN_INVALID_ARGUMENT);
609
610 switch (which) {
611
612 case THREAD_KERNEL_PORT:
613 whichp = &thread->ith_sself;
614 break;
615
616 default:
617 return (KERN_INVALID_ARGUMENT);
618 }
619
620 thread_mtx_lock(thread);
621
622 if (thread->active)
623 *portp = ipc_port_copy_send(*whichp);
624 else
625 result = KERN_FAILURE;
626
627 thread_mtx_unlock(thread);
628
629 return (result);
630 }
631
632 /*
633 * Routine: thread_set_special_port [kernel call]
634 * Purpose:
635 * Changes one of the thread's special ports,
636 * setting it to the supplied send right.
637 * Conditions:
638 * Nothing locked. If successful, consumes
639 * the supplied send right.
640 * Returns:
641 * KERN_SUCCESS Changed the special port.
642 * KERN_INVALID_ARGUMENT The thread is null.
643 * KERN_FAILURE The thread is dead.
644 * KERN_INVALID_ARGUMENT Invalid special port.
645 */
646
647 kern_return_t
648 thread_set_special_port(
649 thread_t thread,
650 int which,
651 ipc_port_t port)
652 {
653 kern_return_t result = KERN_SUCCESS;
654 ipc_port_t *whichp, old = IP_NULL;
655
656 if (thread == THREAD_NULL)
657 return (KERN_INVALID_ARGUMENT);
658
659 switch (which) {
660
661 case THREAD_KERNEL_PORT:
662 whichp = &thread->ith_sself;
663 break;
664
665 default:
666 return (KERN_INVALID_ARGUMENT);
667 }
668
669 thread_mtx_lock(thread);
670
671 if (thread->active) {
672 old = *whichp;
673 *whichp = port;
674 }
675 else
676 result = KERN_FAILURE;
677
678 thread_mtx_unlock(thread);
679
680 if (IP_VALID(old))
681 ipc_port_release_send(old);
682
683 return (result);
684 }
685
686 /*
687 * Routine: task_get_special_port [kernel call]
688 * Purpose:
689 * Clones a send right for one of the task's
690 * special ports.
691 * Conditions:
692 * Nothing locked.
693 * Returns:
694 * KERN_SUCCESS Extracted a send right.
695 * KERN_INVALID_ARGUMENT The task is null.
696 * KERN_FAILURE The task/space is dead.
697 * KERN_INVALID_ARGUMENT Invalid special port.
698 */
699
700 kern_return_t
701 task_get_special_port(
702 task_t task,
703 int which,
704 ipc_port_t *portp)
705 {
706 ipc_port_t *whichp;
707 ipc_port_t port;
708
709 if (task == TASK_NULL)
710 return KERN_INVALID_ARGUMENT;
711
712 switch (which) {
713 case TASK_KERNEL_PORT:
714 whichp = &task->itk_sself;
715 break;
716
717 case TASK_HOST_PORT:
718 whichp = &task->itk_host;
719 break;
720
721 case TASK_BOOTSTRAP_PORT:
722 whichp = &task->itk_bootstrap;
723 break;
724
725 case TASK_WIRED_LEDGER_PORT:
726 whichp = &task->wired_ledger_port;
727 break;
728
729 case TASK_PAGED_LEDGER_PORT:
730 whichp = &task->paged_ledger_port;
731 break;
732
733 default:
734 return KERN_INVALID_ARGUMENT;
735 }
736
737 itk_lock(task);
738 if (task->itk_self == IP_NULL) {
739 itk_unlock(task);
740 return KERN_FAILURE;
741 }
742
743 port = ipc_port_copy_send(*whichp);
744 itk_unlock(task);
745
746 *portp = port;
747 return KERN_SUCCESS;
748 }
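
/*
 *	From user space this is normally reached through the MIG-generated
 *	task_get_special_port() wrapper.  An illustrative user-space sketch
 *	(error handling elided):
 *
 *		mach_port_t bootstrap;
 *		kern_return_t kr;
 *
 *		kr = task_get_special_port(mach_task_self(),
 *		    TASK_BOOTSTRAP_PORT, &bootstrap);
 *		if (kr == KERN_SUCCESS && MACH_PORT_VALID(bootstrap)) {
 *			// bootstrap is a cloned send right; release it
 *			// with mach_port_deallocate() when done.
 *		}
 */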
749
750 /*
751 * Routine: task_set_special_port [kernel call]
752 * Purpose:
753 * Changes one of the task's special ports,
754 * setting it to the supplied send right.
755 * Conditions:
756 * Nothing locked. If successful, consumes
757 * the supplied send right.
758 * Returns:
759 * KERN_SUCCESS Changed the special port.
760 * KERN_INVALID_ARGUMENT The task is null.
761 * KERN_FAILURE The task/space is dead.
762 * KERN_INVALID_ARGUMENT Invalid special port.
763 */
764
765 kern_return_t
766 task_set_special_port(
767 task_t task,
768 int which,
769 ipc_port_t port)
770 {
771 ipc_port_t *whichp;
772 ipc_port_t old;
773
774 if (task == TASK_NULL)
775 return KERN_INVALID_ARGUMENT;
776
777 switch (which) {
778 case TASK_KERNEL_PORT:
779 whichp = &task->itk_sself;
780 break;
781
782 case TASK_HOST_PORT:
783 whichp = &task->itk_host;
784 break;
785
786 case TASK_BOOTSTRAP_PORT:
787 whichp = &task->itk_bootstrap;
788 break;
789
790 case TASK_WIRED_LEDGER_PORT:
791 whichp = &task->wired_ledger_port;
792 break;
793
794 case TASK_PAGED_LEDGER_PORT:
795 whichp = &task->paged_ledger_port;
796 break;
797
798 default:
799 return KERN_INVALID_ARGUMENT;
800 }/* switch */
801
802 itk_lock(task);
803 if (task->itk_self == IP_NULL) {
804 itk_unlock(task);
805 return KERN_FAILURE;
806 }
807
808 old = *whichp;
809 *whichp = port;
810 itk_unlock(task);
811
812 if (IP_VALID(old))
813 ipc_port_release_send(old);
814 return KERN_SUCCESS;
815 }
816
817
818 /*
819 * Routine: mach_ports_register [kernel call]
820 * Purpose:
821 * Stash a handful of port send rights in the task.
822 * Child tasks will inherit these rights, but they
823 * must use mach_ports_lookup to acquire them.
824 *
825 * The rights are supplied in a (wired) kalloc'd segment.
826 * Rights which aren't supplied are assumed to be null.
827 * Conditions:
828 * Nothing locked. If successful, consumes
829 * the supplied rights and memory.
830 * Returns:
831 * KERN_SUCCESS Stashed the port rights.
832 * KERN_INVALID_ARGUMENT The task is null.
833 * KERN_INVALID_ARGUMENT The task is dead.
834 * KERN_INVALID_ARGUMENT Too many port rights supplied.
835 */
836
837 kern_return_t
838 mach_ports_register(
839 task_t task,
840 mach_port_array_t memory,
841 mach_msg_type_number_t portsCnt)
842 {
843 ipc_port_t ports[TASK_PORT_REGISTER_MAX];
844 unsigned int i;
845
846 if ((task == TASK_NULL) ||
847 (portsCnt > TASK_PORT_REGISTER_MAX))
848 return KERN_INVALID_ARGUMENT;
849
850 /*
851 * Pad the port rights with nulls.
852 */
853
854 for (i = 0; i < portsCnt; i++)
855 ports[i] = memory[i];
856 for (; i < TASK_PORT_REGISTER_MAX; i++)
857 ports[i] = IP_NULL;
858
859 itk_lock(task);
860 if (task->itk_self == IP_NULL) {
861 itk_unlock(task);
862 return KERN_INVALID_ARGUMENT;
863 }
864
865 /*
866 * Replace the old send rights with the new.
867 * Release the old rights after unlocking.
868 */
869
870 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
871 ipc_port_t old;
872
873 old = task->itk_registered[i];
874 task->itk_registered[i] = ports[i];
875 ports[i] = old;
876 }
877
878 itk_unlock(task);
879
880 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
881 if (IP_VALID(ports[i]))
882 ipc_port_release_send(ports[i]);
883
884 /*
885 * Now that the operation is known to be successful,
886 * we can free the memory.
887 */
888
889 if (portsCnt != 0)
890 kfree(memory,
891 (vm_size_t) (portsCnt * sizeof(mach_port_t)));
892
893 return KERN_SUCCESS;
894 }
895
896 /*
897 * Routine: mach_ports_lookup [kernel call]
898 * Purpose:
899 * Retrieves (clones) the stashed port send rights.
900 * Conditions:
901 * Nothing locked. If successful, the caller gets
902 * rights and memory.
903 * Returns:
904 * KERN_SUCCESS Retrieved the send rights.
905 * KERN_INVALID_ARGUMENT The task is null.
906 * KERN_INVALID_ARGUMENT The task is dead.
907 * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
908 */
909
910 kern_return_t
911 mach_ports_lookup(
912 task_t task,
913 mach_port_array_t *portsp,
914 mach_msg_type_number_t *portsCnt)
915 {
916 void *memory;
917 vm_size_t size;
918 ipc_port_t *ports;
919 int i;
920
921 if (task == TASK_NULL)
922 return KERN_INVALID_ARGUMENT;
923
924 size = (vm_size_t) (TASK_PORT_REGISTER_MAX * sizeof(ipc_port_t));
925
926 memory = kalloc(size);
927 if (memory == 0)
928 return KERN_RESOURCE_SHORTAGE;
929
930 itk_lock(task);
931 if (task->itk_self == IP_NULL) {
932 itk_unlock(task);
933
934 kfree(memory, size);
935 return KERN_INVALID_ARGUMENT;
936 }
937
938 ports = (ipc_port_t *) memory;
939
940 /*
941 * Clone port rights. Because kalloc'd memory
942 * is wired, we won't fault while holding the task lock.
943 */
944
945 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
946 ports[i] = ipc_port_copy_send(task->itk_registered[i]);
947
948 itk_unlock(task);
949
950 *portsp = (mach_port_array_t) ports;
951 *portsCnt = TASK_PORT_REGISTER_MAX;
952 return KERN_SUCCESS;
953 }
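
/*
 *	An illustrative user-space sketch of the register/lookup pair
 *	("svc_port" is a placeholder for whatever send right a parent
 *	wants its children to find; error handling elided):
 *
 *		kern_return_t kr;
 *		mach_port_t set[1] = { svc_port };
 *
 *		kr = mach_ports_register(mach_task_self(), set, 1);
 *
 *		// ... later, e.g. in a child task ...
 *		mach_port_array_t ports;
 *		mach_msg_type_number_t cnt;
 *
 *		kr = mach_ports_lookup(mach_task_self(), &ports, &cnt);
 *		// cnt comes back as TASK_PORT_REGISTER_MAX; unused slots are
 *		// MACH_PORT_NULL, and the out-of-line ports array should be
 *		// vm_deallocate()d by the caller.
 */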
954
955 /*
956 * Routine: convert_port_to_locked_task
957 * Purpose:
958 * Internal helper routine to convert from a port to a locked
959 * task. Used by several routines that try to convert from a
960 * task port to a reference on some task related object.
961 * Conditions:
962 * Nothing locked, blocking OK.
963 */
964 task_t
965 convert_port_to_locked_task(ipc_port_t port)
966 {
967 while (IP_VALID(port)) {
968 task_t task;
969
970 ip_lock(port);
971 if (!ip_active(port) || (ip_kotype(port) != IKOT_TASK)) {
972 ip_unlock(port);
973 return TASK_NULL;
974 }
975 task = (task_t) port->ip_kobject;
976 assert(task != TASK_NULL);
977
978 /*
979 * Normal lock ordering puts task_lock() before ip_lock().
980 * Attempt out-of-order locking here.
981 */
982 if (task_lock_try(task)) {
983 ip_unlock(port);
984 return(task);
985 }
986
987 ip_unlock(port);
988 mutex_pause();
989 }
990 return TASK_NULL;
991 }
992
993 /*
994 * Routine: convert_port_to_task
995 * Purpose:
996 * Convert from a port to a task.
997 * Doesn't consume the port ref; produces a task ref,
998 * which may be null.
999 * Conditions:
1000 * Nothing locked.
1001 */
1002 task_t
1003 convert_port_to_task(
1004 ipc_port_t port)
1005 {
1006 task_t task = TASK_NULL;
1007
1008 if (IP_VALID(port)) {
1009 ip_lock(port);
1010
1011 if ( ip_active(port) &&
1012 ip_kotype(port) == IKOT_TASK ) {
1013 task = (task_t)port->ip_kobject;
1014 assert(task != TASK_NULL);
1015
1016 task_reference_internal(task);
1017 }
1018
1019 ip_unlock(port);
1020 }
1021
1022 return (task);
1023 }
1024
1025 /*
1026 * Routine: convert_port_to_space
1027 * Purpose:
1028 * Convert from a port to a space.
1029 * Doesn't consume the port ref; produces a space ref,
1030 * which may be null.
1031 * Conditions:
1032 * Nothing locked.
1033 */
1034 ipc_space_t
1035 convert_port_to_space(
1036 ipc_port_t port)
1037 {
1038 ipc_space_t space;
1039 task_t task;
1040
1041 task = convert_port_to_locked_task(port);
1042
1043 if (task == TASK_NULL)
1044 return IPC_SPACE_NULL;
1045
1046 if (!task->active) {
1047 task_unlock(task);
1048 return IPC_SPACE_NULL;
1049 }
1050
1051 space = task->itk_space;
1052 is_reference(space);
1053 task_unlock(task);
1054 return (space);
1055 }
1056
1057 /*
1058 * Routine: convert_port_to_map
1059 * Purpose:
1060 * Convert from a port to a map.
1061 * Doesn't consume the port ref; produces a map ref,
1062 * which may be null.
1063 * Conditions:
1064 * Nothing locked.
1065 */
1066
1067 vm_map_t
1068 convert_port_to_map(
1069 ipc_port_t port)
1070 {
1071 task_t task;
1072 vm_map_t map;
1073
1074 task = convert_port_to_locked_task(port);
1075
1076 if (task == TASK_NULL)
1077 return VM_MAP_NULL;
1078
1079 if (!task->active) {
1080 task_unlock(task);
1081 return VM_MAP_NULL;
1082 }
1083
1084 map = task->map;
1085 vm_map_reference_swap(map);
1086 task_unlock(task);
1087 return map;
1088 }
1089
1090
1091 /*
1092 * Routine: convert_port_to_thread
1093 * Purpose:
1094 * Convert from a port to a thread.
1095  *      Doesn't consume the port ref; produces a thread ref,
1096 * which may be null.
1097 * Conditions:
1098 * Nothing locked.
1099 */
1100
1101 thread_t
1102 convert_port_to_thread(
1103 ipc_port_t port)
1104 {
1105 thread_t thread = THREAD_NULL;
1106
1107 if (IP_VALID(port)) {
1108 ip_lock(port);
1109
1110 if ( ip_active(port) &&
1111 ip_kotype(port) == IKOT_THREAD ) {
1112 thread = (thread_t)port->ip_kobject;
1113 assert(thread != THREAD_NULL);
1114
1115 thread_reference_internal(thread);
1116 }
1117
1118 ip_unlock(port);
1119 }
1120
1121 return (thread);
1122 }
1123
1124 /*
1125 * Routine: port_name_to_thread
1126 * Purpose:
1127  *      Convert from a port name to a thread reference.
1128 * A name of MACH_PORT_NULL is valid for the null thread.
1129 * Conditions:
1130 * Nothing locked.
1131 */
1132 thread_t
1133 port_name_to_thread(
1134 mach_port_name_t name)
1135 {
1136 thread_t thread = THREAD_NULL;
1137 ipc_port_t kport;
1138
1139 if (MACH_PORT_VALID(name)) {
1140 if (ipc_object_copyin(current_space(), name,
1141 MACH_MSG_TYPE_COPY_SEND,
1142 (ipc_object_t *)&kport) != KERN_SUCCESS)
1143 return (THREAD_NULL);
1144
1145 thread = convert_port_to_thread(kport);
1146
1147 if (IP_VALID(kport))
1148 ipc_port_release_send(kport);
1149 }
1150
1151 return (thread);
1152 }
1153
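/*
 *	Routine:	port_name_to_task
 *	Purpose:
 *		Convert from a port name to a task reference.
 *		A name of MACH_PORT_NULL is valid for the null task.
 *	Conditions:
 *		Nothing locked.
 */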
1154 task_t
1155 port_name_to_task(
1156 mach_port_name_t name)
1157 {
1158 ipc_port_t kern_port;
1159 kern_return_t kr;
1160 task_t task = TASK_NULL;
1161
1162 if (MACH_PORT_VALID(name)) {
1163 kr = ipc_object_copyin(current_space(), name,
1164 MACH_MSG_TYPE_COPY_SEND,
1165 (ipc_object_t *) &kern_port);
1166 if (kr != KERN_SUCCESS)
1167 return TASK_NULL;
1168
1169 task = convert_port_to_task(kern_port);
1170
1171 if (IP_VALID(kern_port))
1172 ipc_port_release_send(kern_port);
1173 }
1174 return task;
1175 }
1176
1177 /*
1178 * Routine: convert_task_to_port
1179 * Purpose:
1180 * Convert from a task to a port.
1181 * Consumes a task ref; produces a naked send right
1182 * which may be invalid.
1183 * Conditions:
1184 * Nothing locked.
1185 */
1186
1187 ipc_port_t
1188 convert_task_to_port(
1189 task_t task)
1190 {
1191 ipc_port_t port;
1192
1193 itk_lock(task);
1194 if (task->itk_self != IP_NULL)
1195 port = ipc_port_make_send(task->itk_self);
1196 else
1197 port = IP_NULL;
1198 itk_unlock(task);
1199
1200 task_deallocate(task);
1201 return port;
1202 }
1203
1204 /*
1205 * Routine: convert_thread_to_port
1206 * Purpose:
1207 * Convert from a thread to a port.
1208  *      Consumes a thread ref; produces a naked send right
1209 * which may be invalid.
1210 * Conditions:
1211 * Nothing locked.
1212 */
1213
1214 ipc_port_t
1215 convert_thread_to_port(
1216 thread_t thread)
1217 {
1218 ipc_port_t port;
1219
1220 thread_mtx_lock(thread);
1221
1222 if (thread->ith_self != IP_NULL)
1223 port = ipc_port_make_send(thread->ith_self);
1224 else
1225 port = IP_NULL;
1226
1227 thread_mtx_unlock(thread);
1228
1229 thread_deallocate(thread);
1230
1231 return (port);
1232 }
1233
1234 /*
1235 * Routine: space_deallocate
1236 * Purpose:
1237 * Deallocate a space ref produced by convert_port_to_space.
1238 * Conditions:
1239 * Nothing locked.
1240 */
1241
1242 void
1243 space_deallocate(
1244 ipc_space_t space)
1245 {
1246 if (space != IS_NULL)
1247 is_release(space);
1248 }
1249
1250 /*
1251 * Routine: thread/task_set_exception_ports [kernel call]
1252 * Purpose:
1253 * Sets the thread/task exception port, flavor and
1254 * behavior for the exception types specified by the mask.
1255 * There will be one send right per exception per valid
1256 * port.
1257 * Conditions:
1258 * Nothing locked. If successful, consumes
1259 * the supplied send right.
1260 * Returns:
1261 * KERN_SUCCESS Changed the special port.
1262 * KERN_INVALID_ARGUMENT The thread is null,
1263 * Illegal mask bit set.
1264 * Illegal exception behavior
1265 * KERN_FAILURE The thread is dead.
1266 */
1267
1268 kern_return_t
1269 thread_set_exception_ports(
1270 thread_t thread,
1271 exception_mask_t exception_mask,
1272 ipc_port_t new_port,
1273 exception_behavior_t new_behavior,
1274 thread_state_flavor_t new_flavor)
1275 {
1276 ipc_port_t old_port[EXC_TYPES_COUNT];
1277 register int i;
1278
1279 if (thread == THREAD_NULL)
1280 return (KERN_INVALID_ARGUMENT);
1281
1282 if (exception_mask & ~EXC_MASK_ALL)
1283 return (KERN_INVALID_ARGUMENT);
1284
1285 if (IP_VALID(new_port)) {
1286 switch (new_behavior) {
1287
1288 case EXCEPTION_DEFAULT:
1289 case EXCEPTION_STATE:
1290 case EXCEPTION_STATE_IDENTITY:
1291 break;
1292
1293 default:
1294 return (KERN_INVALID_ARGUMENT);
1295 }
1296 }
1297
1298 /*
1299 * Check the validity of the thread_state_flavor by calling the
1300 * VALID_THREAD_STATE_FLAVOR architecture dependent macro defined in
1301 * osfmk/mach/ARCHITECTURE/thread_status.h
1302 */
1303 if (!VALID_THREAD_STATE_FLAVOR(new_flavor))
1304 return (KERN_INVALID_ARGUMENT);
1305
1306 thread_mtx_lock(thread);
1307
1308 if (!thread->active) {
1309 thread_mtx_unlock(thread);
1310
1311 return (KERN_FAILURE);
1312 }
1313
1314 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
1315 if (exception_mask & (1 << i)) {
1316 old_port[i] = thread->exc_actions[i].port;
1317 thread->exc_actions[i].port = ipc_port_copy_send(new_port);
1318 thread->exc_actions[i].behavior = new_behavior;
1319 thread->exc_actions[i].flavor = new_flavor;
1320 }
1321 else
1322 old_port[i] = IP_NULL;
1323 }
1324
1325 thread_mtx_unlock(thread);
1326
1327 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i)
1328 if (IP_VALID(old_port[i]))
1329 ipc_port_release_send(old_port[i]);
1330
1331 if (IP_VALID(new_port)) /* consume send right */
1332 ipc_port_release_send(new_port);
1333
1334 return (KERN_SUCCESS);
1335 }
1336
1337 kern_return_t
1338 task_set_exception_ports(
1339 task_t task,
1340 exception_mask_t exception_mask,
1341 ipc_port_t new_port,
1342 exception_behavior_t new_behavior,
1343 thread_state_flavor_t new_flavor)
1344 {
1345 ipc_port_t old_port[EXC_TYPES_COUNT];
1346 boolean_t privileged = current_task()->sec_token.val[0] == 0;
1347 register int i;
1348
1349 if (task == TASK_NULL)
1350 return (KERN_INVALID_ARGUMENT);
1351
1352 if (exception_mask & ~EXC_MASK_ALL)
1353 return (KERN_INVALID_ARGUMENT);
1354
1355 if (IP_VALID(new_port)) {
1356 switch (new_behavior) {
1357
1358 case EXCEPTION_DEFAULT:
1359 case EXCEPTION_STATE:
1360 case EXCEPTION_STATE_IDENTITY:
1361 break;
1362
1363 default:
1364 return (KERN_INVALID_ARGUMENT);
1365 }
1366 }
1367
1368 itk_lock(task);
1369
1370 if (task->itk_self == IP_NULL) {
1371 itk_unlock(task);
1372
1373 return (KERN_FAILURE);
1374 }
1375
1376 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
1377 if (exception_mask & (1 << i)) {
1378 old_port[i] = task->exc_actions[i].port;
1379 task->exc_actions[i].port =
1380 ipc_port_copy_send(new_port);
1381 task->exc_actions[i].behavior = new_behavior;
1382 task->exc_actions[i].flavor = new_flavor;
1383 task->exc_actions[i].privileged = privileged;
1384 }
1385 else
1386 old_port[i] = IP_NULL;
1387 }
1388
1389 itk_unlock(task);
1390
1391 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i)
1392 if (IP_VALID(old_port[i]))
1393 ipc_port_release_send(old_port[i]);
1394
1395 if (IP_VALID(new_port)) /* consume send right */
1396 ipc_port_release_send(new_port);
1397
1398 return (KERN_SUCCESS);
1399 }
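
/*
 *	An illustrative user-space sketch of installing a task exception
 *	handler via the routine above (error handling elided; "exc_port"
 *	is created locally and is not a name defined in this file):
 *
 *		mach_port_t exc_port;
 *		kern_return_t kr;
 *
 *		kr = mach_port_allocate(mach_task_self(),
 *		    MACH_PORT_RIGHT_RECEIVE, &exc_port);
 *		kr = mach_port_insert_right(mach_task_self(), exc_port,
 *		    exc_port, MACH_MSG_TYPE_MAKE_SEND);
 *		kr = task_set_exception_ports(mach_task_self(),
 *		    EXC_MASK_BAD_ACCESS, exc_port,
 *		    EXCEPTION_DEFAULT, MACHINE_THREAD_STATE);
 *
 *		// Exceptions selected by the mask are now delivered to
 *		// exc_port, which the caller services with mach_msg()
 *		// and the MIG-generated exc_server() demultiplexer.
 */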
1400
1401 /*
1402 * Routine: thread/task_swap_exception_ports [kernel call]
1403 * Purpose:
1404 * Sets the thread/task exception port, flavor and
1405 * behavior for the exception types specified by the
1406 * mask.
1407 *
1408  *      The old ports, behaviors and flavors are returned.
1409  *      Count specifies the array sizes on input and
1410  *      the number of returned ports etc. on output.  The
1411  *      arrays must be large enough to hold all the returned
1412  *      data; MIG returns an error otherwise.  The masks
1413 * array specifies the corresponding exception type(s).
1414 *
1415 * Conditions:
1416 * Nothing locked. If successful, consumes
1417 * the supplied send right.
1418 *
1419  *      Returns up to [in] CountCnt elements.
1420 * Returns:
1421 * KERN_SUCCESS Changed the special port.
1422 * KERN_INVALID_ARGUMENT The thread is null,
1423 * Illegal mask bit set.
1424 * Illegal exception behavior
1425 * KERN_FAILURE The thread is dead.
1426 */
1427
1428 kern_return_t
1429 thread_swap_exception_ports(
1430 thread_t thread,
1431 exception_mask_t exception_mask,
1432 ipc_port_t new_port,
1433 exception_behavior_t new_behavior,
1434 thread_state_flavor_t new_flavor,
1435 exception_mask_array_t masks,
1436 mach_msg_type_number_t *CountCnt,
1437 exception_port_array_t ports,
1438 exception_behavior_array_t behaviors,
1439 thread_state_flavor_array_t flavors)
1440 {
1441 ipc_port_t old_port[EXC_TYPES_COUNT];
1442 unsigned int i, j, count;
1443
1444 if (thread == THREAD_NULL)
1445 return (KERN_INVALID_ARGUMENT);
1446
1447 if (exception_mask & ~EXC_MASK_ALL)
1448 return (KERN_INVALID_ARGUMENT);
1449
1450 if (IP_VALID(new_port)) {
1451 switch (new_behavior) {
1452
1453 case EXCEPTION_DEFAULT:
1454 case EXCEPTION_STATE:
1455 case EXCEPTION_STATE_IDENTITY:
1456 break;
1457
1458 default:
1459 return (KERN_INVALID_ARGUMENT);
1460 }
1461 }
1462
1463 thread_mtx_lock(thread);
1464
1465 if (!thread->active) {
1466 thread_mtx_unlock(thread);
1467
1468 return (KERN_FAILURE);
1469 }
1470
1471 count = 0;
1472
1473 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
1474 if (exception_mask & (1 << i)) {
1475 for (j = 0; j < count; ++j) {
1476 /*
1477 * search for an identical entry, if found
1478 * set corresponding mask for this exception.
1479 */
1480 if ( thread->exc_actions[i].port == ports[j] &&
1481 thread->exc_actions[i].behavior == behaviors[j] &&
1482 thread->exc_actions[i].flavor == flavors[j] ) {
1483 masks[j] |= (1 << i);
1484 break;
1485 }
1486 }
1487
1488 if (j == count) {
1489 masks[j] = (1 << i);
1490 ports[j] = ipc_port_copy_send(thread->exc_actions[i].port);
1491
1492 behaviors[j] = thread->exc_actions[i].behavior;
1493 flavors[j] = thread->exc_actions[i].flavor;
1494 ++count;
1495 }
1496
1497 old_port[i] = thread->exc_actions[i].port;
1498 thread->exc_actions[i].port = ipc_port_copy_send(new_port);
1499 thread->exc_actions[i].behavior = new_behavior;
1500 thread->exc_actions[i].flavor = new_flavor;
1501 if (count > *CountCnt)
1502 break;
1503 }
1504 else
1505 old_port[i] = IP_NULL;
1506 }
1507
1508 thread_mtx_unlock(thread);
1509
1510 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i)
1511 if (IP_VALID(old_port[i]))
1512 ipc_port_release_send(old_port[i]);
1513
1514 if (IP_VALID(new_port)) /* consume send right */
1515 ipc_port_release_send(new_port);
1516
1517 *CountCnt = count;
1518
1519 return (KERN_SUCCESS);
1520 }
1521
1522 kern_return_t
1523 task_swap_exception_ports(
1524 task_t task,
1525 exception_mask_t exception_mask,
1526 ipc_port_t new_port,
1527 exception_behavior_t new_behavior,
1528 thread_state_flavor_t new_flavor,
1529 exception_mask_array_t masks,
1530 mach_msg_type_number_t *CountCnt,
1531 exception_port_array_t ports,
1532 exception_behavior_array_t behaviors,
1533 thread_state_flavor_array_t flavors)
1534 {
1535 ipc_port_t old_port[EXC_TYPES_COUNT];
1536 boolean_t privileged = current_task()->sec_token.val[0] == 0;
1537 unsigned int i, j, count;
1538
1539 if (task == TASK_NULL)
1540 return (KERN_INVALID_ARGUMENT);
1541
1542 if (exception_mask & ~EXC_MASK_ALL)
1543 return (KERN_INVALID_ARGUMENT);
1544
1545 if (IP_VALID(new_port)) {
1546 switch (new_behavior) {
1547
1548 case EXCEPTION_DEFAULT:
1549 case EXCEPTION_STATE:
1550 case EXCEPTION_STATE_IDENTITY:
1551 break;
1552
1553 default:
1554 return (KERN_INVALID_ARGUMENT);
1555 }
1556 }
1557
1558 itk_lock(task);
1559
1560 if (task->itk_self == IP_NULL) {
1561 itk_unlock(task);
1562
1563 return (KERN_FAILURE);
1564 }
1565
1566 count = 0;
1567
1568 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
1569 if (exception_mask & (1 << i)) {
1570 for (j = 0; j < count; j++) {
1571 /*
1572 * search for an identical entry, if found
1573 * set corresponding mask for this exception.
1574 */
1575 if ( task->exc_actions[i].port == ports[j] &&
1576 task->exc_actions[i].behavior == behaviors[j] &&
1577 task->exc_actions[i].flavor == flavors[j] ) {
1578 masks[j] |= (1 << i);
1579 break;
1580 }
1581 }
1582
1583 if (j == count) {
1584 masks[j] = (1 << i);
1585 ports[j] = ipc_port_copy_send(task->exc_actions[i].port);
1586 behaviors[j] = task->exc_actions[i].behavior;
1587 flavors[j] = task->exc_actions[i].flavor;
1588 ++count;
1589 }
1590
1591 old_port[i] = task->exc_actions[i].port;
1592 task->exc_actions[i].port = ipc_port_copy_send(new_port);
1593 task->exc_actions[i].behavior = new_behavior;
1594 task->exc_actions[i].flavor = new_flavor;
1595 task->exc_actions[i].privileged = privileged;
1596 if (count > *CountCnt)
1597 break;
1598 }
1599 else
1600 old_port[i] = IP_NULL;
1601 }
1602
1603 itk_unlock(task);
1604
1605 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++)
1606 if (IP_VALID(old_port[i]))
1607 ipc_port_release_send(old_port[i]);
1608
1609 if (IP_VALID(new_port)) /* consume send right */
1610 ipc_port_release_send(new_port);
1611
1612 *CountCnt = count;
1613
1614 return (KERN_SUCCESS);
1615 }
1616
1617 /*
1618 * Routine: thread/task_get_exception_ports [kernel call]
1619 * Purpose:
1620 * Clones a send right for each of the thread/task's exception
1621  *      ports specified in the mask and returns the behavior
1622  *      and flavor of said port.
1623  *
1624  *      Returns up to [in] CountCnt elements.
1625 *
1626 * Conditions:
1627 * Nothing locked.
1628 * Returns:
1629 * KERN_SUCCESS Extracted a send right.
1630 * KERN_INVALID_ARGUMENT The thread is null,
1631 * Invalid special port,
1632 * Illegal mask bit set.
1633 * KERN_FAILURE The thread is dead.
1634 */
1635
1636 kern_return_t
1637 thread_get_exception_ports(
1638 thread_t thread,
1639 exception_mask_t exception_mask,
1640 exception_mask_array_t masks,
1641 mach_msg_type_number_t *CountCnt,
1642 exception_port_array_t ports,
1643 exception_behavior_array_t behaviors,
1644 thread_state_flavor_array_t flavors)
1645 {
1646 unsigned int i, j, count;
1647
1648 if (thread == THREAD_NULL)
1649 return (KERN_INVALID_ARGUMENT);
1650
1651 if (exception_mask & ~EXC_MASK_ALL)
1652 return (KERN_INVALID_ARGUMENT);
1653
1654 thread_mtx_lock(thread);
1655
1656 if (!thread->active) {
1657 thread_mtx_unlock(thread);
1658
1659 return (KERN_FAILURE);
1660 }
1661
1662 count = 0;
1663
1664 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
1665 if (exception_mask & (1 << i)) {
1666 for (j = 0; j < count; ++j) {
1667 /*
1668 * search for an identical entry, if found
1669 * set corresponding mask for this exception.
1670 */
1671 if ( thread->exc_actions[i].port == ports[j] &&
1672                                     thread->exc_actions[i].behavior == behaviors[j] &&
1673 thread->exc_actions[i].flavor == flavors[j] ) {
1674 masks[j] |= (1 << i);
1675 break;
1676 }
1677 }
1678
1679 if (j == count) {
1680 masks[j] = (1 << i);
1681 ports[j] = ipc_port_copy_send(thread->exc_actions[i].port);
1682 behaviors[j] = thread->exc_actions[i].behavior;
1683 flavors[j] = thread->exc_actions[i].flavor;
1684 ++count;
1685 if (count >= *CountCnt)
1686 break;
1687 }
1688 }
1689 }
1690
1691 thread_mtx_unlock(thread);
1692
1693 *CountCnt = count;
1694
1695 return (KERN_SUCCESS);
1696 }
1697
1698 kern_return_t
1699 task_get_exception_ports(
1700 task_t task,
1701 exception_mask_t exception_mask,
1702 exception_mask_array_t masks,
1703 mach_msg_type_number_t *CountCnt,
1704 exception_port_array_t ports,
1705 exception_behavior_array_t behaviors,
1706 thread_state_flavor_array_t flavors)
1707 {
1708 unsigned int i, j, count;
1709
1710 if (task == TASK_NULL)
1711 return (KERN_INVALID_ARGUMENT);
1712
1713 if (exception_mask & ~EXC_MASK_ALL)
1714 return (KERN_INVALID_ARGUMENT);
1715
1716 itk_lock(task);
1717
1718 if (task->itk_self == IP_NULL) {
1719 itk_unlock(task);
1720
1721 return (KERN_FAILURE);
1722 }
1723
1724 count = 0;
1725
1726 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
1727 if (exception_mask & (1 << i)) {
1728 for (j = 0; j < count; ++j) {
1729 /*
1730 * search for an identical entry, if found
1731 * set corresponding mask for this exception.
1732 */
1733 if ( task->exc_actions[i].port == ports[j] &&
1734 task->exc_actions[i].behavior == behaviors[j] &&
1735 task->exc_actions[i].flavor == flavors[j] ) {
1736 masks[j] |= (1 << i);
1737 break;
1738 }
1739 }
1740
1741 if (j == count) {
1742 masks[j] = (1 << i);
1743 ports[j] = ipc_port_copy_send(task->exc_actions[i].port);
1744 behaviors[j] = task->exc_actions[i].behavior;
1745 flavors[j] = task->exc_actions[i].flavor;
1746 ++count;
1747 if (count > *CountCnt)
1748 break;
1749 }
1750 }
1751 }
1752
1753 itk_unlock(task);
1754
1755 *CountCnt = count;
1756
1757 return (KERN_SUCCESS);
1758 }
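
/*
 *	A matching user-space sketch for saving the current task handlers
 *	with the routine above (illustrative only; the arrays are sized to
 *	the maximum so the count-in/count-out argument cannot overflow):
 *
 *		exception_mask_t	masks[EXC_TYPES_COUNT];
 *		mach_port_t		ports[EXC_TYPES_COUNT];
 *		exception_behavior_t	behaviors[EXC_TYPES_COUNT];
 *		thread_state_flavor_t	flavors[EXC_TYPES_COUNT];
 *		mach_msg_type_number_t	count = EXC_TYPES_COUNT;
 *		kern_return_t kr;
 *
 *		kr = task_get_exception_ports(mach_task_self(), EXC_MASK_ALL,
 *		    masks, &count, ports, behaviors, flavors);
 *		// On success, the first "count" entries describe the distinct
 *		// (port, behavior, flavor) tuples; each valid ports[i] is a
 *		// cloned send right that should eventually be deallocated.
 */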