1 /*
2 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 /*
23 * @OSF_COPYRIGHT@
24 */
25 /*
26 * Mach Operating System
27 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
28 * All Rights Reserved.
29 *
30 * Permission to use, copy, modify and distribute this software and its
31 * documentation is hereby granted, provided that both the copyright
32 * notice and this permission notice appear in all copies of the
33 * software, derivative works or modified versions, and any portions
34 * thereof, and that both notices appear in supporting documentation.
35 *
36 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
37 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
38 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
39 *
40 * Carnegie Mellon requests users of this software to return to
41 *
42 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
43 * School of Computer Science
44 * Carnegie Mellon University
45 * Pittsburgh PA 15213-3890
46 *
47 * any improvements or extensions that they make and grant Carnegie Mellon
48 * the rights to redistribute these changes.
49 */
50 /*
51 */
52
53 /*
54 * File: ipc_tt.c
55 * Purpose:
56 * Task and thread related IPC functions.
57 */
58
59 #include <mach/mach_types.h>
60 #include <mach/boolean.h>
61 #include <mach/kern_return.h>
62 #include <mach/mach_param.h>
63 #include <mach/task_special_ports.h>
64 #include <mach/thread_special_ports.h>
65 #include <mach/thread_status.h>
66 #include <mach/exception_types.h>
67 #include <mach/memory_object_types.h>
68 #include <mach/mach_traps.h>
69 #include <mach/task_server.h>
70 #include <mach/thread_act_server.h>
71 #include <mach/mach_host_server.h>
72 #include <mach/host_priv_server.h>
73 #include <mach/vm_map_server.h>
74
75 #include <kern/kern_types.h>
76 #include <kern/host.h>
77 #include <kern/ipc_kobject.h>
78 #include <kern/ipc_tt.h>
79 #include <kern/kalloc.h>
80 #include <kern/thread.h>
81 #include <kern/misc_protos.h>
82
83 #include <vm/vm_map.h>
84 #include <vm/vm_pageout.h>
85 #include <vm/vm_shared_memory_server.h>
86 #include <vm/vm_protos.h>
87
88 /* forward declarations */
89 task_t convert_port_to_locked_task(ipc_port_t port);
90
91
92 /*
93 * Routine: ipc_task_init
94 * Purpose:
95 * Initialize a task's IPC state.
96 *
97 * If the parent is non-null, some state will be inherited from it.
98 * The parent must be appropriately initialized.
99 * Conditions:
100 * Nothing locked.
101 */
102
103 void
104 ipc_task_init(
105 task_t task,
106 task_t parent)
107 {
108 ipc_space_t space;
109 ipc_port_t kport;
110 ipc_port_t nport;
111 kern_return_t kr;
112 int i;
113
114
115 kr = ipc_space_create(&ipc_table_entries[0], &space);
116 if (kr != KERN_SUCCESS)
117 panic("ipc_task_init");
118
119
120 kport = ipc_port_alloc_kernel();
121 if (kport == IP_NULL)
122 panic("ipc_task_init");
123
124 nport = ipc_port_alloc_kernel();
125 if (nport == IP_NULL)
126 panic("ipc_task_init");
127
128 itk_lock_init(task);
129 task->itk_self = kport;
130 task->itk_nself = nport;
131 task->itk_sself = ipc_port_make_send(kport);
132 task->itk_space = space;
133 space->is_fast = FALSE;
134
135 if (parent == TASK_NULL) {
136 ipc_port_t port;
137
138 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
139 task->exc_actions[i].port = IP_NULL;
140 }/* for */
141
142 kr = host_get_host_port(host_priv_self(), &port);
143 assert(kr == KERN_SUCCESS);
144 task->itk_host = port;
145
146 task->itk_bootstrap = IP_NULL;
147
148 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
149 task->itk_registered[i] = IP_NULL;
150 } else {
151 itk_lock(parent);
152 assert(parent->itk_self != IP_NULL);
153
154 /* inherit registered ports */
155
156 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
157 task->itk_registered[i] =
158 ipc_port_copy_send(parent->itk_registered[i]);
159
160 /* inherit exception and bootstrap ports */
161
162 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
163 task->exc_actions[i].port =
164 ipc_port_copy_send(parent->exc_actions[i].port);
165 task->exc_actions[i].flavor =
166 parent->exc_actions[i].flavor;
167 task->exc_actions[i].behavior =
168 parent->exc_actions[i].behavior;
169 }/* for */
170 task->itk_host =
171 ipc_port_copy_send(parent->itk_host);
172
173 task->itk_bootstrap =
174 ipc_port_copy_send(parent->itk_bootstrap);
175
176 itk_unlock(parent);
177 }
178 }
179
180 /*
181 * Routine: ipc_task_enable
182 * Purpose:
183 * Enable a task for IPC access.
184 * Conditions:
185 * Nothing locked.
186 */
187
188 void
189 ipc_task_enable(
190 task_t task)
191 {
192 ipc_port_t kport;
193 ipc_port_t nport;
194
195 itk_lock(task);
196 kport = task->itk_self;
197 if (kport != IP_NULL)
198 ipc_kobject_set(kport, (ipc_kobject_t) task, IKOT_TASK);
199 nport = task->itk_nself;
200 if (nport != IP_NULL)
201 ipc_kobject_set(nport, (ipc_kobject_t) task, IKOT_TASK_NAME);
202 itk_unlock(task);
203 }
204
205 /*
206 * Routine: ipc_task_disable
207 * Purpose:
208 * Disable IPC access to a task.
209 * Conditions:
210 * Nothing locked.
211 */
212
213 void
214 ipc_task_disable(
215 task_t task)
216 {
217 ipc_port_t kport;
218 ipc_port_t nport;
219
220 itk_lock(task);
221 kport = task->itk_self;
222 if (kport != IP_NULL)
223 ipc_kobject_set(kport, IKO_NULL, IKOT_NONE);
224 nport = task->itk_nself;
225 if (nport != IP_NULL)
226 ipc_kobject_set(nport, IKO_NULL, IKOT_NONE);
227 itk_unlock(task);
228 }
229
230 /*
231 * Routine: ipc_task_terminate
232 * Purpose:
233 * Clean up and destroy a task's IPC state.
234 * Conditions:
235 * Nothing locked. The task must be suspended.
236 * (Or the current thread must be in the task.)
237 */
238
239 void
240 ipc_task_terminate(
241 task_t task)
242 {
243 ipc_port_t kport;
244 ipc_port_t nport;
245 int i;
246
247 itk_lock(task);
248 kport = task->itk_self;
249
250 if (kport == IP_NULL) {
251 /* the task is already terminated (can this happen?) */
252 itk_unlock(task);
253 return;
254 }
255 task->itk_self = IP_NULL;
256
257 nport = task->itk_nself;
258 assert(nport != IP_NULL);
259 task->itk_nself = IP_NULL;
260
261 itk_unlock(task);
262
263 /* release the naked send rights */
264
265 if (IP_VALID(task->itk_sself))
266 ipc_port_release_send(task->itk_sself);
267
268 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
269 if (IP_VALID(task->exc_actions[i].port)) {
270 ipc_port_release_send(task->exc_actions[i].port);
271 }
272 }
273
274 if (IP_VALID(task->itk_host))
275 ipc_port_release_send(task->itk_host);
276
277 if (IP_VALID(task->itk_bootstrap))
278 ipc_port_release_send(task->itk_bootstrap);
279
280 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
281 if (IP_VALID(task->itk_registered[i]))
282 ipc_port_release_send(task->itk_registered[i]);
283
284 ipc_port_release_send(task->wired_ledger_port);
285 ipc_port_release_send(task->paged_ledger_port);
286
287 /* destroy the kernel ports */
288 ipc_port_dealloc_kernel(kport);
289 ipc_port_dealloc_kernel(nport);
290 }
291
292 /*
293 * Routine: ipc_task_reset
294 * Purpose:
295 * Reset a task's IPC state to protect it when
296 * it enters an elevated security context. The
297 * task name port can remain the same - since
298 * it represents no specific privilege.
299 * Conditions:
300 * Nothing locked. The task must be suspended.
301 * (Or the current thread must be in the task.)
302 */
303
304 void
305 ipc_task_reset(
306 task_t task)
307 {
308 ipc_port_t old_kport, new_kport;
309 ipc_port_t old_sself;
310 #if 0
311 ipc_port_t old_exc_actions[EXC_TYPES_COUNT];
312 int i;
313 #endif
314
315 new_kport = ipc_port_alloc_kernel();
316 if (new_kport == IP_NULL)
317 panic("ipc_task_reset");
318
319 itk_lock(task);
320
321 old_kport = task->itk_self;
322
323 if (old_kport == IP_NULL) {
324 /* the task is already terminated (can this happen?) */
325 itk_unlock(task);
326 ipc_port_dealloc_kernel(new_kport);
327 return;
328 }
329
330 task->itk_self = new_kport;
331 old_sself = task->itk_sself;
332 task->itk_sself = ipc_port_make_send(new_kport);
333 ipc_kobject_set(old_kport, IKO_NULL, IKOT_NONE);
334 ipc_kobject_set(new_kport, (ipc_kobject_t) task, IKOT_TASK);
335
336 #if 0
337 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
338 old_exc_actions[i] = task->exc_actions[i].port;
339 task->exc_actions[i].port = IP_NULL;
340 }/* for */
341 #endif
342
343 itk_unlock(task);
344
345 /* release the naked send rights */
346
347 if (IP_VALID(old_sself))
348 ipc_port_release_send(old_sself);
349
350 #if 0
351 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
352 if (IP_VALID(old_exc_actions[i])) {
353 ipc_port_release_send(old_exc_actions[i]);
354 }
355 }/* for */
356 #endif
357
358 /* destroy the kernel port */
359 ipc_port_dealloc_kernel(old_kport);
360 }
361
362 /*
363 * Routine: ipc_thread_init
364 * Purpose:
365 * Initialize a thread's IPC state.
366 * Conditions:
367 * Nothing locked.
368 */
369
370 void
371 ipc_thread_init(
372 thread_t thread)
373 {
374 ipc_port_t kport;
375 int i;
376
377 kport = ipc_port_alloc_kernel();
378 if (kport == IP_NULL)
379 panic("ipc_thread_init");
380
381 thread->ith_self = kport;
382 thread->ith_sself = ipc_port_make_send(kport);
383
384 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i)
385 thread->exc_actions[i].port = IP_NULL;
386
387 ipc_kobject_set(kport, (ipc_kobject_t)thread, IKOT_THREAD);
388
389 ipc_kmsg_queue_init(&thread->ith_messages);
390
391 thread->ith_rpc_reply = IP_NULL;
392 }
393
394 void
395 ipc_thread_disable(
396 thread_t thread)
397 {
398 ipc_port_t kport = thread->ith_self;
399
400 if (kport != IP_NULL)
401 ipc_kobject_set(kport, IKO_NULL, IKOT_NONE);
402 }
403
404 /*
405 * Routine: ipc_thread_terminate
406 * Purpose:
407 * Clean up and destroy a thread's IPC state.
408 * Conditions:
409 * Nothing locked.
410 */
411
412 void
413 ipc_thread_terminate(
414 thread_t thread)
415 {
416 ipc_port_t kport = thread->ith_self;
417
418 if (kport != IP_NULL) {
419 int i;
420
421 if (IP_VALID(thread->ith_sself))
422 ipc_port_release_send(thread->ith_sself);
423
424 thread->ith_sself = thread->ith_self = IP_NULL;
425
426 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
427 if (IP_VALID(thread->exc_actions[i].port))
428 ipc_port_release_send(thread->exc_actions[i].port);
429 }
430
431 ipc_port_dealloc_kernel(kport);
432 }
433
434 assert(ipc_kmsg_queue_empty(&thread->ith_messages));
435
436 if (thread->ith_rpc_reply != IP_NULL)
437 ipc_port_dealloc_reply(thread->ith_rpc_reply);
438
439 thread->ith_rpc_reply = IP_NULL;
440 }
441
442 /*
443 * Routine: retrieve_task_self_fast
444 * Purpose:
445 * Optimized version of retrieve_task_self
446 * that only works for the current task.
447 *
448 * Return a send right (possibly null/dead)
449 * for the task's user-visible self port.
450 * Conditions:
451 * Nothing locked.
452 */
453
454 ipc_port_t
455 retrieve_task_self_fast(
456 register task_t task)
457 {
458 register ipc_port_t port;
459
460 assert(task == current_task());
461
462 itk_lock(task);
463 assert(task->itk_self != IP_NULL);
464
465 if ((port = task->itk_sself) == task->itk_self) {
466 /* no interposing */
467
468 ip_lock(port);
469 assert(ip_active(port));
470 ip_reference(port);
471 port->ip_srights++;
472 ip_unlock(port);
473 } else
474 port = ipc_port_copy_send(port);
475 itk_unlock(task);
476
477 return port;
478 }
479
480 /*
481 * Routine: retrieve_thread_self_fast
482 * Purpose:
483 * Return a send right (possibly null/dead)
484 * for the thread's user-visible self port.
485 *
486 * Only works for the current thread.
487 *
488 * Conditions:
489 * Nothing locked.
490 */
491
492 ipc_port_t
493 retrieve_thread_self_fast(
494 thread_t thread)
495 {
496 register ipc_port_t port;
497
498 assert(thread == current_thread());
499
500 thread_mtx_lock(thread);
501
502 assert(thread->ith_self != IP_NULL);
503
504 if ((port = thread->ith_sself) == thread->ith_self) {
505 /* no interposing */
506
507 ip_lock(port);
508 assert(ip_active(port));
509 ip_reference(port);
510 port->ip_srights++;
511 ip_unlock(port);
512 }
513 else
514 port = ipc_port_copy_send(port);
515
516 thread_mtx_unlock(thread);
517
518 return port;
519 }
520
521 /*
522 * Routine: task_self_trap [mach trap]
523 * Purpose:
524 * Give the caller send rights for his own task port.
525 * Conditions:
526 * Nothing locked.
527 * Returns:
528 * MACH_PORT_NULL if there are any resource failures
529 * or other errors.
530 */
531
532 mach_port_name_t
533 task_self_trap(
534 __unused struct task_self_trap_args *args)
535 {
536 task_t task = current_task();
537 ipc_port_t sright;
538 mach_port_name_t name;
539
540 sright = retrieve_task_self_fast(task);
541 name = ipc_port_copyout_send(sright, task->itk_space);
542 return name;
543 }
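
/*
 *	Example:
 *		A minimal user-space sketch (illustrative only; not part of this
 *		file).  The libc wrapper mach_task_self() typically returns a
 *		name obtained from this trap and cached at startup, so callers
 *		do not normally deallocate it.  The name denotes a send right
 *		for the task kernel port set up in ipc_task_init() above.
 *
 *		#include <mach/mach.h>
 *
 *		mach_port_t task = mach_task_self();
 */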
544
545 /*
546 * Routine: thread_self_trap [mach trap]
547 * Purpose:
548 * Give the caller send rights for his own thread port.
549 * Conditions:
550 * Nothing locked.
551 * Returns:
552 * MACH_PORT_NULL if there are any resource failures
553 * or other errors.
554 */
555
556 mach_port_name_t
557 thread_self_trap(
558 __unused struct thread_self_trap_args *args)
559 {
560 thread_t thread = current_thread();
561 task_t task = thread->task;
562 ipc_port_t sright;
563 mach_port_name_t name;
564
565 sright = retrieve_thread_self_fast(thread);
566 name = ipc_port_copyout_send(sright, task->itk_space);
567 return name;
568
569 }
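
/*
 *	Example:
 *		User-space sketch (illustrative only; not part of this file).
 *		Unlike mach_task_self(), each call to mach_thread_self() goes
 *		through this trap and produces a new send right, so the caller
 *		should use the right and then deallocate the name.
 *
 *		#include <mach/mach.h>
 *
 *		mach_port_t thread = mach_thread_self();
 *
 *		mach_port_deallocate(mach_task_self(), thread);
 */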
570
571 /*
572 * Routine: mach_reply_port [mach trap]
573 * Purpose:
574 * Allocate a port for the caller.
575 * Conditions:
576 * Nothing locked.
577 * Returns:
578 * MACH_PORT_NULL if there are any resource failures
579 * or other errors.
580 */
581
582 mach_port_name_t
583 mach_reply_port(
584 __unused struct mach_reply_port_args *args)
585 {
586 ipc_port_t port;
587 mach_port_name_t name;
588 kern_return_t kr;
589
590 kr = ipc_port_alloc(current_task()->itk_space, &name, &port);
591 if (kr == KERN_SUCCESS)
592 ip_unlock(port);
593 else
594 name = MACH_PORT_NULL;
595 return name;
596 }
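
/*
 *	Example:
 *		User-space sketch (illustrative only; not part of this file).
 *		The port returned by mach_reply_port() is normally named as the
 *		msgh_local_port of an outgoing request so the reply can be
 *		received on it; the receive right is destroyed when done.
 *
 *		#include <mach/mach.h>
 *
 *		mach_port_t reply = mach_reply_port();
 *
 *		if (reply != MACH_PORT_NULL)
 *			mach_port_mod_refs(mach_task_self(), reply,
 *					   MACH_PORT_RIGHT_RECEIVE, -1);
 */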
597
598 /*
599 * Routine: thread_get_special_port [kernel call]
600 * Purpose:
601 * Clones a send right for one of the thread's
602 * special ports.
603 * Conditions:
604 * Nothing locked.
605 * Returns:
606 * KERN_SUCCESS Extracted a send right.
607 * KERN_INVALID_ARGUMENT The thread is null.
608 * KERN_FAILURE The thread is dead.
609 * KERN_INVALID_ARGUMENT Invalid special port.
610 */
611
612 kern_return_t
613 thread_get_special_port(
614 thread_t thread,
615 int which,
616 ipc_port_t *portp)
617 {
618 kern_return_t result = KERN_SUCCESS;
619 ipc_port_t *whichp;
620
621 if (thread == THREAD_NULL)
622 return (KERN_INVALID_ARGUMENT);
623
624 switch (which) {
625
626 case THREAD_KERNEL_PORT:
627 whichp = &thread->ith_sself;
628 break;
629
630 default:
631 return (KERN_INVALID_ARGUMENT);
632 }
633
634 thread_mtx_lock(thread);
635
636 if (thread->active)
637 *portp = ipc_port_copy_send(*whichp);
638 else
639 result = KERN_FAILURE;
640
641 thread_mtx_unlock(thread);
642
643 return (result);
644 }
645
646 /*
647 * Routine: thread_set_special_port [kernel call]
648 * Purpose:
649 * Changes one of the thread's special ports,
650 * setting it to the supplied send right.
651 * Conditions:
652 * Nothing locked. If successful, consumes
653 * the supplied send right.
654 * Returns:
655 * KERN_SUCCESS Changed the special port.
656 * KERN_INVALID_ARGUMENT The thread is null.
657 * KERN_FAILURE The thread is dead.
658 * KERN_INVALID_ARGUMENT Invalid special port.
659 */
660
661 kern_return_t
662 thread_set_special_port(
663 thread_t thread,
664 int which,
665 ipc_port_t port)
666 {
667 kern_return_t result = KERN_SUCCESS;
668 ipc_port_t *whichp, old = IP_NULL;
669
670 if (thread == THREAD_NULL)
671 return (KERN_INVALID_ARGUMENT);
672
673 switch (which) {
674
675 case THREAD_KERNEL_PORT:
676 whichp = &thread->ith_sself;
677 break;
678
679 default:
680 return (KERN_INVALID_ARGUMENT);
681 }
682
683 thread_mtx_lock(thread);
684
685 if (thread->active) {
686 old = *whichp;
687 *whichp = port;
688 }
689 else
690 result = KERN_FAILURE;
691
692 thread_mtx_unlock(thread);
693
694 if (IP_VALID(old))
695 ipc_port_release_send(old);
696
697 return (result);
698 }
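
/*
 *	Example:
 *		User-space sketch (illustrative only; not part of this file) of
 *		the get side: clone the THREAD_KERNEL_PORT special port, which
 *		is the user-visible self port (ith_sself) managed above, and
 *		release both names when finished.
 *
 *		#include <mach/mach.h>
 *
 *		mach_port_t thread = mach_thread_self();
 *		mach_port_t kport = MACH_PORT_NULL;
 *
 *		if (thread_get_special_port(thread, THREAD_KERNEL_PORT,
 *					    &kport) == KERN_SUCCESS)
 *			mach_port_deallocate(mach_task_self(), kport);
 *		mach_port_deallocate(mach_task_self(), thread);
 */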
699
700 /*
701 * Routine: task_get_special_port [kernel call]
702 * Purpose:
703 * Clones a send right for one of the task's
704 * special ports.
705 * Conditions:
706 * Nothing locked.
707 * Returns:
708 * KERN_SUCCESS Extracted a send right.
709 * KERN_INVALID_ARGUMENT The task is null.
710 * KERN_FAILURE The task/space is dead.
711 * KERN_INVALID_ARGUMENT Invalid special port.
712 */
713
714 kern_return_t
715 task_get_special_port(
716 task_t task,
717 int which,
718 ipc_port_t *portp)
719 {
720 ipc_port_t port;
721
722 if (task == TASK_NULL)
723 return KERN_INVALID_ARGUMENT;
724
725 itk_lock(task);
726 if (task->itk_self == IP_NULL) {
727 itk_unlock(task);
728 return KERN_FAILURE;
729 }
730
731 switch (which) {
732 case TASK_KERNEL_PORT:
733 port = ipc_port_copy_send(task->itk_sself);
734 break;
735
736 case TASK_NAME_PORT:
737 port = ipc_port_make_send(task->itk_nself);
738 break;
739
740 case TASK_HOST_PORT:
741 port = ipc_port_copy_send(task->itk_host);
742 break;
743
744 case TASK_BOOTSTRAP_PORT:
745 port = ipc_port_copy_send(task->itk_bootstrap);
746 break;
747
748 case TASK_WIRED_LEDGER_PORT:
749 port = ipc_port_copy_send(task->wired_ledger_port);
750 break;
751
752 case TASK_PAGED_LEDGER_PORT:
753 port = ipc_port_copy_send(task->paged_ledger_port);
754 break;
755
756 default:
757 return KERN_INVALID_ARGUMENT;
758 }
759 itk_unlock(task);
760
761 *portp = port;
762 return KERN_SUCCESS;
763 }
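
/*
 *	Example:
 *		User-space sketch (illustrative only; not part of this file):
 *		clone a send right for the task's bootstrap special port and
 *		release it when no longer needed.
 *
 *		#include <mach/mach.h>
 *
 *		mach_port_t bootstrap = MACH_PORT_NULL;
 *
 *		if (task_get_special_port(mach_task_self(), TASK_BOOTSTRAP_PORT,
 *					  &bootstrap) == KERN_SUCCESS &&
 *		    MACH_PORT_VALID(bootstrap))
 *			mach_port_deallocate(mach_task_self(), bootstrap);
 */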
764
765 /*
766 * Routine: task_set_special_port [kernel call]
767 * Purpose:
768 * Changes one of the task's special ports,
769 * setting it to the supplied send right.
770 * Conditions:
771 * Nothing locked. If successful, consumes
772 * the supplied send right.
773 * Returns:
774 * KERN_SUCCESS Changed the special port.
775 * KERN_INVALID_ARGUMENT The task is null.
776 * KERN_FAILURE The task/space is dead.
777 * KERN_INVALID_ARGUMENT Invalid special port.
778 */
779
780 kern_return_t
781 task_set_special_port(
782 task_t task,
783 int which,
784 ipc_port_t port)
785 {
786 ipc_port_t *whichp;
787 ipc_port_t old;
788
789 if (task == TASK_NULL)
790 return KERN_INVALID_ARGUMENT;
791
792 switch (which) {
793 case TASK_KERNEL_PORT:
794 whichp = &task->itk_sself;
795 break;
796
797 case TASK_HOST_PORT:
798 whichp = &task->itk_host;
799 break;
800
801 case TASK_BOOTSTRAP_PORT:
802 whichp = &task->itk_bootstrap;
803 break;
804
805 case TASK_WIRED_LEDGER_PORT:
806 whichp = &task->wired_ledger_port;
807 break;
808
809 case TASK_PAGED_LEDGER_PORT:
810 whichp = &task->paged_ledger_port;
811 break;
812
813 default:
814 return KERN_INVALID_ARGUMENT;
815 }/* switch */
816
817 itk_lock(task);
818 if (task->itk_self == IP_NULL) {
819 itk_unlock(task);
820 return KERN_FAILURE;
821 }
822
823 old = *whichp;
824 *whichp = port;
825 itk_unlock(task);
826
827 if (IP_VALID(old))
828 ipc_port_release_send(old);
829 return KERN_SUCCESS;
830 }
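
/*
 *	Example:
 *		User-space sketch (illustrative only; not part of this file).
 *		Here new_bootstrap is a hypothetical send right obtained
 *		elsewhere; on success the kernel consumes it, so the caller
 *		must not release it again.
 *
 *		#include <mach/mach.h>
 *
 *		kern_return_t kr;
 *
 *		kr = task_set_special_port(mach_task_self(),
 *					   TASK_BOOTSTRAP_PORT, new_bootstrap);
 */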
831
832
833 /*
834 * Routine: mach_ports_register [kernel call]
835 * Purpose:
836 * Stash a handful of port send rights in the task.
837 * Child tasks will inherit these rights, but they
838 * must use mach_ports_lookup to acquire them.
839 *
840 * The rights are supplied in a (wired) kalloc'd segment.
841 * Rights which aren't supplied are assumed to be null.
842 * Conditions:
843 * Nothing locked. If successful, consumes
844 * the supplied rights and memory.
845 * Returns:
846 * KERN_SUCCESS Stashed the port rights.
847 * KERN_INVALID_ARGUMENT The task is null.
848 * KERN_INVALID_ARGUMENT The task is dead.
849 * KERN_INVALID_ARGUMENT Too many port rights supplied.
850 */
851
852 kern_return_t
853 mach_ports_register(
854 task_t task,
855 mach_port_array_t memory,
856 mach_msg_type_number_t portsCnt)
857 {
858 ipc_port_t ports[TASK_PORT_REGISTER_MAX];
859 unsigned int i;
860
861 if ((task == TASK_NULL) ||
862 (portsCnt > TASK_PORT_REGISTER_MAX))
863 return KERN_INVALID_ARGUMENT;
864
865 /*
866 * Pad the port rights with nulls.
867 */
868
869 for (i = 0; i < portsCnt; i++)
870 ports[i] = memory[i];
871 for (; i < TASK_PORT_REGISTER_MAX; i++)
872 ports[i] = IP_NULL;
873
874 itk_lock(task);
875 if (task->itk_self == IP_NULL) {
876 itk_unlock(task);
877 return KERN_INVALID_ARGUMENT;
878 }
879
880 /*
881 * Replace the old send rights with the new.
882 * Release the old rights after unlocking.
883 */
884
885 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
886 ipc_port_t old;
887
888 old = task->itk_registered[i];
889 task->itk_registered[i] = ports[i];
890 ports[i] = old;
891 }
892
893 itk_unlock(task);
894
895 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
896 if (IP_VALID(ports[i]))
897 ipc_port_release_send(ports[i]);
898
899 /*
900 * Now that the operation is known to be successful,
901 * we can free the memory.
902 */
903
904 if (portsCnt != 0)
905 kfree(memory,
906 (vm_size_t) (portsCnt * sizeof(mach_port_t)));
907
908 return KERN_SUCCESS;
909 }
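
/*
 *	Example:
 *		User-space sketch (illustrative only; not part of this file).
 *		Here some_port is a hypothetical send right to stash; at most
 *		TASK_PORT_REGISTER_MAX rights may be registered, and on success
 *		the kernel consumes the supplied rights.
 *
 *		#include <mach/mach.h>
 *
 *		mach_port_t init_ports[1] = { some_port };
 *
 *		kern_return_t kr = mach_ports_register(mach_task_self(),
 *						       init_ports, 1);
 */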
910
911 /*
912 * Routine: mach_ports_lookup [kernel call]
913 * Purpose:
914 * Retrieves (clones) the stashed port send rights.
915 * Conditions:
916 * Nothing locked. If successful, the caller gets
917 * rights and memory.
918 * Returns:
919 * KERN_SUCCESS Retrieved the send rights.
920 * KERN_INVALID_ARGUMENT The task is null.
921 * KERN_INVALID_ARGUMENT The task is dead.
922 * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
923 */
924
925 kern_return_t
926 mach_ports_lookup(
927 task_t task,
928 mach_port_array_t *portsp,
929 mach_msg_type_number_t *portsCnt)
930 {
931 void *memory;
932 vm_size_t size;
933 ipc_port_t *ports;
934 int i;
935
936 if (task == TASK_NULL)
937 return KERN_INVALID_ARGUMENT;
938
939 size = (vm_size_t) (TASK_PORT_REGISTER_MAX * sizeof(ipc_port_t));
940
941 memory = kalloc(size);
942 if (memory == 0)
943 return KERN_RESOURCE_SHORTAGE;
944
945 itk_lock(task);
946 if (task->itk_self == IP_NULL) {
947 itk_unlock(task);
948
949 kfree(memory, size);
950 return KERN_INVALID_ARGUMENT;
951 }
952
953 ports = (ipc_port_t *) memory;
954
955 /*
956 * Clone port rights. Because kalloc'd memory
957 * is wired, we won't fault while holding the task lock.
958 */
959
960 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
961 ports[i] = ipc_port_copy_send(task->itk_registered[i]);
962
963 itk_unlock(task);
964
965 *portsp = (mach_port_array_t) ports;
966 *portsCnt = TASK_PORT_REGISTER_MAX;
967 return KERN_SUCCESS;
968 }
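
/*
 *	Example:
 *		User-space sketch (illustrative only; not part of this file).
 *		The returned array arrives as out-of-line memory, so the caller
 *		releases both the cloned rights and the backing memory.
 *
 *		#include <mach/mach.h>
 *
 *		mach_port_array_t ports = NULL;
 *		mach_msg_type_number_t count = 0, i;
 *
 *		if (mach_ports_lookup(mach_task_self(), &ports, &count) ==
 *		    KERN_SUCCESS) {
 *			for (i = 0; i < count; i++)
 *				if (MACH_PORT_VALID(ports[i]))
 *					mach_port_deallocate(mach_task_self(),
 *							     ports[i]);
 *			vm_deallocate(mach_task_self(), (vm_address_t)ports,
 *				      count * sizeof(ports[0]));
 *		}
 */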
969
970 /*
971 * Routine: convert_port_to_locked_task
972 * Purpose:
973 * Internal helper routine to convert from a port to a locked
974 * task. Used by several routines that try to convert from a
975 * task port to a reference on some task related object.
976 * Conditions:
977 * Nothing locked, blocking OK.
978 */
979 task_t
980 convert_port_to_locked_task(ipc_port_t port)
981 {
982 while (IP_VALID(port)) {
983 task_t task;
984
985 ip_lock(port);
986 if (!ip_active(port) || (ip_kotype(port) != IKOT_TASK)) {
987 ip_unlock(port);
988 return TASK_NULL;
989 }
990 task = (task_t) port->ip_kobject;
991 assert(task != TASK_NULL);
992
993 /*
994 * Normal lock ordering puts task_lock() before ip_lock().
995 * Attempt out-of-order locking here.
996 */
997 if (task_lock_try(task)) {
998 ip_unlock(port);
999 return(task);
1000 }
1001
1002 ip_unlock(port);
1003 mutex_pause();
1004 }
1005 return TASK_NULL;
1006 }
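
/*
 *	Example:
 *		The loop above is an instance of the usual try-lock/back-off
 *		pattern for taking locks against the normal ordering: take the
 *		lock that normally comes second, opportunistically try the one
 *		that normally comes first, and on failure drop everything,
 *		pause, and retry (here via ip_lock()/task_lock_try()/
 *		mutex_pause()).  A minimal sketch with hypothetical primitives
 *		(inner_lock, inner_unlock, outer_lock_try, outer_of,
 *		lock_pause):
 *
 *		for (;;) {
 *			inner_lock(obj);
 *			if (outer_lock_try(outer_of(obj))) {
 *				inner_unlock(obj);
 *				break;
 *			}
 *			inner_unlock(obj);
 *			lock_pause();
 *		}
 */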
1007
1008 /*
1009 * Routine: convert_port_to_task
1010 * Purpose:
1011 * Convert from a port to a task.
1012 * Doesn't consume the port ref; produces a task ref,
1013 * which may be null.
1014 * Conditions:
1015 * Nothing locked.
1016 */
1017 task_t
1018 convert_port_to_task(
1019 ipc_port_t port)
1020 {
1021 task_t task = TASK_NULL;
1022
1023 if (IP_VALID(port)) {
1024 ip_lock(port);
1025
1026 if ( ip_active(port) &&
1027 ip_kotype(port) == IKOT_TASK ) {
1028 task = (task_t)port->ip_kobject;
1029 assert(task != TASK_NULL);
1030
1031 task_reference_internal(task);
1032 }
1033
1034 ip_unlock(port);
1035 }
1036
1037 return (task);
1038 }
1039
1040 /*
1041 * Routine: convert_port_to_task_name
1042 * Purpose:
1043 * Convert from a port to a task name.
1044 * Doesn't consume the port ref; produces a task name ref,
1045 * which may be null.
1046 * Conditions:
1047 * Nothing locked.
1048 */
1049 task_name_t
1050 convert_port_to_task_name(
1051 ipc_port_t port)
1052 {
1053 task_name_t task = TASK_NULL;
1054
1055 if (IP_VALID(port)) {
1056 ip_lock(port);
1057
1058 if ( ip_active(port) &&
1059 (ip_kotype(port) == IKOT_TASK ||
1060 ip_kotype(port) == IKOT_TASK_NAME)) {
1061 task = (task_name_t)port->ip_kobject;
1062 assert(task != TASK_NAME_NULL);
1063
1064 task_reference_internal(task);
1065 }
1066
1067 ip_unlock(port);
1068 }
1069
1070 return (task);
1071 }
1072
1073 /*
1074 * Routine: convert_port_to_space
1075 * Purpose:
1076 * Convert from a port to a space.
1077 * Doesn't consume the port ref; produces a space ref,
1078 * which may be null.
1079 * Conditions:
1080 * Nothing locked.
1081 */
1082 ipc_space_t
1083 convert_port_to_space(
1084 ipc_port_t port)
1085 {
1086 ipc_space_t space;
1087 task_t task;
1088
1089 task = convert_port_to_locked_task(port);
1090
1091 if (task == TASK_NULL)
1092 return IPC_SPACE_NULL;
1093
1094 if (!task->active) {
1095 task_unlock(task);
1096 return IPC_SPACE_NULL;
1097 }
1098
1099 space = task->itk_space;
1100 is_reference(space);
1101 task_unlock(task);
1102 return (space);
1103 }
1104
1105 /*
1106 * Routine: convert_port_to_map
1107 * Purpose:
1108 * Convert from a port to a map.
1109 * Doesn't consume the port ref; produces a map ref,
1110 * which may be null.
1111 * Conditions:
1112 * Nothing locked.
1113 */
1114
1115 vm_map_t
1116 convert_port_to_map(
1117 ipc_port_t port)
1118 {
1119 task_t task;
1120 vm_map_t map;
1121
1122 task = convert_port_to_locked_task(port);
1123
1124 if (task == TASK_NULL)
1125 return VM_MAP_NULL;
1126
1127 if (!task->active) {
1128 task_unlock(task);
1129 return VM_MAP_NULL;
1130 }
1131
1132 map = task->map;
1133 vm_map_reference_swap(map);
1134 task_unlock(task);
1135 return map;
1136 }
1137
1138
1139 /*
1140 * Routine: convert_port_to_thread
1141 * Purpose:
1142 * Convert from a port to a thread.
1143 * Doesn't consume the port ref; produces a thread ref,
1144 * which may be null.
1145 * Conditions:
1146 * Nothing locked.
1147 */
1148
1149 thread_t
1150 convert_port_to_thread(
1151 ipc_port_t port)
1152 {
1153 thread_t thread = THREAD_NULL;
1154
1155 if (IP_VALID(port)) {
1156 ip_lock(port);
1157
1158 if ( ip_active(port) &&
1159 ip_kotype(port) == IKOT_THREAD ) {
1160 thread = (thread_t)port->ip_kobject;
1161 assert(thread != THREAD_NULL);
1162
1163 thread_reference_internal(thread);
1164 }
1165
1166 ip_unlock(port);
1167 }
1168
1169 return (thread);
1170 }
1171
1172 /*
1173 * Routine: port_name_to_thread
1174 * Purpose:
1175 * Convert from a port name to a thread reference.
1176 * A name of MACH_PORT_NULL is valid for the null thread.
1177 * Conditions:
1178 * Nothing locked.
1179 */
1180 thread_t
1181 port_name_to_thread(
1182 mach_port_name_t name)
1183 {
1184 thread_t thread = THREAD_NULL;
1185 ipc_port_t kport;
1186
1187 if (MACH_PORT_VALID(name)) {
1188 if (ipc_object_copyin(current_space(), name,
1189 MACH_MSG_TYPE_COPY_SEND,
1190 (ipc_object_t *)&kport) != KERN_SUCCESS)
1191 return (THREAD_NULL);
1192
1193 thread = convert_port_to_thread(kport);
1194
1195 if (IP_VALID(kport))
1196 ipc_port_release_send(kport);
1197 }
1198
1199 return (thread);
1200 }
1201
1202 task_t
1203 port_name_to_task(
1204 mach_port_name_t name)
1205 {
1206 ipc_port_t kern_port;
1207 kern_return_t kr;
1208 task_t task = TASK_NULL;
1209
1210 if (MACH_PORT_VALID(name)) {
1211 kr = ipc_object_copyin(current_space(), name,
1212 MACH_MSG_TYPE_COPY_SEND,
1213 (ipc_object_t *) &kern_port);
1214 if (kr != KERN_SUCCESS)
1215 return TASK_NULL;
1216
1217 task = convert_port_to_task(kern_port);
1218
1219 if (IP_VALID(kern_port))
1220 ipc_port_release_send(kern_port);
1221 }
1222 return task;
1223 }
1224
1225 /*
1226 * Routine: convert_task_to_port
1227 * Purpose:
1228 * Convert from a task to a port.
1229 * Consumes a task ref; produces a naked send right
1230 * which may be invalid.
1231 * Conditions:
1232 * Nothing locked.
1233 */
1234
1235 ipc_port_t
1236 convert_task_to_port(
1237 task_t task)
1238 {
1239 ipc_port_t port;
1240
1241 itk_lock(task);
1242 if (task->itk_self != IP_NULL)
1243 port = ipc_port_make_send(task->itk_self);
1244 else
1245 port = IP_NULL;
1246 itk_unlock(task);
1247
1248 task_deallocate(task);
1249 return port;
1250 }
1251
1252 /*
1253 * Routine: convert_task_name_to_port
1254 * Purpose:
1255 * Convert from a task name ref to a port.
1256 * Consumes a task name ref; produces a naked send right
1257 * which may be invalid.
1258 * Conditions:
1259 * Nothing locked.
1260 */
1261
1262 ipc_port_t
1263 convert_task_name_to_port(
1264 task_name_t task_name)
1265 {
1266 ipc_port_t port;
1267
1268 itk_lock(task_name);
1269 if (task_name->itk_nself != IP_NULL)
1270 port = ipc_port_make_send(task_name->itk_nself);
1271 else
1272 port = IP_NULL;
1273 itk_unlock(task_name);
1274
1275 task_name_deallocate(task_name);
1276 return port;
1277 }
1278
1279 /*
1280 * Routine: convert_thread_to_port
1281 * Purpose:
1282 * Convert from a thread to a port.
1283 * Consumes a thread ref; produces a naked send right
1284 * which may be invalid.
1285 * Conditions:
1286 * Nothing locked.
1287 */
1288
1289 ipc_port_t
1290 convert_thread_to_port(
1291 thread_t thread)
1292 {
1293 ipc_port_t port;
1294
1295 thread_mtx_lock(thread);
1296
1297 if (thread->ith_self != IP_NULL)
1298 port = ipc_port_make_send(thread->ith_self);
1299 else
1300 port = IP_NULL;
1301
1302 thread_mtx_unlock(thread);
1303
1304 thread_deallocate(thread);
1305
1306 return (port);
1307 }
1308
1309 /*
1310 * Routine: space_deallocate
1311 * Purpose:
1312 * Deallocate a space ref produced by convert_port_to_space.
1313 * Conditions:
1314 * Nothing locked.
1315 */
1316
1317 void
1318 space_deallocate(
1319 ipc_space_t space)
1320 {
1321 if (space != IS_NULL)
1322 is_release(space);
1323 }
1324
1325 /*
1326 * Routine: thread/task_set_exception_ports [kernel call]
1327 * Purpose:
1328 * Sets the thread/task exception port, flavor and
1329 * behavior for the exception types specified by the mask.
1330 * There will be one send right per exception per valid
1331 * port.
1332 * Conditions:
1333 * Nothing locked. If successful, consumes
1334 * the supplied send right.
1335 * Returns:
1336 * KERN_SUCCESS Changed the special port.
1337 * KERN_INVALID_ARGUMENT The thread is null,
1338 * Illegal mask bit set.
1339 * Illegal exception behavior
1340 * KERN_FAILURE The thread is dead.
1341 */
1342
1343 kern_return_t
1344 thread_set_exception_ports(
1345 thread_t thread,
1346 exception_mask_t exception_mask,
1347 ipc_port_t new_port,
1348 exception_behavior_t new_behavior,
1349 thread_state_flavor_t new_flavor)
1350 {
1351 ipc_port_t old_port[EXC_TYPES_COUNT];
1352 register int i;
1353
1354 if (thread == THREAD_NULL)
1355 return (KERN_INVALID_ARGUMENT);
1356
1357 if (exception_mask & ~EXC_MASK_ALL)
1358 return (KERN_INVALID_ARGUMENT);
1359
1360 if (IP_VALID(new_port)) {
1361 switch (new_behavior) {
1362
1363 case EXCEPTION_DEFAULT:
1364 case EXCEPTION_STATE:
1365 case EXCEPTION_STATE_IDENTITY:
1366 break;
1367
1368 default:
1369 return (KERN_INVALID_ARGUMENT);
1370 }
1371 }
1372
1373 /*
1374 * Check the validity of the thread_state_flavor by calling the
1375 * VALID_THREAD_STATE_FLAVOR architecture dependent macro defined in
1376 * osfmk/mach/ARCHITECTURE/thread_status.h
1377 */
1378 if (!VALID_THREAD_STATE_FLAVOR(new_flavor))
1379 return (KERN_INVALID_ARGUMENT);
1380
1381 thread_mtx_lock(thread);
1382
1383 if (!thread->active) {
1384 thread_mtx_unlock(thread);
1385
1386 return (KERN_FAILURE);
1387 }
1388
1389 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
1390 if (exception_mask & (1 << i)) {
1391 old_port[i] = thread->exc_actions[i].port;
1392 thread->exc_actions[i].port = ipc_port_copy_send(new_port);
1393 thread->exc_actions[i].behavior = new_behavior;
1394 thread->exc_actions[i].flavor = new_flavor;
1395 }
1396 else
1397 old_port[i] = IP_NULL;
1398 }
1399
1400 thread_mtx_unlock(thread);
1401
1402 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i)
1403 if (IP_VALID(old_port[i]))
1404 ipc_port_release_send(old_port[i]);
1405
1406 if (IP_VALID(new_port)) /* consume send right */
1407 ipc_port_release_send(new_port);
1408
1409 return (KERN_SUCCESS);
1410 }
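
/*
 *	Example:
 *		User-space sketch (illustrative only; not part of this file).
 *		Here exc_port is a hypothetical send right to an exception
 *		handler port; EXCEPTION_DEFAULT with MACHINE_THREAD_STATE is a
 *		common behavior/flavor pairing.
 *
 *		#include <mach/mach.h>
 *
 *		mach_port_t thread = mach_thread_self();
 *		kern_return_t kr;
 *
 *		kr = thread_set_exception_ports(thread, EXC_MASK_BAD_ACCESS,
 *						exc_port, EXCEPTION_DEFAULT,
 *						MACHINE_THREAD_STATE);
 *		mach_port_deallocate(mach_task_self(), thread);
 */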
1411
1412 kern_return_t
1413 task_set_exception_ports(
1414 task_t task,
1415 exception_mask_t exception_mask,
1416 ipc_port_t new_port,
1417 exception_behavior_t new_behavior,
1418 thread_state_flavor_t new_flavor)
1419 {
1420 ipc_port_t old_port[EXC_TYPES_COUNT];
1421 register int i;
1422
1423 if (task == TASK_NULL)
1424 return (KERN_INVALID_ARGUMENT);
1425
1426 if (exception_mask & ~EXC_MASK_ALL)
1427 return (KERN_INVALID_ARGUMENT);
1428
1429 if (IP_VALID(new_port)) {
1430 switch (new_behavior) {
1431
1432 case EXCEPTION_DEFAULT:
1433 case EXCEPTION_STATE:
1434 case EXCEPTION_STATE_IDENTITY:
1435 break;
1436
1437 default:
1438 return (KERN_INVALID_ARGUMENT);
1439 }
1440 }
1441
1442 itk_lock(task);
1443
1444 if (task->itk_self == IP_NULL) {
1445 itk_unlock(task);
1446
1447 return (KERN_FAILURE);
1448 }
1449
1450 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
1451 if (exception_mask & (1 << i)) {
1452 old_port[i] = task->exc_actions[i].port;
1453 task->exc_actions[i].port =
1454 ipc_port_copy_send(new_port);
1455 task->exc_actions[i].behavior = new_behavior;
1456 task->exc_actions[i].flavor = new_flavor;
1457 }
1458 else
1459 old_port[i] = IP_NULL;
1460 }
1461
1462 itk_unlock(task);
1463
1464 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i)
1465 if (IP_VALID(old_port[i]))
1466 ipc_port_release_send(old_port[i]);
1467
1468 if (IP_VALID(new_port)) /* consume send right */
1469 ipc_port_release_send(new_port);
1470
1471 return (KERN_SUCCESS);
1472 }
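
/*
 *	Example:
 *		User-space sketch (illustrative only; not part of this file):
 *		allocate a handler port, insert a send right for it, and route
 *		the task's EXC_BAD_ACCESS exceptions to that port.
 *
 *		#include <mach/mach.h>
 *
 *		mach_port_t exc_port = MACH_PORT_NULL;
 *		kern_return_t kr;
 *
 *		kr = mach_port_allocate(mach_task_self(),
 *					MACH_PORT_RIGHT_RECEIVE, &exc_port);
 *		if (kr == KERN_SUCCESS)
 *			kr = mach_port_insert_right(mach_task_self(), exc_port,
 *						    exc_port,
 *						    MACH_MSG_TYPE_MAKE_SEND);
 *		if (kr == KERN_SUCCESS)
 *			kr = task_set_exception_ports(mach_task_self(),
 *						      EXC_MASK_BAD_ACCESS,
 *						      exc_port,
 *						      EXCEPTION_DEFAULT,
 *						      MACHINE_THREAD_STATE);
 */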
1473
1474 /*
1475 * Routine: thread/task_swap_exception_ports [kernel call]
1476 * Purpose:
1477 * Sets the thread/task exception port, flavor and
1478 * behavior for the exception types specified by the
1479 * mask.
1480 *
1481 * The old ports, behaviors and flavors are returned.
1482 * Count specifies the array sizes on input and
1483 * the number of returned ports etc. on output. The
1484 * arrays must be large enough to hold all the returned
1485 * data; MIG returns an error otherwise. The masks
1486 * array specifies the corresponding exception type(s).
1487 *
1488 * Conditions:
1489 * Nothing locked. If successful, consumes
1490 * the supplied send right.
1491 *
1492 * Returns up to [in] CountCnt elements.
1493 * Returns:
1494 * KERN_SUCCESS Changed the special port.
1495 * KERN_INVALID_ARGUMENT The thread is null,
1496 * Illegal mask bit set.
1497 * Illegal exception behavior
1498 * KERN_FAILURE The thread is dead.
1499 */
1500
1501 kern_return_t
1502 thread_swap_exception_ports(
1503 thread_t thread,
1504 exception_mask_t exception_mask,
1505 ipc_port_t new_port,
1506 exception_behavior_t new_behavior,
1507 thread_state_flavor_t new_flavor,
1508 exception_mask_array_t masks,
1509 mach_msg_type_number_t *CountCnt,
1510 exception_port_array_t ports,
1511 exception_behavior_array_t behaviors,
1512 thread_state_flavor_array_t flavors)
1513 {
1514 ipc_port_t old_port[EXC_TYPES_COUNT];
1515 unsigned int i, j, count;
1516
1517 if (thread == THREAD_NULL)
1518 return (KERN_INVALID_ARGUMENT);
1519
1520 if (exception_mask & ~EXC_MASK_ALL)
1521 return (KERN_INVALID_ARGUMENT);
1522
1523 if (IP_VALID(new_port)) {
1524 switch (new_behavior) {
1525
1526 case EXCEPTION_DEFAULT:
1527 case EXCEPTION_STATE:
1528 case EXCEPTION_STATE_IDENTITY:
1529 break;
1530
1531 default:
1532 return (KERN_INVALID_ARGUMENT);
1533 }
1534 }
1535
1536 thread_mtx_lock(thread);
1537
1538 if (!thread->active) {
1539 thread_mtx_unlock(thread);
1540
1541 return (KERN_FAILURE);
1542 }
1543
1544 count = 0;
1545
1546 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
1547 if (exception_mask & (1 << i)) {
1548 for (j = 0; j < count; ++j) {
1549 /*
1550 * search for an identical entry; if found,
1551 * set the corresponding mask for this exception.
1552 */
1553 if ( thread->exc_actions[i].port == ports[j] &&
1554 thread->exc_actions[i].behavior == behaviors[j] &&
1555 thread->exc_actions[i].flavor == flavors[j] ) {
1556 masks[j] |= (1 << i);
1557 break;
1558 }
1559 }
1560
1561 if (j == count) {
1562 masks[j] = (1 << i);
1563 ports[j] = ipc_port_copy_send(thread->exc_actions[i].port);
1564
1565 behaviors[j] = thread->exc_actions[i].behavior;
1566 flavors[j] = thread->exc_actions[i].flavor;
1567 ++count;
1568 }
1569
1570 old_port[i] = thread->exc_actions[i].port;
1571 thread->exc_actions[i].port = ipc_port_copy_send(new_port);
1572 thread->exc_actions[i].behavior = new_behavior;
1573 thread->exc_actions[i].flavor = new_flavor;
1574 if (count > *CountCnt)
1575 break;
1576 }
1577 else
1578 old_port[i] = IP_NULL;
1579 }
1580
1581 thread_mtx_unlock(thread);
1582
1583 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i)
1584 if (IP_VALID(old_port[i]))
1585 ipc_port_release_send(old_port[i]);
1586
1587 if (IP_VALID(new_port)) /* consume send right */
1588 ipc_port_release_send(new_port);
1589
1590 *CountCnt = count;
1591
1592 return (KERN_SUCCESS);
1593 }
1594
1595 kern_return_t
1596 task_swap_exception_ports(
1597 task_t task,
1598 exception_mask_t exception_mask,
1599 ipc_port_t new_port,
1600 exception_behavior_t new_behavior,
1601 thread_state_flavor_t new_flavor,
1602 exception_mask_array_t masks,
1603 mach_msg_type_number_t *CountCnt,
1604 exception_port_array_t ports,
1605 exception_behavior_array_t behaviors,
1606 thread_state_flavor_array_t flavors)
1607 {
1608 ipc_port_t old_port[EXC_TYPES_COUNT];
1609 unsigned int i, j, count;
1610
1611 if (task == TASK_NULL)
1612 return (KERN_INVALID_ARGUMENT);
1613
1614 if (exception_mask & ~EXC_MASK_ALL)
1615 return (KERN_INVALID_ARGUMENT);
1616
1617 if (IP_VALID(new_port)) {
1618 switch (new_behavior) {
1619
1620 case EXCEPTION_DEFAULT:
1621 case EXCEPTION_STATE:
1622 case EXCEPTION_STATE_IDENTITY:
1623 break;
1624
1625 default:
1626 return (KERN_INVALID_ARGUMENT);
1627 }
1628 }
1629
1630 itk_lock(task);
1631
1632 if (task->itk_self == IP_NULL) {
1633 itk_unlock(task);
1634
1635 return (KERN_FAILURE);
1636 }
1637
1638 count = 0;
1639
1640 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
1641 if (exception_mask & (1 << i)) {
1642 for (j = 0; j < count; j++) {
1643 /*
1644 * search for an identical entry; if found,
1645 * set the corresponding mask for this exception.
1646 */
1647 if ( task->exc_actions[i].port == ports[j] &&
1648 task->exc_actions[i].behavior == behaviors[j] &&
1649 task->exc_actions[i].flavor == flavors[j] ) {
1650 masks[j] |= (1 << i);
1651 break;
1652 }
1653 }
1654
1655 if (j == count) {
1656 masks[j] = (1 << i);
1657 ports[j] = ipc_port_copy_send(task->exc_actions[i].port);
1658 behaviors[j] = task->exc_actions[i].behavior;
1659 flavors[j] = task->exc_actions[i].flavor;
1660 ++count;
1661 }
1662
1663 old_port[i] = task->exc_actions[i].port;
1664 task->exc_actions[i].port = ipc_port_copy_send(new_port);
1665 task->exc_actions[i].behavior = new_behavior;
1666 task->exc_actions[i].flavor = new_flavor;
1667 if (count > *CountCnt)
1668 break;
1669 }
1670 else
1671 old_port[i] = IP_NULL;
1672 }
1673
1674 itk_unlock(task);
1675
1676 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++)
1677 if (IP_VALID(old_port[i]))
1678 ipc_port_release_send(old_port[i]);
1679
1680 if (IP_VALID(new_port)) /* consume send right */
1681 ipc_port_release_send(new_port);
1682
1683 *CountCnt = count;
1684
1685 return (KERN_SUCCESS);
1686 }
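
/*
 *	Example:
 *		User-space sketch (illustrative only; not part of this file).
 *		A typical crash-handling setup installs its own port (exc_port,
 *		a hypothetical send right) and keeps the previous settings so
 *		they can be restored later with task_set_exception_ports().
 *
 *		#include <mach/mach.h>
 *
 *		exception_mask_t	masks[EXC_TYPES_COUNT];
 *		mach_msg_type_number_t	count = EXC_TYPES_COUNT;
 *		mach_port_t		old_ports[EXC_TYPES_COUNT];
 *		exception_behavior_t	old_behaviors[EXC_TYPES_COUNT];
 *		thread_state_flavor_t	old_flavors[EXC_TYPES_COUNT];
 *
 *		kern_return_t kr = task_swap_exception_ports(mach_task_self(),
 *			EXC_MASK_BAD_ACCESS, exc_port, EXCEPTION_DEFAULT,
 *			MACHINE_THREAD_STATE, masks, &count,
 *			old_ports, old_behaviors, old_flavors);
 */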
1687
1688 /*
1689 * Routine: thread/task_get_exception_ports [kernel call]
1690 * Purpose:
1691 * Clones a send right for each of the thread/task's exception
1692 * ports specified in the mask and returns the behavior
1693 * and flavor of said port.
1694 *
1695 * Returns up to [in] CountCnt elements.
1696 *
1697 * Conditions:
1698 * Nothing locked.
1699 * Returns:
1700 * KERN_SUCCESS Extracted a send right.
1701 * KERN_INVALID_ARGUMENT The thread is null,
1702 * Invalid special port,
1703 * Illegal mask bit set.
1704 * KERN_FAILURE The thread is dead.
1705 */
1706
1707 kern_return_t
1708 thread_get_exception_ports(
1709 thread_t thread,
1710 exception_mask_t exception_mask,
1711 exception_mask_array_t masks,
1712 mach_msg_type_number_t *CountCnt,
1713 exception_port_array_t ports,
1714 exception_behavior_array_t behaviors,
1715 thread_state_flavor_array_t flavors)
1716 {
1717 unsigned int i, j, count;
1718
1719 if (thread == THREAD_NULL)
1720 return (KERN_INVALID_ARGUMENT);
1721
1722 if (exception_mask & ~EXC_MASK_ALL)
1723 return (KERN_INVALID_ARGUMENT);
1724
1725 thread_mtx_lock(thread);
1726
1727 if (!thread->active) {
1728 thread_mtx_unlock(thread);
1729
1730 return (KERN_FAILURE);
1731 }
1732
1733 count = 0;
1734
1735 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
1736 if (exception_mask & (1 << i)) {
1737 for (j = 0; j < count; ++j) {
1738 /*
1739 * search for an identical entry; if found,
1740 * set the corresponding mask for this exception.
1741 */
1742 if ( thread->exc_actions[i].port == ports[j] &&
1743 thread->exc_actions[i].behavior == behaviors[j] &&
1744 thread->exc_actions[i].flavor == flavors[j] ) {
1745 masks[j] |= (1 << i);
1746 break;
1747 }
1748 }
1749
1750 if (j == count) {
1751 masks[j] = (1 << i);
1752 ports[j] = ipc_port_copy_send(thread->exc_actions[i].port);
1753 behaviors[j] = thread->exc_actions[i].behavior;
1754 flavors[j] = thread->exc_actions[i].flavor;
1755 ++count;
1756 if (count >= *CountCnt)
1757 break;
1758 }
1759 }
1760 }
1761
1762 thread_mtx_unlock(thread);
1763
1764 *CountCnt = count;
1765
1766 return (KERN_SUCCESS);
1767 }
1768
1769 kern_return_t
1770 task_get_exception_ports(
1771 task_t task,
1772 exception_mask_t exception_mask,
1773 exception_mask_array_t masks,
1774 mach_msg_type_number_t *CountCnt,
1775 exception_port_array_t ports,
1776 exception_behavior_array_t behaviors,
1777 thread_state_flavor_array_t flavors)
1778 {
1779 unsigned int i, j, count;
1780
1781 if (task == TASK_NULL)
1782 return (KERN_INVALID_ARGUMENT);
1783
1784 if (exception_mask & ~EXC_MASK_ALL)
1785 return (KERN_INVALID_ARGUMENT);
1786
1787 itk_lock(task);
1788
1789 if (task->itk_self == IP_NULL) {
1790 itk_unlock(task);
1791
1792 return (KERN_FAILURE);
1793 }
1794
1795 count = 0;
1796
1797 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
1798 if (exception_mask & (1 << i)) {
1799 for (j = 0; j < count; ++j) {
1800 /*
1801 * search for an identical entry; if found,
1802 * set the corresponding mask for this exception.
1803 */
1804 if ( task->exc_actions[i].port == ports[j] &&
1805 task->exc_actions[i].behavior == behaviors[j] &&
1806 task->exc_actions[i].flavor == flavors[j] ) {
1807 masks[j] |= (1 << i);
1808 break;
1809 }
1810 }
1811
1812 if (j == count) {
1813 masks[j] = (1 << i);
1814 ports[j] = ipc_port_copy_send(task->exc_actions[i].port);
1815 behaviors[j] = task->exc_actions[i].behavior;
1816 flavors[j] = task->exc_actions[i].flavor;
1817 ++count;
1818 if (count > *CountCnt)
1819 break;
1820 }
1821 }
1822 }
1823
1824 itk_unlock(task);
1825
1826 *CountCnt = count;
1827
1828 return (KERN_SUCCESS);
1829 }
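
/*
 *	Example:
 *		User-space sketch (illustrative only; not part of this file):
 *		inspect the task's current exception handlers and release the
 *		cloned send rights afterwards.
 *
 *		#include <mach/mach.h>
 *
 *		exception_mask_t	masks[EXC_TYPES_COUNT];
 *		mach_msg_type_number_t	count = EXC_TYPES_COUNT, i;
 *		mach_port_t		ports[EXC_TYPES_COUNT];
 *		exception_behavior_t	behaviors[EXC_TYPES_COUNT];
 *		thread_state_flavor_t	flavors[EXC_TYPES_COUNT];
 *
 *		if (task_get_exception_ports(mach_task_self(), EXC_MASK_ALL,
 *					     masks, &count, ports, behaviors,
 *					     flavors) == KERN_SUCCESS)
 *			for (i = 0; i < count; i++)
 *				if (MACH_PORT_VALID(ports[i]))
 *					mach_port_deallocate(mach_task_self(),
 *							     ports[i]);
 */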