1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 /*
23 * @OSF_COPYRIGHT@
24 */
25 /*
26 * Mach Operating System
27 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
28 * All Rights Reserved.
29 *
30 * Permission to use, copy, modify and distribute this software and its
31 * documentation is hereby granted, provided that both the copyright
32 * notice and this permission notice appear in all copies of the
33 * software, derivative works or modified versions, and any portions
34 * thereof, and that both notices appear in supporting documentation.
35 *
36 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
37 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
38 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
39 *
40 * Carnegie Mellon requests users of this software to return to
41 *
42 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
43 * School of Computer Science
44 * Carnegie Mellon University
45 * Pittsburgh PA 15213-3890
46 *
47 * any improvements or extensions that they make and grant Carnegie Mellon
48 * the rights to redistribute these changes.
49 */
50 /*
51 */
52
53 /*
54 * File: ipc_tt.c
55 * Purpose:
56 * Task and thread related IPC functions.
57 */
58
59 #include <mach/boolean.h>
60 #include <mach_rt.h>
61 #include <mach/kern_return.h>
62 #include <mach/mach_param.h>
63 #include <mach/task_special_ports.h>
64 #include <mach/thread_special_ports.h>
65 #include <mach/thread_status.h>
66 #include <mach/exception_types.h>
67 #include <mach/mach_traps.h>
68 #include <mach/task_server.h>
69 #include <mach/thread_act_server.h>
70 #include <mach/mach_host_server.h>
71 #include <mach/vm_map_server.h>
72 #include <kern/host.h>
73 #include <kern/ipc_tt.h>
74 #include <kern/thread_act.h>
75 #include <kern/misc_protos.h>
76 #include <vm/vm_pageout.h>
77
78 /*
79 * Routine: ipc_task_init
80 * Purpose:
81 * Initialize a task's IPC state.
82 *
83 * If the parent is non-null, some state is inherited from it;
84 * the parent must already be appropriately initialized.
85 * Conditions:
86 * Nothing locked.
87 */
88
89 void
90 ipc_task_init(
91 task_t task,
92 task_t parent)
93 {
94 ipc_space_t space;
95 ipc_port_t kport;
96 kern_return_t kr;
97 int i;
98
99
100 kr = ipc_space_create(&ipc_table_entries[0], &space);
101 if (kr != KERN_SUCCESS)
102 panic("ipc_task_init");
103
104
105 kport = ipc_port_alloc_kernel();
106 if (kport == IP_NULL)
107 panic("ipc_task_init");
108
109 itk_lock_init(task);
110 task->itk_self = kport;
111 task->itk_sself = ipc_port_make_send(kport);
112 task->itk_space = space;
113 space->is_fast = task->kernel_loaded;
114
115 if (parent == TASK_NULL) {
116 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
117 task->exc_actions[i].port = IP_NULL;
118 }/* for */
119 task->itk_host = ipc_port_make_send(realhost.host_self);
120 task->itk_bootstrap = IP_NULL;
121 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
122 task->itk_registered[i] = IP_NULL;
123 } else {
124 itk_lock(parent);
125 assert(parent->itk_self != IP_NULL);
126
127 /* inherit registered ports */
128
129 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
130 task->itk_registered[i] =
131 ipc_port_copy_send(parent->itk_registered[i]);
132
133 /* inherit exception and bootstrap ports */
134
135 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
136 task->exc_actions[i].port =
137 ipc_port_copy_send(parent->exc_actions[i].port);
138 task->exc_actions[i].flavor =
139 parent->exc_actions[i].flavor;
140 task->exc_actions[i].behavior =
141 parent->exc_actions[i].behavior;
142 }/* for */
143 task->itk_host =
144 ipc_port_copy_send(parent->itk_host);
145
146 task->itk_bootstrap =
147 ipc_port_copy_send(parent->itk_bootstrap);
148
149 itk_unlock(parent);
150 }
151 }
152
153 /*
154 * Routine: ipc_task_enable
155 * Purpose:
156 * Enable a task for IPC access.
157 * Conditions:
158 * Nothing locked.
159 */
160
161 void
162 ipc_task_enable(
163 task_t task)
164 {
165 ipc_port_t kport;
166
167 itk_lock(task);
168 kport = task->itk_self;
169 if (kport != IP_NULL)
170 ipc_kobject_set(kport, (ipc_kobject_t) task, IKOT_TASK);
171 itk_unlock(task);
172 }
173
174 /*
175 * Routine: ipc_task_disable
176 * Purpose:
177 * Disable IPC access to a task.
178 * Conditions:
179 * Nothing locked.
180 */
181
182 void
183 ipc_task_disable(
184 task_t task)
185 {
186 ipc_port_t kport;
187
188 itk_lock(task);
189 kport = task->itk_self;
190 if (kport != IP_NULL)
191 ipc_kobject_set(kport, IKO_NULL, IKOT_NONE);
192 itk_unlock(task);
193 }
194
195 /*
196 * Routine: ipc_task_terminate
197 * Purpose:
198 * Clean up and destroy a task's IPC state.
199 * Conditions:
200 * Nothing locked. The task must be suspended.
201 * (Or the current thread must be in the task.)
202 */
203
204 void
205 ipc_task_terminate(
206 task_t task)
207 {
208 ipc_port_t kport;
209 int i;
210
211 itk_lock(task);
212 kport = task->itk_self;
213
214 if (kport == IP_NULL) {
215 /* the task is already terminated (can this happen?) */
216 itk_unlock(task);
217 return;
218 }
219
220 task->itk_self = IP_NULL;
221 itk_unlock(task);
222
223 /* release the naked send rights */
224
225 if (IP_VALID(task->itk_sself))
226 ipc_port_release_send(task->itk_sself);
227
228 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
229 if (IP_VALID(task->exc_actions[i].port)) {
230 ipc_port_release_send(task->exc_actions[i].port);
231 }
232 }/* for */
233 if (IP_VALID(task->itk_host))
234 ipc_port_release_send(task->itk_host);
235
236 if (IP_VALID(task->itk_bootstrap))
237 ipc_port_release_send(task->itk_bootstrap);
238
239 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
240 if (IP_VALID(task->itk_registered[i]))
241 ipc_port_release_send(task->itk_registered[i]);
242
243 ipc_port_release_send(task->wired_ledger_port);
244 ipc_port_release_send(task->paged_ledger_port);
245
246 /* destroy the kernel port */
247 ipc_port_dealloc_kernel(kport);
248 }
249
250 /*
251 * Routine: ipc_thread_init
252 * Purpose:
253 * Initialize a thread's IPC state.
254 * Conditions:
255 * Nothing locked.
256 */
257
258 void
259 ipc_thread_init(
260 thread_t thread)
261 {
262 ipc_kmsg_queue_init(&thread->ith_messages);
263 thread->ith_mig_reply = MACH_PORT_NULL;
264 thread->ith_rpc_reply = IP_NULL;
265 }
266
267 /*
268 * Routine: ipc_thread_terminate
269 * Purpose:
270 * Clean up and destroy a thread's IPC state.
271 * Conditions:
272 * Nothing locked. The thread must be suspended.
273 * (Or be the current thread.)
274 */
275
276 void
277 ipc_thread_terminate(
278 thread_t thread)
279 {
280 assert(ipc_kmsg_queue_empty(&thread->ith_messages));
281
282 if (thread->ith_rpc_reply != IP_NULL)
283 ipc_port_dealloc_reply(thread->ith_rpc_reply);
284 thread->ith_rpc_reply = IP_NULL;
285 }
286
287 /*
288 * Routine: ipc_thr_act_init
289 * Purpose:
290 * Initialize a thr_act's IPC state.
291 * Conditions:
292 * Nothing locked.
293 */
294
295 void
296 ipc_thr_act_init(task_t task, thread_act_t thr_act)
297 {
298 ipc_port_t kport; int i;
299
300 kport = ipc_port_alloc_kernel();
301 if (kport == IP_NULL)
302 panic("ipc_thr_act_init");
303
304 thr_act->ith_self = kport;
305 thr_act->ith_sself = ipc_port_make_send(kport);
306
307 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++)
308 thr_act->exc_actions[i].port = IP_NULL;
309
310 ipc_kobject_set(kport, (ipc_kobject_t) thr_act, IKOT_ACT);
311 }
312
313 void
314 ipc_thr_act_disable(thread_act_t thr_act)
315 {
316 int i;
317 ipc_port_t kport;
318
319 kport = thr_act->ith_self;
320
321 if (kport != IP_NULL)
322 ipc_kobject_set(kport, IKO_NULL, IKOT_NONE);
323 }
324
325 void
326 ipc_thr_act_terminate(thread_act_t thr_act)
327 {
328 ipc_port_t kport; int i;
329
330 kport = thr_act->ith_self;
331
332 if (kport == IP_NULL) {
333 /* the thread is already terminated (can this happen?) */
334 return;
335 }
336
337 thr_act->ith_self = IP_NULL;
338
339 /* release the naked send rights */
340
341 if (IP_VALID(thr_act->ith_sself))
342 ipc_port_release_send(thr_act->ith_sself);
343 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
344 if (IP_VALID(thr_act->exc_actions[i].port))
345 ipc_port_release_send(thr_act->exc_actions[i].port);
346 }
347
348 /* destroy the kernel port */
349 ipc_port_dealloc_kernel(kport);
350 }
351
352 /*
353 * Routine: retrieve_task_self_fast
354 * Purpose:
355 * Optimized version of retrieve_task_self
356 * that works only for the current task.
357 *
358 * Return a send right (possibly null/dead)
359 * for the task's user-visible self port.
360 * Conditions:
361 * Nothing locked.
362 */
363
364 ipc_port_t
365 retrieve_task_self_fast(
366 register task_t task)
367 {
368 register ipc_port_t port;
369
370 assert(task == current_task());
371
372 itk_lock(task);
373 assert(task->itk_self != IP_NULL);
374
375 if ((port = task->itk_sself) == task->itk_self) {
376 /* no interposing */
377
378 ip_lock(port);
379 assert(ip_active(port));
380 ip_reference(port);
381 port->ip_srights++;
382 ip_unlock(port);
383 } else
384 port = ipc_port_copy_send(port);
385 itk_unlock(task);
386
387 return port;
388 }
389
390 /*
391 * Routine: retrieve_act_self_fast
392 * Purpose:
393 * Optimized version of retrieve_thread_self
394 * that works only for the current thread.
395 *
396 * Return a send right (possibly null/dead)
397 * for the thread's user-visible self port.
398 * Conditions:
399 * Nothing locked.
400 */
401
402 ipc_port_t
403 retrieve_act_self_fast(thread_act_t thr_act)
404 {
405 register ipc_port_t port;
406
407 assert(thr_act == current_act());
408 act_lock(thr_act);
409 assert(thr_act->ith_self != IP_NULL);
410
411 if ((port = thr_act->ith_sself) == thr_act->ith_self) {
412 /* no interposing */
413
414 ip_lock(port);
415 assert(ip_active(port));
416 ip_reference(port);
417 port->ip_srights++;
418 ip_unlock(port);
419 } else
420 port = ipc_port_copy_send(port);
421 act_unlock(thr_act);
422
423 return port;
424 }
425
426 /*
427 * Routine: task_self_trap [mach trap]
428 * Purpose:
429 * Give the caller send rights for his own task port.
430 * Conditions:
431 * Nothing locked.
432 * Returns:
433 * MACH_PORT_NULL if there are any resource failures
434 * or other errors.
435 */
436
437 mach_port_name_t
438 task_self_trap(void)
439 {
440 task_t task = current_task();
441 ipc_port_t sright;
442
443 sright = retrieve_task_self_fast(task);
444 return ipc_port_copyout_send(sright, task->itk_space);
445 }
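
/*
 * Illustrative user-level sketch (not part of this file): user code
 * normally reaches this trap through the mach_task_self() interface.
 * Assumes the standard user-level header <mach/mach.h>; names are
 * placeholders.
 *
 *        mach_port_t self = mach_task_self();
 *
 *        `self' then names a send right for the caller's task kernel
 *        port, suitable as the target of task_* calls on the current task.
 */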
446
447 /*
448 * Routine: thread_self_trap [mach trap]
449 * Purpose:
450 * Give the caller send rights for his own thread port.
451 * Conditions:
452 * Nothing locked.
453 * Returns:
454 * MACH_PORT_NULL if there are any resource failures
455 * or other errors.
456 */
457
458 mach_port_name_t
459 thread_self_trap(void)
460 {
461 thread_act_t thr_act = current_act();
462 task_t task = thr_act->task;
463 ipc_port_t sright;
464
465 sright = retrieve_act_self_fast(thr_act);
466 return ipc_port_copyout_send(sright, task->itk_space);
467 }
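
/*
 * Illustrative user-level sketch (not part of this file): this trap is
 * normally reached through mach_thread_self().  Unlike mach_task_self(),
 * each call hands back a right the caller should release when done.
 * Assumes <mach/mach.h>; names are placeholders.
 *
 *        mach_port_t thread = mach_thread_self();
 *        ... use the thread port ...
 *        mach_port_deallocate(mach_task_self(), thread);
 */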
468
469 /*
470 * Routine: mach_reply_port [mach trap]
471 * Purpose:
472 * Allocate a port for the caller.
473 * Conditions:
474 * Nothing locked.
475 * Returns:
476 * MACH_PORT_NULL if there are any resource failures
477 * or other errors.
478 */
479
480 mach_port_name_t
481 mach_reply_port(void)
482 {
483 ipc_port_t port;
484 mach_port_name_t name;
485 kern_return_t kr;
486
487 kr = ipc_port_alloc(current_task()->itk_space, &name, &port);
488 if (kr == KERN_SUCCESS)
489 ip_unlock(port);
490 else
491 name = MACH_PORT_NULL;
492
493 return name;
494 }
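
/*
 * Illustrative user-level sketch (not part of this file): mach_reply_port()
 * is the usual way for a caller to obtain a receive right to collect RPC
 * replies, cheaper than a full mach_port_allocate() call.  Assumes
 * <mach/mach.h>.
 *
 *        mach_port_t reply = mach_reply_port();
 *        if (reply == MACH_PORT_NULL)
 *                ... allocation failed ...
 */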
495
496 /*
497 * Routine: task_get_special_port [kernel call]
498 * Purpose:
499 * Clones a send right for one of the task's
500 * special ports.
501 * Conditions:
502 * Nothing locked.
503 * Returns:
504 * KERN_SUCCESS Extracted a send right.
505 * KERN_INVALID_ARGUMENT The task is null.
506 * KERN_FAILURE The task/space is dead.
507 * KERN_INVALID_ARGUMENT Invalid special port.
508 */
509
510 kern_return_t
511 task_get_special_port(
512 task_t task,
513 int which,
514 ipc_port_t *portp)
515 {
516 ipc_port_t *whichp;
517 ipc_port_t port;
518
519 if (task == TASK_NULL)
520 return KERN_INVALID_ARGUMENT;
521
522 switch (which) {
523 case TASK_KERNEL_PORT:
524 whichp = &task->itk_sself;
525 break;
526
527 case TASK_HOST_PORT:
528 whichp = &task->itk_host;
529 break;
530
531 case TASK_BOOTSTRAP_PORT:
532 whichp = &task->itk_bootstrap;
533 break;
534
535 case TASK_WIRED_LEDGER_PORT:
536 whichp = &task->wired_ledger_port;
537 break;
538
539 case TASK_PAGED_LEDGER_PORT:
540 whichp = &task->paged_ledger_port;
541 break;
542
543 default:
544 return KERN_INVALID_ARGUMENT;
545 }
546
547 itk_lock(task);
548 if (task->itk_self == IP_NULL) {
549 itk_unlock(task);
550 return KERN_FAILURE;
551 }
552
553 port = ipc_port_copy_send(*whichp);
554 itk_unlock(task);
555
556 *portp = port;
557 return KERN_SUCCESS;
558 }
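
/*
 * Illustrative user-level sketch (not part of this file): fetching the
 * caller's bootstrap port through the MIG-generated interface.  Assumes
 * <mach/mach.h>; error handling is abbreviated.
 *
 *        mach_port_t bootstrap;
 *        kern_return_t kr;
 *
 *        kr = task_get_special_port(mach_task_self(),
 *                                   TASK_BOOTSTRAP_PORT, &bootstrap);
 *        if (kr != KERN_SUCCESS)
 *                ... handle error ...
 */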
559
560 /*
561 * Routine: task_set_special_port [kernel call]
562 * Purpose:
563 * Changes one of the task's special ports,
564 * setting it to the supplied send right.
565 * Conditions:
566 * Nothing locked. If successful, consumes
567 * the supplied send right.
568 * Returns:
569 * KERN_SUCCESS Changed the special port.
570 * KERN_INVALID_ARGUMENT The task is null.
571 * KERN_FAILURE The task/space is dead.
572 * KERN_INVALID_ARGUMENT Invalid special port.
573 */
574
575 kern_return_t
576 task_set_special_port(
577 task_t task,
578 int which,
579 ipc_port_t port)
580 {
581 ipc_port_t *whichp;
582 ipc_port_t old;
583
584 if (task == TASK_NULL)
585 return KERN_INVALID_ARGUMENT;
586
587 switch (which) {
588 case TASK_KERNEL_PORT:
589 whichp = &task->itk_sself;
590 break;
591
592 case TASK_HOST_PORT:
593 whichp = &task->itk_host;
594 break;
595
596 case TASK_BOOTSTRAP_PORT:
597 whichp = &task->itk_bootstrap;
598 break;
599
600 case TASK_WIRED_LEDGER_PORT:
601 whichp = &task->wired_ledger_port;
602 break;
603
604 case TASK_PAGED_LEDGER_PORT:
605 whichp = &task->paged_ledger_port;
606 break;
607
608 default:
609 return KERN_INVALID_ARGUMENT;
610 }/* switch */
611
612 itk_lock(task);
613 if (task->itk_self == IP_NULL) {
614 itk_unlock(task);
615 return KERN_FAILURE;
616 }
617
618 old = *whichp;
619 *whichp = port;
620 itk_unlock(task);
621
622 if (IP_VALID(old))
623 ipc_port_release_send(old);
624 return KERN_SUCCESS;
625 }
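
/*
 * Illustrative user-level sketch (not part of this file): replacing the
 * caller's bootstrap port.  On success the supplied send right is consumed
 * by the kernel, so the caller must not release it again.  Assumes
 * <mach/mach.h>; `new_bootstrap' is a placeholder send right.
 *
 *        kr = task_set_special_port(mach_task_self(),
 *                                   TASK_BOOTSTRAP_PORT, new_bootstrap);
 */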
626
627
628 /*
629 * Routine: mach_ports_register [kernel call]
630 * Purpose:
631 * Stash a handful of port send rights in the task.
632 * Child tasks will inherit these rights, but they
633 * must use mach_ports_lookup to acquire them.
634 *
635 * The rights are supplied in a (wired) kalloc'd segment.
636 * Rights which aren't supplied are assumed to be null.
637 * Conditions:
638 * Nothing locked. If successful, consumes
639 * the supplied rights and memory.
640 * Returns:
641 * KERN_SUCCESS Stashed the port rights.
642 * KERN_INVALID_ARGUMENT The task is null.
643 * KERN_INVALID_ARGUMENT The task is dead.
644 * KERN_INVALID_ARGUMENT Too many port rights supplied.
645 */
646
647 kern_return_t
648 mach_ports_register(
649 task_t task,
650 mach_port_array_t memory,
651 mach_msg_type_number_t portsCnt)
652 {
653 ipc_port_t ports[TASK_PORT_REGISTER_MAX];
654 int i;
655
656 if ((task == TASK_NULL) ||
657 (portsCnt > TASK_PORT_REGISTER_MAX))
658 return KERN_INVALID_ARGUMENT;
659
660 /*
661 * Pad the port rights with nulls.
662 */
663
664 for (i = 0; i < portsCnt; i++)
665 ports[i] = memory[i];
666 for (; i < TASK_PORT_REGISTER_MAX; i++)
667 ports[i] = IP_NULL;
668
669 itk_lock(task);
670 if (task->itk_self == IP_NULL) {
671 itk_unlock(task);
672 return KERN_INVALID_ARGUMENT;
673 }
674
675 /*
676 * Replace the old send rights with the new.
677 * Release the old rights after unlocking.
678 */
679
680 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
681 ipc_port_t old;
682
683 old = task->itk_registered[i];
684 task->itk_registered[i] = ports[i];
685 ports[i] = old;
686 }
687
688 itk_unlock(task);
689
690 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
691 if (IP_VALID(ports[i]))
692 ipc_port_release_send(ports[i]);
693
694 /*
695 * Now that the operation is known to be successful,
696 * we can free the memory.
697 */
698
699 if (portsCnt != 0)
700 kfree((vm_offset_t) memory,
701 (vm_size_t) (portsCnt * sizeof(mach_port_t)));
702
703 return KERN_SUCCESS;
704 }
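
/*
 * Illustrative user-level sketch (not part of this file): stashing up to
 * TASK_PORT_REGISTER_MAX send rights so that children and later lookups
 * can find them.  Assumes <mach/mach.h>; `svc_port' is a placeholder
 * send right.
 *
 *        mach_port_t init_ports[1];
 *        kern_return_t kr;
 *
 *        init_ports[0] = svc_port;
 *        kr = mach_ports_register(mach_task_self(), init_ports, 1);
 */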
705
706 /*
707 * Routine: mach_ports_lookup [kernel call]
708 * Purpose:
709 * Retrieves (clones) the stashed port send rights.
710 * Conditions:
711 * Nothing locked. If successful, the caller gets
712 * rights and memory.
713 * Returns:
714 * KERN_SUCCESS Retrieved the send rights.
715 * KERN_INVALID_ARGUMENT The task is null.
716 * KERN_INVALID_ARGUMENT The task is dead.
717 * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
718 */
719
720 kern_return_t
721 mach_ports_lookup(
722 task_t task,
723 mach_port_array_t *portsp,
724 mach_msg_type_number_t *portsCnt)
725 {
726 vm_offset_t memory;
727 vm_size_t size;
728 ipc_port_t *ports;
729 int i;
730
731 kern_return_t kr;
732
733 if (task == TASK_NULL)
734 return KERN_INVALID_ARGUMENT;
735
736 size = (vm_size_t) (TASK_PORT_REGISTER_MAX * sizeof(ipc_port_t));
737
738 memory = kalloc(size);
739 if (memory == 0)
740 return KERN_RESOURCE_SHORTAGE;
741
742 itk_lock(task);
743 if (task->itk_self == IP_NULL) {
744 itk_unlock(task);
745
746 kfree(memory, size);
747 return KERN_INVALID_ARGUMENT;
748 }
749
750 ports = (ipc_port_t *) memory;
751
752 /*
753 * Clone port rights. Because kalloc'd memory
754 * is wired, we won't fault while holding the task lock.
755 */
756
757 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
758 ports[i] = ipc_port_copy_send(task->itk_registered[i]);
759
760 itk_unlock(task);
761
762 *portsp = (mach_port_array_t) ports;
763 *portsCnt = TASK_PORT_REGISTER_MAX;
764 return KERN_SUCCESS;
765 }
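
/*
 * Illustrative user-level sketch (not part of this file): retrieving the
 * registered rights.  The ports come back in out-of-line memory that the
 * caller owns and should eventually vm_deallocate().  Assumes <mach/mach.h>.
 *
 *        mach_port_array_t ports;
 *        mach_msg_type_number_t count;
 *        kern_return_t kr;
 *
 *        kr = mach_ports_lookup(mach_task_self(), &ports, &count);
 *        if (kr == KERN_SUCCESS) {
 *                ... use ports[0 .. count-1] ...
 *                vm_deallocate(mach_task_self(), (vm_address_t) ports,
 *                              count * sizeof (ports[0]));
 *        }
 */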
766
767 /*
768 * Routine: convert_port_to_locked_task
769 * Purpose:
770 * Internal helper routine to convert from a port to a locked
771 * task. Used by several routines that try to convert from a
772 * task port to a reference on some task related object.
773 * Conditions:
774 * Nothing locked, blocking OK.
775 */
776 task_t
777 convert_port_to_locked_task(ipc_port_t port)
778 {
779 while (IP_VALID(port)) {
780 task_t task;
781
782 ip_lock(port);
783 if (!ip_active(port) || (ip_kotype(port) != IKOT_TASK)) {
784 ip_unlock(port);
785 return TASK_NULL;
786 }
787 task = (task_t) port->ip_kobject;
788 assert(task != TASK_NULL);
789
790 /*
791 * Normal lock ordering puts task_lock() before ip_lock().
792 * Attempt out-of-order locking here.
793 */
794 if (task_lock_try(task)) {
795 ip_unlock(port);
796 return(task);
797 }
798
799 ip_unlock(port);
800 mutex_pause();
801 }
802 return TASK_NULL;
803 }
804
805 /*
806 * Routine: convert_port_to_task
807 * Purpose:
808 * Convert from a port to a task.
809 * Doesn't consume the port ref; produces a task ref,
810 * which may be null.
811 * Conditions:
812 * Nothing locked.
813 */
814 task_t
815 convert_port_to_task(
816 ipc_port_t port)
817 {
818 task_t task;
819
820 task = convert_port_to_locked_task(port);
821 if (task) {
822 task->ref_count++;
823 task_unlock(task);
824 }
825 return task;
826 }
827
828 /*
829 * Routine: convert_port_to_space
830 * Purpose:
831 * Convert from a port to a space.
832 * Doesn't consume the port ref; produces a space ref,
833 * which may be null.
834 * Conditions:
835 * Nothing locked.
836 */
837 ipc_space_t
838 convert_port_to_space(
839 ipc_port_t port)
840 {
841 ipc_space_t space;
842 task_t task;
843
844 task = convert_port_to_locked_task(port);
845
846 if (task == TASK_NULL)
847 return IPC_SPACE_NULL;
848
849 if (!task->active) {
850 task_unlock(task);
851 return IPC_SPACE_NULL;
852 }
853
854 space = task->itk_space;
855 is_reference(space);
856 task_unlock(task);
857 return (space);
858 }
859
860 upl_t
861 convert_port_to_upl(
862 ipc_port_t port)
863 {
864 upl_t upl;
865
866 ip_lock(port);
867 if (!ip_active(port) || (ip_kotype(port) != IKOT_UPL)) {
868 ip_unlock(port);
869 return (upl_t)NULL;
870 }
871 upl = (upl_t) port->ip_kobject;
872 ip_unlock(port);
873 upl_lock(upl);
874 upl->ref_count+=1;
875 upl_unlock(upl);
876 return upl;
877 }
878
879 mach_port_t
880 convert_upl_to_port(
881 upl_t upl)
882 {
883 return MACH_PORT_NULL;
884 }
885
886 __private_extern__ void
887 upl_no_senders(
888 upl_t upl,
889 mach_port_mscount_t mscount)
890 {
891 return;
892 }
893
894 /*
895 * Routine: convert_port_entry_to_map
896 * Purpose:
897 * Convert from a port specifying an entry or a task
898 * to a map. Doesn't consume the port ref; produces a map ref,
899 * which may be null. Unlike convert_port_to_map, the
900 * port may be backed by either a task or a named entry.
901 * Conditions:
902 * Nothing locked.
903 */
904
905
906 vm_map_t
907 convert_port_entry_to_map(
908 ipc_port_t port)
909 {
910 task_t task;
911 vm_map_t map;
912 vm_named_entry_t named_entry;
913
914 if(IP_VALID(port) && (ip_kotype(port) == IKOT_NAMED_ENTRY)) {
915 while(TRUE) {
916 ip_lock(port);
917 if(ip_active(port) && (ip_kotype(port)
918 == IKOT_NAMED_ENTRY)) {
919 named_entry =
920 (vm_named_entry_t)port->ip_kobject;
921 if (!(mutex_try(&(named_entry)->Lock))) {
922 ip_unlock(port);
923 mutex_pause();
924 continue;
925 }
926 named_entry->ref_count++;
927 mutex_unlock(&(named_entry)->Lock);
928 ip_unlock(port);
929 if ((named_entry->is_sub_map) &&
930 (named_entry->protection
931 & VM_PROT_WRITE)) {
932 map = named_entry->backing.map;
933 } else {
934 mach_destroy_memory_entry(port);
935 return VM_MAP_NULL;
936 }
937 vm_map_reference_swap(map);
938 mach_destroy_memory_entry(port);
939 break;
940 }
941 else
942 return VM_MAP_NULL;
943 }
944 } else {
945 task_t task;
946
947 task = convert_port_to_locked_task(port);
948
949 if (task == TASK_NULL)
950 return VM_MAP_NULL;
951
952 if (!task->active) {
953 task_unlock(task);
954 return VM_MAP_NULL;
955 }
956
957 map = task->map;
958 vm_map_reference_swap(map);
959 task_unlock(task);
960 }
961
962 return map;
963 }
964
965 /*
966 * Routine: convert_port_entry_to_object
967 * Purpose:
968 * Convert from a port specifying a named entry to an
969 * object. Doesn't consume the port ref; produces an object ref,
970 * which may be null.
971 * Conditions:
972 * Nothing locked.
973 */
974
975
976 vm_object_t
977 convert_port_entry_to_object(
978 ipc_port_t port)
979 {
980 vm_object_t object;
981 vm_named_entry_t named_entry;
982
983 if(IP_VALID(port) && (ip_kotype(port) == IKOT_NAMED_ENTRY)) {
984 while(TRUE) {
985 ip_lock(port);
986 if(ip_active(port) && (ip_kotype(port)
987 == IKOT_NAMED_ENTRY)) {
988 named_entry =
989 (vm_named_entry_t)port->ip_kobject;
990 if (!(mutex_try(&(named_entry)->Lock))) {
991 ip_unlock(port);
992 mutex_pause();
993 continue;
994 }
995 named_entry->ref_count++;
996 mutex_unlock(&(named_entry)->Lock);
997 ip_unlock(port);
998 if ((!named_entry->is_sub_map) &&
999 (named_entry->protection
1000 & VM_PROT_WRITE)) {
1001 object = named_entry->object;
1002 } else {
1003 mach_destroy_memory_entry(port);
1004 return (vm_object_t)NULL;
1005 }
1006 vm_object_reference(named_entry->object);
1007 mach_destroy_memory_entry(port);
1008 break;
1009 }
1010 else
1011 return (vm_object_t)NULL;
1012 }
1013 } else {
1014 return (vm_object_t)NULL;
1015 }
1016
1017 return object;
1018 }
1019
1020 /*
1021 * Routine: convert_port_to_map
1022 * Purpose:
1023 * Convert from a port to a map.
1024 * Doesn't consume the port ref; produces a map ref,
1025 * which may be null.
1026 * Conditions:
1027 * Nothing locked.
1028 */
1029
1030 vm_map_t
1031 convert_port_to_map(
1032 ipc_port_t port)
1033 {
1034 task_t task;
1035 vm_map_t map;
1036
1037 task = convert_port_to_locked_task(port);
1038
1039 if (task == TASK_NULL)
1040 return VM_MAP_NULL;
1041
1042 if (!task->active) {
1043 task_unlock(task);
1044 return VM_MAP_NULL;
1045 }
1046
1047 map = task->map;
1048 vm_map_reference_swap(map);
1049 task_unlock(task);
1050 return map;
1051 }
1052
1053
1054 /*
1055 * Routine: convert_port_to_act
1056 * Purpose:
1057 * Convert from a port to a thr_act.
1058 * Doesn't consume the port ref; produces a thr_act ref,
1059 * which may be null.
1060 * Conditions:
1061 * Nothing locked.
1062 */
1063
1064 thread_act_t
1065 convert_port_to_act( ipc_port_t port )
1066 {
1067 boolean_t r;
1068 thread_act_t thr_act = 0;
1069
1070 r = FALSE;
1071 while (!r && IP_VALID(port)) {
1072 ip_lock(port);
1073 r = ref_act_port_locked(port, &thr_act);
1074 /* port unlocked */
1075 }
1076 return (thr_act);
1077 }
1078
1079 boolean_t
1080 ref_act_port_locked( ipc_port_t port, thread_act_t *pthr_act )
1081 {
1082 thread_act_t thr_act;
1083
1084 thr_act = 0;
1085 if (ip_active(port) &&
1086 (ip_kotype(port) == IKOT_ACT)) {
1087 thr_act = (thread_act_t) port->ip_kobject;
1088 assert(thr_act != THR_ACT_NULL);
1089
1090 /*
1091 * Normal lock ordering is act_lock(), then ip_lock().
1092 * Allow out-of-order locking here, using
1093 * act_locked_act_reference() to accommodate it.
1094 */
1095 if (!act_lock_try(thr_act)) {
1096 ip_unlock(port);
1097 mutex_pause();
1098 return (FALSE);
1099 }
1100 act_locked_act_reference(thr_act);
1101 act_unlock(thr_act);
1102 }
1103 *pthr_act = thr_act;
1104 ip_unlock(port);
1105 return (TRUE);
1106 }
1107
1108 /*
1109 * Routine: port_name_to_act
1110 * Purpose:
1111 * Convert from a port name to an act reference.
1112 * A name of MACH_PORT_NULL is valid for the null act.
1113 * Conditions:
1114 * Nothing locked.
1115 */
1116 thread_act_t
1117 port_name_to_act(
1118 mach_port_name_t name)
1119 {
1120 thread_act_t thr_act = THR_ACT_NULL;
1121 ipc_port_t kern_port;
1122 kern_return_t kr;
1123
1124 if (MACH_PORT_VALID(name)) {
1125 kr = ipc_object_copyin(current_space(), name,
1126 MACH_MSG_TYPE_COPY_SEND,
1127 (ipc_object_t *) &kern_port);
1128 if (kr != KERN_SUCCESS)
1129 return THR_ACT_NULL;
1130
1131 thr_act = convert_port_to_act(kern_port);
1132
1133 if (IP_VALID(kern_port))
1134 ipc_port_release_send(kern_port);
1135 }
1136 return thr_act;
1137 }
1138
1139 task_t
1140 port_name_to_task(
1141 mach_port_name_t name)
1142 {
1143 ipc_port_t kern_port;
1144 kern_return_t kr;
1145 task_t task = TASK_NULL;
1146
1147 if (MACH_PORT_VALID(name)) {
1148 kr = ipc_object_copyin(current_space(), name,
1149 MACH_MSG_TYPE_COPY_SEND,
1150 (ipc_object_t *) &kern_port);
1151 if (kr != KERN_SUCCESS)
1152 return TASK_NULL;
1153
1154 task = convert_port_to_task(kern_port);
1155
1156 if (IP_VALID(kern_port))
1157 ipc_port_release_send(kern_port);
1158 }
1159 return task;
1160 }
1161
1162 /*
1163 * Routine: convert_task_to_port
1164 * Purpose:
1165 * Convert from a task to a port.
1166 * Consumes a task ref; produces a naked send right
1167 * which may be invalid.
1168 * Conditions:
1169 * Nothing locked.
1170 */
1171
1172 ipc_port_t
1173 convert_task_to_port(
1174 task_t task)
1175 {
1176 ipc_port_t port;
1177
1178 itk_lock(task);
1179 if (task->itk_self != IP_NULL)
1180 #if NORMA_TASK
1181 if (task->map == VM_MAP_NULL)
1182 /* norma placeholder task */
1183 port = ipc_port_copy_send(task->itk_self);
1184 else
1185 #endif /* NORMA_TASK */
1186 port = ipc_port_make_send(task->itk_self);
1187 else
1188 port = IP_NULL;
1189 itk_unlock(task);
1190
1191 task_deallocate(task);
1192 return port;
1193 }
1194
1195 /*
1196 * Routine: convert_act_to_port
1197 * Purpose:
1198 * Convert from a thr_act to a port.
1199 * Consumes a thr_act ref; produces a naked send right
1200 * which may be invalid.
1201 * Conditions:
1202 * Nothing locked.
1203 */
1204
1205 ipc_port_t
1206 convert_act_to_port(thr_act)
1207 thread_act_t thr_act;
1208 {
1209 ipc_port_t port;
1210
1211 act_lock(thr_act);
1212 if (thr_act->ith_self != IP_NULL)
1213 port = ipc_port_make_send(thr_act->ith_self);
1214 else
1215 port = IP_NULL;
1216 act_unlock(thr_act);
1217
1218 act_deallocate(thr_act);
1219 return port;
1220 }
1221
1222 /*
1223 * Routine: space_deallocate
1224 * Purpose:
1225 * Deallocate a space ref produced by convert_port_to_space.
1226 * Conditions:
1227 * Nothing locked.
1228 */
1229
1230 void
1231 space_deallocate(
1232 ipc_space_t space)
1233 {
1234 if (space != IS_NULL)
1235 is_release(space);
1236 }
1237
1238 /*
1239 * Routine: thread/task_set_exception_ports [kernel call]
1240 * Purpose:
1241 * Sets the thread/task exception port, flavor and
1242 * behavior for the exception types specified by the mask.
1243 * There will be one send right per exception per valid
1244 * port.
1245 * Conditions:
1246 * Nothing locked. If successful, consumes
1247 * the supplied send right.
1248 * Returns:
1249 * KERN_SUCCESS Changed the special port.
1250 * KERN_INVALID_ARGUMENT The thread is null,
1251 * Illegal mask bit set.
1252 * Illegal exception behavior
1253 * KERN_FAILURE The thread is dead.
1254 */
1255
1256 kern_return_t
1257 thread_set_exception_ports(
1258 thread_act_t thr_act,
1259 exception_mask_t exception_mask,
1260 ipc_port_t new_port,
1261 exception_behavior_t new_behavior,
1262 thread_state_flavor_t new_flavor)
1263 {
1264 register int i;
1265 ipc_port_t old_port[EXC_TYPES_COUNT];
1266
1267 if (!thr_act)
1268 return KERN_INVALID_ARGUMENT;
1269
1270 if (exception_mask & ~EXC_MASK_ALL)
1271 return KERN_INVALID_ARGUMENT;
1272
1273 if (IP_VALID(new_port)) {
1274 switch (new_behavior) {
1275 case EXCEPTION_DEFAULT:
1276 case EXCEPTION_STATE:
1277 case EXCEPTION_STATE_IDENTITY:
1278 break;
1279 default:
1280 return KERN_INVALID_ARGUMENT;
1281 }
1282 }
1283
1284 /*
1285 * Check the validity of the thread_state_flavor by calling the
1286 * VALID_THREAD_STATE_FLAVOR architecture dependent macro defined in
1287 * osfmk/mach/ARCHITECTURE/thread_status.h
1288 */
1289 if (!VALID_THREAD_STATE_FLAVOR(new_flavor)) {
1290 return KERN_INVALID_ARGUMENT;
1291 }
1292
1293 act_lock(thr_act);
1294 if (!thr_act->active) {
1295 act_unlock(thr_act);
1296 return KERN_FAILURE;
1297 }
1298
1299 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
1300 if (exception_mask & (1 << i)) {
1301 old_port[i] = thr_act->exc_actions[i].port;
1302 thr_act->exc_actions[i].port =
1303 ipc_port_copy_send(new_port);
1304 thr_act->exc_actions[i].behavior = new_behavior;
1305 thr_act->exc_actions[i].flavor = new_flavor;
1306 } else
1307 old_port[i] = IP_NULL;
1308 }/* for */
1309 /*
1310 * Consume send rights without any lock held.
1311 */
1312 act_unlock(thr_act);
1313 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++)
1314 if (IP_VALID(old_port[i]))
1315 ipc_port_release_send(old_port[i]);
1316 if (IP_VALID(new_port)) /* consume send right */
1317 ipc_port_release_send(new_port);
1318
1319 return KERN_SUCCESS;
1320 }/* thread_set_exception_port */
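
/*
 * Illustrative user-level sketch (not part of this file): installing an
 * exception port on the current thread.  The port is created locally and a
 * send right inserted before handing it to the kernel.  Assumes
 * <mach/mach.h>; MACHINE_THREAD_STATE is used as a conservatively valid
 * flavor, and error checks are abbreviated.
 *
 *        mach_port_t exc_port;
 *        mach_port_t thread = mach_thread_self();
 *
 *        mach_port_allocate(mach_task_self(),
 *                           MACH_PORT_RIGHT_RECEIVE, &exc_port);
 *        mach_port_insert_right(mach_task_self(), exc_port, exc_port,
 *                               MACH_MSG_TYPE_MAKE_SEND);
 *        thread_set_exception_ports(thread, EXC_MASK_BAD_ACCESS, exc_port,
 *                                   EXCEPTION_DEFAULT, MACHINE_THREAD_STATE);
 *        mach_port_deallocate(mach_task_self(), thread);
 */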
1321
1322 kern_return_t
1323 task_set_exception_ports(
1324 task_t task,
1325 exception_mask_t exception_mask,
1326 ipc_port_t new_port,
1327 exception_behavior_t new_behavior,
1328 thread_state_flavor_t new_flavor)
1329 {
1330 register int i;
1331 ipc_port_t old_port[EXC_TYPES_COUNT];
1332
1333 if (task == TASK_NULL) {
1334 return KERN_INVALID_ARGUMENT;
1335 }
1336
1337 if (exception_mask & ~EXC_MASK_ALL) {
1338 return KERN_INVALID_ARGUMENT;
1339 }
1340
1341 if (IP_VALID(new_port)) {
1342 switch (new_behavior) {
1343 case EXCEPTION_DEFAULT:
1344 case EXCEPTION_STATE:
1345 case EXCEPTION_STATE_IDENTITY:
1346 break;
1347 default:
1348 return KERN_INVALID_ARGUMENT;
1349 }
1350 }
1351 /* Cannot easily check "new_flavor", but that just means that
1352 * the flavor in the generated exception message might be garbage:
1353 * GIGO */
1354
1355 itk_lock(task);
1356 if (task->itk_self == IP_NULL) {
1357 itk_unlock(task);
1358 return KERN_FAILURE;
1359 }
1360
1361 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
1362 if (exception_mask & (1 << i)) {
1363 old_port[i] = task->exc_actions[i].port;
1364 task->exc_actions[i].port =
1365 ipc_port_copy_send(new_port);
1366 task->exc_actions[i].behavior = new_behavior;
1367 task->exc_actions[i].flavor = new_flavor;
1368 } else
1369 old_port[i] = IP_NULL;
1370 }/* for */
1371
1372 /*
1373 * Consume send rights without any lock held.
1374 */
1375 itk_unlock(task);
1376 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++)
1377 if (IP_VALID(old_port[i]))
1378 ipc_port_release_send(old_port[i]);
1379 if (IP_VALID(new_port)) /* consume send right */
1380 ipc_port_release_send(new_port);
1381
1382 return KERN_SUCCESS;
1383 }/* task_set_exception_port */
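
/*
 * Illustrative user-level sketch (not part of this file): the task-wide
 * variant follows the same pattern as the thread call above, here covering
 * every exception type at once.  Assumes <mach/mach.h>; `exc_port' is a
 * send right prepared as in the previous example.
 *
 *        task_set_exception_ports(mach_task_self(), EXC_MASK_ALL, exc_port,
 *                                 EXCEPTION_DEFAULT, MACHINE_THREAD_STATE);
 */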
1384
1385 /*
1386 * Routine: thread/task_swap_exception_ports [kernel call]
1387 * Purpose:
1388 * Sets the thread/task exception port, flavor and
1389 * behavior for the exception types specified by the
1390 * mask.
1391 *
1392 * The old ports, behaviors and flavors are returned.
1393 * Count specifies the array sizes on input and
1394 * the number of returned ports etc. on output. The
1395 * arrays must be large enough to hold all the returned
1396 * data; MIG returns an error otherwise. The masks
1397 * array specifies the corresponding exception type(s).
1398 *
1399 * Conditions:
1400 * Nothing locked. If successful, consumes
1401 * the supplied send right.
1402 *
1403 * Returns up to [in] CountCnt elements.
1404 * Returns:
1405 * KERN_SUCCESS Changed the special port.
1406 * KERN_INVALID_ARGUMENT The thread is null,
1407 * Illegal mask bit set.
1408 * Illegal exception behavior
1409 * KERN_FAILURE The thread is dead.
1410 */
1411
1412 kern_return_t
1413 thread_swap_exception_ports(
1414 thread_act_t thr_act,
1415 exception_mask_t exception_mask,
1416 ipc_port_t new_port,
1417 exception_behavior_t new_behavior,
1418 thread_state_flavor_t new_flavor,
1419 exception_mask_array_t masks,
1420 mach_msg_type_number_t * CountCnt,
1421 exception_port_array_t ports,
1422 exception_behavior_array_t behaviors,
1423 thread_state_flavor_array_t flavors )
1424 {
1425 register int i,
1426 j,
1427 count;
1428 ipc_port_t old_port[EXC_TYPES_COUNT];
1429
1430 if (!thr_act)
1431 return KERN_INVALID_ARGUMENT;
1432
1433 if (exception_mask & ~EXC_MASK_ALL) {
1434 return KERN_INVALID_ARGUMENT;
1435 }
1436
1437 if (IP_VALID(new_port)) {
1438 switch (new_behavior) {
1439 case EXCEPTION_DEFAULT:
1440 case EXCEPTION_STATE:
1441 case EXCEPTION_STATE_IDENTITY:
1442 break;
1443 default:
1444 return KERN_INVALID_ARGUMENT;
1445 }
1446 }
1447 /* Cannot easily check "new_flavor", but that just means that
1448 * the flavor in the generated exception message might be garbage:
1449 * GIGO */
1450
1451 act_lock(thr_act);
1452 if (!thr_act->active) {
1453 act_unlock(thr_act);
1454 return KERN_FAILURE;
1455 }
1456
1457 count = 0;
1458
1459 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
1460 if (exception_mask & (1 << i)) {
1461 for (j = 0; j < count; j++) {
1462 /*
1463 * Search for an identical entry; if found,
1464 * set the corresponding mask bit for this exception.
1465 */
1466 if (thr_act->exc_actions[i].port == ports[j] &&
1467 thr_act->exc_actions[i].behavior ==behaviors[j]
1468 && thr_act->exc_actions[i].flavor ==flavors[j])
1469 {
1470 masks[j] |= (1 << i);
1471 break;
1472 }
1473 }/* for */
1474 if (j == count) {
1475 masks[j] = (1 << i);
1476 ports[j] =
1477 ipc_port_copy_send(thr_act->exc_actions[i].port);
1478
1479 behaviors[j] = thr_act->exc_actions[i].behavior;
1480 flavors[j] = thr_act->exc_actions[i].flavor;
1481 count++;
1482 }
1483
1484 old_port[i] = thr_act->exc_actions[i].port;
1485 thr_act->exc_actions[i].port =
1486 ipc_port_copy_send(new_port);
1487 thr_act->exc_actions[i].behavior = new_behavior;
1488 thr_act->exc_actions[i].flavor = new_flavor;
1489 if (count > *CountCnt) {
1490 break;
1491 }
1492 } else
1493 old_port[i] = IP_NULL;
1494 }/* for */
1495
1496 /*
1497 * Consume send rights without any lock held.
1498 */
1499 act_unlock(thr_act);
1500 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++)
1501 if (IP_VALID(old_port[i]))
1502 ipc_port_release_send(old_port[i]);
1503 if (IP_VALID(new_port)) /* consume send right */
1504 ipc_port_release_send(new_port);
1505 *CountCnt = count;
1506 return KERN_SUCCESS;
1507 }/* thread_swap_exception_ports */
1508
1509 kern_return_t
1510 task_swap_exception_ports(
1511 task_t task,
1512 exception_mask_t exception_mask,
1513 ipc_port_t new_port,
1514 exception_behavior_t new_behavior,
1515 thread_state_flavor_t new_flavor,
1516 exception_mask_array_t masks,
1517 mach_msg_type_number_t * CountCnt,
1518 exception_port_array_t ports,
1519 exception_behavior_array_t behaviors,
1520 thread_state_flavor_array_t flavors )
1521 {
1522 register int i,
1523 j,
1524 count;
1525 ipc_port_t old_port[EXC_TYPES_COUNT];
1526
1527 if (task == TASK_NULL)
1528 return KERN_INVALID_ARGUMENT;
1529
1530 if (exception_mask & ~EXC_MASK_ALL) {
1531 return KERN_INVALID_ARGUMENT;
1532 }
1533
1534 if (IP_VALID(new_port)) {
1535 switch (new_behavior) {
1536 case EXCEPTION_DEFAULT:
1537 case EXCEPTION_STATE:
1538 case EXCEPTION_STATE_IDENTITY:
1539 break;
1540 default:
1541 return KERN_INVALID_ARGUMENT;
1542 }
1543 }
1544 /* Cannot easily check "new_flavor", but that just means that
1545 * the flavor in the generated exception message might be garbage:
1546 * GIGO */
1547
1548 itk_lock(task);
1549 if (task->itk_self == IP_NULL) {
1550 itk_unlock(task);
1551 return KERN_FAILURE;
1552 }
1553
1554 count = 0;
1555
1556 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
1557 if (exception_mask & (1 << i)) {
1558 for (j = 0; j < count; j++) {
1559 /*
1560 * Search for an identical entry; if found,
1561 * set the corresponding mask bit for this exception.
1562 */
1563 if (task->exc_actions[i].port == ports[j] &&
1564 task->exc_actions[i].behavior == behaviors[j]
1565 && task->exc_actions[i].flavor == flavors[j])
1566 {
1567 masks[j] |= (1 << i);
1568 break;
1569 }
1570 }/* for */
1571 if (j == count) {
1572 masks[j] = (1 << i);
1573 ports[j] =
1574 ipc_port_copy_send(task->exc_actions[i].port);
1575 behaviors[j] = task->exc_actions[i].behavior;
1576 flavors[j] = task->exc_actions[i].flavor;
1577 count++;
1578 }
1579 old_port[i] = task->exc_actions[i].port;
1580 task->exc_actions[i].port =
1581 ipc_port_copy_send(new_port);
1582 task->exc_actions[i].behavior = new_behavior;
1583 task->exc_actions[i].flavor = new_flavor;
1584 if (count > *CountCnt) {
1585 break;
1586 }
1587 } else
1588 old_port[i] = IP_NULL;
1589 }/* for */
1590
1591
1592 /*
1593 * Consume send rights without any lock held.
1594 */
1595 itk_unlock(task);
1596 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++)
1597 if (IP_VALID(old_port[i]))
1598 ipc_port_release_send(old_port[i]);
1599 if (IP_VALID(new_port)) /* consume send right */
1600 ipc_port_release_send(new_port);
1601 *CountCnt = count;
1602
1603 return KERN_SUCCESS;
1604 }/* task_swap_exception_ports */
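
/*
 * Illustrative user-level sketch (not part of this file): atomically
 * installing a handler while capturing the previous settings, as a debugger
 * or crash reporter might do before restoring them later.  Assumes
 * <mach/mach.h>; `exc_port' is a prepared send right.
 *
 *        exception_mask_t        masks[EXC_TYPES_COUNT];
 *        mach_port_t             old_ports[EXC_TYPES_COUNT];
 *        exception_behavior_t    old_behaviors[EXC_TYPES_COUNT];
 *        thread_state_flavor_t   old_flavors[EXC_TYPES_COUNT];
 *        mach_msg_type_number_t  count = EXC_TYPES_COUNT;
 *
 *        task_swap_exception_ports(mach_task_self(), EXC_MASK_ALL, exc_port,
 *                                  EXCEPTION_DEFAULT, MACHINE_THREAD_STATE,
 *                                  masks, &count, old_ports,
 *                                  old_behaviors, old_flavors);
 */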
1605
1606 /*
1607 * Routine: thread/task_get_exception_ports [kernel call]
1608 * Purpose:
1609 * Clones a send right for each of the thread/task's exception
1610 * ports specified in the mask and returns the behavior
1611 * and flavor of said port.
1612 *
1613 * Returns up to [in] CountCnt elements.
1614 *
1615 * Conditions:
1616 * Nothing locked.
1617 * Returns:
1618 * KERN_SUCCESS Extracted a send right.
1619 * KERN_INVALID_ARGUMENT The thread is null,
1620 * Invalid special port,
1621 * Illegal mask bit set.
1622 * KERN_FAILURE The thread is dead.
1623 */
1624
1625 kern_return_t
1626 thread_get_exception_ports(
1627 thread_act_t thr_act,
1628 exception_mask_t exception_mask,
1629 exception_mask_array_t masks,
1630 mach_msg_type_number_t * CountCnt,
1631 exception_port_array_t ports,
1632 exception_behavior_array_t behaviors,
1633 thread_state_flavor_array_t flavors )
1634 {
1635 register int i,
1636 j,
1637 count;
1638
1639 if (!thr_act)
1640 return KERN_INVALID_ARGUMENT;
1641
1642 if (exception_mask & ~EXC_MASK_ALL) {
1643 return KERN_INVALID_ARGUMENT;
1644 }
1645
1646 act_lock(thr_act);
1647 if (!thr_act->active) {
1648 act_unlock(thr_act);
1649 return KERN_FAILURE;
1650 }
1651
1652 count = 0;
1653
1654 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
1655 if (exception_mask & (1 << i)) {
1656 for (j = 0; j < count; j++) {
1657 /*
1658 * Search for an identical entry; if found,
1659 * set the corresponding mask bit for this exception.
1660 */
1661 if (thr_act->exc_actions[i].port == ports[j] &&
1662 thr_act->exc_actions[i].behavior ==behaviors[j]
1663 && thr_act->exc_actions[i].flavor == flavors[j])
1664 {
1665 masks[j] |= (1 << i);
1666 break;
1667 }
1668 }/* for */
1669 if (j == count) {
1670 masks[j] = (1 << i);
1671 ports[j] =
1672 ipc_port_copy_send(thr_act->exc_actions[i].port);
1673 behaviors[j] = thr_act->exc_actions[i].behavior;
1674 flavors[j] = thr_act->exc_actions[i].flavor;
1675 count++;
1676 if (count >= *CountCnt) {
1677 break;
1678 }
1679 }
1680 }
1681 }/* for */
1682
1683 act_unlock(thr_act);
1684
1685 *CountCnt = count;
1686 return KERN_SUCCESS;
1687 }/* thread_get_exception_ports */
1688
1689 kern_return_t
1690 task_get_exception_ports(
1691 task_t task,
1692 exception_mask_t exception_mask,
1693 exception_mask_array_t masks,
1694 mach_msg_type_number_t * CountCnt,
1695 exception_port_array_t ports,
1696 exception_behavior_array_t behaviors,
1697 thread_state_flavor_array_t flavors )
1698 {
1699 register int i,
1700 j,
1701 count;
1702
1703 if (task == TASK_NULL)
1704 return KERN_INVALID_ARGUMENT;
1705
1706 if (exception_mask & ~EXC_MASK_ALL) {
1707 return KERN_INVALID_ARGUMENT;
1708 }
1709
1710 itk_lock(task);
1711 if (task->itk_self == IP_NULL) {
1712 itk_unlock(task);
1713 return KERN_FAILURE;
1714 }
1715
1716 count = 0;
1717
1718 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
1719 if (exception_mask & (1 << i)) {
1720 for (j = 0; j < count; j++) {
1721 /*
1722 * Search for an identical entry; if found,
1723 * set the corresponding mask bit for this exception.
1724 */
1725 if (task->exc_actions[i].port == ports[j] &&
1726 task->exc_actions[i].behavior == behaviors[j]
1727 && task->exc_actions[i].flavor == flavors[j])
1728 {
1729 masks[j] |= (1 << i);
1730 break;
1731 }
1732 }/* for */
1733 if (j == count) {
1734 masks[j] = (1 << i);
1735 ports[j] =
1736 ipc_port_copy_send(task->exc_actions[i].port);
1737 behaviors[j] = task->exc_actions[i].behavior;
1738 flavors[j] = task->exc_actions[i].flavor;
1739 count++;
1740 if (count > *CountCnt) {
1741 break;
1742 }
1743 }
1744 }
1745 }/* for */
1746
1747 itk_unlock(task);
1748
1749 *CountCnt = count;
1750 return KERN_SUCCESS;
1751 }/* task_get_exception_ports */
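
/*
 * Illustrative user-level sketch (not part of this file): inspecting the
 * current task's exception handlers without changing them.  Assumes
 * <mach/mach.h>.
 *
 *        exception_mask_t        masks[EXC_TYPES_COUNT];
 *        mach_port_t             ports[EXC_TYPES_COUNT];
 *        exception_behavior_t    behaviors[EXC_TYPES_COUNT];
 *        thread_state_flavor_t   flavors[EXC_TYPES_COUNT];
 *        mach_msg_type_number_t  count = EXC_TYPES_COUNT;
 *        kern_return_t           kr;
 *
 *        kr = task_get_exception_ports(mach_task_self(), EXC_MASK_ALL,
 *                                      masks, &count, ports,
 *                                      behaviors, flavors);
 *        ... on success, entries 0 .. count-1 describe the handlers ...
 */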