1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
7 *
8 * This file contains Original Code and/or Modifications of Original Code
9 * as defined in and that are subject to the Apple Public Source License
10 * Version 2.0 (the 'License'). You may not use this file except in
11 * compliance with the License. Please obtain a copy of the License at
12 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * file.
14 *
15 * The Original Code and all software distributed under the License are
16 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
17 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
18 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
20 * Please see the License for the specific language governing rights and
21 * limitations under the License.
22 *
23 * @APPLE_LICENSE_HEADER_END@
24 */
25 /*
26 * @OSF_COPYRIGHT@
27 */
28 /*
29 * Mach Operating System
30 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
31 * All Rights Reserved.
32 *
33 * Permission to use, copy, modify and distribute this software and its
34 * documentation is hereby granted, provided that both the copyright
35 * notice and this permission notice appear in all copies of the
36 * software, derivative works or modified versions, and any portions
37 * thereof, and that both notices appear in supporting documentation.
38 *
39 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
40 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
41 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
42 *
43 * Carnegie Mellon requests users of this software to return to
44 *
45 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
46 * School of Computer Science
47 * Carnegie Mellon University
48 * Pittsburgh PA 15213-3890
49 *
50 * any improvements or extensions that they make and grant Carnegie Mellon
51 * the rights to redistribute these changes.
52 */
53 /*
54 */
55
56 /*
57 * File: ipc_tt.c
58 * Purpose:
59 * Task and thread related IPC functions.
60 */
61
62 #include <mach/mach_types.h>
63 #include <mach/boolean.h>
64 #include <mach_rt.h>
65 #include <mach/kern_return.h>
66 #include <mach/mach_param.h>
67 #include <mach/task_special_ports.h>
68 #include <mach/thread_special_ports.h>
69 #include <mach/thread_status.h>
70 #include <mach/exception_types.h>
71 #include <mach/mach_traps.h>
72 #include <mach/task_server.h>
73 #include <mach/thread_act_server.h>
74 #include <mach/mach_host_server.h>
75 #include <mach/vm_map_server.h>
76 #include <kern/host.h>
77 #include <kern/ipc_tt.h>
78 #include <kern/thread_act.h>
79 #include <kern/misc_protos.h>
80 #include <vm/vm_pageout.h>
81
82 /*
83 * Routine: ipc_task_init
84 * Purpose:
85 * Initialize a task's IPC state.
86 *
87 * If the parent is non-null, some state will be inherited from it.
88 * The parent must be appropriately initialized.
89 * Conditions:
90 * Nothing locked.
91 */
92
93 void
94 ipc_task_init(
95 task_t task,
96 task_t parent)
97 {
98 ipc_space_t space;
99 ipc_port_t kport;
100 kern_return_t kr;
101 int i;
102
103
104 kr = ipc_space_create(&ipc_table_entries[0], &space);
105 if (kr != KERN_SUCCESS)
106 panic("ipc_task_init");
107
108
109 kport = ipc_port_alloc_kernel();
110 if (kport == IP_NULL)
111 panic("ipc_task_init");
112
113 itk_lock_init(task);
114 task->itk_self = kport;
115 task->itk_sself = ipc_port_make_send(kport);
116 task->itk_space = space;
117 space->is_fast = task->kernel_loaded;
118
119 if (parent == TASK_NULL) {
120 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
121 task->exc_actions[i].port = IP_NULL;
122 }/* for */
123 task->itk_host = ipc_port_make_send(realhost.host_self);
124 task->itk_bootstrap = IP_NULL;
125 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
126 task->itk_registered[i] = IP_NULL;
127 } else {
128 itk_lock(parent);
129 assert(parent->itk_self != IP_NULL);
130
131 /* inherit registered ports */
132
133 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
134 task->itk_registered[i] =
135 ipc_port_copy_send(parent->itk_registered[i]);
136
137 /* inherit exception and bootstrap ports */
138
139 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
140 task->exc_actions[i].port =
141 ipc_port_copy_send(parent->exc_actions[i].port);
142 task->exc_actions[i].flavor =
143 parent->exc_actions[i].flavor;
144 task->exc_actions[i].behavior =
145 parent->exc_actions[i].behavior;
146 }/* for */
147 task->itk_host =
148 ipc_port_copy_send(parent->itk_host);
149
150 task->itk_bootstrap =
151 ipc_port_copy_send(parent->itk_bootstrap);
152
153 itk_unlock(parent);
154 }
155 }
156
157 /*
158 * Routine: ipc_task_enable
159 * Purpose:
160 * Enable a task for IPC access.
161 * Conditions:
162 * Nothing locked.
163 */
164
165 void
166 ipc_task_enable(
167 task_t task)
168 {
169 ipc_port_t kport;
170
171 itk_lock(task);
172 kport = task->itk_self;
173 if (kport != IP_NULL)
174 ipc_kobject_set(kport, (ipc_kobject_t) task, IKOT_TASK);
175 itk_unlock(task);
176 }
177
178 /*
179 * Routine: ipc_task_disable
180 * Purpose:
181 * Disable IPC access to a task.
182 * Conditions:
183 * Nothing locked.
184 */
185
186 void
187 ipc_task_disable(
188 task_t task)
189 {
190 ipc_port_t kport;
191
192 itk_lock(task);
193 kport = task->itk_self;
194 if (kport != IP_NULL)
195 ipc_kobject_set(kport, IKO_NULL, IKOT_NONE);
196 itk_unlock(task);
197 }
198
199 /*
200 * Routine: ipc_task_terminate
201 * Purpose:
202 * Clean up and destroy a task's IPC state.
203 * Conditions:
204 * Nothing locked. The task must be suspended.
205 * (Or the current thread must be in the task.)
206 */
207
208 void
209 ipc_task_terminate(
210 task_t task)
211 {
212 ipc_port_t kport;
213 int i;
214
215 itk_lock(task);
216 kport = task->itk_self;
217
218 if (kport == IP_NULL) {
219 /* the task is already terminated (can this happen?) */
220 itk_unlock(task);
221 return;
222 }
223
224 task->itk_self = IP_NULL;
225 itk_unlock(task);
226
227 /* release the naked send rights */
228
229 if (IP_VALID(task->itk_sself))
230 ipc_port_release_send(task->itk_sself);
231
232 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
233 if (IP_VALID(task->exc_actions[i].port)) {
234 ipc_port_release_send(task->exc_actions[i].port);
235 }
236 }/* for */
237 if (IP_VALID(task->itk_host))
238 ipc_port_release_send(task->itk_host);
239
240 if (IP_VALID(task->itk_bootstrap))
241 ipc_port_release_send(task->itk_bootstrap);
242
243 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
244 if (IP_VALID(task->itk_registered[i]))
245 ipc_port_release_send(task->itk_registered[i]);
246
247 ipc_port_release_send(task->wired_ledger_port);
248 ipc_port_release_send(task->paged_ledger_port);
249
250 /* destroy the kernel port */
251 ipc_port_dealloc_kernel(kport);
252 }
253
254 /*
255 * Routine: ipc_thread_init
256 * Purpose:
257 * Initialize a thread's IPC state.
258 * Conditions:
259 * Nothing locked.
260 */
261
262 void
263 ipc_thread_init(
264 thread_t thread)
265 {
266 ipc_kmsg_queue_init(&thread->ith_messages);
267 thread->ith_mig_reply = MACH_PORT_NULL;
268 thread->ith_rpc_reply = IP_NULL;
269 }
270
271 /*
272 * Routine: ipc_thread_terminate
273 * Purpose:
274 * Clean up and destroy a thread's IPC state.
275 * Conditions:
276 * Nothing locked. The thread must be suspended.
277 * (Or be the current thread.)
278 */
279
280 void
281 ipc_thread_terminate(
282 thread_t thread)
283 {
284 assert(ipc_kmsg_queue_empty(&thread->ith_messages));
285
286 if (thread->ith_rpc_reply != IP_NULL)
287 ipc_port_dealloc_reply(thread->ith_rpc_reply);
288 thread->ith_rpc_reply = IP_NULL;
289 }
290
291 /*
292 * Routine: ipc_thr_act_init
293 * Purpose:
294 * Initialize a thr_act's IPC state.
295 * Conditions:
296 * Nothing locked.
297 */
298
299 void
300 ipc_thr_act_init(task_t task, thread_act_t thr_act)
301 {
302 ipc_port_t kport; int i;
303
304 kport = ipc_port_alloc_kernel();
305 if (kport == IP_NULL)
306 panic("ipc_thr_act_init");
307
308 thr_act->ith_self = kport;
309 thr_act->ith_sself = ipc_port_make_send(kport);
310
311 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++)
312 thr_act->exc_actions[i].port = IP_NULL;
313
314 ipc_kobject_set(kport, (ipc_kobject_t) thr_act, IKOT_ACT);
315 }
316
317 void
318 ipc_thr_act_disable(thread_act_t thr_act)
319 {
320 int i;
321 ipc_port_t kport;
322
323 kport = thr_act->ith_self;
324
325 if (kport != IP_NULL)
326 ipc_kobject_set(kport, IKO_NULL, IKOT_NONE);
327 }
328
329 void
330 ipc_thr_act_terminate(thread_act_t thr_act)
331 {
332 ipc_port_t kport; int i;
333
334 kport = thr_act->ith_self;
335
336 if (kport == IP_NULL) {
337 /* the thread is already terminated (can this happen?) */
338 return;
339 }
340
341 thr_act->ith_self = IP_NULL;
342
343 /* release the naked send rights */
344
345 if (IP_VALID(thr_act->ith_sself))
346 ipc_port_release_send(thr_act->ith_sself);
347 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
348 if (IP_VALID(thr_act->exc_actions[i].port))
349 ipc_port_release_send(thr_act->exc_actions[i].port);
350 }
351
352 /* destroy the kernel port */
353 ipc_port_dealloc_kernel(kport);
354 }
355
356 /*
357 * Routine: retrieve_task_self_fast
358 * Purpose:
359 * Optimized version of retrieve_task_self
360 * that only works for the current task.
361 *
362 * Return a send right (possibly null/dead)
363 * for the task's user-visible self port.
364 * Conditions:
365 * Nothing locked.
366 */
367
368 ipc_port_t
369 retrieve_task_self_fast(
370 register task_t task)
371 {
372 register ipc_port_t port;
373
374 assert(task == current_task());
375
376 itk_lock(task);
377 assert(task->itk_self != IP_NULL);
378
379 if ((port = task->itk_sself) == task->itk_self) {
380 /* no interposing */
381
382 ip_lock(port);
383 assert(ip_active(port));
384 ip_reference(port);
385 port->ip_srights++;
386 ip_unlock(port);
387 } else
388 port = ipc_port_copy_send(port);
389 itk_unlock(task);
390
391 return port;
392 }
393
394 /*
395 * Routine: retrieve_act_self_fast
396 * Purpose:
397 * Optimized version of retrieve_thread_self
398 * that only works for the current thread.
399 *
400 * Return a send right (possibly null/dead)
401 * for the thread's user-visible self port.
402 * Conditions:
403 * Nothing locked.
404 */
405
406 ipc_port_t
407 retrieve_act_self_fast(thread_act_t thr_act)
408 {
409 register ipc_port_t port;
410
411 assert(thr_act == current_act());
412 act_lock(thr_act);
413 assert(thr_act->ith_self != IP_NULL);
414
415 if ((port = thr_act->ith_sself) == thr_act->ith_self) {
416 /* no interposing */
417
418 ip_lock(port);
419 assert(ip_active(port));
420 ip_reference(port);
421 port->ip_srights++;
422 ip_unlock(port);
423 } else
424 port = ipc_port_copy_send(port);
425 act_unlock(thr_act);
426
427 return port;
428 }
429
430 /*
431 * Routine: task_self_trap [mach trap]
432 * Purpose:
433 * Give the caller send rights for his own task port.
434 * Conditions:
435 * Nothing locked.
436 * Returns:
437 * MACH_PORT_NULL if there are any resource failures
438 * or other errors.
439 */
440
441 mach_port_name_t
442 task_self_trap(void)
443 {
444 task_t task = current_task();
445 ipc_port_t sright;
446
447 sright = retrieve_task_self_fast(task);
448 return ipc_port_copyout_send(sright, task->itk_space);
449 }
450
451 /*
452 * Routine: thread_self_trap [mach trap]
453 * Purpose:
454 * Give the caller send rights for his own thread port.
455 * Conditions:
456 * Nothing locked.
457 * Returns:
458 * MACH_PORT_NULL if there are any resource failures
459 * or other errors.
460 */
461
462 mach_port_name_t
463 thread_self_trap(void)
464 {
465 thread_act_t thr_act = current_act();
466 task_t task = thr_act->task;
467 ipc_port_t sright;
468
469 sright = retrieve_act_self_fast(thr_act);
470 return ipc_port_copyout_send(sright, task->itk_space);
471 }
472
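/*
 * Illustrative user-space usage (a minimal sketch, not part of this file):
 * task_self_trap and thread_self_trap are normally reached through the
 * mach_task_self() and mach_thread_self() wrappers from <mach/mach.h>.
 *
 *	mach_port_t task   = mach_task_self();	 (cached name; not deallocated)
 *	mach_port_t thread = mach_thread_self(); (caller gains a send right)
 *	...
 *	mach_port_deallocate(mach_task_self(), thread);
 */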
473 /*
474 * Routine: mach_reply_port [mach trap]
475 * Purpose:
476 * Allocate a port for the caller.
477 * Conditions:
478 * Nothing locked.
479 * Returns:
480 * MACH_PORT_NULL if there are any resource failures
481 * or other errors.
482 */
483
484 mach_port_name_t
485 mach_reply_port(void)
486 {
487 ipc_port_t port;
488 mach_port_name_t name;
489 kern_return_t kr;
490
491 kr = ipc_port_alloc(current_task()->itk_space, &name, &port);
492 if (kr == KERN_SUCCESS)
493 ip_unlock(port);
494 else
495 name = MACH_PORT_NULL;
496
497 return name;
498 }
499
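/*
 * Illustrative user-space usage (a minimal sketch): the message layer uses
 * mach_reply_port() to obtain a receive right for RPC replies.
 *
 *	mach_port_t reply = mach_reply_port();
 *	...use reply as the msgh_local_port of a mach_msg() send/receive...
 *	mach_port_mod_refs(mach_task_self(), reply, MACH_PORT_RIGHT_RECEIVE, -1);
 */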
500 /*
501 * Routine: task_get_special_port [kernel call]
502 * Purpose:
503 * Clones a send right for one of the task's
504 * special ports.
505 * Conditions:
506 * Nothing locked.
507 * Returns:
508 * KERN_SUCCESS Extracted a send right.
509 * KERN_INVALID_ARGUMENT The task is null.
510 * KERN_FAILURE The task/space is dead.
511 * KERN_INVALID_ARGUMENT Invalid special port.
512 */
513
514 kern_return_t
515 task_get_special_port(
516 task_t task,
517 int which,
518 ipc_port_t *portp)
519 {
520 ipc_port_t *whichp;
521 ipc_port_t port;
522
523 if (task == TASK_NULL)
524 return KERN_INVALID_ARGUMENT;
525
526 switch (which) {
527 case TASK_KERNEL_PORT:
528 whichp = &task->itk_sself;
529 break;
530
531 case TASK_HOST_PORT:
532 whichp = &task->itk_host;
533 break;
534
535 case TASK_BOOTSTRAP_PORT:
536 whichp = &task->itk_bootstrap;
537 break;
538
539 case TASK_WIRED_LEDGER_PORT:
540 whichp = &task->wired_ledger_port;
541 break;
542
543 case TASK_PAGED_LEDGER_PORT:
544 whichp = &task->paged_ledger_port;
545 break;
546
547 default:
548 return KERN_INVALID_ARGUMENT;
549 }
550
551 itk_lock(task);
552 if (task->itk_self == IP_NULL) {
553 itk_unlock(task);
554 return KERN_FAILURE;
555 }
556
557 port = ipc_port_copy_send(*whichp);
558 itk_unlock(task);
559
560 *portp = port;
561 return KERN_SUCCESS;
562 }
563
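/*
 * Illustrative user-space usage (a minimal sketch): fetch a copy of the
 * caller's bootstrap port and release it when done.
 *
 *	mach_port_t bootstrap;
 *	kern_return_t kr;
 *
 *	kr = task_get_special_port(mach_task_self(), TASK_BOOTSTRAP_PORT,
 *				   &bootstrap);
 *	if (kr == KERN_SUCCESS && MACH_PORT_VALID(bootstrap)) {
 *		...
 *		mach_port_deallocate(mach_task_self(), bootstrap);
 *	}
 */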
564 /*
565 * Routine: task_set_special_port [kernel call]
566 * Purpose:
567 * Changes one of the task's special ports,
568 * setting it to the supplied send right.
569 * Conditions:
570 * Nothing locked. If successful, consumes
571 * the supplied send right.
572 * Returns:
573 * KERN_SUCCESS Changed the special port.
574 * KERN_INVALID_ARGUMENT The task is null.
575 * KERN_FAILURE The task/space is dead.
576 * KERN_INVALID_ARGUMENT Invalid special port.
577 */
578
579 kern_return_t
580 task_set_special_port(
581 task_t task,
582 int which,
583 ipc_port_t port)
584 {
585 ipc_port_t *whichp;
586 ipc_port_t old;
587
588 if (task == TASK_NULL)
589 return KERN_INVALID_ARGUMENT;
590
591 switch (which) {
592 case TASK_KERNEL_PORT:
593 whichp = &task->itk_sself;
594 break;
595
596 case TASK_HOST_PORT:
597 whichp = &task->itk_host;
598 break;
599
600 case TASK_BOOTSTRAP_PORT:
601 whichp = &task->itk_bootstrap;
602 break;
603
604 case TASK_WIRED_LEDGER_PORT:
605 whichp = &task->wired_ledger_port;
606 break;
607
608 case TASK_PAGED_LEDGER_PORT:
609 whichp = &task->paged_ledger_port;
610 break;
611
612 default:
613 return KERN_INVALID_ARGUMENT;
614 }/* switch */
615
616 itk_lock(task);
617 if (task->itk_self == IP_NULL) {
618 itk_unlock(task);
619 return KERN_FAILURE;
620 }
621
622 old = *whichp;
623 *whichp = port;
624 itk_unlock(task);
625
626 if (IP_VALID(old))
627 ipc_port_release_send(old);
628 return KERN_SUCCESS;
629 }
630
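/*
 * Illustrative user-space usage (a minimal sketch, with child_task and
 * new_bootstrap assumed to be rights the caller already holds): a launcher
 * giving a freshly created task a different bootstrap port.
 *
 *	kr = task_set_special_port(child_task, TASK_BOOTSTRAP_PORT,
 *				   new_bootstrap);
 *
 * See the Conditions note above for how the supplied send right is consumed.
 */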
631
632 /*
633 * Routine: mach_ports_register [kernel call]
634 * Purpose:
635 * Stash a handful of port send rights in the task.
636 * Child tasks will inherit these rights, but they
637 * must use mach_ports_lookup to acquire them.
638 *
639 * The rights are supplied in a (wired) kalloc'd segment.
640 * Rights which aren't supplied are assumed to be null.
641 * Conditions:
642 * Nothing locked. If successful, consumes
643 * the supplied rights and memory.
644 * Returns:
645 * KERN_SUCCESS Stashed the port rights.
646 * KERN_INVALID_ARGUMENT The task is null.
647 * KERN_INVALID_ARGUMENT The task is dead.
648 * KERN_INVALID_ARGUMENT Too many port rights supplied.
649 */
650
651 kern_return_t
652 mach_ports_register(
653 task_t task,
654 mach_port_array_t memory,
655 mach_msg_type_number_t portsCnt)
656 {
657 ipc_port_t ports[TASK_PORT_REGISTER_MAX];
658 int i;
659
660 if ((task == TASK_NULL) ||
661 (portsCnt > TASK_PORT_REGISTER_MAX))
662 return KERN_INVALID_ARGUMENT;
663
664 /*
665 * Pad the port rights with nulls.
666 */
667
668 for (i = 0; i < portsCnt; i++)
669 ports[i] = memory[i];
670 for (; i < TASK_PORT_REGISTER_MAX; i++)
671 ports[i] = IP_NULL;
672
673 itk_lock(task);
674 if (task->itk_self == IP_NULL) {
675 itk_unlock(task);
676 return KERN_INVALID_ARGUMENT;
677 }
678
679 /*
680 * Replace the old send rights with the new.
681 * Release the old rights after unlocking.
682 */
683
684 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
685 ipc_port_t old;
686
687 old = task->itk_registered[i];
688 task->itk_registered[i] = ports[i];
689 ports[i] = old;
690 }
691
692 itk_unlock(task);
693
694 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
695 if (IP_VALID(ports[i]))
696 ipc_port_release_send(ports[i]);
697
698 /*
699 * Now that the operation is known to be successful,
700 * we can free the memory.
701 */
702
703 if (portsCnt != 0)
704 kfree((vm_offset_t) memory,
705 (vm_size_t) (portsCnt * sizeof(mach_port_t)));
706
707 return KERN_SUCCESS;
708 }
709
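/*
 * Illustrative user-space usage (a minimal sketch): stash up to
 * TASK_PORT_REGISTER_MAX send rights so that child tasks can retrieve them
 * with mach_ports_lookup().  some_service_port is an assumed send right
 * already held by the caller.
 *
 *	mach_port_t init_ports[1] = { some_service_port };
 *
 *	kr = mach_ports_register(mach_task_self(), init_ports, 1);
 */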
710 /*
711 * Routine: mach_ports_lookup [kernel call]
712 * Purpose:
713 * Retrieves (clones) the stashed port send rights.
714 * Conditions:
715 * Nothing locked. If successful, the caller gets
716 * rights and memory.
717 * Returns:
718 * KERN_SUCCESS Retrieved the send rights.
719 * KERN_INVALID_ARGUMENT The task is null.
720 * KERN_INVALID_ARGUMENT The task is dead.
721 * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
722 */
723
724 kern_return_t
725 mach_ports_lookup(
726 task_t task,
727 mach_port_array_t *portsp,
728 mach_msg_type_number_t *portsCnt)
729 {
730 vm_offset_t memory;
731 vm_size_t size;
732 ipc_port_t *ports;
733 int i;
734
735 kern_return_t kr;
736
737 if (task == TASK_NULL)
738 return KERN_INVALID_ARGUMENT;
739
740 size = (vm_size_t) (TASK_PORT_REGISTER_MAX * sizeof(ipc_port_t));
741
742 memory = kalloc(size);
743 if (memory == 0)
744 return KERN_RESOURCE_SHORTAGE;
745
746 itk_lock(task);
747 if (task->itk_self == IP_NULL) {
748 itk_unlock(task);
749
750 kfree(memory, size);
751 return KERN_INVALID_ARGUMENT;
752 }
753
754 ports = (ipc_port_t *) memory;
755
756 /*
757 * Clone port rights. Because kalloc'd memory
758 * is wired, we won't fault while holding the task lock.
759 */
760
761 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
762 ports[i] = ipc_port_copy_send(task->itk_registered[i]);
763
764 itk_unlock(task);
765
766 *portsp = (mach_port_array_t) ports;
767 *portsCnt = TASK_PORT_REGISTER_MAX;
768 return KERN_SUCCESS;
769 }
770
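/*
 * Illustrative user-space usage (a minimal sketch): a child task retrieving
 * the registered rights.  The returned array arrives as out-of-line memory,
 * so the caller releases both the rights and the buffer.
 *
 *	mach_port_array_t ports;
 *	mach_msg_type_number_t count;
 *	unsigned int i;
 *
 *	kr = mach_ports_lookup(mach_task_self(), &ports, &count);
 *	if (kr == KERN_SUCCESS) {
 *		for (i = 0; i < count; i++)
 *			if (MACH_PORT_VALID(ports[i]))
 *				mach_port_deallocate(mach_task_self(), ports[i]);
 *		vm_deallocate(mach_task_self(), (vm_address_t) ports,
 *			      count * sizeof (ports[0]));
 *	}
 */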
771 /*
772 * Routine: convert_port_to_locked_task
773 * Purpose:
774 * Internal helper routine to convert from a port to a locked
775 * task. Used by several routines that try to convert from a
776 * task port to a reference on some task related object.
777 * Conditions:
778 * Nothing locked, blocking OK.
779 */
780 task_t
781 convert_port_to_locked_task(ipc_port_t port)
782 {
783 while (IP_VALID(port)) {
784 task_t task;
785
786 ip_lock(port);
787 if (!ip_active(port) || (ip_kotype(port) != IKOT_TASK)) {
788 ip_unlock(port);
789 return TASK_NULL;
790 }
791 task = (task_t) port->ip_kobject;
792 assert(task != TASK_NULL);
793
794 /*
795 * Normal lock ordering puts task_lock() before ip_lock().
796 * Attempt out-of-order locking here.
797 */
798 if (task_lock_try(task)) {
799 ip_unlock(port);
800 return(task);
801 }
802
803 ip_unlock(port);
804 mutex_pause();
805 }
806 return TASK_NULL;
807 }
808
809 /*
810 * Routine: convert_port_to_task
811 * Purpose:
812 * Convert from a port to a task.
813 * Doesn't consume the port ref; produces a task ref,
814 * which may be null.
815 * Conditions:
816 * Nothing locked.
817 */
818 task_t
819 convert_port_to_task(
820 ipc_port_t port)
821 {
822 task_t task;
823
824 task = convert_port_to_locked_task(port);
825 if (task) {
826 task->ref_count++;
827 task_unlock(task);
828 }
829 return task;
830 }
831
832 /*
833 * Routine: convert_port_to_space
834 * Purpose:
835 * Convert from a port to a space.
836 * Doesn't consume the port ref; produces a space ref,
837 * which may be null.
838 * Conditions:
839 * Nothing locked.
840 */
841 ipc_space_t
842 convert_port_to_space(
843 ipc_port_t port)
844 {
845 ipc_space_t space;
846 task_t task;
847
848 task = convert_port_to_locked_task(port);
849
850 if (task == TASK_NULL)
851 return IPC_SPACE_NULL;
852
853 if (!task->active) {
854 task_unlock(task);
855 return IPC_SPACE_NULL;
856 }
857
858 space = task->itk_space;
859 is_reference(space);
860 task_unlock(task);
861 return (space);
862 }
863
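/*
 * In-kernel callers pair each successful conversion with the matching
 * release; a minimal sketch (illustrative only):
 *
 *	task_t task = convert_port_to_task(port);
 *	if (task != TASK_NULL) {
 *		...
 *		task_deallocate(task);
 *	}
 *
 *	ipc_space_t space = convert_port_to_space(port);
 *	if (space != IPC_SPACE_NULL) {
 *		...
 *		space_deallocate(space);
 *	}
 */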
864 upl_t
865 convert_port_to_upl(
866 ipc_port_t port)
867 {
868 upl_t upl;
869
870 ip_lock(port);
871 if (!ip_active(port) || (ip_kotype(port) != IKOT_UPL)) {
872 ip_unlock(port);
873 return (upl_t)NULL;
874 }
875 upl = (upl_t) port->ip_kobject;
876 ip_unlock(port);
877 upl_lock(upl);
878 upl->ref_count+=1;
879 upl_unlock(upl);
880 return upl;
881 }
882
883 mach_port_t
884 convert_upl_to_port(
885 upl_t upl)
886 {
887 return MACH_PORT_NULL;
888 }
889
890 __private_extern__ void
891 upl_no_senders(
892 upl_t upl,
893 mach_port_mscount_t mscount)
894 {
895 return;
896 }
897
898 /*
899 * Routine: convert_port_entry_to_map
900 * Purpose:
901 * Convert from a port specifying an entry or a task
902 * to a map. Doesn't consume the port ref; produces a map ref,
903 * which may be null. Unlike convert_port_to_map, the
904 * port may be either task-backed or named-entry-backed.
905 * Conditions:
906 * Nothing locked.
907 */
908
909
910 vm_map_t
911 convert_port_entry_to_map(
912 ipc_port_t port)
913 {
914 task_t task;
915 vm_map_t map;
916 vm_named_entry_t named_entry;
917
918 if(IP_VALID(port) && (ip_kotype(port) == IKOT_NAMED_ENTRY)) {
919 while(TRUE) {
920 ip_lock(port);
921 if(ip_active(port) && (ip_kotype(port)
922 == IKOT_NAMED_ENTRY)) {
923 named_entry =
924 (vm_named_entry_t)port->ip_kobject;
925 if (!(mutex_try(&(named_entry)->Lock))) {
926 ip_unlock(port);
927 mutex_pause();
928 continue;
929 }
930 named_entry->ref_count++;
931 mutex_unlock(&(named_entry)->Lock);
932 ip_unlock(port);
933 if ((named_entry->is_sub_map) &&
934 (named_entry->protection
935 & VM_PROT_WRITE)) {
936 map = named_entry->backing.map;
937 } else {
938 mach_destroy_memory_entry(port);
939 return VM_MAP_NULL;
940 }
941 vm_map_reference_swap(map);
942 mach_destroy_memory_entry(port);
943 break;
944 }
945 else
946 return VM_MAP_NULL;
947 }
948 } else {
949 task_t task;
950
951 task = convert_port_to_locked_task(port);
952
953 if (task == TASK_NULL)
954 return VM_MAP_NULL;
955
956 if (!task->active) {
957 task_unlock(task);
958 return VM_MAP_NULL;
959 }
960
961 map = task->map;
962 vm_map_reference_swap(map);
963 task_unlock(task);
964 }
965
966 return map;
967 }
968
969 /*
970 * Routine: convert_port_entry_to_object
971 * Purpose:
972 * Convert from a port specifying a named entry to an
973 * object. Doesn't consume the port ref; produces an object ref,
974 * which may be null.
975 * Conditions:
976 * Nothing locked.
977 */
978
979
980 vm_object_t
981 convert_port_entry_to_object(
982 ipc_port_t port)
983 {
984 vm_object_t object;
985 vm_named_entry_t named_entry;
986
987 if(IP_VALID(port) && (ip_kotype(port) == IKOT_NAMED_ENTRY)) {
988 while(TRUE) {
989 ip_lock(port);
990 if(ip_active(port) && (ip_kotype(port)
991 == IKOT_NAMED_ENTRY)) {
992 named_entry =
993 (vm_named_entry_t)port->ip_kobject;
994 if (!(mutex_try(&(named_entry)->Lock))) {
995 ip_unlock(port);
996 mutex_pause();
997 continue;
998 }
999 named_entry->ref_count++;
1000 mutex_unlock(&(named_entry)->Lock);
1001 ip_unlock(port);
1002 if ((!named_entry->is_sub_map) &&
1003 (named_entry->protection
1004 & VM_PROT_WRITE)) {
1005 object = named_entry->object;
1006 } else {
1007 mach_destroy_memory_entry(port);
1008 return (vm_object_t)NULL;
1009 }
1010 vm_object_reference(named_entry->object);
1011 mach_destroy_memory_entry(port);
1012 break;
1013 }
1014 else
1015 return (vm_object_t)NULL;
1016 }
1017 } else {
1018 return (vm_object_t)NULL;
1019 }
1020
1021 return object;
1022 }
1023
1024 /*
1025 * Routine: convert_port_to_map
1026 * Purpose:
1027 * Convert from a port to a map.
1028 * Doesn't consume the port ref; produces a map ref,
1029 * which may be null.
1030 * Conditions:
1031 * Nothing locked.
1032 */
1033
1034 vm_map_t
1035 convert_port_to_map(
1036 ipc_port_t port)
1037 {
1038 task_t task;
1039 vm_map_t map;
1040
1041 task = convert_port_to_locked_task(port);
1042
1043 if (task == TASK_NULL)
1044 return VM_MAP_NULL;
1045
1046 if (!task->active) {
1047 task_unlock(task);
1048 return VM_MAP_NULL;
1049 }
1050
1051 map = task->map;
1052 vm_map_reference_swap(map);
1053 task_unlock(task);
1054 return map;
1055 }
1056
1057
1058 /*
1059 * Routine: convert_port_to_act
1060 * Purpose:
1061 * Convert from a port to a thr_act.
1062 * Doesn't consume the port ref; produces a thr_act ref,
1063 * which may be null.
1064 * Conditions:
1065 * Nothing locked.
1066 */
1067
1068 thread_act_t
1069 convert_port_to_act( ipc_port_t port )
1070 {
1071 boolean_t r;
1072 thread_act_t thr_act = 0;
1073
1074 r = FALSE;
1075 while (!r && IP_VALID(port)) {
1076 ip_lock(port);
1077 r = ref_act_port_locked(port, &thr_act);
1078 /* port unlocked */
1079 }
1080 return (thr_act);
1081 }
1082
1083 boolean_t
1084 ref_act_port_locked( ipc_port_t port, thread_act_t *pthr_act )
1085 {
1086 thread_act_t thr_act;
1087
1088 thr_act = 0;
1089 if (ip_active(port) &&
1090 (ip_kotype(port) == IKOT_ACT)) {
1091 thr_act = (thread_act_t) port->ip_kobject;
1092 assert(thr_act != THR_ACT_NULL);
1093
1094 /*
1095 * Normal lock ordering is act_lock(), then ip_lock().
1096 * Allow out-of-order locking here, using
1097 * act_locked_act_reference() to accommodate it.
1098 */
1099 if (!act_lock_try(thr_act)) {
1100 ip_unlock(port);
1101 mutex_pause();
1102 return (FALSE);
1103 }
1104 act_locked_act_reference(thr_act);
1105 act_unlock(thr_act);
1106 }
1107 *pthr_act = thr_act;
1108 ip_unlock(port);
1109 return (TRUE);
1110 }
1111
1112 /*
1113 * Routine: port_name_to_act
1114 * Purpose:
1115 * Convert from a port name to an act reference
1116 * A name of MACH_PORT_NULL is valid for the null act.
1117 * Conditions:
1118 * Nothing locked.
1119 */
1120 thread_act_t
1121 port_name_to_act(
1122 mach_port_name_t name)
1123 {
1124 thread_act_t thr_act = THR_ACT_NULL;
1125 ipc_port_t kern_port;
1126 kern_return_t kr;
1127
1128 if (MACH_PORT_VALID(name)) {
1129 kr = ipc_object_copyin(current_space(), name,
1130 MACH_MSG_TYPE_COPY_SEND,
1131 (ipc_object_t *) &kern_port);
1132 if (kr != KERN_SUCCESS)
1133 return THR_ACT_NULL;
1134
1135 thr_act = convert_port_to_act(kern_port);
1136
1137 if (IP_VALID(kern_port))
1138 ipc_port_release_send(kern_port);
1139 }
1140 return thr_act;
1141 }
1142
1143 task_t
1144 port_name_to_task(
1145 mach_port_name_t name)
1146 {
1147 ipc_port_t kern_port;
1148 kern_return_t kr;
1149 task_t task = TASK_NULL;
1150
1151 if (MACH_PORT_VALID(name)) {
1152 kr = ipc_object_copyin(current_space(), name,
1153 MACH_MSG_TYPE_COPY_SEND,
1154 (ipc_object_t *) &kern_port);
1155 if (kr != KERN_SUCCESS)
1156 return TASK_NULL;
1157
1158 task = convert_port_to_task(kern_port);
1159
1160 if (IP_VALID(kern_port))
1161 ipc_port_release_send(kern_port);
1162 }
1163 return task;
1164 }
1165
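/*
 * Illustrative in-kernel usage (a minimal sketch): trap handlers use the
 * port_name_to_* routines to go from a name in the caller's space to a
 * referenced object, and drop the reference when finished.
 *
 *	thread_act_t act = port_name_to_act(name);
 *	if (act != THR_ACT_NULL) {
 *		...
 *		act_deallocate(act);
 *	}
 *
 *	task_t task = port_name_to_task(name);
 *	if (task != TASK_NULL) {
 *		...
 *		task_deallocate(task);
 *	}
 */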
1166 /*
1167 * Routine: convert_task_to_port
1168 * Purpose:
1169 * Convert from a task to a port.
1170 * Consumes a task ref; produces a naked send right
1171 * which may be invalid.
1172 * Conditions:
1173 * Nothing locked.
1174 */
1175
1176 ipc_port_t
1177 convert_task_to_port(
1178 task_t task)
1179 {
1180 ipc_port_t port;
1181
1182 itk_lock(task);
1183 if (task->itk_self != IP_NULL)
1184 #if NORMA_TASK
1185 if (task->map == VM_MAP_NULL)
1186 /* norma placeholder task */
1187 port = ipc_port_copy_send(task->itk_self);
1188 else
1189 #endif /* NORMA_TASK */
1190 port = ipc_port_make_send(task->itk_self);
1191 else
1192 port = IP_NULL;
1193 itk_unlock(task);
1194
1195 task_deallocate(task);
1196 return port;
1197 }
1198
1199 /*
1200 * Routine: convert_act_to_port
1201 * Purpose:
1202 * Convert from a thr_act to a port.
1203 * Consumes a thr_act ref; produces a naked send right
1204 * which may be invalid.
1205 * Conditions:
1206 * Nothing locked.
1207 */
1208
1209 ipc_port_t
1210 convert_act_to_port(
1211 	thread_act_t		thr_act)
1212 {
1213 ipc_port_t port;
1214
1215 act_lock(thr_act);
1216 if (thr_act->ith_self != IP_NULL)
1217 port = ipc_port_make_send(thr_act->ith_self);
1218 else
1219 port = IP_NULL;
1220 act_unlock(thr_act);
1221
1222 act_deallocate(thr_act);
1223 return port;
1224 }
1225
1226 /*
1227 * Routine: space_deallocate
1228 * Purpose:
1229 * Deallocate a space ref produced by convert_port_to_space.
1230 * Conditions:
1231 * Nothing locked.
1232 */
1233
1234 void
1235 space_deallocate(
1236 ipc_space_t space)
1237 {
1238 if (space != IS_NULL)
1239 is_release(space);
1240 }
1241
1242 /*
1243 * Routine: thread/task_set_exception_ports [kernel call]
1244 * Purpose:
1245 * Sets the thread/task exception port, flavor and
1246 * behavior for the exception types specified by the mask.
1247 * There will be one send right per exception per valid
1248 * port.
1249 * Conditions:
1250 * Nothing locked. If successful, consumes
1251 * the supplied send right.
1252 * Returns:
1253 * KERN_SUCCESS Changed the special port.
1254 * KERN_INVALID_ARGUMENT The thread is null,
1255 * Illegal mask bit set.
1256 * Illegal exception behavior
1257 * KERN_FAILURE The thread is dead.
1258 */
1259
1260 kern_return_t
1261 thread_set_exception_ports(
1262 thread_act_t thr_act,
1263 exception_mask_t exception_mask,
1264 ipc_port_t new_port,
1265 exception_behavior_t new_behavior,
1266 thread_state_flavor_t new_flavor)
1267 {
1268 register int i;
1269 ipc_port_t old_port[EXC_TYPES_COUNT];
1270
1271 if (!thr_act)
1272 return KERN_INVALID_ARGUMENT;
1273
1274 if (exception_mask & ~EXC_MASK_ALL)
1275 return KERN_INVALID_ARGUMENT;
1276
1277 if (IP_VALID(new_port)) {
1278 switch (new_behavior) {
1279 case EXCEPTION_DEFAULT:
1280 case EXCEPTION_STATE:
1281 case EXCEPTION_STATE_IDENTITY:
1282 break;
1283 default:
1284 return KERN_INVALID_ARGUMENT;
1285 }
1286 }
1287
1288 /*
1289 * Check the validity of the thread_state_flavor by calling the
1290 * VALID_THREAD_STATE_FLAVOR architecture dependent macro defined in
1291 * osfmk/mach/ARCHITECTURE/thread_status.h
1292 */
1293 if (!VALID_THREAD_STATE_FLAVOR(new_flavor)) {
1294 return KERN_INVALID_ARGUMENT;
1295 }
1296
1297 act_lock(thr_act);
1298 if (!thr_act->active) {
1299 act_unlock(thr_act);
1300 return KERN_FAILURE;
1301 }
1302
1303 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
1304 if (exception_mask & (1 << i)) {
1305 old_port[i] = thr_act->exc_actions[i].port;
1306 thr_act->exc_actions[i].port =
1307 ipc_port_copy_send(new_port);
1308 thr_act->exc_actions[i].behavior = new_behavior;
1309 thr_act->exc_actions[i].flavor = new_flavor;
1310 } else
1311 old_port[i] = IP_NULL;
1312 }/* for */
1313 /*
1314 * Consume send rights without any lock held.
1315 */
1316 act_unlock(thr_act);
1317 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++)
1318 if (IP_VALID(old_port[i]))
1319 ipc_port_release_send(old_port[i]);
1320 if (IP_VALID(new_port)) /* consume send right */
1321 ipc_port_release_send(new_port);
1322
1323 return KERN_SUCCESS;
1324 }/* thread_set_exception_port */
1325
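/*
 * Illustrative user-space usage (a minimal sketch): route EXC_BAD_ACCESS for
 * the current thread to a freshly allocated exception port.
 *
 *	mach_port_t exc_port;
 *
 *	mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &exc_port);
 *	mach_port_insert_right(mach_task_self(), exc_port, exc_port,
 *			       MACH_MSG_TYPE_MAKE_SEND);
 *	kr = thread_set_exception_ports(mach_thread_self(), EXC_MASK_BAD_ACCESS,
 *					exc_port, EXCEPTION_DEFAULT,
 *					MACHINE_THREAD_STATE);
 */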
1326 kern_return_t
1327 task_set_exception_ports(
1328 task_t task,
1329 exception_mask_t exception_mask,
1330 ipc_port_t new_port,
1331 exception_behavior_t new_behavior,
1332 thread_state_flavor_t new_flavor)
1333 {
1334 register int i;
1335 ipc_port_t old_port[EXC_TYPES_COUNT];
1336
1337 if (task == TASK_NULL) {
1338 return KERN_INVALID_ARGUMENT;
1339 }
1340
1341 if (exception_mask & ~EXC_MASK_ALL) {
1342 return KERN_INVALID_ARGUMENT;
1343 }
1344
1345 if (IP_VALID(new_port)) {
1346 switch (new_behavior) {
1347 case EXCEPTION_DEFAULT:
1348 case EXCEPTION_STATE:
1349 case EXCEPTION_STATE_IDENTITY:
1350 break;
1351 default:
1352 return KERN_INVALID_ARGUMENT;
1353 }
1354 }
1355 /* Cannot easily check "new_flavor", but that just means that
1356 * the flavor in the generated exception message might be garbage:
1357 * GIGO */
1358
1359 itk_lock(task);
1360 if (task->itk_self == IP_NULL) {
1361 itk_unlock(task);
1362 return KERN_FAILURE;
1363 }
1364
1365 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
1366 if (exception_mask & (1 << i)) {
1367 old_port[i] = task->exc_actions[i].port;
1368 task->exc_actions[i].port =
1369 ipc_port_copy_send(new_port);
1370 task->exc_actions[i].behavior = new_behavior;
1371 task->exc_actions[i].flavor = new_flavor;
1372 } else
1373 old_port[i] = IP_NULL;
1374 }/* for */
1375
1376 /*
1377 * Consume send rights without any lock held.
1378 */
1379 itk_unlock(task);
1380 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++)
1381 if (IP_VALID(old_port[i]))
1382 ipc_port_release_send(old_port[i]);
1383 if (IP_VALID(new_port)) /* consume send right */
1384 ipc_port_release_send(new_port);
1385
1386 return KERN_SUCCESS;
1387 }/* task_set_exception_port */
1388
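/*
 * Illustrative user-space usage (a minimal sketch): a crash reporter routing
 * all exception types for the whole task to exc_port (allocated as in the
 * thread example above).
 *
 *	kr = task_set_exception_ports(mach_task_self(), EXC_MASK_ALL, exc_port,
 *				      EXCEPTION_STATE_IDENTITY,
 *				      MACHINE_THREAD_STATE);
 */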
1389 /*
1390 * Routine: thread/task_swap_exception_ports [kernel call]
1391 * Purpose:
1392 * Sets the thread/task exception port, flavor and
1393 * behavior for the exception types specified by the
1394 * mask.
1395 *
1396 * The old ports, behaviors and flavors are returned.
1397 * Count specifies the array sizes on input and
1398 * the number of returned ports etc. on output. The
1399 * arrays must be large enough to hold all the returned
1400 * data; MIG returns an error otherwise. The masks
1401 * array specifies the corresponding exception type(s).
1402 *
1403 * Conditions:
1404 * Nothing locked. If successful, consumes
1405 * the supplied send right.
1406 *
1407 * Returns up to [in] CountCnt elements.
1408 * Returns:
1409 * KERN_SUCCESS Changed the special port.
1410 * KERN_INVALID_ARGUMENT The thread is null,
1411 * Illegal mask bit set.
1412 * Illegal exception behavior
1413 * KERN_FAILURE The thread is dead.
1414 */
1415
1416 kern_return_t
1417 thread_swap_exception_ports(
1418 thread_act_t thr_act,
1419 exception_mask_t exception_mask,
1420 ipc_port_t new_port,
1421 exception_behavior_t new_behavior,
1422 thread_state_flavor_t new_flavor,
1423 exception_mask_array_t masks,
1424 mach_msg_type_number_t * CountCnt,
1425 exception_port_array_t ports,
1426 exception_behavior_array_t behaviors,
1427 thread_state_flavor_array_t flavors )
1428 {
1429 register int i,
1430 j,
1431 count;
1432 ipc_port_t old_port[EXC_TYPES_COUNT];
1433
1434 if (!thr_act)
1435 return KERN_INVALID_ARGUMENT;
1436
1437 if (exception_mask & ~EXC_MASK_ALL) {
1438 return KERN_INVALID_ARGUMENT;
1439 }
1440
1441 if (IP_VALID(new_port)) {
1442 switch (new_behavior) {
1443 case EXCEPTION_DEFAULT:
1444 case EXCEPTION_STATE:
1445 case EXCEPTION_STATE_IDENTITY:
1446 break;
1447 default:
1448 return KERN_INVALID_ARGUMENT;
1449 }
1450 }
1451 /* Cannot easily check "new_flavor", but that just means that
1452 * the flavor in the generated exception message might be garbage:
1453 * GIGO */
1454
1455 act_lock(thr_act);
1456 if (!thr_act->active) {
1457 act_unlock(thr_act);
1458 return KERN_FAILURE;
1459 }
1460
1461 count = 0;
1462
1463 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
1464 if (exception_mask & (1 << i)) {
1465 for (j = 0; j < count; j++) {
1466 /*
1467 * search for an identical entry; if found,
1468 * set the corresponding mask for this exception.
1469 */
1470 if (thr_act->exc_actions[i].port == ports[j] &&
1471 thr_act->exc_actions[i].behavior ==behaviors[j]
1472 && thr_act->exc_actions[i].flavor ==flavors[j])
1473 {
1474 masks[j] |= (1 << i);
1475 break;
1476 }
1477 }/* for */
1478 if (j == count) {
1479 masks[j] = (1 << i);
1480 ports[j] =
1481 ipc_port_copy_send(thr_act->exc_actions[i].port);
1482
1483 behaviors[j] = thr_act->exc_actions[i].behavior;
1484 flavors[j] = thr_act->exc_actions[i].flavor;
1485 count++;
1486 }
1487
1488 old_port[i] = thr_act->exc_actions[i].port;
1489 thr_act->exc_actions[i].port =
1490 ipc_port_copy_send(new_port);
1491 thr_act->exc_actions[i].behavior = new_behavior;
1492 thr_act->exc_actions[i].flavor = new_flavor;
1493 if (count > *CountCnt) {
1494 break;
1495 }
1496 } else
1497 old_port[i] = IP_NULL;
1498 }/* for */
1499
1500 /*
1501 * Consume send rights without any lock held.
1502 */
1503 act_unlock(thr_act);
1504 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++)
1505 if (IP_VALID(old_port[i]))
1506 ipc_port_release_send(old_port[i]);
1507 if (IP_VALID(new_port)) /* consume send right */
1508 ipc_port_release_send(new_port);
1509 *CountCnt = count;
1510 return KERN_SUCCESS;
1511 }/* thread_swap_exception_ports */
1512
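/*
 * Illustrative user-space usage (a minimal sketch): install a new handler
 * while saving the previous settings, then restore them afterwards.  The
 * arrays are sized EXC_TYPES_COUNT so they can hold every returned entry.
 *
 *	exception_mask_t	saved_masks[EXC_TYPES_COUNT];
 *	mach_port_t		saved_ports[EXC_TYPES_COUNT];
 *	exception_behavior_t	saved_behaviors[EXC_TYPES_COUNT];
 *	thread_state_flavor_t	saved_flavors[EXC_TYPES_COUNT];
 *	mach_msg_type_number_t	saved_count = EXC_TYPES_COUNT;
 *
 *	kr = thread_swap_exception_ports(mach_thread_self(), EXC_MASK_BAD_ACCESS,
 *					 exc_port, EXCEPTION_DEFAULT,
 *					 MACHINE_THREAD_STATE, saved_masks,
 *					 &saved_count, saved_ports,
 *					 saved_behaviors, saved_flavors);
 *
 *	...later, restore each saved entry with thread_set_exception_ports()...
 */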
1513 kern_return_t
1514 task_swap_exception_ports(
1515 task_t task,
1516 exception_mask_t exception_mask,
1517 ipc_port_t new_port,
1518 exception_behavior_t new_behavior,
1519 thread_state_flavor_t new_flavor,
1520 exception_mask_array_t masks,
1521 mach_msg_type_number_t * CountCnt,
1522 exception_port_array_t ports,
1523 exception_behavior_array_t behaviors,
1524 thread_state_flavor_array_t flavors )
1525 {
1526 register int i,
1527 j,
1528 count;
1529 ipc_port_t old_port[EXC_TYPES_COUNT];
1530
1531 if (task == TASK_NULL)
1532 return KERN_INVALID_ARGUMENT;
1533
1534 if (exception_mask & ~EXC_MASK_ALL) {
1535 return KERN_INVALID_ARGUMENT;
1536 }
1537
1538 if (IP_VALID(new_port)) {
1539 switch (new_behavior) {
1540 case EXCEPTION_DEFAULT:
1541 case EXCEPTION_STATE:
1542 case EXCEPTION_STATE_IDENTITY:
1543 break;
1544 default:
1545 return KERN_INVALID_ARGUMENT;
1546 }
1547 }
1548 /* Cannot easily check "new_flavor", but that just means that
1549 * the flavor in the generated exception message might be garbage:
1550 * GIGO */
1551
1552 itk_lock(task);
1553 if (task->itk_self == IP_NULL) {
1554 itk_unlock(task);
1555 return KERN_FAILURE;
1556 }
1557
1558 count = 0;
1559
1560 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
1561 if (exception_mask & (1 << i)) {
1562 for (j = 0; j < count; j++) {
1563 /*
1564 * search for an identical entry; if found,
1565 * set the corresponding mask for this exception.
1566 */
1567 if (task->exc_actions[i].port == ports[j] &&
1568 task->exc_actions[i].behavior == behaviors[j]
1569 && task->exc_actions[i].flavor == flavors[j])
1570 {
1571 masks[j] |= (1 << i);
1572 break;
1573 }
1574 }/* for */
1575 if (j == count) {
1576 masks[j] = (1 << i);
1577 ports[j] =
1578 ipc_port_copy_send(task->exc_actions[i].port);
1579 behaviors[j] = task->exc_actions[i].behavior;
1580 flavors[j] = task->exc_actions[i].flavor;
1581 count++;
1582 }
1583 old_port[i] = task->exc_actions[i].port;
1584 task->exc_actions[i].port =
1585 ipc_port_copy_send(new_port);
1586 task->exc_actions[i].behavior = new_behavior;
1587 task->exc_actions[i].flavor = new_flavor;
1588 if (count > *CountCnt) {
1589 break;
1590 }
1591 } else
1592 old_port[i] = IP_NULL;
1593 }/* for */
1594
1595
1596 /*
1597 * Consume send rights without any lock held.
1598 */
1599 itk_unlock(task);
1600 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++)
1601 if (IP_VALID(old_port[i]))
1602 ipc_port_release_send(old_port[i]);
1603 if (IP_VALID(new_port)) /* consume send right */
1604 ipc_port_release_send(new_port);
1605 *CountCnt = count;
1606
1607 return KERN_SUCCESS;
1608 }/* task_swap_exception_ports */
1609
1610 /*
1611 * Routine: thread/task_get_exception_ports [kernel call]
1612 * Purpose:
1613 * Clones a send right for each of the thread/task's exception
1614 * ports specified in the mask and returns the behaviour
1615 * and flavor of said port.
1616 *
1617 * Returns up to [in] CountCnt elements.
1618 *
1619 * Conditions:
1620 * Nothing locked.
1621 * Returns:
1622 * KERN_SUCCESS Extracted a send right.
1623 * KERN_INVALID_ARGUMENT The thread is null,
1624 * Invalid special port,
1625 * Illegal mask bit set.
1626 * KERN_FAILURE The thread is dead.
1627 */
1628
1629 kern_return_t
1630 thread_get_exception_ports(
1631 thread_act_t thr_act,
1632 exception_mask_t exception_mask,
1633 exception_mask_array_t masks,
1634 mach_msg_type_number_t * CountCnt,
1635 exception_port_array_t ports,
1636 exception_behavior_array_t behaviors,
1637 thread_state_flavor_array_t flavors )
1638 {
1639 register int i,
1640 j,
1641 count;
1642
1643 if (!thr_act)
1644 return KERN_INVALID_ARGUMENT;
1645
1646 if (exception_mask & ~EXC_MASK_ALL) {
1647 return KERN_INVALID_ARGUMENT;
1648 }
1649
1650 act_lock(thr_act);
1651 if (!thr_act->active) {
1652 act_unlock(thr_act);
1653 return KERN_FAILURE;
1654 }
1655
1656 count = 0;
1657
1658 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
1659 if (exception_mask & (1 << i)) {
1660 for (j = 0; j < count; j++) {
1661 /*
1662 * search for an identical entry; if found,
1663 * set the corresponding mask for this exception.
1664 */
1665 if (thr_act->exc_actions[i].port == ports[j] &&
1666 thr_act->exc_actions[i].behavior ==behaviors[j]
1667 && thr_act->exc_actions[i].flavor == flavors[j])
1668 {
1669 masks[j] |= (1 << i);
1670 break;
1671 }
1672 }/* for */
1673 if (j == count) {
1674 masks[j] = (1 << i);
1675 ports[j] =
1676 ipc_port_copy_send(thr_act->exc_actions[i].port);
1677 behaviors[j] = thr_act->exc_actions[i].behavior;
1678 flavors[j] = thr_act->exc_actions[i].flavor;
1679 count++;
1680 if (count >= *CountCnt) {
1681 break;
1682 }
1683 }
1684 }
1685 }/* for */
1686
1687 act_unlock(thr_act);
1688
1689 *CountCnt = count;
1690 return KERN_SUCCESS;
1691 }/* thread_get_exception_ports */
1692
1693 kern_return_t
1694 task_get_exception_ports(
1695 task_t task,
1696 exception_mask_t exception_mask,
1697 exception_mask_array_t masks,
1698 mach_msg_type_number_t * CountCnt,
1699 exception_port_array_t ports,
1700 exception_behavior_array_t behaviors,
1701 thread_state_flavor_array_t flavors )
1702 {
1703 register int i,
1704 j,
1705 count;
1706
1707 if (task == TASK_NULL)
1708 return KERN_INVALID_ARGUMENT;
1709
1710 if (exception_mask & ~EXC_MASK_ALL) {
1711 return KERN_INVALID_ARGUMENT;
1712 }
1713
1714 itk_lock(task);
1715 if (task->itk_self == IP_NULL) {
1716 itk_unlock(task);
1717 return KERN_FAILURE;
1718 }
1719
1720 count = 0;
1721
1722 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
1723 if (exception_mask & (1 << i)) {
1724 for (j = 0; j < count; j++) {
1725 /*
1726 * search for an identical entry; if found,
1727 * set the corresponding mask for this exception.
1728 */
1729 if (task->exc_actions[i].port == ports[j] &&
1730 task->exc_actions[i].behavior == behaviors[j]
1731 && task->exc_actions[i].flavor == flavors[j])
1732 {
1733 masks[j] |= (1 << i);
1734 break;
1735 }
1736 }/* for */
1737 if (j == count) {
1738 masks[j] = (1 << i);
1739 ports[j] =
1740 ipc_port_copy_send(task->exc_actions[i].port);
1741 behaviors[j] = task->exc_actions[i].behavior;
1742 flavors[j] = task->exc_actions[i].flavor;
1743 count++;
1744 if (count > *CountCnt) {
1745 break;
1746 }
1747 }
1748 }
1749 }/* for */
1750
1751 itk_unlock(task);
1752
1753 *CountCnt = count;
1754 return KERN_SUCCESS;
1755 }/* task_get_exception_ports */
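/*
 * Illustrative user-space usage (a minimal sketch): a debugger inspecting the
 * exception handling set up for a target task.  target_task is an assumed
 * task port the debugger already holds.
 *
 *	exception_mask_t	masks[EXC_TYPES_COUNT];
 *	mach_port_t		handlers[EXC_TYPES_COUNT];
 *	exception_behavior_t	behaviors[EXC_TYPES_COUNT];
 *	thread_state_flavor_t	flavors[EXC_TYPES_COUNT];
 *	mach_msg_type_number_t	count = EXC_TYPES_COUNT;
 *	unsigned int		i;
 *
 *	kr = task_get_exception_ports(target_task, EXC_MASK_ALL, masks, &count,
 *				      handlers, behaviors, flavors);
 *	if (kr == KERN_SUCCESS)
 *		for (i = 0; i < count; i++)
 *			...masks[i] names the exceptions handled by handlers[i]...
 */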