]> git.saurik.com Git - apple/xnu.git/blob - osfmk/kern/ipc_tt.c
xnu-123.5.tar.gz
[apple/xnu.git] / osfmk / kern / ipc_tt.c
1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 /*
23 * @OSF_COPYRIGHT@
24 */
25 /*
26 * Mach Operating System
27 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
28 * All Rights Reserved.
29 *
30 * Permission to use, copy, modify and distribute this software and its
31 * documentation is hereby granted, provided that both the copyright
32 * notice and this permission notice appear in all copies of the
33 * software, derivative works or modified versions, and any portions
34 * thereof, and that both notices appear in supporting documentation.
35 *
36 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
37 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
38 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
39 *
40 * Carnegie Mellon requests users of this software to return to
41 *
42 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
43 * School of Computer Science
44 * Carnegie Mellon University
45 * Pittsburgh PA 15213-3890
46 *
47 * any improvements or extensions that they make and grant Carnegie Mellon
48 * the rights to redistribute these changes.
49 */
50 /*
51 */
52
53 /*
54 * File: ipc_tt.c
55 * Purpose:
56 * Task and thread related IPC functions.
57 */
58
59 #include <mach/boolean.h>
60 #include <mach_rt.h>
61 #include <mach/kern_return.h>
62 #include <mach/mach_param.h>
63 #include <mach/task_special_ports.h>
64 #include <mach/thread_special_ports.h>
65 #include <mach/thread_status.h>
66 #include <mach/exception_types.h>
67 #include <mach/mach_traps.h>
68 #include <mach/task_server.h>
69 #include <mach/thread_act_server.h>
70 #include <mach/mach_host_server.h>
71 #include <mach/vm_map_server.h>
72 #include <kern/host.h>
73 #include <kern/ipc_tt.h>
74 #include <kern/thread_act.h>
75 #include <kern/misc_protos.h>
76 #include <vm/vm_pageout.h>
77
78 /*
79 * Routine: ipc_task_init
80 * Purpose:
81 * Initialize a task's IPC state.
82 *
83 * If non-null, some state will be inherited from the parent.
84 * The parent must be appropriately initialized.
85 * Conditions:
86 * Nothing locked.
87 */
88
void
ipc_task_init(
	task_t		task,
	task_t		parent)
{
	ipc_space_t space;
	ipc_port_t kport;
	kern_return_t kr;
	int i;


	/* Each task gets its own IPC space; failure here is fatal. */
	kr = ipc_space_create(&ipc_table_entries[0], &space);
	if (kr != KERN_SUCCESS)
		panic("ipc_task_init");


	/* Kernel port representing the task itself. */
	kport = ipc_port_alloc_kernel();
	if (kport == IP_NULL)
		panic("ipc_task_init");

	itk_lock_init(task);
	task->itk_self = kport;
	/* itk_sself is the user-visible self port; initially the same
	 * naked send right as itk_self (no interposing). */
	task->itk_sself = ipc_port_make_send(kport);
	task->itk_space = space;
	space->is_fast = task->kernel_loaded;

	if (parent == TASK_NULL) {
		/* No parent: start with null exception ports, except
		 * EXC_MACH_SYSCALL which is directed at the host port. */
		for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
			task->exc_actions[i].port = IP_NULL;
		}/* for */
		task->exc_actions[EXC_MACH_SYSCALL].port =
			ipc_port_make_send(realhost.host_self);
		task->itk_host = ipc_port_make_send(realhost.host_self);
		task->itk_bootstrap = IP_NULL;
		for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
			task->itk_registered[i] = IP_NULL;
	} else {
		/* Copy the parent's ports while holding its itk lock so
		 * the set is consistent.  ipc_port_copy_send handles
		 * null/dead ports, so no IP_VALID checks are needed. */
		itk_lock(parent);
		assert(parent->itk_self != IP_NULL);

		/* inherit registered ports */

		for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
			task->itk_registered[i] =
				ipc_port_copy_send(parent->itk_registered[i]);

		/* inherit exception and bootstrap ports */

		for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
			task->exc_actions[i].port =
				ipc_port_copy_send(parent->exc_actions[i].port);
			task->exc_actions[i].flavor =
				parent->exc_actions[i].flavor;
			task->exc_actions[i].behavior =
				parent->exc_actions[i].behavior;
		}/* for */
		task->itk_host =
			ipc_port_copy_send(parent->itk_host);

		task->itk_bootstrap =
			ipc_port_copy_send(parent->itk_bootstrap);

		itk_unlock(parent);
	}
}
154
155 /*
156 * Routine: ipc_task_enable
157 * Purpose:
158 * Enable a task for IPC access.
159 * Conditions:
160 * Nothing locked.
161 */
162
163 void
164 ipc_task_enable(
165 task_t task)
166 {
167 ipc_port_t kport;
168
169 itk_lock(task);
170 kport = task->itk_self;
171 if (kport != IP_NULL)
172 ipc_kobject_set(kport, (ipc_kobject_t) task, IKOT_TASK);
173 itk_unlock(task);
174 }
175
176 /*
177 * Routine: ipc_task_disable
178 * Purpose:
179 * Disable IPC access to a task.
180 * Conditions:
181 * Nothing locked.
182 */
183
184 void
185 ipc_task_disable(
186 task_t task)
187 {
188 ipc_port_t kport;
189
190 itk_lock(task);
191 kport = task->itk_self;
192 if (kport != IP_NULL)
193 ipc_kobject_set(kport, IKO_NULL, IKOT_NONE);
194 itk_unlock(task);
195 }
196
197 /*
198 * Routine: ipc_task_terminate
199 * Purpose:
200 * Clean up and destroy a task's IPC state.
201 * Conditions:
202 * Nothing locked. The task must be suspended.
203 * (Or the current thread must be in the task.)
204 */
205
206 void
207 ipc_task_terminate(
208 task_t task)
209 {
210 ipc_port_t kport;
211 int i;
212
213 itk_lock(task);
214 kport = task->itk_self;
215
216 if (kport == IP_NULL) {
217 /* the task is already terminated (can this happen?) */
218 itk_unlock(task);
219 return;
220 }
221
222 task->itk_self = IP_NULL;
223 itk_unlock(task);
224
225 /* release the naked send rights */
226
227 if (IP_VALID(task->itk_sself))
228 ipc_port_release_send(task->itk_sself);
229
230 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
231 if (IP_VALID(task->exc_actions[i].port)) {
232 ipc_port_release_send(task->exc_actions[i].port);
233 }
234 }/* for */
235 if (IP_VALID(task->itk_host))
236 ipc_port_release_send(task->itk_host);
237
238 if (IP_VALID(task->itk_bootstrap))
239 ipc_port_release_send(task->itk_bootstrap);
240
241 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
242 if (IP_VALID(task->itk_registered[i]))
243 ipc_port_release_send(task->itk_registered[i]);
244
245 ipc_port_release_send(task->wired_ledger_port);
246 ipc_port_release_send(task->paged_ledger_port);
247
248 /* destroy the kernel port */
249 ipc_port_dealloc_kernel(kport);
250 }
251
252 /*
253 * Routine: ipc_thread_init
254 * Purpose:
255 * Initialize a thread's IPC state.
256 * Conditions:
257 * Nothing locked.
258 */
259
260 void
261 ipc_thread_init(
262 thread_t thread)
263 {
264 ipc_kmsg_queue_init(&thread->ith_messages);
265 thread->ith_mig_reply = MACH_PORT_NULL;
266 thread->ith_rpc_reply = IP_NULL;
267 }
268
269 /*
270 * Routine: ipc_thread_terminate
271 * Purpose:
272 * Clean up and destroy a thread's IPC state.
273 * Conditions:
274 * Nothing locked. The thread must be suspended.
275 * (Or be the current thread.)
276 */
277
278 void
279 ipc_thread_terminate(
280 thread_t thread)
281 {
282 assert(ipc_kmsg_queue_empty(&thread->ith_messages));
283
284 if (thread->ith_rpc_reply != IP_NULL)
285 ipc_port_dealloc_reply(thread->ith_rpc_reply);
286 thread->ith_rpc_reply = IP_NULL;
287 }
288
289 /*
290 * Routine: ipc_thr_act_init
291 * Purpose:
292 * Initialize an thr_act's IPC state.
293 * Conditions:
294 * Nothing locked.
295 */
296
297 void
298 ipc_thr_act_init(task_t task, thread_act_t thr_act)
299 {
300 ipc_port_t kport; int i;
301
302 kport = ipc_port_alloc_kernel();
303 if (kport == IP_NULL)
304 panic("ipc_thr_act_init");
305
306 thr_act->ith_self = kport;
307 thr_act->ith_sself = ipc_port_make_send(kport);
308
309 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++)
310 thr_act->exc_actions[i].port = IP_NULL;
311
312 thr_act->exc_actions[EXC_MACH_SYSCALL].port =
313 ipc_port_make_send(realhost.host_self);
314
315 ipc_kobject_set(kport, (ipc_kobject_t) thr_act, IKOT_ACT);
316 }
317
318 void
319 ipc_thr_act_disable(thread_act_t thr_act)
320 {
321 int i;
322 ipc_port_t kport;
323
324 kport = thr_act->ith_self;
325
326 if (kport != IP_NULL)
327 ipc_kobject_set(kport, IKO_NULL, IKOT_NONE);
328 }
329
/*
 * Tear down an activation's IPC state: clear ith_self, drop the self
 * and exception-port send rights, then destroy the kernel port.
 * NOTE(review): unlike ipc_task_terminate, no lock is taken here —
 * presumably the caller guarantees exclusive access; confirm.
 */
void
ipc_thr_act_terminate(thread_act_t thr_act)
{
	ipc_port_t kport; int i;

	kport = thr_act->ith_self;

	if (kport == IP_NULL) {
		/* the thread is already terminated (can this happen?) */
		return;
	}

	thr_act->ith_self = IP_NULL;

	/* release the naked send rights */

	if (IP_VALID(thr_act->ith_sself))
		ipc_port_release_send(thr_act->ith_sself);
	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		if (IP_VALID(thr_act->exc_actions[i].port))
			ipc_port_release_send(thr_act->exc_actions[i].port);
	}

	/* destroy the kernel port */
	ipc_port_dealloc_kernel(kport);
}
356
357 /*
358 * Routine: retrieve_task_self_fast
359 * Purpose:
360 * Optimized version of retrieve_task_self,
361 * that only works for the current task.
362 *
363 * Return a send right (possibly null/dead)
364 * for the task's user-visible self port.
365 * Conditions:
366 * Nothing locked.
367 */
368
ipc_port_t
retrieve_task_self_fast(
	register task_t		task)
{
	register ipc_port_t port;

	assert(task == current_task());

	itk_lock(task);
	assert(task->itk_self != IP_NULL);

	if ((port = task->itk_sself) == task->itk_self) {
		/* no interposing */

		/*
		 * Fabricate a send right by hand instead of calling
		 * ipc_port_copy_send: take a port reference and bump
		 * the send-right count directly under the port lock.
		 * The port must be active since it is the task's own
		 * kernel port.
		 */
		ip_lock(port);
		assert(ip_active(port));
		ip_reference(port);
		port->ip_srights++;
		ip_unlock(port);
	} else
		/* interposed self port: copy whatever right is there
		 * (may be null or dead) */
		port = ipc_port_copy_send(port);
	itk_unlock(task);

	return port;
}
394
395 /*
396 * Routine: retrieve_act_self_fast
397 * Purpose:
398 * Optimized version of retrieve_thread_self,
399 * that only works for the current thread.
400 *
401 * Return a send right (possibly null/dead)
402 * for the thread's user-visible self port.
403 * Conditions:
404 * Nothing locked.
405 */
406
ipc_port_t
retrieve_act_self_fast(thread_act_t thr_act)
{
	register ipc_port_t port;

	assert(thr_act == current_act());
	act_lock(thr_act);
	assert(thr_act->ith_self != IP_NULL);

	if ((port = thr_act->ith_sself) == thr_act->ith_self) {
		/* no interposing */

		/*
		 * Same fast path as retrieve_task_self_fast: make a
		 * send right in place under the port lock rather than
		 * going through ipc_port_copy_send.
		 */
		ip_lock(port);
		assert(ip_active(port));
		ip_reference(port);
		port->ip_srights++;
		ip_unlock(port);
	} else
		/* interposed self port: copy the (possibly null/dead)
		 * right */
		port = ipc_port_copy_send(port);
	act_unlock(thr_act);

	return port;
}
430
431 /*
432 * Routine: task_self_trap [mach trap]
433 * Purpose:
434 * Give the caller send rights for his own task port.
435 * Conditions:
436 * Nothing locked.
437 * Returns:
438 * MACH_PORT_NULL if there are any resource failures
439 * or other errors.
440 */
441
442 mach_port_name_t
443 task_self_trap(void)
444 {
445 task_t task = current_task();
446 ipc_port_t sright;
447
448 sright = retrieve_task_self_fast(task);
449 return ipc_port_copyout_send(sright, task->itk_space);
450 }
451
452 /*
453 * Routine: thread_self_trap [mach trap]
454 * Purpose:
455 * Give the caller send rights for his own thread port.
456 * Conditions:
457 * Nothing locked.
458 * Returns:
459 * MACH_PORT_NULL if there are any resource failures
460 * or other errors.
461 */
462
463 mach_port_name_t
464 thread_self_trap(void)
465 {
466 thread_act_t thr_act = current_act();
467 task_t task = thr_act->task;
468 ipc_port_t sright;
469
470 sright = retrieve_act_self_fast(thr_act);
471 return ipc_port_copyout_send(sright, task->itk_space);
472 }
473
474 /*
475 * Routine: mach_reply_port [mach trap]
476 * Purpose:
477 * Allocate a port for the caller.
478 * Conditions:
479 * Nothing locked.
480 * Returns:
481 * MACH_PORT_NULL if there are any resource failures
482 * or other errors.
483 */
484
485 mach_port_name_t
486 mach_reply_port(void)
487 {
488 ipc_port_t port;
489 mach_port_name_t name;
490 kern_return_t kr;
491
492 kr = ipc_port_alloc(current_task()->itk_space, &name, &port);
493 if (kr == KERN_SUCCESS)
494 ip_unlock(port);
495 else
496 name = MACH_PORT_NULL;
497
498 return name;
499 }
500
501 /*
502 * Routine: task_get_special_port [kernel call]
503 * Purpose:
504 * Clones a send right for one of the task's
505 * special ports.
506 * Conditions:
507 * Nothing locked.
508 * Returns:
509 * KERN_SUCCESS Extracted a send right.
510 * KERN_INVALID_ARGUMENT The task is null.
511 * KERN_FAILURE The task/space is dead.
512 * KERN_INVALID_ARGUMENT Invalid special port.
513 */
514
kern_return_t
task_get_special_port(
	task_t		task,
	int		which,
	ipc_port_t	*portp)
{
	ipc_port_t *whichp;
	ipc_port_t port;

	if (task == TASK_NULL)
		return KERN_INVALID_ARGUMENT;

	/* Map the special-port selector to the task field holding it. */
	switch (which) {
	    case TASK_KERNEL_PORT:
		whichp = &task->itk_sself;
		break;

	    case TASK_HOST_PORT:
		whichp = &task->itk_host;
		break;

	    case TASK_BOOTSTRAP_PORT:
		whichp = &task->itk_bootstrap;
		break;

	    case TASK_WIRED_LEDGER_PORT:
		whichp = &task->wired_ledger_port;
		break;

	    case TASK_PAGED_LEDGER_PORT:
		whichp = &task->paged_ledger_port;
		break;

	    default:
		return KERN_INVALID_ARGUMENT;
	}

	/* Copy the right under the itk lock; a null itk_self means the
	 * task has already been terminated. */
	itk_lock(task);
	if (task->itk_self == IP_NULL) {
		itk_unlock(task);
		return KERN_FAILURE;
	}

	port = ipc_port_copy_send(*whichp);
	itk_unlock(task);

	*portp = port;
	return KERN_SUCCESS;
}
564
565 /*
566 * Routine: task_set_special_port [kernel call]
567 * Purpose:
568 * Changes one of the task's special ports,
569 * setting it to the supplied send right.
570 * Conditions:
571 * Nothing locked. If successful, consumes
572 * the supplied send right.
573 * Returns:
574 * KERN_SUCCESS Changed the special port.
575 * KERN_INVALID_ARGUMENT The task is null.
576 * KERN_FAILURE The task/space is dead.
577 * KERN_INVALID_ARGUMENT Invalid special port.
578 */
579
kern_return_t
task_set_special_port(
	task_t		task,
	int		which,
	ipc_port_t	port)
{
	ipc_port_t *whichp;
	ipc_port_t old;

	if (task == TASK_NULL)
		return KERN_INVALID_ARGUMENT;

	/* Map the special-port selector to the task field holding it. */
	switch (which) {
	    case TASK_KERNEL_PORT:
		whichp = &task->itk_sself;
		break;

	    case TASK_HOST_PORT:
		whichp = &task->itk_host;
		break;

	    case TASK_BOOTSTRAP_PORT:
		whichp = &task->itk_bootstrap;
		break;

	    case TASK_WIRED_LEDGER_PORT:
		whichp = &task->wired_ledger_port;
		break;

	    case TASK_PAGED_LEDGER_PORT:
		whichp = &task->paged_ledger_port;
		break;

	    default:
		return KERN_INVALID_ARGUMENT;
	}/* switch */

	itk_lock(task);
	if (task->itk_self == IP_NULL) {
		itk_unlock(task);
		return KERN_FAILURE;
	}

	/* Swap in the new right under the lock; release the displaced
	 * right only after unlocking, since release may block. */
	old = *whichp;
	*whichp = port;
	itk_unlock(task);

	if (IP_VALID(old))
		ipc_port_release_send(old);
	return KERN_SUCCESS;
}
631
632
633 /*
634 * Routine: mach_ports_register [kernel call]
635 * Purpose:
636 * Stash a handful of port send rights in the task.
637 * Child tasks will inherit these rights, but they
638 * must use mach_ports_lookup to acquire them.
639 *
640 * The rights are supplied in a (wired) kalloc'd segment.
641 * Rights which aren't supplied are assumed to be null.
642 * Conditions:
643 * Nothing locked. If successful, consumes
644 * the supplied rights and memory.
645 * Returns:
646 * KERN_SUCCESS Stashed the port rights.
647 * KERN_INVALID_ARGUMENT The task is null.
648 * KERN_INVALID_ARGUMENT The task is dead.
649 * KERN_INVALID_ARGUMENT Too many port rights supplied.
650 */
651
kern_return_t
mach_ports_register(
	task_t			task,
	mach_port_array_t	memory,
	mach_msg_type_number_t	portsCnt)
{
	ipc_port_t ports[TASK_PORT_REGISTER_MAX];
	int i;

	/* portsCnt is bounded by TASK_PORT_REGISTER_MAX, so the local
	 * ports[] array and the int loop index are safe. */
	if ((task == TASK_NULL) ||
	    (portsCnt > TASK_PORT_REGISTER_MAX))
		return KERN_INVALID_ARGUMENT;

	/*
	 *	Pad the port rights with nulls.
	 */

	for (i = 0; i < portsCnt; i++)
		ports[i] = memory[i];
	for (; i < TASK_PORT_REGISTER_MAX; i++)
		ports[i] = IP_NULL;

	itk_lock(task);
	if (task->itk_self == IP_NULL) {
		itk_unlock(task);
		return KERN_INVALID_ARGUMENT;
	}

	/*
	 *	Replace the old send rights with the new.
	 *	Release the old rights after unlocking.
	 */

	for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
		ipc_port_t old;

		old = task->itk_registered[i];
		task->itk_registered[i] = ports[i];
		ports[i] = old;
	}

	itk_unlock(task);

	for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
		if (IP_VALID(ports[i]))
			ipc_port_release_send(ports[i]);

	/*
	 *	Now that the operation is known to be successful,
	 *	we can free the memory.
	 */

	if (portsCnt != 0)
		kfree((vm_offset_t) memory,
		      (vm_size_t) (portsCnt * sizeof(mach_port_t)));

	return KERN_SUCCESS;
}
710
711 /*
712 * Routine: mach_ports_lookup [kernel call]
713 * Purpose:
714 * Retrieves (clones) the stashed port send rights.
715 * Conditions:
716 * Nothing locked. If successful, the caller gets
717 * rights and memory.
718 * Returns:
719 * KERN_SUCCESS Retrieved the send rights.
720 * KERN_INVALID_ARGUMENT The task is null.
721 * KERN_INVALID_ARGUMENT The task is dead.
722 * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
723 */
724
725 kern_return_t
726 mach_ports_lookup(
727 task_t task,
728 mach_port_array_t *portsp,
729 mach_msg_type_number_t *portsCnt)
730 {
731 vm_offset_t memory;
732 vm_size_t size;
733 ipc_port_t *ports;
734 int i;
735
736 kern_return_t kr;
737
738 if (task == TASK_NULL)
739 return KERN_INVALID_ARGUMENT;
740
741 size = (vm_size_t) (TASK_PORT_REGISTER_MAX * sizeof(ipc_port_t));
742
743 memory = kalloc(size);
744 if (memory == 0)
745 return KERN_RESOURCE_SHORTAGE;
746
747 itk_lock(task);
748 if (task->itk_self == IP_NULL) {
749 itk_unlock(task);
750
751 kfree(memory, size);
752 return KERN_INVALID_ARGUMENT;
753 }
754
755 ports = (ipc_port_t *) memory;
756
757 /*
758 * Clone port rights. Because kalloc'd memory
759 * is wired, we won't fault while holding the task lock.
760 */
761
762 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
763 ports[i] = ipc_port_copy_send(task->itk_registered[i]);
764
765 itk_unlock(task);
766
767 *portsp = (mach_port_array_t) ports;
768 *portsCnt = TASK_PORT_REGISTER_MAX;
769 return KERN_SUCCESS;
770 }
771
772 /*
773 * Routine: convert_port_to_locked_task
774 * Purpose:
775 * Internal helper routine to convert from a port to a locked
776 * task. Used by several routines that try to convert from a
777 * task port to a reference on some task related object.
778 * Conditions:
779 * Nothing locked, blocking OK.
780 */
task_t
convert_port_to_locked_task(ipc_port_t port)
{
	/* Loop until we either grab the task lock or discover the port
	 * no longer names an active task. */
	while (IP_VALID(port)) {
		task_t task;

		ip_lock(port);
		if (!ip_active(port) || (ip_kotype(port) != IKOT_TASK)) {
			ip_unlock(port);
			return TASK_NULL;
		}
		task = (task_t) port->ip_kobject;
		assert(task != TASK_NULL);

		/*
		 * Normal lock ordering puts task_lock() before ip_lock().
		 * Attempt out-of-order locking here.
		 */
		if (task_lock_try(task)) {
			ip_unlock(port);
			return(task);
		}

		/* Contended: back off entirely and retry, so we never
		 * block on the task lock while holding the port lock. */
		ip_unlock(port);
		mutex_pause();
	}
	return TASK_NULL;
}
809
810 /*
811 * Routine: convert_port_to_task
812 * Purpose:
813 * Convert from a port to a task.
814 * Doesn't consume the port ref; produces a task ref,
815 * which may be null.
816 * Conditions:
817 * Nothing locked.
818 */
task_t
convert_port_to_task(
	ipc_port_t	port)
{
	task_t task;

	task = convert_port_to_locked_task(port);
	if (task) {
		/* Bump the ref count directly while the task lock from
		 * convert_port_to_locked_task is still held. */
		task->ref_count++;
		task_unlock(task);
	}
	return task;
}
832
833 /*
834 * Routine: convert_port_to_space
835 * Purpose:
836 * Convert from a port to a space.
837 * Doesn't consume the port ref; produces a space ref,
838 * which may be null.
839 * Conditions:
840 * Nothing locked.
841 */
ipc_space_t
convert_port_to_space(
	ipc_port_t	port)
{
	ipc_space_t space;
	task_t task;

	task = convert_port_to_locked_task(port);

	if (task == TASK_NULL)
		return IPC_SPACE_NULL;

	/* A terminated task's space is not handed out. */
	if (!task->active) {
		task_unlock(task);
		return IPC_SPACE_NULL;
	}

	/* Take a space reference while the task lock pins itk_space. */
	space = task->itk_space;
	is_reference(space);
	task_unlock(task);
	return (space);
}
864
/*
 * Convert a port to the UPL it names.  Doesn't consume the port ref;
 * produces a UPL reference (or NULL if the port is not an active
 * IKOT_UPL port).
 * NOTE(review): the kobject pointer is read under ip_lock but the
 * ref is taken after dropping it — presumably UPL lifetime is
 * otherwise guaranteed here; confirm.
 */
upl_t
convert_port_to_upl(
	ipc_port_t	port)
{
	upl_t upl;

	ip_lock(port);
	if (!ip_active(port) || (ip_kotype(port) != IKOT_UPL)) {
		ip_unlock(port);
		return (upl_t)NULL;
	}
	upl = (upl_t) port->ip_kobject;
	ip_unlock(port);
	upl_lock(upl);
	upl->ref_count+=1;
	upl_unlock(upl);
	return upl;
}
883
884 /*
885 * Routine: convert_port_entry_to_map
886 * Purpose:
887 * Convert from a port specifying an entry or a task
888 * to a map. Doesn't consume the port ref; produces a map ref,
889 * which may be null. Unlike convert_port_to_map, the
890 * port may be task or a named entry backed.
891 * Conditions:
892 * Nothing locked.
893 */
894
895
896 vm_map_t
897 convert_port_entry_to_map(
898 ipc_port_t port)
899 {
900 task_t task;
901 vm_map_t map;
902 vm_named_entry_t named_entry;
903
904 if(IP_VALID(port) && (ip_kotype(port) == IKOT_NAMED_ENTRY)) {
905 while(TRUE) {
906 ip_lock(port);
907 if(ip_active(port) && (ip_kotype(port)
908 == IKOT_NAMED_ENTRY)) {
909 named_entry =
910 (vm_named_entry_t)port->ip_kobject;
911 if (!(mutex_try(&(named_entry)->Lock))) {
912 ip_unlock(port);
913 mutex_pause();
914 continue;
915 }
916 named_entry->ref_count++;
917 mutex_unlock(&(named_entry)->Lock);
918 ip_unlock(port);
919 if ((named_entry->is_sub_map) &&
920 (named_entry->protection
921 & VM_PROT_WRITE)) {
922 map = named_entry->backing.map;
923 } else {
924 mach_destroy_memory_entry(port);
925 return VM_MAP_NULL;
926 }
927 vm_map_reference_swap(map);
928 mach_destroy_memory_entry(port);
929 break;
930 }
931 else
932 return VM_MAP_NULL;
933 }
934 } else {
935 task_t task;
936
937 task = convert_port_to_locked_task(port);
938
939 if (task == TASK_NULL)
940 return VM_MAP_NULL;
941
942 if (!task->active) {
943 task_unlock(task);
944 return VM_MAP_NULL;
945 }
946
947 map = task->map;
948 vm_map_reference_swap(map);
949 task_unlock(task);
950 }
951
952 return map;
953 }
954
955 /*
956 * Routine: convert_port_entry_to_object
957 * Purpose:
958 * Convert from a port specifying a named entry to an
959 * object. Doesn't consume the port ref; produces a map ref,
960 * which may be null.
961 * Conditions:
962 * Nothing locked.
963 */
964
965
966 vm_object_t
967 convert_port_entry_to_object(
968 ipc_port_t port)
969 {
970 vm_object_t object;
971 vm_named_entry_t named_entry;
972
973 if(IP_VALID(port) && (ip_kotype(port) == IKOT_NAMED_ENTRY)) {
974 while(TRUE) {
975 ip_lock(port);
976 if(ip_active(port) && (ip_kotype(port)
977 == IKOT_NAMED_ENTRY)) {
978 named_entry =
979 (vm_named_entry_t)port->ip_kobject;
980 if (!(mutex_try(&(named_entry)->Lock))) {
981 ip_unlock(port);
982 mutex_pause();
983 continue;
984 }
985 named_entry->ref_count++;
986 mutex_unlock(&(named_entry)->Lock);
987 ip_unlock(port);
988 if ((!named_entry->is_sub_map) &&
989 (named_entry->protection
990 & VM_PROT_WRITE)) {
991 object = named_entry->object;
992 } else {
993 mach_destroy_memory_entry(port);
994 return (vm_object_t)NULL;
995 }
996 vm_object_reference(named_entry->object);
997 mach_destroy_memory_entry(port);
998 break;
999 }
1000 else
1001 return (vm_object_t)NULL;
1002 }
1003 } else {
1004 return (vm_object_t)NULL;
1005 }
1006
1007 return object;
1008 }
1009
1010 /*
1011 * Routine: convert_port_to_map
1012 * Purpose:
1013 * Convert from a port to a map.
1014 * Doesn't consume the port ref; produces a map ref,
1015 * which may be null.
1016 * Conditions:
1017 * Nothing locked.
1018 */
1019
vm_map_t
convert_port_to_map(
	ipc_port_t	port)
{
	task_t task;
	vm_map_t map;

	task = convert_port_to_locked_task(port);

	if (task == TASK_NULL)
		return VM_MAP_NULL;

	/* A terminated task's map is not handed out. */
	if (!task->active) {
		task_unlock(task);
		return VM_MAP_NULL;
	}

	/* Take the map reference while the task lock pins task->map. */
	map = task->map;
	vm_map_reference_swap(map);
	task_unlock(task);
	return map;
}
1042
1043
1044 /*
1045 * Routine: convert_port_to_act
1046 * Purpose:
1047 * Convert from a port to a thr_act.
1048 * Doesn't consume the port ref; produces an thr_act ref,
1049 * which may be null.
1050 * Conditions:
1051 * Nothing locked.
1052 */
1053
thread_act_t
convert_port_to_act( ipc_port_t port )
{
	boolean_t r;
	thread_act_t thr_act = 0;

	/* ref_act_port_locked returns FALSE on lock contention, in
	 * which case we simply retry (the port may have died, which
	 * terminates the loop via IP_VALID / the TRUE return). */
	r = FALSE;
	while (!r && IP_VALID(port)) {
		ip_lock(port);
		r = ref_act_port_locked(port, &thr_act);
		/* port unlocked */
	}
	return (thr_act);
}
1068
/*
 * With the port locked, try to take a reference on the activation it
 * names.  Returns TRUE with *pthr_act set (possibly to 0 if the port
 * is not an active IKOT_ACT port) and the port unlocked; returns
 * FALSE — also with the port unlocked — when the act lock could not
 * be taken, so the caller should retry.
 */
boolean_t
ref_act_port_locked( ipc_port_t port, thread_act_t *pthr_act )
{
	thread_act_t thr_act;

	thr_act = 0;
	if (ip_active(port) &&
	    (ip_kotype(port) == IKOT_ACT)) {
		thr_act = (thread_act_t) port->ip_kobject;
		assert(thr_act != THR_ACT_NULL);

		/*
		 * Normal lock ordering is act_lock(), then ip_lock().
		 * Allow out-of-order locking here, using
		 * act_reference_act_locked() to accomodate it.
		 */
		if (!act_lock_try(thr_act)) {
			ip_unlock(port);
			mutex_pause();
			return (FALSE);
		}
		act_locked_act_reference(thr_act);
		act_unlock(thr_act);
	}
	*pthr_act = thr_act;
	ip_unlock(port);
	return (TRUE);
}
1097
1098 /*
1099 * Routine: port_name_to_act
1100 * Purpose:
1101 * Convert from a port name to an act reference
1102 * A name of MACH_PORT_NULL is valid for the null act
1103 * Conditions:
1104 * Nothing locked.
1105 */
1106 thread_act_t
1107 port_name_to_act(
1108 mach_port_name_t name)
1109 {
1110 thread_act_t thr_act = THR_ACT_NULL;
1111 ipc_port_t kern_port;
1112 kern_return_t kr;
1113
1114 if (MACH_PORT_VALID(name)) {
1115 kr = ipc_object_copyin(current_space(), name,
1116 MACH_MSG_TYPE_COPY_SEND,
1117 (ipc_object_t *) &kern_port);
1118 if (kr != KERN_SUCCESS)
1119 return THR_ACT_NULL;
1120
1121 thr_act = convert_port_to_act(kern_port);
1122
1123 if (IP_VALID(kern_port))
1124 ipc_port_release_send(kern_port);
1125 }
1126 return thr_act;
1127 }
1128
1129 task_t
1130 port_name_to_task(
1131 mach_port_name_t name)
1132 {
1133 ipc_port_t kern_port;
1134 kern_return_t kr;
1135 task_t task = TASK_NULL;
1136
1137 if (MACH_PORT_VALID(name)) {
1138 kr = ipc_object_copyin(current_space(), name,
1139 MACH_MSG_TYPE_COPY_SEND,
1140 (ipc_object_t *) &kern_port);
1141 if (kr != KERN_SUCCESS)
1142 return TASK_NULL;
1143
1144 task = convert_port_to_task(kern_port);
1145
1146 if (IP_VALID(kern_port))
1147 ipc_port_release_send(kern_port);
1148 }
1149 return task;
1150 }
1151
1152 /*
1153 * Routine: convert_task_to_port
1154 * Purpose:
1155 * Convert from a task to a port.
1156 * Consumes a task ref; produces a naked send right
1157 * which may be invalid.
1158 * Conditions:
1159 * Nothing locked.
1160 */
1161
ipc_port_t
convert_task_to_port(
	task_t		task)
{
	ipc_port_t port;

	itk_lock(task);
	if (task->itk_self != IP_NULL)
		/* NOTE: the NORMA_TASK conditional interleaves a
		 * brace-less if/else with the outer one — the final
		 * `else` below pairs with the outermost `if`. */
#if NORMA_TASK
	if (task->map == VM_MAP_NULL)
		/* norma placeholder task */
		port = ipc_port_copy_send(task->itk_self);
	else
#endif /* NORMA_TASK */
		port = ipc_port_make_send(task->itk_self);
	else
		port = IP_NULL;
	itk_unlock(task);

	/* Consumes the caller's task reference. */
	task_deallocate(task);
	return port;
}
1184
1185 /*
1186 * Routine: convert_act_to_port
1187 * Purpose:
1188 * Convert from a thr_act to a port.
1189 * Consumes an thr_act ref; produces a naked send right
1190 * which may be invalid.
1191 * Conditions:
1192 * Nothing locked.
1193 */
1194
1195 ipc_port_t
1196 convert_act_to_port(thr_act)
1197 thread_act_t thr_act;
1198 {
1199 ipc_port_t port;
1200
1201 act_lock(thr_act);
1202 if (thr_act->ith_self != IP_NULL)
1203 port = ipc_port_make_send(thr_act->ith_self);
1204 else
1205 port = IP_NULL;
1206 act_unlock(thr_act);
1207
1208 act_deallocate(thr_act);
1209 return port;
1210 }
1211
1212 /*
1213 * Routine: space_deallocate
1214 * Purpose:
1215 * Deallocate a space ref produced by convert_port_to_space.
1216 * Conditions:
1217 * Nothing locked.
1218 */
1219
1220 void
1221 space_deallocate(
1222 ipc_space_t space)
1223 {
1224 if (space != IS_NULL)
1225 is_release(space);
1226 }
1227
1228 /*
1229 * Routine: thread/task_set_exception_ports [kernel call]
1230 * Purpose:
1231 * Sets the thread/task exception port, flavor and
1232 * behavior for the exception types specified by the mask.
1233 * There will be one send right per exception per valid
1234 * port.
1235 * Conditions:
1236 * Nothing locked. If successful, consumes
1237 * the supplied send right.
1238 * Returns:
1239 * KERN_SUCCESS Changed the special port.
1240 * KERN_INVALID_ARGUMENT The thread is null,
1241 * Illegal mask bit set.
1242 * Illegal exception behavior
1243 * KERN_FAILURE The thread is dead.
1244 */
1245
kern_return_t
thread_set_exception_ports(
	thread_act_t		thr_act,
	exception_mask_t	exception_mask,
	ipc_port_t		new_port,
	exception_behavior_t	new_behavior,
	thread_state_flavor_t	new_flavor)
{
	register int	i;
	ipc_port_t	old_port[EXC_TYPES_COUNT];

	if (!thr_act)
		return KERN_INVALID_ARGUMENT;

	if (exception_mask & ~EXC_MASK_ALL)
		return KERN_INVALID_ARGUMENT;

	/* Behavior is only checked when a real port is supplied;
	 * a null/dead port clears the selected exception slots. */
	if (IP_VALID(new_port)) {
		switch (new_behavior) {
		case EXCEPTION_DEFAULT:
		case EXCEPTION_STATE:
		case EXCEPTION_STATE_IDENTITY:
			break;
		default:
			return KERN_INVALID_ARGUMENT;
		}
	}

	/*
	 * Check the validity of the thread_state_flavor by calling the
	 * VALID_THREAD_STATE_FLAVOR architecture dependent macro defined in
	 * osfmk/mach/ARCHITECTURE/thread_status.h
	 */
	if (!VALID_THREAD_STATE_FLAVOR(new_flavor)) {
		return KERN_INVALID_ARGUMENT;
	}

	act_lock(thr_act);
	if (!thr_act->active) {
		act_unlock(thr_act);
		return KERN_FAILURE;
	}

	/* Install a copied send right for every exception type selected
	 * by the mask, stashing the displaced rights for release below. */
	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		if (exception_mask & (1 << i)) {
			old_port[i] = thr_act->exc_actions[i].port;
			thr_act->exc_actions[i].port =
				ipc_port_copy_send(new_port);
			thr_act->exc_actions[i].behavior = new_behavior;
			thr_act->exc_actions[i].flavor = new_flavor;
		} else
			old_port[i] = IP_NULL;
	}/* for */
	/*
	 * Consume send rights without any lock held.
	 */
	act_unlock(thr_act);
	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++)
		if (IP_VALID(old_port[i]))
			ipc_port_release_send(old_port[i]);
	if (IP_VALID(new_port))		 /* consume send right */
		ipc_port_release_send(new_port);

	return KERN_SUCCESS;
}/* thread_set_exception_port */
1311
kern_return_t
task_set_exception_ports(
	task_t			task,
	exception_mask_t	exception_mask,
	ipc_port_t		new_port,
	exception_behavior_t	new_behavior,
	thread_state_flavor_t	new_flavor)
{
	register int	i;
	ipc_port_t	old_port[EXC_TYPES_COUNT];

	if (task == TASK_NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	if (exception_mask & ~EXC_MASK_ALL) {
		return KERN_INVALID_ARGUMENT;
	}

	/* Behavior is only checked when a real port is supplied;
	 * a null/dead port clears the selected exception slots. */
	if (IP_VALID(new_port)) {
		switch (new_behavior) {
		case EXCEPTION_DEFAULT:
		case EXCEPTION_STATE:
		case EXCEPTION_STATE_IDENTITY:
			break;
		default:
			return KERN_INVALID_ARGUMENT;
		}
	}
	/* Cannot easily check "new_flavor", but that just means that
	 * the flavor in the generated exception message might be garbage:
	 * GIGO */

	itk_lock(task);
	if (task->itk_self == IP_NULL) {
		itk_unlock(task);
		return KERN_FAILURE;
	}

	/* Install a copied send right for every exception type selected
	 * by the mask, stashing the displaced rights for release below. */
	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
		if (exception_mask & (1 << i)) {
			old_port[i] = task->exc_actions[i].port;
			task->exc_actions[i].port =
				ipc_port_copy_send(new_port);
			task->exc_actions[i].behavior = new_behavior;
			task->exc_actions[i].flavor = new_flavor;
		} else
			old_port[i] = IP_NULL;
	}/* for */

	/*
	 * Consume send rights without any lock held.
	 */
	itk_unlock(task);
	for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++)
		if (IP_VALID(old_port[i]))
			ipc_port_release_send(old_port[i]);
	if (IP_VALID(new_port))		 /* consume send right */
		ipc_port_release_send(new_port);

	return KERN_SUCCESS;
}/* task_set_exception_port */
1374
1375 /*
1376 * Routine: thread/task_swap_exception_ports [kernel call]
1377 * Purpose:
1378 * Sets the thread/task exception port, flavor and
1379 * behavior for the exception types specified by the
1380 * mask.
1381 *
1382 * The old ports, behavior and flavors are returned
1383 * Count specifies the array sizes on input and
1384 * the number of returned ports etc. on output. The
1385 * arrays must be large enough to hold all the returned
1386 * data, MIG returnes an error otherwise. The masks
1387 * array specifies the corresponding exception type(s).
1388 *
1389 * Conditions:
1390 * Nothing locked. If successful, consumes
1391 * the supplied send right.
1392 *
 * Returns up to [in] CountCnt elements.
1394 * Returns:
1395 * KERN_SUCCESS Changed the special port.
1396 * KERN_INVALID_ARGUMENT The thread is null,
1397 * Illegal mask bit set.
1398 * Illegal exception behavior
1399 * KERN_FAILURE The thread is dead.
1400 */
1401
1402 kern_return_t
1403 thread_swap_exception_ports(
1404 thread_act_t thr_act,
1405 exception_mask_t exception_mask,
1406 ipc_port_t new_port,
1407 exception_behavior_t new_behavior,
1408 thread_state_flavor_t new_flavor,
1409 exception_mask_array_t masks,
1410 mach_msg_type_number_t * CountCnt,
1411 exception_port_array_t ports,
1412 exception_behavior_array_t behaviors,
1413 thread_state_flavor_array_t flavors )
1414 {
1415 register int i,
1416 j,
1417 count;
1418 ipc_port_t old_port[EXC_TYPES_COUNT];
1419
1420 if (!thr_act)
1421 return KERN_INVALID_ARGUMENT;
1422
1423 if (exception_mask & ~EXC_MASK_ALL) {
1424 return KERN_INVALID_ARGUMENT;
1425 }
1426
1427 if (IP_VALID(new_port)) {
1428 switch (new_behavior) {
1429 case EXCEPTION_DEFAULT:
1430 case EXCEPTION_STATE:
1431 case EXCEPTION_STATE_IDENTITY:
1432 break;
1433 default:
1434 return KERN_INVALID_ARGUMENT;
1435 }
1436 }
1437 /* Cannot easily check "new_flavor", but that just means that
1438 * the flavor in the generated exception message might be garbage:
1439 * GIGO */
1440
1441 act_lock(thr_act);
1442 if (!thr_act->active) {
1443 act_unlock(thr_act);
1444 return KERN_FAILURE;
1445 }
1446
1447 count = 0;
1448
1449 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
1450 if (exception_mask & (1 << i)) {
1451 for (j = 0; j < count; j++) {
1452 /*
1453 * search for an identical entry, if found
1454 * set corresponding mask for this exception.
1455 */
1456 if (thr_act->exc_actions[i].port == ports[j] &&
1457 thr_act->exc_actions[i].behavior ==behaviors[j]
1458 && thr_act->exc_actions[i].flavor ==flavors[j])
1459 {
1460 masks[j] |= (1 << i);
1461 break;
1462 }
1463 }/* for */
1464 if (j == count) {
1465 masks[j] = (1 << i);
1466 ports[j] =
1467 ipc_port_copy_send(thr_act->exc_actions[i].port);
1468
1469 behaviors[j] = thr_act->exc_actions[i].behavior;
1470 flavors[j] = thr_act->exc_actions[i].flavor;
1471 count++;
1472 }
1473
1474 old_port[i] = thr_act->exc_actions[i].port;
1475 thr_act->exc_actions[i].port =
1476 ipc_port_copy_send(new_port);
1477 thr_act->exc_actions[i].behavior = new_behavior;
1478 thr_act->exc_actions[i].flavor = new_flavor;
1479 if (count > *CountCnt) {
1480 break;
1481 }
1482 } else
1483 old_port[i] = IP_NULL;
1484 }/* for */
1485
1486 /*
1487 * Consume send rights without any lock held.
1488 */
1489 act_unlock(thr_act);
1490 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++)
1491 if (IP_VALID(old_port[i]))
1492 ipc_port_release_send(old_port[i]);
1493 if (IP_VALID(new_port)) /* consume send right */
1494 ipc_port_release_send(new_port);
1495 *CountCnt = count;
1496 return KERN_SUCCESS;
1497 }/* thread_swap_exception_ports */
1498
1499 kern_return_t
1500 task_swap_exception_ports(
1501 task_t task,
1502 exception_mask_t exception_mask,
1503 ipc_port_t new_port,
1504 exception_behavior_t new_behavior,
1505 thread_state_flavor_t new_flavor,
1506 exception_mask_array_t masks,
1507 mach_msg_type_number_t * CountCnt,
1508 exception_port_array_t ports,
1509 exception_behavior_array_t behaviors,
1510 thread_state_flavor_array_t flavors )
1511 {
1512 register int i,
1513 j,
1514 count;
1515 ipc_port_t old_port[EXC_TYPES_COUNT];
1516
1517 if (task == TASK_NULL)
1518 return KERN_INVALID_ARGUMENT;
1519
1520 if (exception_mask & ~EXC_MASK_ALL) {
1521 return KERN_INVALID_ARGUMENT;
1522 }
1523
1524 if (IP_VALID(new_port)) {
1525 switch (new_behavior) {
1526 case EXCEPTION_DEFAULT:
1527 case EXCEPTION_STATE:
1528 case EXCEPTION_STATE_IDENTITY:
1529 break;
1530 default:
1531 return KERN_INVALID_ARGUMENT;
1532 }
1533 }
1534 /* Cannot easily check "new_flavor", but that just means that
1535 * the flavor in the generated exception message might be garbage:
1536 * GIGO */
1537
1538 itk_lock(task);
1539 if (task->itk_self == IP_NULL) {
1540 itk_unlock(task);
1541 return KERN_FAILURE;
1542 }
1543
1544 count = 0;
1545
1546 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
1547 if (exception_mask & (1 << i)) {
1548 for (j = 0; j < count; j++) {
1549 /*
1550 * search for an identical entry, if found
1551 * set corresponding mask for this exception.
1552 */
1553 if (task->exc_actions[i].port == ports[j] &&
1554 task->exc_actions[i].behavior == behaviors[j]
1555 && task->exc_actions[i].flavor == flavors[j])
1556 {
1557 masks[j] |= (1 << i);
1558 break;
1559 }
1560 }/* for */
1561 if (j == count) {
1562 masks[j] = (1 << i);
1563 ports[j] =
1564 ipc_port_copy_send(task->exc_actions[i].port);
1565 behaviors[j] = task->exc_actions[i].behavior;
1566 flavors[j] = task->exc_actions[i].flavor;
1567 count++;
1568 }
1569 old_port[i] = task->exc_actions[i].port;
1570 task->exc_actions[i].port =
1571 ipc_port_copy_send(new_port);
1572 task->exc_actions[i].behavior = new_behavior;
1573 task->exc_actions[i].flavor = new_flavor;
1574 if (count > *CountCnt) {
1575 break;
1576 }
1577 } else
1578 old_port[i] = IP_NULL;
1579 }/* for */
1580
1581
1582 /*
1583 * Consume send rights without any lock held.
1584 */
1585 itk_unlock(task);
1586 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++)
1587 if (IP_VALID(old_port[i]))
1588 ipc_port_release_send(old_port[i]);
1589 if (IP_VALID(new_port)) /* consume send right */
1590 ipc_port_release_send(new_port);
1591 *CountCnt = count;
1592
1593 return KERN_SUCCESS;
1594 }/* task_swap_exception_ports */
1595
1596 /*
1597 * Routine: thread/task_get_exception_ports [kernel call]
1598 * Purpose:
1599 * Clones a send right for each of the thread/task's exception
1600 * ports specified in the mask and returns the behaviour
1601 * and flavor of said port.
1602 *
 * Returns up to [in] CountCnt elements.
1604 *
1605 * Conditions:
1606 * Nothing locked.
1607 * Returns:
1608 * KERN_SUCCESS Extracted a send right.
1609 * KERN_INVALID_ARGUMENT The thread is null,
1610 * Invalid special port,
1611 * Illegal mask bit set.
1612 * KERN_FAILURE The thread is dead.
1613 */
1614
1615 kern_return_t
1616 thread_get_exception_ports(
1617 thread_act_t thr_act,
1618 exception_mask_t exception_mask,
1619 exception_mask_array_t masks,
1620 mach_msg_type_number_t * CountCnt,
1621 exception_port_array_t ports,
1622 exception_behavior_array_t behaviors,
1623 thread_state_flavor_array_t flavors )
1624 {
1625 register int i,
1626 j,
1627 count;
1628
1629 if (!thr_act)
1630 return KERN_INVALID_ARGUMENT;
1631
1632 if (exception_mask & ~EXC_MASK_ALL) {
1633 return KERN_INVALID_ARGUMENT;
1634 }
1635
1636 act_lock(thr_act);
1637 if (!thr_act->active) {
1638 act_unlock(thr_act);
1639 return KERN_FAILURE;
1640 }
1641
1642 count = 0;
1643
1644 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
1645 if (exception_mask & (1 << i)) {
1646 for (j = 0; j < count; j++) {
1647 /*
1648 * search for an identical entry, if found
1649 * set corresponding mask for this exception.
1650 */
1651 if (thr_act->exc_actions[i].port == ports[j] &&
1652 thr_act->exc_actions[i].behavior ==behaviors[j]
1653 && thr_act->exc_actions[i].flavor == flavors[j])
1654 {
1655 masks[j] |= (1 << i);
1656 break;
1657 }
1658 }/* for */
1659 if (j == count) {
1660 masks[j] = (1 << i);
1661 ports[j] =
1662 ipc_port_copy_send(thr_act->exc_actions[i].port);
1663 behaviors[j] = thr_act->exc_actions[i].behavior;
1664 flavors[j] = thr_act->exc_actions[i].flavor;
1665 count++;
1666 if (count >= *CountCnt) {
1667 break;
1668 }
1669 }
1670 }
1671 }/* for */
1672
1673 act_unlock(thr_act);
1674
1675 *CountCnt = count;
1676 return KERN_SUCCESS;
1677 }/* thread_get_exception_ports */
1678
1679 kern_return_t
1680 task_get_exception_ports(
1681 task_t task,
1682 exception_mask_t exception_mask,
1683 exception_mask_array_t masks,
1684 mach_msg_type_number_t * CountCnt,
1685 exception_port_array_t ports,
1686 exception_behavior_array_t behaviors,
1687 thread_state_flavor_array_t flavors )
1688 {
1689 register int i,
1690 j,
1691 count;
1692
1693 if (task == TASK_NULL)
1694 return KERN_INVALID_ARGUMENT;
1695
1696 if (exception_mask & ~EXC_MASK_ALL) {
1697 return KERN_INVALID_ARGUMENT;
1698 }
1699
1700 itk_lock(task);
1701 if (task->itk_self == IP_NULL) {
1702 itk_unlock(task);
1703 return KERN_FAILURE;
1704 }
1705
1706 count = 0;
1707
1708 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
1709 if (exception_mask & (1 << i)) {
1710 for (j = 0; j < count; j++) {
1711 /*
1712 * search for an identical entry, if found
1713 * set corresponding mask for this exception.
1714 */
1715 if (task->exc_actions[i].port == ports[j] &&
1716 task->exc_actions[i].behavior == behaviors[j]
1717 && task->exc_actions[i].flavor == flavors[j])
1718 {
1719 masks[j] |= (1 << i);
1720 break;
1721 }
1722 }/* for */
1723 if (j == count) {
1724 masks[j] = (1 << i);
1725 ports[j] =
1726 ipc_port_copy_send(task->exc_actions[i].port);
1727 behaviors[j] = task->exc_actions[i].behavior;
1728 flavors[j] = task->exc_actions[i].flavor;
1729 count++;
1730 if (count > *CountCnt) {
1731 break;
1732 }
1733 }
1734 }
1735 }/* for */
1736
1737 itk_unlock(task);
1738
1739 *CountCnt = count;
1740 return KERN_SUCCESS;
1741 }/* task_get_exception_ports */