1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 /*
23 * @OSF_COPYRIGHT@
24 */
25 /*
26 * Mach Operating System
27 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
28 * All Rights Reserved.
29 *
30 * Permission to use, copy, modify and distribute this software and its
31 * documentation is hereby granted, provided that both the copyright
32 * notice and this permission notice appear in all copies of the
33 * software, derivative works or modified versions, and any portions
34 * thereof, and that both notices appear in supporting documentation.
35 *
36 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
37 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
38 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
39 *
40 * Carnegie Mellon requests users of this software to return to
41 *
42 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
43 * School of Computer Science
44 * Carnegie Mellon University
45 * Pittsburgh PA 15213-3890
46 *
47 * any improvements or extensions that they make and grant Carnegie Mellon
48 * the rights to redistribute these changes.
49 */
50 /*
51 */
52
53 /*
54 * File: ipc_tt.c
55 * Purpose:
56 * Task and thread related IPC functions.
57 */
58
59 #include <mach/boolean.h>
60 #include <mach_rt.h>
61 #include <mach/kern_return.h>
62 #include <mach/mach_param.h>
63 #include <mach/task_special_ports.h>
64 #include <mach/thread_special_ports.h>
65 #include <mach/thread_status.h>
66 #include <mach/exception_types.h>
67 #include <mach/mach_traps.h>
68 #include <mach/task_server.h>
69 #include <mach/thread_act_server.h>
70 #include <mach/mach_host_server.h>
71 #include <mach/vm_map_server.h>
72 #include <kern/host.h>
73 #include <kern/ipc_tt.h>
74 #include <kern/thread_act.h>
75 #include <kern/misc_protos.h>
76 #include <vm/vm_pageout.h>
77
78 /*
79 * Routine: ipc_task_init
80 * Purpose:
81 * Initialize a task's IPC state.
82 *
83 * If the parent is non-null, some state will be inherited from it.
84 * The parent must be appropriately initialized.
85 * Conditions:
86 * Nothing locked.
87 */
88
89 void
90 ipc_task_init(
91 task_t task,
92 task_t parent)
93 {
94 ipc_space_t space;
95 ipc_port_t kport;
96 kern_return_t kr;
97 int i;
98
99
100 kr = ipc_space_create(&ipc_table_entries[0], &space);
101 if (kr != KERN_SUCCESS)
102 panic("ipc_task_init");
103
104
105 kport = ipc_port_alloc_kernel();
106 if (kport == IP_NULL)
107 panic("ipc_task_init");
108
109 itk_lock_init(task);
110 task->itk_self = kport;
111 task->itk_sself = ipc_port_make_send(kport);
112 task->itk_space = space;
113 space->is_fast = task->kernel_loaded;
114
115 if (parent == TASK_NULL) {
116 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
117 task->exc_actions[i].port = IP_NULL;
118 }/* for */
119 task->exc_actions[EXC_MACH_SYSCALL].port =
120 ipc_port_make_send(realhost.host_self);
121 task->itk_host = ipc_port_make_send(realhost.host_self);
122 task->itk_bootstrap = IP_NULL;
123 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
124 task->itk_registered[i] = IP_NULL;
125 } else {
126 itk_lock(parent);
127 assert(parent->itk_self != IP_NULL);
128
129 /* inherit registered ports */
130
131 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
132 task->itk_registered[i] =
133 ipc_port_copy_send(parent->itk_registered[i]);
134
135 /* inherit exception and bootstrap ports */
136
137 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
138 task->exc_actions[i].port =
139 ipc_port_copy_send(parent->exc_actions[i].port);
140 task->exc_actions[i].flavor =
141 parent->exc_actions[i].flavor;
142 task->exc_actions[i].behavior =
143 parent->exc_actions[i].behavior;
144 }/* for */
145 task->itk_host =
146 ipc_port_copy_send(parent->itk_host);
147
148 task->itk_bootstrap =
149 ipc_port_copy_send(parent->itk_bootstrap);
150
151 itk_unlock(parent);
152 }
153 }
154
155 /*
156 * Routine: ipc_task_enable
157 * Purpose:
158 * Enable a task for IPC access.
159 * Conditions:
160 * Nothing locked.
161 */
162
163 void
164 ipc_task_enable(
165 task_t task)
166 {
167 ipc_port_t kport;
168
169 itk_lock(task);
170 kport = task->itk_self;
171 if (kport != IP_NULL)
172 ipc_kobject_set(kport, (ipc_kobject_t) task, IKOT_TASK);
173 itk_unlock(task);
174 }
175
176 /*
177 * Routine: ipc_task_disable
178 * Purpose:
179 * Disable IPC access to a task.
180 * Conditions:
181 * Nothing locked.
182 */
183
184 void
185 ipc_task_disable(
186 task_t task)
187 {
188 ipc_port_t kport;
189
190 itk_lock(task);
191 kport = task->itk_self;
192 if (kport != IP_NULL)
193 ipc_kobject_set(kport, IKO_NULL, IKOT_NONE);
194 itk_unlock(task);
195 }
196
197 /*
198 * Routine: ipc_task_terminate
199 * Purpose:
200 * Clean up and destroy a task's IPC state.
201 * Conditions:
202 * Nothing locked. The task must be suspended.
203 * (Or the current thread must be in the task.)
204 */
205
206 void
207 ipc_task_terminate(
208 task_t task)
209 {
210 ipc_port_t kport;
211 int i;
212
213 itk_lock(task);
214 kport = task->itk_self;
215
216 if (kport == IP_NULL) {
217 /* the task is already terminated (can this happen?) */
218 itk_unlock(task);
219 return;
220 }
221
222 task->itk_self = IP_NULL;
223 itk_unlock(task);
224
225 /* release the naked send rights */
226
227 if (IP_VALID(task->itk_sself))
228 ipc_port_release_send(task->itk_sself);
229
230 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
231 if (IP_VALID(task->exc_actions[i].port)) {
232 ipc_port_release_send(task->exc_actions[i].port);
233 }
234 }/* for */
235 if (IP_VALID(task->itk_host))
236 ipc_port_release_send(task->itk_host);
237
238 if (IP_VALID(task->itk_bootstrap))
239 ipc_port_release_send(task->itk_bootstrap);
240
241 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
242 if (IP_VALID(task->itk_registered[i]))
243 ipc_port_release_send(task->itk_registered[i]);
244
245 ipc_port_release_send(task->wired_ledger_port);
246 ipc_port_release_send(task->paged_ledger_port);
247
248 /* destroy the kernel port */
249 ipc_port_dealloc_kernel(kport);
250 }
251
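/*
 * Example (editorial sketch, not part of the original source):
 * the expected call sequence from a hypothetical in-kernel
 * task-creation/teardown path.  The wrapper function name is
 * illustrative only; the real call sites are in kern/task.c.
 */
#if 0	/* illustrative sketch -- not compiled */
static void
example_task_ipc_lifecycle(task_t new_task, task_t parent_task)
{
	ipc_task_init(new_task, parent_task);	/* space, kernel port, inherited rights */
	ipc_task_enable(new_task);		/* kernel port now names the task */

	/* ... task executes ... */

	ipc_task_disable(new_task);		/* detach the kernel port from the task */
	ipc_task_terminate(new_task);		/* release send rights, destroy the port */
}
#endif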
252 /*
253 * Routine: ipc_thread_init
254 * Purpose:
255 * Initialize a thread's IPC state.
256 * Conditions:
257 * Nothing locked.
258 */
259
260 void
261 ipc_thread_init(
262 thread_t thread)
263 {
264 ipc_kmsg_queue_init(&thread->ith_messages);
265 thread->ith_mig_reply = MACH_PORT_NULL;
266 thread->ith_rpc_reply = IP_NULL;
267 }
268
269 /*
270 * Routine: ipc_thread_terminate
271 * Purpose:
272 * Clean up and destroy a thread's IPC state.
273 * Conditions:
274 * Nothing locked. The thread must be suspended.
275 * (Or be the current thread.)
276 */
277
278 void
279 ipc_thread_terminate(
280 thread_t thread)
281 {
282 assert(ipc_kmsg_queue_empty(&thread->ith_messages));
283
284 if (thread->ith_rpc_reply != IP_NULL)
285 ipc_port_dealloc_reply(thread->ith_rpc_reply);
286 thread->ith_rpc_reply = IP_NULL;
287 }
288
289 /*
290 * Routine: ipc_thr_act_init
291 * Purpose:
292 * Initialize a thr_act's IPC state.
293 * Conditions:
294 * Nothing locked.
295 */
296
297 void
298 ipc_thr_act_init(task_t task, thread_act_t thr_act)
299 {
300 ipc_port_t kport; int i;
301
302 kport = ipc_port_alloc_kernel();
303 if (kport == IP_NULL)
304 panic("ipc_thr_act_init");
305
306 thr_act->ith_self = kport;
307 thr_act->ith_sself = ipc_port_make_send(kport);
308
309 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++)
310 thr_act->exc_actions[i].port = IP_NULL;
311
312 thr_act->exc_actions[EXC_MACH_SYSCALL].port =
313 ipc_port_make_send(realhost.host_self);
314
315 ipc_kobject_set(kport, (ipc_kobject_t) thr_act, IKOT_ACT);
316 }
317
318 void
319 ipc_thr_act_disable(thread_act_t thr_act)
320 {
321 int i;
322 ipc_port_t kport;
323
324 kport = thr_act->ith_self;
325
326 if (kport != IP_NULL)
327 ipc_kobject_set(kport, IKO_NULL, IKOT_NONE);
328 }
329
330 void
331 ipc_thr_act_terminate(thread_act_t thr_act)
332 {
333 ipc_port_t kport; int i;
334
335 kport = thr_act->ith_self;
336
337 if (kport == IP_NULL) {
338 /* the thread is already terminated (can this happen?) */
339 return;
340 }
341
342 thr_act->ith_self = IP_NULL;
343
344 /* release the naked send rights */
345
346 if (IP_VALID(thr_act->ith_sself))
347 ipc_port_release_send(thr_act->ith_sself);
348 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
349 if (IP_VALID(thr_act->exc_actions[i].port))
350 ipc_port_release_send(thr_act->exc_actions[i].port);
351 }
352
353 /* destroy the kernel port */
354 ipc_port_dealloc_kernel(kport);
355 }
356
357 /*
358 * Routine: retrieve_task_self_fast
359 * Purpose:
360 * Optimized version of retrieve_task_self
361 * that only works for the current task.
362 *
363 * Return a send right (possibly null/dead)
364 * for the task's user-visible self port.
365 * Conditions:
366 * Nothing locked.
367 */
368
369 ipc_port_t
370 retrieve_task_self_fast(
371 register task_t task)
372 {
373 register ipc_port_t port;
374
375 assert(task == current_task());
376
377 itk_lock(task);
378 assert(task->itk_self != IP_NULL);
379
380 if ((port = task->itk_sself) == task->itk_self) {
381 /* no interposing */
382
383 ip_lock(port);
384 assert(ip_active(port));
385 ip_reference(port);
386 port->ip_srights++;
387 ip_unlock(port);
388 } else
389 port = ipc_port_copy_send(port);
390 itk_unlock(task);
391
392 return port;
393 }
394
395 /*
396 * Routine: retrieve_act_self_fast
397 * Purpose:
398 * Optimized version of retrieve_thread_self
399 * that only works for the current thread.
400 *
401 * Return a send right (possibly null/dead)
402 * for the thread's user-visible self port.
403 * Conditions:
404 * Nothing locked.
405 */
406
407 ipc_port_t
408 retrieve_act_self_fast(thread_act_t thr_act)
409 {
410 register ipc_port_t port;
411
412 assert(thr_act == current_act());
413 act_lock(thr_act);
414 assert(thr_act->ith_self != IP_NULL);
415
416 if ((port = thr_act->ith_sself) == thr_act->ith_self) {
417 /* no interposing */
418
419 ip_lock(port);
420 assert(ip_active(port));
421 ip_reference(port);
422 port->ip_srights++;
423 ip_unlock(port);
424 } else
425 port = ipc_port_copy_send(port);
426 act_unlock(thr_act);
427
428 return port;
429 }
430
431 /*
432 * Routine: task_self_trap [mach trap]
433 * Purpose:
434 * Give the caller send rights for its own task port.
435 * Conditions:
436 * Nothing locked.
437 * Returns:
438 * MACH_PORT_NULL if there are any resource failures
439 * or other errors.
440 */
441
442 mach_port_name_t
443 task_self_trap(void)
444 {
445 task_t task = current_task();
446 ipc_port_t sright;
447
448 sright = retrieve_task_self_fast(task);
449 return ipc_port_copyout_send(sright, task->itk_space);
450 }
451
452 /*
453 * Routine: thread_self_trap [mach trap]
454 * Purpose:
455 * Give the caller send rights for its own thread port.
456 * Conditions:
457 * Nothing locked.
458 * Returns:
459 * MACH_PORT_NULL if there are any resource failures
460 * or other errors.
461 */
462
463 mach_port_name_t
464 thread_self_trap(void)
465 {
466 thread_act_t thr_act = current_act();
467 task_t task = thr_act->task;
468 ipc_port_t sright;
469
470 sright = retrieve_act_self_fast(thr_act);
471 return ipc_port_copyout_send(sright, task->itk_space);
472 }
473
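/*
 * Example (editorial sketch): user-space use of these traps via the
 * libsystem wrappers.  This is a standalone user program, not kernel
 * code; it assumes only <mach/mach.h>.
 */
#if 0	/* user-space illustration -- not compiled here */
#include <mach/mach.h>

void
example_self_ports(void)
{
	mach_port_t task = mach_task_self();	/* cached send right; do not deallocate */
	mach_port_t thread = mach_thread_self();/* each call returns a send right
						 * that must be deallocated */

	/* ... use the rights ... */

	(void) mach_port_deallocate(mach_task_self(), thread);
}
#endif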
474 /*
475 * Routine: mach_reply_port [mach trap]
476 * Purpose:
477 * Allocate a port for the caller.
478 * Conditions:
479 * Nothing locked.
480 * Returns:
481 * MACH_PORT_NULL if there are any resource failures
482 * or other errors.
483 */
484
485 mach_port_name_t
486 mach_reply_port(void)
487 {
488 ipc_port_t port;
489 mach_port_name_t name;
490 kern_return_t kr;
491
492 kr = ipc_port_alloc(current_task()->itk_space, &name, &port);
493 if (kr == KERN_SUCCESS)
494 ip_unlock(port);
495 else
496 name = MACH_PORT_NULL;
497
498 return name;
499 }
500
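/*
 * Example (editorial sketch): a user program obtaining a fresh receive
 * right with mach_reply_port() and cleaning it up.  Standalone
 * user-space code, shown only for illustration.
 */
#if 0	/* user-space illustration -- not compiled here */
#include <mach/mach.h>

void
example_reply_port(void)
{
	mach_port_t reply = mach_reply_port();	/* new receive right, no send rights */

	if (reply == MACH_PORT_NULL)
		return;				/* resource failure */

	/* ... use as the msgh_local_port of a request message ... */

	(void) mach_port_mod_refs(mach_task_self(), reply,
				  MACH_PORT_RIGHT_RECEIVE, -1);
}
#endif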
501 /*
502 * Routine: task_get_special_port [kernel call]
503 * Purpose:
504 * Clones a send right for one of the task's
505 * special ports.
506 * Conditions:
507 * Nothing locked.
508 * Returns:
509 * KERN_SUCCESS Extracted a send right.
510 * KERN_INVALID_ARGUMENT The task is null.
511 * KERN_FAILURE The task/space is dead.
512 * KERN_INVALID_ARGUMENT Invalid special port.
513 */
514
515 kern_return_t
516 task_get_special_port(
517 task_t task,
518 int which,
519 ipc_port_t *portp)
520 {
521 ipc_port_t *whichp;
522 ipc_port_t port;
523
524 if (task == TASK_NULL)
525 return KERN_INVALID_ARGUMENT;
526
527 switch (which) {
528 case TASK_KERNEL_PORT:
529 whichp = &task->itk_sself;
530 break;
531
532 case TASK_HOST_PORT:
533 whichp = &task->itk_host;
534 break;
535
536 case TASK_BOOTSTRAP_PORT:
537 whichp = &task->itk_bootstrap;
538 break;
539
540 case TASK_WIRED_LEDGER_PORT:
541 whichp = &task->wired_ledger_port;
542 break;
543
544 case TASK_PAGED_LEDGER_PORT:
545 whichp = &task->paged_ledger_port;
546 break;
547
548 default:
549 return KERN_INVALID_ARGUMENT;
550 }
551
552 itk_lock(task);
553 if (task->itk_self == IP_NULL) {
554 itk_unlock(task);
555 return KERN_FAILURE;
556 }
557
558 port = ipc_port_copy_send(*whichp);
559 itk_unlock(task);
560
561 *portp = port;
562 return KERN_SUCCESS;
563 }
564
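/*
 * Example (editorial sketch): a user program cloning a send right to
 * its own bootstrap port.  Standalone user-space code; the returned
 * right must be deallocated when no longer needed.
 */
#if 0	/* user-space illustration -- not compiled here */
#include <mach/mach.h>

void
example_get_bootstrap(void)
{
	mach_port_t bootstrap = MACH_PORT_NULL;
	kern_return_t kr;

	kr = task_get_special_port(mach_task_self(),
				   TASK_BOOTSTRAP_PORT, &bootstrap);
	if (kr != KERN_SUCCESS)
		return;

	/* ... talk to the bootstrap server ... */

	(void) mach_port_deallocate(mach_task_self(), bootstrap);
}
#endif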
565 /*
566 * Routine: task_set_special_port [kernel call]
567 * Purpose:
568 * Changes one of the task's special ports,
569 * setting it to the supplied send right.
570 * Conditions:
571 * Nothing locked. If successful, consumes
572 * the supplied send right.
573 * Returns:
574 * KERN_SUCCESS Changed the special port.
575 * KERN_INVALID_ARGUMENT The task is null.
576 * KERN_FAILURE The task/space is dead.
577 * KERN_INVALID_ARGUMENT Invalid special port.
578 */
579
580 kern_return_t
581 task_set_special_port(
582 task_t task,
583 int which,
584 ipc_port_t port)
585 {
586 ipc_port_t *whichp;
587 ipc_port_t old;
588
589 if (task == TASK_NULL)
590 return KERN_INVALID_ARGUMENT;
591
592 switch (which) {
593 case TASK_KERNEL_PORT:
594 whichp = &task->itk_sself;
595 break;
596
597 case TASK_HOST_PORT:
598 whichp = &task->itk_host;
599 break;
600
601 case TASK_BOOTSTRAP_PORT:
602 whichp = &task->itk_bootstrap;
603 break;
604
605 case TASK_WIRED_LEDGER_PORT:
606 whichp = &task->wired_ledger_port;
607 break;
608
609 case TASK_PAGED_LEDGER_PORT:
610 whichp = &task->paged_ledger_port;
611 break;
612
613 default:
614 return KERN_INVALID_ARGUMENT;
615 }/* switch */
616
617 itk_lock(task);
618 if (task->itk_self == IP_NULL) {
619 itk_unlock(task);
620 return KERN_FAILURE;
621 }
622
623 old = *whichp;
624 *whichp = port;
625 itk_unlock(task);
626
627 if (IP_VALID(old))
628 ipc_port_release_send(old);
629 return KERN_SUCCESS;
630 }
631
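/*
 * Example (editorial sketch): replacing the caller's own bootstrap
 * port, e.g. before spawning children that should inherit it.
 * Standalone user-space code; on success the kernel consumes the
 * supplied send right.
 */
#if 0	/* user-space illustration -- not compiled here */
#include <mach/mach.h>

void
example_set_bootstrap(mach_port_t new_bootstrap /* send right */)
{
	kern_return_t kr;

	kr = task_set_special_port(mach_task_self(),
				   TASK_BOOTSTRAP_PORT, new_bootstrap);
	if (kr != KERN_SUCCESS) {
		/* nothing was changed; handle the error */
	}
}
#endif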
632
633 /*
634 * Routine: mach_ports_register [kernel call]
635 * Purpose:
636 * Stash a handful of port send rights in the task.
637 * Child tasks will inherit these rights, but they
638 * must use mach_ports_lookup to acquire them.
639 *
640 * The rights are supplied in a (wired) kalloc'd segment.
641 * Rights which aren't supplied are assumed to be null.
642 * Conditions:
643 * Nothing locked. If successful, consumes
644 * the supplied rights and memory.
645 * Returns:
646 * KERN_SUCCESS Stashed the port rights.
647 * KERN_INVALID_ARGUMENT The task is null.
648 * KERN_INVALID_ARGUMENT The task is dead.
649 * KERN_INVALID_ARGUMENT Too many port rights supplied.
650 */
651
652 kern_return_t
653 mach_ports_register(
654 task_t task,
655 mach_port_array_t memory,
656 mach_msg_type_number_t portsCnt)
657 {
658 ipc_port_t ports[TASK_PORT_REGISTER_MAX];
659 int i;
660
661 if ((task == TASK_NULL) ||
662 (portsCnt > TASK_PORT_REGISTER_MAX))
663 return KERN_INVALID_ARGUMENT;
664
665 /*
666 * Pad the port rights with nulls.
667 */
668
669 for (i = 0; i < portsCnt; i++)
670 ports[i] = memory[i];
671 for (; i < TASK_PORT_REGISTER_MAX; i++)
672 ports[i] = IP_NULL;
673
674 itk_lock(task);
675 if (task->itk_self == IP_NULL) {
676 itk_unlock(task);
677 return KERN_INVALID_ARGUMENT;
678 }
679
680 /*
681 * Replace the old send rights with the new.
682 * Release the old rights after unlocking.
683 */
684
685 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
686 ipc_port_t old;
687
688 old = task->itk_registered[i];
689 task->itk_registered[i] = ports[i];
690 ports[i] = old;
691 }
692
693 itk_unlock(task);
694
695 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
696 if (IP_VALID(ports[i]))
697 ipc_port_release_send(ports[i]);
698
699 /*
700 * Now that the operation is known to be successful,
701 * we can free the memory.
702 */
703
704 if (portsCnt != 0)
705 kfree((vm_offset_t) memory,
706 (vm_size_t) (portsCnt * sizeof(mach_port_t)));
707
708 return KERN_SUCCESS;
709 }
710
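/*
 * Example (editorial sketch): stashing a send right in the task so
 * that children (and the task itself) can recover it later with
 * mach_ports_lookup().  Standalone user-space code.
 */
#if 0	/* user-space illustration -- not compiled here */
#include <mach/mach.h>

void
example_register_ports(mach_port_t service /* send right to stash */)
{
	mach_port_t ports[1];
	kern_return_t kr;

	ports[0] = service;
	kr = mach_ports_register(mach_task_self(), ports, 1);
	if (kr != KERN_SUCCESS) {
		/* nothing was stashed; handle the error */
	}
	/* unspecified slots (up to TASK_PORT_REGISTER_MAX) read back as null */
}
#endif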
711 /*
712 * Routine: mach_ports_lookup [kernel call]
713 * Purpose:
714 * Retrieves (clones) the stashed port send rights.
715 * Conditions:
716 * Nothing locked. If successful, the caller gets
717 * rights and memory.
718 * Returns:
719 * KERN_SUCCESS Retrieved the send rights.
720 * KERN_INVALID_ARGUMENT The task is null.
721 * KERN_INVALID_ARGUMENT The task is dead.
722 * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
723 */
724
725 kern_return_t
726 mach_ports_lookup(
727 task_t task,
728 mach_port_array_t *portsp,
729 mach_msg_type_number_t *portsCnt)
730 {
731 vm_offset_t memory;
732 vm_size_t size;
733 ipc_port_t *ports;
734 int i;
735
736 kern_return_t kr;
737
738 if (task == TASK_NULL)
739 return KERN_INVALID_ARGUMENT;
740
741 size = (vm_size_t) (TASK_PORT_REGISTER_MAX * sizeof(ipc_port_t));
742
743 memory = kalloc(size);
744 if (memory == 0)
745 return KERN_RESOURCE_SHORTAGE;
746
747 itk_lock(task);
748 if (task->itk_self == IP_NULL) {
749 itk_unlock(task);
750
751 kfree(memory, size);
752 return KERN_INVALID_ARGUMENT;
753 }
754
755 ports = (ipc_port_t *) memory;
756
757 /*
758 * Clone port rights. Because kalloc'd memory
759 * is wired, we won't fault while holding the task lock.
760 */
761
762 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
763 ports[i] = ipc_port_copy_send(task->itk_registered[i]);
764
765 itk_unlock(task);
766
767 *portsp = (mach_port_array_t) ports;
768 *portsCnt = TASK_PORT_REGISTER_MAX;
769 return KERN_SUCCESS;
770 }
771
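/*
 * Example (editorial sketch): recovering the registered rights and
 * releasing the out-of-line array the kernel hands back.  Standalone
 * user-space code.
 */
#if 0	/* user-space illustration -- not compiled here */
#include <mach/mach.h>

void
example_lookup_ports(void)
{
	mach_port_array_t ports = NULL;
	mach_msg_type_number_t count = 0;
	kern_return_t kr;
	unsigned int i;

	kr = mach_ports_lookup(mach_task_self(), &ports, &count);
	if (kr != KERN_SUCCESS)
		return;

	for (i = 0; i < count; i++) {
		if (MACH_PORT_VALID(ports[i])) {
			/* ... use ports[i], then release the cloned right ... */
			(void) mach_port_deallocate(mach_task_self(), ports[i]);
		}
	}

	/* the array itself arrives as out-of-line memory */
	(void) vm_deallocate(mach_task_self(), (vm_address_t) ports,
			     count * sizeof(ports[0]));
}
#endif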
772 /*
773 * Routine: convert_port_to_locked_task
774 * Purpose:
775 * Internal helper routine to convert from a port to a locked
776 * task. Used by several routines that try to convert from a
777 * task port to a reference on some task related object.
778 * Conditions:
779 * Nothing locked, blocking OK.
780 */
781 task_t
782 convert_port_to_locked_task(ipc_port_t port)
783 {
784 while (IP_VALID(port)) {
785 task_t task;
786
787 ip_lock(port);
788 if (!ip_active(port) || (ip_kotype(port) != IKOT_TASK)) {
789 ip_unlock(port);
790 return TASK_NULL;
791 }
792 task = (task_t) port->ip_kobject;
793 assert(task != TASK_NULL);
794
795 /*
796 * Normal lock ordering puts task_lock() before ip_lock().
797 * Attempt out-of-order locking here.
798 */
799 if (task_lock_try(task)) {
800 ip_unlock(port);
801 return(task);
802 }
803
804 ip_unlock(port);
805 mutex_pause();
806 }
807 return TASK_NULL;
808 }
809
810 /*
811 * Routine: convert_port_to_task
812 * Purpose:
813 * Convert from a port to a task.
814 * Doesn't consume the port ref; produces a task ref,
815 * which may be null.
816 * Conditions:
817 * Nothing locked.
818 */
819 task_t
820 convert_port_to_task(
821 ipc_port_t port)
822 {
823 task_t task;
824
825 task = convert_port_to_locked_task(port);
826 if (task) {
827 task->ref_count++;
828 task_unlock(task);
829 }
830 return task;
831 }
832
833 /*
834 * Routine: convert_port_to_space
835 * Purpose:
836 * Convert from a port to a space.
837 * Doesn't consume the port ref; produces a space ref,
838 * which may be null.
839 * Conditions:
840 * Nothing locked.
841 */
842 ipc_space_t
843 convert_port_to_space(
844 ipc_port_t port)
845 {
846 ipc_space_t space;
847 task_t task;
848
849 task = convert_port_to_locked_task(port);
850
851 if (task == TASK_NULL)
852 return IPC_SPACE_NULL;
853
854 if (!task->active) {
855 task_unlock(task);
856 return IPC_SPACE_NULL;
857 }
858
859 space = task->itk_space;
860 is_reference(space);
861 task_unlock(task);
862 return (space);
863 }
864
865 upl_t
866 convert_port_to_upl(
867 ipc_port_t port)
868 {
869 upl_t upl;
870
871 ip_lock(port);
872 if (!ip_active(port) || (ip_kotype(port) != IKOT_UPL)) {
873 ip_unlock(port);
874 return (upl_t)NULL;
875 }
876 upl = (upl_t) port->ip_kobject;
877 ip_unlock(port);
878 upl_lock(upl);
879 upl->ref_count+=1;
880 upl_unlock(upl);
881 return upl;
882 }
883
884 mach_port_t
885 convert_upl_to_port(
886 upl_t upl)
887 {
888 return MACH_PORT_NULL;
889 }
890
891 __private_extern__ void
892 upl_no_senders(
893 upl_t upl,
894 mach_port_mscount_t mscount)
895 {
896 return;
897 }
898
899 /*
900 * Routine: convert_port_entry_to_map
901 * Purpose:
902 * Convert from a port specifying an entry or a task
903 * to a map. Doesn't consume the port ref; produces a map ref,
904 * which may be null. Unlike convert_port_to_map, the
906 * port may be a task port or a named-entry-backed port.
906 * Conditions:
907 * Nothing locked.
908 */
909
910
911 vm_map_t
912 convert_port_entry_to_map(
913 ipc_port_t port)
914 {
915 task_t task;
916 vm_map_t map;
917 vm_named_entry_t named_entry;
918
919 if(IP_VALID(port) && (ip_kotype(port) == IKOT_NAMED_ENTRY)) {
920 while(TRUE) {
921 ip_lock(port);
922 if(ip_active(port) && (ip_kotype(port)
923 == IKOT_NAMED_ENTRY)) {
924 named_entry =
925 (vm_named_entry_t)port->ip_kobject;
926 if (!(mutex_try(&(named_entry)->Lock))) {
927 ip_unlock(port);
928 mutex_pause();
929 continue;
930 }
931 named_entry->ref_count++;
932 mutex_unlock(&(named_entry)->Lock);
933 ip_unlock(port);
934 if ((named_entry->is_sub_map) &&
935 (named_entry->protection
936 & VM_PROT_WRITE)) {
937 map = named_entry->backing.map;
938 } else {
939 mach_destroy_memory_entry(port);
940 return VM_MAP_NULL;
941 }
942 vm_map_reference_swap(map);
943 mach_destroy_memory_entry(port);
944 break;
945 }
946 else
947 return VM_MAP_NULL;
948 }
949 } else {
950 task_t task;
951
952 task = convert_port_to_locked_task(port);
953
954 if (task == TASK_NULL)
955 return VM_MAP_NULL;
956
957 if (!task->active) {
958 task_unlock(task);
959 return VM_MAP_NULL;
960 }
961
962 map = task->map;
963 vm_map_reference_swap(map);
964 task_unlock(task);
965 }
966
967 return map;
968 }
969
970 /*
971 * Routine: convert_port_entry_to_object
972 * Purpose:
973 * Convert from a port specifying a named entry to an
974 * object. Doesn't consume the port ref; produces an object ref,
975 * which may be null.
976 * Conditions:
977 * Nothing locked.
978 */
979
980
981 vm_object_t
982 convert_port_entry_to_object(
983 ipc_port_t port)
984 {
985 vm_object_t object;
986 vm_named_entry_t named_entry;
987
988 if(IP_VALID(port) && (ip_kotype(port) == IKOT_NAMED_ENTRY)) {
989 while(TRUE) {
990 ip_lock(port);
991 if(ip_active(port) && (ip_kotype(port)
992 == IKOT_NAMED_ENTRY)) {
993 named_entry =
994 (vm_named_entry_t)port->ip_kobject;
995 if (!(mutex_try(&(named_entry)->Lock))) {
996 ip_unlock(port);
997 mutex_pause();
998 continue;
999 }
1000 named_entry->ref_count++;
1001 mutex_unlock(&(named_entry)->Lock);
1002 ip_unlock(port);
1003 if ((!named_entry->is_sub_map) &&
1004 (named_entry->protection
1005 & VM_PROT_WRITE)) {
1006 object = named_entry->object;
1007 } else {
1008 mach_destroy_memory_entry(port);
1009 return (vm_object_t)NULL;
1010 }
1011 vm_object_reference(named_entry->object);
1012 mach_destroy_memory_entry(port);
1013 break;
1014 }
1015 else
1016 return (vm_object_t)NULL;
1017 }
1018 } else {
1019 return (vm_object_t)NULL;
1020 }
1021
1022 return object;
1023 }
1024
1025 /*
1026 * Routine: convert_port_to_map
1027 * Purpose:
1028 * Convert from a port to a map.
1029 * Doesn't consume the port ref; produces a map ref,
1030 * which may be null.
1031 * Conditions:
1032 * Nothing locked.
1033 */
1034
1035 vm_map_t
1036 convert_port_to_map(
1037 ipc_port_t port)
1038 {
1039 task_t task;
1040 vm_map_t map;
1041
1042 task = convert_port_to_locked_task(port);
1043
1044 if (task == TASK_NULL)
1045 return VM_MAP_NULL;
1046
1047 if (!task->active) {
1048 task_unlock(task);
1049 return VM_MAP_NULL;
1050 }
1051
1052 map = task->map;
1053 vm_map_reference_swap(map);
1054 task_unlock(task);
1055 return map;
1056 }
1057
1058
1059 /*
1060 * Routine: convert_port_to_act
1061 * Purpose:
1062 * Convert from a port to a thr_act.
1063 * Doesn't consume the port ref; produces a thr_act ref,
1064 * which may be null.
1065 * Conditions:
1066 * Nothing locked.
1067 */
1068
1069 thread_act_t
1070 convert_port_to_act( ipc_port_t port )
1071 {
1072 boolean_t r;
1073 thread_act_t thr_act = 0;
1074
1075 r = FALSE;
1076 while (!r && IP_VALID(port)) {
1077 ip_lock(port);
1078 r = ref_act_port_locked(port, &thr_act);
1079 /* port unlocked */
1080 }
1081 return (thr_act);
1082 }
1083
1084 boolean_t
1085 ref_act_port_locked( ipc_port_t port, thread_act_t *pthr_act )
1086 {
1087 thread_act_t thr_act;
1088
1089 thr_act = 0;
1090 if (ip_active(port) &&
1091 (ip_kotype(port) == IKOT_ACT)) {
1092 thr_act = (thread_act_t) port->ip_kobject;
1093 assert(thr_act != THR_ACT_NULL);
1094
1095 /*
1096 * Normal lock ordering is act_lock(), then ip_lock().
1097 * Allow out-of-order locking here, using
1098 * act_locked_act_reference() to accommodate it.
1099 */
1100 if (!act_lock_try(thr_act)) {
1101 ip_unlock(port);
1102 mutex_pause();
1103 return (FALSE);
1104 }
1105 act_locked_act_reference(thr_act);
1106 act_unlock(thr_act);
1107 }
1108 *pthr_act = thr_act;
1109 ip_unlock(port);
1110 return (TRUE);
1111 }
1112
1113 /*
1114 * Routine: port_name_to_act
1115 * Purpose:
1116 * Convert from a port name to an act reference
1117 * A name of MACH_PORT_NULL is valid for the null act.
1118 * Conditions:
1119 * Nothing locked.
1120 */
1121 thread_act_t
1122 port_name_to_act(
1123 mach_port_name_t name)
1124 {
1125 thread_act_t thr_act = THR_ACT_NULL;
1126 ipc_port_t kern_port;
1127 kern_return_t kr;
1128
1129 if (MACH_PORT_VALID(name)) {
1130 kr = ipc_object_copyin(current_space(), name,
1131 MACH_MSG_TYPE_COPY_SEND,
1132 (ipc_object_t *) &kern_port);
1133 if (kr != KERN_SUCCESS)
1134 return THR_ACT_NULL;
1135
1136 thr_act = convert_port_to_act(kern_port);
1137
1138 if (IP_VALID(kern_port))
1139 ipc_port_release_send(kern_port);
1140 }
1141 return thr_act;
1142 }
1143
1144 task_t
1145 port_name_to_task(
1146 mach_port_name_t name)
1147 {
1148 ipc_port_t kern_port;
1149 kern_return_t kr;
1150 task_t task = TASK_NULL;
1151
1152 if (MACH_PORT_VALID(name)) {
1153 kr = ipc_object_copyin(current_space(), name,
1154 MACH_MSG_TYPE_COPY_SEND,
1155 (ipc_object_t *) &kern_port);
1156 if (kr != KERN_SUCCESS)
1157 return TASK_NULL;
1158
1159 task = convert_port_to_task(kern_port);
1160
1161 if (IP_VALID(kern_port))
1162 ipc_port_release_send(kern_port);
1163 }
1164 return task;
1165 }
1166
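/*
 * Example (editorial sketch): how a hypothetical trap handler might
 * translate a user-supplied port name into a task reference.  The
 * function name is illustrative, not part of xnu.
 */
#if 0	/* illustrative sketch -- not compiled */
kern_return_t
example_trap(mach_port_name_t target_name)
{
	task_t task;

	task = port_name_to_task(target_name);
	if (task == TASK_NULL)
		return KERN_INVALID_ARGUMENT;

	/* ... operate on the task ... */

	task_deallocate(task);		/* drop the reference produced above */
	return KERN_SUCCESS;
}
#endif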
1167 /*
1168 * Routine: convert_task_to_port
1169 * Purpose:
1170 * Convert from a task to a port.
1171 * Consumes a task ref; produces a naked send right
1172 * which may be invalid.
1173 * Conditions:
1174 * Nothing locked.
1175 */
1176
1177 ipc_port_t
1178 convert_task_to_port(
1179 task_t task)
1180 {
1181 ipc_port_t port;
1182
1183 itk_lock(task);
1184 if (task->itk_self != IP_NULL)
1185 #if NORMA_TASK
1186 if (task->map == VM_MAP_NULL)
1187 /* norma placeholder task */
1188 port = ipc_port_copy_send(task->itk_self);
1189 else
1190 #endif /* NORMA_TASK */
1191 port = ipc_port_make_send(task->itk_self);
1192 else
1193 port = IP_NULL;
1194 itk_unlock(task);
1195
1196 task_deallocate(task);
1197 return port;
1198 }
1199
1200 /*
1201 * Routine: convert_act_to_port
1202 * Purpose:
1203 * Convert from a thr_act to a port.
1204 * Consumes a thr_act ref; produces a naked send right
1205 * which may be invalid.
1206 * Conditions:
1207 * Nothing locked.
1208 */
1209
1210 ipc_port_t
1211 convert_act_to_port(
1212 	thread_act_t thr_act)
1213 {
1214 ipc_port_t port;
1215
1216 act_lock(thr_act);
1217 if (thr_act->ith_self != IP_NULL)
1218 port = ipc_port_make_send(thr_act->ith_self);
1219 else
1220 port = IP_NULL;
1221 act_unlock(thr_act);
1222
1223 act_deallocate(thr_act);
1224 return port;
1225 }
1226
1227 /*
1228 * Routine: space_deallocate
1229 * Purpose:
1230 * Deallocate a space ref produced by convert_port_to_space.
1231 * Conditions:
1232 * Nothing locked.
1233 */
1234
1235 void
1236 space_deallocate(
1237 ipc_space_t space)
1238 {
1239 if (space != IS_NULL)
1240 is_release(space);
1241 }
1242
1243 /*
1244 * Routine: thread/task_set_exception_ports [kernel call]
1245 * Purpose:
1246 * Sets the thread/task exception port, flavor and
1247 * behavior for the exception types specified by the mask.
1248 * There will be one send right per exception per valid
1249 * port.
1250 * Conditions:
1251 * Nothing locked. If successful, consumes
1252 * the supplied send right.
1253 * Returns:
1254 * KERN_SUCCESS Changed the special port.
1255 * KERN_INVALID_ARGUMENT The thread is null,
1256 * Illegal mask bit set.
1257 * Illegal exception behavior
1258 * KERN_FAILURE The thread is dead.
1259 */
1260
1261 kern_return_t
1262 thread_set_exception_ports(
1263 thread_act_t thr_act,
1264 exception_mask_t exception_mask,
1265 ipc_port_t new_port,
1266 exception_behavior_t new_behavior,
1267 thread_state_flavor_t new_flavor)
1268 {
1269 register int i;
1270 ipc_port_t old_port[EXC_TYPES_COUNT];
1271
1272 if (!thr_act)
1273 return KERN_INVALID_ARGUMENT;
1274
1275 if (exception_mask & ~EXC_MASK_ALL)
1276 return KERN_INVALID_ARGUMENT;
1277
1278 if (IP_VALID(new_port)) {
1279 switch (new_behavior) {
1280 case EXCEPTION_DEFAULT:
1281 case EXCEPTION_STATE:
1282 case EXCEPTION_STATE_IDENTITY:
1283 break;
1284 default:
1285 return KERN_INVALID_ARGUMENT;
1286 }
1287 }
1288
1289 /*
1290 * Check the validity of the thread_state_flavor by calling the
1291 * VALID_THREAD_STATE_FLAVOR architecture dependent macro defined in
1292 * osfmk/mach/ARCHITECTURE/thread_status.h
1293 */
1294 if (!VALID_THREAD_STATE_FLAVOR(new_flavor)) {
1295 return KERN_INVALID_ARGUMENT;
1296 }
1297
1298 act_lock(thr_act);
1299 if (!thr_act->active) {
1300 act_unlock(thr_act);
1301 return KERN_FAILURE;
1302 }
1303
1304 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
1305 if (exception_mask & (1 << i)) {
1306 old_port[i] = thr_act->exc_actions[i].port;
1307 thr_act->exc_actions[i].port =
1308 ipc_port_copy_send(new_port);
1309 thr_act->exc_actions[i].behavior = new_behavior;
1310 thr_act->exc_actions[i].flavor = new_flavor;
1311 } else
1312 old_port[i] = IP_NULL;
1313 }/* for */
1314 /*
1315 * Consume send rights without any lock held.
1316 */
1317 act_unlock(thr_act);
1318 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++)
1319 if (IP_VALID(old_port[i]))
1320 ipc_port_release_send(old_port[i]);
1321 if (IP_VALID(new_port)) /* consume send right */
1322 ipc_port_release_send(new_port);
1323
1324 return KERN_SUCCESS;
1325 }/* thread_set_exception_port */
1326
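/*
 * Example (editorial sketch): a user program installing an exception
 * port on its own thread.  Standalone user-space code;
 * MACHINE_THREAD_STATE stands in for an architecture-specific flavor,
 * which EXCEPTION_DEFAULT does not use anyway.
 */
#if 0	/* user-space illustration -- not compiled here */
#include <mach/mach.h>

kern_return_t
example_install_thread_handler(void)
{
	mach_port_t exc_port;
	mach_port_t thread;
	kern_return_t kr;

	kr = mach_port_allocate(mach_task_self(),
				MACH_PORT_RIGHT_RECEIVE, &exc_port);
	if (kr != KERN_SUCCESS)
		return kr;

	/* give ourselves a send right to hand to the kernel */
	kr = mach_port_insert_right(mach_task_self(), exc_port, exc_port,
				    MACH_MSG_TYPE_MAKE_SEND);
	if (kr != KERN_SUCCESS)
		return kr;

	thread = mach_thread_self();
	kr = thread_set_exception_ports(thread, EXC_MASK_BAD_ACCESS,
					exc_port, EXCEPTION_DEFAULT,
					MACHINE_THREAD_STATE);
	(void) mach_port_deallocate(mach_task_self(), thread);

	/* a handler would now mach_msg() receive on exc_port */
	return kr;
}
#endif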
1327 kern_return_t
1328 task_set_exception_ports(
1329 task_t task,
1330 exception_mask_t exception_mask,
1331 ipc_port_t new_port,
1332 exception_behavior_t new_behavior,
1333 thread_state_flavor_t new_flavor)
1334 {
1335 register int i;
1336 ipc_port_t old_port[EXC_TYPES_COUNT];
1337
1338 if (task == TASK_NULL) {
1339 return KERN_INVALID_ARGUMENT;
1340 }
1341
1342 if (exception_mask & ~EXC_MASK_ALL) {
1343 return KERN_INVALID_ARGUMENT;
1344 }
1345
1346 if (IP_VALID(new_port)) {
1347 switch (new_behavior) {
1348 case EXCEPTION_DEFAULT:
1349 case EXCEPTION_STATE:
1350 case EXCEPTION_STATE_IDENTITY:
1351 break;
1352 default:
1353 return KERN_INVALID_ARGUMENT;
1354 }
1355 }
1356 /* Cannot easily check "new_flavor", but that just means that
1357 * the flavor in the generated exception message might be garbage:
1358 * GIGO */
1359
1360 itk_lock(task);
1361 if (task->itk_self == IP_NULL) {
1362 itk_unlock(task);
1363 return KERN_FAILURE;
1364 }
1365
1366 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
1367 if (exception_mask & (1 << i)) {
1368 old_port[i] = task->exc_actions[i].port;
1369 task->exc_actions[i].port =
1370 ipc_port_copy_send(new_port);
1371 task->exc_actions[i].behavior = new_behavior;
1372 task->exc_actions[i].flavor = new_flavor;
1373 } else
1374 old_port[i] = IP_NULL;
1375 }/* for */
1376
1377 /*
1378 * Consume send rights without any lock held.
1379 */
1380 itk_unlock(task);
1381 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++)
1382 if (IP_VALID(old_port[i]))
1383 ipc_port_release_send(old_port[i]);
1384 if (IP_VALID(new_port)) /* consume send right */
1385 ipc_port_release_send(new_port);
1386
1387 return KERN_SUCCESS;
1388 }/* task_set_exception_port */
1389
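/*
 * Example (editorial sketch): the task-wide variant is called the same
 * way and covers every thread that has not installed its own handler
 * for the given exception types.  Standalone user-space code.
 */
#if 0	/* user-space illustration -- not compiled here */
#include <mach/mach.h>

kern_return_t
example_install_task_handler(mach_port_t exc_port /* send right */)
{
	return task_set_exception_ports(mach_task_self(),
					EXC_MASK_BAD_ACCESS | EXC_MASK_ARITHMETIC,
					exc_port, EXCEPTION_DEFAULT,
					MACHINE_THREAD_STATE);
}
#endif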
1390 /*
1391 * Routine: thread/task_swap_exception_ports [kernel call]
1392 * Purpose:
1393 * Sets the thread/task exception port, flavor and
1394 * behavior for the exception types specified by the
1395 * mask.
1396 *
1397 * The old ports, behavior and flavors are returned
1398 * Count specifies the array sizes on input and
1399 * the number of returned ports etc. on output. The
1400 * arrays must be large enough to hold all the returned
1401 * data; MIG returns an error otherwise. The masks
1402 * array specifies the corresponding exception type(s).
1403 *
1404 * Conditions:
1405 * Nothing locked. If successful, consumes
1406 * the supplied send right.
1407 *
1408 * Returns up to [in] CountCnt elements.
1409 * Returns:
1410 * KERN_SUCCESS Changed the special port.
1411 * KERN_INVALID_ARGUMENT The thread is null,
1412 * Illegal mask bit set.
1413 * Illegal exception behavior
1414 * KERN_FAILURE The thread is dead.
1415 */
1416
1417 kern_return_t
1418 thread_swap_exception_ports(
1419 thread_act_t thr_act,
1420 exception_mask_t exception_mask,
1421 ipc_port_t new_port,
1422 exception_behavior_t new_behavior,
1423 thread_state_flavor_t new_flavor,
1424 exception_mask_array_t masks,
1425 mach_msg_type_number_t * CountCnt,
1426 exception_port_array_t ports,
1427 exception_behavior_array_t behaviors,
1428 thread_state_flavor_array_t flavors )
1429 {
1430 register int i,
1431 j,
1432 count;
1433 ipc_port_t old_port[EXC_TYPES_COUNT];
1434
1435 if (!thr_act)
1436 return KERN_INVALID_ARGUMENT;
1437
1438 if (exception_mask & ~EXC_MASK_ALL) {
1439 return KERN_INVALID_ARGUMENT;
1440 }
1441
1442 if (IP_VALID(new_port)) {
1443 switch (new_behavior) {
1444 case EXCEPTION_DEFAULT:
1445 case EXCEPTION_STATE:
1446 case EXCEPTION_STATE_IDENTITY:
1447 break;
1448 default:
1449 return KERN_INVALID_ARGUMENT;
1450 }
1451 }
1452 /* Cannot easily check "new_flavor", but that just means that
1453 * the flavor in the generated exception message might be garbage:
1454 * GIGO */
1455
1456 act_lock(thr_act);
1457 if (!thr_act->active) {
1458 act_unlock(thr_act);
1459 return KERN_FAILURE;
1460 }
1461
1462 count = 0;
1463
1464 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
1465 if (exception_mask & (1 << i)) {
1466 for (j = 0; j < count; j++) {
1467 /*
1468 * search for an identical entry, if found
1469 * set corresponding mask for this exception.
1470 */
1471 if (thr_act->exc_actions[i].port == ports[j] &&
1472 thr_act->exc_actions[i].behavior ==behaviors[j]
1473 && thr_act->exc_actions[i].flavor ==flavors[j])
1474 {
1475 masks[j] |= (1 << i);
1476 break;
1477 }
1478 }/* for */
1479 if (j == count) {
1480 masks[j] = (1 << i);
1481 ports[j] =
1482 ipc_port_copy_send(thr_act->exc_actions[i].port);
1483
1484 behaviors[j] = thr_act->exc_actions[i].behavior;
1485 flavors[j] = thr_act->exc_actions[i].flavor;
1486 count++;
1487 }
1488
1489 old_port[i] = thr_act->exc_actions[i].port;
1490 thr_act->exc_actions[i].port =
1491 ipc_port_copy_send(new_port);
1492 thr_act->exc_actions[i].behavior = new_behavior;
1493 thr_act->exc_actions[i].flavor = new_flavor;
1494 if (count >= *CountCnt) {
1495 break;
1496 }
1497 } else
1498 old_port[i] = IP_NULL;
1499 }/* for */
1500
1501 /*
1502 * Consume send rights without any lock held.
1503 */
1504 act_unlock(thr_act);
1505 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++)
1506 if (IP_VALID(old_port[i]))
1507 ipc_port_release_send(old_port[i]);
1508 if (IP_VALID(new_port)) /* consume send right */
1509 ipc_port_release_send(new_port);
1510 *CountCnt = count;
1511 return KERN_SUCCESS;
1512 }/* thread_swap_exception_ports */
1513
1514 kern_return_t
1515 task_swap_exception_ports(
1516 task_t task,
1517 exception_mask_t exception_mask,
1518 ipc_port_t new_port,
1519 exception_behavior_t new_behavior,
1520 thread_state_flavor_t new_flavor,
1521 exception_mask_array_t masks,
1522 mach_msg_type_number_t * CountCnt,
1523 exception_port_array_t ports,
1524 exception_behavior_array_t behaviors,
1525 thread_state_flavor_array_t flavors )
1526 {
1527 register int i,
1528 j,
1529 count;
1530 ipc_port_t old_port[EXC_TYPES_COUNT];
1531
1532 if (task == TASK_NULL)
1533 return KERN_INVALID_ARGUMENT;
1534
1535 if (exception_mask & ~EXC_MASK_ALL) {
1536 return KERN_INVALID_ARGUMENT;
1537 }
1538
1539 if (IP_VALID(new_port)) {
1540 switch (new_behavior) {
1541 case EXCEPTION_DEFAULT:
1542 case EXCEPTION_STATE:
1543 case EXCEPTION_STATE_IDENTITY:
1544 break;
1545 default:
1546 return KERN_INVALID_ARGUMENT;
1547 }
1548 }
1549 /* Cannot easily check "new_flavor", but that just means that
1550 * the flavor in the generated exception message might be garbage:
1551 * GIGO */
1552
1553 itk_lock(task);
1554 if (task->itk_self == IP_NULL) {
1555 itk_unlock(task);
1556 return KERN_FAILURE;
1557 }
1558
1559 count = 0;
1560
1561 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
1562 if (exception_mask & (1 << i)) {
1563 for (j = 0; j < count; j++) {
1564 /*
1565 * search for an identical entry, if found
1566 * set corresponding mask for this exception.
1567 */
1568 if (task->exc_actions[i].port == ports[j] &&
1569 task->exc_actions[i].behavior == behaviors[j]
1570 && task->exc_actions[i].flavor == flavors[j])
1571 {
1572 masks[j] |= (1 << i);
1573 break;
1574 }
1575 }/* for */
1576 if (j == count) {
1577 masks[j] = (1 << i);
1578 ports[j] =
1579 ipc_port_copy_send(task->exc_actions[i].port);
1580 behaviors[j] = task->exc_actions[i].behavior;
1581 flavors[j] = task->exc_actions[i].flavor;
1582 count++;
1583 }
1584 old_port[i] = task->exc_actions[i].port;
1585 task->exc_actions[i].port =
1586 ipc_port_copy_send(new_port);
1587 task->exc_actions[i].behavior = new_behavior;
1588 task->exc_actions[i].flavor = new_flavor;
1589 if (count >= *CountCnt) {
1590 break;
1591 }
1592 } else
1593 old_port[i] = IP_NULL;
1594 }/* for */
1595
1596
1597 /*
1598 * Consume send rights without any lock held.
1599 */
1600 itk_unlock(task);
1601 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++)
1602 if (IP_VALID(old_port[i]))
1603 ipc_port_release_send(old_port[i]);
1604 if (IP_VALID(new_port)) /* consume send right */
1605 ipc_port_release_send(new_port);
1606 *CountCnt = count;
1607
1608 return KERN_SUCCESS;
1609 }/* task_swap_exception_ports */
1610
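/*
 * Example (editorial sketch): the classic save-and-restore pattern.
 * The old-handler arrays are sized EXC_TYPES_COUNT so the single [in]
 * count is always large enough.  Standalone user-space code.
 */
#if 0	/* user-space illustration -- not compiled here */
#include <mach/mach.h>

void
example_swap_and_restore(mach_port_t new_port /* send right */)
{
	exception_mask_t	masks[EXC_TYPES_COUNT];
	mach_msg_type_number_t	count = EXC_TYPES_COUNT;
	exception_handler_t	old_ports[EXC_TYPES_COUNT];
	exception_behavior_t	old_behaviors[EXC_TYPES_COUNT];
	thread_state_flavor_t	old_flavors[EXC_TYPES_COUNT];
	unsigned int i;

	if (task_swap_exception_ports(mach_task_self(), EXC_MASK_ALL,
				      new_port, EXCEPTION_DEFAULT,
				      MACHINE_THREAD_STATE,
				      masks, &count, old_ports,
				      old_behaviors, old_flavors) != KERN_SUCCESS)
		return;

	/* ... run with new_port installed ... */

	/* restore the previous handlers, one (mask, port) pair at a time */
	for (i = 0; i < count; i++)
		(void) task_set_exception_ports(mach_task_self(), masks[i],
						old_ports[i], old_behaviors[i],
						old_flavors[i]);
}
#endif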
1611 /*
1612 * Routine: thread/task_get_exception_ports [kernel call]
1613 * Purpose:
1614 * Clones a send right for each of the thread/task's exception
1615 * ports specified in the mask and returns the behavior
1616 * and flavor of said port.
1617 *
1618 * Returns up to [in] CountCnt elements.
1619 *
1620 * Conditions:
1621 * Nothing locked.
1622 * Returns:
1623 * KERN_SUCCESS Extracted a send right.
1624 * KERN_INVALID_ARGUMENT The thread is null,
1625 * Invalid special port,
1626 * Illegal mask bit set.
1627 * KERN_FAILURE The thread is dead.
1628 */
1629
1630 kern_return_t
1631 thread_get_exception_ports(
1632 thread_act_t thr_act,
1633 exception_mask_t exception_mask,
1634 exception_mask_array_t masks,
1635 mach_msg_type_number_t * CountCnt,
1636 exception_port_array_t ports,
1637 exception_behavior_array_t behaviors,
1638 thread_state_flavor_array_t flavors )
1639 {
1640 register int i,
1641 j,
1642 count;
1643
1644 if (!thr_act)
1645 return KERN_INVALID_ARGUMENT;
1646
1647 if (exception_mask & ~EXC_MASK_ALL) {
1648 return KERN_INVALID_ARGUMENT;
1649 }
1650
1651 act_lock(thr_act);
1652 if (!thr_act->active) {
1653 act_unlock(thr_act);
1654 return KERN_FAILURE;
1655 }
1656
1657 count = 0;
1658
1659 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
1660 if (exception_mask & (1 << i)) {
1661 for (j = 0; j < count; j++) {
1662 /*
1663 * search for an identical entry, if found
1664 * set corresponding mask for this exception.
1665 */
1666 if (thr_act->exc_actions[i].port == ports[j] &&
1667 thr_act->exc_actions[i].behavior ==behaviors[j]
1668 && thr_act->exc_actions[i].flavor == flavors[j])
1669 {
1670 masks[j] |= (1 << i);
1671 break;
1672 }
1673 }/* for */
1674 if (j == count) {
1675 masks[j] = (1 << i);
1676 ports[j] =
1677 ipc_port_copy_send(thr_act->exc_actions[i].port);
1678 behaviors[j] = thr_act->exc_actions[i].behavior;
1679 flavors[j] = thr_act->exc_actions[i].flavor;
1680 count++;
1681 if (count >= *CountCnt) {
1682 break;
1683 }
1684 }
1685 }
1686 }/* for */
1687
1688 act_unlock(thr_act);
1689
1690 *CountCnt = count;
1691 return KERN_SUCCESS;
1692 }/* thread_get_exception_ports */
1693
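/*
 * Example (editorial sketch): querying the current thread's handlers
 * without changing anything.  Standalone user-space code.
 */
#if 0	/* user-space illustration -- not compiled here */
#include <mach/mach.h>
#include <stdio.h>

void
example_dump_thread_handlers(void)
{
	exception_mask_t	masks[EXC_TYPES_COUNT];
	mach_msg_type_number_t	count = EXC_TYPES_COUNT;
	exception_handler_t	ports[EXC_TYPES_COUNT];
	exception_behavior_t	behaviors[EXC_TYPES_COUNT];
	thread_state_flavor_t	flavors[EXC_TYPES_COUNT];
	mach_port_t thread = mach_thread_self();
	unsigned int i;

	if (thread_get_exception_ports(thread, EXC_MASK_ALL, masks, &count,
				       ports, behaviors, flavors) == KERN_SUCCESS) {
		for (i = 0; i < count; i++) {
			printf("mask 0x%x -> port 0x%x behavior %d flavor %d\n",
			       masks[i], ports[i], behaviors[i], flavors[i]);
			if (MACH_PORT_VALID(ports[i]))	/* release the cloned right */
				(void) mach_port_deallocate(mach_task_self(), ports[i]);
		}
	}
	(void) mach_port_deallocate(mach_task_self(), thread);
}
#endif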
1694 kern_return_t
1695 task_get_exception_ports(
1696 task_t task,
1697 exception_mask_t exception_mask,
1698 exception_mask_array_t masks,
1699 mach_msg_type_number_t * CountCnt,
1700 exception_port_array_t ports,
1701 exception_behavior_array_t behaviors,
1702 thread_state_flavor_array_t flavors )
1703 {
1704 register int i,
1705 j,
1706 count;
1707
1708 if (task == TASK_NULL)
1709 return KERN_INVALID_ARGUMENT;
1710
1711 if (exception_mask & ~EXC_MASK_ALL) {
1712 return KERN_INVALID_ARGUMENT;
1713 }
1714
1715 itk_lock(task);
1716 if (task->itk_self == IP_NULL) {
1717 itk_unlock(task);
1718 return KERN_FAILURE;
1719 }
1720
1721 count = 0;
1722
1723 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
1724 if (exception_mask & (1 << i)) {
1725 for (j = 0; j < count; j++) {
1726 /*
1727 * search for an identical entry, if found
1728 * set corresponding mask for this exception.
1729 */
1730 if (task->exc_actions[i].port == ports[j] &&
1731 task->exc_actions[i].behavior == behaviors[j]
1732 && task->exc_actions[i].flavor == flavors[j])
1733 {
1734 masks[j] |= (1 << i);
1735 break;
1736 }
1737 }/* for */
1738 if (j == count) {
1739 masks[j] = (1 << i);
1740 ports[j] =
1741 ipc_port_copy_send(task->exc_actions[i].port);
1742 behaviors[j] = task->exc_actions[i].behavior;
1743 flavors[j] = task->exc_actions[i].flavor;
1744 count++;
1745 if (count >= *CountCnt) {
1746 break;
1747 }
1748 }
1749 }
1750 }/* for */
1751
1752 itk_unlock(task);
1753
1754 *CountCnt = count;
1755 return KERN_SUCCESS;
1756 }/* task_get_exception_ports */