1 /*
2 * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 /*
23 * @OSF_COPYRIGHT@
24 */
25 /*
26 * Mach Operating System
27 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
28 * All Rights Reserved.
29 *
30 * Permission to use, copy, modify and distribute this software and its
31 * documentation is hereby granted, provided that both the copyright
32 * notice and this permission notice appear in all copies of the
33 * software, derivative works or modified versions, and any portions
34 * thereof, and that both notices appear in supporting documentation.
35 *
36 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
37 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
38 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
39 *
40 * Carnegie Mellon requests users of this software to return to
41 *
42 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
43 * School of Computer Science
44 * Carnegie Mellon University
45 * Pittsburgh PA 15213-3890
46 *
47 * any improvements or extensions that they make and grant Carnegie Mellon
48 * the rights to redistribute these changes.
49 */
50 /*
51 */
52
53 /*
54 * File: ipc_tt.c
55 * Purpose:
56 * Task and thread related IPC functions.
57 */
58
59 #include <mach/mach_types.h>
60 #include <mach/boolean.h>
61 #include <mach_rt.h>
62 #include <mach/kern_return.h>
63 #include <mach/mach_param.h>
64 #include <mach/task_special_ports.h>
65 #include <mach/thread_special_ports.h>
66 #include <mach/thread_status.h>
67 #include <mach/exception_types.h>
68 #include <mach/mach_traps.h>
69 #include <mach/task_server.h>
70 #include <mach/thread_act_server.h>
71 #include <mach/mach_host_server.h>
72 #include <mach/vm_map_server.h>
73 #include <kern/host.h>
74 #include <kern/ipc_tt.h>
75 #include <kern/thread_act.h>
76 #include <kern/misc_protos.h>
77 #include <vm/vm_pageout.h>
78
79 /*
80 * Routine: ipc_task_init
81 * Purpose:
82 * Initialize a task's IPC state.
83 *
84 * If non-null, some state will be inherited from the parent.
85 * The parent must be appropriately initialized.
86 * Conditions:
87 * Nothing locked.
88 */
89
90 void
91 ipc_task_init(
92 task_t task,
93 task_t parent)
94 {
95 ipc_space_t space;
96 ipc_port_t kport;
97 kern_return_t kr;
98 int i;
99
100
101 kr = ipc_space_create(&ipc_table_entries[0], &space);
102 if (kr != KERN_SUCCESS)
103 panic("ipc_task_init");
104
105
106 kport = ipc_port_alloc_kernel();
107 if (kport == IP_NULL)
108 panic("ipc_task_init");
109
110 itk_lock_init(task);
111 task->itk_self = kport;
112 task->itk_sself = ipc_port_make_send(kport);
113 task->itk_space = space;
114 space->is_fast = FALSE;
115
116 if (parent == TASK_NULL) {
117 ipc_port_t port;
118
119 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
120 task->exc_actions[i].port = IP_NULL;
121 }/* for */
122
123 kr = host_get_host_port(host_priv_self(), &port);
124 assert(kr == KERN_SUCCESS);
125 task->itk_host = port;
126
127 task->itk_bootstrap = IP_NULL;
128
129 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
130 task->itk_registered[i] = IP_NULL;
131 } else {
132 itk_lock(parent);
133 assert(parent->itk_self != IP_NULL);
134
135 /* inherit registered ports */
136
137 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
138 task->itk_registered[i] =
139 ipc_port_copy_send(parent->itk_registered[i]);
140
141 /* inherit exception and bootstrap ports */
142
143 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
144 task->exc_actions[i].port =
145 ipc_port_copy_send(parent->exc_actions[i].port);
146 task->exc_actions[i].flavor =
147 parent->exc_actions[i].flavor;
148 task->exc_actions[i].behavior =
149 parent->exc_actions[i].behavior;
150 }/* for */
151 task->itk_host =
152 ipc_port_copy_send(parent->itk_host);
153
154 task->itk_bootstrap =
155 ipc_port_copy_send(parent->itk_bootstrap);
156
157 itk_unlock(parent);
158 }
159 }
160
161 /*
162 * Routine: ipc_task_enable
163 * Purpose:
164 * Enable a task for IPC access.
165 * Conditions:
166 * Nothing locked.
167 */
168
169 void
170 ipc_task_enable(
171 task_t task)
172 {
173 ipc_port_t kport;
174
175 itk_lock(task);
176 kport = task->itk_self;
177 if (kport != IP_NULL)
178 ipc_kobject_set(kport, (ipc_kobject_t) task, IKOT_TASK);
179 itk_unlock(task);
180 }
181
182 /*
183 * Routine: ipc_task_disable
184 * Purpose:
185 * Disable IPC access to a task.
186 * Conditions:
187 * Nothing locked.
188 */
189
190 void
191 ipc_task_disable(
192 task_t task)
193 {
194 ipc_port_t kport;
195
196 itk_lock(task);
197 kport = task->itk_self;
198 if (kport != IP_NULL)
199 ipc_kobject_set(kport, IKO_NULL, IKOT_NONE);
200 itk_unlock(task);
201 }
202
203 /*
204 * Routine: ipc_task_terminate
205 * Purpose:
206 * Clean up and destroy a task's IPC state.
207 * Conditions:
208 * Nothing locked. The task must be suspended.
209 * (Or the current thread must be in the task.)
210 */
211
212 void
213 ipc_task_terminate(
214 task_t task)
215 {
216 ipc_port_t kport;
217 int i;
218
219 itk_lock(task);
220 kport = task->itk_self;
221
222 if (kport == IP_NULL) {
223 /* the task is already terminated (can this happen?) */
224 itk_unlock(task);
225 return;
226 }
227
228 task->itk_self = IP_NULL;
229 itk_unlock(task);
230
231 /* release the naked send rights */
232
233 if (IP_VALID(task->itk_sself))
234 ipc_port_release_send(task->itk_sself);
235
236 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
237 if (IP_VALID(task->exc_actions[i].port)) {
238 ipc_port_release_send(task->exc_actions[i].port);
239 }
240 }/* for */
241 if (IP_VALID(task->itk_host))
242 ipc_port_release_send(task->itk_host);
243
244 if (IP_VALID(task->itk_bootstrap))
245 ipc_port_release_send(task->itk_bootstrap);
246
247 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
248 if (IP_VALID(task->itk_registered[i]))
249 ipc_port_release_send(task->itk_registered[i]);
250
251 ipc_port_release_send(task->wired_ledger_port);
252 ipc_port_release_send(task->paged_ledger_port);
253
254 /* destroy the kernel port */
255 ipc_port_dealloc_kernel(kport);
256 }
257
258 /*
259 * Routine: ipc_task_reset
260 * Purpose:
261 * Reset a task's IPC state to protect it when
262 * it enters an elevated security context.
263 * Conditions:
264 * Nothing locked. The task must be suspended.
265 * (Or the current thread must be in the task.)
266 */
267
268 void
269 ipc_task_reset(
270 task_t task)
271 {
272 ipc_port_t old_kport, new_kport;
273 ipc_port_t old_sself;
274 #if 0
275 ipc_port_t old_exc_actions[EXC_TYPES_COUNT];
276 int i;
277 #endif
278
279 new_kport = ipc_port_alloc_kernel();
280 if (new_kport == IP_NULL)
281 panic("ipc_task_reset");
282
283 itk_lock(task);
284
285 old_kport = task->itk_self;
286
287 if (old_kport == IP_NULL) {
288 /* the task is already terminated (can this happen?) */
289 itk_unlock(task);
290 ipc_port_dealloc_kernel(new_kport);
291 return;
292 }
293
294 task->itk_self = new_kport;
295 old_sself = task->itk_sself;
296 task->itk_sself = ipc_port_make_send(new_kport);
297 ipc_kobject_set(old_kport, IKO_NULL, IKOT_NONE);
298 ipc_kobject_set(new_kport, (ipc_kobject_t) task, IKOT_TASK);
299
300 #if 0
301 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
302         old_exc_actions[i] = task->exc_actions[i].port;
303 task->exc_actions[i].port = IP_NULL;
304 }/* for */
305 #endif
306
307 itk_unlock(task);
308
309 /* release the naked send rights */
310
311 if (IP_VALID(old_sself))
312 ipc_port_release_send(old_sself);
313
314 #if 0
315 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
316 if (IP_VALID(old_exc_actions[i])) {
317 ipc_port_release_send(old_exc_actions[i]);
318 }
319 }/* for */
320 #endif
321
322 /* destroy the kernel port */
323 ipc_port_dealloc_kernel(old_kport);
324 }
325
326 /*
327 * Routine: ipc_thread_init
328 * Purpose:
329 * Initialize a thread's IPC state.
330 * Conditions:
331 * Nothing locked.
332 */
333
334 void
335 ipc_thread_init(
336 thread_t thread)
337 {
338 ipc_kmsg_queue_init(&thread->ith_messages);
339 thread->ith_mig_reply = MACH_PORT_NULL;
340 thread->ith_rpc_reply = IP_NULL;
341 }
342
343 /*
344 * Routine: ipc_thread_terminate
345 * Purpose:
346 * Clean up and destroy a thread's IPC state.
347 * Conditions:
348 * Nothing locked. The thread must be suspended.
349 * (Or be the current thread.)
350 */
351
352 void
353 ipc_thread_terminate(
354 thread_t thread)
355 {
356 assert(ipc_kmsg_queue_empty(&thread->ith_messages));
357
358 if (thread->ith_rpc_reply != IP_NULL)
359 ipc_port_dealloc_reply(thread->ith_rpc_reply);
360 thread->ith_rpc_reply = IP_NULL;
361 }
362
363 /*
364 * Routine: ipc_thr_act_init
365 * Purpose:
366  *      Initialize a thr_act's IPC state.
367 * Conditions:
368 * Nothing locked.
369 */
370
371 void
372 ipc_thr_act_init(task_t task, thread_act_t thr_act)
373 {
374 ipc_port_t kport; int i;
375
376 kport = ipc_port_alloc_kernel();
377 if (kport == IP_NULL)
378 panic("ipc_thr_act_init");
379
380 thr_act->ith_self = kport;
381 thr_act->ith_sself = ipc_port_make_send(kport);
382
383 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++)
384 thr_act->exc_actions[i].port = IP_NULL;
385
386 ipc_kobject_set(kport, (ipc_kobject_t) thr_act, IKOT_ACT);
387 }
388
389 void
390 ipc_thr_act_disable(thread_act_t thr_act)
391 {
392 int i;
393 ipc_port_t kport;
394
395 kport = thr_act->ith_self;
396
397 if (kport != IP_NULL)
398 ipc_kobject_set(kport, IKO_NULL, IKOT_NONE);
399 }
400
401 void
402 ipc_thr_act_terminate(thread_act_t thr_act)
403 {
404 ipc_port_t kport; int i;
405
406 kport = thr_act->ith_self;
407
408 if (kport == IP_NULL) {
409 /* the thread is already terminated (can this happen?) */
410 return;
411 }
412
413 thr_act->ith_self = IP_NULL;
414
415 /* release the naked send rights */
416
417 if (IP_VALID(thr_act->ith_sself))
418 ipc_port_release_send(thr_act->ith_sself);
419 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
420 if (IP_VALID(thr_act->exc_actions[i].port))
421 ipc_port_release_send(thr_act->exc_actions[i].port);
422 }
423
424 /* destroy the kernel port */
425 ipc_port_dealloc_kernel(kport);
426 }
427
428 /*
429 * Routine: retrieve_task_self_fast
430 * Purpose:
431  *      Optimized version of retrieve_task_self
432 * that only works for the current task.
433 *
434 * Return a send right (possibly null/dead)
435 * for the task's user-visible self port.
436 * Conditions:
437 * Nothing locked.
438 */
439
440 ipc_port_t
441 retrieve_task_self_fast(
442 register task_t task)
443 {
444 register ipc_port_t port;
445
446 assert(task == current_task());
447
448 itk_lock(task);
449 assert(task->itk_self != IP_NULL);
450
451 if ((port = task->itk_sself) == task->itk_self) {
452 /* no interposing */
453
454 ip_lock(port);
455 assert(ip_active(port));
456 ip_reference(port);
457 port->ip_srights++;
458 ip_unlock(port);
459 } else
460 port = ipc_port_copy_send(port);
461 itk_unlock(task);
462
463 return port;
464 }
465
466 /*
467 * Routine: retrieve_act_self_fast
468 * Purpose:
469  *      Optimized version of retrieve_thread_self
470 * that only works for the current thread.
471 *
472 * Return a send right (possibly null/dead)
473 * for the thread's user-visible self port.
474 * Conditions:
475 * Nothing locked.
476 */
477
478 ipc_port_t
479 retrieve_act_self_fast(thread_act_t thr_act)
480 {
481 register ipc_port_t port;
482
483 assert(thr_act == current_act());
484 act_lock(thr_act);
485 assert(thr_act->ith_self != IP_NULL);
486
487 if ((port = thr_act->ith_sself) == thr_act->ith_self) {
488 /* no interposing */
489
490 ip_lock(port);
491 assert(ip_active(port));
492 ip_reference(port);
493 port->ip_srights++;
494 ip_unlock(port);
495 } else
496 port = ipc_port_copy_send(port);
497 act_unlock(thr_act);
498
499 return port;
500 }
501
502 /*
503 * Routine: task_self_trap [mach trap]
504 * Purpose:
505 * Give the caller send rights for his own task port.
506 * Conditions:
507 * Nothing locked.
508 * Returns:
509 * MACH_PORT_NULL if there are any resource failures
510 * or other errors.
511 */
512
513 mach_port_name_t
514 task_self_trap(void)
515 {
516 task_t task = current_task();
517 ipc_port_t sright;
518
519 sright = retrieve_task_self_fast(task);
520 return ipc_port_copyout_send(sright, task->itk_space);
521 }
522
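/*
 * Usage sketch (illustrative, not part of this file): in user space this
 * trap underlies mach_task_self(); most programs call the wrapper rather
 * than the trap itself.  Assuming <mach/mach.h>:
 *
 *	mach_port_t self = mach_task_self();
 *
 * The returned name denotes a send right for the task's kernel port in
 * the caller's IPC space.
 */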
523 /*
524 * Routine: thread_self_trap [mach trap]
525 * Purpose:
526 * Give the caller send rights for his own thread port.
527 * Conditions:
528 * Nothing locked.
529 * Returns:
530 * MACH_PORT_NULL if there are any resource failures
531 * or other errors.
532 */
533
534 mach_port_name_t
535 thread_self_trap(void)
536 {
537 thread_act_t thr_act = current_act();
538 task_t task = thr_act->task;
539 ipc_port_t sright;
540
541 sright = retrieve_act_self_fast(thr_act);
542 return ipc_port_copyout_send(sright, task->itk_space);
543 }
544
545 /*
546 * Routine: mach_reply_port [mach trap]
547 * Purpose:
548 * Allocate a port for the caller.
549 * Conditions:
550 * Nothing locked.
551 * Returns:
552 * MACH_PORT_NULL if there are any resource failures
553 * or other errors.
554 */
555
556 mach_port_name_t
557 mach_reply_port(void)
558 {
559 ipc_port_t port;
560 mach_port_name_t name;
561 kern_return_t kr;
562
563 kr = ipc_port_alloc(current_task()->itk_space, &name, &port);
564 if (kr == KERN_SUCCESS)
565 ip_unlock(port);
566 else
567 name = MACH_PORT_NULL;
568
569 return name;
570 }
571
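/*
 * Usage sketch (illustrative, not part of this file): MIG-generated stubs
 * normally manage reply ports through mig_get_reply_port(), but a caller
 * doing raw mach_msg() can use this trap directly.  Assuming <mach/mach.h>
 * and a hypothetical message header `hdr` plus server send right `server`:
 *
 *	mach_port_t reply = mach_reply_port();
 *	if (reply != MACH_PORT_NULL) {
 *		hdr.msgh_local_port  = reply;
 *		hdr.msgh_remote_port = server;
 *		...
 *	}
 *
 * The allocated name denotes a receive right in the caller's space.
 */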
572 /*
573 * Routine: task_get_special_port [kernel call]
574 * Purpose:
575 * Clones a send right for one of the task's
576 * special ports.
577 * Conditions:
578 * Nothing locked.
579 * Returns:
580 * KERN_SUCCESS Extracted a send right.
581 * KERN_INVALID_ARGUMENT The task is null.
582 * KERN_FAILURE The task/space is dead.
583 * KERN_INVALID_ARGUMENT Invalid special port.
584 */
585
586 kern_return_t
587 task_get_special_port(
588 task_t task,
589 int which,
590 ipc_port_t *portp)
591 {
592 ipc_port_t *whichp;
593 ipc_port_t port;
594
595 if (task == TASK_NULL)
596 return KERN_INVALID_ARGUMENT;
597
598 switch (which) {
599 case TASK_KERNEL_PORT:
600 whichp = &task->itk_sself;
601 break;
602
603 case TASK_HOST_PORT:
604 whichp = &task->itk_host;
605 break;
606
607 case TASK_BOOTSTRAP_PORT:
608 whichp = &task->itk_bootstrap;
609 break;
610
611 case TASK_WIRED_LEDGER_PORT:
612 whichp = &task->wired_ledger_port;
613 break;
614
615 case TASK_PAGED_LEDGER_PORT:
616 whichp = &task->paged_ledger_port;
617 break;
618
619 default:
620 return KERN_INVALID_ARGUMENT;
621 }
622
623 itk_lock(task);
624 if (task->itk_self == IP_NULL) {
625 itk_unlock(task);
626 return KERN_FAILURE;
627 }
628
629 port = ipc_port_copy_send(*whichp);
630 itk_unlock(task);
631
632 *portp = port;
633 return KERN_SUCCESS;
634 }
635
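/*
 * Usage sketch (illustrative, not part of this file): a user program can
 * clone a send right for its bootstrap port like this, assuming
 * <mach/mach.h>:
 *
 *	mach_port_t bootstrap;
 *	kern_return_t kr;
 *
 *	kr = task_get_special_port(mach_task_self(),
 *				   TASK_BOOTSTRAP_PORT, &bootstrap);
 *	if (kr == KERN_SUCCESS && MACH_PORT_VALID(bootstrap))
 *		... talk to the bootstrap server ...
 *
 * The caller owns the returned send right and should eventually release
 * it with mach_port_deallocate().
 */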
636 /*
637 * Routine: task_set_special_port [kernel call]
638 * Purpose:
639 * Changes one of the task's special ports,
640 * setting it to the supplied send right.
641 * Conditions:
642 * Nothing locked. If successful, consumes
643 * the supplied send right.
644 * Returns:
645 * KERN_SUCCESS Changed the special port.
646 * KERN_INVALID_ARGUMENT The task is null.
647 * KERN_FAILURE The task/space is dead.
648 * KERN_INVALID_ARGUMENT Invalid special port.
649 */
650
651 kern_return_t
652 task_set_special_port(
653 task_t task,
654 int which,
655 ipc_port_t port)
656 {
657 ipc_port_t *whichp;
658 ipc_port_t old;
659
660 if (task == TASK_NULL)
661 return KERN_INVALID_ARGUMENT;
662
663 switch (which) {
664 case TASK_KERNEL_PORT:
665 whichp = &task->itk_sself;
666 break;
667
668 case TASK_HOST_PORT:
669 whichp = &task->itk_host;
670 break;
671
672 case TASK_BOOTSTRAP_PORT:
673 whichp = &task->itk_bootstrap;
674 break;
675
676 case TASK_WIRED_LEDGER_PORT:
677 whichp = &task->wired_ledger_port;
678 break;
679
680 case TASK_PAGED_LEDGER_PORT:
681 whichp = &task->paged_ledger_port;
682 break;
683
684 default:
685 return KERN_INVALID_ARGUMENT;
686 }/* switch */
687
688 itk_lock(task);
689 if (task->itk_self == IP_NULL) {
690 itk_unlock(task);
691 return KERN_FAILURE;
692 }
693
694 old = *whichp;
695 *whichp = port;
696 itk_unlock(task);
697
698 if (IP_VALID(old))
699 ipc_port_release_send(old);
700 return KERN_SUCCESS;
701 }
702
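/*
 * Usage sketch (illustrative, not part of this file): replacing the
 * caller's own bootstrap port with `new_bootstrap`, a hypothetical send
 * right the caller holds.  As noted above, on success the kernel consumes
 * the send right carried in with the request.
 *
 *	kr = task_set_special_port(mach_task_self(),
 *				   TASK_BOOTSTRAP_PORT, new_bootstrap);
 */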
703
704 /*
705 * Routine: mach_ports_register [kernel call]
706 * Purpose:
707 * Stash a handful of port send rights in the task.
708 * Child tasks will inherit these rights, but they
709 * must use mach_ports_lookup to acquire them.
710 *
711 * The rights are supplied in a (wired) kalloc'd segment.
712 * Rights which aren't supplied are assumed to be null.
713 * Conditions:
714 * Nothing locked. If successful, consumes
715 * the supplied rights and memory.
716 * Returns:
717 * KERN_SUCCESS Stashed the port rights.
718 * KERN_INVALID_ARGUMENT The task is null.
719 * KERN_INVALID_ARGUMENT The task is dead.
720 * KERN_INVALID_ARGUMENT Too many port rights supplied.
721 */
722
723 kern_return_t
724 mach_ports_register(
725 task_t task,
726 mach_port_array_t memory,
727 mach_msg_type_number_t portsCnt)
728 {
729 ipc_port_t ports[TASK_PORT_REGISTER_MAX];
730 int i;
731
732 if ((task == TASK_NULL) ||
733 (portsCnt > TASK_PORT_REGISTER_MAX))
734 return KERN_INVALID_ARGUMENT;
735
736 /*
737 * Pad the port rights with nulls.
738 */
739
740 for (i = 0; i < portsCnt; i++)
741 ports[i] = memory[i];
742 for (; i < TASK_PORT_REGISTER_MAX; i++)
743 ports[i] = IP_NULL;
744
745 itk_lock(task);
746 if (task->itk_self == IP_NULL) {
747 itk_unlock(task);
748 return KERN_INVALID_ARGUMENT;
749 }
750
751 /*
752 * Replace the old send rights with the new.
753 * Release the old rights after unlocking.
754 */
755
756 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
757 ipc_port_t old;
758
759 old = task->itk_registered[i];
760 task->itk_registered[i] = ports[i];
761 ports[i] = old;
762 }
763
764 itk_unlock(task);
765
766 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
767 if (IP_VALID(ports[i]))
768 ipc_port_release_send(ports[i]);
769
770 /*
771 * Now that the operation is known to be successful,
772 * we can free the memory.
773 */
774
775 if (portsCnt != 0)
776 kfree((vm_offset_t) memory,
777 (vm_size_t) (portsCnt * sizeof(mach_port_t)));
778
779 return KERN_SUCCESS;
780 }
781
782 /*
783 * Routine: mach_ports_lookup [kernel call]
784 * Purpose:
785 * Retrieves (clones) the stashed port send rights.
786 * Conditions:
787 * Nothing locked. If successful, the caller gets
788 * rights and memory.
789 * Returns:
790 * KERN_SUCCESS Retrieved the send rights.
791 * KERN_INVALID_ARGUMENT The task is null.
792 * KERN_INVALID_ARGUMENT The task is dead.
793 * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
794 */
795
796 kern_return_t
797 mach_ports_lookup(
798 task_t task,
799 mach_port_array_t *portsp,
800 mach_msg_type_number_t *portsCnt)
801 {
802 vm_offset_t memory;
803 vm_size_t size;
804 ipc_port_t *ports;
805 int i;
806
807 kern_return_t kr;
808
809 if (task == TASK_NULL)
810 return KERN_INVALID_ARGUMENT;
811
812 size = (vm_size_t) (TASK_PORT_REGISTER_MAX * sizeof(ipc_port_t));
813
814 memory = kalloc(size);
815 if (memory == 0)
816 return KERN_RESOURCE_SHORTAGE;
817
818 itk_lock(task);
819 if (task->itk_self == IP_NULL) {
820 itk_unlock(task);
821
822 kfree(memory, size);
823 return KERN_INVALID_ARGUMENT;
824 }
825
826 ports = (ipc_port_t *) memory;
827
828 /*
829 * Clone port rights. Because kalloc'd memory
830 * is wired, we won't fault while holding the task lock.
831 */
832
833 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
834 ports[i] = ipc_port_copy_send(task->itk_registered[i]);
835
836 itk_unlock(task);
837
838 *portsp = (mach_port_array_t) ports;
839 *portsCnt = TASK_PORT_REGISTER_MAX;
840 return KERN_SUCCESS;
841 }
842
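/*
 * Usage sketch (illustrative, not part of this file): a parent can stash
 * send rights that its children later retrieve.  `service_port` is a
 * hypothetical send right the parent owns; <mach/mach.h> is assumed.
 *
 *	In the parent:
 *		mach_port_t ports[1] = { service_port };
 *		kr = mach_ports_register(mach_task_self(), ports, 1);
 *
 *	In a child (registered rights are inherited at task creation):
 *		mach_port_array_t regd;
 *		mach_msg_type_number_t count;
 *		kr = mach_ports_lookup(mach_task_self(), &regd, &count);
 *
 * The array returned by mach_ports_lookup arrives as out-of-line memory
 * and should eventually be released with vm_deallocate().
 */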
843 /*
844 * Routine: convert_port_to_locked_task
845 * Purpose:
846 * Internal helper routine to convert from a port to a locked
847 * task. Used by several routines that try to convert from a
848 * task port to a reference on some task related object.
849 * Conditions:
850 * Nothing locked, blocking OK.
851 */
852 task_t
853 convert_port_to_locked_task(ipc_port_t port)
854 {
855 while (IP_VALID(port)) {
856 task_t task;
857
858 ip_lock(port);
859 if (!ip_active(port) || (ip_kotype(port) != IKOT_TASK)) {
860 ip_unlock(port);
861 return TASK_NULL;
862 }
863 task = (task_t) port->ip_kobject;
864 assert(task != TASK_NULL);
865
866 /*
867 * Normal lock ordering puts task_lock() before ip_lock().
868 * Attempt out-of-order locking here.
869 */
870 if (task_lock_try(task)) {
871 ip_unlock(port);
872 return(task);
873 }
874
875 ip_unlock(port);
876 mutex_pause();
877 }
878 return TASK_NULL;
879 }
880
881 /*
882 * Routine: convert_port_to_task
883 * Purpose:
884 * Convert from a port to a task.
885 * Doesn't consume the port ref; produces a task ref,
886 * which may be null.
887 * Conditions:
888 * Nothing locked.
889 */
890 task_t
891 convert_port_to_task(
892 ipc_port_t port)
893 {
894 task_t task;
895
896 task = convert_port_to_locked_task(port);
897 if (task) {
898 task->ref_count++;
899 task_unlock(task);
900 }
901 return task;
902 }
903
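/*
 * Usage sketch (illustrative, not part of this file): in-kernel callers
 * pair the reference produced here with task_deallocate() once they are
 * done with the task.  `port` is a hypothetical port the caller holds:
 *
 *	task_t t = convert_port_to_task(port);
 *	if (t != TASK_NULL) {
 *		... operate on t ...
 *		task_deallocate(t);	releases the ref taken above
 *	}
 */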
904 /*
905 * Routine: convert_port_to_space
906 * Purpose:
907 * Convert from a port to a space.
908 * Doesn't consume the port ref; produces a space ref,
909 * which may be null.
910 * Conditions:
911 * Nothing locked.
912 */
913 ipc_space_t
914 convert_port_to_space(
915 ipc_port_t port)
916 {
917 ipc_space_t space;
918 task_t task;
919
920 task = convert_port_to_locked_task(port);
921
922 if (task == TASK_NULL)
923 return IPC_SPACE_NULL;
924
925 if (!task->active) {
926 task_unlock(task);
927 return IPC_SPACE_NULL;
928 }
929
930 space = task->itk_space;
931 is_reference(space);
932 task_unlock(task);
933 return (space);
934 }
935
936 upl_t
937 convert_port_to_upl(
938 ipc_port_t port)
939 {
940 upl_t upl;
941
942 ip_lock(port);
943 if (!ip_active(port) || (ip_kotype(port) != IKOT_UPL)) {
944 ip_unlock(port);
945 return (upl_t)NULL;
946 }
947 upl = (upl_t) port->ip_kobject;
948 ip_unlock(port);
949 upl_lock(upl);
950 upl->ref_count+=1;
951 upl_unlock(upl);
952 return upl;
953 }
954
955 mach_port_t
956 convert_upl_to_port(
957 upl_t upl)
958 {
959 return MACH_PORT_NULL;
960 }
961
962 __private_extern__ void
963 upl_no_senders(
964 upl_t upl,
965 mach_port_mscount_t mscount)
966 {
967 return;
968 }
969
970 /*
971 * Routine: convert_port_entry_to_map
972 * Purpose:
973 * Convert from a port specifying an entry or a task
974 * to a map. Doesn't consume the port ref; produces a map ref,
975 * which may be null. Unlike convert_port_to_map, the
976 * port may be task or a named entry backed.
977 * Conditions:
978 * Nothing locked.
979 */
980
981
982 vm_map_t
983 convert_port_entry_to_map(
984 ipc_port_t port)
985 {
986 task_t task;
987 vm_map_t map;
988 vm_named_entry_t named_entry;
989
990 if(IP_VALID(port) && (ip_kotype(port) == IKOT_NAMED_ENTRY)) {
991 while(TRUE) {
992 ip_lock(port);
993 if(ip_active(port) && (ip_kotype(port)
994 == IKOT_NAMED_ENTRY)) {
995 named_entry =
996 (vm_named_entry_t)port->ip_kobject;
997 if (!(mutex_try(&(named_entry)->Lock))) {
998 ip_unlock(port);
999 mutex_pause();
1000 continue;
1001 }
1002 named_entry->ref_count++;
1003 mutex_unlock(&(named_entry)->Lock);
1004 ip_unlock(port);
1005 if ((named_entry->is_sub_map) &&
1006 (named_entry->protection
1007 & VM_PROT_WRITE)) {
1008 map = named_entry->backing.map;
1009 } else {
1010 mach_destroy_memory_entry(port);
1011 return VM_MAP_NULL;
1012 }
1013 vm_map_reference_swap(map);
1014 mach_destroy_memory_entry(port);
1015 break;
1016 }
1017 else
1018 return VM_MAP_NULL;
1019 }
1020 } else {
1021 task_t task;
1022
1023 task = convert_port_to_locked_task(port);
1024
1025 if (task == TASK_NULL)
1026 return VM_MAP_NULL;
1027
1028 if (!task->active) {
1029 task_unlock(task);
1030 return VM_MAP_NULL;
1031 }
1032
1033 map = task->map;
1034 vm_map_reference_swap(map);
1035 task_unlock(task);
1036 }
1037
1038 return map;
1039 }
1040
1041 /*
1042 * Routine: convert_port_entry_to_object
1043 * Purpose:
1044 * Convert from a port specifying a named entry to an
1045 * object. Doesn't consume the port ref; produces a map ref,
1046 * which may be null.
1047 * Conditions:
1048 * Nothing locked.
1049 */
1050
1051
1052 vm_object_t
1053 convert_port_entry_to_object(
1054 ipc_port_t port)
1055 {
1056 vm_object_t object;
1057 vm_named_entry_t named_entry;
1058
1059 if(IP_VALID(port) && (ip_kotype(port) == IKOT_NAMED_ENTRY)) {
1060 while(TRUE) {
1061 ip_lock(port);
1062 if(ip_active(port) && (ip_kotype(port)
1063 == IKOT_NAMED_ENTRY)) {
1064 named_entry =
1065 (vm_named_entry_t)port->ip_kobject;
1066 if (!(mutex_try(&(named_entry)->Lock))) {
1067 ip_unlock(port);
1068 mutex_pause();
1069 continue;
1070 }
1071 named_entry->ref_count++;
1072 mutex_unlock(&(named_entry)->Lock);
1073 ip_unlock(port);
1074 if ((!named_entry->is_sub_map) &&
1075 (named_entry->protection
1076 & VM_PROT_WRITE)) {
1077 object = named_entry->object;
1078 } else {
1079 mach_destroy_memory_entry(port);
1080 return (vm_object_t)NULL;
1081 }
1082 vm_object_reference(named_entry->object);
1083 mach_destroy_memory_entry(port);
1084 break;
1085 }
1086 else
1087 return (vm_object_t)NULL;
1088 }
1089 } else {
1090 return (vm_object_t)NULL;
1091 }
1092
1093 return object;
1094 }
1095
1096 /*
1097 * Routine: convert_port_to_map
1098 * Purpose:
1099 * Convert from a port to a map.
1100 * Doesn't consume the port ref; produces a map ref,
1101 * which may be null.
1102 * Conditions:
1103 * Nothing locked.
1104 */
1105
1106 vm_map_t
1107 convert_port_to_map(
1108 ipc_port_t port)
1109 {
1110 task_t task;
1111 vm_map_t map;
1112
1113 task = convert_port_to_locked_task(port);
1114
1115 if (task == TASK_NULL)
1116 return VM_MAP_NULL;
1117
1118 if (!task->active) {
1119 task_unlock(task);
1120 return VM_MAP_NULL;
1121 }
1122
1123 map = task->map;
1124 vm_map_reference_swap(map);
1125 task_unlock(task);
1126 return map;
1127 }
1128
1129
1130 /*
1131 * Routine: convert_port_to_act
1132 * Purpose:
1133 * Convert from a port to a thr_act.
1134 * Doesn't consume the port ref; produces an thr_act ref,
1135 * which may be null.
1136 * Conditions:
1137 * Nothing locked.
1138 */
1139
1140 thread_act_t
1141 convert_port_to_act( ipc_port_t port )
1142 {
1143 boolean_t r;
1144 thread_act_t thr_act = 0;
1145
1146 r = FALSE;
1147 while (!r && IP_VALID(port)) {
1148 ip_lock(port);
1149 r = ref_act_port_locked(port, &thr_act);
1150 /* port unlocked */
1151 }
1152 return (thr_act);
1153 }
1154
1155 boolean_t
1156 ref_act_port_locked( ipc_port_t port, thread_act_t *pthr_act )
1157 {
1158 thread_act_t thr_act;
1159
1160 thr_act = 0;
1161 if (ip_active(port) &&
1162 (ip_kotype(port) == IKOT_ACT)) {
1163 thr_act = (thread_act_t) port->ip_kobject;
1164 assert(thr_act != THR_ACT_NULL);
1165
1166 /*
1167 * Out of order locking here, normal
1168 * ordering is act_lock(), then ip_lock().
1169 */
1170 if (!act_lock_try(thr_act)) {
1171 ip_unlock(port);
1172 mutex_pause();
1173 return (FALSE);
1174 }
1175 act_reference_locked(thr_act);
1176 act_unlock(thr_act);
1177 }
1178 *pthr_act = thr_act;
1179 ip_unlock(port);
1180 return (TRUE);
1181 }
1182
1183 /*
1184 * Routine: port_name_to_act
1185 * Purpose:
1186  *              Convert from a port name to an act reference.
1187  *              A name of MACH_PORT_NULL is valid for the null act.
1188 * Conditions:
1189 * Nothing locked.
1190 */
1191 thread_act_t
1192 port_name_to_act(
1193 mach_port_name_t name)
1194 {
1195 thread_act_t thr_act = THR_ACT_NULL;
1196 ipc_port_t kern_port;
1197 kern_return_t kr;
1198
1199 if (MACH_PORT_VALID(name)) {
1200 kr = ipc_object_copyin(current_space(), name,
1201 MACH_MSG_TYPE_COPY_SEND,
1202 (ipc_object_t *) &kern_port);
1203 if (kr != KERN_SUCCESS)
1204 return THR_ACT_NULL;
1205
1206 thr_act = convert_port_to_act(kern_port);
1207
1208 if (IP_VALID(kern_port))
1209 ipc_port_release_send(kern_port);
1210 }
1211 return thr_act;
1212 }
1213
1214 task_t
1215 port_name_to_task(
1216 mach_port_name_t name)
1217 {
1218 ipc_port_t kern_port;
1219 kern_return_t kr;
1220 task_t task = TASK_NULL;
1221
1222 if (MACH_PORT_VALID(name)) {
1223 kr = ipc_object_copyin(current_space(), name,
1224 MACH_MSG_TYPE_COPY_SEND,
1225 (ipc_object_t *) &kern_port);
1226 if (kr != KERN_SUCCESS)
1227 return TASK_NULL;
1228
1229 task = convert_port_to_task(kern_port);
1230
1231 if (IP_VALID(kern_port))
1232 ipc_port_release_send(kern_port);
1233 }
1234 return task;
1235 }
1236
1237 /*
1238 * Routine: convert_task_to_port
1239 * Purpose:
1240 * Convert from a task to a port.
1241 * Consumes a task ref; produces a naked send right
1242 * which may be invalid.
1243 * Conditions:
1244 * Nothing locked.
1245 */
1246
1247 ipc_port_t
1248 convert_task_to_port(
1249 task_t task)
1250 {
1251 ipc_port_t port;
1252
1253 itk_lock(task);
1254 if (task->itk_self != IP_NULL)
1255 #if NORMA_TASK
1256 if (task->map == VM_MAP_NULL)
1257 /* norma placeholder task */
1258 port = ipc_port_copy_send(task->itk_self);
1259 else
1260 #endif /* NORMA_TASK */
1261 port = ipc_port_make_send(task->itk_self);
1262 else
1263 port = IP_NULL;
1264 itk_unlock(task);
1265
1266 task_deallocate(task);
1267 return port;
1268 }
1269
1270 /*
1271 * Routine: convert_act_to_port
1272 * Purpose:
1273 * Convert from a thr_act to a port.
1274 * Consumes an thr_act ref; produces a naked send right
1275 * which may be invalid.
1276 * Conditions:
1277 * Nothing locked.
1278 */
1279
1280 ipc_port_t
1281 convert_act_to_port(thr_act)
1282 thread_act_t thr_act;
1283 {
1284 ipc_port_t port;
1285
1286 act_lock(thr_act);
1287 if (thr_act->ith_self != IP_NULL)
1288 port = ipc_port_make_send(thr_act->ith_self);
1289 else
1290 port = IP_NULL;
1291 act_unlock(thr_act);
1292
1293 act_deallocate(thr_act);
1294 return port;
1295 }
1296
1297 /*
1298 * Routine: space_deallocate
1299 * Purpose:
1300 * Deallocate a space ref produced by convert_port_to_space.
1301 * Conditions:
1302 * Nothing locked.
1303 */
1304
1305 void
1306 space_deallocate(
1307 ipc_space_t space)
1308 {
1309 if (space != IS_NULL)
1310 is_release(space);
1311 }
1312
1313 /*
1314 * Routine: thread/task_set_exception_ports [kernel call]
1315 * Purpose:
1316 * Sets the thread/task exception port, flavor and
1317 * behavior for the exception types specified by the mask.
1318 * There will be one send right per exception per valid
1319 * port.
1320 * Conditions:
1321 * Nothing locked. If successful, consumes
1322 * the supplied send right.
1323 * Returns:
1324 * KERN_SUCCESS Changed the special port.
1325 * KERN_INVALID_ARGUMENT The thread is null,
1326 * Illegal mask bit set.
1327 * Illegal exception behavior
1328 * KERN_FAILURE The thread is dead.
1329 */
1330
1331 kern_return_t
1332 thread_set_exception_ports(
1333 thread_act_t thr_act,
1334 exception_mask_t exception_mask,
1335 ipc_port_t new_port,
1336 exception_behavior_t new_behavior,
1337 thread_state_flavor_t new_flavor)
1338 {
1339 register int i;
1340 ipc_port_t old_port[EXC_TYPES_COUNT];
1341
1342 if (!thr_act)
1343 return KERN_INVALID_ARGUMENT;
1344
1345 if (exception_mask & ~EXC_MASK_ALL)
1346 return KERN_INVALID_ARGUMENT;
1347
1348 if (IP_VALID(new_port)) {
1349 switch (new_behavior) {
1350 case EXCEPTION_DEFAULT:
1351 case EXCEPTION_STATE:
1352 case EXCEPTION_STATE_IDENTITY:
1353 break;
1354 default:
1355 return KERN_INVALID_ARGUMENT;
1356 }
1357 }
1358
1359 /*
1360 * Check the validity of the thread_state_flavor by calling the
1361 * VALID_THREAD_STATE_FLAVOR architecture dependent macro defined in
1362 * osfmk/mach/ARCHITECTURE/thread_status.h
1363 */
1364 if (!VALID_THREAD_STATE_FLAVOR(new_flavor)) {
1365 return KERN_INVALID_ARGUMENT;
1366 }
1367
1368 act_lock(thr_act);
1369 if (!thr_act->active) {
1370 act_unlock(thr_act);
1371 return KERN_FAILURE;
1372 }
1373
1374 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
1375 if (exception_mask & (1 << i)) {
1376 old_port[i] = thr_act->exc_actions[i].port;
1377 thr_act->exc_actions[i].port =
1378 ipc_port_copy_send(new_port);
1379 thr_act->exc_actions[i].behavior = new_behavior;
1380 thr_act->exc_actions[i].flavor = new_flavor;
1381 } else
1382 old_port[i] = IP_NULL;
1383 }/* for */
1384 /*
1385 * Consume send rights without any lock held.
1386 */
1387 act_unlock(thr_act);
1388 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++)
1389 if (IP_VALID(old_port[i]))
1390 ipc_port_release_send(old_port[i]);
1391 if (IP_VALID(new_port)) /* consume send right */
1392 ipc_port_release_send(new_port);
1393
1394 return KERN_SUCCESS;
1395 }/* thread_set_exception_ports */
1396
1397 kern_return_t
1398 task_set_exception_ports(
1399 task_t task,
1400 exception_mask_t exception_mask,
1401 ipc_port_t new_port,
1402 exception_behavior_t new_behavior,
1403 thread_state_flavor_t new_flavor)
1404 {
1405 register int i;
1406 ipc_port_t old_port[EXC_TYPES_COUNT];
1407
1408 if (task == TASK_NULL) {
1409 return KERN_INVALID_ARGUMENT;
1410 }
1411
1412 if (exception_mask & ~EXC_MASK_ALL) {
1413 return KERN_INVALID_ARGUMENT;
1414 }
1415
1416 if (IP_VALID(new_port)) {
1417 switch (new_behavior) {
1418 case EXCEPTION_DEFAULT:
1419 case EXCEPTION_STATE:
1420 case EXCEPTION_STATE_IDENTITY:
1421 break;
1422 default:
1423 return KERN_INVALID_ARGUMENT;
1424 }
1425 }
1426 /* Cannot easily check "new_flavor", but that just means that
1427 * the flavor in the generated exception message might be garbage:
1428 * GIGO */
1429
1430 itk_lock(task);
1431 if (task->itk_self == IP_NULL) {
1432 itk_unlock(task);
1433 return KERN_FAILURE;
1434 }
1435
1436 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
1437 if (exception_mask & (1 << i)) {
1438 old_port[i] = task->exc_actions[i].port;
1439 task->exc_actions[i].port =
1440 ipc_port_copy_send(new_port);
1441 task->exc_actions[i].behavior = new_behavior;
1442 task->exc_actions[i].flavor = new_flavor;
1443 } else
1444 old_port[i] = IP_NULL;
1445 }/* for */
1446
1447 /*
1448 * Consume send rights without any lock held.
1449 */
1450 itk_unlock(task);
1451 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++)
1452 if (IP_VALID(old_port[i]))
1453 ipc_port_release_send(old_port[i]);
1454 if (IP_VALID(new_port)) /* consume send right */
1455 ipc_port_release_send(new_port);
1456
1457 return KERN_SUCCESS;
1458 }/* task_set_exception_ports */
1459
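/*
 * Usage sketch (illustrative, not part of this file): a user-level
 * exception handler is typically installed by allocating a receive
 * right, making a send right for it, and handing that send right to one
 * of the calls above.  Assuming <mach/mach.h>:
 *
 *	mach_port_t exc_port;
 *
 *	kr = mach_port_allocate(mach_task_self(),
 *				MACH_PORT_RIGHT_RECEIVE, &exc_port);
 *	kr = mach_port_insert_right(mach_task_self(), exc_port, exc_port,
 *				    MACH_MSG_TYPE_MAKE_SEND);
 *	kr = task_set_exception_ports(mach_task_self(),
 *				      EXC_MASK_BAD_ACCESS,
 *				      exc_port,
 *				      EXCEPTION_DEFAULT,
 *				      THREAD_STATE_NONE);
 *
 * A thread then services exception messages received on exc_port, e.g.
 * via mach_msg() and the MIG-generated exc_server() loop.
 */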
1460 /*
1461 * Routine: thread/task_swap_exception_ports [kernel call]
1462 * Purpose:
1463 * Sets the thread/task exception port, flavor and
1464 * behavior for the exception types specified by the
1465 * mask.
1466 *
1467  *              The old ports, behaviors and flavors are returned.
1468 * Count specifies the array sizes on input and
1469 * the number of returned ports etc. on output. The
1470 * arrays must be large enough to hold all the returned
1471  *              data, MIG returns an error otherwise. The masks
1472 * array specifies the corresponding exception type(s).
1473 *
1474 * Conditions:
1475 * Nothing locked. If successful, consumes
1476 * the supplied send right.
1477 *
1478  *              Returns up to [in] CountCnt elements.
1479 * Returns:
1480 * KERN_SUCCESS Changed the special port.
1481 * KERN_INVALID_ARGUMENT The thread is null,
1482 * Illegal mask bit set.
1483 * Illegal exception behavior
1484 * KERN_FAILURE The thread is dead.
1485 */
1486
1487 kern_return_t
1488 thread_swap_exception_ports(
1489 thread_act_t thr_act,
1490 exception_mask_t exception_mask,
1491 ipc_port_t new_port,
1492 exception_behavior_t new_behavior,
1493 thread_state_flavor_t new_flavor,
1494 exception_mask_array_t masks,
1495 mach_msg_type_number_t * CountCnt,
1496 exception_port_array_t ports,
1497 exception_behavior_array_t behaviors,
1498 thread_state_flavor_array_t flavors )
1499 {
1500 register int i,
1501 j,
1502 count;
1503 ipc_port_t old_port[EXC_TYPES_COUNT];
1504
1505 if (!thr_act)
1506 return KERN_INVALID_ARGUMENT;
1507
1508 if (exception_mask & ~EXC_MASK_ALL) {
1509 return KERN_INVALID_ARGUMENT;
1510 }
1511
1512 if (IP_VALID(new_port)) {
1513 switch (new_behavior) {
1514 case EXCEPTION_DEFAULT:
1515 case EXCEPTION_STATE:
1516 case EXCEPTION_STATE_IDENTITY:
1517 break;
1518 default:
1519 return KERN_INVALID_ARGUMENT;
1520 }
1521 }
1522 /* Cannot easily check "new_flavor", but that just means that
1523 * the flavor in the generated exception message might be garbage:
1524 * GIGO */
1525
1526 act_lock(thr_act);
1527 if (!thr_act->active) {
1528 act_unlock(thr_act);
1529 return KERN_FAILURE;
1530 }
1531
1532 count = 0;
1533
1534 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
1535 if (exception_mask & (1 << i)) {
1536 for (j = 0; j < count; j++) {
1537 /*
1538 * search for an identical entry, if found
1539 * set corresponding mask for this exception.
1540 */
1541 if (thr_act->exc_actions[i].port == ports[j] &&
1542                                 thr_act->exc_actions[i].behavior == behaviors[j]
1543                                 && thr_act->exc_actions[i].flavor == flavors[j])
1544 {
1545 masks[j] |= (1 << i);
1546 break;
1547 }
1548 }/* for */
1549 if (j == count) {
1550 masks[j] = (1 << i);
1551 ports[j] =
1552 ipc_port_copy_send(thr_act->exc_actions[i].port);
1553
1554 behaviors[j] = thr_act->exc_actions[i].behavior;
1555 flavors[j] = thr_act->exc_actions[i].flavor;
1556 count++;
1557 }
1558
1559 old_port[i] = thr_act->exc_actions[i].port;
1560 thr_act->exc_actions[i].port =
1561 ipc_port_copy_send(new_port);
1562 thr_act->exc_actions[i].behavior = new_behavior;
1563 thr_act->exc_actions[i].flavor = new_flavor;
1564 if (count > *CountCnt) {
1565 break;
1566 }
1567 } else
1568 old_port[i] = IP_NULL;
1569 }/* for */
1570
1571 /*
1572 * Consume send rights without any lock held.
1573 */
1574 act_unlock(thr_act);
1575 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++)
1576 if (IP_VALID(old_port[i]))
1577 ipc_port_release_send(old_port[i]);
1578 if (IP_VALID(new_port)) /* consume send right */
1579 ipc_port_release_send(new_port);
1580 *CountCnt = count;
1581 return KERN_SUCCESS;
1582 }/* thread_swap_exception_ports */
1583
1584 kern_return_t
1585 task_swap_exception_ports(
1586 task_t task,
1587 exception_mask_t exception_mask,
1588 ipc_port_t new_port,
1589 exception_behavior_t new_behavior,
1590 thread_state_flavor_t new_flavor,
1591 exception_mask_array_t masks,
1592 mach_msg_type_number_t * CountCnt,
1593 exception_port_array_t ports,
1594 exception_behavior_array_t behaviors,
1595 thread_state_flavor_array_t flavors )
1596 {
1597 register int i,
1598 j,
1599 count;
1600 ipc_port_t old_port[EXC_TYPES_COUNT];
1601
1602 if (task == TASK_NULL)
1603 return KERN_INVALID_ARGUMENT;
1604
1605 if (exception_mask & ~EXC_MASK_ALL) {
1606 return KERN_INVALID_ARGUMENT;
1607 }
1608
1609 if (IP_VALID(new_port)) {
1610 switch (new_behavior) {
1611 case EXCEPTION_DEFAULT:
1612 case EXCEPTION_STATE:
1613 case EXCEPTION_STATE_IDENTITY:
1614 break;
1615 default:
1616 return KERN_INVALID_ARGUMENT;
1617 }
1618 }
1619 /* Cannot easily check "new_flavor", but that just means that
1620 * the flavor in the generated exception message might be garbage:
1621 * GIGO */
1622
1623 itk_lock(task);
1624 if (task->itk_self == IP_NULL) {
1625 itk_unlock(task);
1626 return KERN_FAILURE;
1627 }
1628
1629 count = 0;
1630
1631 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
1632 if (exception_mask & (1 << i)) {
1633 for (j = 0; j < count; j++) {
1634 /*
1635 * search for an identical entry, if found
1636 * set corresponding mask for this exception.
1637 */
1638 if (task->exc_actions[i].port == ports[j] &&
1639 task->exc_actions[i].behavior == behaviors[j]
1640 && task->exc_actions[i].flavor == flavors[j])
1641 {
1642 masks[j] |= (1 << i);
1643 break;
1644 }
1645 }/* for */
1646 if (j == count) {
1647 masks[j] = (1 << i);
1648 ports[j] =
1649 ipc_port_copy_send(task->exc_actions[i].port);
1650 behaviors[j] = task->exc_actions[i].behavior;
1651 flavors[j] = task->exc_actions[i].flavor;
1652 count++;
1653 }
1654 old_port[i] = task->exc_actions[i].port;
1655 task->exc_actions[i].port =
1656 ipc_port_copy_send(new_port);
1657 task->exc_actions[i].behavior = new_behavior;
1658 task->exc_actions[i].flavor = new_flavor;
1659 if (count > *CountCnt) {
1660 break;
1661 }
1662 } else
1663 old_port[i] = IP_NULL;
1664 }/* for */
1665
1666
1667 /*
1668 * Consume send rights without any lock held.
1669 */
1670 itk_unlock(task);
1671 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++)
1672 if (IP_VALID(old_port[i]))
1673 ipc_port_release_send(old_port[i]);
1674 if (IP_VALID(new_port)) /* consume send right */
1675 ipc_port_release_send(new_port);
1676 *CountCnt = count;
1677
1678 return KERN_SUCCESS;
1679 }/* task_swap_exception_ports */
1680
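/*
 * Usage sketch (illustrative, not part of this file): swapping lets a
 * caller install a handler while saving the previous settings so they
 * can be reinstated later.  `my_exc_port` is a hypothetical send right;
 * <mach/mach.h> is assumed.
 *
 *	exception_mask_t	masks[EXC_TYPES_COUNT];
 *	mach_msg_type_number_t	count = EXC_TYPES_COUNT;
 *	mach_port_t		old_ports[EXC_TYPES_COUNT];
 *	exception_behavior_t	old_behaviors[EXC_TYPES_COUNT];
 *	thread_state_flavor_t	old_flavors[EXC_TYPES_COUNT];
 *
 *	kr = task_swap_exception_ports(mach_task_self(), EXC_MASK_ALL,
 *				       my_exc_port, EXCEPTION_DEFAULT,
 *				       THREAD_STATE_NONE,
 *				       masks, &count, old_ports,
 *				       old_behaviors, old_flavors);
 *
 * Each saved port/behavior/flavor triple can later be reinstalled with
 * task_set_exception_ports() using the corresponding masks[i].
 */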
1681 /*
1682 * Routine: thread/task_get_exception_ports [kernel call]
1683 * Purpose:
1684 * Clones a send right for each of the thread/task's exception
1685  *              ports specified in the mask and returns the behavior
1686  *              and flavor of said port.
1687  *
1688  *              Returns up to [in] CountCnt elements.
1689 *
1690 * Conditions:
1691 * Nothing locked.
1692 * Returns:
1693 * KERN_SUCCESS Extracted a send right.
1694 * KERN_INVALID_ARGUMENT The thread is null,
1695 * Invalid special port,
1696 * Illegal mask bit set.
1697 * KERN_FAILURE The thread is dead.
1698 */
1699
1700 kern_return_t
1701 thread_get_exception_ports(
1702 thread_act_t thr_act,
1703 exception_mask_t exception_mask,
1704 exception_mask_array_t masks,
1705 mach_msg_type_number_t * CountCnt,
1706 exception_port_array_t ports,
1707 exception_behavior_array_t behaviors,
1708 thread_state_flavor_array_t flavors )
1709 {
1710 register int i,
1711 j,
1712 count;
1713
1714 if (!thr_act)
1715 return KERN_INVALID_ARGUMENT;
1716
1717 if (exception_mask & ~EXC_MASK_ALL) {
1718 return KERN_INVALID_ARGUMENT;
1719 }
1720
1721 act_lock(thr_act);
1722 if (!thr_act->active) {
1723 act_unlock(thr_act);
1724 return KERN_FAILURE;
1725 }
1726
1727 count = 0;
1728
1729 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
1730 if (exception_mask & (1 << i)) {
1731 for (j = 0; j < count; j++) {
1732 /*
1733 * search for an identical entry, if found
1734 * set corresponding mask for this exception.
1735 */
1736 if (thr_act->exc_actions[i].port == ports[j] &&
1737                                 thr_act->exc_actions[i].behavior == behaviors[j]
1738 && thr_act->exc_actions[i].flavor == flavors[j])
1739 {
1740 masks[j] |= (1 << i);
1741 break;
1742 }
1743 }/* for */
1744 if (j == count) {
1745 masks[j] = (1 << i);
1746 ports[j] =
1747 ipc_port_copy_send(thr_act->exc_actions[i].port);
1748 behaviors[j] = thr_act->exc_actions[i].behavior;
1749 flavors[j] = thr_act->exc_actions[i].flavor;
1750 count++;
1751 if (count >= *CountCnt) {
1752 break;
1753 }
1754 }
1755 }
1756 }/* for */
1757
1758 act_unlock(thr_act);
1759
1760 *CountCnt = count;
1761 return KERN_SUCCESS;
1762 }/* thread_get_exception_ports */
1763
1764 kern_return_t
1765 task_get_exception_ports(
1766 task_t task,
1767 exception_mask_t exception_mask,
1768 exception_mask_array_t masks,
1769 mach_msg_type_number_t * CountCnt,
1770 exception_port_array_t ports,
1771 exception_behavior_array_t behaviors,
1772 thread_state_flavor_array_t flavors )
1773 {
1774 register int i,
1775 j,
1776 count;
1777
1778 if (task == TASK_NULL)
1779 return KERN_INVALID_ARGUMENT;
1780
1781 if (exception_mask & ~EXC_MASK_ALL) {
1782 return KERN_INVALID_ARGUMENT;
1783 }
1784
1785 itk_lock(task);
1786 if (task->itk_self == IP_NULL) {
1787 itk_unlock(task);
1788 return KERN_FAILURE;
1789 }
1790
1791 count = 0;
1792
1793 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
1794 if (exception_mask & (1 << i)) {
1795 for (j = 0; j < count; j++) {
1796 /*
1797 * search for an identical entry, if found
1798 * set corresponding mask for this exception.
1799 */
1800 if (task->exc_actions[i].port == ports[j] &&
1801 task->exc_actions[i].behavior == behaviors[j]
1802 && task->exc_actions[i].flavor == flavors[j])
1803 {
1804 masks[j] |= (1 << i);
1805 break;
1806 }
1807 }/* for */
1808 if (j == count) {
1809 masks[j] = (1 << i);
1810 ports[j] =
1811 ipc_port_copy_send(task->exc_actions[i].port);
1812 behaviors[j] = task->exc_actions[i].behavior;
1813 flavors[j] = task->exc_actions[i].flavor;
1814 count++;
1815 if (count > *CountCnt) {
1816 break;
1817 }
1818 }
1819 }
1820 }/* for */
1821
1822 itk_unlock(task);
1823
1824 *CountCnt = count;
1825 return KERN_SUCCESS;
1826 }/* task_get_exception_ports */
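/*
 * Usage sketch (illustrative, not part of this file): querying the
 * current task-level exception handlers, assuming <mach/mach.h>:
 *
 *	exception_mask_t	masks[EXC_TYPES_COUNT];
 *	mach_msg_type_number_t	count = EXC_TYPES_COUNT;
 *	mach_port_t		ports[EXC_TYPES_COUNT];
 *	exception_behavior_t	behaviors[EXC_TYPES_COUNT];
 *	thread_state_flavor_t	flavors[EXC_TYPES_COUNT];
 *
 *	kr = task_get_exception_ports(mach_task_self(), EXC_MASK_ALL,
 *				      masks, &count, ports,
 *				      behaviors, flavors);
 *
 * The send rights returned in ports[] are owned by the caller and should
 * be released with mach_port_deallocate() when no longer needed.
 */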