apple/xnu (xnu-517.3.15) - osfmk/kern/ipc_tt.c
1 /*
2 * Copyright (c) 2000-2003 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
7 *
8 * This file contains Original Code and/or Modifications of Original Code
9 * as defined in and that are subject to the Apple Public Source License
10 * Version 2.0 (the 'License'). You may not use this file except in
11 * compliance with the License. Please obtain a copy of the License at
12 * http://www.opensource.apple.com/apsl/ and read it before using this
13 * file.
14 *
15 * The Original Code and all software distributed under the License are
16 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
17 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
18 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
19 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
20 * Please see the License for the specific language governing rights and
21 * limitations under the License.
22 *
23 * @APPLE_LICENSE_HEADER_END@
24 */
25 /*
26 * @OSF_COPYRIGHT@
27 */
28 /*
29 * Mach Operating System
30 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
31 * All Rights Reserved.
32 *
33 * Permission to use, copy, modify and distribute this software and its
34 * documentation is hereby granted, provided that both the copyright
35 * notice and this permission notice appear in all copies of the
36 * software, derivative works or modified versions, and any portions
37 * thereof, and that both notices appear in supporting documentation.
38 *
39 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
40 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
41 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
42 *
43 * Carnegie Mellon requests users of this software to return to
44 *
45 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
46 * School of Computer Science
47 * Carnegie Mellon University
48 * Pittsburgh PA 15213-3890
49 *
50 * any improvements or extensions that they make and grant Carnegie Mellon
51 * the rights to redistribute these changes.
52 */
53 /*
54 */
55
56 /*
57 * File: ipc_tt.c
58 * Purpose:
59 * Task and thread related IPC functions.
60 */
61
62 #include <mach/mach_types.h>
63 #include <mach/boolean.h>
64 #include <mach_rt.h>
65 #include <mach/kern_return.h>
66 #include <mach/mach_param.h>
67 #include <mach/task_special_ports.h>
68 #include <mach/thread_special_ports.h>
69 #include <mach/thread_status.h>
70 #include <mach/exception_types.h>
71 #include <mach/mach_traps.h>
72 #include <mach/task_server.h>
73 #include <mach/thread_act_server.h>
74 #include <mach/mach_host_server.h>
75 #include <mach/vm_map_server.h>
76 #include <kern/host.h>
77 #include <kern/ipc_tt.h>
78 #include <kern/thread_act.h>
79 #include <kern/misc_protos.h>
80 #include <vm/vm_pageout.h>
81
82 /*
83 * Routine: ipc_task_init
84 * Purpose:
85 * Initialize a task's IPC state.
86 *
87 * If non-null, some state will be inherited from the parent.
88 * The parent must be appropriately initialized.
89 * Conditions:
90 * Nothing locked.
91 */
92
93 void
94 ipc_task_init(
95 task_t task,
96 task_t parent)
97 {
98 ipc_space_t space;
99 ipc_port_t kport;
100 kern_return_t kr;
101 int i;
102
103
104 kr = ipc_space_create(&ipc_table_entries[0], &space);
105 if (kr != KERN_SUCCESS)
106 panic("ipc_task_init");
107
108
109 kport = ipc_port_alloc_kernel();
110 if (kport == IP_NULL)
111 panic("ipc_task_init");
112
113 itk_lock_init(task);
114 task->itk_self = kport;
115 task->itk_sself = ipc_port_make_send(kport);
116 task->itk_space = space;
117 space->is_fast = FALSE;
118
119 if (parent == TASK_NULL) {
120 ipc_port_t port;
121
122 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
123 task->exc_actions[i].port = IP_NULL;
124 }/* for */
125
126 kr = host_get_host_port(host_priv_self(), &port);
127 assert(kr == KERN_SUCCESS);
128 task->itk_host = port;
129
130 task->itk_bootstrap = IP_NULL;
131
132 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
133 task->itk_registered[i] = IP_NULL;
134 } else {
135 itk_lock(parent);
136 assert(parent->itk_self != IP_NULL);
137
138 /* inherit registered ports */
139
140 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
141 task->itk_registered[i] =
142 ipc_port_copy_send(parent->itk_registered[i]);
143
144 /* inherit exception and bootstrap ports */
145
146 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
147 task->exc_actions[i].port =
148 ipc_port_copy_send(parent->exc_actions[i].port);
149 task->exc_actions[i].flavor =
150 parent->exc_actions[i].flavor;
151 task->exc_actions[i].behavior =
152 parent->exc_actions[i].behavior;
153 }/* for */
154 task->itk_host =
155 ipc_port_copy_send(parent->itk_host);
156
157 task->itk_bootstrap =
158 ipc_port_copy_send(parent->itk_bootstrap);
159
160 itk_unlock(parent);
161 }
162 }
163
164 /*
165 * Routine: ipc_task_enable
166 * Purpose:
167 * Enable a task for IPC access.
168 * Conditions:
169 * Nothing locked.
170 */
171
172 void
173 ipc_task_enable(
174 task_t task)
175 {
176 ipc_port_t kport;
177
178 itk_lock(task);
179 kport = task->itk_self;
180 if (kport != IP_NULL)
181 ipc_kobject_set(kport, (ipc_kobject_t) task, IKOT_TASK);
182 itk_unlock(task);
183 }
184
185 /*
186 * Routine: ipc_task_disable
187 * Purpose:
188 * Disable IPC access to a task.
189 * Conditions:
190 * Nothing locked.
191 */
192
193 void
194 ipc_task_disable(
195 task_t task)
196 {
197 ipc_port_t kport;
198
199 itk_lock(task);
200 kport = task->itk_self;
201 if (kport != IP_NULL)
202 ipc_kobject_set(kport, IKO_NULL, IKOT_NONE);
203 itk_unlock(task);
204 }
205
206 /*
207 * Routine: ipc_task_terminate
208 * Purpose:
209 * Clean up and destroy a task's IPC state.
210 * Conditions:
211 * Nothing locked. The task must be suspended.
212 * (Or the current thread must be in the task.)
213 */
214
215 void
216 ipc_task_terminate(
217 task_t task)
218 {
219 ipc_port_t kport;
220 int i;
221
222 itk_lock(task);
223 kport = task->itk_self;
224
225 if (kport == IP_NULL) {
226 /* the task is already terminated (can this happen?) */
227 itk_unlock(task);
228 return;
229 }
230
231 task->itk_self = IP_NULL;
232 itk_unlock(task);
233
234 /* release the naked send rights */
235
236 if (IP_VALID(task->itk_sself))
237 ipc_port_release_send(task->itk_sself);
238
239 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
240 if (IP_VALID(task->exc_actions[i].port)) {
241 ipc_port_release_send(task->exc_actions[i].port);
242 }
243 }/* for */
244 if (IP_VALID(task->itk_host))
245 ipc_port_release_send(task->itk_host);
246
247 if (IP_VALID(task->itk_bootstrap))
248 ipc_port_release_send(task->itk_bootstrap);
249
250 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
251 if (IP_VALID(task->itk_registered[i]))
252 ipc_port_release_send(task->itk_registered[i]);
253
254 ipc_port_release_send(task->wired_ledger_port);
255 ipc_port_release_send(task->paged_ledger_port);
256
257 /* destroy the kernel port */
258 ipc_port_dealloc_kernel(kport);
259 }
260
261 /*
262 * Routine: ipc_task_reset
263 * Purpose:
264 * Reset a task's IPC state to protect it when
265 * it enters an elevated security context.
266 * Conditions:
267 * Nothing locked. The task must be suspended.
268 * (Or the current thread must be in the task.)
269 */
270
271 void
272 ipc_task_reset(
273 task_t task)
274 {
275 ipc_port_t old_kport, new_kport;
276 ipc_port_t old_sself;
277 #if 0
278 ipc_port_t old_exc_actions[EXC_TYPES_COUNT];
279 int i;
280 #endif
281
282 new_kport = ipc_port_alloc_kernel();
283 if (new_kport == IP_NULL)
284 panic("ipc_task_reset");
285
286 itk_lock(task);
287
288 old_kport = task->itk_self;
289
290 if (old_kport == IP_NULL) {
291 /* the task is already terminated (can this happen?) */
292 itk_unlock(task);
293 ipc_port_dealloc_kernel(new_kport);
294 return;
295 }
296
297 task->itk_self = new_kport;
298 old_sself = task->itk_sself;
299 task->itk_sself = ipc_port_make_send(new_kport);
300 ipc_kobject_set(old_kport, IKO_NULL, IKOT_NONE);
301 ipc_kobject_set(new_kport, (ipc_kobject_t) task, IKOT_TASK);
302
303 #if 0
304 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
305 old_exc_actions[i] = task->exc_actions[i].port;
306 task->exc_actions[i].port = IP_NULL;
307 }/* for */
308 #endif
309
310 itk_unlock(task);
311
312 /* release the naked send rights */
313
314 if (IP_VALID(old_sself))
315 ipc_port_release_send(old_sself);
316
317 #if 0
318 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
319 if (IP_VALID(old_exc_actions[i])) {
320 ipc_port_release_send(old_exc_actions[i]);
321 }
322 }/* for */
323 #endif
324
325 /* destroy the kernel port */
326 ipc_port_dealloc_kernel(old_kport);
327 }
328
329 /*
330 * Routine: ipc_thread_init
331 * Purpose:
332 * Initialize a thread's IPC state.
333 * Conditions:
334 * Nothing locked.
335 */
336
337 void
338 ipc_thread_init(
339 thread_t thread)
340 {
341 ipc_kmsg_queue_init(&thread->ith_messages);
342 thread->ith_mig_reply = MACH_PORT_NULL;
343 thread->ith_rpc_reply = IP_NULL;
344 }
345
346 /*
347 * Routine: ipc_thread_terminate
348 * Purpose:
349 * Clean up and destroy a thread's IPC state.
350 * Conditions:
351 * Nothing locked. The thread must be suspended.
352 * (Or be the current thread.)
353 */
354
355 void
356 ipc_thread_terminate(
357 thread_t thread)
358 {
359 assert(ipc_kmsg_queue_empty(&thread->ith_messages));
360
361 if (thread->ith_rpc_reply != IP_NULL)
362 ipc_port_dealloc_reply(thread->ith_rpc_reply);
363 thread->ith_rpc_reply = IP_NULL;
364 }
365
366 /*
367 * Routine: ipc_thr_act_init
368 * Purpose:
369 * Initialize a thr_act's IPC state.
370 * Conditions:
371 * Nothing locked.
372 */
373
374 void
375 ipc_thr_act_init(task_t task, thread_act_t thr_act)
376 {
377 ipc_port_t kport; int i;
378
379 kport = ipc_port_alloc_kernel();
380 if (kport == IP_NULL)
381 panic("ipc_thr_act_init");
382
383 thr_act->ith_self = kport;
384 thr_act->ith_sself = ipc_port_make_send(kport);
385
386 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++)
387 thr_act->exc_actions[i].port = IP_NULL;
388
389 ipc_kobject_set(kport, (ipc_kobject_t) thr_act, IKOT_ACT);
390 }
391
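/*
 * Routine: ipc_thr_act_disable
 * Purpose:
 * Disable IPC access to a thr_act.
 * Conditions:
 * Nothing locked.
 */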
392 void
393 ipc_thr_act_disable(thread_act_t thr_act)
394 {
395 int i;
396 ipc_port_t kport;
397
398 kport = thr_act->ith_self;
399
400 if (kport != IP_NULL)
401 ipc_kobject_set(kport, IKO_NULL, IKOT_NONE);
402 }
403
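/*
 * Routine: ipc_thr_act_terminate
 * Purpose:
 * Clean up and destroy a thr_act's IPC state.
 * Conditions:
 * Nothing locked.
 */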
404 void
405 ipc_thr_act_terminate(thread_act_t thr_act)
406 {
407 ipc_port_t kport; int i;
408
409 kport = thr_act->ith_self;
410
411 if (kport == IP_NULL) {
412 /* the thread is already terminated (can this happen?) */
413 return;
414 }
415
416 thr_act->ith_self = IP_NULL;
417
418 /* release the naked send rights */
419
420 if (IP_VALID(thr_act->ith_sself))
421 ipc_port_release_send(thr_act->ith_sself);
422 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
423 if (IP_VALID(thr_act->exc_actions[i].port))
424 ipc_port_release_send(thr_act->exc_actions[i].port);
425 }
426
427 /* destroy the kernel port */
428 ipc_port_dealloc_kernel(kport);
429 }
430
431 /*
432 * Routine: retrieve_task_self_fast
433 * Purpose:
434 * Optimized version of retrieve_task_self
435 * that only works for the current task.
436 *
437 * Return a send right (possibly null/dead)
438 * for the task's user-visible self port.
439 * Conditions:
440 * Nothing locked.
441 */
442
443 ipc_port_t
444 retrieve_task_self_fast(
445 register task_t task)
446 {
447 register ipc_port_t port;
448
449 assert(task == current_task());
450
451 itk_lock(task);
452 assert(task->itk_self != IP_NULL);
453
454 if ((port = task->itk_sself) == task->itk_self) {
455 /* no interposing */
456
457 ip_lock(port);
458 assert(ip_active(port));
459 ip_reference(port);
460 port->ip_srights++;
461 ip_unlock(port);
462 } else
463 port = ipc_port_copy_send(port);
464 itk_unlock(task);
465
466 return port;
467 }
468
469 /*
470 * Routine: retrieve_act_self_fast
471 * Purpose:
472 * Optimized version of retrieve_thread_self
473 * that only works for the current thread.
474 *
475 * Return a send right (possibly null/dead)
476 * for the thread's user-visible self port.
477 * Conditions:
478 * Nothing locked.
479 */
480
481 ipc_port_t
482 retrieve_act_self_fast(thread_act_t thr_act)
483 {
484 register ipc_port_t port;
485
486 assert(thr_act == current_act());
487 act_lock(thr_act);
488 assert(thr_act->ith_self != IP_NULL);
489
490 if ((port = thr_act->ith_sself) == thr_act->ith_self) {
491 /* no interposing */
492
493 ip_lock(port);
494 assert(ip_active(port));
495 ip_reference(port);
496 port->ip_srights++;
497 ip_unlock(port);
498 } else
499 port = ipc_port_copy_send(port);
500 act_unlock(thr_act);
501
502 return port;
503 }
504
505 /*
506 * Routine: task_self_trap [mach trap]
507 * Purpose:
508 * Give the caller send rights for his own task port.
509 * Conditions:
510 * Nothing locked.
511 * Returns:
512 * MACH_PORT_NULL if there are any resource failures
513 * or other errors.
514 */
515
516 mach_port_name_t
517 task_self_trap(void)
518 {
519 task_t task = current_task();
520 ipc_port_t sright;
521
522 sright = retrieve_task_self_fast(task);
523 return ipc_port_copyout_send(sright, task->itk_space);
524 }
525
526 /*
527 * Routine: thread_self_trap [mach trap]
528 * Purpose:
529 * Give the caller send rights for his own thread port.
530 * Conditions:
531 * Nothing locked.
532 * Returns:
533 * MACH_PORT_NULL if there are any resource failures
534 * or other errors.
535 */
536
537 mach_port_name_t
538 thread_self_trap(void)
539 {
540 thread_act_t thr_act = current_act();
541 task_t task = thr_act->task;
542 ipc_port_t sright;
543
544 sright = retrieve_act_self_fast(thr_act);
545 return ipc_port_copyout_send(sright, task->itk_space);
546 }
547
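/*
 * Illustrative sketch, not from the original source (kept under #if 0
 * so it stays out of the kernel build): from user space these traps
 * are normally reached through the mach_task_self() and
 * mach_thread_self() wrappers; mach_task_self() returns the cached
 * task self name, while mach_thread_self() returns a new send right
 * that the caller should deallocate.
 */
#if 0
#include <mach/mach.h>

static void
self_ports_example(void)
{
	mach_port_t task = mach_task_self();		/* cached name, do not deallocate */
	mach_port_t thread = mach_thread_self();	/* new send right */

	/* ... use the task and thread ports ... */

	/* release the extra send right returned by mach_thread_self() */
	(void) mach_port_deallocate(task, thread);
}
#endif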
548 /*
549 * Routine: mach_reply_port [mach trap]
550 * Purpose:
551 * Allocate a port for the caller.
552 * Conditions:
553 * Nothing locked.
554 * Returns:
555 * MACH_PORT_NULL if there are any resource failures
556 * or other errors.
557 */
558
559 mach_port_name_t
560 mach_reply_port(void)
561 {
562 ipc_port_t port;
563 mach_port_name_t name;
564 kern_return_t kr;
565
566 kr = ipc_port_alloc(current_task()->itk_space, &name, &port);
567 if (kr == KERN_SUCCESS)
568 ip_unlock(port);
569 else
570 name = MACH_PORT_NULL;
571
572 return name;
573 }
574
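/*
 * Illustrative sketch, not from the original source (kept under #if 0
 * so it stays out of the kernel build): a user-space caller gets a
 * fresh receive right from this trap, typically for use as the reply
 * port of an RPC, and drops the receive right when done.  The
 * declaration of mach_reply_port() is assumed to come from the usual
 * user-space Mach headers.
 */
#if 0
#include <mach/mach.h>

static void
reply_port_example(void)
{
	mach_port_t reply = mach_reply_port();

	if (reply == MACH_PORT_NULL)
		return;			/* resource failure */

	/* ... use the name as the msgh_local_port of a request ... */

	/* drop the receive right when it is no longer needed */
	(void) mach_port_mod_refs(mach_task_self(), reply,
				  MACH_PORT_RIGHT_RECEIVE, -1);
}
#endif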
575 /*
576 * Routine: task_get_special_port [kernel call]
577 * Purpose:
578 * Clones a send right for one of the task's
579 * special ports.
580 * Conditions:
581 * Nothing locked.
582 * Returns:
583 * KERN_SUCCESS Extracted a send right.
584 * KERN_INVALID_ARGUMENT The task is null.
585 * KERN_FAILURE The task/space is dead.
586 * KERN_INVALID_ARGUMENT Invalid special port.
587 */
588
589 kern_return_t
590 task_get_special_port(
591 task_t task,
592 int which,
593 ipc_port_t *portp)
594 {
595 ipc_port_t *whichp;
596 ipc_port_t port;
597
598 if (task == TASK_NULL)
599 return KERN_INVALID_ARGUMENT;
600
601 switch (which) {
602 case TASK_KERNEL_PORT:
603 whichp = &task->itk_sself;
604 break;
605
606 case TASK_HOST_PORT:
607 whichp = &task->itk_host;
608 break;
609
610 case TASK_BOOTSTRAP_PORT:
611 whichp = &task->itk_bootstrap;
612 break;
613
614 case TASK_WIRED_LEDGER_PORT:
615 whichp = &task->wired_ledger_port;
616 break;
617
618 case TASK_PAGED_LEDGER_PORT:
619 whichp = &task->paged_ledger_port;
620 break;
621
622 default:
623 return KERN_INVALID_ARGUMENT;
624 }
625
626 itk_lock(task);
627 if (task->itk_self == IP_NULL) {
628 itk_unlock(task);
629 return KERN_FAILURE;
630 }
631
632 port = ipc_port_copy_send(*whichp);
633 itk_unlock(task);
634
635 *portp = port;
636 return KERN_SUCCESS;
637 }
638
639 /*
640 * Routine: task_set_special_port [kernel call]
641 * Purpose:
642 * Changes one of the task's special ports,
643 * setting it to the supplied send right.
644 * Conditions:
645 * Nothing locked. If successful, consumes
646 * the supplied send right.
647 * Returns:
648 * KERN_SUCCESS Changed the special port.
649 * KERN_INVALID_ARGUMENT The task is null.
650 * KERN_FAILURE The task/space is dead.
651 * KERN_INVALID_ARGUMENT Invalid special port.
652 */
653
654 kern_return_t
655 task_set_special_port(
656 task_t task,
657 int which,
658 ipc_port_t port)
659 {
660 ipc_port_t *whichp;
661 ipc_port_t old;
662
663 if (task == TASK_NULL)
664 return KERN_INVALID_ARGUMENT;
665
666 switch (which) {
667 case TASK_KERNEL_PORT:
668 whichp = &task->itk_sself;
669 break;
670
671 case TASK_HOST_PORT:
672 whichp = &task->itk_host;
673 break;
674
675 case TASK_BOOTSTRAP_PORT:
676 whichp = &task->itk_bootstrap;
677 break;
678
679 case TASK_WIRED_LEDGER_PORT:
680 whichp = &task->wired_ledger_port;
681 break;
682
683 case TASK_PAGED_LEDGER_PORT:
684 whichp = &task->paged_ledger_port;
685 break;
686
687 default:
688 return KERN_INVALID_ARGUMENT;
689 }/* switch */
690
691 itk_lock(task);
692 if (task->itk_self == IP_NULL) {
693 itk_unlock(task);
694 return KERN_FAILURE;
695 }
696
697 old = *whichp;
698 *whichp = port;
699 itk_unlock(task);
700
701 if (IP_VALID(old))
702 ipc_port_release_send(old);
703 return KERN_SUCCESS;
704 }
705
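/*
 * Illustrative sketch, not from the original source (kept under #if 0
 * so it stays out of the kernel build): user space reaches these
 * kernel calls through the MIG-generated task_get_special_port() and
 * task_set_special_port() routines.  The sketch reads the caller's
 * bootstrap port and installs the same right again; on success the
 * set call consumes the send right that the get call returned.
 */
#if 0
#include <mach/mach.h>

static kern_return_t
reinstall_bootstrap_port(void)
{
	mach_port_t bootstrap;
	kern_return_t kr;

	kr = task_get_special_port(mach_task_self(),
				   TASK_BOOTSTRAP_PORT, &bootstrap);
	if (kr != KERN_SUCCESS)
		return kr;

	/* on success this consumes the send right obtained above */
	return task_set_special_port(mach_task_self(),
				     TASK_BOOTSTRAP_PORT, bootstrap);
}
#endif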
706
707 /*
708 * Routine: mach_ports_register [kernel call]
709 * Purpose:
710 * Stash a handful of port send rights in the task.
711 * Child tasks will inherit these rights, but they
712 * must use mach_ports_lookup to acquire them.
713 *
714 * The rights are supplied in a (wired) kalloc'd segment.
715 * Rights which aren't supplied are assumed to be null.
716 * Conditions:
717 * Nothing locked. If successful, consumes
718 * the supplied rights and memory.
719 * Returns:
720 * KERN_SUCCESS Stashed the port rights.
721 * KERN_INVALID_ARGUMENT The task is null.
722 * KERN_INVALID_ARGUMENT The task is dead.
723 * KERN_INVALID_ARGUMENT Too many port rights supplied.
724 */
725
726 kern_return_t
727 mach_ports_register(
728 task_t task,
729 mach_port_array_t memory,
730 mach_msg_type_number_t portsCnt)
731 {
732 ipc_port_t ports[TASK_PORT_REGISTER_MAX];
733 int i;
734
735 if ((task == TASK_NULL) ||
736 (portsCnt > TASK_PORT_REGISTER_MAX))
737 return KERN_INVALID_ARGUMENT;
738
739 /*
740 * Pad the port rights with nulls.
741 */
742
743 for (i = 0; i < portsCnt; i++)
744 ports[i] = memory[i];
745 for (; i < TASK_PORT_REGISTER_MAX; i++)
746 ports[i] = IP_NULL;
747
748 itk_lock(task);
749 if (task->itk_self == IP_NULL) {
750 itk_unlock(task);
751 return KERN_INVALID_ARGUMENT;
752 }
753
754 /*
755 * Replace the old send rights with the new.
756 * Release the old rights after unlocking.
757 */
758
759 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
760 ipc_port_t old;
761
762 old = task->itk_registered[i];
763 task->itk_registered[i] = ports[i];
764 ports[i] = old;
765 }
766
767 itk_unlock(task);
768
769 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
770 if (IP_VALID(ports[i]))
771 ipc_port_release_send(ports[i]);
772
773 /*
774 * Now that the operation is known to be successful,
775 * we can free the memory.
776 */
777
778 if (portsCnt != 0)
779 kfree((vm_offset_t) memory,
780 (vm_size_t) (portsCnt * sizeof(mach_port_t)));
781
782 return KERN_SUCCESS;
783 }
784
785 /*
786 * Routine: mach_ports_lookup [kernel call]
787 * Purpose:
788 * Retrieves (clones) the stashed port send rights.
789 * Conditions:
790 * Nothing locked. If successful, the caller gets
791 * rights and memory.
792 * Returns:
793 * KERN_SUCCESS Retrieved the send rights.
794 * KERN_INVALID_ARGUMENT The task is null.
795 * KERN_INVALID_ARGUMENT The task is dead.
796 * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
797 */
798
799 kern_return_t
800 mach_ports_lookup(
801 task_t task,
802 mach_port_array_t *portsp,
803 mach_msg_type_number_t *portsCnt)
804 {
805 vm_offset_t memory;
806 vm_size_t size;
807 ipc_port_t *ports;
808 int i;
809
810 kern_return_t kr;
811
812 if (task == TASK_NULL)
813 return KERN_INVALID_ARGUMENT;
814
815 size = (vm_size_t) (TASK_PORT_REGISTER_MAX * sizeof(ipc_port_t));
816
817 memory = kalloc(size);
818 if (memory == 0)
819 return KERN_RESOURCE_SHORTAGE;
820
821 itk_lock(task);
822 if (task->itk_self == IP_NULL) {
823 itk_unlock(task);
824
825 kfree(memory, size);
826 return KERN_INVALID_ARGUMENT;
827 }
828
829 ports = (ipc_port_t *) memory;
830
831 /*
832 * Clone port rights. Because kalloc'd memory
833 * is wired, we won't fault while holding the task lock.
834 */
835
836 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
837 ports[i] = ipc_port_copy_send(task->itk_registered[i]);
838
839 itk_unlock(task);
840
841 *portsp = (mach_port_array_t) ports;
842 *portsCnt = TASK_PORT_REGISTER_MAX;
843 return KERN_SUCCESS;
844 }
845
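/*
 * Illustrative sketch, not from the original source (kept under #if 0
 * so it stays out of the kernel build): from user space the stash is
 * reached through MIG routines of the same names.  The sketch looks
 * up the registered rights, releases each send right it received, and
 * deallocates the out-of-line array the kernel handed back.
 */
#if 0
#include <mach/mach.h>

static void
release_registered_ports(void)
{
	mach_port_array_t ports;
	mach_msg_type_number_t count;
	unsigned int i;

	if (mach_ports_lookup(mach_task_self(), &ports, &count) != KERN_SUCCESS)
		return;

	for (i = 0; i < count; i++) {
		/* each valid entry is a send right now owned by the caller */
		if (MACH_PORT_VALID(ports[i]))
			(void) mach_port_deallocate(mach_task_self(), ports[i]);
	}

	/* the array itself arrives as out-of-line memory */
	(void) vm_deallocate(mach_task_self(), (vm_address_t) ports,
			     count * sizeof (ports[0]));
}
#endif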
846 /*
847 * Routine: convert_port_to_locked_task
848 * Purpose:
849 * Internal helper routine to convert from a port to a locked
850 * task. Used by several routines that try to convert from a
851 * task port to a reference on some task related object.
852 * Conditions:
853 * Nothing locked, blocking OK.
854 */
855 task_t
856 convert_port_to_locked_task(ipc_port_t port)
857 {
858 while (IP_VALID(port)) {
859 task_t task;
860
861 ip_lock(port);
862 if (!ip_active(port) || (ip_kotype(port) != IKOT_TASK)) {
863 ip_unlock(port);
864 return TASK_NULL;
865 }
866 task = (task_t) port->ip_kobject;
867 assert(task != TASK_NULL);
868
869 /*
870 * Normal lock ordering puts task_lock() before ip_lock().
871 * Attempt out-of-order locking here.
872 */
873 if (task_lock_try(task)) {
874 ip_unlock(port);
875 return(task);
876 }
877
878 ip_unlock(port);
879 mutex_pause();
880 }
881 return TASK_NULL;
882 }
883
884 /*
885 * Routine: convert_port_to_task
886 * Purpose:
887 * Convert from a port to a task.
888 * Doesn't consume the port ref; produces a task ref,
889 * which may be null.
890 * Conditions:
891 * Nothing locked.
892 */
893 task_t
894 convert_port_to_task(
895 ipc_port_t port)
896 {
897 task_t task;
898
899 task = convert_port_to_locked_task(port);
900 if (task) {
901 task->ref_count++;
902 task_unlock(task);
903 }
904 return task;
905 }
906
907 /*
908 * Routine: convert_port_to_space
909 * Purpose:
910 * Convert from a port to a space.
911 * Doesn't consume the port ref; produces a space ref,
912 * which may be null.
913 * Conditions:
914 * Nothing locked.
915 */
916 ipc_space_t
917 convert_port_to_space(
918 ipc_port_t port)
919 {
920 ipc_space_t space;
921 task_t task;
922
923 task = convert_port_to_locked_task(port);
924
925 if (task == TASK_NULL)
926 return IPC_SPACE_NULL;
927
928 if (!task->active) {
929 task_unlock(task);
930 return IPC_SPACE_NULL;
931 }
932
933 space = task->itk_space;
934 is_reference(space);
935 task_unlock(task);
936 return (space);
937 }
938
939 upl_t
940 convert_port_to_upl(
941 ipc_port_t port)
942 {
943 upl_t upl;
944
945 ip_lock(port);
946 if (!ip_active(port) || (ip_kotype(port) != IKOT_UPL)) {
947 ip_unlock(port);
948 return (upl_t)NULL;
949 }
950 upl = (upl_t) port->ip_kobject;
951 ip_unlock(port);
952 upl_lock(upl);
953 upl->ref_count+=1;
954 upl_unlock(upl);
955 return upl;
956 }
957
958 mach_port_t
959 convert_upl_to_port(
960 upl_t upl)
961 {
962 return MACH_PORT_NULL;
963 }
964
965 __private_extern__ void
966 upl_no_senders(
967 upl_t upl,
968 mach_port_mscount_t mscount)
969 {
970 return;
971 }
972
973 /*
974 * Routine: convert_port_entry_to_map
975 * Purpose:
976 * Convert from a port specifying an entry or a task
977 * to a map. Doesn't consume the port ref; produces a map ref,
978 * which may be null. Unlike convert_port_to_map, the
979 * port may be task or a named entry backed.
980 * Conditions:
981 * Nothing locked.
982 */
983
984
985 vm_map_t
986 convert_port_entry_to_map(
987 ipc_port_t port)
988 {
989 task_t task;
990 vm_map_t map;
991 vm_named_entry_t named_entry;
992
993 if(IP_VALID(port) && (ip_kotype(port) == IKOT_NAMED_ENTRY)) {
994 while(TRUE) {
995 ip_lock(port);
996 if(ip_active(port) && (ip_kotype(port)
997 == IKOT_NAMED_ENTRY)) {
998 named_entry =
999 (vm_named_entry_t)port->ip_kobject;
1000 if (!(mutex_try(&(named_entry)->Lock))) {
1001 ip_unlock(port);
1002 mutex_pause();
1003 continue;
1004 }
1005 named_entry->ref_count++;
1006 mutex_unlock(&(named_entry)->Lock);
1007 ip_unlock(port);
1008 if ((named_entry->is_sub_map) &&
1009 (named_entry->protection
1010 & VM_PROT_WRITE)) {
1011 map = named_entry->backing.map;
1012 } else {
1013 mach_destroy_memory_entry(port);
1014 return VM_MAP_NULL;
1015 }
1016 vm_map_reference_swap(map);
1017 mach_destroy_memory_entry(port);
1018 break;
1019 }
1020 else
1021 return VM_MAP_NULL;
1022 }
1023 } else {
1024 task_t task;
1025
1026 task = convert_port_to_locked_task(port);
1027
1028 if (task == TASK_NULL)
1029 return VM_MAP_NULL;
1030
1031 if (!task->active) {
1032 task_unlock(task);
1033 return VM_MAP_NULL;
1034 }
1035
1036 map = task->map;
1037 vm_map_reference_swap(map);
1038 task_unlock(task);
1039 }
1040
1041 return map;
1042 }
1043
1044 /*
1045 * Routine: convert_port_entry_to_object
1046 * Purpose:
1047 * Convert from a port specifying a named entry to an
1048 * object. Doesn't consume the port ref; produces a map ref,
1049 * which may be null.
1050 * Conditions:
1051 * Nothing locked.
1052 */
1053
1054
1055 vm_object_t
1056 convert_port_entry_to_object(
1057 ipc_port_t port)
1058 {
1059 vm_object_t object;
1060 vm_named_entry_t named_entry;
1061
1062 if(IP_VALID(port) && (ip_kotype(port) == IKOT_NAMED_ENTRY)) {
1063 while(TRUE) {
1064 ip_lock(port);
1065 if(ip_active(port) && (ip_kotype(port)
1066 == IKOT_NAMED_ENTRY)) {
1067 named_entry =
1068 (vm_named_entry_t)port->ip_kobject;
1069 if (!(mutex_try(&(named_entry)->Lock))) {
1070 ip_unlock(port);
1071 mutex_pause();
1072 continue;
1073 }
1074 named_entry->ref_count++;
1075 mutex_unlock(&(named_entry)->Lock);
1076 ip_unlock(port);
1077 if ((!named_entry->is_sub_map) &&
1078 (named_entry->protection
1079 & VM_PROT_WRITE)) {
1080 object = named_entry->object;
1081 } else {
1082 mach_destroy_memory_entry(port);
1083 return (vm_object_t)NULL;
1084 }
1085 vm_object_reference(named_entry->object);
1086 mach_destroy_memory_entry(port);
1087 break;
1088 }
1089 else
1090 return (vm_object_t)NULL;
1091 }
1092 } else {
1093 return (vm_object_t)NULL;
1094 }
1095
1096 return object;
1097 }
1098
1099 /*
1100 * Routine: convert_port_to_map
1101 * Purpose:
1102 * Convert from a port to a map.
1103 * Doesn't consume the port ref; produces a map ref,
1104 * which may be null.
1105 * Conditions:
1106 * Nothing locked.
1107 */
1108
1109 vm_map_t
1110 convert_port_to_map(
1111 ipc_port_t port)
1112 {
1113 task_t task;
1114 vm_map_t map;
1115
1116 task = convert_port_to_locked_task(port);
1117
1118 if (task == TASK_NULL)
1119 return VM_MAP_NULL;
1120
1121 if (!task->active) {
1122 task_unlock(task);
1123 return VM_MAP_NULL;
1124 }
1125
1126 map = task->map;
1127 vm_map_reference_swap(map);
1128 task_unlock(task);
1129 return map;
1130 }
1131
1132
1133 /*
1134 * Routine: convert_port_to_act
1135 * Purpose:
1136 * Convert from a port to a thr_act.
1137 * Doesn't consume the port ref; produces a thr_act ref,
1138 * which may be null.
1139 * Conditions:
1140 * Nothing locked.
1141 */
1142
1143 thread_act_t
1144 convert_port_to_act( ipc_port_t port )
1145 {
1146 boolean_t r;
1147 thread_act_t thr_act = 0;
1148
1149 r = FALSE;
1150 while (!r && IP_VALID(port)) {
1151 ip_lock(port);
1152 r = ref_act_port_locked(port, &thr_act);
1153 /* port unlocked */
1154 }
1155 return (thr_act);
1156 }
1157
1158 boolean_t
1159 ref_act_port_locked( ipc_port_t port, thread_act_t *pthr_act )
1160 {
1161 thread_act_t thr_act;
1162
1163 thr_act = 0;
1164 if (ip_active(port) &&
1165 (ip_kotype(port) == IKOT_ACT)) {
1166 thr_act = (thread_act_t) port->ip_kobject;
1167 assert(thr_act != THR_ACT_NULL);
1168
1169 /*
1170 * Out of order locking here, normal
1171 * ordering is act_lock(), then ip_lock().
1172 */
1173 if (!act_lock_try(thr_act)) {
1174 ip_unlock(port);
1175 mutex_pause();
1176 return (FALSE);
1177 }
1178 act_reference_locked(thr_act);
1179 act_unlock(thr_act);
1180 }
1181 *pthr_act = thr_act;
1182 ip_unlock(port);
1183 return (TRUE);
1184 }
1185
1186 /*
1187 * Routine: port_name_to_act
1188 * Purpose:
1189 * Convert from a port name to an act reference
1190 * A name of MACH_PORT_NULL is valid for the null act
1191 * Conditions:
1192 * Nothing locked.
1193 */
1194 thread_act_t
1195 port_name_to_act(
1196 mach_port_name_t name)
1197 {
1198 thread_act_t thr_act = THR_ACT_NULL;
1199 ipc_port_t kern_port;
1200 kern_return_t kr;
1201
1202 if (MACH_PORT_VALID(name)) {
1203 kr = ipc_object_copyin(current_space(), name,
1204 MACH_MSG_TYPE_COPY_SEND,
1205 (ipc_object_t *) &kern_port);
1206 if (kr != KERN_SUCCESS)
1207 return THR_ACT_NULL;
1208
1209 thr_act = convert_port_to_act(kern_port);
1210
1211 if (IP_VALID(kern_port))
1212 ipc_port_release_send(kern_port);
1213 }
1214 return thr_act;
1215 }
1216
1217 task_t
1218 port_name_to_task(
1219 mach_port_name_t name)
1220 {
1221 ipc_port_t kern_port;
1222 kern_return_t kr;
1223 task_t task = TASK_NULL;
1224
1225 if (MACH_PORT_VALID(name)) {
1226 kr = ipc_object_copyin(current_space(), name,
1227 MACH_MSG_TYPE_COPY_SEND,
1228 (ipc_object_t *) &kern_port);
1229 if (kr != KERN_SUCCESS)
1230 return TASK_NULL;
1231
1232 task = convert_port_to_task(kern_port);
1233
1234 if (IP_VALID(kern_port))
1235 ipc_port_release_send(kern_port);
1236 }
1237 return task;
1238 }
1239
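/*
 * Illustrative sketch, not from the original source (kept under #if 0
 * so it stays out of the build; the helper name is hypothetical):
 * kernel code that accepts a task port name from the current space
 * typically pairs port_name_to_task() with task_deallocate(), since a
 * non-null result carries a task reference.
 */
#if 0
static kern_return_t
with_named_task(mach_port_name_t name)
{
	task_t t;

	t = port_name_to_task(name);
	if (t == TASK_NULL)
		return KERN_INVALID_ARGUMENT;

	/* ... operate on the task ... */

	task_deallocate(t);	/* drop the reference taken above */
	return KERN_SUCCESS;
}
#endif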
1240 /*
1241 * Routine: convert_task_to_port
1242 * Purpose:
1243 * Convert from a task to a port.
1244 * Consumes a task ref; produces a naked send right
1245 * which may be invalid.
1246 * Conditions:
1247 * Nothing locked.
1248 */
1249
1250 ipc_port_t
1251 convert_task_to_port(
1252 task_t task)
1253 {
1254 ipc_port_t port;
1255
1256 itk_lock(task);
1257 if (task->itk_self != IP_NULL)
1258 #if NORMA_TASK
1259 if (task->map == VM_MAP_NULL)
1260 /* norma placeholder task */
1261 port = ipc_port_copy_send(task->itk_self);
1262 else
1263 #endif /* NORMA_TASK */
1264 port = ipc_port_make_send(task->itk_self);
1265 else
1266 port = IP_NULL;
1267 itk_unlock(task);
1268
1269 task_deallocate(task);
1270 return port;
1271 }
1272
1273 /*
1274 * Routine: convert_act_to_port
1275 * Purpose:
1276 * Convert from a thr_act to a port.
1277 * Consumes a thr_act ref; produces a naked send right
1278 * which may be invalid.
1279 * Conditions:
1280 * Nothing locked.
1281 */
1282
1283 ipc_port_t
1284 convert_act_to_port(
1285 	thread_act_t	thr_act)
1286 {
1287 ipc_port_t port;
1288
1289 act_lock(thr_act);
1290 if (thr_act->ith_self != IP_NULL)
1291 port = ipc_port_make_send(thr_act->ith_self);
1292 else
1293 port = IP_NULL;
1294 act_unlock(thr_act);
1295
1296 act_deallocate(thr_act);
1297 return port;
1298 }
1299
1300 /*
1301 * Routine: space_deallocate
1302 * Purpose:
1303 * Deallocate a space ref produced by convert_port_to_space.
1304 * Conditions:
1305 * Nothing locked.
1306 */
1307
1308 void
1309 space_deallocate(
1310 ipc_space_t space)
1311 {
1312 if (space != IS_NULL)
1313 is_release(space);
1314 }
1315
1316 /*
1317 * Routine: thread/task_set_exception_ports [kernel call]
1318 * Purpose:
1319 * Sets the thread/task exception port, flavor and
1320 * behavior for the exception types specified by the mask.
1321 * There will be one send right per exception per valid
1322 * port.
1323 * Conditions:
1324 * Nothing locked. If successful, consumes
1325 * the supplied send right.
1326 * Returns:
1327 * KERN_SUCCESS Changed the special port.
1328 * KERN_INVALID_ARGUMENT The thread is null,
1329 * Illegal mask bit set.
1330 * Illegal exception behavior
1331 * KERN_FAILURE The thread is dead.
1332 */
1333
1334 kern_return_t
1335 thread_set_exception_ports(
1336 thread_act_t thr_act,
1337 exception_mask_t exception_mask,
1338 ipc_port_t new_port,
1339 exception_behavior_t new_behavior,
1340 thread_state_flavor_t new_flavor)
1341 {
1342 register int i;
1343 ipc_port_t old_port[EXC_TYPES_COUNT];
1344
1345 if (!thr_act)
1346 return KERN_INVALID_ARGUMENT;
1347
1348 if (exception_mask & ~EXC_MASK_ALL)
1349 return KERN_INVALID_ARGUMENT;
1350
1351 if (IP_VALID(new_port)) {
1352 switch (new_behavior) {
1353 case EXCEPTION_DEFAULT:
1354 case EXCEPTION_STATE:
1355 case EXCEPTION_STATE_IDENTITY:
1356 break;
1357 default:
1358 return KERN_INVALID_ARGUMENT;
1359 }
1360 }
1361
1362 /*
1363 * Check the validity of the thread_state_flavor by calling the
1364 * VALID_THREAD_STATE_FLAVOR architecture dependent macro defined in
1365 * osfmk/mach/ARCHITECTURE/thread_status.h
1366 */
1367 if (!VALID_THREAD_STATE_FLAVOR(new_flavor)) {
1368 return KERN_INVALID_ARGUMENT;
1369 }
1370
1371 act_lock(thr_act);
1372 if (!thr_act->active) {
1373 act_unlock(thr_act);
1374 return KERN_FAILURE;
1375 }
1376
1377 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
1378 if (exception_mask & (1 << i)) {
1379 old_port[i] = thr_act->exc_actions[i].port;
1380 thr_act->exc_actions[i].port =
1381 ipc_port_copy_send(new_port);
1382 thr_act->exc_actions[i].behavior = new_behavior;
1383 thr_act->exc_actions[i].flavor = new_flavor;
1384 } else
1385 old_port[i] = IP_NULL;
1386 }/* for */
1387 /*
1388 * Consume send rights without any lock held.
1389 */
1390 act_unlock(thr_act);
1391 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++)
1392 if (IP_VALID(old_port[i]))
1393 ipc_port_release_send(old_port[i]);
1394 if (IP_VALID(new_port)) /* consume send right */
1395 ipc_port_release_send(new_port);
1396
1397 return KERN_SUCCESS;
1398 }/* thread_set_exception_ports */
1399
1400 kern_return_t
1401 task_set_exception_ports(
1402 task_t task,
1403 exception_mask_t exception_mask,
1404 ipc_port_t new_port,
1405 exception_behavior_t new_behavior,
1406 thread_state_flavor_t new_flavor)
1407 {
1408 register int i;
1409 ipc_port_t old_port[EXC_TYPES_COUNT];
1410
1411 if (task == TASK_NULL) {
1412 return KERN_INVALID_ARGUMENT;
1413 }
1414
1415 if (exception_mask & ~EXC_MASK_ALL) {
1416 return KERN_INVALID_ARGUMENT;
1417 }
1418
1419 if (IP_VALID(new_port)) {
1420 switch (new_behavior) {
1421 case EXCEPTION_DEFAULT:
1422 case EXCEPTION_STATE:
1423 case EXCEPTION_STATE_IDENTITY:
1424 break;
1425 default:
1426 return KERN_INVALID_ARGUMENT;
1427 }
1428 }
1429 /* Cannot easily check "new_flavor", but that just means that
1430 * the flavor in the generated exception message might be garbage:
1431 * GIGO */
1432
1433 itk_lock(task);
1434 if (task->itk_self == IP_NULL) {
1435 itk_unlock(task);
1436 return KERN_FAILURE;
1437 }
1438
1439 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
1440 if (exception_mask & (1 << i)) {
1441 old_port[i] = task->exc_actions[i].port;
1442 task->exc_actions[i].port =
1443 ipc_port_copy_send(new_port);
1444 task->exc_actions[i].behavior = new_behavior;
1445 task->exc_actions[i].flavor = new_flavor;
1446 } else
1447 old_port[i] = IP_NULL;
1448 }/* for */
1449
1450 /*
1451 * Consume send rights without any lock held.
1452 */
1453 itk_unlock(task);
1454 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++)
1455 if (IP_VALID(old_port[i]))
1456 ipc_port_release_send(old_port[i]);
1457 if (IP_VALID(new_port)) /* consume send right */
1458 ipc_port_release_send(new_port);
1459
1460 return KERN_SUCCESS;
1461 }/* task_set_exception_ports */
1462
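/*
 * Illustrative sketch, not from the original source (kept under #if 0
 * so it stays out of the kernel build): a user-space handler usually
 * allocates a port, gives itself a send right, and installs it for
 * the exceptions it cares about.  The helper name and the choice of
 * EXC_MASK_BAD_ACCESS / EXCEPTION_DEFAULT / MACHINE_THREAD_STATE are
 * only for illustration.
 */
#if 0
#include <mach/mach.h>

static kern_return_t
install_exception_port(mach_port_t *out_port)
{
	mach_port_t exc_port;
	kern_return_t kr;

	kr = mach_port_allocate(mach_task_self(),
				MACH_PORT_RIGHT_RECEIVE, &exc_port);
	if (kr != KERN_SUCCESS)
		return kr;

	kr = mach_port_insert_right(mach_task_self(), exc_port,
				    exc_port, MACH_MSG_TYPE_MAKE_SEND);
	if (kr != KERN_SUCCESS) {
		(void) mach_port_mod_refs(mach_task_self(), exc_port,
					  MACH_PORT_RIGHT_RECEIVE, -1);
		return kr;
	}

	/* on success the supplied send right is consumed by the kernel */
	kr = task_set_exception_ports(mach_task_self(),
				      EXC_MASK_BAD_ACCESS, exc_port,
				      EXCEPTION_DEFAULT, MACHINE_THREAD_STATE);
	if (kr == KERN_SUCCESS)
		*out_port = exc_port;
	return kr;
}
#endif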
1463 /*
1464 * Routine: thread/task_swap_exception_ports [kernel call]
1465 * Purpose:
1466 * Sets the thread/task exception port, flavor and
1467 * behavior for the exception types specified by the
1468 * mask.
1469 *
1470 * The old ports, behaviors and flavors are returned.
1471 * Count specifies the array sizes on input and
1472 * the number of returned ports etc. on output. The
1473 * arrays must be large enough to hold all the returned
1474 * data; MIG returns an error otherwise. The masks
1475 * array specifies the corresponding exception type(s).
1476 *
1477 * Conditions:
1478 * Nothing locked. If successful, consumes
1479 * the supplied send right.
1480 *
1481 * Returns up to [in] CountCnt elements.
1482 * Returns:
1483 * KERN_SUCCESS Changed the special port.
1484 * KERN_INVALID_ARGUMENT The thread is null,
1485 * Illegal mask bit set.
1486 * Illegal exception behavior
1487 * KERN_FAILURE The thread is dead.
1488 */
1489
1490 kern_return_t
1491 thread_swap_exception_ports(
1492 thread_act_t thr_act,
1493 exception_mask_t exception_mask,
1494 ipc_port_t new_port,
1495 exception_behavior_t new_behavior,
1496 thread_state_flavor_t new_flavor,
1497 exception_mask_array_t masks,
1498 mach_msg_type_number_t * CountCnt,
1499 exception_port_array_t ports,
1500 exception_behavior_array_t behaviors,
1501 thread_state_flavor_array_t flavors )
1502 {
1503 register int i,
1504 j,
1505 count;
1506 ipc_port_t old_port[EXC_TYPES_COUNT];
1507
1508 if (!thr_act)
1509 return KERN_INVALID_ARGUMENT;
1510
1511 if (exception_mask & ~EXC_MASK_ALL) {
1512 return KERN_INVALID_ARGUMENT;
1513 }
1514
1515 if (IP_VALID(new_port)) {
1516 switch (new_behavior) {
1517 case EXCEPTION_DEFAULT:
1518 case EXCEPTION_STATE:
1519 case EXCEPTION_STATE_IDENTITY:
1520 break;
1521 default:
1522 return KERN_INVALID_ARGUMENT;
1523 }
1524 }
1525 /* Cannot easily check "new_flavor", but that just means that
1526 * the flavor in the generated exception message might be garbage:
1527 * GIGO */
1528
1529 act_lock(thr_act);
1530 if (!thr_act->active) {
1531 act_unlock(thr_act);
1532 return KERN_FAILURE;
1533 }
1534
1535 count = 0;
1536
1537 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
1538 if (exception_mask & (1 << i)) {
1539 for (j = 0; j < count; j++) {
1540 /*
1541 * search for an identical entry, if found
1542 * set corresponding mask for this exception.
1543 */
1544 if (thr_act->exc_actions[i].port == ports[j] &&
1545 thr_act->exc_actions[i].behavior ==behaviors[j]
1546 && thr_act->exc_actions[i].flavor ==flavors[j])
1547 {
1548 masks[j] |= (1 << i);
1549 break;
1550 }
1551 }/* for */
1552 if (j == count) {
1553 masks[j] = (1 << i);
1554 ports[j] =
1555 ipc_port_copy_send(thr_act->exc_actions[i].port);
1556
1557 behaviors[j] = thr_act->exc_actions[i].behavior;
1558 flavors[j] = thr_act->exc_actions[i].flavor;
1559 count++;
1560 }
1561
1562 old_port[i] = thr_act->exc_actions[i].port;
1563 thr_act->exc_actions[i].port =
1564 ipc_port_copy_send(new_port);
1565 thr_act->exc_actions[i].behavior = new_behavior;
1566 thr_act->exc_actions[i].flavor = new_flavor;
1567 if (count >= *CountCnt) {
1568 break;
1569 }
1570 } else
1571 old_port[i] = IP_NULL;
1572 }/* for */
1573
1574 /*
1575 * Consume send rights without any lock held.
1576 */
1577 act_unlock(thr_act);
1578 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++)
1579 if (IP_VALID(old_port[i]))
1580 ipc_port_release_send(old_port[i]);
1581 if (IP_VALID(new_port)) /* consume send right */
1582 ipc_port_release_send(new_port);
1583 *CountCnt = count;
1584 return KERN_SUCCESS;
1585 }/* thread_swap_exception_ports */
1586
1587 kern_return_t
1588 task_swap_exception_ports(
1589 task_t task,
1590 exception_mask_t exception_mask,
1591 ipc_port_t new_port,
1592 exception_behavior_t new_behavior,
1593 thread_state_flavor_t new_flavor,
1594 exception_mask_array_t masks,
1595 mach_msg_type_number_t * CountCnt,
1596 exception_port_array_t ports,
1597 exception_behavior_array_t behaviors,
1598 thread_state_flavor_array_t flavors )
1599 {
1600 register int i,
1601 j,
1602 count;
1603 ipc_port_t old_port[EXC_TYPES_COUNT];
1604
1605 if (task == TASK_NULL)
1606 return KERN_INVALID_ARGUMENT;
1607
1608 if (exception_mask & ~EXC_MASK_ALL) {
1609 return KERN_INVALID_ARGUMENT;
1610 }
1611
1612 if (IP_VALID(new_port)) {
1613 switch (new_behavior) {
1614 case EXCEPTION_DEFAULT:
1615 case EXCEPTION_STATE:
1616 case EXCEPTION_STATE_IDENTITY:
1617 break;
1618 default:
1619 return KERN_INVALID_ARGUMENT;
1620 }
1621 }
1622 /* Cannot easily check "new_flavor", but that just means that
1623 * the flavor in the generated exception message might be garbage:
1624 * GIGO */
1625
1626 itk_lock(task);
1627 if (task->itk_self == IP_NULL) {
1628 itk_unlock(task);
1629 return KERN_FAILURE;
1630 }
1631
1632 count = 0;
1633
1634 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
1635 if (exception_mask & (1 << i)) {
1636 for (j = 0; j < count; j++) {
1637 /*
1638 * search for an identical entry, if found
1639 * set corresponding mask for this exception.
1640 */
1641 if (task->exc_actions[i].port == ports[j] &&
1642 task->exc_actions[i].behavior == behaviors[j]
1643 && task->exc_actions[i].flavor == flavors[j])
1644 {
1645 masks[j] |= (1 << i);
1646 break;
1647 }
1648 }/* for */
1649 if (j == count) {
1650 masks[j] = (1 << i);
1651 ports[j] =
1652 ipc_port_copy_send(task->exc_actions[i].port);
1653 behaviors[j] = task->exc_actions[i].behavior;
1654 flavors[j] = task->exc_actions[i].flavor;
1655 count++;
1656 }
1657 old_port[i] = task->exc_actions[i].port;
1658 task->exc_actions[i].port =
1659 ipc_port_copy_send(new_port);
1660 task->exc_actions[i].behavior = new_behavior;
1661 task->exc_actions[i].flavor = new_flavor;
1662 if (count >= *CountCnt) {
1663 break;
1664 }
1665 } else
1666 old_port[i] = IP_NULL;
1667 }/* for */
1668
1669
1670 /*
1671 * Consume send rights without any lock held.
1672 */
1673 itk_unlock(task);
1674 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++)
1675 if (IP_VALID(old_port[i]))
1676 ipc_port_release_send(old_port[i]);
1677 if (IP_VALID(new_port)) /* consume send right */
1678 ipc_port_release_send(new_port);
1679 *CountCnt = count;
1680
1681 return KERN_SUCCESS;
1682 }/* task_swap_exception_ports */
1683
1684 /*
1685 * Routine: thread/task_get_exception_ports [kernel call]
1686 * Purpose:
1687 * Clones a send right for each of the thread/task's exception
1688 * ports specified in the mask and returns the behavior
1689 * and flavor of said port.
1690 *
1691 * Returns up to [in] CountCnt elements.
1692 *
1693 * Conditions:
1694 * Nothing locked.
1695 * Returns:
1696 * KERN_SUCCESS Extracted a send right.
1697 * KERN_INVALID_ARGUMENT The thread is null,
1698 * Invalid special port,
1699 * Illegal mask bit set.
1700 * KERN_FAILURE The thread is dead.
1701 */
1702
1703 kern_return_t
1704 thread_get_exception_ports(
1705 thread_act_t thr_act,
1706 exception_mask_t exception_mask,
1707 exception_mask_array_t masks,
1708 mach_msg_type_number_t * CountCnt,
1709 exception_port_array_t ports,
1710 exception_behavior_array_t behaviors,
1711 thread_state_flavor_array_t flavors )
1712 {
1713 register int i,
1714 j,
1715 count;
1716
1717 if (!thr_act)
1718 return KERN_INVALID_ARGUMENT;
1719
1720 if (exception_mask & ~EXC_MASK_ALL) {
1721 return KERN_INVALID_ARGUMENT;
1722 }
1723
1724 act_lock(thr_act);
1725 if (!thr_act->active) {
1726 act_unlock(thr_act);
1727 return KERN_FAILURE;
1728 }
1729
1730 count = 0;
1731
1732 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
1733 if (exception_mask & (1 << i)) {
1734 for (j = 0; j < count; j++) {
1735 /*
1736 * search for an identical entry, if found
1737 * set corresponding mask for this exception.
1738 */
1739 if (thr_act->exc_actions[i].port == ports[j] &&
1740 thr_act->exc_actions[i].behavior ==behaviors[j]
1741 && thr_act->exc_actions[i].flavor == flavors[j])
1742 {
1743 masks[j] |= (1 << i);
1744 break;
1745 }
1746 }/* for */
1747 if (j == count) {
1748 masks[j] = (1 << i);
1749 ports[j] =
1750 ipc_port_copy_send(thr_act->exc_actions[i].port);
1751 behaviors[j] = thr_act->exc_actions[i].behavior;
1752 flavors[j] = thr_act->exc_actions[i].flavor;
1753 count++;
1754 if (count >= *CountCnt) {
1755 break;
1756 }
1757 }
1758 }
1759 }/* for */
1760
1761 act_unlock(thr_act);
1762
1763 *CountCnt = count;
1764 return KERN_SUCCESS;
1765 }/* thread_get_exception_ports */
1766
1767 kern_return_t
1768 task_get_exception_ports(
1769 task_t task,
1770 exception_mask_t exception_mask,
1771 exception_mask_array_t masks,
1772 mach_msg_type_number_t * CountCnt,
1773 exception_port_array_t ports,
1774 exception_behavior_array_t behaviors,
1775 thread_state_flavor_array_t flavors )
1776 {
1777 register int i,
1778 j,
1779 count;
1780
1781 if (task == TASK_NULL)
1782 return KERN_INVALID_ARGUMENT;
1783
1784 if (exception_mask & ~EXC_MASK_ALL) {
1785 return KERN_INVALID_ARGUMENT;
1786 }
1787
1788 itk_lock(task);
1789 if (task->itk_self == IP_NULL) {
1790 itk_unlock(task);
1791 return KERN_FAILURE;
1792 }
1793
1794 count = 0;
1795
1796 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
1797 if (exception_mask & (1 << i)) {
1798 for (j = 0; j < count; j++) {
1799 /*
1800 * search for an identical entry, if found
1801 * set corresponding mask for this exception.
1802 */
1803 if (task->exc_actions[i].port == ports[j] &&
1804 task->exc_actions[i].behavior == behaviors[j]
1805 && task->exc_actions[i].flavor == flavors[j])
1806 {
1807 masks[j] |= (1 << i);
1808 break;
1809 }
1810 }/* for */
1811 if (j == count) {
1812 masks[j] = (1 << i);
1813 ports[j] =
1814 ipc_port_copy_send(task->exc_actions[i].port);
1815 behaviors[j] = task->exc_actions[i].behavior;
1816 flavors[j] = task->exc_actions[i].flavor;
1817 count++;
1818 if (count >= *CountCnt) {
1819 break;
1820 }
1821 }
1822 }
1823 }/* for */
1824
1825 itk_unlock(task);
1826
1827 *CountCnt = count;
1828 return KERN_SUCCESS;
1829 }/* task_get_exception_ports */
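/*
 * Illustrative sketch, not from the original source (kept under #if 0
 * so it stays out of the kernel build): callers commonly save the
 * current exception ports before installing their own so they can
 * forward or restore them later.  The helper name is illustrative;
 * the fixed-size arrays follow the MIG interface, which fills in at
 * most EXC_TYPES_COUNT entries.
 */
#if 0
#include <mach/mach.h>

static kern_return_t
save_task_exception_ports(void)
{
	exception_mask_t masks[EXC_TYPES_COUNT];
	mach_port_t ports[EXC_TYPES_COUNT];
	exception_behavior_t behaviors[EXC_TYPES_COUNT];
	thread_state_flavor_t flavors[EXC_TYPES_COUNT];
	mach_msg_type_number_t count = EXC_TYPES_COUNT;

	/* on success the caller owns one send right per returned port */
	return task_get_exception_ports(mach_task_self(), EXC_MASK_ALL,
					masks, &count, ports,
					behaviors, flavors);
}
#endif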