1 /*
2 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_OSREFERENCE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the
10 * License may not be used to create, or enable the creation or
11 * redistribution of, unlawful or unlicensed copies of an Apple operating
12 * system, or to circumvent, violate, or enable the circumvention or
13 * violation of, any terms of an Apple operating system software license
14 * agreement.
15 *
16 * Please obtain a copy of the License at
17 * http://www.opensource.apple.com/apsl/ and read it before using this
18 * file.
19 *
20 * The Original Code and all software distributed under the License are
21 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
22 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
23 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
25 * Please see the License for the specific language governing rights and
26 * limitations under the License.
27 *
28 * @APPLE_LICENSE_OSREFERENCE_HEADER_END@
29 */
30 /*
31 * @OSF_COPYRIGHT@
32 */
33 /*
34 * Mach Operating System
35 * Copyright (c) 1991,1990,1989,1988,1987 Carnegie Mellon University
36 * All Rights Reserved.
37 *
38 * Permission to use, copy, modify and distribute this software and its
39 * documentation is hereby granted, provided that both the copyright
40 * notice and this permission notice appear in all copies of the
41 * software, derivative works or modified versions, and any portions
42 * thereof, and that both notices appear in supporting documentation.
43 *
44 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
45 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
46 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
47 *
48 * Carnegie Mellon requests users of this software to return to
49 *
50 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
51 * School of Computer Science
52 * Carnegie Mellon University
53 * Pittsburgh PA 15213-3890
54 *
55 * any improvements or extensions that they make and grant Carnegie Mellon
56 * the rights to redistribute these changes.
57 */
58 /*
59 */
60
61 /*
62 * File: ipc_tt.c
63 * Purpose:
64 * Task and thread related IPC functions.
65 */
66
67 #include <mach/mach_types.h>
68 #include <mach/boolean.h>
69 #include <mach/kern_return.h>
70 #include <mach/mach_param.h>
71 #include <mach/task_special_ports.h>
72 #include <mach/thread_special_ports.h>
73 #include <mach/thread_status.h>
74 #include <mach/exception_types.h>
75 #include <mach/memory_object_types.h>
76 #include <mach/mach_traps.h>
77 #include <mach/task_server.h>
78 #include <mach/thread_act_server.h>
79 #include <mach/mach_host_server.h>
80 #include <mach/host_priv_server.h>
81 #include <mach/vm_map_server.h>
82
83 #include <kern/kern_types.h>
84 #include <kern/host.h>
85 #include <kern/ipc_kobject.h>
86 #include <kern/ipc_tt.h>
87 #include <kern/kalloc.h>
88 #include <kern/thread.h>
89 #include <kern/misc_protos.h>
90
91 #include <vm/vm_map.h>
92 #include <vm/vm_pageout.h>
93 #include <vm/vm_shared_memory_server.h>
94 #include <vm/vm_protos.h>
95
96 /* forward declarations */
97 task_t convert_port_to_locked_task(ipc_port_t port);
98
99
100 /*
101 * Routine: ipc_task_init
102 * Purpose:
103 * Initialize a task's IPC state.
104 *
105 * If the parent is non-null, some state will be inherited from it.
106 * The parent must be appropriately initialized.
107 * Conditions:
108 * Nothing locked.
109 */
110
111 void
112 ipc_task_init(
113 task_t task,
114 task_t parent)
115 {
116 ipc_space_t space;
117 ipc_port_t kport;
118 ipc_port_t nport;
119 kern_return_t kr;
120 int i;
121
122
123 kr = ipc_space_create(&ipc_table_entries[0], &space);
124 if (kr != KERN_SUCCESS)
125 panic("ipc_task_init");
126
127
128 kport = ipc_port_alloc_kernel();
129 if (kport == IP_NULL)
130 panic("ipc_task_init");
131
132 nport = ipc_port_alloc_kernel();
133 if (nport == IP_NULL)
134 panic("ipc_task_init");
135
136 itk_lock_init(task);
137 task->itk_self = kport;
138 task->itk_nself = nport;
139 task->itk_sself = ipc_port_make_send(kport);
140 task->itk_space = space;
141 space->is_fast = FALSE;
142
143 if (parent == TASK_NULL) {
144 ipc_port_t port;
145
146 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
147 task->exc_actions[i].port = IP_NULL;
148 }/* for */
149
150 kr = host_get_host_port(host_priv_self(), &port);
151 assert(kr == KERN_SUCCESS);
152 task->itk_host = port;
153
154 task->itk_bootstrap = IP_NULL;
155
156 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
157 task->itk_registered[i] = IP_NULL;
158 } else {
159 itk_lock(parent);
160 assert(parent->itk_self != IP_NULL);
161
162 /* inherit registered ports */
163
164 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
165 task->itk_registered[i] =
166 ipc_port_copy_send(parent->itk_registered[i]);
167
168 /* inherit exception and bootstrap ports */
169
170 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
171 task->exc_actions[i].port =
172 ipc_port_copy_send(parent->exc_actions[i].port);
173 task->exc_actions[i].flavor =
174 parent->exc_actions[i].flavor;
175 task->exc_actions[i].behavior =
176 parent->exc_actions[i].behavior;
177 task->exc_actions[i].privileged =
178 parent->exc_actions[i].privileged;
179 }/* for */
180 task->itk_host =
181 ipc_port_copy_send(parent->itk_host);
182
183 task->itk_bootstrap =
184 ipc_port_copy_send(parent->itk_bootstrap);
185
186 itk_unlock(parent);
187 }
188 }
189
190 /*
191 * Routine: ipc_task_enable
192 * Purpose:
193 * Enable a task for IPC access.
194 * Conditions:
195 * Nothing locked.
196 */
197
198 void
199 ipc_task_enable(
200 task_t task)
201 {
202 ipc_port_t kport;
203 ipc_port_t nport;
204
205 itk_lock(task);
206 kport = task->itk_self;
207 if (kport != IP_NULL)
208 ipc_kobject_set(kport, (ipc_kobject_t) task, IKOT_TASK);
209 nport = task->itk_nself;
210 if (nport != IP_NULL)
211 ipc_kobject_set(nport, (ipc_kobject_t) task, IKOT_TASK_NAME);
212 itk_unlock(task);
213 }
214
215 /*
216 * Routine: ipc_task_disable
217 * Purpose:
218 * Disable IPC access to a task.
219 * Conditions:
220 * Nothing locked.
221 */
222
223 void
224 ipc_task_disable(
225 task_t task)
226 {
227 ipc_port_t kport;
228 ipc_port_t nport;
229
230 itk_lock(task);
231 kport = task->itk_self;
232 if (kport != IP_NULL)
233 ipc_kobject_set(kport, IKO_NULL, IKOT_NONE);
234 nport = task->itk_nself;
235 if (nport != IP_NULL)
236 ipc_kobject_set(nport, IKO_NULL, IKOT_NONE);
237 itk_unlock(task);
238 }
239
240 /*
241 * Routine: ipc_task_terminate
242 * Purpose:
243 * Clean up and destroy a task's IPC state.
244 * Conditions:
245 * Nothing locked. The task must be suspended.
246 * (Or the current thread must be in the task.)
247 */
248
249 void
250 ipc_task_terminate(
251 task_t task)
252 {
253 ipc_port_t kport;
254 ipc_port_t nport;
255 int i;
256
257 itk_lock(task);
258 kport = task->itk_self;
259
260 if (kport == IP_NULL) {
261 /* the task is already terminated (can this happen?) */
262 itk_unlock(task);
263 return;
264 }
265 task->itk_self = IP_NULL;
266
267 nport = task->itk_nself;
268 assert(nport != IP_NULL);
269 task->itk_nself = IP_NULL;
270
271 itk_unlock(task);
272
273 /* release the naked send rights */
274
275 if (IP_VALID(task->itk_sself))
276 ipc_port_release_send(task->itk_sself);
277
278 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
279 if (IP_VALID(task->exc_actions[i].port)) {
280 ipc_port_release_send(task->exc_actions[i].port);
281 }
282 }
283
284 if (IP_VALID(task->itk_host))
285 ipc_port_release_send(task->itk_host);
286
287 if (IP_VALID(task->itk_bootstrap))
288 ipc_port_release_send(task->itk_bootstrap);
289
290 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
291 if (IP_VALID(task->itk_registered[i]))
292 ipc_port_release_send(task->itk_registered[i]);
293
294 ipc_port_release_send(task->wired_ledger_port);
295 ipc_port_release_send(task->paged_ledger_port);
296
297 /* destroy the kernel ports */
298 ipc_port_dealloc_kernel(kport);
299 ipc_port_dealloc_kernel(nport);
300 }
301
302 /*
303 * Routine: ipc_task_reset
304 * Purpose:
305 * Reset a task's IPC state to protect it when
306 * it enters an elevated security context. The
307 * task name port can remain the same, since
308 * it represents no specific privilege.
309 * Conditions:
310 * Nothing locked. The task must be suspended.
311 * (Or the current thread must be in the task.)
312 */
313
314 void
315 ipc_task_reset(
316 task_t task)
317 {
318 ipc_port_t old_kport, new_kport;
319 ipc_port_t old_sself;
320 ipc_port_t old_exc_actions[EXC_TYPES_COUNT];
321 int i;
322
323 new_kport = ipc_port_alloc_kernel();
324 if (new_kport == IP_NULL)
325 panic("ipc_task_reset");
326
327 itk_lock(task);
328
329 old_kport = task->itk_self;
330
331 if (old_kport == IP_NULL) {
332 /* the task is already terminated (can this happen?) */
333 itk_unlock(task);
334 ipc_port_dealloc_kernel(new_kport);
335 return;
336 }
337
338 task->itk_self = new_kport;
339 old_sself = task->itk_sself;
340 task->itk_sself = ipc_port_make_send(new_kport);
341 ipc_kobject_set(old_kport, IKO_NULL, IKOT_NONE);
342 ipc_kobject_set(new_kport, (ipc_kobject_t) task, IKOT_TASK);
343
344 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
345 if (!task->exc_actions[i].privileged) {
346 old_exc_actions[i] = task->exc_actions[i].port;
347 task->exc_actions[i].port = IP_NULL;
348 } else {
349 old_exc_actions[i] = IP_NULL;
350 }
351 }/* for */
352
353 itk_unlock(task);
354
355 /* release the naked send rights */
356
357 if (IP_VALID(old_sself))
358 ipc_port_release_send(old_sself);
359
360 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
361 if (IP_VALID(old_exc_actions[i])) {
362 ipc_port_release_send(old_exc_actions[i]);
363 }
364 }/* for */
365
366 /* destroy the kernel port */
367 ipc_port_dealloc_kernel(old_kport);
368 }
369
370 /*
371 * Routine: ipc_thread_init
372 * Purpose:
373 * Initialize a thread's IPC state.
374 * Conditions:
375 * Nothing locked.
376 */
377
378 void
379 ipc_thread_init(
380 thread_t thread)
381 {
382 ipc_port_t kport;
383 int i;
384
385 kport = ipc_port_alloc_kernel();
386 if (kport == IP_NULL)
387 panic("ipc_thread_init");
388
389 thread->ith_self = kport;
390 thread->ith_sself = ipc_port_make_send(kport);
391
392 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i)
393 thread->exc_actions[i].port = IP_NULL;
394
395 ipc_kobject_set(kport, (ipc_kobject_t)thread, IKOT_THREAD);
396
397 ipc_kmsg_queue_init(&thread->ith_messages);
398
399 thread->ith_rpc_reply = IP_NULL;
400 }
401
402 void
403 ipc_thread_disable(
404 thread_t thread)
405 {
406 ipc_port_t kport = thread->ith_self;
407
408 if (kport != IP_NULL)
409 ipc_kobject_set(kport, IKO_NULL, IKOT_NONE);
410 }
411
412 /*
413 * Routine: ipc_thread_terminate
414 * Purpose:
415 * Clean up and destroy a thread's IPC state.
416 * Conditions:
417 * Nothing locked.
418 */
419
420 void
421 ipc_thread_terminate(
422 thread_t thread)
423 {
424 ipc_port_t kport = thread->ith_self;
425
426 if (kport != IP_NULL) {
427 int i;
428
429 if (IP_VALID(thread->ith_sself))
430 ipc_port_release_send(thread->ith_sself);
431
432 thread->ith_sself = thread->ith_self = IP_NULL;
433
434 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
435 if (IP_VALID(thread->exc_actions[i].port))
436 ipc_port_release_send(thread->exc_actions[i].port);
437 }
438
439 ipc_port_dealloc_kernel(kport);
440 }
441
442 assert(ipc_kmsg_queue_empty(&thread->ith_messages));
443
444 if (thread->ith_rpc_reply != IP_NULL)
445 ipc_port_dealloc_reply(thread->ith_rpc_reply);
446
447 thread->ith_rpc_reply = IP_NULL;
448 }
449
450 /*
451 * Routine: retrieve_task_self_fast
452 * Purpose:
453 * Optimized version of retrieve_task_self
454 * that only works for the current task.
455 *
456 * Return a send right (possibly null/dead)
457 * for the task's user-visible self port.
458 * Conditions:
459 * Nothing locked.
460 */
461
462 ipc_port_t
463 retrieve_task_self_fast(
464 register task_t task)
465 {
466 register ipc_port_t port;
467
468 assert(task == current_task());
469
470 itk_lock(task);
471 assert(task->itk_self != IP_NULL);
472
473 if ((port = task->itk_sself) == task->itk_self) {
474 /* no interposing */
475
476 ip_lock(port);
477 assert(ip_active(port));
478 ip_reference(port);
479 port->ip_srights++;
480 ip_unlock(port);
481 } else
482 port = ipc_port_copy_send(port);
483 itk_unlock(task);
484
485 return port;
486 }
487
488 /*
489 * Routine: retrieve_thread_self_fast
490 * Purpose:
491 * Return a send right (possibly null/dead)
492 * for the thread's user-visible self port.
493 *
494 * Only works for the current thread.
495 *
496 * Conditions:
497 * Nothing locked.
498 */
499
500 ipc_port_t
501 retrieve_thread_self_fast(
502 thread_t thread)
503 {
504 register ipc_port_t port;
505
506 assert(thread == current_thread());
507
508 thread_mtx_lock(thread);
509
510 assert(thread->ith_self != IP_NULL);
511
512 if ((port = thread->ith_sself) == thread->ith_self) {
513 /* no interposing */
514
515 ip_lock(port);
516 assert(ip_active(port));
517 ip_reference(port);
518 port->ip_srights++;
519 ip_unlock(port);
520 }
521 else
522 port = ipc_port_copy_send(port);
523
524 thread_mtx_unlock(thread);
525
526 return port;
527 }
528
529 /*
530 * Routine: task_self_trap [mach trap]
531 * Purpose:
532 * Give the caller send rights for his own task port.
533 * Conditions:
534 * Nothing locked.
535 * Returns:
536 * MACH_PORT_NULL if there are any resource failures
537 * or other errors.
538 */
539
540 mach_port_name_t
541 task_self_trap(
542 __unused struct task_self_trap_args *args)
543 {
544 task_t task = current_task();
545 ipc_port_t sright;
546 mach_port_name_t name;
547
548 sright = retrieve_task_self_fast(task);
549 name = ipc_port_copyout_send(sright, task->itk_space);
550 return name;
551 }
552
553 /*
554 * Routine: thread_self_trap [mach trap]
555 * Purpose:
556 * Give the caller send rights for his own thread port.
557 * Conditions:
558 * Nothing locked.
559 * Returns:
560 * MACH_PORT_NULL if there are any resource failures
561 * or other errors.
562 */
563
564 mach_port_name_t
565 thread_self_trap(
566 __unused struct thread_self_trap_args *args)
567 {
568 thread_t thread = current_thread();
569 task_t task = thread->task;
570 ipc_port_t sright;
571 mach_port_name_t name;
572
573 sright = retrieve_thread_self_fast(thread);
574 name = ipc_port_copyout_send(sright, task->itk_space);
575 return name;
576
577 }
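
/*
 * Illustrative user-space sketch (not part of the original file): how the
 * task_self/thread_self traps above are typically consumed.  mach_task_self()
 * is normally cached by the C runtime, while every call to mach_thread_self()
 * copies out a fresh send right that the caller should deallocate.  A minimal
 * sketch, assuming user-space <mach/mach.h>.
 */
#if 0	/* example only -- user-space code, not compiled into the kernel */
#include <mach/mach.h>
#include <stdio.h>

static void
self_ports_example(void)
{
	mach_port_t task = mach_task_self();		/* task kernel port */
	mach_port_t thread = mach_thread_self();	/* new send right per call */

	printf("task port %u, thread port %u\n", task, thread);

	/* release the extra thread-port reference created by the trap */
	(void) mach_port_deallocate(task, thread);
}
#endif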
578
579 /*
580 * Routine: mach_reply_port [mach trap]
581 * Purpose:
582 * Allocate a port for the caller.
583 * Conditions:
584 * Nothing locked.
585 * Returns:
586 * MACH_PORT_NULL if there are any resource failures
587 * or other errors.
588 */
589
590 mach_port_name_t
591 mach_reply_port(
592 __unused struct mach_reply_port_args *args)
593 {
594 ipc_port_t port;
595 mach_port_name_t name;
596 kern_return_t kr;
597
598 kr = ipc_port_alloc(current_task()->itk_space, &name, &port);
599 if (kr == KERN_SUCCESS)
600 ip_unlock(port);
601 else
602 name = MACH_PORT_NULL;
603 return name;
604 }
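
/*
 * Illustrative user-space sketch (not part of the original file): allocating
 * a reply port with the mach_reply_port() trap and dropping the receive
 * right once it is no longer needed.  A minimal sketch, assuming user-space
 * <mach/mach.h>.
 */
#if 0	/* example only -- user-space code */
#include <mach/mach.h>

static void
reply_port_example(void)
{
	mach_port_t reply = mach_reply_port();

	if (reply == MACH_PORT_NULL)
		return;				/* resource failure */

	/* ... use the port as msgh_local_port for an RPC reply ... */

	/* drop the receive right allocated by the trap */
	(void) mach_port_mod_refs(mach_task_self(), reply,
	    MACH_PORT_RIGHT_RECEIVE, -1);
}
#endif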
605
606 /*
607 * Routine: thread_get_special_port [kernel call]
608 * Purpose:
609 * Clones a send right for one of the thread's
610 * special ports.
611 * Conditions:
612 * Nothing locked.
613 * Returns:
614 * KERN_SUCCESS Extracted a send right.
615 * KERN_INVALID_ARGUMENT The thread is null.
616 * KERN_FAILURE The thread is dead.
617 * KERN_INVALID_ARGUMENT Invalid special port.
618 */
619
620 kern_return_t
621 thread_get_special_port(
622 thread_t thread,
623 int which,
624 ipc_port_t *portp)
625 {
626 kern_return_t result = KERN_SUCCESS;
627 ipc_port_t *whichp;
628
629 if (thread == THREAD_NULL)
630 return (KERN_INVALID_ARGUMENT);
631
632 switch (which) {
633
634 case THREAD_KERNEL_PORT:
635 whichp = &thread->ith_sself;
636 break;
637
638 default:
639 return (KERN_INVALID_ARGUMENT);
640 }
641
642 thread_mtx_lock(thread);
643
644 if (thread->active)
645 *portp = ipc_port_copy_send(*whichp);
646 else
647 result = KERN_FAILURE;
648
649 thread_mtx_unlock(thread);
650
651 return (result);
652 }
653
654 /*
655 * Routine: thread_set_special_port [kernel call]
656 * Purpose:
657 * Changes one of the thread's special ports,
658 * setting it to the supplied send right.
659 * Conditions:
660 * Nothing locked. If successful, consumes
661 * the supplied send right.
662 * Returns:
663 * KERN_SUCCESS Changed the special port.
664 * KERN_INVALID_ARGUMENT The thread is null.
665 * KERN_FAILURE The thread is dead.
666 * KERN_INVALID_ARGUMENT Invalid special port.
667 */
668
669 kern_return_t
670 thread_set_special_port(
671 thread_t thread,
672 int which,
673 ipc_port_t port)
674 {
675 kern_return_t result = KERN_SUCCESS;
676 ipc_port_t *whichp, old = IP_NULL;
677
678 if (thread == THREAD_NULL)
679 return (KERN_INVALID_ARGUMENT);
680
681 switch (which) {
682
683 case THREAD_KERNEL_PORT:
684 whichp = &thread->ith_sself;
685 break;
686
687 default:
688 return (KERN_INVALID_ARGUMENT);
689 }
690
691 thread_mtx_lock(thread);
692
693 if (thread->active) {
694 old = *whichp;
695 *whichp = port;
696 }
697 else
698 result = KERN_FAILURE;
699
700 thread_mtx_unlock(thread);
701
702 if (IP_VALID(old))
703 ipc_port_release_send(old);
704
705 return (result);
706 }
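
/*
 * Illustrative user-space sketch (not part of the original file): reading a
 * thread's kernel port with thread_get_special_port().  THREAD_KERNEL_PORT is
 * the only special port handled above, and the cloned send right belongs to
 * the caller, who should deallocate it when done.  A minimal sketch.
 */
#if 0	/* example only -- user-space code */
#include <mach/mach.h>

static kern_return_t
thread_kernel_port_example(thread_act_t thread)
{
	mach_port_t port;
	kern_return_t kr;

	kr = thread_get_special_port(thread, THREAD_KERNEL_PORT, &port);
	if (kr != KERN_SUCCESS)
		return kr;

	/* ... use the send right ... */

	return mach_port_deallocate(mach_task_self(), port);
}
#endif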
707
708 /*
709 * Routine: task_get_special_port [kernel call]
710 * Purpose:
711 * Clones a send right for one of the task's
712 * special ports.
713 * Conditions:
714 * Nothing locked.
715 * Returns:
716 * KERN_SUCCESS Extracted a send right.
717 * KERN_INVALID_ARGUMENT The task is null.
718 * KERN_FAILURE The task/space is dead.
719 * KERN_INVALID_ARGUMENT Invalid special port.
720 */
721
722 kern_return_t
723 task_get_special_port(
724 task_t task,
725 int which,
726 ipc_port_t *portp)
727 {
728 ipc_port_t port;
729
730 if (task == TASK_NULL)
731 return KERN_INVALID_ARGUMENT;
732
733 itk_lock(task);
734 if (task->itk_self == IP_NULL) {
735 itk_unlock(task);
736 return KERN_FAILURE;
737 }
738
739 switch (which) {
740 case TASK_KERNEL_PORT:
741 port = ipc_port_copy_send(task->itk_sself);
742 break;
743
744 case TASK_NAME_PORT:
745 port = ipc_port_make_send(task->itk_nself);
746 break;
747
748 case TASK_HOST_PORT:
749 port = ipc_port_copy_send(task->itk_host);
750 break;
751
752 case TASK_BOOTSTRAP_PORT:
753 port = ipc_port_copy_send(task->itk_bootstrap);
754 break;
755
756 case TASK_WIRED_LEDGER_PORT:
757 port = ipc_port_copy_send(task->wired_ledger_port);
758 break;
759
760 case TASK_PAGED_LEDGER_PORT:
761 port = ipc_port_copy_send(task->paged_ledger_port);
762 break;
763
764 default:
765 return KERN_INVALID_ARGUMENT;
766 }
767 itk_unlock(task);
768
769 *portp = port;
770 return KERN_SUCCESS;
771 }
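
/*
 * Illustrative user-space sketch (not part of the original file): cloning a
 * send right for one of the task special ports handled above.  The returned
 * right is the caller's responsibility to deallocate.  A minimal sketch,
 * assuming user-space <mach/mach.h>.
 */
#if 0	/* example only -- user-space code */
#include <mach/mach.h>

static kern_return_t
bootstrap_port_example(void)
{
	mach_port_t bootstrap;
	kern_return_t kr;

	kr = task_get_special_port(mach_task_self(),
	    TASK_BOOTSTRAP_PORT, &bootstrap);
	if (kr != KERN_SUCCESS)
		return kr;

	/* ... talk to the bootstrap server over this send right ... */

	return mach_port_deallocate(mach_task_self(), bootstrap);
}
#endif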
772
773 /*
774 * Routine: task_set_special_port [kernel call]
775 * Purpose:
776 * Changes one of the task's special ports,
777 * setting it to the supplied send right.
778 * Conditions:
779 * Nothing locked. If successful, consumes
780 * the supplied send right.
781 * Returns:
782 * KERN_SUCCESS Changed the special port.
783 * KERN_INVALID_ARGUMENT The task is null.
784 * KERN_FAILURE The task/space is dead.
785 * KERN_INVALID_ARGUMENT Invalid special port.
786 */
787
788 kern_return_t
789 task_set_special_port(
790 task_t task,
791 int which,
792 ipc_port_t port)
793 {
794 ipc_port_t *whichp;
795 ipc_port_t old;
796
797 if (task == TASK_NULL)
798 return KERN_INVALID_ARGUMENT;
799
800 switch (which) {
801 case TASK_KERNEL_PORT:
802 whichp = &task->itk_sself;
803 break;
804
805 case TASK_HOST_PORT:
806 whichp = &task->itk_host;
807 break;
808
809 case TASK_BOOTSTRAP_PORT:
810 whichp = &task->itk_bootstrap;
811 break;
812
813 case TASK_WIRED_LEDGER_PORT:
814 whichp = &task->wired_ledger_port;
815 break;
816
817 case TASK_PAGED_LEDGER_PORT:
818 whichp = &task->paged_ledger_port;
819 break;
820
821 default:
822 return KERN_INVALID_ARGUMENT;
823 }/* switch */
824
825 itk_lock(task);
826 if (task->itk_self == IP_NULL) {
827 itk_unlock(task);
828 return KERN_FAILURE;
829 }
830
831 old = *whichp;
832 *whichp = port;
833 itk_unlock(task);
834
835 if (IP_VALID(old))
836 ipc_port_release_send(old);
837 return KERN_SUCCESS;
838 }
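
/*
 * Illustrative user-space sketch (not part of the original file): replacing
 * the task bootstrap port with task_set_special_port().  On success the
 * kernel installs the send right carried in the message and releases the
 * previously installed one.  A minimal sketch; `new_bootstrap` is assumed to
 * name a valid send right in the caller's space.
 */
#if 0	/* example only -- user-space code */
#include <mach/mach.h>

static kern_return_t
set_bootstrap_example(mach_port_t new_bootstrap)
{
	return task_set_special_port(mach_task_self(),
	    TASK_BOOTSTRAP_PORT, new_bootstrap);
}
#endif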
839
840
841 /*
842 * Routine: mach_ports_register [kernel call]
843 * Purpose:
844 * Stash a handful of port send rights in the task.
845 * Child tasks will inherit these rights, but they
846 * must use mach_ports_lookup to acquire them.
847 *
848 * The rights are supplied in a (wired) kalloc'd segment.
849 * Rights which aren't supplied are assumed to be null.
850 * Conditions:
851 * Nothing locked. If successful, consumes
852 * the supplied rights and memory.
853 * Returns:
854 * KERN_SUCCESS Stashed the port rights.
855 * KERN_INVALID_ARGUMENT The task is null.
856 * KERN_INVALID_ARGUMENT The task is dead.
857 * KERN_INVALID_ARGUMENT Too many port rights supplied.
858 */
859
860 kern_return_t
861 mach_ports_register(
862 task_t task,
863 mach_port_array_t memory,
864 mach_msg_type_number_t portsCnt)
865 {
866 ipc_port_t ports[TASK_PORT_REGISTER_MAX];
867 unsigned int i;
868
869 if ((task == TASK_NULL) ||
870 (portsCnt > TASK_PORT_REGISTER_MAX))
871 return KERN_INVALID_ARGUMENT;
872
873 /*
874 * Pad the port rights with nulls.
875 */
876
877 for (i = 0; i < portsCnt; i++)
878 ports[i] = memory[i];
879 for (; i < TASK_PORT_REGISTER_MAX; i++)
880 ports[i] = IP_NULL;
881
882 itk_lock(task);
883 if (task->itk_self == IP_NULL) {
884 itk_unlock(task);
885 return KERN_INVALID_ARGUMENT;
886 }
887
888 /*
889 * Replace the old send rights with the new.
890 * Release the old rights after unlocking.
891 */
892
893 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++) {
894 ipc_port_t old;
895
896 old = task->itk_registered[i];
897 task->itk_registered[i] = ports[i];
898 ports[i] = old;
899 }
900
901 itk_unlock(task);
902
903 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
904 if (IP_VALID(ports[i]))
905 ipc_port_release_send(ports[i]);
906
907 /*
908 * Now that the operation is known to be successful,
909 * we can free the memory.
910 */
911
912 if (portsCnt != 0)
913 kfree(memory,
914 (vm_size_t) (portsCnt * sizeof(mach_port_t)));
915
916 return KERN_SUCCESS;
917 }
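
/*
 * Illustrative user-space sketch (not part of the original file): stashing up
 * to TASK_PORT_REGISTER_MAX send rights in the task with
 * mach_ports_register(), so that descendants can recover them with
 * mach_ports_lookup().  A minimal sketch; `svc` is assumed to name a valid
 * send right.
 */
#if 0	/* example only -- user-space code */
#include <mach/mach.h>

static kern_return_t
register_ports_example(mach_port_t svc)
{
	mach_port_t ports[1];

	ports[0] = svc;
	return mach_ports_register(mach_task_self(), ports, 1);
}
#endif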
918
919 /*
920 * Routine: mach_ports_lookup [kernel call]
921 * Purpose:
922 * Retrieves (clones) the stashed port send rights.
923 * Conditions:
924 * Nothing locked. If successful, the caller gets
925 * rights and memory.
926 * Returns:
927 * KERN_SUCCESS Retrieved the send rights.
928 * KERN_INVALID_ARGUMENT The task is null.
929 * KERN_INVALID_ARGUMENT The task is dead.
930 * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
931 */
932
933 kern_return_t
934 mach_ports_lookup(
935 task_t task,
936 mach_port_array_t *portsp,
937 mach_msg_type_number_t *portsCnt)
938 {
939 void *memory;
940 vm_size_t size;
941 ipc_port_t *ports;
942 int i;
943
944 if (task == TASK_NULL)
945 return KERN_INVALID_ARGUMENT;
946
947 size = (vm_size_t) (TASK_PORT_REGISTER_MAX * sizeof(ipc_port_t));
948
949 memory = kalloc(size);
950 if (memory == 0)
951 return KERN_RESOURCE_SHORTAGE;
952
953 itk_lock(task);
954 if (task->itk_self == IP_NULL) {
955 itk_unlock(task);
956
957 kfree(memory, size);
958 return KERN_INVALID_ARGUMENT;
959 }
960
961 ports = (ipc_port_t *) memory;
962
963 /*
964 * Clone port rights. Because kalloc'd memory
965 * is wired, we won't fault while holding the task lock.
966 */
967
968 for (i = 0; i < TASK_PORT_REGISTER_MAX; i++)
969 ports[i] = ipc_port_copy_send(task->itk_registered[i]);
970
971 itk_unlock(task);
972
973 *portsp = (mach_port_array_t) ports;
974 *portsCnt = TASK_PORT_REGISTER_MAX;
975 return KERN_SUCCESS;
976 }
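
/*
 * Illustrative user-space sketch (not part of the original file): retrieving
 * the registered rights with mach_ports_lookup().  The returned array arrives
 * as out-of-line memory in the caller's address space and should be released
 * with vm_deallocate() once the names have been consumed (an assumption of
 * this sketch, based on the usual MIG conventions).
 */
#if 0	/* example only -- user-space code */
#include <mach/mach.h>

static kern_return_t
lookup_ports_example(void)
{
	mach_port_array_t ports;
	mach_msg_type_number_t count;
	kern_return_t kr;
	unsigned int i;

	kr = mach_ports_lookup(mach_task_self(), &ports, &count);
	if (kr != KERN_SUCCESS)
		return kr;

	for (i = 0; i < count; i++) {
		/* MACH_PORT_NULL entries mark unused register slots */
	}

	return vm_deallocate(mach_task_self(), (vm_address_t) ports,
	    count * sizeof (ports[0]));
}
#endif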
977
978 /*
979 * Routine: convert_port_to_locked_task
980 * Purpose:
981 * Internal helper routine to convert from a port to a locked
982 * task. Used by several routines that try to convert from a
983 * task port to a reference on some task related object.
984 * Conditions:
985 * Nothing locked, blocking OK.
986 */
987 task_t
988 convert_port_to_locked_task(ipc_port_t port)
989 {
990 while (IP_VALID(port)) {
991 task_t task;
992
993 ip_lock(port);
994 if (!ip_active(port) || (ip_kotype(port) != IKOT_TASK)) {
995 ip_unlock(port);
996 return TASK_NULL;
997 }
998 task = (task_t) port->ip_kobject;
999 assert(task != TASK_NULL);
1000
1001 /*
1002 * Normal lock ordering puts task_lock() before ip_lock().
1003 * Attempt out-of-order locking here.
1004 */
1005 if (task_lock_try(task)) {
1006 ip_unlock(port);
1007 return(task);
1008 }
1009
1010 ip_unlock(port);
1011 mutex_pause();
1012 }
1013 return TASK_NULL;
1014 }
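
/*
 * Aside (not part of the original file): the loop above is the standard
 * recipe for taking two locks against their normal ordering -- take the
 * second lock, try-lock the first, and on failure drop everything and back
 * off before retrying.  A rough user-space analogy of the same pattern,
 * using pthreads and hypothetical lock names:
 */
#if 0	/* example only -- illustrative analogy, not kernel code */
#include <pthread.h>
#include <sched.h>

static pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER;	/* normally first */
static pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER;	/* normally second */

/* Acquire both locks when forced to take lock_b before lock_a. */
static void
lock_pair_out_of_order(void)
{
	for (;;) {
		pthread_mutex_lock(&lock_b);
		if (pthread_mutex_trylock(&lock_a) == 0)
			return;			/* success: both locks held */
		pthread_mutex_unlock(&lock_b);
		sched_yield();			/* back off, like mutex_pause() */
	}
}
#endif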
1015
1016 /*
1017 * Routine: convert_port_to_task
1018 * Purpose:
1019 * Convert from a port to a task.
1020 * Doesn't consume the port ref; produces a task ref,
1021 * which may be null.
1022 * Conditions:
1023 * Nothing locked.
1024 */
1025 task_t
1026 convert_port_to_task(
1027 ipc_port_t port)
1028 {
1029 task_t task = TASK_NULL;
1030
1031 if (IP_VALID(port)) {
1032 ip_lock(port);
1033
1034 if ( ip_active(port) &&
1035 ip_kotype(port) == IKOT_TASK ) {
1036 task = (task_t)port->ip_kobject;
1037 assert(task != TASK_NULL);
1038
1039 task_reference_internal(task);
1040 }
1041
1042 ip_unlock(port);
1043 }
1044
1045 return (task);
1046 }
1047
1048 /*
1049 * Routine: convert_port_to_task_name
1050 * Purpose:
1051 * Convert from a port to a task name.
1052 * Doesn't consume the port ref; produces a task name ref,
1053 * which may be null.
1054 * Conditions:
1055 * Nothing locked.
1056 */
1057 task_name_t
1058 convert_port_to_task_name(
1059 ipc_port_t port)
1060 {
1061 task_name_t task = TASK_NULL;
1062
1063 if (IP_VALID(port)) {
1064 ip_lock(port);
1065
1066 if ( ip_active(port) &&
1067 (ip_kotype(port) == IKOT_TASK ||
1068 ip_kotype(port) == IKOT_TASK_NAME)) {
1069 task = (task_name_t)port->ip_kobject;
1070 assert(task != TASK_NAME_NULL);
1071
1072 task_reference_internal(task);
1073 }
1074
1075 ip_unlock(port);
1076 }
1077
1078 return (task);
1079 }
1080
1081 /*
1082 * Routine: convert_port_to_space
1083 * Purpose:
1084 * Convert from a port to a space.
1085 * Doesn't consume the port ref; produces a space ref,
1086 * which may be null.
1087 * Conditions:
1088 * Nothing locked.
1089 */
1090 ipc_space_t
1091 convert_port_to_space(
1092 ipc_port_t port)
1093 {
1094 ipc_space_t space;
1095 task_t task;
1096
1097 task = convert_port_to_locked_task(port);
1098
1099 if (task == TASK_NULL)
1100 return IPC_SPACE_NULL;
1101
1102 if (!task->active) {
1103 task_unlock(task);
1104 return IPC_SPACE_NULL;
1105 }
1106
1107 space = task->itk_space;
1108 is_reference(space);
1109 task_unlock(task);
1110 return (space);
1111 }
1112
1113 /*
1114 * Routine: convert_port_to_map
1115 * Purpose:
1116 * Convert from a port to a map.
1117 * Doesn't consume the port ref; produces a map ref,
1118 * which may be null.
1119 * Conditions:
1120 * Nothing locked.
1121 */
1122
1123 vm_map_t
1124 convert_port_to_map(
1125 ipc_port_t port)
1126 {
1127 task_t task;
1128 vm_map_t map;
1129
1130 task = convert_port_to_locked_task(port);
1131
1132 if (task == TASK_NULL)
1133 return VM_MAP_NULL;
1134
1135 if (!task->active) {
1136 task_unlock(task);
1137 return VM_MAP_NULL;
1138 }
1139
1140 map = task->map;
1141 vm_map_reference_swap(map);
1142 task_unlock(task);
1143 return map;
1144 }
1145
1146
1147 /*
1148 * Routine: convert_port_to_thread
1149 * Purpose:
1150 * Convert from a port to a thread.
1151 * Doesn't consume the port ref; produces a thread ref,
1152 * which may be null.
1153 * Conditions:
1154 * Nothing locked.
1155 */
1156
1157 thread_t
1158 convert_port_to_thread(
1159 ipc_port_t port)
1160 {
1161 thread_t thread = THREAD_NULL;
1162
1163 if (IP_VALID(port)) {
1164 ip_lock(port);
1165
1166 if ( ip_active(port) &&
1167 ip_kotype(port) == IKOT_THREAD ) {
1168 thread = (thread_t)port->ip_kobject;
1169 assert(thread != THREAD_NULL);
1170
1171 thread_reference_internal(thread);
1172 }
1173
1174 ip_unlock(port);
1175 }
1176
1177 return (thread);
1178 }
1179
1180 /*
1181 * Routine: port_name_to_thread
1182 * Purpose:
1183 * Convert from a port name to a thread reference.
1184 * A name of MACH_PORT_NULL is valid for the null thread.
1185 * Conditions:
1186 * Nothing locked.
1187 */
1188 thread_t
1189 port_name_to_thread(
1190 mach_port_name_t name)
1191 {
1192 thread_t thread = THREAD_NULL;
1193 ipc_port_t kport;
1194
1195 if (MACH_PORT_VALID(name)) {
1196 if (ipc_object_copyin(current_space(), name,
1197 MACH_MSG_TYPE_COPY_SEND,
1198 (ipc_object_t *)&kport) != KERN_SUCCESS)
1199 return (THREAD_NULL);
1200
1201 thread = convert_port_to_thread(kport);
1202
1203 if (IP_VALID(kport))
1204 ipc_port_release_send(kport);
1205 }
1206
1207 return (thread);
1208 }
1209
1210 task_t
1211 port_name_to_task(
1212 mach_port_name_t name)
1213 {
1214 ipc_port_t kern_port;
1215 kern_return_t kr;
1216 task_t task = TASK_NULL;
1217
1218 if (MACH_PORT_VALID(name)) {
1219 kr = ipc_object_copyin(current_space(), name,
1220 MACH_MSG_TYPE_COPY_SEND,
1221 (ipc_object_t *) &kern_port);
1222 if (kr != KERN_SUCCESS)
1223 return TASK_NULL;
1224
1225 task = convert_port_to_task(kern_port);
1226
1227 if (IP_VALID(kern_port))
1228 ipc_port_release_send(kern_port);
1229 }
1230 return task;
1231 }
1232
1233 /*
1234 * Routine: convert_task_to_port
1235 * Purpose:
1236 * Convert from a task to a port.
1237 * Consumes a task ref; produces a naked send right
1238 * which may be invalid.
1239 * Conditions:
1240 * Nothing locked.
1241 */
1242
1243 ipc_port_t
1244 convert_task_to_port(
1245 task_t task)
1246 {
1247 ipc_port_t port;
1248
1249 itk_lock(task);
1250 if (task->itk_self != IP_NULL)
1251 port = ipc_port_make_send(task->itk_self);
1252 else
1253 port = IP_NULL;
1254 itk_unlock(task);
1255
1256 task_deallocate(task);
1257 return port;
1258 }
1259
1260 /*
1261 * Routine: convert_task_name_to_port
1262 * Purpose:
1263 * Convert from a task name ref to a port.
1264 * Consumes a task name ref; produces a naked send right
1265 * which may be invalid.
1266 * Conditions:
1267 * Nothing locked.
1268 */
1269
1270 ipc_port_t
1271 convert_task_name_to_port(
1272 task_name_t task_name)
1273 {
1274 ipc_port_t port;
1275
1276 itk_lock(task_name);
1277 if (task_name->itk_nself != IP_NULL)
1278 port = ipc_port_make_send(task_name->itk_nself);
1279 else
1280 port = IP_NULL;
1281 itk_unlock(task_name);
1282
1283 task_name_deallocate(task_name);
1284 return port;
1285 }
1286
1287 /*
1288 * Routine: convert_thread_to_port
1289 * Purpose:
1290 * Convert from a thread to a port.
1291 * Consumes a thread ref; produces a naked send right
1292 * which may be invalid.
1293 * Conditions:
1294 * Nothing locked.
1295 */
1296
1297 ipc_port_t
1298 convert_thread_to_port(
1299 thread_t thread)
1300 {
1301 ipc_port_t port;
1302
1303 thread_mtx_lock(thread);
1304
1305 if (thread->ith_self != IP_NULL)
1306 port = ipc_port_make_send(thread->ith_self);
1307 else
1308 port = IP_NULL;
1309
1310 thread_mtx_unlock(thread);
1311
1312 thread_deallocate(thread);
1313
1314 return (port);
1315 }
1316
1317 /*
1318 * Routine: space_deallocate
1319 * Purpose:
1320 * Deallocate a space ref produced by convert_port_to_space.
1321 * Conditions:
1322 * Nothing locked.
1323 */
1324
1325 void
1326 space_deallocate(
1327 ipc_space_t space)
1328 {
1329 if (space != IS_NULL)
1330 is_release(space);
1331 }
1332
1333 /*
1334 * Routine: thread/task_set_exception_ports [kernel call]
1335 * Purpose:
1336 * Sets the thread/task exception port, flavor and
1337 * behavior for the exception types specified by the mask.
1338 * There will be one send right per exception per valid
1339 * port.
1340 * Conditions:
1341 * Nothing locked. If successful, consumes
1342 * the supplied send right.
1343 * Returns:
1344 * KERN_SUCCESS Changed the special port.
1345 * KERN_INVALID_ARGUMENT The thread is null,
1346 * Illegal mask bit set.
1347 * Illegal exception behavior
1348 * KERN_FAILURE The thread is dead.
1349 */
1350
1351 kern_return_t
1352 thread_set_exception_ports(
1353 thread_t thread,
1354 exception_mask_t exception_mask,
1355 ipc_port_t new_port,
1356 exception_behavior_t new_behavior,
1357 thread_state_flavor_t new_flavor)
1358 {
1359 ipc_port_t old_port[EXC_TYPES_COUNT];
1360 register int i;
1361
1362 if (thread == THREAD_NULL)
1363 return (KERN_INVALID_ARGUMENT);
1364
1365 if (exception_mask & ~EXC_MASK_ALL)
1366 return (KERN_INVALID_ARGUMENT);
1367
1368 if (IP_VALID(new_port)) {
1369 switch (new_behavior) {
1370
1371 case EXCEPTION_DEFAULT:
1372 case EXCEPTION_STATE:
1373 case EXCEPTION_STATE_IDENTITY:
1374 break;
1375
1376 default:
1377 return (KERN_INVALID_ARGUMENT);
1378 }
1379 }
1380
1381 /*
1382 * Check the validity of the thread_state_flavor by calling the
1383 * VALID_THREAD_STATE_FLAVOR architecture dependent macro defined in
1384 * osfmk/mach/ARCHITECTURE/thread_status.h
1385 */
1386 if (!VALID_THREAD_STATE_FLAVOR(new_flavor))
1387 return (KERN_INVALID_ARGUMENT);
1388
1389 thread_mtx_lock(thread);
1390
1391 if (!thread->active) {
1392 thread_mtx_unlock(thread);
1393
1394 return (KERN_FAILURE);
1395 }
1396
1397 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
1398 if (exception_mask & (1 << i)) {
1399 old_port[i] = thread->exc_actions[i].port;
1400 thread->exc_actions[i].port = ipc_port_copy_send(new_port);
1401 thread->exc_actions[i].behavior = new_behavior;
1402 thread->exc_actions[i].flavor = new_flavor;
1403 }
1404 else
1405 old_port[i] = IP_NULL;
1406 }
1407
1408 thread_mtx_unlock(thread);
1409
1410 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i)
1411 if (IP_VALID(old_port[i]))
1412 ipc_port_release_send(old_port[i]);
1413
1414 if (IP_VALID(new_port)) /* consume send right */
1415 ipc_port_release_send(new_port);
1416
1417 return (KERN_SUCCESS);
1418 }
1419
1420 kern_return_t
1421 task_set_exception_ports(
1422 task_t task,
1423 exception_mask_t exception_mask,
1424 ipc_port_t new_port,
1425 exception_behavior_t new_behavior,
1426 thread_state_flavor_t new_flavor)
1427 {
1428 ipc_port_t old_port[EXC_TYPES_COUNT];
1429 boolean_t privileged = current_task()->sec_token.val[0] == 0;
1430 register int i;
1431
1432 if (task == TASK_NULL)
1433 return (KERN_INVALID_ARGUMENT);
1434
1435 if (exception_mask & ~EXC_MASK_ALL)
1436 return (KERN_INVALID_ARGUMENT);
1437
1438 if (IP_VALID(new_port)) {
1439 switch (new_behavior) {
1440
1441 case EXCEPTION_DEFAULT:
1442 case EXCEPTION_STATE:
1443 case EXCEPTION_STATE_IDENTITY:
1444 break;
1445
1446 default:
1447 return (KERN_INVALID_ARGUMENT);
1448 }
1449 }
1450
1451 itk_lock(task);
1452
1453 if (task->itk_self == IP_NULL) {
1454 itk_unlock(task);
1455
1456 return (KERN_FAILURE);
1457 }
1458
1459 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
1460 if (exception_mask & (1 << i)) {
1461 old_port[i] = task->exc_actions[i].port;
1462 task->exc_actions[i].port =
1463 ipc_port_copy_send(new_port);
1464 task->exc_actions[i].behavior = new_behavior;
1465 task->exc_actions[i].flavor = new_flavor;
1466 task->exc_actions[i].privileged = privileged;
1467 }
1468 else
1469 old_port[i] = IP_NULL;
1470 }
1471
1472 itk_unlock(task);
1473
1474 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i)
1475 if (IP_VALID(old_port[i]))
1476 ipc_port_release_send(old_port[i]);
1477
1478 if (IP_VALID(new_port)) /* consume send right */
1479 ipc_port_release_send(new_port);
1480
1481 return (KERN_SUCCESS);
1482 }
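
/*
 * Illustrative user-space sketch (not part of the original file): installing
 * a task-wide exception handler with task_set_exception_ports().  The flavor
 * THREAD_STATE_NONE is commonly paired with EXCEPTION_DEFAULT; this sketch
 * assumes the architecture's VALID_THREAD_STATE_FLAVOR() accepts it.
 */
#if 0	/* example only -- user-space code */
#include <mach/mach.h>

static kern_return_t
install_exc_handler_example(void)
{
	mach_port_t exc_port;
	kern_return_t kr;

	kr = mach_port_allocate(mach_task_self(),
	    MACH_PORT_RIGHT_RECEIVE, &exc_port);
	if (kr != KERN_SUCCESS)
		return kr;

	kr = mach_port_insert_right(mach_task_self(), exc_port, exc_port,
	    MACH_MSG_TYPE_MAKE_SEND);
	if (kr != KERN_SUCCESS)
		return kr;

	/* a send right is installed for every exception type in the mask */
	return task_set_exception_ports(mach_task_self(),
	    EXC_MASK_BAD_ACCESS | EXC_MASK_BAD_INSTRUCTION,
	    exc_port, EXCEPTION_DEFAULT, THREAD_STATE_NONE);
}
#endif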
1483
1484 /*
1485 * Routine: thread/task_swap_exception_ports [kernel call]
1486 * Purpose:
1487 * Sets the thread/task exception port, flavor and
1488 * behavior for the exception types specified by the
1489 * mask.
1490 *
1491 * The old ports, behaviors and flavors are returned.
1492 * Count specifies the array sizes on input and
1493 * the number of returned ports etc. on output. The
1494 * arrays must be large enough to hold all the returned
1495 * data; MIG returns an error otherwise. The masks
1496 * array specifies the corresponding exception type(s).
1497 *
1498 * Conditions:
1499 * Nothing locked. If successful, consumes
1500 * the supplied send right.
1501 *
1502 * Returns up to [in] CountCnt elements.
1503 * Returns:
1504 * KERN_SUCCESS Changed the special port.
1505 * KERN_INVALID_ARGUMENT The thread is null,
1506 * Illegal mask bit set.
1507 * Illegal exception behavior
1508 * KERN_FAILURE The thread is dead.
1509 */
1510
1511 kern_return_t
1512 thread_swap_exception_ports(
1513 thread_t thread,
1514 exception_mask_t exception_mask,
1515 ipc_port_t new_port,
1516 exception_behavior_t new_behavior,
1517 thread_state_flavor_t new_flavor,
1518 exception_mask_array_t masks,
1519 mach_msg_type_number_t *CountCnt,
1520 exception_port_array_t ports,
1521 exception_behavior_array_t behaviors,
1522 thread_state_flavor_array_t flavors)
1523 {
1524 ipc_port_t old_port[EXC_TYPES_COUNT];
1525 unsigned int i, j, count;
1526
1527 if (thread == THREAD_NULL)
1528 return (KERN_INVALID_ARGUMENT);
1529
1530 if (exception_mask & ~EXC_MASK_ALL)
1531 return (KERN_INVALID_ARGUMENT);
1532
1533 if (IP_VALID(new_port)) {
1534 switch (new_behavior) {
1535
1536 case EXCEPTION_DEFAULT:
1537 case EXCEPTION_STATE:
1538 case EXCEPTION_STATE_IDENTITY:
1539 break;
1540
1541 default:
1542 return (KERN_INVALID_ARGUMENT);
1543 }
1544 }
1545
1546 thread_mtx_lock(thread);
1547
1548 if (!thread->active) {
1549 thread_mtx_unlock(thread);
1550
1551 return (KERN_FAILURE);
1552 }
1553
1554 count = 0;
1555
1556 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
1557 if (exception_mask & (1 << i)) {
1558 for (j = 0; j < count; ++j) {
1559 /*
1560 * search for an identical entry, if found
1561 * set corresponding mask for this exception.
1562 */
1563 if ( thread->exc_actions[i].port == ports[j] &&
1564 thread->exc_actions[i].behavior == behaviors[j] &&
1565 thread->exc_actions[i].flavor == flavors[j] ) {
1566 masks[j] |= (1 << i);
1567 break;
1568 }
1569 }
1570
1571 if (j == count) {
1572 masks[j] = (1 << i);
1573 ports[j] = ipc_port_copy_send(thread->exc_actions[i].port);
1574
1575 behaviors[j] = thread->exc_actions[i].behavior;
1576 flavors[j] = thread->exc_actions[i].flavor;
1577 ++count;
1578 }
1579
1580 old_port[i] = thread->exc_actions[i].port;
1581 thread->exc_actions[i].port = ipc_port_copy_send(new_port);
1582 thread->exc_actions[i].behavior = new_behavior;
1583 thread->exc_actions[i].flavor = new_flavor;
1584 if (count > *CountCnt)
1585 break;
1586 }
1587 else
1588 old_port[i] = IP_NULL;
1589 }
1590
1591 thread_mtx_unlock(thread);
1592
1593 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i)
1594 if (IP_VALID(old_port[i]))
1595 ipc_port_release_send(old_port[i]);
1596
1597 if (IP_VALID(new_port)) /* consume send right */
1598 ipc_port_release_send(new_port);
1599
1600 *CountCnt = count;
1601
1602 return (KERN_SUCCESS);
1603 }
1604
1605 kern_return_t
1606 task_swap_exception_ports(
1607 task_t task,
1608 exception_mask_t exception_mask,
1609 ipc_port_t new_port,
1610 exception_behavior_t new_behavior,
1611 thread_state_flavor_t new_flavor,
1612 exception_mask_array_t masks,
1613 mach_msg_type_number_t *CountCnt,
1614 exception_port_array_t ports,
1615 exception_behavior_array_t behaviors,
1616 thread_state_flavor_array_t flavors)
1617 {
1618 ipc_port_t old_port[EXC_TYPES_COUNT];
1619 boolean_t privileged = current_task()->sec_token.val[0] == 0;
1620 unsigned int i, j, count;
1621
1622 if (task == TASK_NULL)
1623 return (KERN_INVALID_ARGUMENT);
1624
1625 if (exception_mask & ~EXC_MASK_ALL)
1626 return (KERN_INVALID_ARGUMENT);
1627
1628 if (IP_VALID(new_port)) {
1629 switch (new_behavior) {
1630
1631 case EXCEPTION_DEFAULT:
1632 case EXCEPTION_STATE:
1633 case EXCEPTION_STATE_IDENTITY:
1634 break;
1635
1636 default:
1637 return (KERN_INVALID_ARGUMENT);
1638 }
1639 }
1640
1641 itk_lock(task);
1642
1643 if (task->itk_self == IP_NULL) {
1644 itk_unlock(task);
1645
1646 return (KERN_FAILURE);
1647 }
1648
1649 count = 0;
1650
1651 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
1652 if (exception_mask & (1 << i)) {
1653 for (j = 0; j < count; j++) {
1654 /*
1655 * search for an identical entry, if found
1656 * set corresponding mask for this exception.
1657 */
1658 if ( task->exc_actions[i].port == ports[j] &&
1659 task->exc_actions[i].behavior == behaviors[j] &&
1660 task->exc_actions[i].flavor == flavors[j] ) {
1661 masks[j] |= (1 << i);
1662 break;
1663 }
1664 }
1665
1666 if (j == count) {
1667 masks[j] = (1 << i);
1668 ports[j] = ipc_port_copy_send(task->exc_actions[i].port);
1669 behaviors[j] = task->exc_actions[i].behavior;
1670 flavors[j] = task->exc_actions[i].flavor;
1671 ++count;
1672 }
1673
1674 old_port[i] = task->exc_actions[i].port;
1675 task->exc_actions[i].port = ipc_port_copy_send(new_port);
1676 task->exc_actions[i].behavior = new_behavior;
1677 task->exc_actions[i].flavor = new_flavor;
1678 task->exc_actions[i].privileged = privileged;
1679 if (count > *CountCnt)
1680 break;
1681 }
1682 else
1683 old_port[i] = IP_NULL;
1684 }
1685
1686 itk_unlock(task);
1687
1688 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++)
1689 if (IP_VALID(old_port[i]))
1690 ipc_port_release_send(old_port[i]);
1691
1692 if (IP_VALID(new_port)) /* consume send right */
1693 ipc_port_release_send(new_port);
1694
1695 *CountCnt = count;
1696
1697 return (KERN_SUCCESS);
1698 }
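
/*
 * Illustrative user-space sketch (not part of the original file): using
 * task_swap_exception_ports() to install a handler while capturing the
 * previous ports, behaviors and flavors so they can be restored later with
 * task_set_exception_ports().  A minimal sketch; `new_port` is assumed to
 * name a valid send right.
 */
#if 0	/* example only -- user-space code */
#include <mach/mach.h>

static kern_return_t
swap_exc_ports_example(mach_port_t new_port)
{
	exception_mask_t masks[EXC_TYPES_COUNT];
	mach_port_t ports[EXC_TYPES_COUNT];
	exception_behavior_t behaviors[EXC_TYPES_COUNT];
	thread_state_flavor_t flavors[EXC_TYPES_COUNT];
	mach_msg_type_number_t count = EXC_TYPES_COUNT;
	kern_return_t kr;
	unsigned int i;

	kr = task_swap_exception_ports(mach_task_self(), EXC_MASK_ALL,
	    new_port, EXCEPTION_DEFAULT, THREAD_STATE_NONE,
	    masks, &count, ports, behaviors, flavors);
	if (kr != KERN_SUCCESS)
		return kr;

	/* ... run with the new handler installed ... */

	/* restore the previous handlers, one mask group at a time */
	for (i = 0; i < count; i++)
		(void) task_set_exception_ports(mach_task_self(), masks[i],
		    ports[i], behaviors[i], flavors[i]);

	return KERN_SUCCESS;
}
#endif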
1699
1700 /*
1701 * Routine: thread/task_get_exception_ports [kernel call]
1702 * Purpose:
1703 * Clones a send right for each of the thread/task's exception
1704 * ports specified in the mask and returns the behavior
1705 * and flavor of each port.
1706 *
1707 * Returns up to [in] CountCnt elements.
1708 *
1709 * Conditions:
1710 * Nothing locked.
1711 * Returns:
1712 * KERN_SUCCESS Extracted a send right.
1713 * KERN_INVALID_ARGUMENT The thread is null,
1714 * Invalid special port,
1715 * Illegal mask bit set.
1716 * KERN_FAILURE The thread is dead.
1717 */
1718
1719 kern_return_t
1720 thread_get_exception_ports(
1721 thread_t thread,
1722 exception_mask_t exception_mask,
1723 exception_mask_array_t masks,
1724 mach_msg_type_number_t *CountCnt,
1725 exception_port_array_t ports,
1726 exception_behavior_array_t behaviors,
1727 thread_state_flavor_array_t flavors)
1728 {
1729 unsigned int i, j, count;
1730
1731 if (thread == THREAD_NULL)
1732 return (KERN_INVALID_ARGUMENT);
1733
1734 if (exception_mask & ~EXC_MASK_ALL)
1735 return (KERN_INVALID_ARGUMENT);
1736
1737 thread_mtx_lock(thread);
1738
1739 if (!thread->active) {
1740 thread_mtx_unlock(thread);
1741
1742 return (KERN_FAILURE);
1743 }
1744
1745 count = 0;
1746
1747 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
1748 if (exception_mask & (1 << i)) {
1749 for (j = 0; j < count; ++j) {
1750 /*
1751 * search for an identical entry, if found
1752 * set corresponding mask for this exception.
1753 */
1754 if ( thread->exc_actions[i].port == ports[j] &&
1755 thread->exc_actions[i].behavior == behaviors[j] &&
1756 thread->exc_actions[i].flavor == flavors[j] ) {
1757 masks[j] |= (1 << i);
1758 break;
1759 }
1760 }
1761
1762 if (j == count) {
1763 masks[j] = (1 << i);
1764 ports[j] = ipc_port_copy_send(thread->exc_actions[i].port);
1765 behaviors[j] = thread->exc_actions[i].behavior;
1766 flavors[j] = thread->exc_actions[i].flavor;
1767 ++count;
1768 if (count >= *CountCnt)
1769 break;
1770 }
1771 }
1772 }
1773
1774 thread_mtx_unlock(thread);
1775
1776 *CountCnt = count;
1777
1778 return (KERN_SUCCESS);
1779 }
1780
1781 kern_return_t
1782 task_get_exception_ports(
1783 task_t task,
1784 exception_mask_t exception_mask,
1785 exception_mask_array_t masks,
1786 mach_msg_type_number_t *CountCnt,
1787 exception_port_array_t ports,
1788 exception_behavior_array_t behaviors,
1789 thread_state_flavor_array_t flavors)
1790 {
1791 unsigned int i, j, count;
1792
1793 if (task == TASK_NULL)
1794 return (KERN_INVALID_ARGUMENT);
1795
1796 if (exception_mask & ~EXC_MASK_ALL)
1797 return (KERN_INVALID_ARGUMENT);
1798
1799 itk_lock(task);
1800
1801 if (task->itk_self == IP_NULL) {
1802 itk_unlock(task);
1803
1804 return (KERN_FAILURE);
1805 }
1806
1807 count = 0;
1808
1809 for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; ++i) {
1810 if (exception_mask & (1 << i)) {
1811 for (j = 0; j < count; ++j) {
1812 /*
1813 * search for an identical entry, if found
1814 * set corresponding mask for this exception.
1815 */
1816 if ( task->exc_actions[i].port == ports[j] &&
1817 task->exc_actions[i].behavior == behaviors[j] &&
1818 task->exc_actions[i].flavor == flavors[j] ) {
1819 masks[j] |= (1 << i);
1820 break;
1821 }
1822 }
1823
1824 if (j == count) {
1825 masks[j] = (1 << i);
1826 ports[j] = ipc_port_copy_send(task->exc_actions[i].port);
1827 behaviors[j] = task->exc_actions[i].behavior;
1828 flavors[j] = task->exc_actions[i].flavor;
1829 ++count;
1830 if (count > *CountCnt)
1831 break;
1832 }
1833 }
1834 }
1835
1836 itk_unlock(task);
1837
1838 *CountCnt = count;
1839
1840 return (KERN_SUCCESS);
1841 }
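
/*
 * Illustrative user-space sketch (not part of the original file): listing the
 * current task exception handlers with task_get_exception_ports().  Each
 * returned mask groups the exception types that share an identical
 * (port, behavior, flavor) triple, mirroring the merging loop above.
 */
#if 0	/* example only -- user-space code */
#include <mach/mach.h>
#include <stdio.h>

static void
list_exc_ports_example(void)
{
	exception_mask_t masks[EXC_TYPES_COUNT];
	mach_port_t ports[EXC_TYPES_COUNT];
	exception_behavior_t behaviors[EXC_TYPES_COUNT];
	thread_state_flavor_t flavors[EXC_TYPES_COUNT];
	mach_msg_type_number_t count = EXC_TYPES_COUNT;
	unsigned int i;

	if (task_get_exception_ports(mach_task_self(), EXC_MASK_ALL,
	    masks, &count, ports, behaviors, flavors) != KERN_SUCCESS)
		return;

	for (i = 0; i < count; i++)
		printf("mask 0x%x -> port %u, behavior %d, flavor %d\n",
		    masks[i], ports[i], behaviors[i], flavors[i]);
}
#endif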