/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */
/*
 * Copyright (c) 2005 SPARTA, Inc.
 */
/*
 *	File:	ipc/ipc_kmsg.c
 *
 *	Operations on kernel messages.
 */
#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/message.h>
#include <mach/port.h>
#include <mach/vm_map.h>
#include <mach/mach_vm.h>
#include <mach/vm_statistics.h>

#include <kern/kern_types.h>
#include <kern/assert.h>
#include <kern/debug.h>
#include <kern/ipc_kobject.h>
#include <kern/kalloc.h>
#include <kern/zalloc.h>
#include <kern/processor.h>
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/counters.h>
#include <kern/cpu_data.h>
#include <kern/policy_internal.h>
#include <kern/mach_filter.h>

#include <pthread/priority_private.h>

#include <machine/limits.h>

#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>

#include <ipc/port.h>
#include <ipc/ipc_types.h>
#include <ipc/ipc_entry.h>
#include <ipc/ipc_kmsg.h>
#include <ipc/ipc_notify.h>
#include <ipc/ipc_object.h>
#include <ipc/ipc_space.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_right.h>
#include <ipc/ipc_hash.h>
#include <ipc/ipc_table.h>
#include <ipc/ipc_importance.h>

#include <kern/mach_node.h>
#include <ipc/flipc.h>

#include <os/overflow.h>

#include <security/mac_mach_internal.h>

#include <device/device_server.h>

#include <ppc/Firmware.h>
#include <ppc/low_trace.h>

#define DEBUG_MSGS_K64 1

#include <sys/kdebug.h>
#include <libkern/OSAtomic.h>

#include <libkern/crypto/sha2.h>

#if __has_feature(ptrauth_calls)
#include <libkern/ptrauth_utils.h>
#endif
typedef struct{
	mach_msg_bits_t       msgh_bits;
	mach_msg_size_t       msgh_size;
	mach_port_name_t      msgh_remote_port;
	mach_port_name_t      msgh_local_port;
	mach_port_name_t      msgh_voucher_port;
	mach_msg_id_t         msgh_id;
} mach_msg_legacy_header_t;

typedef struct{
	mach_msg_legacy_header_t header;
	mach_msg_body_t          body;
} mach_msg_legacy_base_t;

typedef struct{
	mach_port_name_t              name;
	mach_msg_size_t               pad1;
	uint32_t                      pad2 : 16;
	mach_msg_type_name_t          disposition : 8;
	mach_msg_descriptor_type_t    type : 8;
} mach_msg_legacy_port_descriptor_t;

typedef union{
	mach_msg_legacy_port_descriptor_t       port;
	mach_msg_ool_descriptor32_t              out_of_line32;
	mach_msg_ool_ports_descriptor32_t        ool_ports32;
	mach_msg_guarded_port_descriptor32_t     guarded_port32;
	mach_msg_type_descriptor_t               type;
} mach_msg_legacy_descriptor_t;

#define LEGACY_HEADER_SIZE_DELTA ((mach_msg_size_t)(sizeof(mach_msg_header_t) - sizeof(mach_msg_legacy_header_t)))
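/*
 * Editorial sketch (not from the original source): on LP64 kernels the
 * in-kernel mach_msg_header_t carries 8-byte ipc_port_t pointers where the
 * legacy user header carries 4-byte mach_port_name_t names, so the delta is
 * the growth of the remote/local port fields; on ILP32 it collapses to zero.
 *
 *	mach_msg_size_t user_hdr = sizeof(mach_msg_legacy_header_t);
 *	mach_msg_size_t kern_hdr = user_hdr + LEGACY_HEADER_SIZE_DELTA;
 *	assert(kern_hdr == sizeof(mach_msg_header_t));
 */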
#if __has_feature(ptrauth_calls)
typedef uintptr_t ikm_sig_scratch_t;

static void
ikm_init_sig(
	__unused ipc_kmsg_t kmsg,
	ikm_sig_scratch_t *scratchp)
{
	*scratchp = OS_PTRAUTH_DISCRIMINATOR("kmsg.ikm_signature");
}

static void
ikm_chunk_sig(
	ipc_kmsg_t kmsg,
	void *data,
	size_t len,
	ikm_sig_scratch_t *scratchp)
{
	int ptrauth_flags;
	void *trailerp;

	/*
	 * if we happen to be doing the trailer chunk,
	 * diversify with the ptrauth-ed trailer pointer -
	 * as that is unchanging for the kmsg
	 */
	trailerp = (void *)
	    ((vm_offset_t)kmsg->ikm_header +
	    mach_round_msg(kmsg->ikm_header->msgh_size));

	ptrauth_flags = (data == trailerp) ? PTRAUTH_ADDR_DIVERSIFY : 0;
	*scratchp = ptrauth_utils_sign_blob_generic(data, len, *scratchp, ptrauth_flags);
}

static uintptr_t
ikm_finalize_sig(
	__unused ipc_kmsg_t kmsg,
	ikm_sig_scratch_t *scratchp)
{
	return *scratchp;
}
#elif defined(CRYPTO_SHA2) && !defined(__x86_64__) && !defined(__arm__)

typedef SHA256_CTX ikm_sig_scratch_t;

static void
ikm_init_sig(
	__unused ipc_kmsg_t kmsg,
	ikm_sig_scratch_t *scratchp)
{
	SHA256_Init(scratchp);
	SHA256_Update(scratchp, &vm_kernel_addrhash_salt_ext, sizeof(uint64_t));
}

static void
ikm_chunk_sig(
	__unused ipc_kmsg_t kmsg,
	void *data,
	size_t len,
	ikm_sig_scratch_t *scratchp)
{
	SHA256_Update(scratchp, data, len);
}

static uintptr_t
ikm_finalize_sig(
	__unused ipc_kmsg_t kmsg,
	ikm_sig_scratch_t *scratchp)
{
	uintptr_t sha_digest[SHA256_DIGEST_LENGTH / sizeof(uintptr_t)];

	SHA256_Final((uint8_t *)sha_digest, scratchp);

	/*
	 * Only use one uintptr_t sized part of result for space and compat reasons.
	 * Truncation is better than XOR'ing the chunks together in hopes of higher
	 * entropy - because of its lower risk of collisions.
	 */
	return sha_digest[0];
}
#else
/* Stubbed out implementation (for __x86_64__, __arm__ for now) */

typedef uintptr_t ikm_sig_scratch_t;

static void
ikm_init_sig(
	__unused ipc_kmsg_t kmsg,
	ikm_sig_scratch_t *scratchp)
{
	*scratchp = 0;
}

static void
ikm_chunk_sig(
	__unused ipc_kmsg_t kmsg,
	__unused void *data,
	__unused size_t len,
	__unused ikm_sig_scratch_t *scratchp)
{
}

static uintptr_t
ikm_finalize_sig(
	__unused ipc_kmsg_t kmsg,
	ikm_sig_scratch_t *scratchp)
{
	return *scratchp;
}

#endif
static void
ikm_header_sig(
	ipc_kmsg_t kmsg,
	ikm_sig_scratch_t *scratchp)
{
	mach_msg_size_t dsc_count;
	mach_msg_base_t base;
	boolean_t complex;

	/* take a snapshot of the message header/body-count */
	base.header = *kmsg->ikm_header;
	complex = ((base.header.msgh_bits & MACH_MSGH_BITS_COMPLEX) != 0);
	if (complex) {
		dsc_count = ((mach_msg_body_t *)(kmsg->ikm_header + 1))->msgh_descriptor_count;
	} else {
		dsc_count = 0;
	}
	base.body.msgh_descriptor_count = dsc_count;

	/* compute sig of a copy of the header with all varying bits masked off */
	base.header.msgh_bits &= MACH_MSGH_BITS_USER;
	base.header.msgh_bits &= ~MACH_MSGH_BITS_VOUCHER_MASK;
	ikm_chunk_sig(kmsg, &base, sizeof(mach_msg_base_t), scratchp);
}
static void
ikm_trailer_sig(
	ipc_kmsg_t kmsg,
	ikm_sig_scratch_t *scratchp)
{
	mach_msg_max_trailer_t *trailerp;

	/* Add sig of the trailer contents */
	trailerp = (mach_msg_max_trailer_t *)
	    ((vm_offset_t)kmsg->ikm_header +
	    mach_round_msg(kmsg->ikm_header->msgh_size));
	ikm_chunk_sig(kmsg, trailerp, sizeof(*trailerp), scratchp);
}
/* Compute the signature for the body bits of a message */
static void
ikm_body_sig(
	ipc_kmsg_t kmsg,
	ikm_sig_scratch_t *scratchp)
{
	mach_msg_descriptor_t *kern_dsc;
	mach_msg_size_t dsc_count;
	mach_msg_body_t *body;
	mach_msg_size_t i;

	if ((kmsg->ikm_header->msgh_bits & MACH_MSGH_BITS_COMPLEX) == 0) {
		return;
	}

	body = (mach_msg_body_t *) (kmsg->ikm_header + 1);
	dsc_count = body->msgh_descriptor_count;

	if (dsc_count == 0) {
		return;
	}

	kern_dsc = (mach_msg_descriptor_t *) (body + 1);

	/* Compute the signature for the whole descriptor array */
	ikm_chunk_sig(kmsg, kern_dsc, sizeof(*kern_dsc) * dsc_count, scratchp);

	/* look for descriptor contents that need a signature */
	for (i = 0; i < dsc_count; i++) {
		switch (kern_dsc[i].type.type) {
		case MACH_MSG_PORT_DESCRIPTOR:
		case MACH_MSG_GUARDED_PORT_DESCRIPTOR:
		case MACH_MSG_OOL_VOLATILE_DESCRIPTOR:
		case MACH_MSG_OOL_DESCRIPTOR:
			break;

		case MACH_MSG_OOL_PORTS_DESCRIPTOR: {
			mach_msg_ool_ports_descriptor_t *ports_dsc;

			/* Compute sig for the port/object pointers */
			ports_dsc = (mach_msg_ool_ports_descriptor_t *)&kern_dsc[i];
			ikm_chunk_sig(kmsg, ports_dsc->address, ports_dsc->count * sizeof(ipc_object_t), scratchp);
			break;
		}
		default:
			panic("ipc_kmsg_body_sig: invalid message descriptor");
		}
	}
}
static void
ikm_sign(ipc_kmsg_t kmsg)
{
	ikm_sig_scratch_t scratch;
	uintptr_t sig;

	zone_require(ipc_kmsg_zone, kmsg);

	ikm_init_sig(kmsg, &scratch);

	ikm_header_sig(kmsg, &scratch);

	/* save off partial signature for just header */
	sig = ikm_finalize_sig(kmsg, &scratch);
	kmsg->ikm_header_sig = sig;

	ikm_trailer_sig(kmsg, &scratch);

	/* save off partial signature for header+trailer */
	sig = ikm_finalize_sig(kmsg, &scratch);
	kmsg->ikm_headtrail_sig = sig;

	ikm_body_sig(kmsg, &scratch);
	sig = ikm_finalize_sig(kmsg, &scratch);
	kmsg->ikm_signature = sig;
}
unsigned int ikm_signature_failures;
unsigned int ikm_signature_failure_id;

#if (DEVELOPMENT || DEBUG)
unsigned int ikm_signature_panic_disable;
unsigned int ikm_signature_header_failures;
unsigned int ikm_signature_trailer_failures;
#endif
static void
ikm_validate_sig(
	ipc_kmsg_t kmsg)
{
	ikm_sig_scratch_t scratch;
	uintptr_t expected;
	uintptr_t sig;
	const char *str;

	zone_require(ipc_kmsg_zone, kmsg);

	ikm_init_sig(kmsg, &scratch);

	ikm_header_sig(kmsg, &scratch);

	/* Do partial evaluation of just the header signature */
	sig = ikm_finalize_sig(kmsg, &scratch);
	expected = kmsg->ikm_header_sig;
	if (sig != expected) {
		ikm_signature_header_failures++;
		str = "header";
		goto failure;
	}

	ikm_trailer_sig(kmsg, &scratch);

	/* Do partial evaluation of header+trailer signature */
	sig = ikm_finalize_sig(kmsg, &scratch);
	expected = kmsg->ikm_headtrail_sig;
	if (sig != expected) {
		ikm_signature_trailer_failures++;
		str = "trailer";
		goto failure;
	}

	ikm_body_sig(kmsg, &scratch);
	sig = ikm_finalize_sig(kmsg, &scratch);

	expected = kmsg->ikm_signature;
	if (sig != expected) {
		ikm_signature_failures++;
		str = "full";
		goto failure;
	}

	return;

failure:
	{
		mach_msg_id_t id = kmsg->ikm_header->msgh_id;

		ikm_signature_failure_id = id;
#if (DEVELOPMENT || DEBUG)
		if (ikm_signature_panic_disable) {
			return;
		}
#endif
		panic("ikm_validate_sig: %s signature mismatch: kmsg=0x%p, id=%d, sig=0x%zx (expected 0x%zx)",
		    str, kmsg, id, sig, expected);
	}
}
495 extern void ipc_pset_print64(
498 extern void ipc_kmsg_print64(
502 extern void ipc_msg_print64(
503 mach_msg_header_t
*msgh
);
505 extern ipc_port_t
ipc_name_to_data64(
507 mach_port_name_t name
);
510 * Forward declarations
512 void ipc_msg_print_untyped64(
513 mach_msg_body_t
*body
);
515 const char * ipc_type_name64(
519 void ipc_print_type_name64(
524 mach_msg_bits_t bit
);
527 mm_copy_options_string64(
528 mach_msg_copy_options_t option
);
530 void db_print_msg_uid64(mach_msg_header_t
*);
533 ipc_msg_body_print64(void *body
, int size
)
535 uint32_t *word
= (uint32_t *) body
;
536 uint32_t *end
= (uint32_t *)(((uintptr_t) body
) + size
537 - sizeof(mach_msg_header_t
));
540 kprintf(" body(%p-%p):\n %p: ", body
, end
, word
);
542 for (i
= 0; i
< 8; i
++, word
++) {
547 kprintf("%08x ", *word
);
549 kprintf("\n %p: ", word
);
560 case MACH_MSG_TYPE_PORT_NAME
:
563 case MACH_MSG_TYPE_MOVE_RECEIVE
:
565 return "port_receive";
567 return "move_receive";
570 case MACH_MSG_TYPE_MOVE_SEND
:
577 case MACH_MSG_TYPE_MOVE_SEND_ONCE
:
579 return "port_send_once";
581 return "move_send_once";
584 case MACH_MSG_TYPE_COPY_SEND
:
587 case MACH_MSG_TYPE_MAKE_SEND
:
590 case MACH_MSG_TYPE_MAKE_SEND_ONCE
:
591 return "make_send_once";
599 ipc_print_type_name64(
602 const char *name
= ipc_type_name64(type_name
, TRUE
);
606 kprintf("type%d", type_name
);
611 * ipc_kmsg_print64 [ debug ]
618 kprintf("%s kmsg=%p:\n", str
, kmsg
);
619 kprintf(" next=%p, prev=%p, size=%d",
624 ipc_msg_print64(kmsg
->ikm_header
);
632 case MACH_MSGH_BITS_COMPLEX
: return "complex";
633 case MACH_MSGH_BITS_CIRCULAR
: return "circular";
634 default: return (char *) 0;
639 * ipc_msg_print64 [ debug ]
643 mach_msg_header_t
*msgh
)
645 mach_msg_bits_t mbits
;
647 const char *bit_name
;
650 mbits
= msgh
->msgh_bits
;
651 kprintf(" msgh_bits=0x%x: l=0x%x,r=0x%x\n",
653 MACH_MSGH_BITS_LOCAL(msgh
->msgh_bits
),
654 MACH_MSGH_BITS_REMOTE(msgh
->msgh_bits
));
656 mbits
= MACH_MSGH_BITS_OTHER(mbits
) & MACH_MSGH_BITS_USED
;
657 kprintf(" decoded bits: ");
659 for (i
= 0, bit
= 1; i
< sizeof(mbits
) * 8; ++i
, bit
<<= 1) {
660 if ((mbits
& bit
) == 0) {
663 bit_name
= msgh_bit_decode64((mach_msg_bits_t
)bit
);
665 kprintf("%s%s", needs_comma
? "," : "", bit_name
);
667 kprintf("%sunknown(0x%x),", needs_comma
? "," : "", bit
);
671 if (msgh
->msgh_bits
& ~MACH_MSGH_BITS_USED
) {
672 kprintf("%sunused=0x%x,", needs_comma
? "," : "",
673 msgh
->msgh_bits
& ~MACH_MSGH_BITS_USED
);
678 if (msgh
->msgh_remote_port
) {
679 kprintf(" remote=%p(", msgh
->msgh_remote_port
);
680 ipc_print_type_name64(MACH_MSGH_BITS_REMOTE(msgh
->msgh_bits
));
683 kprintf(" remote=null");
686 if (msgh
->msgh_local_port
) {
687 kprintf("%slocal=%p(", needs_comma
? "," : "",
688 msgh
->msgh_local_port
);
689 ipc_print_type_name64(MACH_MSGH_BITS_LOCAL(msgh
->msgh_bits
));
692 kprintf("local=null\n");
695 kprintf(" msgh_id=%d, size=%d\n",
699 if (mbits
& MACH_MSGH_BITS_COMPLEX
) {
700 ipc_msg_print_untyped64((mach_msg_body_t
*) (msgh
+ 1));
703 ipc_msg_body_print64((void *)(msgh
+ 1), msgh
->msgh_size
);
708 mm_copy_options_string64(
709 mach_msg_copy_options_t option
)
714 case MACH_MSG_PHYSICAL_COPY
:
717 case MACH_MSG_VIRTUAL_COPY
:
720 case MACH_MSG_OVERWRITE
:
721 name
= "OVERWRITE(DEPRECATED)";
723 case MACH_MSG_ALLOCATE
:
726 case MACH_MSG_KALLOC_COPY_T
:
727 name
= "KALLOC_COPY_T";
737 ipc_msg_print_untyped64(
738 mach_msg_body_t
*body
)
740 mach_msg_descriptor_t
*saddr
, *send
;
741 mach_msg_descriptor_type_t type
;
743 kprintf(" %d descriptors: \n", body
->msgh_descriptor_count
);
745 saddr
= (mach_msg_descriptor_t
*) (body
+ 1);
746 send
= saddr
+ body
->msgh_descriptor_count
;
748 for (; saddr
< send
; saddr
++) {
749 type
= saddr
->type
.type
;
752 case MACH_MSG_PORT_DESCRIPTOR
: {
753 mach_msg_port_descriptor_t
*dsc
;
756 kprintf(" PORT name = %p disp = ", dsc
->name
);
757 ipc_print_type_name64(dsc
->disposition
);
761 case MACH_MSG_OOL_VOLATILE_DESCRIPTOR
:
762 case MACH_MSG_OOL_DESCRIPTOR
: {
763 mach_msg_ool_descriptor_t
*dsc
;
765 dsc
= (mach_msg_ool_descriptor_t
*) &saddr
->out_of_line
;
766 kprintf(" OOL%s addr = %p size = 0x%x copy = %s %s\n",
767 type
== MACH_MSG_OOL_DESCRIPTOR
? "" : " VOLATILE",
768 dsc
->address
, dsc
->size
,
769 mm_copy_options_string64(dsc
->copy
),
770 dsc
->deallocate
? "DEALLOC" : "");
773 case MACH_MSG_OOL_PORTS_DESCRIPTOR
: {
774 mach_msg_ool_ports_descriptor_t
*dsc
;
776 dsc
= (mach_msg_ool_ports_descriptor_t
*) &saddr
->ool_ports
;
778 kprintf(" OOL_PORTS addr = %p count = 0x%x ",
779 dsc
->address
, dsc
->count
);
781 ipc_print_type_name64(dsc
->disposition
);
782 kprintf(" copy = %s %s\n",
783 mm_copy_options_string64(dsc
->copy
),
784 dsc
->deallocate
? "DEALLOC" : "");
787 case MACH_MSG_GUARDED_PORT_DESCRIPTOR
: {
788 mach_msg_guarded_port_descriptor_t
*dsc
;
790 dsc
= (mach_msg_guarded_port_descriptor_t
*)&saddr
->guarded_port
;
791 kprintf(" GUARDED_PORT name = %p flags = 0x%x disp = ", dsc
->name
, dsc
->flags
);
792 ipc_print_type_name64(dsc
->disposition
);
797 kprintf(" UNKNOWN DESCRIPTOR 0x%x\n", type
);
#define DEBUG_IPC_KMSG_PRINT(kmsg, string)       \
	__unreachable_ok_push                    \
	if (DEBUG_KPRINT_SYSCALL_PREDICATE(DEBUG_KPRINT_SYSCALL_IPC_MASK)) {   \
	        ipc_kmsg_print64(kmsg, string);  \
	}                                        \
	__unreachable_ok_pop

#define DEBUG_IPC_MSG_BODY_PRINT(body, size)     \
	__unreachable_ok_push                    \
	if (DEBUG_KPRINT_SYSCALL_PREDICATE(DEBUG_KPRINT_SYSCALL_IPC_MASK)) {   \
	        ipc_msg_body_print64(body, size);\
	}                                        \
	__unreachable_ok_pop

#else /* !DEBUG_MSGS_K64 */
#define DEBUG_IPC_KMSG_PRINT(kmsg, string)
#define DEBUG_IPC_MSG_BODY_PRINT(body, size)
#endif  /* !DEBUG_MSGS_K64 */
extern vm_map_t         ipc_kernel_copy_map;
extern vm_size_t        ipc_kmsg_max_space;
extern const vm_size_t  ipc_kmsg_max_vm_space;
extern const vm_size_t  ipc_kmsg_max_body_space;
extern vm_size_t        msg_ool_size_small;

#define MSG_OOL_SIZE_SMALL      msg_ool_size_small

#if defined(__LP64__)
#define MAP_SIZE_DIFFERS(map)   (map->max_offset < MACH_VM_MAX_ADDRESS)
#define OTHER_OOL_DESCRIPTOR    mach_msg_ool_descriptor32_t
#define OTHER_OOL_PORTS_DESCRIPTOR      mach_msg_ool_ports_descriptor32_t
#else
#define MAP_SIZE_DIFFERS(map)   (map->max_offset > VM_MAX_ADDRESS)
#define OTHER_OOL_DESCRIPTOR    mach_msg_ool_descriptor64_t
#define OTHER_OOL_PORTS_DESCRIPTOR      mach_msg_ool_ports_descriptor64_t
#endif

#define DESC_SIZE_ADJUSTMENT    ((mach_msg_size_t)(sizeof(mach_msg_ool_descriptor64_t) - \
	sizeof(mach_msg_ool_descriptor32_t)))
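/*
 * Editorial sketch (assumption, not part of the original source): the kernel
 * keeps every descriptor in its 64-bit form, so a message copied in from a
 * 32-bit sender can grow by DESC_SIZE_ADJUSTMENT bytes per descriptor.
 * A worst-case sizing helper in the spirit of ipc_kmsg_alloc() would be:
 *
 *	static mach_msg_size_t
 *	kmsg_expansion_bound(mach_msg_size_t body_size)	// hypothetical helper
 *	{
 *		// assume the whole body could be descriptors
 *		mach_msg_size_t max_desc = (body_size / sizeof(mach_msg_ool_descriptor32_t)) *
 *		    DESC_SIZE_ADJUSTMENT;
 *		return body_size + max_desc;
 *	}
 */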
843 /* scatter list macros */
845 #define SKIP_PORT_DESCRIPTORS(s, c) \
847 if ((s) != MACH_MSG_DESCRIPTOR_NULL) { \
849 if ((s)->type.type != MACH_MSG_PORT_DESCRIPTOR) \
854 (s) = MACH_MSG_DESCRIPTOR_NULL; \
858 #define INCREMENT_SCATTER(s, c, d) \
860 if ((s) != MACH_MSG_DESCRIPTOR_NULL) { \
861 s = (d) ? (mach_msg_descriptor_t *) \
862 ((OTHER_OOL_DESCRIPTOR *)(s) + 1) : \
#define KMSG_TRACE_FLAG_TRACED     0x000001
#define KMSG_TRACE_FLAG_COMPLEX    0x000002
#define KMSG_TRACE_FLAG_OOLMEM     0x000004
#define KMSG_TRACE_FLAG_VCPY       0x000008
#define KMSG_TRACE_FLAG_PCPY       0x000010
#define KMSG_TRACE_FLAG_SND64      0x000020
#define KMSG_TRACE_FLAG_RAISEIMP   0x000040
#define KMSG_TRACE_FLAG_APP_SRC    0x000080
#define KMSG_TRACE_FLAG_APP_DST    0x000100
#define KMSG_TRACE_FLAG_DAEMON_SRC 0x000200
#define KMSG_TRACE_FLAG_DAEMON_DST 0x000400
#define KMSG_TRACE_FLAG_DST_NDFLTQ 0x000800
#define KMSG_TRACE_FLAG_SRC_NDFLTQ 0x001000
#define KMSG_TRACE_FLAG_DST_SONCE  0x002000
#define KMSG_TRACE_FLAG_SRC_SONCE  0x004000
#define KMSG_TRACE_FLAG_CHECKIN    0x008000
#define KMSG_TRACE_FLAG_ONEWAY     0x010000
#define KMSG_TRACE_FLAG_IOKIT      0x020000
#define KMSG_TRACE_FLAG_SNDRCV     0x040000
#define KMSG_TRACE_FLAG_DSTQFULL   0x080000
#define KMSG_TRACE_FLAG_VOUCHER    0x100000
#define KMSG_TRACE_FLAG_TIMER      0x200000
#define KMSG_TRACE_FLAG_SEMA       0x400000
#define KMSG_TRACE_FLAG_DTMPOWNER  0x800000
#define KMSG_TRACE_FLAG_GUARDED_DESC 0x1000000

#define KMSG_TRACE_FLAGS_MASK      0x1ffffff
#define KMSG_TRACE_FLAGS_SHIFT     8

#define KMSG_TRACE_PORTS_MASK      0xff
#define KMSG_TRACE_PORTS_SHIFT     0
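/*
 * Editorial note: the final argument of the MACH_IPC_KMSG_INFO tracepoint
 * packs the flags above next to the count of moved ports, mirroring the
 * KDBG() call at the end of ipc_kmsg_trace_send() below:
 *
 *	uint64_t arg = ((msg_flags & KMSG_TRACE_FLAGS_MASK) << KMSG_TRACE_FLAGS_SHIFT) |
 *	    ((num_ports & KMSG_TRACE_PORTS_MASK) << KMSG_TRACE_PORTS_SHIFT);
 */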
900 #if (KDEBUG_LEVEL >= KDEBUG_LEVEL_STANDARD)
904 ipc_kmsg_trace_send(ipc_kmsg_t kmsg
,
905 mach_msg_option_t option
)
907 task_t send_task
= TASK_NULL
;
908 ipc_port_t dst_port
, src_port
;
909 boolean_t is_task_64bit
;
910 mach_msg_header_t
*msg
;
911 mach_msg_trailer_t
*trailer
;
914 uint32_t msg_size
= 0;
915 uint64_t msg_flags
= KMSG_TRACE_FLAG_TRACED
;
916 uint32_t num_ports
= 0;
917 uint32_t send_pid
, dst_pid
;
920 * check to see not only if ktracing is enabled, but if we will
921 * _actually_ emit the KMSG_INFO tracepoint. This saves us a
922 * significant amount of processing (and a port lock hold) in
923 * the non-tracing case.
925 if (__probable((kdebug_enable
& KDEBUG_TRACE
) == 0)) {
928 if (!kdebug_debugid_enabled(MACHDBG_CODE(DBG_MACH_IPC
, MACH_IPC_KMSG_INFO
))) {
932 msg
= kmsg
->ikm_header
;
934 dst_port
= msg
->msgh_remote_port
;
935 if (!IPC_PORT_VALID(dst_port
)) {
940 * Message properties / options
942 if ((option
& (MACH_SEND_MSG
| MACH_RCV_MSG
)) == (MACH_SEND_MSG
| MACH_RCV_MSG
)) {
943 msg_flags
|= KMSG_TRACE_FLAG_SNDRCV
;
946 if (msg
->msgh_id
>= is_iokit_subsystem
.start
&&
947 msg
->msgh_id
< is_iokit_subsystem
.end
+ 100) {
948 msg_flags
|= KMSG_TRACE_FLAG_IOKIT
;
950 /* magic XPC checkin message id (XPC_MESSAGE_ID_CHECKIN) from libxpc */
951 else if (msg
->msgh_id
== 0x77303074u
/* w00t */) {
952 msg_flags
|= KMSG_TRACE_FLAG_CHECKIN
;
955 if (msg
->msgh_bits
& MACH_MSGH_BITS_RAISEIMP
) {
956 msg_flags
|= KMSG_TRACE_FLAG_RAISEIMP
;
959 if (unsafe_convert_port_to_voucher(kmsg
->ikm_voucher
)) {
960 msg_flags
|= KMSG_TRACE_FLAG_VOUCHER
;
964 * Sending task / port
966 send_task
= current_task();
967 send_pid
= task_pid(send_task
);
970 if (task_is_daemon(send_task
)) {
971 msg_flags
|= KMSG_TRACE_FLAG_DAEMON_SRC
;
972 } else if (task_is_app(send_task
)) {
973 msg_flags
|= KMSG_TRACE_FLAG_APP_SRC
;
977 is_task_64bit
= (send_task
->map
->max_offset
> VM_MAX_ADDRESS
);
979 msg_flags
|= KMSG_TRACE_FLAG_SND64
;
982 src_port
= msg
->msgh_local_port
;
984 if (src_port
->ip_messages
.imq_qlimit
!= MACH_PORT_QLIMIT_DEFAULT
) {
985 msg_flags
|= KMSG_TRACE_FLAG_SRC_NDFLTQ
;
987 switch (MACH_MSGH_BITS_LOCAL(msg
->msgh_bits
)) {
988 case MACH_MSG_TYPE_MOVE_SEND_ONCE
:
989 msg_flags
|= KMSG_TRACE_FLAG_SRC_SONCE
;
995 msg_flags
|= KMSG_TRACE_FLAG_ONEWAY
;
1000 * Destination task / port
1003 if (!ip_active(dst_port
)) {
1004 /* dst port is being torn down */
1005 dst_pid
= (uint32_t)0xfffffff0;
1006 } else if (dst_port
->ip_tempowner
) {
1007 msg_flags
|= KMSG_TRACE_FLAG_DTMPOWNER
;
1008 if (IIT_NULL
!= dst_port
->ip_imp_task
) {
1009 dst_pid
= task_pid(dst_port
->ip_imp_task
->iit_task
);
1011 dst_pid
= (uint32_t)0xfffffff1;
1013 } else if (dst_port
->ip_receiver_name
== MACH_PORT_NULL
) {
1014 /* dst_port is otherwise in-transit */
1015 dst_pid
= (uint32_t)0xfffffff2;
1017 if (dst_port
->ip_receiver
== ipc_space_kernel
) {
1020 ipc_space_t dst_space
;
1021 dst_space
= dst_port
->ip_receiver
;
1022 if (dst_space
&& is_active(dst_space
)) {
1023 dst_pid
= task_pid(dst_space
->is_task
);
1024 if (task_is_daemon(dst_space
->is_task
)) {
1025 msg_flags
|= KMSG_TRACE_FLAG_DAEMON_DST
;
1026 } else if (task_is_app(dst_space
->is_task
)) {
1027 msg_flags
|= KMSG_TRACE_FLAG_APP_DST
;
1030 /* receiving task is being torn down */
1031 dst_pid
= (uint32_t)0xfffffff3;
1036 if (dst_port
->ip_messages
.imq_qlimit
!= MACH_PORT_QLIMIT_DEFAULT
) {
1037 msg_flags
|= KMSG_TRACE_FLAG_DST_NDFLTQ
;
1039 if (imq_full(&dst_port
->ip_messages
)) {
1040 msg_flags
|= KMSG_TRACE_FLAG_DSTQFULL
;
1043 kotype
= ip_kotype(dst_port
);
1045 ip_unlock(dst_port
);
1048 case IKOT_SEMAPHORE
:
1049 msg_flags
|= KMSG_TRACE_FLAG_SEMA
;
1053 msg_flags
|= KMSG_TRACE_FLAG_TIMER
;
1055 case IKOT_MASTER_DEVICE
:
1056 case IKOT_IOKIT_CONNECT
:
1057 case IKOT_IOKIT_OBJECT
:
1058 case IKOT_IOKIT_IDENT
:
1059 case IKOT_UEXT_OBJECT
:
1060 msg_flags
|= KMSG_TRACE_FLAG_IOKIT
;
1066 switch (MACH_MSGH_BITS_REMOTE(msg
->msgh_bits
)) {
1067 case MACH_MSG_TYPE_PORT_SEND_ONCE
:
1068 msg_flags
|= KMSG_TRACE_FLAG_DST_SONCE
;
1076 * Message size / content
1078 msg_size
= msg
->msgh_size
- sizeof(mach_msg_header_t
);
1080 if (msg
->msgh_bits
& MACH_MSGH_BITS_COMPLEX
) {
1081 mach_msg_body_t
*msg_body
;
1082 mach_msg_descriptor_t
*kern_dsc
;
1085 msg_flags
|= KMSG_TRACE_FLAG_COMPLEX
;
1087 msg_body
= (mach_msg_body_t
*)(kmsg
->ikm_header
+ 1);
1088 dsc_count
= (int)msg_body
->msgh_descriptor_count
;
1089 kern_dsc
= (mach_msg_descriptor_t
*)(msg_body
+ 1);
1091 /* this is gross: see ipc_kmsg_copyin_body()... */
1092 if (!is_task_64bit
) {
1093 msg_size
-= (dsc_count
* 12);
1096 for (int i
= 0; i
< dsc_count
; i
++) {
1097 switch (kern_dsc
[i
].type
.type
) {
1098 case MACH_MSG_PORT_DESCRIPTOR
:
1100 if (is_task_64bit
) {
1104 case MACH_MSG_OOL_VOLATILE_DESCRIPTOR
:
1105 case MACH_MSG_OOL_DESCRIPTOR
: {
1106 mach_msg_ool_descriptor_t
*dsc
;
1107 dsc
= (mach_msg_ool_descriptor_t
*)&kern_dsc
[i
];
1108 msg_flags
|= KMSG_TRACE_FLAG_OOLMEM
;
1109 msg_size
+= dsc
->size
;
1110 if ((dsc
->size
>= MSG_OOL_SIZE_SMALL
) &&
1111 (dsc
->copy
== MACH_MSG_PHYSICAL_COPY
) &&
1113 msg_flags
|= KMSG_TRACE_FLAG_PCPY
;
1114 } else if (dsc
->size
<= MSG_OOL_SIZE_SMALL
) {
1115 msg_flags
|= KMSG_TRACE_FLAG_PCPY
;
1117 msg_flags
|= KMSG_TRACE_FLAG_VCPY
;
1119 if (is_task_64bit
) {
1123 case MACH_MSG_OOL_PORTS_DESCRIPTOR
: {
1124 mach_msg_ool_ports_descriptor_t
*dsc
;
1125 dsc
= (mach_msg_ool_ports_descriptor_t
*)&kern_dsc
[i
];
1126 num_ports
+= dsc
->count
;
1127 if (is_task_64bit
) {
1131 case MACH_MSG_GUARDED_PORT_DESCRIPTOR
:
1133 msg_flags
|= KMSG_TRACE_FLAG_GUARDED_DESC
;
1134 if (is_task_64bit
) {
1147 trailer
= (mach_msg_trailer_t
*)((vm_offset_t
)msg
+
1148 (vm_offset_t
)mach_round_msg(msg
->msgh_size
));
1149 if (trailer
->msgh_trailer_size
<= sizeof(mach_msg_security_trailer_t
)) {
1150 extern const security_token_t KERNEL_SECURITY_TOKEN
;
1151 mach_msg_security_trailer_t
*strailer
;
1152 strailer
= (mach_msg_security_trailer_t
*)trailer
;
1154 * verify the sender PID: replies from the kernel often look
1155 * like self-talk because the sending port is not reset.
1157 if (memcmp(&strailer
->msgh_sender
,
1158 &KERNEL_SECURITY_TOKEN
,
1159 sizeof(KERNEL_SECURITY_TOKEN
)) == 0) {
1161 msg_flags
&= ~(KMSG_TRACE_FLAG_APP_SRC
| KMSG_TRACE_FLAG_DAEMON_SRC
);
1165 KDBG(MACHDBG_CODE(DBG_MACH_IPC
, MACH_IPC_KMSG_INFO
) | DBG_FUNC_END
,
1166 (uintptr_t)send_pid
,
1168 (uintptr_t)msg_size
,
1170 ((msg_flags
& KMSG_TRACE_FLAGS_MASK
) << KMSG_TRACE_FLAGS_SHIFT
) |
1171 ((num_ports
& KMSG_TRACE_PORTS_MASK
) << KMSG_TRACE_PORTS_SHIFT
)
/* zone for cached ipc_kmsg_t structures */
ZONE_DECLARE(ipc_kmsg_zone, "ipc kmsgs", IKM_SAVED_KMSG_SIZE,
    ZC_CACHING | ZC_ZFREE_CLEARMEM);
static TUNABLE(bool, enforce_strict_reply, "ipc_strict_reply", false);
1183 * Forward declarations
1186 void ipc_kmsg_clean(
1189 void ipc_kmsg_clean_body(
1191 mach_msg_type_number_t number
,
1192 mach_msg_descriptor_t
*desc
);
1194 void ipc_kmsg_clean_partial(
1196 mach_msg_type_number_t number
,
1197 mach_msg_descriptor_t
*desc
,
1201 mach_msg_return_t
ipc_kmsg_copyin_body(
1205 mach_msg_option_t
*optionp
);
1209 ipc_kmsg_link_reply_context_locked(
1210 ipc_port_t reply_port
,
1211 ipc_port_t voucher_port
);
1213 static kern_return_t
1214 ipc_kmsg_validate_reply_port_locked(
1215 ipc_port_t reply_port
,
1216 mach_msg_option_t options
);
1218 static mach_msg_return_t
1219 ipc_kmsg_validate_reply_context_locked(
1220 mach_msg_option_t option
,
1221 ipc_port_t dest_port
,
1222 ipc_voucher_t voucher
,
1223 mach_port_name_t voucher_name
);
1225 /* we can't include the BSD <sys/persona.h> header here... */
1226 #ifndef PERSONA_ID_NONE
1227 #define PERSONA_ID_NONE ((uint32_t)-1)
/*
 * We keep a per-processor cache of kernel message buffers.
 * The cache saves the overhead/locking of using kalloc/kfree.
 * The per-processor cache seems to miss less than a per-thread cache,
 * and it also uses less memory.  Access to the cache doesn't
 * require locking.
 */

/*
 * Routine:	ikm_set_header
 * Purpose:
 *	Set the header (and data) pointers for a message. If the
 *	message is small, the data pointer is NULL and all the
 *	data resides within the fixed space that follows the kmsg
 *	structure; otherwise the header is placed at the end of the
 *	separately allocated data buffer.
 */
1252 mach_msg_size_t mtsize
)
1255 kmsg
->ikm_data
= data
;
1256 kmsg
->ikm_header
= (mach_msg_header_t
*)(data
+ kmsg
->ikm_size
- mtsize
);
1258 assert(kmsg
->ikm_size
== IKM_SAVED_MSG_SIZE
);
1259 kmsg
->ikm_header
= (mach_msg_header_t
*)
1260 ((vm_offset_t
)(kmsg
+ 1) + kmsg
->ikm_size
- mtsize
);
1265 * Routine: ipc_kmsg_alloc
1267 * Allocate a kernel message structure. If we can get one from
1268 * the cache, that is best. Otherwise, allocate a new one.
1274 mach_msg_size_t msg_and_trailer_size
)
1276 mach_msg_size_t max_expanded_size
;
1282 * Pad the allocation in case we need to expand the
1283 * message descriptors for user spaces with pointers larger than
1284 * the kernel's own, or vice versa. We don't know how many descriptors
1285 * there are yet, so just assume the whole body could be
1286 * descriptors (if there could be any at all).
1288 * The expansion space is left in front of the header,
1289 * because it is easier to pull the header and descriptors
1290 * forward as we process them than it is to push all the
1293 mach_msg_size_t size
= msg_and_trailer_size
- MAX_TRAILER_SIZE
;
1295 /* compare against implementation upper limit for the body */
1296 if (size
> ipc_kmsg_max_body_space
) {
1300 if (size
> sizeof(mach_msg_base_t
)) {
1301 mach_msg_size_t max_desc
= (mach_msg_size_t
)(((size
- sizeof(mach_msg_base_t
)) /
1302 sizeof(mach_msg_ool_descriptor32_t
)) *
1303 DESC_SIZE_ADJUSTMENT
);
1305 /* make sure expansion won't cause wrap */
1306 if (msg_and_trailer_size
> MACH_MSG_SIZE_MAX
- max_desc
) {
1310 max_expanded_size
= msg_and_trailer_size
+ max_desc
;
1312 max_expanded_size
= msg_and_trailer_size
;
1315 if (max_expanded_size
> IKM_SAVED_MSG_SIZE
) {
1316 data
= kheap_alloc(KHEAP_DATA_BUFFERS
, max_expanded_size
, Z_WAITOK
);
1322 max_expanded_size
= IKM_SAVED_MSG_SIZE
;
1325 kmsg
= zalloc_flags(ipc_kmsg_zone
, Z_WAITOK
| Z_ZERO
| Z_NOFAIL
);
1326 kmsg
->ikm_size
= max_expanded_size
;
1328 ikm_set_header(kmsg
, data
, msg_and_trailer_size
);
1329 assert((kmsg
->ikm_prev
= kmsg
->ikm_next
= IKM_BOGUS
));
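/*
 * Editorial usage sketch: callers size the allocation for the message plus
 * the maximal trailer, as ipc_kmsg_get() does further down:
 *
 *	ipc_kmsg_t kmsg = ipc_kmsg_alloc(size + MAX_TRAILER_SIZE);
 *	if (kmsg == IKM_NULL) {
 *		return MACH_SEND_NO_BUFFER;
 *	}
 */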
1335 * Routine: ipc_kmsg_free
 *	Free a kernel message buffer.  If the kmsg is preallocated
1338 * to a port, just "put it back (marked unused)." We have to
1339 * do this with the port locked. The port may have its hold
1340 * on our message released. In that case, we have to just
1341 * revert the message to a traditional one and free it normally.
1350 mach_msg_size_t size
= kmsg
->ikm_size
;
1353 assert(!IP_VALID(kmsg
->ikm_voucher
));
1355 KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_IPC
, MACH_IPC_KMSG_FREE
) | DBG_FUNC_NONE
,
1356 VM_KERNEL_ADDRPERM((uintptr_t)kmsg
),
1360 * Check to see if the message is bound to the port. If so,
1361 * mark it not in use. If the port isn't already dead, then
1362 * leave the message associated with it. Otherwise, free it.
1364 if (size
== IKM_SAVED_MSG_SIZE
) {
1365 if ((void *)kmsg
->ikm_header
< (void *)(kmsg
+ 1) ||
1366 (void *)kmsg
->ikm_header
>= (void *)(kmsg
+ 1) + IKM_SAVED_MSG_SIZE
) {
1367 panic("ipc_kmsg_free");
1369 port
= ikm_prealloc_inuse_port(kmsg
);
1370 if (port
!= IP_NULL
) {
1372 ikm_prealloc_clear_inuse(kmsg
, port
);
1373 if (ip_active(port
) && (port
->ip_premsg
== kmsg
)) {
1374 assert(IP_PREALLOC(port
));
1380 ip_release(port
); /* May be last reference */
1383 void *data
= kmsg
->ikm_data
;
1384 if ((void *)kmsg
->ikm_header
< data
||
1385 (void *)kmsg
->ikm_header
>= data
+ size
) {
1386 panic("ipc_kmsg_free");
1388 kheap_free(KHEAP_DATA_BUFFERS
, data
, size
);
1390 zfree(ipc_kmsg_zone
, kmsg
);
1395 * Routine: ipc_kmsg_enqueue
1402 ipc_kmsg_queue_t queue
,
1405 ipc_kmsg_t first
= queue
->ikmq_base
;
1408 if (first
== IKM_NULL
) {
1409 queue
->ikmq_base
= kmsg
;
1410 kmsg
->ikm_next
= kmsg
;
1411 kmsg
->ikm_prev
= kmsg
;
1413 last
= first
->ikm_prev
;
1414 kmsg
->ikm_next
= first
;
1415 kmsg
->ikm_prev
= last
;
1416 first
->ikm_prev
= kmsg
;
1417 last
->ikm_next
= kmsg
;
1422 * Routine: ipc_kmsg_enqueue_qos
1424 * Enqueue a kmsg, propagating qos
1425 * overrides towards the head of the queue.
1428 * whether the head of the queue had
 *	its override-qos adjusted because
1430 * of this insertion.
1434 ipc_kmsg_enqueue_qos(
1435 ipc_kmsg_queue_t queue
,
1438 ipc_kmsg_t first
= queue
->ikmq_base
;
1440 mach_msg_qos_t qos_ovr
;
1442 if (first
== IKM_NULL
) {
1443 /* insert a first message */
1444 queue
->ikmq_base
= kmsg
;
1445 kmsg
->ikm_next
= kmsg
;
1446 kmsg
->ikm_prev
= kmsg
;
1450 /* insert at the tail */
1451 prev
= first
->ikm_prev
;
1452 kmsg
->ikm_next
= first
;
1453 kmsg
->ikm_prev
= prev
;
1454 first
->ikm_prev
= kmsg
;
1455 prev
->ikm_next
= kmsg
;
1457 /* apply QoS overrides towards the head */
1458 qos_ovr
= kmsg
->ikm_qos_override
;
1459 while (prev
!= kmsg
&&
1460 qos_ovr
> prev
->ikm_qos_override
) {
1461 prev
->ikm_qos_override
= qos_ovr
;
1462 prev
= prev
->ikm_prev
;
1465 /* did we adjust everything? */
1466 return prev
== kmsg
;
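/*
 * Editorial example: with queued overrides [1, 2, 3] from head to tail and a
 * new tail message carrying override 4, the propagation loop above walks
 * toward the head raising each entry, leaving [4, 4, 4, 4] and returning
 * TRUE because the head itself was adjusted; a weaker override stops at the
 * first entry that is already at least as high.
 */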
1470 * Routine: ipc_kmsg_override_qos
1472 * Update the override for a given kmsg already
1473 * enqueued, propagating qos override adjustments
1474 * towards the head of the queue.
1477 * whether the head of the queue had
 *	its override-qos adjusted because
1479 * of this insertion.
1483 ipc_kmsg_override_qos(
1484 ipc_kmsg_queue_t queue
,
1486 mach_msg_qos_t qos_ovr
)
1488 ipc_kmsg_t first
= queue
->ikmq_base
;
1489 ipc_kmsg_t cur
= kmsg
;
1491 /* apply QoS overrides towards the head */
1492 while (qos_ovr
> cur
->ikm_qos_override
) {
1493 cur
->ikm_qos_override
= qos_ovr
;
1497 cur
= cur
->ikm_prev
;
1503 * Routine: ipc_kmsg_dequeue
1505 * Dequeue and return a kmsg.
1510 ipc_kmsg_queue_t queue
)
1514 first
= ipc_kmsg_queue_first(queue
);
1516 if (first
!= IKM_NULL
) {
1517 ipc_kmsg_rmqueue(queue
, first
);
1524 * Routine: ipc_kmsg_rmqueue
1526 * Pull a kmsg out of a queue.
1531 ipc_kmsg_queue_t queue
,
1534 ipc_kmsg_t next
, prev
;
1536 assert(queue
->ikmq_base
!= IKM_NULL
);
1538 next
= kmsg
->ikm_next
;
1539 prev
= kmsg
->ikm_prev
;
1542 assert(prev
== kmsg
);
1543 assert(queue
->ikmq_base
== kmsg
);
1545 queue
->ikmq_base
= IKM_NULL
;
1547 if (__improbable(next
->ikm_prev
!= kmsg
|| prev
->ikm_next
!= kmsg
)) {
1548 panic("ipc_kmsg_rmqueue: inconsistent prev/next pointers. "
1549 "(prev->next: %p, next->prev: %p, kmsg: %p)",
1550 prev
->ikm_next
, next
->ikm_prev
, kmsg
);
1553 if (queue
->ikmq_base
== kmsg
) {
1554 queue
->ikmq_base
= next
;
1557 next
->ikm_prev
= prev
;
1558 prev
->ikm_next
= next
;
1560 /* XXX Temporary debug logic */
1561 assert((kmsg
->ikm_next
= IKM_BOGUS
) == IKM_BOGUS
);
1562 assert((kmsg
->ikm_prev
= IKM_BOGUS
) == IKM_BOGUS
);
1566 * Routine: ipc_kmsg_queue_next
1568 * Return the kmsg following the given kmsg.
1569 * (Or IKM_NULL if it is the last one in the queue.)
1573 ipc_kmsg_queue_next(
1574 ipc_kmsg_queue_t queue
,
1579 assert(queue
->ikmq_base
!= IKM_NULL
);
1581 next
= kmsg
->ikm_next
;
1582 if (queue
->ikmq_base
== next
) {
1590 * Routine: ipc_kmsg_destroy
1592 * Destroys a kernel message. Releases all rights,
1593 * references, and memory held by the message.
1594 * Frees the message.
1604 * Destroying a message can cause more messages to be destroyed.
1605 * Curtail recursion by putting messages on the deferred
1606 * destruction queue. If this was the first message on the
1607 * queue, this instance must process the full queue.
1609 if (ipc_kmsg_delayed_destroy(kmsg
)) {
1610 ipc_kmsg_reap_delayed();
1615 * Routine: ipc_kmsg_delayed_destroy
1617 * Enqueues a kernel message for deferred destruction.
1619 * Boolean indicator that the caller is responsible to reap
1620 * deferred messages.
1624 ipc_kmsg_delayed_destroy(
1627 ipc_kmsg_queue_t queue
= &(current_thread()->ith_messages
);
1628 boolean_t first
= ipc_kmsg_queue_empty(queue
);
1630 ipc_kmsg_enqueue(queue
, kmsg
);
 * Routine:	ipc_kmsg_reap_delayed
1637 * Destroys messages from the per-thread
1638 * deferred reaping queue.
1644 ipc_kmsg_reap_delayed(void)
1646 ipc_kmsg_queue_t queue
= &(current_thread()->ith_messages
);
1650 * must leave kmsg in queue while cleaning it to assure
1651 * no nested calls recurse into here.
1653 while ((kmsg
= ipc_kmsg_queue_first(queue
)) != IKM_NULL
) {
1654 ipc_kmsg_clean(kmsg
);
1655 ipc_kmsg_rmqueue(queue
, kmsg
);
1656 ipc_kmsg_free(kmsg
);
1661 * Routine: ipc_kmsg_clean_body
1663 * Cleans the body of a kernel message.
1664 * Releases all rights, references, and memory.
1669 static unsigned int _ipc_kmsg_clean_invalid_desc
= 0;
1671 ipc_kmsg_clean_body(
1672 __unused ipc_kmsg_t kmsg
,
1673 mach_msg_type_number_t number
,
1674 mach_msg_descriptor_t
*saddr
)
1676 mach_msg_type_number_t i
;
1682 for (i
= 0; i
< number
; i
++, saddr
++) {
1683 switch (saddr
->type
.type
) {
1684 case MACH_MSG_PORT_DESCRIPTOR
: {
1685 mach_msg_port_descriptor_t
*dsc
;
1690 * Destroy port rights carried in the message
1692 if (!IP_VALID(dsc
->name
)) {
1695 ipc_object_destroy(ip_to_object(dsc
->name
), dsc
->disposition
);
1698 case MACH_MSG_OOL_VOLATILE_DESCRIPTOR
:
1699 case MACH_MSG_OOL_DESCRIPTOR
: {
1700 mach_msg_ool_descriptor_t
*dsc
;
1702 dsc
= (mach_msg_ool_descriptor_t
*)&saddr
->out_of_line
;
1705 * Destroy memory carried in the message
1707 if (dsc
->size
== 0) {
1708 assert(dsc
->address
== (void *) 0);
1710 vm_map_copy_discard((vm_map_copy_t
) dsc
->address
);
1714 case MACH_MSG_OOL_PORTS_DESCRIPTOR
: {
1715 ipc_object_t
*objects
;
1716 mach_msg_type_number_t j
;
1717 mach_msg_ool_ports_descriptor_t
*dsc
;
1719 dsc
= (mach_msg_ool_ports_descriptor_t
*)&saddr
->ool_ports
;
1720 objects
= (ipc_object_t
*) dsc
->address
;
1722 if (dsc
->count
== 0) {
1726 assert(objects
!= (ipc_object_t
*) 0);
1728 /* destroy port rights carried in the message */
1730 for (j
= 0; j
< dsc
->count
; j
++) {
1731 ipc_object_t object
= objects
[j
];
1733 if (!IO_VALID(object
)) {
1737 ipc_object_destroy(object
, dsc
->disposition
);
1740 /* destroy memory carried in the message */
1742 assert(dsc
->count
!= 0);
1745 (vm_size_t
) dsc
->count
* sizeof(mach_port_t
));
1748 case MACH_MSG_GUARDED_PORT_DESCRIPTOR
: {
1749 mach_msg_guarded_port_descriptor_t
*dsc
= (typeof(dsc
)) & saddr
->guarded_port
;
1752 * Destroy port rights carried in the message
1754 if (!IP_VALID(dsc
->name
)) {
1757 ipc_object_destroy(ip_to_object(dsc
->name
), dsc
->disposition
);
1761 _ipc_kmsg_clean_invalid_desc
++; /* don't understand this type of descriptor */
1768 * Routine: ipc_kmsg_clean_partial
1770 * Cleans a partially-acquired kernel message.
1771 * number is the index of the type descriptor
1772 * in the body of the message that contained the error.
1773 * If dolast, the memory and port rights in this last
1774 * type spec are also cleaned. In that case, number
1775 * specifies the number of port rights to clean.
1781 ipc_kmsg_clean_partial(
1783 mach_msg_type_number_t number
,
1784 mach_msg_descriptor_t
*desc
,
1788 ipc_object_t object
;
1789 mach_msg_bits_t mbits
= kmsg
->ikm_header
->msgh_bits
;
1791 /* deal with importance chain while we still have dest and voucher references */
1792 ipc_importance_clean(kmsg
);
1794 object
= ip_to_object(kmsg
->ikm_header
->msgh_remote_port
);
1795 assert(IO_VALID(object
));
1796 ipc_object_destroy_dest(object
, MACH_MSGH_BITS_REMOTE(mbits
));
1798 object
= ip_to_object(kmsg
->ikm_header
->msgh_local_port
);
1799 if (IO_VALID(object
)) {
1800 ipc_object_destroy(object
, MACH_MSGH_BITS_LOCAL(mbits
));
1803 object
= ip_to_object(kmsg
->ikm_voucher
);
1804 if (IO_VALID(object
)) {
1805 assert(MACH_MSGH_BITS_VOUCHER(mbits
) == MACH_MSG_TYPE_MOVE_SEND
);
1806 ipc_object_destroy(object
, MACH_MSG_TYPE_PORT_SEND
);
1807 kmsg
->ikm_voucher
= IP_NULL
;
1811 (void) vm_deallocate(ipc_kernel_copy_map
, paddr
, length
);
1814 ipc_kmsg_clean_body(kmsg
, number
, desc
);
1818 * Routine: ipc_kmsg_clean
1820 * Cleans a kernel message. Releases all rights,
1821 * references, and memory held by the message.
1830 ipc_object_t object
;
1831 mach_msg_bits_t mbits
;
1833 /* deal with importance chain while we still have dest and voucher references */
1834 ipc_importance_clean(kmsg
);
1836 mbits
= kmsg
->ikm_header
->msgh_bits
;
1837 object
= ip_to_object(kmsg
->ikm_header
->msgh_remote_port
);
1838 if (IO_VALID(object
)) {
1839 ipc_object_destroy_dest(object
, MACH_MSGH_BITS_REMOTE(mbits
));
1842 object
= ip_to_object(kmsg
->ikm_header
->msgh_local_port
);
1843 if (IO_VALID(object
)) {
1844 ipc_object_destroy(object
, MACH_MSGH_BITS_LOCAL(mbits
));
1847 object
= ip_to_object(kmsg
->ikm_voucher
);
1848 if (IO_VALID(object
)) {
1849 assert(MACH_MSGH_BITS_VOUCHER(mbits
) == MACH_MSG_TYPE_MOVE_SEND
);
1850 ipc_object_destroy(object
, MACH_MSG_TYPE_PORT_SEND
);
1851 kmsg
->ikm_voucher
= IP_NULL
;
1854 if (mbits
& MACH_MSGH_BITS_COMPLEX
) {
1855 mach_msg_body_t
*body
;
1857 body
= (mach_msg_body_t
*) (kmsg
->ikm_header
+ 1);
1858 ipc_kmsg_clean_body(kmsg
, body
->msgh_descriptor_count
,
1859 (mach_msg_descriptor_t
*)(body
+ 1));
1864 * Routine: ipc_kmsg_set_prealloc
1866 * Assign a kmsg as a preallocated message buffer to a port.
1872 ipc_kmsg_set_prealloc(
1876 assert(kmsg
->ikm_prealloc
== IP_NULL
);
1878 kmsg
->ikm_prealloc
= IP_NULL
;
1880 assert(port_send_turnstile(port
) == TURNSTILE_NULL
);
1881 kmsg
->ikm_turnstile
= TURNSTILE_NULL
;
1882 IP_SET_PREALLOC(port
, kmsg
);
1886 * Routine: ipc_kmsg_clear_prealloc
 *	Release the assignment of a preallocated message buffer from a port.
1893 ipc_kmsg_clear_prealloc(
1897 /* take the mqueue lock since the turnstile is protected under it */
1898 imq_lock(&port
->ip_messages
);
1900 IP_CLEAR_PREALLOC(port
, kmsg
);
1901 set_port_send_turnstile(port
, kmsg
->ikm_turnstile
);
1902 imq_unlock(&port
->ip_messages
);
1906 * Routine: ipc_kmsg_prealloc
 *	Wrapper around ipc_kmsg_alloc() to account for
1909 * header expansion requirements.
1912 ipc_kmsg_prealloc(mach_msg_size_t size
)
1914 #if defined(__LP64__)
1915 if (size
> IKM_SAVED_MSG_SIZE
- LEGACY_HEADER_SIZE_DELTA
) {
1916 panic("ipc_kmsg_prealloc");
1919 size
+= LEGACY_HEADER_SIZE_DELTA
;
1921 return ipc_kmsg_alloc(size
);
1926 * Routine: ipc_kmsg_get
1928 * Allocates a kernel message buffer.
1929 * Copies a user message to the message buffer.
1933 * MACH_MSG_SUCCESS Acquired a message buffer.
1934 * MACH_SEND_MSG_TOO_SMALL Message smaller than a header.
1935 * MACH_SEND_MSG_TOO_SMALL Message size not long-word multiple.
1936 * MACH_SEND_TOO_LARGE Message too large to ever be sent.
1937 * MACH_SEND_NO_BUFFER Couldn't allocate a message buffer.
1938 * MACH_SEND_INVALID_DATA Couldn't copy message data.
1943 mach_vm_address_t msg_addr
,
1944 mach_msg_size_t size
,
1947 mach_msg_size_t msg_and_trailer_size
;
1949 mach_msg_max_trailer_t
*trailer
;
1950 mach_msg_legacy_base_t legacy_base
;
1951 mach_msg_size_t len_copied
;
1952 legacy_base
.body
.msgh_descriptor_count
= 0;
1954 if ((size
< sizeof(mach_msg_legacy_header_t
)) || (size
& 3)) {
1955 return MACH_SEND_MSG_TOO_SMALL
;
1958 if (size
> ipc_kmsg_max_body_space
) {
1959 return MACH_SEND_TOO_LARGE
;
1962 if (size
== sizeof(mach_msg_legacy_header_t
)) {
1963 len_copied
= sizeof(mach_msg_legacy_header_t
);
1965 len_copied
= sizeof(mach_msg_legacy_base_t
);
1968 if (copyinmsg(msg_addr
, (char *)&legacy_base
, len_copied
)) {
1969 return MACH_SEND_INVALID_DATA
;
1973 * If the message claims to be complex, it must at least
1974 * have the length of a "base" message (header + dsc_count).
1976 if (len_copied
< sizeof(mach_msg_legacy_base_t
) &&
1977 (legacy_base
.header
.msgh_bits
& MACH_MSGH_BITS_COMPLEX
)) {
1978 return MACH_SEND_MSG_TOO_SMALL
;
1981 msg_addr
+= sizeof(legacy_base
.header
);
1982 #if defined(__LP64__)
1983 size
+= LEGACY_HEADER_SIZE_DELTA
;
1985 /* unreachable if !DEBUG */
1986 __unreachable_ok_push
1987 if (DEBUG_KPRINT_SYSCALL_PREDICATE(DEBUG_KPRINT_SYSCALL_IPC_MASK
)) {
1989 for (j
= 0; j
< sizeof(legacy_base
.header
); j
++) {
1990 kprintf("%02x\n", ((unsigned char*)&legacy_base
.header
)[j
]);
1993 __unreachable_ok_pop
1995 msg_and_trailer_size
= size
+ MAX_TRAILER_SIZE
;
1996 kmsg
= ipc_kmsg_alloc(msg_and_trailer_size
);
1997 if (kmsg
== IKM_NULL
) {
1998 return MACH_SEND_NO_BUFFER
;
2001 kmsg
->ikm_header
->msgh_size
= size
;
2002 kmsg
->ikm_header
->msgh_bits
= legacy_base
.header
.msgh_bits
;
2003 kmsg
->ikm_header
->msgh_remote_port
= CAST_MACH_NAME_TO_PORT(legacy_base
.header
.msgh_remote_port
);
2004 kmsg
->ikm_header
->msgh_local_port
= CAST_MACH_NAME_TO_PORT(legacy_base
.header
.msgh_local_port
);
2005 kmsg
->ikm_header
->msgh_voucher_port
= legacy_base
.header
.msgh_voucher_port
;
2006 kmsg
->ikm_header
->msgh_id
= legacy_base
.header
.msgh_id
;
2008 DEBUG_KPRINT_SYSCALL_IPC("ipc_kmsg_get header:\n"
2011 " remote_port: %p\n"
2013 " voucher_port: 0x%.8x\n"
2015 kmsg
->ikm_header
->msgh_size
,
2016 kmsg
->ikm_header
->msgh_bits
,
2017 kmsg
->ikm_header
->msgh_remote_port
,
2018 kmsg
->ikm_header
->msgh_local_port
,
2019 kmsg
->ikm_header
->msgh_voucher_port
,
2020 kmsg
->ikm_header
->msgh_id
);
2022 if (copyinmsg(msg_addr
, (char *)(kmsg
->ikm_header
+ 1), size
- (mach_msg_size_t
)sizeof(mach_msg_header_t
))) {
2023 ipc_kmsg_free(kmsg
);
2024 return MACH_SEND_INVALID_DATA
;
2027 /* unreachable if !DEBUG */
2028 __unreachable_ok_push
2029 if (DEBUG_KPRINT_SYSCALL_PREDICATE(DEBUG_KPRINT_SYSCALL_IPC_MASK
)) {
2030 kprintf("body: size: %lu\n", (size
- sizeof(mach_msg_header_t
)));
2032 for (i
= 0; i
* 4 < (size
- sizeof(mach_msg_header_t
)); i
++) {
2033 kprintf("%.4x\n", ((uint32_t *)(kmsg
->ikm_header
+ 1))[i
]);
2036 __unreachable_ok_pop
2037 DEBUG_IPC_KMSG_PRINT(kmsg
, "ipc_kmsg_get()");
 * We reserve the largest possible space for the trailer (MAX_TRAILER_SIZE).
 * However, the internal size field of the trailer (msgh_trailer_size)
 * is initialized to the minimum (sizeof(mach_msg_trailer_t)) to optimize
 * the cases where no implicit data is requested.
2045 trailer
= (mach_msg_max_trailer_t
*) ((vm_offset_t
)kmsg
->ikm_header
+ size
);
2046 bzero(trailer
, sizeof(*trailer
));
2047 trailer
->msgh_sender
= current_thread()->task
->sec_token
;
2048 trailer
->msgh_audit
= current_thread()->task
->audit_token
;
2049 trailer
->msgh_trailer_type
= MACH_MSG_TRAILER_FORMAT_0
;
2050 trailer
->msgh_trailer_size
= MACH_MSG_TRAILER_MINIMUM_SIZE
;
2053 if (trcWork
.traceMask
) {
2054 dbgTrace(0x1100, (unsigned int)kmsg
->ikm_header
->msgh_id
,
2055 (unsigned int)kmsg
->ikm_header
->msgh_remote_port
,
2056 (unsigned int)kmsg
->ikm_header
->msgh_local_port
, 0);
2060 trailer
->msgh_labels
.sender
= 0;
2062 return MACH_MSG_SUCCESS
;
2066 * Routine: ipc_kmsg_get_from_kernel
2068 * First checks for a preallocated message
 *	reserved for kernel clients.  If none is found,
 *	allocates a new kernel message buffer.
2071 * Copies a kernel message to the message buffer.
2072 * Only resource errors are allowed.
2075 * Ports in header are ipc_port_t.
2077 * MACH_MSG_SUCCESS Acquired a message buffer.
2078 * MACH_SEND_NO_BUFFER Couldn't allocate a message buffer.
2082 ipc_kmsg_get_from_kernel(
2083 mach_msg_header_t
*msg
,
2084 mach_msg_size_t size
,
2088 mach_msg_size_t msg_and_trailer_size
;
2089 mach_msg_max_trailer_t
*trailer
;
2090 ipc_port_t dest_port
;
2092 assert(size
>= sizeof(mach_msg_header_t
));
2093 assert((size
& 3) == 0);
2095 dest_port
= msg
->msgh_remote_port
;
2097 msg_and_trailer_size
= size
+ MAX_TRAILER_SIZE
;
2100 * See if the port has a pre-allocated kmsg for kernel
2101 * clients. These are set up for those kernel clients
2102 * which cannot afford to wait.
2104 if (IP_VALID(dest_port
) && IP_PREALLOC(dest_port
)) {
2105 mach_msg_size_t max_desc
= 0;
2108 if (!ip_active(dest_port
)) {
2109 ip_unlock(dest_port
);
2110 return MACH_SEND_NO_BUFFER
;
2112 assert(IP_PREALLOC(dest_port
));
2113 kmsg
= dest_port
->ip_premsg
;
2114 if (ikm_prealloc_inuse(kmsg
)) {
2115 ip_unlock(dest_port
);
2116 return MACH_SEND_NO_BUFFER
;
2118 #if !defined(__LP64__)
2119 if (msg
->msgh_bits
& MACH_MSGH_BITS_COMPLEX
) {
2120 assert(size
> sizeof(mach_msg_base_t
));
2121 max_desc
= ((mach_msg_base_t
*)msg
)->body
.msgh_descriptor_count
*
2122 DESC_SIZE_ADJUSTMENT
;
2125 if (msg_and_trailer_size
> kmsg
->ikm_size
- max_desc
) {
2126 ip_unlock(dest_port
);
2127 return MACH_SEND_TOO_LARGE
;
2129 ikm_prealloc_set_inuse(kmsg
, dest_port
);
2130 ikm_set_header(kmsg
, NULL
, msg_and_trailer_size
);
2131 ip_unlock(dest_port
);
2133 kmsg
= ipc_kmsg_alloc(msg_and_trailer_size
);
2134 if (kmsg
== IKM_NULL
) {
2135 return MACH_SEND_NO_BUFFER
;
2139 (void) memcpy((void *) kmsg
->ikm_header
, (const void *) msg
, size
);
2143 kmsg
->ikm_header
->msgh_size
= size
;
 * We reserve the largest possible space for the trailer (MAX_TRAILER_SIZE).
 * However, the internal size field of the trailer (msgh_trailer_size)
 * is initialized to the minimum (sizeof(mach_msg_trailer_t)) to
 * optimize the cases where no implicit data is requested.
2151 trailer
= (mach_msg_max_trailer_t
*)
2152 ((vm_offset_t
)kmsg
->ikm_header
+ size
);
2153 bzero(trailer
, sizeof(*trailer
));
2154 trailer
->msgh_sender
= KERNEL_SECURITY_TOKEN
;
2155 trailer
->msgh_audit
= KERNEL_AUDIT_TOKEN
;
2156 trailer
->msgh_trailer_type
= MACH_MSG_TRAILER_FORMAT_0
;
2157 trailer
->msgh_trailer_size
= MACH_MSG_TRAILER_MINIMUM_SIZE
;
2159 trailer
->msgh_labels
.sender
= 0;
2162 return MACH_MSG_SUCCESS
;
2166 * Routine: ipc_kmsg_send
2168 * Send a message. The message holds a reference
2169 * for the destination port in the msgh_remote_port field.
2171 * If unsuccessful, the caller still has possession of
2172 * the message and must do something with it. If successful,
2173 * the message is queued, given to a receiver, destroyed,
2174 * or handled directly by the kernel via mach_msg.
2178 * MACH_MSG_SUCCESS The message was accepted.
2179 * MACH_SEND_TIMED_OUT Caller still has message.
2180 * MACH_SEND_INTERRUPTED Caller still has message.
2181 * MACH_SEND_INVALID_DEST Caller still has message.
2186 mach_msg_option_t option
,
2187 mach_msg_timeout_t send_timeout
)
2190 thread_t th
= current_thread();
2191 mach_msg_return_t error
= MACH_MSG_SUCCESS
;
2192 boolean_t kernel_reply
= FALSE
;
2194 /* Check if honor qlimit flag is set on thread. */
2195 if ((th
->options
& TH_OPT_HONOR_QLIMIT
) == TH_OPT_HONOR_QLIMIT
) {
2196 /* Remove the MACH_SEND_ALWAYS flag to honor queue limit. */
2197 option
&= (~MACH_SEND_ALWAYS
);
2198 /* Add the timeout flag since the message queue might be full. */
2199 option
|= MACH_SEND_TIMEOUT
;
2200 th
->options
&= (~TH_OPT_HONOR_QLIMIT
);
2203 #if IMPORTANCE_INHERITANCE
2204 bool did_importance
= false;
2205 #if IMPORTANCE_TRACE
2206 mach_msg_id_t imp_msgh_id
= -1;
2207 int sender_pid
= -1;
2208 #endif /* IMPORTANCE_TRACE */
2209 #endif /* IMPORTANCE_INHERITANCE */
2211 /* don't allow the creation of a circular loop */
2212 if (kmsg
->ikm_header
->msgh_bits
& MACH_MSGH_BITS_CIRCULAR
) {
2213 ipc_kmsg_destroy(kmsg
);
2214 KDBG(MACHDBG_CODE(DBG_MACH_IPC
, MACH_IPC_KMSG_INFO
) | DBG_FUNC_END
, MACH_MSGH_BITS_CIRCULAR
);
2215 return MACH_MSG_SUCCESS
;
2218 ipc_voucher_send_preprocessing(kmsg
);
2220 port
= kmsg
->ikm_header
->msgh_remote_port
;
2221 assert(IP_VALID(port
));
2225 * If the destination has been guarded with a reply context, and the
2226 * sender is consuming a send-once right, then assume this is a reply
2227 * to an RPC and we need to validate that this sender is currently in
2228 * the correct context.
2230 if (enforce_strict_reply
&& port
->ip_reply_context
!= 0 &&
2231 ((option
& MACH_SEND_KERNEL
) == 0) &&
2232 MACH_MSGH_BITS_REMOTE(kmsg
->ikm_header
->msgh_bits
) == MACH_MSG_TYPE_PORT_SEND_ONCE
) {
2233 error
= ipc_kmsg_validate_reply_context_locked(option
, port
, th
->ith_voucher
, th
->ith_voucher_name
);
2234 if (error
!= MACH_MSG_SUCCESS
) {
2240 #if IMPORTANCE_INHERITANCE
2242 #endif /* IMPORTANCE_INHERITANCE */
2244 * Can't deliver to a dead port.
2245 * However, we can pretend it got sent
2246 * and was then immediately destroyed.
2248 if (!ip_active(port
)) {
2251 if (MACH_NODE_VALID(kmsg
->ikm_node
) && FPORT_VALID(port
->ip_messages
.imq_fport
)) {
2252 flipc_msg_ack(kmsg
->ikm_node
, &port
->ip_messages
, FALSE
);
2255 if (did_importance
) {
2257 * We're going to pretend we delivered this message
2258 * successfully, and just eat the kmsg. However, the
2259 * kmsg is actually visible via the importance_task!
2260 * We need to cleanup this linkage before we destroy
2261 * the message, and more importantly before we set the
2262 * msgh_remote_port to NULL. See: 34302571
2264 ipc_importance_clean(kmsg
);
2266 ip_release(port
); /* JMM - Future: release right, not just ref */
2267 kmsg
->ikm_header
->msgh_remote_port
= MACH_PORT_NULL
;
2268 ipc_kmsg_destroy(kmsg
);
2269 KDBG(MACHDBG_CODE(DBG_MACH_IPC
, MACH_IPC_KMSG_INFO
) | DBG_FUNC_END
, MACH_SEND_INVALID_DEST
);
2270 return MACH_MSG_SUCCESS
;
2273 if (port
->ip_receiver
== ipc_space_kernel
) {
2275 * We can check ip_receiver == ipc_space_kernel
2276 * before checking that the port is active because
2277 * ipc_port_dealloc_kernel clears ip_receiver
2278 * before destroying a kernel port.
2280 require_ip_active(port
);
2281 port
->ip_messages
.imq_seqno
++;
2284 current_task()->messages_sent
++;
2287 * Call the server routine, and get the reply message to send.
2289 kmsg
= ipc_kobject_server(kmsg
, option
);
2290 if (kmsg
== IKM_NULL
) {
2291 return MACH_MSG_SUCCESS
;
2294 /* sign the reply message */
2297 /* restart the KMSG_INFO tracing for the reply message */
2298 KDBG(MACHDBG_CODE(DBG_MACH_IPC
, MACH_IPC_KMSG_INFO
) | DBG_FUNC_START
);
2299 port
= kmsg
->ikm_header
->msgh_remote_port
;
2300 assert(IP_VALID(port
));
2302 /* fall thru with reply - same options */
2303 kernel_reply
= TRUE
;
2304 if (!ip_active(port
)) {
2305 error
= MACH_SEND_INVALID_DEST
;
2309 #if IMPORTANCE_INHERITANCE
2311 * Need to see if this message needs importance donation and/or
2312 * propagation. That routine can drop the port lock temporarily.
2313 * If it does we'll have to revalidate the destination.
2315 if (!did_importance
) {
2316 did_importance
= true;
2317 if (ipc_importance_send(kmsg
, option
)) {
2321 #endif /* IMPORTANCE_INHERITANCE */
2323 if (error
!= MACH_MSG_SUCCESS
) {
2327 * We have a valid message and a valid reference on the port.
2328 * we can unlock the port and call mqueue_send() on its message
2329 * queue. Lock message queue while port is locked.
2331 imq_lock(&port
->ip_messages
);
2333 ipc_special_reply_port_msg_sent(port
);
2337 error
= ipc_mqueue_send(&port
->ip_messages
, kmsg
, option
,
2341 #if IMPORTANCE_INHERITANCE
2342 if (did_importance
) {
2343 __unused
int importance_cleared
= 0;
2345 case MACH_SEND_TIMED_OUT
:
2346 case MACH_SEND_NO_BUFFER
:
2347 case MACH_SEND_INTERRUPTED
:
2348 case MACH_SEND_INVALID_DEST
:
2350 * We still have the kmsg and its
2351 * reference on the port. But we
2352 * have to back out the importance
2355 * The port could have changed hands,
2356 * be inflight to another destination,
2357 * etc... But in those cases our
2358 * back-out will find the new owner
2359 * (and all the operations that
2360 * transferred the right should have
2361 * applied their own boost adjustments
2362 * to the old owner(s)).
2364 importance_cleared
= 1;
2365 ipc_importance_clean(kmsg
);
2368 case MACH_MSG_SUCCESS
:
2372 #if IMPORTANCE_TRACE
2373 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE
, (IMPORTANCE_CODE(IMP_MSG
, IMP_MSG_SEND
)) | DBG_FUNC_END
,
2374 task_pid(current_task()), sender_pid
, imp_msgh_id
, importance_cleared
, 0);
2375 #endif /* IMPORTANCE_TRACE */
2377 #endif /* IMPORTANCE_INHERITANCE */
2380 * If the port has been destroyed while we wait, treat the message
2381 * as a successful delivery (like we do for an inactive port).
2383 if (error
== MACH_SEND_INVALID_DEST
) {
2385 if (MACH_NODE_VALID(kmsg
->ikm_node
) && FPORT_VALID(port
->ip_messages
.imq_fport
)) {
2386 flipc_msg_ack(kmsg
->ikm_node
, &port
->ip_messages
, FALSE
);
2389 ip_release(port
); /* JMM - Future: release right, not just ref */
2390 kmsg
->ikm_header
->msgh_remote_port
= MACH_PORT_NULL
;
2391 ipc_kmsg_destroy(kmsg
);
2392 KDBG(MACHDBG_CODE(DBG_MACH_IPC
, MACH_IPC_KMSG_INFO
) | DBG_FUNC_END
, MACH_SEND_INVALID_DEST
);
2393 return MACH_MSG_SUCCESS
;
2396 if (error
!= MACH_MSG_SUCCESS
&& kernel_reply
) {
2398 * Kernel reply messages that fail can't be allowed to
2399 * pseudo-receive on error conditions. We need to just treat
2400 * the message as a successful delivery.
2403 if (MACH_NODE_VALID(kmsg
->ikm_node
) && FPORT_VALID(port
->ip_messages
.imq_fport
)) {
2404 flipc_msg_ack(kmsg
->ikm_node
, &port
->ip_messages
, FALSE
);
2407 ip_release(port
); /* JMM - Future: release right, not just ref */
2408 kmsg
->ikm_header
->msgh_remote_port
= MACH_PORT_NULL
;
2409 ipc_kmsg_destroy(kmsg
);
2410 KDBG(MACHDBG_CODE(DBG_MACH_IPC
, MACH_IPC_KMSG_INFO
) | DBG_FUNC_END
, error
);
2411 return MACH_MSG_SUCCESS
;
/*
 *	Routine:	ipc_kmsg_put
 *	Purpose:
 *		Copies a message buffer to a user message.
 *		Copies only the specified number of bytes.
 *		Frees the message buffer.
 *	Conditions:
 *		Nothing locked.  The message buffer must have clean
 *		header fields.
 *	Returns:
 *		MACH_MSG_SUCCESS	Copied data out of message buffer.
 *		MACH_RCV_INVALID_DATA	Couldn't copy to user message.
 */
mach_msg_return_t
ipc_kmsg_put(
    ipc_kmsg_t              kmsg,
    mach_msg_option_t       option,
    mach_vm_address_t       rcv_addr,
    mach_msg_size_t         rcv_size,
    mach_msg_size_t         trailer_size,
    mach_msg_size_t         *sizep)
{
    mach_msg_size_t size = kmsg->ikm_header->msgh_size + trailer_size;
    mach_msg_return_t mr;

    DEBUG_IPC_KMSG_PRINT(kmsg, "ipc_kmsg_put()");

    DEBUG_KPRINT_SYSCALL_IPC("ipc_kmsg_put header:\n"
        "  size:		0x%.8x\n"
        "  bits:		0x%.8x\n"
        "  remote_port:	%p\n"
        "  local_port:	%p\n"
        "  voucher_port:	0x%.8x\n"
        "  id:		%.8d\n",
        kmsg->ikm_header->msgh_size,
        kmsg->ikm_header->msgh_bits,
        kmsg->ikm_header->msgh_remote_port,
        kmsg->ikm_header->msgh_local_port,
        kmsg->ikm_header->msgh_voucher_port,
        kmsg->ikm_header->msgh_id);

#if defined(__LP64__)
    if (current_task() != kernel_task) { /* don't if receiver expects fully-cooked in-kernel msg; */
        mach_msg_legacy_header_t *legacy_header =
            (mach_msg_legacy_header_t *)((vm_offset_t)(kmsg->ikm_header) + LEGACY_HEADER_SIZE_DELTA);

        mach_msg_bits_t bits = kmsg->ikm_header->msgh_bits;
        mach_msg_size_t msg_size = kmsg->ikm_header->msgh_size;
        mach_port_name_t remote_port = CAST_MACH_PORT_TO_NAME(kmsg->ikm_header->msgh_remote_port);
        mach_port_name_t local_port = CAST_MACH_PORT_TO_NAME(kmsg->ikm_header->msgh_local_port);
        mach_port_name_t voucher_port = kmsg->ikm_header->msgh_voucher_port;
        mach_msg_id_t id = kmsg->ikm_header->msgh_id;

        legacy_header->msgh_id = id;
        legacy_header->msgh_local_port = local_port;
        legacy_header->msgh_remote_port = remote_port;
        legacy_header->msgh_voucher_port = voucher_port;
        legacy_header->msgh_size = msg_size - LEGACY_HEADER_SIZE_DELTA;
        legacy_header->msgh_bits = bits;

        size -= LEGACY_HEADER_SIZE_DELTA;
        kmsg->ikm_header = (mach_msg_header_t *)legacy_header;
    }
#endif /* __LP64__ */

    /* unreachable if !DEBUG */
    __unreachable_ok_push
    if (DEBUG_KPRINT_SYSCALL_PREDICATE(DEBUG_KPRINT_SYSCALL_IPC_MASK)) {
        kprintf("ipc_kmsg_put header+body: %d\n", (size));
        uint32_t i;
        for (i = 0; i * 4 < size; i++) {
            kprintf("%.4x\n", ((uint32_t *)kmsg->ikm_header)[i]);
        }
        kprintf("type: %d\n", ((mach_msg_type_descriptor_t *)(((mach_msg_base_t *)kmsg->ikm_header) + 1))->type);
    }
    __unreachable_ok_pop

    /* Re-Compute target address if using stack-style delivery */
    if (option & MACH_RCV_STACK) {
        rcv_addr += rcv_size - size;
    }

    if (copyoutmsg((const char *) kmsg->ikm_header, rcv_addr, size)) {
        mr = MACH_RCV_INVALID_DATA;
        size = 0;
    } else {
        mr = MACH_MSG_SUCCESS;
    }

    KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_KMSG_LINK) | DBG_FUNC_NONE,
        (rcv_addr >= VM_MIN_KERNEL_AND_KEXT_ADDRESS ||
        rcv_addr + size >= VM_MIN_KERNEL_AND_KEXT_ADDRESS) ? (uintptr_t)0 : (uintptr_t)rcv_addr,
        VM_KERNEL_ADDRPERM((uintptr_t)kmsg),
        1 /* this is on the receive/copyout path */,
        0, 0);

    ipc_kmsg_free(kmsg);

    if (sizep) {
        *sizep = size;
    }
    return mr;
}
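
/*
 * Note on the LP64 header contraction above: the in-kernel header carries
 * real ipc_port_t pointers, while the user-visible mach_msg_legacy_header_t
 * carries 32-bit port names.  LEGACY_HEADER_SIZE_DELTA is the size difference
 * between those two layouts, which is why both msgh_size and the copyout size
 * shrink by that amount before the header reaches user space.
 */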
/*
 *	Routine:	ipc_kmsg_put_to_kernel
 *	Purpose:
 *		Copies a message buffer to a kernel message.
 *		Frees the message buffer.
 *		No errors allowed.
 *	Conditions:
 *		Nothing locked.
 */
void
ipc_kmsg_put_to_kernel(
    mach_msg_header_t       *msg,
    ipc_kmsg_t              kmsg,
    mach_msg_size_t         size)
{
    (void) memcpy((void *) msg, (const void *) kmsg->ikm_header, size);

    ipc_kmsg_free(kmsg);
}
static pthread_priority_compact_t
ipc_get_current_thread_priority(void)
{
    thread_t thread = current_thread();
    thread_qos_t qos;
    int relpri;

    qos = thread_get_requested_qos(thread, &relpri);
    if (!qos) {
        qos = thread_user_promotion_qos_for_pri(thread->base_pri);
        relpri = 0;
    }
    return _pthread_priority_make_from_thread_qos(qos, relpri, 0);
}
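
/*
 * The helper above resolves the sender thread's effective QoS when the kmsg
 * has no usable priority of its own.  ipc_kmsg_set_qos() below first honors an
 * explicit MACH_SEND_OVERRIDE priority, then the voucher's pthread priority,
 * then (with MACH_SEND_PROPAGATE_QOS) the current thread's priority, and
 * finally falls back to MACH_MSG_PRIORITY_UNSPECIFIED.
 */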
static kern_return_t
ipc_kmsg_set_qos(
    ipc_kmsg_t kmsg,
    mach_msg_option_t options,
    mach_msg_priority_t priority)
{
    kern_return_t kr;
    ipc_port_t special_reply_port = kmsg->ikm_header->msgh_local_port;
    ipc_port_t dest_port = kmsg->ikm_header->msgh_remote_port;

    if ((options & MACH_SEND_OVERRIDE) &&
        !mach_msg_priority_is_pthread_priority(priority)) {
        mach_msg_qos_t qos = mach_msg_priority_qos(priority);
        int relpri = mach_msg_priority_relpri(priority);
        mach_msg_qos_t ovr = mach_msg_priority_overide_qos(priority);

        kmsg->ikm_ppriority = _pthread_priority_make_from_thread_qos(qos, relpri, 0);
        kmsg->ikm_qos_override = MAX(qos, ovr);
    } else {
        kr = ipc_get_pthpriority_from_kmsg_voucher(kmsg, &kmsg->ikm_ppriority);
        if (kr != KERN_SUCCESS) {
            if (options & MACH_SEND_PROPAGATE_QOS) {
                kmsg->ikm_ppriority = ipc_get_current_thread_priority();
            } else {
                kmsg->ikm_ppriority = MACH_MSG_PRIORITY_UNSPECIFIED;
            }
        }

        if (options & MACH_SEND_OVERRIDE) {
            mach_msg_qos_t qos = _pthread_priority_thread_qos(kmsg->ikm_ppriority);
            mach_msg_qos_t ovr = _pthread_priority_thread_qos(priority);
            kmsg->ikm_qos_override = MAX(qos, ovr);
        } else {
            kmsg->ikm_qos_override = _pthread_priority_thread_qos(kmsg->ikm_ppriority);
        }
    }

    kr = KERN_SUCCESS;

    if (IP_VALID(special_reply_port) &&
        MACH_MSGH_BITS_LOCAL(kmsg->ikm_header->msgh_bits) == MACH_MSG_TYPE_PORT_SEND_ONCE) {
        if ((options & MACH_SEND_SYNC_OVERRIDE)) {
            boolean_t sync_bootstrap_checkin = !!(options & MACH_SEND_SYNC_BOOTSTRAP_CHECKIN);
            /*
             * Link the destination port to special reply port and make sure that
             * dest port has a send turnstile, else allocate one.
             */
            ipc_port_link_special_reply_port(special_reply_port,
                dest_port, sync_bootstrap_checkin);
        }
    }

    return kr;
}
static void
ipc_kmsg_allow_immovable_send(
    ipc_kmsg_t   kmsg,
    ipc_entry_t  dest_entry)
{
    ipc_object_t object = dest_entry->ie_object;
    /*
     * If the dest port is a kobject, allow copyin of immovable send
     * rights in the message body to succeed
     */
    if (IO_VALID(object) && io_is_kobject(object)) {
        kmsg->ikm_flags |= IPC_KMSG_FLAGS_ALLOW_IMMOVABLE_SEND;
    }
}
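
/*
 * IPC_KMSG_FLAGS_ALLOW_IMMOVABLE_SEND set above is consumed later during body
 * copyin: kmsg->ikm_flags is handed to ipc_object_copyin() for every port
 * descriptor, so immovable send rights in the body are only accepted when the
 * destination is a kobject port.
 */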
/*
 *	Routine:	ipc_kmsg_link_reply_context_locked
 *	Purpose:
 *		Link any required context from the sending voucher
 *		to the reply port. The ipc_kmsg_copyin function will
 *		enforce that the sender calls mach_msg in this context.
 *	Conditions:
 *		reply port is locked
 */
static void
ipc_kmsg_link_reply_context_locked(
    ipc_port_t reply_port,
    ipc_port_t voucher_port)
{
    kern_return_t __assert_only kr;
    uint32_t persona_id = 0;
    ipc_voucher_t voucher;

    ip_lock_held(reply_port);

    if (!ip_active(reply_port)) {
        return;
    }

    voucher = convert_port_to_voucher(voucher_port);

    kr = bank_get_bank_ledger_thread_group_and_persona(voucher, NULL, NULL, &persona_id);
    assert(kr == KERN_SUCCESS);
    ipc_voucher_release(voucher);

    if (persona_id == 0 || persona_id == PERSONA_ID_NONE) {
        /* there was no persona context to record */
        return;
    }

    /*
     * Set the persona_id as the context on the reply port.
     * This will force the thread that replies to have adopted a voucher
     * with a matching persona.
     */
    reply_port->ip_reply_context = persona_id;
}
static kern_return_t
ipc_kmsg_validate_reply_port_locked(ipc_port_t reply_port, mach_msg_option_t options)
{
    ip_lock_held(reply_port);

    if (!ip_active(reply_port)) {
        /*
         * Ideally, we would enforce that the reply receive right is
         * active, but asynchronous XPC cancellation destroys the
         * receive right, so we just have to return success here.
         */
        return KERN_SUCCESS;
    }

    if (options & MACH_SEND_MSG) {
        /*
         * If the reply port is active, then it should not be
         * in-transit, and the receive right should be in the caller's
         * IPC space.
         */
        if (!reply_port->ip_receiver_name || reply_port->ip_receiver != current_task()->itk_space) {
            return KERN_INVALID_CAPABILITY;
        }

        /*
         * A port used as a reply port in an RPC should have exactly 1
         * extant send-once right which we either just made or are
         * moving as part of the IPC.
         */
        if (reply_port->ip_sorights != 1) {
            return KERN_INVALID_CAPABILITY;
        }
        /*
         * XPC uses an extra send-right to keep the name of the reply
         * right around through cancellation. That makes it harder to
         * enforce a particular semantic here, so for now, we say that
         * you can have a maximum of 1 send right (in addition to your
         * send once right). In the future, it would be great to lock
         * this down even further.
         */
        if (reply_port->ip_srights > 1) {
            return KERN_INVALID_CAPABILITY;
        }

        /*
         * The sender can also specify that the receive right should
         * be immovable. Note that this check only applies to
         * send-only operations. Combined send/receive or rcv-only
         * operations can specify an immovable receive right by
         * opt-ing into guarded descriptors (MACH_RCV_GUARDED_DESC)
         * and using the MACH_MSG_STRICT_REPLY options flag.
         */
        if (MACH_SEND_REPLY_IS_IMMOVABLE(options)) {
            if (!reply_port->ip_immovable_receive) {
                return KERN_INVALID_CAPABILITY;
            }
        }
    }

    /*
     * don't enforce this yet: need a better way of indicating the
     * receiver wants this...
     */
#if 0
    if (MACH_RCV_WITH_IMMOVABLE_REPLY(options)) {
        if (!reply_port->ip_immovable_receive) {
            return KERN_INVALID_CAPABILITY;
        }
    }
#endif /* 0 */

    return KERN_SUCCESS;
}
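
/*
 * ipc_kmsg_validate_reply_port_locked() is used by ipc_kmsg_copyin_header()
 * when the sender opted into strict-reply semantics (the enforce_strict_reply
 * tunable plus MACH_SEND_WITH_STRICT_REPLY); see the check near the end of
 * that routine below.
 */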
/*
 *	Routine:	ipc_kmsg_validate_reply_context_locked
 *	Purpose:
 *		Validate that the current thread is running in the context
 *		required by the destination port.
 *	Conditions:
 *		dest_port is locked
 *	Returns:
 *		MACH_MSG_SUCCESS on success.
 *		On error, an EXC_GUARD exception is also raised.
 *		This function *always* resets the port reply context.
 */
static mach_msg_return_t
ipc_kmsg_validate_reply_context_locked(
    mach_msg_option_t option,
    ipc_port_t dest_port,
    ipc_voucher_t voucher,
    mach_port_name_t voucher_name)
{
    uint32_t dest_ctx = dest_port->ip_reply_context;
    dest_port->ip_reply_context = 0;

    if (!ip_active(dest_port)) {
        return MACH_MSG_SUCCESS;
    }

    if (voucher == IPC_VOUCHER_NULL || !MACH_PORT_VALID(voucher_name)) {
        if ((option & MACH_SEND_KERNEL) == 0) {
            mach_port_guard_exception(voucher_name, 0,
                (MPG_FLAGS_STRICT_REPLY_INVALID_VOUCHER | dest_ctx),
                kGUARD_EXC_STRICT_REPLY);
        }
        return MACH_SEND_INVALID_CONTEXT;
    }

    kern_return_t __assert_only kr;
    uint32_t persona_id = 0;
    kr = bank_get_bank_ledger_thread_group_and_persona(voucher, NULL, NULL, &persona_id);
    assert(kr == KERN_SUCCESS);

    if (dest_ctx != persona_id) {
        if ((option & MACH_SEND_KERNEL) == 0) {
            mach_port_guard_exception(voucher_name, 0,
                (MPG_FLAGS_STRICT_REPLY_MISMATCHED_PERSONA | ((((uint64_t)persona_id << 32) & MPG_FLAGS_STRICT_REPLY_MASK) | dest_ctx)),
                kGUARD_EXC_STRICT_REPLY);
        }
        return MACH_SEND_INVALID_CONTEXT;
    }

    return MACH_MSG_SUCCESS;
}
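
/*
 * Illustrative example (values made up, not from this file): after a
 * successful ipc_kmsg_copyin_header() a user header that arrived as
 *
 *	msgh_remote_port = 0x1403	(send-right name)
 *	msgh_local_port  = 0x1303	(make-send-once reply name)
 *
 * holds ipc_port_t pointers instead of names, and msgh_bits has been rewritten
 * with the copied-in dispositions via MACH_MSGH_BITS_SET().
 */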
/*
 *	Routine:	ipc_kmsg_copyin_header
 *	Purpose:
 *		"Copy-in" port rights in the header of a message.
 *		Operates atomically; if it doesn't succeed the
 *		message header and the space are left untouched.
 *		If it does succeed the remote/local port fields
 *		contain object pointers instead of port names,
 *		and the bits field is updated.  The destination port
 *		will be a valid port pointer.
 *	Conditions:
 *		Nothing locked.
 *	Returns:
 *		MACH_MSG_SUCCESS	Successful copyin.
 *		MACH_SEND_INVALID_HEADER
 *			Illegal value in the message header bits.
 *		MACH_SEND_INVALID_DEST	The space is dead.
 *		MACH_SEND_INVALID_DEST	Can't copyin destination port.
 *			(Either KERN_INVALID_NAME or KERN_INVALID_RIGHT.)
 *		MACH_SEND_INVALID_REPLY	Can't copyin reply port.
 *			(Either KERN_INVALID_NAME or KERN_INVALID_RIGHT.)
 */
mach_msg_return_t
ipc_kmsg_copyin_header(
    ipc_kmsg_t              kmsg,
    ipc_space_t             space,
    mach_msg_priority_t     priority,
    mach_msg_option_t       *optionp)
{
    mach_msg_header_t *msg = kmsg->ikm_header;
    mach_msg_bits_t mbits = msg->msgh_bits & MACH_MSGH_BITS_USER;
    mach_port_name_t dest_name = CAST_MACH_PORT_TO_NAME(msg->msgh_remote_port);
    mach_port_name_t reply_name = CAST_MACH_PORT_TO_NAME(msg->msgh_local_port);
    mach_port_name_t voucher_name = MACH_PORT_NULL;
    kern_return_t kr;

    mach_msg_type_name_t dest_type = MACH_MSGH_BITS_REMOTE(mbits);
    mach_msg_type_name_t reply_type = MACH_MSGH_BITS_LOCAL(mbits);
    mach_msg_type_name_t voucher_type = MACH_MSGH_BITS_VOUCHER(mbits);
    ipc_object_t dest_port = IO_NULL;
    ipc_object_t reply_port = IO_NULL;
    ipc_port_t dest_soright = IP_NULL;
    ipc_port_t reply_soright = IP_NULL;
    ipc_port_t voucher_soright = IP_NULL;
    ipc_port_t release_port = IP_NULL;
    ipc_port_t voucher_port = IP_NULL;
    ipc_port_t voucher_release_port = IP_NULL;
    ipc_entry_t dest_entry = IE_NULL;
    ipc_entry_t reply_entry = IE_NULL;
    ipc_entry_t voucher_entry = IE_NULL;
    int assertcnt = 0;
#if IMPORTANCE_INHERITANCE
    boolean_t needboost = FALSE;
#endif /* IMPORTANCE_INHERITANCE */

    if ((mbits != msg->msgh_bits) ||
        (!MACH_MSG_TYPE_PORT_ANY_SEND(dest_type)) ||
        ((reply_type == 0) ?
        (reply_name != MACH_PORT_NULL) :
        !MACH_MSG_TYPE_PORT_ANY_SEND(reply_type))) {
        return MACH_SEND_INVALID_HEADER;
    }

    if (!MACH_PORT_VALID(dest_name)) {
        return MACH_SEND_INVALID_DEST;
    }

    is_write_lock(space);
    if (!is_active(space)) {
        is_write_unlock(space);
        return MACH_SEND_INVALID_DEST;
    }
    /* space locked and active */

    /*
     *	If there is a voucher specified, make sure the disposition is
     *	valid and the entry actually refers to a voucher port.  Don't
     *	actually copy in until we validate destination and reply.
     */
    if (voucher_type != MACH_MSGH_BITS_ZERO) {
        voucher_name = msg->msgh_voucher_port;

        if (voucher_name == MACH_PORT_DEAD ||
            (voucher_type != MACH_MSG_TYPE_MOVE_SEND &&
            voucher_type != MACH_MSG_TYPE_COPY_SEND)) {
            is_write_unlock(space);
            if ((*optionp & MACH_SEND_KERNEL) == 0) {
                mach_port_guard_exception(voucher_name, 0, 0, kGUARD_EXC_SEND_INVALID_VOUCHER);
            }
            return MACH_SEND_INVALID_VOUCHER;
        }

        if (voucher_name != MACH_PORT_NULL) {
            voucher_entry = ipc_entry_lookup(space, voucher_name);
            if (voucher_entry == IE_NULL ||
                (voucher_entry->ie_bits & MACH_PORT_TYPE_SEND) == 0 ||
                io_kotype(voucher_entry->ie_object) != IKOT_VOUCHER) {
                is_write_unlock(space);
                if ((*optionp & MACH_SEND_KERNEL) == 0) {
                    mach_port_guard_exception(voucher_name, 0, 0, kGUARD_EXC_SEND_INVALID_VOUCHER);
                }
                return MACH_SEND_INVALID_VOUCHER;
            }
        } else {
            voucher_type = MACH_MSG_TYPE_MOVE_SEND;
        }
    }

    if (enforce_strict_reply && MACH_SEND_WITH_STRICT_REPLY(*optionp) &&
        (!MACH_PORT_VALID(reply_name) ||
        ((reply_type != MACH_MSG_TYPE_MAKE_SEND_ONCE) && (reply_type != MACH_MSG_TYPE_MOVE_SEND_ONCE))
        )) {
        /*
         * The caller cannot enforce a reply context with an invalid
         * reply port name, or a non-send_once reply disposition.
         */
        is_write_unlock(space);
        if ((*optionp & MACH_SEND_KERNEL) == 0) {
            mach_port_guard_exception(reply_name, 0,
                (MPG_FLAGS_STRICT_REPLY_INVALID_REPLY_DISP | reply_type),
                kGUARD_EXC_STRICT_REPLY);
        }
        return MACH_SEND_INVALID_REPLY;
    }

    /*
     *	Handle combinations of validating destination and reply; along
     *	with copying in destination, reply, and voucher in an atomic way.
     */
    if (dest_name == voucher_name) {
        /*
         *	If the destination name is the same as the voucher name,
         *	the voucher_entry must already be known.  Either that or
         *	the destination name is MACH_PORT_NULL (i.e. invalid).
         */
        dest_entry = voucher_entry;
        if (dest_entry == IE_NULL) {
            goto invalid_dest;
        }

        /* Check if dest port allows immovable send rights to be sent in the kmsg body */
        ipc_kmsg_allow_immovable_send(kmsg, dest_entry);

        /*
         *	Make sure a future copyin of the reply port will succeed.
         *	Once we start copying in the dest/voucher pair, we can't
         *	back out.
         */
        if (MACH_PORT_VALID(reply_name)) {
            assert(reply_type != 0); /* because reply_name not null */

            /* It is just WRONG if dest, voucher, and reply are all the same. */
            if (voucher_name == reply_name) {
                goto invalid_reply;
            }
            reply_entry = ipc_entry_lookup(space, reply_name);
            if (reply_entry == IE_NULL) {
                goto invalid_reply;
            }
            assert(dest_entry != reply_entry); /* names are not equal */
            if (!ipc_right_copyin_check_reply(space, reply_name, reply_entry, reply_type)) {
                goto invalid_reply;
            }
        }

        /*
         *	Do the joint copyin of the dest disposition and
         *	voucher disposition from the one entry/port.  We
         *	already validated that the voucher copyin would
         *	succeed (above).  So, any failure in combining
         *	the copyins can be blamed on the destination.
         */
        kr = ipc_right_copyin_two(space, dest_name, dest_entry,
            dest_type, voucher_type, &dest_port, &dest_soright,
            &release_port);
        if (kr != KERN_SUCCESS) {
            assert(kr != KERN_INVALID_CAPABILITY);
            goto invalid_dest;
        }
        voucher_port = ip_object_to_port(dest_port);

        /*
         * could not have been one of these dispositions,
         * validated the port was a true kernel voucher port above,
         * AND was successfully able to copyin both dest and voucher.
         */
        assert(dest_type != MACH_MSG_TYPE_MAKE_SEND);
        assert(dest_type != MACH_MSG_TYPE_MAKE_SEND_ONCE);
        assert(dest_type != MACH_MSG_TYPE_MOVE_SEND_ONCE);

        /*
         *	Perform the delayed reply right copyin (guaranteed success).
         */
        if (reply_entry != IE_NULL) {
            kr = ipc_right_copyin(space, reply_name, reply_entry,
                reply_type, IPC_RIGHT_COPYIN_FLAGS_DEADOK,
                &reply_port, &reply_soright,
                &release_port, &assertcnt, 0, NULL);
            assert(assertcnt == 0);
            assert(kr == KERN_SUCCESS);
        }
    } else {
        if (dest_name == reply_name) {
            /*
             *	Destination and reply ports are the same!
             *	This is very similar to the case where the
             *	destination and voucher ports were the same
             *	(except the reply port disposition is not
             *	previously validated).
             */
            dest_entry = ipc_entry_lookup(space, dest_name);
            if (dest_entry == IE_NULL) {
                goto invalid_dest;
            }

            ipc_kmsg_allow_immovable_send(kmsg, dest_entry);

            reply_entry = dest_entry;
            assert(reply_type != 0); /* because name not null */

            /*
             *	Pre-validate that the reply right can be copied in by itself
             */
            if (!ipc_right_copyin_check_reply(space, reply_name, reply_entry, reply_type)) {
                goto invalid_reply;
            }

            /*
             *	Do the joint copyin of the dest disposition and
             *	reply disposition from the one entry/port.
             */
            kr = ipc_right_copyin_two(space, dest_name, dest_entry,
                dest_type, reply_type, &dest_port, &dest_soright,
                &release_port);
            if (kr == KERN_INVALID_CAPABILITY) {
                goto invalid_reply;
            } else if (kr != KERN_SUCCESS) {
                goto invalid_dest;
            }
            reply_port = dest_port;
        } else {
            /*
             *	Handle destination and reply independently, as
             *	they are independent entries (even if the entries
             *	refer to the same port).
             *
             *	This can be the tough case to make atomic.
             *
             *	The difficult problem is serializing with port death.
             *	The bad case is when dest_port dies after its copyin,
             *	reply_port dies before its copyin, and dest_port dies before
             *	reply_port.  Then the copyins operated as if dest_port was
             *	alive and reply_port was dead, which shouldn't have happened
             *	because they died in the other order.
             *
             *	Note that it is easy for a user task to tell if
             *	a copyin happened before or after a port died.
             *	If a port dies before copyin, a dead-name notification
             *	is generated and the dead name's urefs are incremented,
             *	and if the copyin happens first, a port-deleted
             *	notification is generated.
             *
             *	Even so, avoiding that potentially detectable race is too
             *	expensive - and no known code cares about it.  So, we just
             *	do the expedient thing and copy them in one after the other.
             */
            dest_entry = ipc_entry_lookup(space, dest_name);
            if (dest_entry == IE_NULL) {
                goto invalid_dest;
            }
            assert(dest_entry != voucher_entry);

            ipc_kmsg_allow_immovable_send(kmsg, dest_entry);

            /*
             *	Make sure reply port entry is valid before dest copyin.
             */
            if (MACH_PORT_VALID(reply_name)) {
                if (reply_name == voucher_name) {
                    goto invalid_reply;
                }
                reply_entry = ipc_entry_lookup(space, reply_name);
                if (reply_entry == IE_NULL) {
                    goto invalid_reply;
                }
                assert(dest_entry != reply_entry); /* names are not equal */
                assert(reply_type != 0); /* because reply_name not null */

                if (!ipc_right_copyin_check_reply(space, reply_name, reply_entry, reply_type)) {
                    goto invalid_reply;
                }
            }

            /*
             *	copyin the destination.
             */
            kr = ipc_right_copyin(space, dest_name, dest_entry,
                dest_type, (IPC_RIGHT_COPYIN_FLAGS_ALLOW_IMMOVABLE_SEND |
                IPC_RIGHT_COPYIN_FLAGS_ALLOW_DEAD_SEND_ONCE),
                &dest_port, &dest_soright,
                &release_port, &assertcnt, 0, NULL);
            assert(assertcnt == 0);
            if (kr != KERN_SUCCESS) {
                goto invalid_dest;
            }
            assert(IO_VALID(dest_port));
            assert(!IP_VALID(release_port));

            /*
             *	Copyin the pre-validated reply right.
             *	It's OK if the reply right has gone dead in the meantime.
             */
            if (MACH_PORT_VALID(reply_name)) {
                kr = ipc_right_copyin(space, reply_name, reply_entry,
                    reply_type, IPC_RIGHT_COPYIN_FLAGS_DEADOK,
                    &reply_port, &reply_soright,
                    &release_port, &assertcnt, 0, NULL);
                assert(assertcnt == 0);
                assert(kr == KERN_SUCCESS);
            } else {
                /* convert invalid name to equivalent ipc_object type */
                reply_port = ip_to_object(CAST_MACH_NAME_TO_PORT(reply_name));
            }
        }

        /*
         * Finally can copyin the voucher right now that dest and reply
         * are fully copied in (guaranteed success).
         */
        if (IE_NULL != voucher_entry) {
            kr = ipc_right_copyin(space, voucher_name, voucher_entry,
                voucher_type, IPC_RIGHT_COPYIN_FLAGS_NONE,
                (ipc_object_t *)&voucher_port,
                &voucher_soright,
                &voucher_release_port,
                &assertcnt, 0, NULL);
            assert(assertcnt == 0);
            assert(KERN_SUCCESS == kr);
            assert(IP_VALID(voucher_port));
            require_ip_active(voucher_port);
        }
    }

    /*
     * The entries might need to be deallocated.
     *
     * Each entry should be deallocated only once,
     * even if it was specified in more than one slot in the header.
     * Note that dest can be the same entry as reply or voucher,
     * but reply and voucher must be distinct entries.
     */
    assert(IE_NULL != dest_entry);
    if (IE_NULL != reply_entry) {
        assert(reply_entry != voucher_entry);
    }

    if (IE_BITS_TYPE(dest_entry->ie_bits) == MACH_PORT_TYPE_NONE) {
        ipc_entry_dealloc(space, dest_name, dest_entry);

        if (dest_entry == reply_entry) {
            reply_entry = IE_NULL;
        }

        if (dest_entry == voucher_entry) {
            voucher_entry = IE_NULL;
        }

        dest_entry = IE_NULL;
    }
    if (IE_NULL != reply_entry &&
        IE_BITS_TYPE(reply_entry->ie_bits) == MACH_PORT_TYPE_NONE) {
        ipc_entry_dealloc(space, reply_name, reply_entry);
        reply_entry = IE_NULL;
    }
    if (IE_NULL != voucher_entry &&
        IE_BITS_TYPE(voucher_entry->ie_bits) == MACH_PORT_TYPE_NONE) {
        ipc_entry_dealloc(space, voucher_name, voucher_entry);
        voucher_entry = IE_NULL;
    }

    dest_type = ipc_object_copyin_type(dest_type);
    reply_type = ipc_object_copyin_type(reply_type);

    /*
     * JMM - Without rdar://problem/6275821, this is the last place we can
     * re-arm the send-possible notifications.  It may trigger unexpectedly
     * early (send may NOT have failed), but better than missing.  We assure
     * we won't miss by forcing MACH_SEND_ALWAYS if we got past arming.
     */
    if (((*optionp & MACH_SEND_NOTIFY) != 0) &&
        dest_type != MACH_MSG_TYPE_PORT_SEND_ONCE &&
        dest_entry != IE_NULL && dest_entry->ie_request != IE_REQ_NONE) {
        ipc_port_t dport = ip_object_to_port(dest_port);

        assert(dport != IP_NULL);
        ip_lock(dport);
        if (ip_active(dport) && dport->ip_receiver != ipc_space_kernel) {
            if (ip_full(dport)) {
#if IMPORTANCE_INHERITANCE
                needboost = ipc_port_request_sparm(dport, dest_name,
                    dest_entry->ie_request,
                    *optionp,
                    priority);
                if (needboost == FALSE) {
                    ip_unlock(dport);
                }
#else
                ipc_port_request_sparm(dport, dest_name,
                    dest_entry->ie_request,
                    *optionp,
                    priority);
                ip_unlock(dport);
#endif /* IMPORTANCE_INHERITANCE */
            } else {
                *optionp |= MACH_SEND_ALWAYS;
                ip_unlock(dport);
            }
        } else {
            ip_unlock(dport);
        }
    }

    is_write_unlock(space);

#if IMPORTANCE_INHERITANCE
    /*
     * If our request is the first boosting send-possible
     * notification this cycle, push the boost down the
     * destination port.
     */
    if (needboost == TRUE) {
        ipc_port_t dport = ip_object_to_port(dest_port);

        /* dport still locked from above */
        if (ipc_port_importance_delta(dport, IPID_OPTION_SENDPOSSIBLE, 1) == FALSE) {
            ip_unlock(dport);
        }
    }
#endif /* IMPORTANCE_INHERITANCE */

    if (dest_soright != IP_NULL) {
        ipc_notify_port_deleted(dest_soright, dest_name);
    }
    if (reply_soright != IP_NULL) {
        ipc_notify_port_deleted(reply_soright, reply_name);
    }
    if (voucher_soright != IP_NULL) {
        ipc_notify_port_deleted(voucher_soright, voucher_name);
    }

    /*
     * No room to store voucher port in in-kernel msg header,
     * so we store it back in the kmsg itself.  Extract the
     * qos, and apply any override before we enqueue the kmsg.
     */
    if (IP_VALID(voucher_port)) {
        kmsg->ikm_voucher = voucher_port;
        voucher_type = MACH_MSG_TYPE_MOVE_SEND;
    }

    msg->msgh_bits = MACH_MSGH_BITS_SET(dest_type, reply_type, voucher_type, mbits);
    msg->msgh_remote_port = ip_object_to_port(dest_port);
    msg->msgh_local_port = ip_object_to_port(reply_port);

    /* capture the qos value(s) for the kmsg */
    ipc_kmsg_set_qos(kmsg, *optionp, priority);

    if (release_port != IP_NULL) {
        ip_release(release_port);
    }

    if (voucher_release_port != IP_NULL) {
        ip_release(voucher_release_port);
    }

    if (enforce_strict_reply && MACH_SEND_WITH_STRICT_REPLY(*optionp) && IP_VALID(msg->msgh_local_port)) {
        /*
         * We've already validated that the reply disposition is a
         * [make/move] send-once. Ideally, we should enforce that the
         * reply port is also not dead, but XPC asynchronous
         * cancellation can make the reply port dead before we
         * actually make it to the mach_msg send.
         *
         * Here, we ensure that if we have a non-dead reply port, then
         * the reply port's receive right should not be in-transit,
         * and should live in the caller's IPC space.
         */
        ipc_port_t rport = msg->msgh_local_port;
        ip_lock(rport);
        kr = ipc_kmsg_validate_reply_port_locked(rport, *optionp);
        ip_unlock(rport);
        if (kr != KERN_SUCCESS) {
            /*
             * no descriptors have been copied in yet, but the
             * full header has been copied in: clean it up
             */
            ipc_kmsg_clean_partial(kmsg, 0, NULL, 0, 0);
            if ((*optionp & MACH_SEND_KERNEL) == 0) {
                mach_port_guard_exception(reply_name, 0,
                    (MPG_FLAGS_STRICT_REPLY_INVALID_REPLY_PORT | kr),
                    kGUARD_EXC_STRICT_REPLY);
            }
            return MACH_SEND_INVALID_REPLY;
        }
    }

    return MACH_MSG_SUCCESS;

invalid_reply:
    is_write_unlock(space);

    if (release_port != IP_NULL) {
        ip_release(release_port);
    }

    assert(voucher_port == IP_NULL);
    assert(voucher_soright == IP_NULL);

    if ((*optionp & MACH_SEND_KERNEL) == 0) {
        mach_port_guard_exception(reply_name, 0, 0, kGUARD_EXC_SEND_INVALID_REPLY);
    }
    return MACH_SEND_INVALID_REPLY;

invalid_dest:
    is_write_unlock(space);

    if (release_port != IP_NULL) {
        ip_release(release_port);
    }

    if (reply_soright != IP_NULL) {
        ipc_notify_port_deleted(reply_soright, reply_name);
    }

    assert(voucher_port == IP_NULL);
    assert(voucher_soright == IP_NULL);

    return MACH_SEND_INVALID_DEST;
}
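
/*
 * The ipc_kmsg_copyin_*_descriptor() helpers below each consume one user
 * descriptor (in its 32-bit or 64-bit user layout), fill in the corresponding
 * in-kernel descriptor, and return a pointer just past the user descriptor so
 * the caller can walk the message body.  On failure they set *mr and bail out.
 */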
static mach_msg_descriptor_t *
ipc_kmsg_copyin_port_descriptor(
    mach_msg_port_descriptor_t *dsc,
    mach_msg_legacy_port_descriptor_t *user_dsc_in,
    ipc_space_t space,
    ipc_object_t dest,
    ipc_kmsg_t kmsg,
    mach_msg_option_t *optionp,
    mach_msg_return_t *mr)
{
    mach_msg_legacy_port_descriptor_t user_dsc = *user_dsc_in;
    mach_msg_type_name_t user_disp;
    mach_msg_type_name_t result_disp;
    mach_port_name_t name;
    ipc_object_t object;

    user_disp = user_dsc.disposition;
    result_disp = ipc_object_copyin_type(user_disp);

    name = (mach_port_name_t)user_dsc.name;
    if (MACH_PORT_VALID(name)) {
        kern_return_t kr = ipc_object_copyin(space, name, user_disp, &object, 0, NULL, kmsg->ikm_flags);
        if (kr != KERN_SUCCESS) {
            if (((*optionp & MACH_SEND_KERNEL) == 0) && (kr == KERN_INVALID_RIGHT)) {
                mach_port_guard_exception(name, 0, 0, kGUARD_EXC_SEND_INVALID_RIGHT);
            }
            *mr = MACH_SEND_INVALID_RIGHT;
            return NULL;
        }

        if ((result_disp == MACH_MSG_TYPE_PORT_RECEIVE) &&
            ipc_port_check_circularity(ip_object_to_port(object),
            ip_object_to_port(dest))) {
            kmsg->ikm_header->msgh_bits |= MACH_MSGH_BITS_CIRCULAR;
        }
        dsc->name = ip_object_to_port(object);
    } else {
        dsc->name = CAST_MACH_NAME_TO_PORT(name);
    }
    dsc->disposition = result_disp;
    dsc->type = MACH_MSG_PORT_DESCRIPTOR;

    dsc->pad_end = 0; // debug, unnecessary

    return (mach_msg_descriptor_t *)(user_dsc_in + 1);
}
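
/*
 * Moving a receive right into a message can create a cycle of ports queued on
 * one another; ipc_port_check_circularity() detects that case and the message
 * is tagged MACH_MSGH_BITS_CIRCULAR.  The same check is repeated for every
 * port carried in the body (see the OOL-ports and guarded-port helpers below).
 */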
static mach_msg_descriptor_t *
ipc_kmsg_copyin_ool_descriptor(
    mach_msg_ool_descriptor_t *dsc,
    mach_msg_descriptor_t *user_dsc,
    int is_64bit,
    vm_offset_t *paddr,
    vm_map_copy_t *copy,
    vm_size_t *space_needed,
    vm_map_t map,
    __unused mach_msg_option_t *optionp,
    mach_msg_return_t *mr)
{
    vm_size_t length;
    boolean_t dealloc;
    mach_msg_copy_options_t copy_options;
    mach_vm_offset_t addr;
    mach_msg_descriptor_type_t dsc_type;

    if (is_64bit) {
        mach_msg_ool_descriptor64_t *user_ool_dsc = (typeof(user_ool_dsc))user_dsc;

        addr = (mach_vm_offset_t) user_ool_dsc->address;
        length = user_ool_dsc->size;
        dealloc = user_ool_dsc->deallocate;
        copy_options = user_ool_dsc->copy;
        dsc_type = user_ool_dsc->type;

        user_dsc = (typeof(user_dsc))(user_ool_dsc + 1);
    } else {
        mach_msg_ool_descriptor32_t *user_ool_dsc = (typeof(user_ool_dsc))user_dsc;

        addr = CAST_USER_ADDR_T(user_ool_dsc->address);
        dealloc = user_ool_dsc->deallocate;
        copy_options = user_ool_dsc->copy;
        dsc_type = user_ool_dsc->type;
        length = user_ool_dsc->size;

        user_dsc = (typeof(user_dsc))(user_ool_dsc + 1);
    }

    dsc->size = (mach_msg_size_t)length;
    dsc->deallocate = dealloc;
    dsc->copy = copy_options;
    dsc->type = dsc_type;

    if (length == 0) {
        dsc->address = NULL;
    } else if ((length >= MSG_OOL_SIZE_SMALL) &&
        (copy_options == MACH_MSG_PHYSICAL_COPY) && !dealloc) {
        /*
         * If the request is a physical copy and the source
         * is not being deallocated, then allocate space
         * in the kernel's pageable ipc copy map and copy
         * the data in.  The semantics guarantee that the
         * data will have been physically copied before
         * the send operation terminates.  Thus if the data
         * is not being deallocated, we must be prepared
         * to page if the region is sufficiently large.
         */
        if (copyin(addr, (char *)*paddr, length)) {
            *mr = MACH_SEND_INVALID_MEMORY;
            return NULL;
        }

        /*
         * The kernel ipc copy map is marked no_zero_fill.
         * If the transfer is not a page multiple, we need
         * to zero fill the balance.
         */
        if (!page_aligned(length)) {
            (void) memset((void *) (*paddr + length), 0,
                round_page(length) - length);
        }
        if (vm_map_copyin(ipc_kernel_copy_map, (vm_map_address_t)*paddr,
            (vm_map_size_t)length, TRUE, copy) != KERN_SUCCESS) {
            *mr = MACH_MSG_VM_KERNEL;
            return NULL;
        }
        dsc->address = (void *)*copy;
        *paddr += round_page(length);
        *space_needed -= round_page(length);
    } else {
        /*
         * Make a vm_map_copy_t of the data.  If the
         * data is small, this will do an optimized physical
         * copy.  Otherwise, it will do a virtual copy.
         *
         * NOTE: A virtual copy is OK if the original is being
         * deallocated, even if a physical copy was requested.
         */
        kern_return_t kr = vm_map_copyin(map, addr,
            (vm_map_size_t)length, dealloc, copy);
        if (kr != KERN_SUCCESS) {
            *mr = (kr == KERN_RESOURCE_SHORTAGE) ?
                MACH_MSG_VM_KERNEL :
                MACH_SEND_INVALID_MEMORY;
            return NULL;
        }
        dsc->address = (void *)*copy;
    }

    return user_dsc;
}
static mach_msg_descriptor_t *
ipc_kmsg_copyin_ool_ports_descriptor(
    mach_msg_ool_ports_descriptor_t *dsc,
    mach_msg_descriptor_t *user_dsc,
    int is_64bit,
    vm_map_t map,
    ipc_space_t space,
    ipc_object_t dest,
    ipc_kmsg_t kmsg,
    mach_msg_option_t *optionp,
    mach_msg_return_t *mr)
{
    void *data;
    ipc_object_t *objects;
    unsigned int i;
    mach_vm_offset_t addr;
    mach_msg_type_name_t user_disp;
    mach_msg_type_name_t result_disp;
    mach_msg_type_number_t count;
    mach_msg_copy_options_t copy_option;
    boolean_t deallocate;
    mach_msg_descriptor_type_t type;
    vm_size_t ports_length, names_length;

    if (is_64bit) {
        mach_msg_ool_ports_descriptor64_t *user_ool_dsc = (typeof(user_ool_dsc))user_dsc;

        addr = (mach_vm_offset_t)user_ool_dsc->address;
        count = user_ool_dsc->count;
        deallocate = user_ool_dsc->deallocate;
        copy_option = user_ool_dsc->copy;
        user_disp = user_ool_dsc->disposition;
        type = user_ool_dsc->type;

        user_dsc = (typeof(user_dsc))(user_ool_dsc + 1);
    } else {
        mach_msg_ool_ports_descriptor32_t *user_ool_dsc = (typeof(user_ool_dsc))user_dsc;

        addr = CAST_USER_ADDR_T(user_ool_dsc->address);
        count = user_ool_dsc->count;
        deallocate = user_ool_dsc->deallocate;
        copy_option = user_ool_dsc->copy;
        user_disp = user_ool_dsc->disposition;
        type = user_ool_dsc->type;

        user_dsc = (typeof(user_dsc))(user_ool_dsc + 1);
    }

    dsc->deallocate = deallocate;
    dsc->copy = copy_option;
    dsc->type = type;
    dsc->count = count;
    dsc->address = NULL; /* for now */

    result_disp = ipc_object_copyin_type(user_disp);
    dsc->disposition = result_disp;

    /* We always do a 'physical copy', but you have to specify something valid */
    if (copy_option != MACH_MSG_PHYSICAL_COPY &&
        copy_option != MACH_MSG_VIRTUAL_COPY) {
        *mr = MACH_SEND_INVALID_TYPE;
        return NULL;
    }

    /* calculate length of data in bytes, rounding up */

    if (os_mul_overflow(count, sizeof(mach_port_t), &ports_length)) {
        *mr = MACH_SEND_TOO_LARGE;
        return NULL;
    }

    if (os_mul_overflow(count, sizeof(mach_port_name_t), &names_length)) {
        *mr = MACH_SEND_TOO_LARGE;
        return NULL;
    }

    if (ports_length == 0) {
        return user_dsc;
    }

    data = kalloc(ports_length);

    if (data == NULL) {
        *mr = MACH_SEND_NO_BUFFER;
        return NULL;
    }

#ifdef __LP64__
    mach_port_name_t *names = &((mach_port_name_t *)data)[count];
#else
    mach_port_name_t *names = ((mach_port_name_t *)data);
#endif

    if (copyinmap(map, addr, names, names_length) != KERN_SUCCESS) {
        kfree(data, ports_length);
        *mr = MACH_SEND_INVALID_MEMORY;
        return NULL;
    }

    if (deallocate) {
        (void) mach_vm_deallocate(map, addr, (mach_vm_size_t)names_length);
    }

    objects = (ipc_object_t *) data;
    dsc->address = data;

    for (i = 0; i < count; i++) {
        mach_port_name_t name = names[i];
        ipc_object_t object;

        if (!MACH_PORT_VALID(name)) {
            objects[i] = ip_to_object(CAST_MACH_NAME_TO_PORT(name));
            continue;
        }

        kern_return_t kr = ipc_object_copyin(space, name, user_disp, &object, 0, NULL, kmsg->ikm_flags);

        if (kr != KERN_SUCCESS) {
            unsigned int j;

            for (j = 0; j < i; j++) {
                object = objects[j];
                if (IPC_OBJECT_VALID(object)) {
                    ipc_object_destroy(object, result_disp);
                }
            }
            kfree(data, ports_length);
            dsc->address = NULL;
            if (((*optionp & MACH_SEND_KERNEL) == 0) && (kr == KERN_INVALID_RIGHT)) {
                mach_port_guard_exception(name, 0, 0, kGUARD_EXC_SEND_INVALID_RIGHT);
            }
            *mr = MACH_SEND_INVALID_RIGHT;
            return NULL;
        }

        if ((dsc->disposition == MACH_MSG_TYPE_PORT_RECEIVE) &&
            ipc_port_check_circularity(ip_object_to_port(object),
            ip_object_to_port(dest))) {
            kmsg->ikm_header->msgh_bits |= MACH_MSGH_BITS_CIRCULAR;
        }

        objects[i] = object;
    }

    return user_dsc;
}
static mach_msg_descriptor_t *
ipc_kmsg_copyin_guarded_port_descriptor(
    mach_msg_guarded_port_descriptor_t *dsc,
    mach_msg_descriptor_t *user_addr,
    int is_64bit,
    ipc_space_t space,
    ipc_object_t dest,
    ipc_kmsg_t kmsg,
    mach_msg_option_t *optionp,
    mach_msg_return_t *mr)
{
    mach_msg_descriptor_t *user_dsc;
    mach_msg_type_name_t disp;
    mach_msg_type_name_t result_disp;
    mach_port_name_t name;
    mach_msg_guard_flags_t guard_flags;
    ipc_object_t object;
    mach_port_context_t context;

    if (!is_64bit) {
        mach_msg_guarded_port_descriptor32_t *user_gp_dsc = (typeof(user_gp_dsc))user_addr;
        name = user_gp_dsc->name;
        guard_flags = user_gp_dsc->flags;
        disp = user_gp_dsc->disposition;
        context = user_gp_dsc->context;
        user_dsc = (mach_msg_descriptor_t *)(user_gp_dsc + 1);
    } else {
        mach_msg_guarded_port_descriptor64_t *user_gp_dsc = (typeof(user_gp_dsc))user_addr;
        name = user_gp_dsc->name;
        guard_flags = user_gp_dsc->flags;
        disp = user_gp_dsc->disposition;
        context = user_gp_dsc->context;
        user_dsc = (mach_msg_descriptor_t *)(user_gp_dsc + 1);
    }

    guard_flags &= MACH_MSG_GUARD_FLAGS_MASK;
    result_disp = ipc_object_copyin_type(disp);

    if (MACH_PORT_VALID(name)) {
        kern_return_t kr = ipc_object_copyin(space, name, disp, &object, context, &guard_flags, kmsg->ikm_flags);
        if (kr != KERN_SUCCESS) {
            if (((*optionp & MACH_SEND_KERNEL) == 0) && (kr == KERN_INVALID_RIGHT)) {
                mach_port_guard_exception(name, 0, 0, kGUARD_EXC_SEND_INVALID_RIGHT);
            }
            *mr = MACH_SEND_INVALID_RIGHT;
            return NULL;
        }

        if ((result_disp == MACH_MSG_TYPE_PORT_RECEIVE) &&
            ipc_port_check_circularity(ip_object_to_port(object),
            ip_object_to_port(dest))) {
            kmsg->ikm_header->msgh_bits |= MACH_MSGH_BITS_CIRCULAR;
        }
        dsc->name = ip_object_to_port(object);
    } else {
        dsc->name = CAST_MACH_NAME_TO_PORT(name);
    }
    dsc->flags = guard_flags;
    dsc->disposition = result_disp;
    dsc->type = MACH_MSG_GUARDED_PORT_DESCRIPTOR;

    dsc->pad_end = 0; // debug, unnecessary

    return user_dsc;
}
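
/*
 * Guarded port descriptors only support MACH_MSG_TYPE_MOVE_RECEIVE today (the
 * body pre-scan in ipc_kmsg_copyin_body enforces this); the user-supplied
 * context and guard flags are passed through to ipc_object_copyin() so the
 * right can be unguarded (or left guarded) as part of the move.
 */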
3707 * Routine: ipc_kmsg_copyin_body
3709 * "Copy-in" port rights and out-of-line memory
3710 * in the message body.
3712 * In all failure cases, the message is left holding
3713 * no rights or memory. However, the message buffer
3714 * is not deallocated. If successful, the message
3715 * contains a valid destination port.
3719 * MACH_MSG_SUCCESS Successful copyin.
3720 * MACH_SEND_INVALID_MEMORY Can't grab out-of-line memory.
3721 * MACH_SEND_INVALID_RIGHT Can't copyin port right in body.
3722 * MACH_SEND_INVALID_TYPE Bad type specification.
3723 * MACH_SEND_MSG_TOO_SMALL Body is too small for types/data.
3724 * MACH_SEND_INVALID_RT_OOL_SIZE OOL Buffer too large for RT
3725 * MACH_MSG_INVALID_RT_DESCRIPTOR Dealloc and RT are incompatible
3726 * MACH_SEND_NO_GRANT_DEST Dest port doesn't accept ports in body
3730 ipc_kmsg_copyin_body(
3734 mach_msg_option_t
*optionp
)
3737 mach_msg_body_t
*body
;
3738 mach_msg_descriptor_t
*daddr
, *naddr
, *end
;
3739 mach_msg_descriptor_t
*user_addr
, *kern_addr
;
3740 mach_msg_type_number_t dsc_count
;
3741 boolean_t is_task_64bit
= (map
->max_offset
> VM_MAX_ADDRESS
);
3742 boolean_t
complex = FALSE
;
3743 boolean_t contains_port_desc
= FALSE
;
3744 vm_size_t space_needed
= 0;
3745 vm_offset_t paddr
= 0;
3746 vm_map_copy_t copy
= VM_MAP_COPY_NULL
;
3747 mach_msg_type_number_t i
;
3748 mach_msg_return_t mr
= MACH_MSG_SUCCESS
;
3749 ipc_port_t remote_port
= kmsg
->ikm_header
->msgh_remote_port
;
3751 vm_size_t descriptor_size
= 0;
3753 mach_msg_type_number_t total_ool_port_count
= 0;
3754 mach_msg_guard_flags_t guard_flags
= 0;
3755 mach_port_context_t context
;
3756 mach_msg_type_name_t disp
;
3759 * Determine if the target is a kernel port.
3761 dest
= ip_to_object(remote_port
);
3762 body
= (mach_msg_body_t
*) (kmsg
->ikm_header
+ 1);
3763 naddr
= (mach_msg_descriptor_t
*) (body
+ 1);
3764 end
= (mach_msg_descriptor_t
*) ((vm_offset_t
)kmsg
->ikm_header
+ kmsg
->ikm_header
->msgh_size
);
3766 dsc_count
= body
->msgh_descriptor_count
;
3767 if (dsc_count
== 0) {
3768 return MACH_MSG_SUCCESS
;
3772 * Make an initial pass to determine kernal VM space requirements for
3773 * physical copies and possible contraction of the descriptors from
3774 * processes with pointers larger than the kernel's.
3777 for (i
= 0; i
< dsc_count
; i
++) {
3778 mach_msg_size_t size
;
3779 mach_msg_type_number_t ool_port_count
= 0;
3783 /* make sure the descriptor fits in the message */
3784 if (is_task_64bit
) {
3785 if ((mach_msg_descriptor_t
*)((vm_offset_t
)daddr
+ 12) > end
) {
3786 mr
= MACH_SEND_MSG_TOO_SMALL
;
3790 switch (daddr
->type
.type
) {
3791 case MACH_MSG_OOL_DESCRIPTOR
:
3792 case MACH_MSG_OOL_VOLATILE_DESCRIPTOR
:
3793 case MACH_MSG_OOL_PORTS_DESCRIPTOR
:
3794 case MACH_MSG_GUARDED_PORT_DESCRIPTOR
:
3795 descriptor_size
+= 16;
3796 naddr
= (typeof(naddr
))((vm_offset_t
)daddr
+ 16);
3799 descriptor_size
+= 12;
3800 naddr
= (typeof(naddr
))((vm_offset_t
)daddr
+ 12);
3804 descriptor_size
+= 12;
3805 naddr
= (typeof(naddr
))((vm_offset_t
)daddr
+ 12);
3809 mr
= MACH_SEND_MSG_TOO_SMALL
;
3813 switch (daddr
->type
.type
) {
3814 case MACH_MSG_OOL_DESCRIPTOR
:
3815 case MACH_MSG_OOL_VOLATILE_DESCRIPTOR
:
3816 size
= (is_task_64bit
) ?
3817 ((mach_msg_ool_descriptor64_t
*)daddr
)->size
:
3818 daddr
->out_of_line
.size
;
3820 if (daddr
->out_of_line
.copy
!= MACH_MSG_PHYSICAL_COPY
&&
3821 daddr
->out_of_line
.copy
!= MACH_MSG_VIRTUAL_COPY
) {
3823 * Invalid copy option
3825 mr
= MACH_SEND_INVALID_TYPE
;
3829 if ((size
>= MSG_OOL_SIZE_SMALL
) &&
3830 (daddr
->out_of_line
.copy
== MACH_MSG_PHYSICAL_COPY
) &&
3831 !(daddr
->out_of_line
.deallocate
)) {
3833 * Out-of-line memory descriptor, accumulate kernel
3834 * memory requirements
3836 if (space_needed
+ round_page(size
) <= space_needed
) {
3837 /* Overflow dectected */
3838 mr
= MACH_MSG_VM_KERNEL
;
3842 space_needed
+= round_page(size
);
3843 if (space_needed
> ipc_kmsg_max_vm_space
) {
3844 /* Per message kernel memory limit exceeded */
3845 mr
= MACH_MSG_VM_KERNEL
;
3850 case MACH_MSG_PORT_DESCRIPTOR
:
3851 if (os_add_overflow(total_ool_port_count
, 1, &total_ool_port_count
)) {
3852 /* Overflow detected */
3853 mr
= MACH_SEND_TOO_LARGE
;
3856 contains_port_desc
= TRUE
;
3858 case MACH_MSG_OOL_PORTS_DESCRIPTOR
:
3859 ool_port_count
= (is_task_64bit
) ?
3860 ((mach_msg_ool_ports_descriptor64_t
*)daddr
)->count
:
3861 daddr
->ool_ports
.count
;
3863 if (os_add_overflow(total_ool_port_count
, ool_port_count
, &total_ool_port_count
)) {
3864 /* Overflow detected */
3865 mr
= MACH_SEND_TOO_LARGE
;
3869 if (ool_port_count
> (ipc_kmsg_max_vm_space
/ sizeof(mach_port_t
))) {
3870 /* Per message kernel memory limit exceeded */
3871 mr
= MACH_SEND_TOO_LARGE
;
3874 contains_port_desc
= TRUE
;
3876 case MACH_MSG_GUARDED_PORT_DESCRIPTOR
:
3877 guard_flags
= (is_task_64bit
) ?
3878 ((mach_msg_guarded_port_descriptor64_t
*)daddr
)->flags
:
3879 ((mach_msg_guarded_port_descriptor32_t
*)daddr
)->flags
;
3880 context
= (is_task_64bit
) ?
3881 ((mach_msg_guarded_port_descriptor64_t
*)daddr
)->context
:
3882 ((mach_msg_guarded_port_descriptor32_t
*)daddr
)->context
;
3883 disp
= (is_task_64bit
) ?
3884 ((mach_msg_guarded_port_descriptor64_t
*)daddr
)->disposition
:
3885 ((mach_msg_guarded_port_descriptor32_t
*)daddr
)->disposition
;
3887 /* Only MACH_MSG_TYPE_MOVE_RECEIVE is supported for now */
3888 if (!guard_flags
|| ((guard_flags
& ~MACH_MSG_GUARD_FLAGS_MASK
) != 0) ||
3889 ((guard_flags
& MACH_MSG_GUARD_FLAGS_UNGUARDED_ON_SEND
) && (context
!= 0)) ||
3890 (disp
!= MACH_MSG_TYPE_MOVE_RECEIVE
)) {
3892 * Invalid guard flags, context or disposition
3894 mr
= MACH_SEND_INVALID_TYPE
;
3897 if (os_add_overflow(total_ool_port_count
, 1, &total_ool_port_count
)) {
3898 /* Overflow detected */
3899 mr
= MACH_SEND_TOO_LARGE
;
3902 contains_port_desc
= TRUE
;
3907 /* Sending more than 16383 rights in one message seems crazy */
3908 if (total_ool_port_count
>= (MACH_PORT_UREFS_MAX
/ 4)) {
3909 mr
= MACH_SEND_TOO_LARGE
;
3914 * Check if dest is a no-grant port; Since this bit is set only on
3915 * port construction and cannot be unset later, we can peek at the
3916 * bit without paying the cost of locking the port.
3918 if (contains_port_desc
&& remote_port
->ip_no_grant
) {
3919 mr
= MACH_SEND_NO_GRANT_DEST
;
3924 * Allocate space in the pageable kernel ipc copy map for all the
3925 * ool data that is to be physically copied. Map is marked wait for
3929 if (vm_allocate_kernel(ipc_kernel_copy_map
, &paddr
, space_needed
,
3930 VM_FLAGS_ANYWHERE
, VM_KERN_MEMORY_IPC
) != KERN_SUCCESS
) {
3931 mr
= MACH_MSG_VM_KERNEL
;
3936 /* user_addr = just after base as it was copied in */
3937 user_addr
= (mach_msg_descriptor_t
*)((vm_offset_t
)kmsg
->ikm_header
+ sizeof(mach_msg_base_t
));
3939 /* Shift the mach_msg_base_t down to make room for dsc_count*16bytes of descriptors on 64 bit kernels
3941 if (descriptor_size
!= 16 * dsc_count
) {
3942 vm_offset_t dsc_adjust
= 16 * dsc_count
- descriptor_size
;
3944 memmove((char *)(((vm_offset_t
)kmsg
->ikm_header
) - dsc_adjust
), kmsg
->ikm_header
, sizeof(mach_msg_base_t
));
3945 kmsg
->ikm_header
= (mach_msg_header_t
*)((vm_offset_t
)kmsg
->ikm_header
- dsc_adjust
);
3947 /* Update the message size for the larger in-kernel representation */
3948 kmsg
->ikm_header
->msgh_size
+= (mach_msg_size_t
)dsc_adjust
;
3952 /* kern_addr = just after base after it has been (conditionally) moved */
3953 kern_addr
= (mach_msg_descriptor_t
*)((vm_offset_t
)kmsg
->ikm_header
+ sizeof(mach_msg_base_t
));
3955 /* handle the OOL regions and port descriptors. */
3956 for (i
= 0; i
< dsc_count
; i
++) {
3957 switch (user_addr
->type
.type
) {
3958 case MACH_MSG_PORT_DESCRIPTOR
:
3959 user_addr
= ipc_kmsg_copyin_port_descriptor((mach_msg_port_descriptor_t
*)kern_addr
,
3960 (mach_msg_legacy_port_descriptor_t
*)user_addr
, space
, dest
, kmsg
, optionp
, &mr
);
3964 case MACH_MSG_OOL_VOLATILE_DESCRIPTOR
:
3965 case MACH_MSG_OOL_DESCRIPTOR
:
3966 user_addr
= ipc_kmsg_copyin_ool_descriptor((mach_msg_ool_descriptor_t
*)kern_addr
,
3967 user_addr
, is_task_64bit
, &paddr
, ©
, &space_needed
, map
, optionp
, &mr
);
3971 case MACH_MSG_OOL_PORTS_DESCRIPTOR
:
3972 user_addr
= ipc_kmsg_copyin_ool_ports_descriptor((mach_msg_ool_ports_descriptor_t
*)kern_addr
,
3973 user_addr
, is_task_64bit
, map
, space
, dest
, kmsg
, optionp
, &mr
);
3977 case MACH_MSG_GUARDED_PORT_DESCRIPTOR
:
3978 user_addr
= ipc_kmsg_copyin_guarded_port_descriptor((mach_msg_guarded_port_descriptor_t
*)kern_addr
,
3979 user_addr
, is_task_64bit
, space
, dest
, kmsg
, optionp
, &mr
);
3984 /* Invalid descriptor */
3985 mr
= MACH_SEND_INVALID_TYPE
;
3989 if (MACH_MSG_SUCCESS
!= mr
) {
3990 /* clean from start of message descriptors to i */
3991 ipc_kmsg_clean_partial(kmsg
, i
,
3992 (mach_msg_descriptor_t
*)((mach_msg_base_t
*)kmsg
->ikm_header
+ 1),
3993 paddr
, space_needed
);
3999 kmsg
->ikm_header
->msgh_bits
&= ~MACH_MSGH_BITS_COMPLEX
;
4005 /* no descriptors have been copied in yet */
4006 ipc_kmsg_clean_partial(kmsg
, 0, NULL
, 0, 0);
/*
 *	Routine:	ipc_kmsg_copyin
 *	Purpose:
 *		"Copy-in" port rights and out-of-line memory
 *		in the message.
 *
 *		In all failure cases, the message is left holding
 *		no rights or memory.  However, the message buffer
 *		is not deallocated.  If successful, the message
 *		contains a valid destination port.
 *	Conditions:
 *		Nothing locked.
 *	Returns:
 *		MACH_MSG_SUCCESS	Successful copyin.
 *		MACH_SEND_INVALID_HEADER	Illegal value in the message header bits.
 *		MACH_SEND_INVALID_DEST	Can't copyin destination port.
 *		MACH_SEND_INVALID_REPLY	Can't copyin reply port.
 *		MACH_SEND_INVALID_MEMORY	Can't grab out-of-line memory.
 *		MACH_SEND_INVALID_RIGHT	Can't copyin port right in body.
 *		MACH_SEND_INVALID_TYPE	Bad type specification.
 *		MACH_SEND_MSG_TOO_SMALL	Body is too small for types/data.
 */
mach_msg_return_t
ipc_kmsg_copyin(
    ipc_kmsg_t              kmsg,
    ipc_space_t             space,
    vm_map_t                map,
    mach_msg_priority_t     priority,
    mach_msg_option_t       *optionp)
{
    mach_msg_return_t mr;
    mach_port_name_t dest_name = CAST_MACH_PORT_TO_NAME(kmsg->ikm_header->msgh_remote_port);

    kmsg->ikm_header->msgh_bits &= MACH_MSGH_BITS_USER;

    mr = ipc_kmsg_copyin_header(kmsg, space, priority, optionp);

    if (mr != MACH_MSG_SUCCESS) {
        return mr;
    }

    /* Get the message filter policy if the task and port support filtering */
    mach_msg_filter_id fid = 0;
    if (ip_enforce_msg_filtering(kmsg->ikm_header->msgh_remote_port) &&
        task_get_filter_msg_flag(current_task())) {
        /* port label is yet to be supported */
        boolean_t allow_kmsg = mach_msg_fetch_filter_policy(NULL, kmsg->ikm_header->msgh_id, &fid);
        if (!allow_kmsg) {
            mach_port_guard_exception(dest_name, 0, 0, kGUARD_EXC_MSG_FILTERED);
            /* no descriptors have been copied in yet */
            ipc_kmsg_clean_partial(kmsg, 0, NULL, 0, 0);
            return MACH_SEND_MSG_FILTERED;
        }
        kmsg->ikm_filter_policy_id = fid;
    }

    KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_MSG_SEND) | DBG_FUNC_NONE,
        VM_KERNEL_ADDRPERM((uintptr_t)kmsg),
        (uintptr_t)kmsg->ikm_header->msgh_bits,
        (uintptr_t)kmsg->ikm_header->msgh_id,
        VM_KERNEL_ADDRPERM((uintptr_t)unsafe_convert_port_to_voucher(kmsg->ikm_voucher)),
        0);

    DEBUG_KPRINT_SYSCALL_IPC("ipc_kmsg_copyin header:\n%.8x\n%.8x\n%p\n%p\n%p\n%.8x\n",
        kmsg->ikm_header->msgh_size,
        kmsg->ikm_header->msgh_bits,
        kmsg->ikm_header->msgh_remote_port,
        kmsg->ikm_header->msgh_local_port,
        kmsg->ikm_voucher,
        kmsg->ikm_header->msgh_id);

    if (kmsg->ikm_header->msgh_bits & MACH_MSGH_BITS_COMPLEX) {
        mr = ipc_kmsg_copyin_body(kmsg, space, map, optionp);
    }

    /* unreachable if !DEBUG */
    __unreachable_ok_push
    if (DEBUG_KPRINT_SYSCALL_PREDICATE(DEBUG_KPRINT_SYSCALL_IPC_MASK)) {
        kprintf("body:\n");
        uint32_t i;
        for (i = 0; i * 4 < (kmsg->ikm_header->msgh_size - sizeof(mach_msg_header_t)); i++) {
            kprintf("%.4x\n", ((uint32_t *)(kmsg->ikm_header + 1))[i]);
        }
    }
    __unreachable_ok_pop

    /* Sign the message contents */
    if (mr == MACH_MSG_SUCCESS) {
        ikm_sign(kmsg);
    }

    return mr;
}
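
/*
 * Order of operations in ipc_kmsg_copyin(): header rights first (atomically),
 * then an optional message-filter check against the destination, then the
 * body descriptors when MACH_MSGH_BITS_COMPLEX is set, and finally the
 * contents are signed only if every step succeeded.
 */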
4107 * Routine: ipc_kmsg_copyin_from_kernel
4109 * "Copy-in" port rights and out-of-line memory
4110 * in a message sent from the kernel.
4112 * Because the message comes from the kernel,
4113 * the implementation assumes there are no errors
4114 * or peculiarities in the message.
4120 ipc_kmsg_copyin_from_kernel(
4123 mach_msg_bits_t bits
= kmsg
->ikm_header
->msgh_bits
;
4124 mach_msg_type_name_t rname
= MACH_MSGH_BITS_REMOTE(bits
);
4125 mach_msg_type_name_t lname
= MACH_MSGH_BITS_LOCAL(bits
);
4126 ipc_object_t remote
= ip_to_object(kmsg
->ikm_header
->msgh_remote_port
);
4127 ipc_object_t local
= ip_to_object(kmsg
->ikm_header
->msgh_local_port
);
4128 ipc_port_t dest
= kmsg
->ikm_header
->msgh_remote_port
;
4130 /* translate the destination and reply ports */
4131 if (!IO_VALID(remote
)) {
4132 return MACH_SEND_INVALID_DEST
;
4135 ipc_object_copyin_from_kernel(remote
, rname
);
4136 if (IO_VALID(local
)) {
4137 ipc_object_copyin_from_kernel(local
, lname
);
4141 * The common case is a complex message with no reply port,
4142 * because that is what the memory_object interface uses.
4145 if (bits
== (MACH_MSGH_BITS_COMPLEX
|
4146 MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND
, 0))) {
4147 bits
= (MACH_MSGH_BITS_COMPLEX
|
4148 MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND
, 0));
4150 kmsg
->ikm_header
->msgh_bits
= bits
;
4152 bits
= (MACH_MSGH_BITS_OTHER(bits
) |
4153 MACH_MSGH_BITS(ipc_object_copyin_type(rname
),
4154 ipc_object_copyin_type(lname
)));
4156 kmsg
->ikm_header
->msgh_bits
= bits
;
4159 if (bits
& MACH_MSGH_BITS_COMPLEX
) {
4161 * Check if the remote port accepts ports in the body.
4163 if (dest
->ip_no_grant
) {
4164 mach_msg_descriptor_t
*saddr
;
4165 mach_msg_body_t
*body
;
4166 mach_msg_type_number_t i
, count
;
4168 body
= (mach_msg_body_t
*) (kmsg
->ikm_header
+ 1);
4169 saddr
= (mach_msg_descriptor_t
*) (body
+ 1);
4170 count
= body
->msgh_descriptor_count
;
4172 for (i
= 0; i
< count
; i
++, saddr
++) {
4173 switch (saddr
->type
.type
) {
4174 case MACH_MSG_PORT_DESCRIPTOR
:
4175 case MACH_MSG_OOL_PORTS_DESCRIPTOR
:
4176 case MACH_MSG_GUARDED_PORT_DESCRIPTOR
:
4177 /* no descriptors have been copied in yet */
4178 ipc_kmsg_clean_partial(kmsg
, 0, NULL
, 0, 0);
4179 return MACH_SEND_NO_GRANT_DEST
;
4184 mach_msg_descriptor_t
*saddr
;
4185 mach_msg_body_t
*body
;
4186 mach_msg_type_number_t i
, count
;
4188 body
= (mach_msg_body_t
*) (kmsg
->ikm_header
+ 1);
4189 saddr
= (mach_msg_descriptor_t
*) (body
+ 1);
4190 count
= body
->msgh_descriptor_count
;
4192 for (i
= 0; i
< count
; i
++, saddr
++) {
4193 switch (saddr
->type
.type
) {
4194 case MACH_MSG_PORT_DESCRIPTOR
: {
4195 mach_msg_type_name_t name
;
4196 ipc_object_t object
;
4197 mach_msg_port_descriptor_t
*dsc
;
4201 /* this is really the type SEND, SEND_ONCE, etc. */
4202 name
= dsc
->disposition
;
4203 object
= ip_to_object(dsc
->name
);
4204 dsc
->disposition
= ipc_object_copyin_type(name
);
4206 if (!IO_VALID(object
)) {
4210 ipc_object_copyin_from_kernel(object
, name
);
4212 /* CDY avoid circularity when the destination is also */
4213 /* the kernel. This check should be changed into an */
4214 /* assert when the new kobject model is in place since*/
4215 /* ports will not be used in kernel to kernel chats */
4217 if (ip_object_to_port(remote
)->ip_receiver
!= ipc_space_kernel
) {
4218 if ((dsc
->disposition
== MACH_MSG_TYPE_PORT_RECEIVE
) &&
4219 ipc_port_check_circularity(ip_object_to_port(object
),
4220 ip_object_to_port(remote
))) {
4221 kmsg
->ikm_header
->msgh_bits
|=
4222 MACH_MSGH_BITS_CIRCULAR
;
4227 case MACH_MSG_OOL_VOLATILE_DESCRIPTOR
:
4228 case MACH_MSG_OOL_DESCRIPTOR
: {
4230 * The sender should supply ready-made memory, i.e.
4231 * a vm_map_copy_t, so we don't need to do anything.
4235 case MACH_MSG_OOL_PORTS_DESCRIPTOR
: {
4236 ipc_object_t
*objects
;
4238 mach_msg_type_name_t name
;
4239 mach_msg_ool_ports_descriptor_t
*dsc
;
4241 dsc
= (mach_msg_ool_ports_descriptor_t
*)&saddr
->ool_ports
;
4243 /* this is really the type SEND, SEND_ONCE, etc. */
4244 name
= dsc
->disposition
;
4245 dsc
->disposition
= ipc_object_copyin_type(name
);
4247 objects
= (ipc_object_t
*) dsc
->address
;
4249 for (j
= 0; j
< dsc
->count
; j
++) {
4250 ipc_object_t object
= objects
[j
];
4252 if (!IO_VALID(object
)) {
4256 ipc_object_copyin_from_kernel(object
, name
);
4258 if ((dsc
->disposition
== MACH_MSG_TYPE_PORT_RECEIVE
) &&
4259 ipc_port_check_circularity(ip_object_to_port(object
),
4260 ip_object_to_port(remote
))) {
4261 kmsg
->ikm_header
->msgh_bits
|= MACH_MSGH_BITS_CIRCULAR
;
4266 case MACH_MSG_GUARDED_PORT_DESCRIPTOR
: {
4267 mach_msg_guarded_port_descriptor_t
*dsc
= (typeof(dsc
)) & saddr
->guarded_port
;
4268 mach_msg_type_name_t disp
= dsc
->disposition
;
4269 ipc_object_t object
= ip_to_object(dsc
->name
);
4270 dsc
->disposition
= ipc_object_copyin_type(disp
);
4271 assert(dsc
->flags
== 0);
4273 if (!IO_VALID(object
)) {
4277 ipc_object_copyin_from_kernel(object
, disp
);
4279 * avoid circularity when the destination is also
4280 * the kernel. This check should be changed into an
4281 * assert when the new kobject model is in place since
4282 * ports will not be used in kernel to kernel chats
4285 if (ip_object_to_port(remote
)->ip_receiver
!= ipc_space_kernel
) {
4286 if ((dsc
->disposition
== MACH_MSG_TYPE_PORT_RECEIVE
) &&
4287 ipc_port_check_circularity(ip_object_to_port(object
),
4288 ip_object_to_port(remote
))) {
4289 kmsg
->ikm_header
->msgh_bits
|= MACH_MSGH_BITS_CIRCULAR
;
4296 panic("ipc_kmsg_copyin_from_kernel: bad descriptor");
4297 #endif /* MACH_ASSERT */
4303 /* Add the signature to the message */
4306 return MACH_MSG_SUCCESS
;
4309 #if IKM_SUPPORT_LEGACY
4311 ipc_kmsg_copyin_from_kernel_legacy(
4314 mach_msg_bits_t bits
= kmsg
->ikm_header
->msgh_bits
;
4315 mach_msg_type_name_t rname
= MACH_MSGH_BITS_REMOTE(bits
);
4316 mach_msg_type_name_t lname
= MACH_MSGH_BITS_LOCAL(bits
);
4317 ipc_object_t remote
= ip_to_object(kmsg
->ikm_header
->msgh_remote_port
);
4318 ipc_object_t local
= ip_to_object(kmsg
->ikm_header
->msgh_local_port
);
4319 ipc_port_t dest
= kmsg
->ikm_header
->msgh_remote_port
;
4321 /* translate the destination and reply ports */
4322 if (!IO_VALID(remote
)) {
4323 return MACH_SEND_INVALID_DEST
;
4326 ipc_object_copyin_from_kernel(remote
, rname
);
4327 if (IO_VALID(local
)) {
4328 ipc_object_copyin_from_kernel(local
, lname
);
4332 * The common case is a complex message with no reply port,
4333 * because that is what the memory_object interface uses.
4336 if (bits
== (MACH_MSGH_BITS_COMPLEX
|
4337 MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND
, 0))) {
4338 bits
= (MACH_MSGH_BITS_COMPLEX
|
4339 MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND
, 0));
4341 kmsg
->ikm_header
->msgh_bits
= bits
;
4343 bits
= (MACH_MSGH_BITS_OTHER(bits
) |
4344 MACH_MSGH_BITS(ipc_object_copyin_type(rname
),
4345 ipc_object_copyin_type(lname
)));
4347 kmsg
->ikm_header
->msgh_bits
= bits
;
4350 if (bits
& MACH_MSGH_BITS_COMPLEX
) {
4351 if (dest
->ip_no_grant
) {
4352 mach_msg_descriptor_t
*saddr
;
4353 mach_msg_body_t
*body
;
4354 mach_msg_type_number_t i
, count
;
4356 body
= (mach_msg_body_t
*) (kmsg
->ikm_header
+ 1);
4357 saddr
= (mach_msg_descriptor_t
*) (body
+ 1);
4358 count
= body
->msgh_descriptor_count
;
4360 for (i
= 0; i
< count
; i
++, saddr
++) {
4361 switch (saddr
->type
.type
) {
4362 case MACH_MSG_PORT_DESCRIPTOR
:
4363 case MACH_MSG_OOL_PORTS_DESCRIPTOR
:
4364 case MACH_MSG_GUARDED_PORT_DESCRIPTOR
:
4365 /* no descriptors have been copied in yet */
4366 ipc_kmsg_clean_partial(kmsg
, 0, NULL
, 0, 0);
4367 return MACH_SEND_NO_GRANT_DEST
;
		mach_msg_legacy_descriptor_t *saddr;
		mach_msg_descriptor_t *daddr;
		mach_msg_body_t *body;
		mach_msg_type_number_t i, count;

		body = (mach_msg_body_t *) (kmsg->ikm_header + 1);
		saddr = (typeof(saddr))(body + 1);
		count = body->msgh_descriptor_count;

		vm_offset_t dsc_adjust = 4 * count;
		memmove((char *)(((vm_offset_t)kmsg->ikm_header) - dsc_adjust), kmsg->ikm_header, sizeof(mach_msg_base_t));
		kmsg->ikm_header = (mach_msg_header_t *)((vm_offset_t)kmsg->ikm_header - dsc_adjust);
		/* Update the message size for the larger in-kernel representation */
		kmsg->ikm_header->msgh_size += dsc_adjust;

		daddr = (mach_msg_descriptor_t *)((vm_offset_t)kmsg->ikm_header + sizeof(mach_msg_base_t));

		for (i = 0; i < count; i++, saddr++, daddr++) {
			switch (saddr->type.type) {
			case MACH_MSG_PORT_DESCRIPTOR: {
				mach_msg_type_name_t name;
				ipc_object_t object;
				mach_msg_legacy_port_descriptor_t *dsc;
				mach_msg_port_descriptor_t *dest_dsc;

				dsc = (typeof(dsc)) &saddr->port;
				dest_dsc = &daddr->port;

				/* this is really the type SEND, SEND_ONCE, etc. */
				name = dsc->disposition;
				object = ip_to_object(CAST_MACH_NAME_TO_PORT(dsc->name));
				dest_dsc->disposition = ipc_object_copyin_type(name);
				dest_dsc->name = ip_object_to_port(object);
				dest_dsc->type = MACH_MSG_PORT_DESCRIPTOR;

				if (!IO_VALID(object)) {
					break;
				}

				ipc_object_copyin_from_kernel(object, name);

				/* CDY avoid circularity when the destination is also */
				/* the kernel.  This check should be changed into an  */
				/* assert when the new kobject model is in place since*/
				/* ports will not be used in kernel to kernel chats   */

				if (ip_object_to_port(remote)->ip_receiver != ipc_space_kernel) {
					if ((dest_dsc->disposition == MACH_MSG_TYPE_PORT_RECEIVE) &&
					    ipc_port_check_circularity(ip_object_to_port(object),
					    ip_object_to_port(remote))) {
						kmsg->ikm_header->msgh_bits |=
						    MACH_MSGH_BITS_CIRCULAR;
					}
				}
				break;
			}
			case MACH_MSG_OOL_VOLATILE_DESCRIPTOR:
			case MACH_MSG_OOL_DESCRIPTOR: {
				/* The sender should supply ready-made memory, i.e. a vm_map_copy_t
				 * so we don't need to do anything special. */

				mach_msg_ool_descriptor32_t *source_dsc = &saddr->out_of_line32;
				mach_msg_ool_descriptor_t *dest_dsc = (typeof(dest_dsc)) &daddr->out_of_line;

				vm_offset_t address = source_dsc->address;
				vm_size_t size = source_dsc->size;
				boolean_t deallocate = source_dsc->deallocate;
				mach_msg_copy_options_t copy = source_dsc->copy;
				mach_msg_descriptor_type_t type = source_dsc->type;

				dest_dsc->address = (void *)address;
				dest_dsc->size = size;
				dest_dsc->deallocate = deallocate;
				dest_dsc->copy = copy;
				dest_dsc->type = type;
				break;
			}
			case MACH_MSG_OOL_PORTS_DESCRIPTOR: {
				ipc_object_t *objects;
				mach_msg_type_number_t j;
				mach_msg_type_name_t name;
				mach_msg_ool_ports_descriptor_t *dest_dsc;

				mach_msg_ool_ports_descriptor32_t *source_dsc = &saddr->ool_ports32;
				dest_dsc = (typeof(dest_dsc)) &daddr->ool_ports;

				boolean_t deallocate = source_dsc->deallocate;
				mach_msg_copy_options_t copy = source_dsc->copy;
				mach_msg_size_t port_count = source_dsc->count;
				mach_msg_type_name_t disposition = source_dsc->disposition;

				/* this is really the type SEND, SEND_ONCE, etc. */
				name = disposition;
				disposition = ipc_object_copyin_type(name);

				objects = (ipc_object_t *) (uintptr_t)source_dsc->address;

				for (j = 0; j < port_count; j++) {
					ipc_object_t object = objects[j];

					if (!IO_VALID(object)) {
						continue;
					}

					ipc_object_copyin_from_kernel(object, name);

					if ((disposition == MACH_MSG_TYPE_PORT_RECEIVE) &&
					    ipc_port_check_circularity(ip_object_to_port(object),
					    ip_object_to_port(remote))) {
						kmsg->ikm_header->msgh_bits |= MACH_MSGH_BITS_CIRCULAR;
					}
				}

				dest_dsc->address = objects;
				dest_dsc->deallocate = deallocate;
				dest_dsc->copy = copy;
				dest_dsc->disposition = disposition;
				dest_dsc->type = MACH_MSG_OOL_PORTS_DESCRIPTOR;
				dest_dsc->count = port_count;
				break;
			}
			case MACH_MSG_GUARDED_PORT_DESCRIPTOR: {
				mach_msg_type_name_t disp;
				ipc_object_t object;
				mach_msg_guarded_port_descriptor32_t *dsc;
				mach_msg_guarded_port_descriptor_t *dest_dsc;

				dsc = (typeof(dsc)) &saddr->guarded_port32;
				dest_dsc = &daddr->guarded_port;

				disp = dsc->disposition;
				object = ip_to_object(CAST_MACH_NAME_TO_PORT(dsc->name));
				assert(dsc->flags == 0);
				assert(dsc->context == 0);

				dest_dsc->disposition = ipc_object_copyin_type(disp);
				dest_dsc->name = ip_object_to_port(object);
				dest_dsc->type = MACH_MSG_GUARDED_PORT_DESCRIPTOR;
				dest_dsc->flags = 0;

				if (!IO_VALID(object)) {
					break;
				}

				ipc_object_copyin_from_kernel(object, disp);

				/* CDY avoid circularity when the destination is also */
				/* the kernel.  This check should be changed into an  */
				/* assert when the new kobject model is in place since*/
				/* ports will not be used in kernel to kernel chats   */

				if (ip_object_to_port(remote)->ip_receiver != ipc_space_kernel) {
					if ((dest_dsc->disposition == MACH_MSG_TYPE_PORT_RECEIVE) &&
					    ipc_port_check_circularity(ip_object_to_port(object),
					    ip_object_to_port(remote))) {
						kmsg->ikm_header->msgh_bits |=
						    MACH_MSGH_BITS_CIRCULAR;
					}
				}
				break;
			}
#if MACH_ASSERT
			default:
				panic("ipc_kmsg_copyin_from_kernel: bad descriptor");
#endif /* MACH_ASSERT */
			}
		}
	}

	return MACH_MSG_SUCCESS;
}

#endif /* IKM_SUPPORT_LEGACY */
/*
 *	Routine:	ipc_kmsg_copyout_header
 *	Purpose:
 *		"Copy-out" port rights in the header of a message.
 *		Operates atomically; if it doesn't succeed the
 *		message header and the space are left untouched.
 *		If it does succeed the remote/local port fields
 *		contain port names instead of object pointers,
 *		and the bits field is updated.
 *	Returns:
 *		MACH_MSG_SUCCESS	Copied out port rights.
 *		MACH_RCV_INVALID_NOTIFY
 *			Notify is non-null and doesn't name a receive right.
 *			(Either KERN_INVALID_NAME or KERN_INVALID_RIGHT.)
 *		MACH_RCV_HEADER_ERROR|MACH_MSG_IPC_SPACE
 *			The space is dead.
 *		MACH_RCV_HEADER_ERROR|MACH_MSG_IPC_SPACE
 *			No room in space for another name.
 *		MACH_RCV_HEADER_ERROR|MACH_MSG_IPC_KERNEL
 *			Couldn't allocate memory for the reply port.
 *		MACH_RCV_HEADER_ERROR|MACH_MSG_IPC_KERNEL
 *			Couldn't allocate memory for the dead-name request.
 */
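/*
 * Entries for the reply and voucher names are reserved up front under the
 * space write lock (growing the table if needed) and then claimed, so the
 * header copyout can complete atomically once the lock is held.
 */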
mach_msg_return_t
ipc_kmsg_copyout_header(
	ipc_kmsg_t              kmsg,
	ipc_space_t             space,
	mach_msg_option_t       option)
{
	mach_msg_header_t *msg = kmsg->ikm_header;
	mach_msg_bits_t mbits = msg->msgh_bits;
	ipc_port_t dest = msg->msgh_remote_port;

	assert(IP_VALID(dest));

	/*
	 * While we still hold a reference on the received-from port,
	 * process all send-possible notifications we received along with
	 * the message.
	 */
	ipc_port_spnotify(dest);

	mach_msg_type_name_t dest_type = MACH_MSGH_BITS_REMOTE(mbits);
	mach_msg_type_name_t reply_type = MACH_MSGH_BITS_LOCAL(mbits);
	mach_msg_type_name_t voucher_type = MACH_MSGH_BITS_VOUCHER(mbits);
	ipc_port_t reply = msg->msgh_local_port;
	ipc_port_t release_reply_port = IP_NULL;
	mach_port_name_t dest_name, reply_name;

	ipc_port_t voucher = kmsg->ikm_voucher;
	ipc_port_t release_voucher_port = IP_NULL;
	mach_port_name_t voucher_name;
	kern_return_t kr;

	uint32_t entries_held = 0;
	boolean_t need_write_lock = FALSE;
	/*
	 * Reserve any potentially needed entries in the target space.
	 * We'll free any unused before unlocking the space.
	 */
	if (IP_VALID(reply)) {
		entries_held++;
		need_write_lock = TRUE;
	}
	if (IP_VALID(voucher)) {
		assert(voucher_type == MACH_MSG_TYPE_MOVE_SEND);

		if ((option & MACH_RCV_VOUCHER) != 0) {
			entries_held++;
		}
		need_write_lock = TRUE;
	}

	if (need_write_lock) {
		is_write_lock(space);

		while (entries_held) {
			if (!is_active(space)) {
				is_write_unlock(space);
				return MACH_RCV_HEADER_ERROR |
				       MACH_MSG_IPC_SPACE;
			}

			kr = ipc_entries_hold(space, entries_held);
			if (KERN_SUCCESS == kr) {
				break;
			}

			kr = ipc_entry_grow_table(space, ITS_SIZE_NONE);
			if (KERN_SUCCESS != kr) {
				return MACH_RCV_HEADER_ERROR |
				       MACH_MSG_IPC_SPACE;
			}
			/* space was unlocked and relocked - retry */
		}
		/* Handle reply port. */
		if (IP_VALID(reply)) {
			ipc_entry_t entry;

			/* Is there already an entry we can use? */
			if ((reply_type != MACH_MSG_TYPE_PORT_SEND_ONCE) &&
			    ipc_right_reverse(space, ip_to_object(reply), &reply_name, &entry)) {
				/* reply port is locked and active */
				assert(entry->ie_bits & MACH_PORT_TYPE_SEND_RECEIVE);
			} else {
				/* Is the reply port still active and allowed to be copied out? */
				if (!ip_active(reply) || !ip_label_check(space, reply, reply_type)) {
					/* clear the context value */
					reply->ip_reply_context = 0;

					release_reply_port = reply;
					reply_name = MACH_PORT_DEAD;
					goto done_with_reply;
				}

				/* claim a held entry for the reply port */
				assert(entries_held > 0);
				ipc_entry_claim(space, &reply_name, &entry);
				assert(IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_NONE);
				assert(entry->ie_object == IO_NULL);
				entry->ie_object = ip_to_object(reply);
			}

			/* space and reply port are locked and active */
			ip_reference(reply); /* hold onto the reply port */

			/*
			 * If the receiver would like to enforce strict reply
			 * semantics, and the message looks like it expects a reply,
			 * and contains a voucher, then link the context in the
			 * voucher with the reply port so that the next message sent
			 * to the reply port must come from a thread that has a
			 * matching context (voucher).
			 */
			if (enforce_strict_reply && MACH_RCV_WITH_STRICT_REPLY(option) && IP_VALID(voucher)) {
				if (ipc_kmsg_validate_reply_port_locked(reply, option) != KERN_SUCCESS) {
					/* if the receiver isn't happy with the reply port: fail the receive. */
					ipc_entry_dealloc(space, reply_name, entry);
					is_write_unlock(space);

					return MACH_RCV_INVALID_REPLY;
				}
				ipc_kmsg_link_reply_context_locked(reply, voucher);
			} else {
				/*
				 * if the receive did not choose to participate
				 * in the strict reply/RPC, then don't enforce
				 * anything (as this could lead to booby-trapped
				 * messages that kill the server).
				 */
				reply->ip_reply_context = 0;
			}

			kr = ipc_right_copyout(space, reply_name, entry,
			    reply_type, NULL, NULL, ip_to_object(reply));
			assert(kr == KERN_SUCCESS);
			/* reply port is unlocked */

			reply_name = CAST_MACH_PORT_TO_NAME(reply);
		}

done_with_reply:
		/* Handle voucher port. */
		if (voucher_type != MACH_MSGH_BITS_ZERO) {
			assert(voucher_type == MACH_MSG_TYPE_MOVE_SEND);

			if (!IP_VALID(voucher)) {
				if ((option & MACH_RCV_VOUCHER) == 0) {
					voucher_type = MACH_MSGH_BITS_ZERO;
				}
				voucher_name = MACH_PORT_NULL;
				goto done_with_voucher;
			}

			/* clear voucher from its hiding place back in the kmsg */
			kmsg->ikm_voucher = IP_NULL;

			if ((option & MACH_RCV_VOUCHER) != 0) {
				ipc_entry_t entry;

				if (ipc_right_reverse(space, ip_to_object(voucher),
				    &voucher_name, &entry)) {
					/* voucher port locked */
					assert(entry->ie_bits & MACH_PORT_TYPE_SEND);
				} else {
					assert(entries_held > 0);
					ipc_entry_claim(space, &voucher_name, &entry);
					assert(IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_NONE);
					assert(entry->ie_object == IO_NULL);
					entry->ie_object = ip_to_object(voucher);
				}

				/* space is locked and active */
				require_ip_active(voucher);
				assert(ip_kotype(voucher) == IKOT_VOUCHER);
				kr = ipc_right_copyout(space, voucher_name, entry,
				    MACH_MSG_TYPE_MOVE_SEND, NULL, NULL,
				    ip_to_object(voucher));
				/* voucher port is unlocked */
			} else {
				voucher_type = MACH_MSGH_BITS_ZERO;
				release_voucher_port = voucher;
				voucher_name = MACH_PORT_NULL;
			}
		} else {
			voucher_name = msg->msgh_voucher_port;
		}

done_with_voucher:

		is_write_unlock(space);
	} else {
		/*
		 * No reply or voucher port! This is an easy case.
		 * We only need to have the space locked
		 * when locking the destination.
		 */
		is_read_lock(space);
		if (!is_active(space)) {
			is_read_unlock(space);
			return MACH_RCV_HEADER_ERROR | MACH_MSG_IPC_SPACE;
		}

		is_read_unlock(space);

		reply_name = CAST_MACH_PORT_TO_NAME(reply);

		if (voucher_type != MACH_MSGH_BITS_ZERO) {
			assert(voucher_type == MACH_MSG_TYPE_MOVE_SEND);
			if ((option & MACH_RCV_VOUCHER) == 0) {
				voucher_type = MACH_MSGH_BITS_ZERO;
			}
			voucher_name = MACH_PORT_NULL;
		} else {
			voucher_name = msg->msgh_voucher_port;
		}
	}
	/*
	 * At this point, the space is unlocked and the destination
	 * port is locked.  (Lock taken while space was locked.)
	 * reply_name is taken care of; we still need dest_name.
	 * We still hold a ref for reply (if it is valid).
	 *
	 * If the space holds receive rights for the destination,
	 * we return its name for the right.  Otherwise the task
	 * managed to destroy or give away the receive right between
	 * receiving the message and this copyout.  If the destination
	 * is dead, return MACH_PORT_DEAD, and if the receive right
	 * exists somewhere else (another space, in transit)
	 * return MACH_PORT_NULL.
	 *
	 * Making this copyout operation atomic with the previous
	 * copyout of the reply port is a bit tricky.  If there was
	 * no real reply port (it wasn't IP_VALID) then this isn't
	 * an issue.  If the reply port was dead at copyout time,
	 * then we are OK, because if dest is dead we serialize
	 * after the death of both ports and if dest is alive
	 * we serialize after reply died but before dest's (later) death.
	 * So assume reply was alive when we copied it out.  If dest
	 * is alive, then we are OK because we serialize before
	 * the ports' deaths.  So assume dest is dead when we look at it.
	 * If reply dies/died after dest, then we are OK because
	 * we serialize after dest died but before reply dies.
	 * So the hard case is when reply is alive at copyout,
	 * dest is dead at copyout, and reply died before dest died.
	 * In this case pretend that dest is still alive, so
	 * we serialize while both ports are alive.
	 *
	 * Because the space lock is held across the copyout of reply
	 * and locking dest, the receive right for dest can't move
	 * in or out of the space while the copyouts happen, so
	 * that isn't an atomicity problem.  In the last hard case
	 * above, this implies that when dest is dead that the
	 * space couldn't have had receive rights for dest at
	 * the time reply was copied-out, so when we pretend
	 * that dest is still alive, we can return MACH_PORT_NULL.
	 *
	 * If dest == reply, then we have to make it look like
	 * either both copyouts happened before the port died,
	 * or both happened after the port died.  This special
	 * case works naturally if the timestamp comparison
	 * is done correctly.
	 */
	if (ip_active(dest)) {
		ipc_object_copyout_dest(space, ip_to_object(dest),
		    dest_type, &dest_name);
		/* dest is unlocked */
	} else {
		ipc_port_timestamp_t timestamp;

		timestamp = dest->ip_timestamp;

		if (IP_VALID(reply)) {
			if (ip_active(reply) ||
			    IP_TIMESTAMP_ORDER(timestamp,
			    reply->ip_timestamp)) {
				dest_name = MACH_PORT_DEAD;
			} else {
				dest_name = MACH_PORT_NULL;
			}
		} else {
			dest_name = MACH_PORT_DEAD;
		}
	}

	if (IP_VALID(reply)) {
		ip_release(reply);
	}

	if (IP_VALID(release_reply_port)) {
		if (reply_type == MACH_MSG_TYPE_PORT_SEND_ONCE) {
			ipc_port_release_sonce(release_reply_port);
		} else {
			ipc_port_release_send(release_reply_port);
		}
	}

	if ((option & MACH_RCV_VOUCHER) != 0) {
		KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_MSG_RECV) | DBG_FUNC_NONE,
		    VM_KERNEL_ADDRPERM((uintptr_t)kmsg),
		    (uintptr_t)kmsg->ikm_header->msgh_bits,
		    (uintptr_t)kmsg->ikm_header->msgh_id,
		    VM_KERNEL_ADDRPERM((uintptr_t)unsafe_convert_port_to_voucher(voucher)),
		    0);
	} else {
		KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_MSG_RECV_VOUCHER_REFUSED) | DBG_FUNC_NONE,
		    VM_KERNEL_ADDRPERM((uintptr_t)kmsg),
		    (uintptr_t)kmsg->ikm_header->msgh_bits,
		    (uintptr_t)kmsg->ikm_header->msgh_id,
		    VM_KERNEL_ADDRPERM((uintptr_t)unsafe_convert_port_to_voucher(voucher)),
		    0);
	}

	if (IP_VALID(release_voucher_port)) {
		ipc_port_release_send(release_voucher_port);
	}

	msg->msgh_bits = MACH_MSGH_BITS_SET(reply_type, dest_type,
	    voucher_type, mbits);
	msg->msgh_local_port = CAST_MACH_NAME_TO_PORT(dest_name);
	msg->msgh_remote_port = CAST_MACH_NAME_TO_PORT(reply_name);
	msg->msgh_voucher_port = voucher_name;

	return MACH_MSG_SUCCESS;
}
/*
 *	Routine:	ipc_kmsg_copyout_object
 *	Purpose:
 *		Copy-out a port right.  Always returns a name,
 *		even for unsuccessful return codes.  Always
 *		consumes the supplied object.
 *	Returns:
 *		MACH_MSG_SUCCESS	The space acquired the right
 *			(name is valid) or the object is dead (MACH_PORT_DEAD).
 *		MACH_MSG_IPC_SPACE	No room in space for the right,
 *			or the space is dead.  (Name is MACH_PORT_NULL.)
 *		MACH_MSG_IPC_KERNEL	Kernel resource shortage.
 *			(Name is MACH_PORT_NULL.)
 */
mach_msg_return_t
ipc_kmsg_copyout_object(
	ipc_space_t             space,
	ipc_object_t            object,
	mach_msg_type_name_t    msgt_name,
	mach_port_context_t     *context,
	mach_msg_guard_flags_t  *guard_flags,
	mach_port_name_t        *namep)
{
	kern_return_t kr;

	if (!IO_VALID(object)) {
		*namep = CAST_MACH_PORT_TO_NAME(object);
		return MACH_MSG_SUCCESS;
	}

	kr = ipc_object_copyout(space, object, msgt_name, context, guard_flags, namep);
	if (kr != KERN_SUCCESS) {
		ipc_object_destroy(object, msgt_name);

		if (kr == KERN_INVALID_CAPABILITY) {
			*namep = MACH_PORT_DEAD;
		} else {
			*namep = MACH_PORT_NULL;

			if (kr == KERN_RESOURCE_SHORTAGE) {
				return MACH_MSG_IPC_KERNEL;
			}
			return MACH_MSG_IPC_SPACE;
		}
	}

	return MACH_MSG_SUCCESS;
}
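/*
 * Copy-out one port descriptor carried in the body of a received message.
 * The user-visible form of the descriptor is written just before dest_dsc;
 * the body is rewritten back-to-front by the caller.
 */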
static mach_msg_descriptor_t *
ipc_kmsg_copyout_port_descriptor(mach_msg_descriptor_t *dsc,
    mach_msg_descriptor_t *dest_dsc,
    ipc_space_t space,
    mach_msg_return_t *mr)
{
	mach_port_t port;
	mach_port_name_t name;
	mach_msg_type_name_t disp;

	/* Copyout port right carried in the message */
	port = dsc->port.name;
	disp = dsc->port.disposition;
	*mr |= ipc_kmsg_copyout_object(space,
	    ip_to_object(port), disp, NULL, NULL, &name);

	if (current_task() == kernel_task) {
		mach_msg_port_descriptor_t *user_dsc = (typeof(user_dsc))dest_dsc;
		user_dsc--; // point to the start of this port descriptor
		bzero((void *)user_dsc, sizeof(*user_dsc));
		user_dsc->name = CAST_MACH_NAME_TO_PORT(name);
		user_dsc->disposition = disp;
		user_dsc->type = MACH_MSG_PORT_DESCRIPTOR;
		dest_dsc = (typeof(dest_dsc))user_dsc;
	} else {
		mach_msg_legacy_port_descriptor_t *user_dsc = (typeof(user_dsc))dest_dsc;
		user_dsc--; // point to the start of this port descriptor
		bzero((void *)user_dsc, sizeof(*user_dsc));
		user_dsc->name = CAST_MACH_PORT_TO_NAME(name);
		user_dsc->disposition = disp;
		user_dsc->type = MACH_MSG_PORT_DESCRIPTOR;
		dest_dsc = (typeof(dest_dsc))user_dsc;
	}

	return (mach_msg_descriptor_t *)dest_dsc;
}
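/*
 * Copy-out one out-of-line memory descriptor: the vm_map_copy_t carried in
 * the kernel descriptor is mapped or copied into the receiver's address
 * space, and the descriptor is rewritten in the user-visible layout for the
 * receiving task's pointer size.
 */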
mach_msg_descriptor_t *
ipc_kmsg_copyout_ool_descriptor(mach_msg_ool_descriptor_t *dsc, mach_msg_descriptor_t *user_dsc, int is_64bit, vm_map_t map, mach_msg_return_t *mr);
mach_msg_descriptor_t *
ipc_kmsg_copyout_ool_descriptor(mach_msg_ool_descriptor_t *dsc, mach_msg_descriptor_t *user_dsc, int is_64bit, vm_map_t map, mach_msg_return_t *mr)
{
	vm_map_copy_t copy;
	vm_map_address_t rcv_addr;
	mach_msg_copy_options_t copy_options;
	vm_map_size_t size;
	mach_msg_descriptor_type_t dsc_type;
	boolean_t misaligned = FALSE;

	//SKIP_PORT_DESCRIPTORS(saddr, sdsc_count);

	copy = (vm_map_copy_t)dsc->address;
	size = (vm_map_size_t)dsc->size;
	copy_options = dsc->copy;
	assert(copy_options != MACH_MSG_KALLOC_COPY_T);
	dsc_type = dsc->type;

	if (copy != VM_MAP_COPY_NULL) {
		kern_return_t kr;

		if (vm_map_copy_validate_size(map, copy, &size) == FALSE) {
			panic("Inconsistent OOL/copyout size on %p: expected %d, got %lld @%p",
			    dsc, dsc->size, (unsigned long long)copy->size, copy);
		}

		if ((copy->type == VM_MAP_COPY_ENTRY_LIST) &&
		    (trunc_page(copy->offset) != copy->offset ||
		    round_page(dsc->size) != dsc->size)) {
			misaligned = TRUE;
		}

		if (misaligned) {
			vm_map_address_t rounded_addr;
			vm_map_size_t rounded_size;
			vm_map_offset_t effective_page_mask, effective_page_size;

			effective_page_mask = VM_MAP_PAGE_MASK(map);
			effective_page_size = effective_page_mask + 1;

			rounded_size = vm_map_round_page(copy->offset + size, effective_page_mask) - vm_map_trunc_page(copy->offset, effective_page_mask);

			kr = vm_allocate_kernel(map, (vm_offset_t *)&rounded_addr, rounded_size, VM_FLAGS_ANYWHERE, 0);

			if (kr == KERN_SUCCESS) {
				/*
				 * vm_map_copy_overwrite does a full copy
				 * if size is too small to optimize.
				 * So we tried skipping the offset adjustment
				 * if we fail the 'size' test.
				 *
				 * if (size >= VM_MAP_COPY_OVERWRITE_OPTIMIZATION_THRESHOLD_PAGES * effective_page_size) {
				 *
				 * This resulted in leaked memory especially on the
				 * older watches (16k user - 4k kernel) because we
				 * would do a physical copy into the start of this
				 * rounded range but could leak part of it
				 * on deallocation if the 'size' being deallocated
				 * does not cover the full range. So instead we do
				 * the misalignment adjustment always so that on
				 * deallocation we will remove the full range.
				 */
				if ((rounded_addr & effective_page_mask) !=
				    (copy->offset & effective_page_mask)) {
					/*
					 * Need similar mis-alignment of source and destination...
					 */
					rounded_addr += (copy->offset & effective_page_mask);

					assert((rounded_addr & effective_page_mask) == (copy->offset & effective_page_mask));
				}
				rcv_addr = rounded_addr;

				kr = vm_map_copy_overwrite(map, rcv_addr, copy, size, FALSE);
			}
		} else {
			kr = vm_map_copyout_size(map, &rcv_addr, copy, size);
		}
		if (kr != KERN_SUCCESS) {
			if (kr == KERN_RESOURCE_SHORTAGE) {
				*mr |= MACH_MSG_VM_KERNEL;
			} else {
				*mr |= MACH_MSG_VM_SPACE;
			}
			vm_map_copy_discard(copy);
			rcv_addr = 0;
			size = 0;
		}
	} else {
		rcv_addr = 0;
		size = 0;
	}

	/*
	 * Now update the descriptor as the user would see it.
	 * This may require expanding the descriptor to the user
	 * visible size.  There is already space allocated for
	 * this in what naddr points to.
	 */
	if (current_task() == kernel_task) {
		mach_msg_ool_descriptor_t *user_ool_dsc = (typeof(user_ool_dsc))user_dsc;
		user_ool_dsc--;
		bzero((void *)user_ool_dsc, sizeof(*user_ool_dsc));

		user_ool_dsc->address = (void *)(uintptr_t)rcv_addr;
		user_ool_dsc->deallocate = (copy_options == MACH_MSG_VIRTUAL_COPY) ?
		    TRUE : FALSE;
		user_ool_dsc->copy = copy_options;
		user_ool_dsc->type = dsc_type;
		user_ool_dsc->size = (mach_msg_size_t)size;

		user_dsc = (typeof(user_dsc))user_ool_dsc;
	} else if (is_64bit) {
		mach_msg_ool_descriptor64_t *user_ool_dsc = (typeof(user_ool_dsc))user_dsc;
		user_ool_dsc--;
		bzero((void *)user_ool_dsc, sizeof(*user_ool_dsc));

		user_ool_dsc->address = rcv_addr;
		user_ool_dsc->deallocate = (copy_options == MACH_MSG_VIRTUAL_COPY) ?
		    TRUE : FALSE;
		user_ool_dsc->copy = copy_options;
		user_ool_dsc->type = dsc_type;
		user_ool_dsc->size = (mach_msg_size_t)size;

		user_dsc = (typeof(user_dsc))user_ool_dsc;
	} else {
		mach_msg_ool_descriptor32_t *user_ool_dsc = (typeof(user_ool_dsc))user_dsc;
		user_ool_dsc--;
		bzero((void *)user_ool_dsc, sizeof(*user_ool_dsc));

		user_ool_dsc->address = CAST_DOWN_EXPLICIT(uint32_t, rcv_addr);
		user_ool_dsc->size = (mach_msg_size_t)size;
		user_ool_dsc->deallocate = (copy_options == MACH_MSG_VIRTUAL_COPY) ?
		    TRUE : FALSE;
		user_ool_dsc->copy = copy_options;
		user_ool_dsc->type = dsc_type;

		user_dsc = (typeof(user_dsc))user_ool_dsc;
	}

	return user_dsc;
}
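/*
 * Copy-out one out-of-line ports descriptor: translate each port right in
 * the kernel-held array into a name in the receiving space, copy the
 * resulting name array out to newly allocated user memory, and rewrite the
 * descriptor in the user-visible layout.
 */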
mach_msg_descriptor_t *
ipc_kmsg_copyout_ool_ports_descriptor(mach_msg_ool_ports_descriptor_t *dsc,
    mach_msg_descriptor_t *user_dsc,
    int is_64bit,
    vm_map_t map,
    ipc_space_t space,
    ipc_kmsg_t kmsg,
    mach_msg_return_t *mr);
mach_msg_descriptor_t *
ipc_kmsg_copyout_ool_ports_descriptor(mach_msg_ool_ports_descriptor_t *dsc,
    mach_msg_descriptor_t *user_dsc,
    int is_64bit,
    vm_map_t map,
    ipc_space_t space,
    ipc_kmsg_t kmsg,
    mach_msg_return_t *mr)
{
	mach_vm_offset_t rcv_addr = 0;
	mach_msg_type_name_t disp;
	mach_msg_type_number_t count, i;
	vm_size_t ports_length, names_length;

	mach_msg_copy_options_t copy_options = MACH_MSG_VIRTUAL_COPY;

	//SKIP_PORT_DESCRIPTORS(saddr, sdsc_count);

	count = dsc->count;
	disp = dsc->disposition;
	ports_length = count * sizeof(mach_port_t);
	names_length = count * sizeof(mach_port_name_t);

	if (ports_length != 0 && dsc->address != 0) {
		/*
		 * Check to see if there is an overwrite descriptor
		 * specified in the scatter list for this ool data.
		 * The descriptor has already been verified.
		 */
		if (saddr != MACH_MSG_DESCRIPTOR_NULL) {
			if (differs) {
				OTHER_OOL_DESCRIPTOR *scatter_dsc;

				scatter_dsc = (OTHER_OOL_DESCRIPTOR *)saddr;
				rcv_addr = (mach_vm_offset_t) scatter_dsc->address;
				copy_options = scatter_dsc->copy;
			} else {
				mach_msg_ool_descriptor_t *scatter_dsc;

				scatter_dsc = &saddr->out_of_line;
				rcv_addr = CAST_USER_ADDR_T(scatter_dsc->address);
				copy_options = scatter_dsc->copy;
			}
			INCREMENT_SCATTER(saddr, sdsc_count, differs);
		}

		if (copy_options == MACH_MSG_VIRTUAL_COPY) {
			/*
			 * Dynamically allocate the region
			 */
			vm_tag_t tag;
			if (vm_kernel_map_is_kernel(map)) {
				tag = VM_KERN_MEMORY_IPC;
			} else {
				tag = VM_MEMORY_MACH_MSG;
			}

			kern_return_t kr;
			if ((kr = mach_vm_allocate_kernel(map, &rcv_addr,
			    (mach_vm_size_t)names_length,
			    VM_FLAGS_ANYWHERE, tag)) != KERN_SUCCESS) {
				ipc_kmsg_clean_body(kmsg, 1, (mach_msg_descriptor_t *)dsc);

				if (kr == KERN_RESOURCE_SHORTAGE) {
					*mr |= MACH_MSG_VM_KERNEL;
				} else {
					*mr |= MACH_MSG_VM_SPACE;
				}
			}
		}
	}

	/*
	 * Handle the port rights and copy out the names
	 * for those rights out to user-space.
	 */
	if (rcv_addr != 0) {
		ipc_object_t *objects = (ipc_object_t *) dsc->address;
		mach_port_name_t *names = (mach_port_name_t *) dsc->address;

		/* copyout port rights carried in the message */

		for (i = 0; i < count; i++) {
			ipc_object_t object = objects[i];

			*mr |= ipc_kmsg_copyout_object(space, object,
			    disp, NULL, NULL, &names[i]);
		}

		/* copyout to memory allocated above */
		void *data = dsc->address;
		if (copyoutmap(map, data, rcv_addr, names_length) != KERN_SUCCESS) {
			*mr |= MACH_MSG_VM_SPACE;
		}
		kfree(data, ports_length);
	}

	/*
	 * Now update the descriptor based on the information
	 * in the scatter descriptor.
	 */
	if (current_task() == kernel_task) {
		mach_msg_ool_ports_descriptor_t *user_ool_dsc = (typeof(user_ool_dsc))user_dsc;
		user_ool_dsc--;
		bzero((void *)user_ool_dsc, sizeof(*user_ool_dsc));

		user_ool_dsc->address = (void *)(uintptr_t)rcv_addr;
		user_ool_dsc->deallocate = (copy_options == MACH_MSG_VIRTUAL_COPY) ?
		    TRUE : FALSE;
		user_ool_dsc->copy = copy_options;
		user_ool_dsc->disposition = disp;
		user_ool_dsc->type = MACH_MSG_OOL_PORTS_DESCRIPTOR;
		user_ool_dsc->count = count;

		user_dsc = (typeof(user_dsc))user_ool_dsc;
	} else if (is_64bit) {
		mach_msg_ool_ports_descriptor64_t *user_ool_dsc = (typeof(user_ool_dsc))user_dsc;
		user_ool_dsc--;
		bzero((void *)user_ool_dsc, sizeof(*user_ool_dsc));

		user_ool_dsc->address = rcv_addr;
		user_ool_dsc->deallocate = (copy_options == MACH_MSG_VIRTUAL_COPY) ?
		    TRUE : FALSE;
		user_ool_dsc->copy = copy_options;
		user_ool_dsc->disposition = disp;
		user_ool_dsc->type = MACH_MSG_OOL_PORTS_DESCRIPTOR;
		user_ool_dsc->count = count;

		user_dsc = (typeof(user_dsc))user_ool_dsc;
	} else {
		mach_msg_ool_ports_descriptor32_t *user_ool_dsc = (typeof(user_ool_dsc))user_dsc;
		user_ool_dsc--;
		bzero((void *)user_ool_dsc, sizeof(*user_ool_dsc));

		user_ool_dsc->address = CAST_DOWN_EXPLICIT(uint32_t, rcv_addr);
		user_ool_dsc->count = count;
		user_ool_dsc->deallocate = (copy_options == MACH_MSG_VIRTUAL_COPY) ?
		    TRUE : FALSE;
		user_ool_dsc->copy = copy_options;
		user_ool_dsc->disposition = disp;
		user_ool_dsc->type = MACH_MSG_OOL_PORTS_DESCRIPTOR;

		user_dsc = (typeof(user_dsc))user_ool_dsc;
	}

	return user_dsc;
}
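/*
 * Copy-out one guarded port descriptor.  If the receiver did not opt in with
 * MACH_RCV_GUARDED_DESC (or is the kernel task), the right is destroyed and a
 * plain port descriptor naming MACH_PORT_NULL is written instead; otherwise
 * the right is copied out together with its guard flags and context.
 */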
static mach_msg_descriptor_t *
ipc_kmsg_copyout_guarded_port_descriptor(
	mach_msg_guarded_port_descriptor_t *dsc,
	mach_msg_descriptor_t *dest_dsc,
	int is_64bit,
	__unused ipc_kmsg_t kmsg,
	ipc_space_t space,
	mach_msg_option_t option,
	mach_msg_return_t *mr)
{
	mach_port_t port;
	mach_port_name_t name = MACH_PORT_NULL;
	mach_msg_type_name_t disp;
	mach_msg_guard_flags_t guard_flags;
	mach_port_context_t context;

	/* Copyout port right carried in the message */
	port = dsc->name;
	disp = dsc->disposition;
	guard_flags = dsc->flags;
	context = 0;

	/* Currently kernel_task doesn't support receiving guarded port descriptors */
	struct knote *kn = current_thread()->ith_knote;
	if ((kn != ITH_KNOTE_PSEUDO) && (((option & MACH_RCV_GUARDED_DESC) == 0) ||
	    (current_task() == kernel_task))) {
#if DEVELOPMENT || DEBUG
		if (current_task() != kernel_task) {
			/*
			 * Simulated crash needed for debugging, notifies the receiver to opt into receiving
			 * guarded descriptors.
			 */
			mach_port_guard_exception(current_thread()->ith_receiver_name, 0, 0, kGUARD_EXC_RCV_GUARDED_DESC);
		}
#endif
		KDBG(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_DESTROY_GUARDED_DESC), current_thread()->ith_receiver_name,
		    VM_KERNEL_ADDRPERM(port), disp, guard_flags);
		ipc_object_destroy(ip_to_object(port), disp);
		mach_msg_legacy_port_descriptor_t *user_dsc = (typeof(user_dsc))dest_dsc;
		user_dsc--; // point to the start of this port descriptor
		bzero((void *)user_dsc, sizeof(*user_dsc));
		user_dsc->name = name;
		user_dsc->disposition = disp;
		user_dsc->type = MACH_MSG_PORT_DESCRIPTOR;
		dest_dsc = (typeof(dest_dsc))user_dsc;
	} else {
		*mr |= ipc_kmsg_copyout_object(space,
		    ip_to_object(port), disp, &context, &guard_flags, &name);

		if (!is_64bit) {
			mach_msg_guarded_port_descriptor32_t *user_dsc = (typeof(user_dsc))dest_dsc;
			user_dsc--; // point to the start of this port descriptor
			bzero((void *)user_dsc, sizeof(*user_dsc));
			user_dsc->name = name;
			user_dsc->flags = guard_flags;
			user_dsc->disposition = disp;
			user_dsc->type = MACH_MSG_GUARDED_PORT_DESCRIPTOR;
			user_dsc->context = CAST_DOWN_EXPLICIT(uint32_t, context);
			dest_dsc = (typeof(dest_dsc))user_dsc;
		} else {
			mach_msg_guarded_port_descriptor64_t *user_dsc = (typeof(user_dsc))dest_dsc;
			user_dsc--; // point to the start of this port descriptor
			bzero((void *)user_dsc, sizeof(*user_dsc));
			user_dsc->name = name;
			user_dsc->flags = guard_flags;
			user_dsc->disposition = disp;
			user_dsc->type = MACH_MSG_GUARDED_PORT_DESCRIPTOR;
			user_dsc->context = context;
			dest_dsc = (typeof(dest_dsc))user_dsc;
		}
	}

	return (mach_msg_descriptor_t *)dest_dsc;
}
/*
 *	Routine:	ipc_kmsg_copyout_body
 *	Purpose:
 *		"Copy-out" port rights and out-of-line memory
 *		in the body of a message.
 *
 *		The error codes are a combination of special bits.
 *		The copyout proceeds despite errors.
 *	Returns:
 *		MACH_MSG_SUCCESS	Successful copyout.
 *		MACH_MSG_IPC_SPACE	No room for port right in name space.
 *		MACH_MSG_VM_SPACE	No room for memory in address space.
 *		MACH_MSG_IPC_KERNEL	Resource shortage handling port right.
 *		MACH_MSG_VM_KERNEL	Resource shortage handling memory.
 *		MACH_MSG_INVALID_RT_DESCRIPTOR Descriptor incompatible with RT
 */
mach_msg_return_t
ipc_kmsg_copyout_body(
	ipc_kmsg_t              kmsg,
	ipc_space_t             space,
	vm_map_t                map,
	mach_msg_option_t       option,
	mach_msg_body_t         *slist)
{
	mach_msg_body_t *body;
	mach_msg_descriptor_t *kern_dsc, *user_dsc;
	mach_msg_descriptor_t *saddr;
	mach_msg_type_number_t dsc_count, sdsc_count;
	int i;
	mach_msg_return_t mr = MACH_MSG_SUCCESS;
	boolean_t is_task_64bit = (map->max_offset > VM_MAX_ADDRESS);

	body = (mach_msg_body_t *) (kmsg->ikm_header + 1);
	dsc_count = body->msgh_descriptor_count;
	kern_dsc = (mach_msg_descriptor_t *) (body + 1);
	/* Point user_dsc just after the end of all the descriptors */
	user_dsc = &kern_dsc[dsc_count];

	/* Do scatter list setup */
	if (slist != MACH_MSG_BODY_NULL) {
		panic("Scatter lists disabled");
		saddr = (mach_msg_descriptor_t *) (slist + 1);
		sdsc_count = slist->msgh_descriptor_count;
	} else {
		saddr = MACH_MSG_DESCRIPTOR_NULL;
		sdsc_count = 0;
	}

	/* Now process the descriptors - in reverse order */
	for (i = dsc_count - 1; i >= 0; i--) {
		switch (kern_dsc[i].type.type) {
		case MACH_MSG_PORT_DESCRIPTOR:
			user_dsc = ipc_kmsg_copyout_port_descriptor(&kern_dsc[i], user_dsc, space, &mr);
			break;
		case MACH_MSG_OOL_VOLATILE_DESCRIPTOR:
		case MACH_MSG_OOL_DESCRIPTOR:
			user_dsc = ipc_kmsg_copyout_ool_descriptor(
				(mach_msg_ool_descriptor_t *)&kern_dsc[i], user_dsc, is_task_64bit, map, &mr);
			break;
		case MACH_MSG_OOL_PORTS_DESCRIPTOR:
			user_dsc = ipc_kmsg_copyout_ool_ports_descriptor(
				(mach_msg_ool_ports_descriptor_t *)&kern_dsc[i], user_dsc, is_task_64bit, map, space, kmsg, &mr);
			break;
		case MACH_MSG_GUARDED_PORT_DESCRIPTOR:
			user_dsc = ipc_kmsg_copyout_guarded_port_descriptor(
				(mach_msg_guarded_port_descriptor_t *)&kern_dsc[i], user_dsc, is_task_64bit, kmsg, space, option, &mr);
			break;
		default:
			panic("untyped IPC copyout body: invalid message descriptor");
		}
	}

	if (user_dsc != kern_dsc) {
		vm_offset_t dsc_adjust = (vm_offset_t)user_dsc - (vm_offset_t)kern_dsc;
		memmove((char *)((vm_offset_t)kmsg->ikm_header + dsc_adjust), kmsg->ikm_header, sizeof(mach_msg_base_t));
		kmsg->ikm_header = (mach_msg_header_t *)((vm_offset_t)kmsg->ikm_header + dsc_adjust);
		/* Update the message size for the smaller user representation */
		kmsg->ikm_header->msgh_size -= (mach_msg_size_t)dsc_adjust;
	}

	return mr;
}
/*
 *	Routine:	ipc_kmsg_copyout_size
 *	Purpose:
 *		Compute the size of the message as copied out to the given
 *		map. If the destination map's pointers are a different size
 *		than the kernel's, we have to allow for expansion/
 *		contraction of the descriptors as appropriate.
 *	Returns:
 *		size of the message as it would be received.
 */
mach_msg_size_t
ipc_kmsg_copyout_size(
	ipc_kmsg_t      kmsg,
	vm_map_t        map)
{
	mach_msg_size_t send_size;

	send_size = kmsg->ikm_header->msgh_size;

	boolean_t is_task_64bit = (map->max_offset > VM_MAX_ADDRESS);

#if defined(__LP64__)
	send_size -= LEGACY_HEADER_SIZE_DELTA;
#endif

	if (kmsg->ikm_header->msgh_bits & MACH_MSGH_BITS_COMPLEX) {
		mach_msg_body_t *body;
		mach_msg_descriptor_t *saddr, *eaddr;

		body = (mach_msg_body_t *) (kmsg->ikm_header + 1);
		saddr = (mach_msg_descriptor_t *) (body + 1);
		eaddr = saddr + body->msgh_descriptor_count;

		for (; saddr < eaddr; saddr++) {
			switch (saddr->type.type) {
			case MACH_MSG_OOL_DESCRIPTOR:
			case MACH_MSG_OOL_VOLATILE_DESCRIPTOR:
			case MACH_MSG_OOL_PORTS_DESCRIPTOR:
			case MACH_MSG_GUARDED_PORT_DESCRIPTOR:
				if (!is_task_64bit) {
					send_size -= DESC_SIZE_ADJUSTMENT;
				}
				break;
			case MACH_MSG_PORT_DESCRIPTOR:
				send_size -= DESC_SIZE_ADJUSTMENT;
				break;
			default:
				break;
			}
		}
	}
	return send_size;
}
/*
 *	Routine:	ipc_kmsg_copyout
 *	Purpose:
 *		"Copy-out" port rights and out-of-line memory
 *		in the message.
 *	Returns:
 *		MACH_MSG_SUCCESS	Copied out all rights and memory.
 *		MACH_RCV_HEADER_ERROR + special bits
 *			Rights and memory in the message are intact.
 *		MACH_RCV_BODY_ERROR + special bits
 *			The message header was successfully copied out.
 *			As much of the body was handled as possible.
 */
mach_msg_return_t
ipc_kmsg_copyout(
	ipc_kmsg_t              kmsg,
	ipc_space_t             space,
	vm_map_t                map,
	mach_msg_body_t         *slist,
	mach_msg_option_t       option)
{
	mach_msg_return_t mr;

	ikm_validate_sig(kmsg);

	mr = ipc_kmsg_copyout_header(kmsg, space, option);
	if (mr != MACH_MSG_SUCCESS) {
		return mr;
	}

	if (kmsg->ikm_header->msgh_bits & MACH_MSGH_BITS_COMPLEX) {
		mr = ipc_kmsg_copyout_body(kmsg, space, map, option, slist);

		if (mr != MACH_MSG_SUCCESS) {
			mr |= MACH_RCV_BODY_ERROR;
		}
	}

	return mr;
}
/*
 *	Routine:	ipc_kmsg_copyout_pseudo
 *	Purpose:
 *		Does a pseudo-copyout of the message.
 *		This is like a regular copyout, except
 *		that the ports in the header are handled
 *		as if they are in the body.  They aren't reversed.
 *
 *		The error codes are a combination of special bits.
 *		The copyout proceeds despite errors.
 *	Returns:
 *		MACH_MSG_SUCCESS	Successful copyout.
 *		MACH_MSG_IPC_SPACE	No room for port right in name space.
 *		MACH_MSG_VM_SPACE	No room for memory in address space.
 *		MACH_MSG_IPC_KERNEL	Resource shortage handling port right.
 *		MACH_MSG_VM_KERNEL	Resource shortage handling memory.
 */
mach_msg_return_t
ipc_kmsg_copyout_pseudo(
	ipc_kmsg_t              kmsg,
	ipc_space_t             space,
	vm_map_t                map,
	mach_msg_body_t         *slist)
{
	mach_msg_bits_t mbits = kmsg->ikm_header->msgh_bits;
	ipc_object_t dest = ip_to_object(kmsg->ikm_header->msgh_remote_port);
	ipc_object_t reply = ip_to_object(kmsg->ikm_header->msgh_local_port);
	ipc_object_t voucher = ip_to_object(kmsg->ikm_voucher);
	mach_msg_type_name_t dest_type = MACH_MSGH_BITS_REMOTE(mbits);
	mach_msg_type_name_t reply_type = MACH_MSGH_BITS_LOCAL(mbits);
	mach_msg_type_name_t voucher_type = MACH_MSGH_BITS_VOUCHER(mbits);
	mach_port_name_t voucher_name = kmsg->ikm_header->msgh_voucher_port;
	mach_port_name_t dest_name, reply_name;
	mach_msg_return_t mr;

	/* Set ith_knote to ITH_KNOTE_PSEUDO */
	current_thread()->ith_knote = ITH_KNOTE_PSEUDO;

	ikm_validate_sig(kmsg);

	assert(IO_VALID(dest));

#if 0
	/*
	 * If we did this here, it looks like we wouldn't need the undo logic
	 * at the end of ipc_kmsg_send() in the error cases.  Not sure which
	 * would be more elegant to keep.
	 */
	ipc_importance_clean(kmsg);
#else
	/* just assert it is already clean */
	ipc_importance_assert_clean(kmsg);
#endif

	mr = (ipc_kmsg_copyout_object(space, dest, dest_type, NULL, NULL, &dest_name) |
	    ipc_kmsg_copyout_object(space, reply, reply_type, NULL, NULL, &reply_name));

	kmsg->ikm_header->msgh_bits = mbits & MACH_MSGH_BITS_USER;
	kmsg->ikm_header->msgh_remote_port = CAST_MACH_NAME_TO_PORT(dest_name);
	kmsg->ikm_header->msgh_local_port = CAST_MACH_NAME_TO_PORT(reply_name);

	if (IO_VALID(voucher)) {
		assert(voucher_type == MACH_MSG_TYPE_MOVE_SEND);

		kmsg->ikm_voucher = IP_NULL;
		mr |= ipc_kmsg_copyout_object(space, voucher, voucher_type, NULL, NULL, &voucher_name);
		kmsg->ikm_header->msgh_voucher_port = voucher_name;
	}

	if (mbits & MACH_MSGH_BITS_COMPLEX) {
		mr |= ipc_kmsg_copyout_body(kmsg, space, map, 0, slist);
	}

	return mr;
}
/*
 *	Routine:	ipc_kmsg_copyout_dest
 *	Purpose:
 *		Copies out the destination port in the message.
 *		Destroys all other rights and memory in the message.
 */
void
ipc_kmsg_copyout_dest(
	ipc_kmsg_t      kmsg,
	ipc_space_t     space)
{
	mach_msg_bits_t mbits;
	ipc_object_t dest;
	ipc_object_t reply;
	ipc_object_t voucher;
	mach_msg_type_name_t dest_type;
	mach_msg_type_name_t reply_type;
	mach_msg_type_name_t voucher_type;
	mach_port_name_t dest_name, reply_name, voucher_name;

	ikm_validate_sig(kmsg);

	mbits = kmsg->ikm_header->msgh_bits;
	dest = ip_to_object(kmsg->ikm_header->msgh_remote_port);
	reply = ip_to_object(kmsg->ikm_header->msgh_local_port);
	voucher = ip_to_object(kmsg->ikm_voucher);
	voucher_name = kmsg->ikm_header->msgh_voucher_port;
	dest_type = MACH_MSGH_BITS_REMOTE(mbits);
	reply_type = MACH_MSGH_BITS_LOCAL(mbits);
	voucher_type = MACH_MSGH_BITS_VOUCHER(mbits);

	assert(IO_VALID(dest));

	ipc_importance_assert_clean(kmsg);

	if (io_active(dest)) {
		ipc_object_copyout_dest(space, dest, dest_type, &dest_name);
		/* dest is unlocked */
	} else {
		dest_name = MACH_PORT_DEAD;
	}

	if (IO_VALID(reply)) {
		ipc_object_destroy(reply, reply_type);
		reply_name = MACH_PORT_NULL;
	} else {
		reply_name = CAST_MACH_PORT_TO_NAME(reply);
	}

	if (IO_VALID(voucher)) {
		assert(voucher_type == MACH_MSG_TYPE_MOVE_SEND);

		kmsg->ikm_voucher = IP_NULL;
		ipc_object_destroy(voucher, voucher_type);
		voucher_name = MACH_PORT_NULL;
	}

	kmsg->ikm_header->msgh_bits = MACH_MSGH_BITS_SET(reply_type, dest_type,
	    voucher_type, mbits);
	kmsg->ikm_header->msgh_local_port = CAST_MACH_NAME_TO_PORT(dest_name);
	kmsg->ikm_header->msgh_remote_port = CAST_MACH_NAME_TO_PORT(reply_name);
	kmsg->ikm_header->msgh_voucher_port = voucher_name;

	if (mbits & MACH_MSGH_BITS_COMPLEX) {
		mach_msg_body_t *body;

		body = (mach_msg_body_t *) (kmsg->ikm_header + 1);
		ipc_kmsg_clean_body(kmsg, body->msgh_descriptor_count,
		    (mach_msg_descriptor_t *)(body + 1));
	}
}
/*
 *	Routine:	ipc_kmsg_copyout_to_kernel
 *	Purpose:
 *		Copies out the destination and reply ports in the message.
 *		Leaves all other rights and memory in the message alone.
 *
 *		Derived from ipc_kmsg_copyout_dest.
 *		Use by mach_msg_rpc_from_kernel (which used to use copyout_dest).
 *		We really do want to save rights and memory.
 */
void
ipc_kmsg_copyout_to_kernel(
	ipc_kmsg_t      kmsg,
	ipc_space_t     space)
{
	ipc_object_t dest;
	ipc_port_t reply;
	mach_msg_type_name_t dest_type;
	mach_msg_type_name_t reply_type;
	mach_port_name_t dest_name;

	ikm_validate_sig(kmsg);

	dest = ip_to_object(kmsg->ikm_header->msgh_remote_port);
	reply = kmsg->ikm_header->msgh_local_port;
	dest_type = MACH_MSGH_BITS_REMOTE(kmsg->ikm_header->msgh_bits);
	reply_type = MACH_MSGH_BITS_LOCAL(kmsg->ikm_header->msgh_bits);

	assert(IO_VALID(dest));

	if (io_active(dest)) {
		ipc_object_copyout_dest(space, dest, dest_type, &dest_name);
		/* dest is unlocked */
	} else {
		dest_name = MACH_PORT_DEAD;
	}

	/*
	 * While MIG kernel users don't receive vouchers, the
	 * msgh_voucher_port field is intended to be round-tripped through the
	 * kernel if there is no voucher disposition set. Here we check for a
	 * non-zero voucher disposition, and consume the voucher send right as
	 * there is no possible way to specify MACH_RCV_VOUCHER semantics.
	 */
	mach_msg_type_name_t voucher_type;
	voucher_type = MACH_MSGH_BITS_VOUCHER(kmsg->ikm_header->msgh_bits);
	if (voucher_type != MACH_MSGH_BITS_ZERO) {
		assert(voucher_type == MACH_MSG_TYPE_MOVE_SEND);
		/*
		 * someone managed to send this kernel routine a message with
		 * a voucher in it. Cleanup the reference in
		 * kmsg->ikm_voucher.
		 */
		if (IP_VALID(kmsg->ikm_voucher)) {
			ipc_port_release_send(kmsg->ikm_voucher);
		}
		kmsg->ikm_voucher = IP_NULL;
		kmsg->ikm_header->msgh_voucher_port = 0;
	}

	kmsg->ikm_header->msgh_bits =
	    (MACH_MSGH_BITS_OTHER(kmsg->ikm_header->msgh_bits) |
	    MACH_MSGH_BITS(reply_type, dest_type));
	kmsg->ikm_header->msgh_local_port = CAST_MACH_NAME_TO_PORT(dest_name);
	kmsg->ikm_header->msgh_remote_port = reply;
}
#if IKM_SUPPORT_LEGACY
void
ipc_kmsg_copyout_to_kernel_legacy(
	ipc_kmsg_t      kmsg,
	ipc_space_t     space)
{
	ipc_object_t dest;
	ipc_port_t reply;
	mach_msg_type_name_t dest_type;
	mach_msg_type_name_t reply_type;
	mach_port_name_t dest_name;

	ikm_validate_sig(kmsg);

	dest = ip_to_object(kmsg->ikm_header->msgh_remote_port);
	reply = kmsg->ikm_header->msgh_local_port;
	dest_type = MACH_MSGH_BITS_REMOTE(kmsg->ikm_header->msgh_bits);
	reply_type = MACH_MSGH_BITS_LOCAL(kmsg->ikm_header->msgh_bits);

	assert(IO_VALID(dest));

	if (io_active(dest)) {
		ipc_object_copyout_dest(space, dest, dest_type, &dest_name);
		/* dest is unlocked */
	} else {
		dest_name = MACH_PORT_DEAD;
	}

	mach_msg_type_name_t voucher_type;
	voucher_type = MACH_MSGH_BITS_VOUCHER(kmsg->ikm_header->msgh_bits);
	if (voucher_type != MACH_MSGH_BITS_ZERO) {
		assert(voucher_type == MACH_MSG_TYPE_MOVE_SEND);
		assert(IP_VALID(kmsg->ikm_voucher));
		/*
		 * someone managed to send this kernel routine a message with
		 * a voucher in it. Cleanup the reference in
		 * kmsg->ikm_voucher.
		 */
		ipc_port_release_send(kmsg->ikm_voucher);
		kmsg->ikm_voucher = IP_NULL;
		kmsg->ikm_header->msgh_voucher_port = 0;
	}

	kmsg->ikm_header->msgh_bits =
	    (MACH_MSGH_BITS_OTHER(kmsg->ikm_header->msgh_bits) |
	    MACH_MSGH_BITS(reply_type, dest_type));
	kmsg->ikm_header->msgh_local_port = CAST_MACH_NAME_TO_PORT(dest_name);
	kmsg->ikm_header->msgh_remote_port = reply;

	mach_msg_descriptor_t *saddr;
	mach_msg_legacy_descriptor_t *daddr;
	mach_msg_type_number_t i, count = ((mach_msg_base_t *)kmsg->ikm_header)->body.msgh_descriptor_count;
	saddr = (mach_msg_descriptor_t *) (((mach_msg_base_t *)kmsg->ikm_header) + 1);
	saddr = &saddr[count - 1];
	daddr = (mach_msg_legacy_descriptor_t *)&saddr[count];

	vm_offset_t dsc_adjust = 0;

	for (i = 0; i < count; i++, saddr--, daddr--) {
		switch (saddr->type.type) {
		case MACH_MSG_PORT_DESCRIPTOR: {
			mach_msg_port_descriptor_t *dsc = &saddr->port;
			mach_msg_legacy_port_descriptor_t *dest_dsc = &daddr->port;

			mach_port_t name = dsc->name;
			mach_msg_type_name_t disposition = dsc->disposition;

			dest_dsc->name = CAST_MACH_PORT_TO_NAME(name);
			dest_dsc->disposition = disposition;
			dest_dsc->type = MACH_MSG_PORT_DESCRIPTOR;
			break;
		}
		case MACH_MSG_OOL_VOLATILE_DESCRIPTOR:
		case MACH_MSG_OOL_DESCRIPTOR: {
			/* The sender should supply ready-made memory, i.e. a vm_map_copy_t
			 * so we don't need to do anything special. */

			mach_msg_ool_descriptor_t *source_dsc = (typeof(source_dsc)) &saddr->out_of_line;

			mach_msg_ool_descriptor32_t *dest_dsc = &daddr->out_of_line32;

			vm_offset_t address = (vm_offset_t)source_dsc->address;
			vm_size_t size = source_dsc->size;
			boolean_t deallocate = source_dsc->deallocate;
			mach_msg_copy_options_t copy = source_dsc->copy;
			mach_msg_descriptor_type_t type = source_dsc->type;

			dest_dsc->address = address;
			dest_dsc->size = size;
			dest_dsc->deallocate = deallocate;
			dest_dsc->copy = copy;
			dest_dsc->type = type;
			break;
		}
		case MACH_MSG_OOL_PORTS_DESCRIPTOR: {
			mach_msg_ool_ports_descriptor_t *source_dsc = (typeof(source_dsc)) &saddr->ool_ports;

			mach_msg_ool_ports_descriptor32_t *dest_dsc = &daddr->ool_ports32;

			vm_offset_t address = (vm_offset_t)source_dsc->address;
			vm_size_t port_count = source_dsc->count;
			boolean_t deallocate = source_dsc->deallocate;
			mach_msg_copy_options_t copy = source_dsc->copy;
			mach_msg_descriptor_type_t type = source_dsc->type;

			dest_dsc->address = address;
			dest_dsc->count = port_count;
			dest_dsc->deallocate = deallocate;
			dest_dsc->copy = copy;
			dest_dsc->type = type;
			break;
		}
		case MACH_MSG_GUARDED_PORT_DESCRIPTOR: {
			mach_msg_guarded_port_descriptor_t *source_dsc = (typeof(source_dsc)) &saddr->guarded_port;
			mach_msg_guarded_port_descriptor32_t *dest_dsc = &daddr->guarded_port32;

			dest_dsc->name = CAST_MACH_PORT_TO_NAME(source_dsc->name);
			dest_dsc->disposition = source_dsc->disposition;
			dest_dsc->flags = 0;
			dest_dsc->type = MACH_MSG_GUARDED_PORT_DESCRIPTOR;
			dest_dsc->context = 0;
			break;
		}
#if MACH_ASSERT
		default:
			panic("ipc_kmsg_copyout_to_kernel_legacy: bad descriptor");
#endif /* MACH_ASSERT */
		}
	}

	dsc_adjust = 4 * count;
	memmove((char *)((vm_offset_t)kmsg->ikm_header + dsc_adjust), kmsg->ikm_header, sizeof(mach_msg_base_t));
	kmsg->ikm_header = (mach_msg_header_t *)((vm_offset_t)kmsg->ikm_header + dsc_adjust);
	/* Update the message size for the smaller user representation */
	kmsg->ikm_header->msgh_size -= dsc_adjust;
}
#endif /* IKM_SUPPORT_LEGACY */
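/*
 * The user-visible trailer layout differs between 32-bit and 64-bit
 * receivers (msgh_context is pointer sized), so on arm64 the trailer is
 * staged in a kernel-sized scratch copy and then munged into the receiver's
 * layout by the helper below.
 */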
#ifdef __arm64__
/*
 * Just sets those parts of the trailer that aren't set up at allocation time.
 */
static void
ipc_kmsg_munge_trailer(mach_msg_max_trailer_t *in, void *_out, boolean_t is64bit)
{
	if (is64bit) {
		mach_msg_max_trailer64_t *out = (mach_msg_max_trailer64_t *)_out;
		out->msgh_seqno = in->msgh_seqno;
		out->msgh_context = in->msgh_context;
		out->msgh_trailer_size = in->msgh_trailer_size;
		out->msgh_ad = in->msgh_ad;
	} else {
		mach_msg_max_trailer32_t *out = (mach_msg_max_trailer32_t *)_out;
		out->msgh_seqno = in->msgh_seqno;
		out->msgh_context = (mach_port_context32_t)in->msgh_context;
		out->msgh_trailer_size = in->msgh_trailer_size;
		out->msgh_ad = in->msgh_ad;
	}
}
#endif /* __arm64__ */
mach_msg_trailer_size_t
ipc_kmsg_trailer_size(
	mach_msg_option_t option,
	__unused thread_t thread)
{
	if (!(option & MACH_RCV_TRAILER_MASK)) {
		return MACH_MSG_TRAILER_MINIMUM_SIZE;
	}

	return REQUESTED_TRAILER_SIZE(thread_is_64bit_addr(thread), option);
}
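/*
 * Fill in the receive-time trailer fields (sequence number, context, the
 * requested trailer size, and, when requested, the message-filter policy id
 * and label fields) at the end of the message, in the layout expected by the
 * receiver.
 */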
void
ipc_kmsg_add_trailer(ipc_kmsg_t kmsg, ipc_space_t space __unused,
    mach_msg_option_t option, __unused thread_t thread,
    mach_port_seqno_t seqno, boolean_t minimal_trailer,
    mach_vm_offset_t context)
{
	mach_msg_max_trailer_t *trailer;

#ifdef __arm64__
	mach_msg_max_trailer_t tmp_trailer; /* This accommodates U64, and we'll munge */
	void *real_trailer_out = (void*)(mach_msg_max_trailer_t *)
	    ((vm_offset_t)kmsg->ikm_header +
	    mach_round_msg(kmsg->ikm_header->msgh_size));

	/*
	 * Populate scratch with initial values set up at message allocation time.
	 * After, we reinterpret the space in the message as the right type
	 * of trailer for the address space in question.
	 */
	bcopy(real_trailer_out, &tmp_trailer, MAX_TRAILER_SIZE);
	trailer = &tmp_trailer;
#else /* __arm64__ */
	trailer = (mach_msg_max_trailer_t *)
	    ((vm_offset_t)kmsg->ikm_header +
	    mach_round_msg(kmsg->ikm_header->msgh_size));
#endif /* __arm64__ */

	if (!(option & MACH_RCV_TRAILER_MASK)) {
		return;
	}

	trailer->msgh_seqno = seqno;
	trailer->msgh_context = context;
	trailer->msgh_trailer_size = REQUESTED_TRAILER_SIZE(thread_is_64bit_addr(thread), option);

	if (minimal_trailer) {
		goto done;
	}

	if (GET_RCV_ELEMENTS(option) >= MACH_RCV_TRAILER_AV) {
		trailer->msgh_ad = kmsg->ikm_filter_policy_id;
	}

	/*
	 * The ipc_kmsg_t holds a reference to the label of a label
	 * handle, not the port. We must get a reference to the port
	 * and a send right to copyout to the receiver.
	 */
	if (option & MACH_RCV_TRAILER_ELEMENTS(MACH_RCV_TRAILER_LABELS)) {
		trailer->msgh_labels.sender = 0;
	}

done:
#ifdef __arm64__
	ipc_kmsg_munge_trailer(trailer, real_trailer_out, thread_is_64bit_addr(thread));
#endif /* __arm64__ */
}
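/*
 * Accessor for the in-kernel message header of a kmsg.
 */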
mach_msg_header_t *
ipc_kmsg_msg_header(ipc_kmsg_t kmsg)
{
	if (NULL == kmsg) {
		return NULL;
	}
	return kmsg->ikm_header;
}