/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 * Copyright (c) 2005 SPARTA, Inc.
 */
/*
 *	File:	ipc/ipc_kmsg.c
 *
 *	Operations on kernel messages.
 */
#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/message.h>
#include <mach/port.h>
#include <mach/vm_map.h>
#include <mach/mach_vm.h>
#include <mach/vm_statistics.h>

#include <kern/kern_types.h>
#include <kern/assert.h>
#include <kern/debug.h>
#include <kern/ipc_kobject.h>
#include <kern/kalloc.h>
#include <kern/zalloc.h>
#include <kern/processor.h>
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/misc_protos.h>
#include <kern/counters.h>
#include <kern/cpu_data.h>
#include <kern/policy_internal.h>
#include <kern/mach_filter.h>

#include <pthread/priority_private.h>

#include <machine/limits.h>

#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>

#include <ipc/port.h>
#include <ipc/ipc_types.h>
#include <ipc/ipc_entry.h>
#include <ipc/ipc_kmsg.h>
#include <ipc/ipc_notify.h>
#include <ipc/ipc_object.h>
#include <ipc/ipc_space.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_right.h>
#include <ipc/ipc_hash.h>
#include <ipc/ipc_table.h>
#include <ipc/ipc_importance.h>

#include <kern/mach_node.h>
#include <ipc/flipc.h>

#include <os/overflow.h>

#include <security/mac_mach_internal.h>

#include <device/device_server.h>

#ifdef ppc
#include <ppc/Firmware.h>
#include <ppc/low_trace.h>
#endif

#if DEBUG
#define DEBUG_MSGS_K64 1
#endif

#include <sys/kdebug.h>
#include <libkern/OSAtomic.h>

#include <libkern/crypto/sha2.h>

#if __has_feature(ptrauth_calls)
#include <libkern/ptrauth_utils.h>
#endif
typedef struct {
    mach_msg_bits_t            msgh_bits;
    mach_msg_size_t            msgh_size;
    mach_port_name_t           msgh_remote_port;
    mach_port_name_t           msgh_local_port;
    mach_port_name_t           msgh_voucher_port;
    mach_msg_id_t              msgh_id;
} mach_msg_legacy_header_t;

typedef struct {
    mach_msg_legacy_header_t   header;
    mach_msg_body_t            body;
} mach_msg_legacy_base_t;

typedef struct {
    mach_port_name_t           name;
    mach_msg_size_t            pad1;
    uint32_t                   pad2 : 16;
    mach_msg_type_name_t       disposition : 8;
    mach_msg_descriptor_type_t type : 8;
} mach_msg_legacy_port_descriptor_t;

typedef union {
    mach_msg_legacy_port_descriptor_t    port;
    mach_msg_ool_descriptor32_t          out_of_line32;
    mach_msg_ool_ports_descriptor32_t    ool_ports32;
    mach_msg_guarded_port_descriptor32_t guarded_port32;
    mach_msg_type_descriptor_t           type;
} mach_msg_legacy_descriptor_t;

#define LEGACY_HEADER_SIZE_DELTA ((mach_msg_size_t)(sizeof(mach_msg_header_t) - sizeof(mach_msg_legacy_header_t)))
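/*
 * Editor's note (illustrative addition, not from the original source): the
 * legacy header above carries 4-byte port names where the kernel-internal
 * mach_msg_header_t carries pointer-sized msgh_remote_port/msgh_local_port
 * fields, so on an LP64 kernel the delta works out to 2 * (8 - 4) = 8 bytes,
 * while on an ILP32 kernel the two layouts match. A compile-time sanity check
 * of that expectation could look like this sketch.
 */
#if defined(__LP64__)
_Static_assert(LEGACY_HEADER_SIZE_DELTA == 8,
    "LP64 kernel header expected to be 8 bytes larger than the legacy header");
#else
_Static_assert(LEGACY_HEADER_SIZE_DELTA == 0,
    "ILP32 kernel header expected to match the legacy header layout");
#endif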
#if __has_feature(ptrauth_calls)
typedef uintptr_t ikm_sig_scratch_t;

static void
ikm_init_sig(
    __unused ipc_kmsg_t kmsg,
    ikm_sig_scratch_t *scratchp)
{
    *scratchp = OS_PTRAUTH_DISCRIMINATOR("kmsg.ikm_signature");
}

static void
ikm_chunk_sig(
    ipc_kmsg_t kmsg,
    void *data,
    size_t len,
    ikm_sig_scratch_t *scratchp)
{
    int ptrauth_flags;
    void *trailerp;

    /*
     * if we happen to be doing the trailer chunk,
     * diversify with the ptrauth-ed trailer pointer -
     * as that is unchanging for the kmsg
     */
    trailerp = (void *)
        ((vm_offset_t)kmsg->ikm_header +
        mach_round_msg(kmsg->ikm_header->msgh_size));

    ptrauth_flags = (data == trailerp) ? PTRAUTH_ADDR_DIVERSIFY : 0;
    *scratchp = ptrauth_utils_sign_blob_generic(data, len, *scratchp, ptrauth_flags);
}

static uintptr_t
ikm_finalize_sig(
    __unused ipc_kmsg_t kmsg,
    ikm_sig_scratch_t *scratchp)
{
    return *scratchp;
}

#elif defined(CRYPTO_SHA2) && !defined(__x86_64__)

typedef SHA256_CTX ikm_sig_scratch_t;

static void
ikm_init_sig(
    __unused ipc_kmsg_t kmsg,
    ikm_sig_scratch_t *scratchp)
{
    SHA256_Init(scratchp);
    SHA256_Update(scratchp, &vm_kernel_addrhash_salt_ext, sizeof(uint64_t));
}

static void
ikm_chunk_sig(
    __unused ipc_kmsg_t kmsg,
    void *data,
    size_t len,
    ikm_sig_scratch_t *scratchp)
{
    SHA256_Update(scratchp, data, len);
}

static uintptr_t
ikm_finalize_sig(
    __unused ipc_kmsg_t kmsg,
    ikm_sig_scratch_t *scratchp)
{
    uintptr_t sha_digest[SHA256_DIGEST_LENGTH / sizeof(uintptr_t)];

    SHA256_Final((uint8_t *)sha_digest, scratchp);

    /*
     * Only use one uintptr_t sized part of result for space and compat reasons.
     * Truncation is better than XOR'ing the chunks together in hopes of higher
     * entropy - because of its lower risk of collisions.
     */
    return sha_digest[0];
}

#else
/* Stubbed out implementation (for __x86_64__ for now) */

typedef uintptr_t ikm_sig_scratch_t;

static void
ikm_init_sig(
    __unused ipc_kmsg_t kmsg,
    ikm_sig_scratch_t *scratchp)
{
    *scratchp = 0;
}

static void
ikm_chunk_sig(
    __unused ipc_kmsg_t kmsg,
    __unused void *data,
    __unused size_t len,
    __unused ikm_sig_scratch_t *scratchp)
{
}

static uintptr_t
ikm_finalize_sig(
    __unused ipc_kmsg_t kmsg,
    ikm_sig_scratch_t *scratchp)
{
    return *scratchp;
}

#endif
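/*
 * Editor's sketch (illustrative, not from the original source): whichever
 * backend above is compiled in, the scratch interface is used the same way -
 * ikm_init_sig() seeds the scratch value, ikm_chunk_sig() folds in one chunk
 * of kmsg memory at a time, and ikm_finalize_sig() reduces the scratch to a
 * single uintptr_t stored in the kmsg. A hypothetical caller-side sketch of
 * that contract (chunk_ptr/chunk_len are placeholders):
 */
#if 0
    ikm_sig_scratch_t scratch;
    uintptr_t sig;

    ikm_init_sig(kmsg, &scratch);
    ikm_chunk_sig(kmsg, chunk_ptr, chunk_len, &scratch); /* repeated per chunk */
    sig = ikm_finalize_sig(kmsg, &scratch);
#endif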
static void
ikm_header_sig(
    ipc_kmsg_t kmsg,
    ikm_sig_scratch_t *scratchp)
{
    mach_msg_size_t dsc_count;
    mach_msg_base_t base;
    boolean_t complex;

    /* take a snapshot of the message header/body-count */
    base.header = *kmsg->ikm_header;
    complex = ((base.header.msgh_bits & MACH_MSGH_BITS_COMPLEX) != 0);
    if (complex) {
        dsc_count = ((mach_msg_body_t *)(kmsg->ikm_header + 1))->msgh_descriptor_count;
    } else {
        dsc_count = 0;
    }
    base.body.msgh_descriptor_count = dsc_count;

    /* compute sig of a copy of the header with all varying bits masked off */
    base.header.msgh_bits &= MACH_MSGH_BITS_USER;
    base.header.msgh_bits &= ~MACH_MSGH_BITS_VOUCHER_MASK;
    ikm_chunk_sig(kmsg, &base, sizeof(mach_msg_base_t), scratchp);
}

static void
ikm_trailer_sig(
    ipc_kmsg_t kmsg,
    ikm_sig_scratch_t *scratchp)
{
    mach_msg_max_trailer_t *trailerp;

    /* Add sig of the trailer contents */
    trailerp = (mach_msg_max_trailer_t *)
        ((vm_offset_t)kmsg->ikm_header +
        mach_round_msg(kmsg->ikm_header->msgh_size));
    ikm_chunk_sig(kmsg, trailerp, sizeof(*trailerp), scratchp);
}
/* Compute the signature for the body bits of a message */
static void
ikm_body_sig(
    ipc_kmsg_t kmsg,
    ikm_sig_scratch_t *scratchp)
{
    mach_msg_descriptor_t *kern_dsc;
    mach_msg_size_t dsc_count;
    mach_msg_body_t *body;
    mach_msg_size_t i;

    if ((kmsg->ikm_header->msgh_bits & MACH_MSGH_BITS_COMPLEX) == 0) {
        return;
    }
    body = (mach_msg_body_t *) (kmsg->ikm_header + 1);
    dsc_count = body->msgh_descriptor_count;

    if (dsc_count == 0) {
        return;
    }

    kern_dsc = (mach_msg_descriptor_t *) (body + 1);

    /* Compute the signature for the whole descriptor array */
    ikm_chunk_sig(kmsg, kern_dsc, sizeof(*kern_dsc) * dsc_count, scratchp);

    /* look for descriptor contents that need a signature */
    for (i = 0; i < dsc_count; i++) {
        switch (kern_dsc[i].type.type) {
        case MACH_MSG_PORT_DESCRIPTOR:
        case MACH_MSG_GUARDED_PORT_DESCRIPTOR:
        case MACH_MSG_OOL_VOLATILE_DESCRIPTOR:
        case MACH_MSG_OOL_DESCRIPTOR:
            break;

        case MACH_MSG_OOL_PORTS_DESCRIPTOR: {
            mach_msg_ool_ports_descriptor_t *ports_dsc;

            /* Compute sig for the port/object pointers */
            ports_dsc = (mach_msg_ool_ports_descriptor_t *)&kern_dsc[i];
            ikm_chunk_sig(kmsg, ports_dsc->address, ports_dsc->count * sizeof(ipc_object_t), scratchp);
            break;
        }
        default:
            panic("ipc_kmsg_body_sig: invalid message descriptor");
        }
    }
}
static void
ikm_sign(
    ipc_kmsg_t kmsg)
{
    ikm_sig_scratch_t scratch;
    uintptr_t sig;

    zone_require(ipc_kmsg_zone, kmsg);

    ikm_init_sig(kmsg, &scratch);

    ikm_header_sig(kmsg, &scratch);
#if IKM_PARTIAL_SIG
    /* save off partial signature for just header */
    sig = ikm_finalize_sig(kmsg, &scratch);
    kmsg->ikm_header_sig = sig;
#endif

    ikm_trailer_sig(kmsg, &scratch);
#if IKM_PARTIAL_SIG
    /* save off partial signature for header+trailer */
    sig = ikm_finalize_sig(kmsg, &scratch);
    kmsg->ikm_headtrail_sig = sig;
#endif

    ikm_body_sig(kmsg, &scratch);
    sig = ikm_finalize_sig(kmsg, &scratch);
    kmsg->ikm_signature = sig;
}
unsigned int ikm_signature_failures;
unsigned int ikm_signature_failure_id;
#if (DEVELOPMENT || DEBUG)
unsigned int ikm_signature_panic_disable;
unsigned int ikm_signature_header_failures;
unsigned int ikm_signature_trailer_failures;
#endif
static void
ikm_validate_sig(
    ipc_kmsg_t kmsg)
{
    ikm_sig_scratch_t scratch;
    uintptr_t expected;
    uintptr_t sig;
    char *str;

    zone_require(ipc_kmsg_zone, kmsg);

    ikm_init_sig(kmsg, &scratch);

    ikm_header_sig(kmsg, &scratch);
#if IKM_PARTIAL_SIG
    /* Do partial evaluation of just the header signature */
    sig = ikm_finalize_sig(kmsg, &scratch);
    expected = kmsg->ikm_header_sig;
    if (sig != expected) {
        ikm_signature_header_failures++;
        str = "header";
        goto failure;
    }
#endif

    ikm_trailer_sig(kmsg, &scratch);
#if IKM_PARTIAL_SIG
    /* Do partial evaluation of header+trailer signature */
    sig = ikm_finalize_sig(kmsg, &scratch);
    expected = kmsg->ikm_headtrail_sig;
    if (sig != expected) {
        ikm_signature_trailer_failures++;
        str = "trailer";
        goto failure;
    }
#endif

    ikm_body_sig(kmsg, &scratch);
    sig = ikm_finalize_sig(kmsg, &scratch);

    expected = kmsg->ikm_signature;
    if (sig != expected) {
        ikm_signature_failures++;
        str = "full";

#if IKM_PARTIAL_SIG
failure:
#endif
        {
            mach_msg_id_t id = kmsg->ikm_header->msgh_id;

            ikm_signature_failure_id = id;
#if (DEVELOPMENT || DEBUG)
            if (ikm_signature_panic_disable) {
                return;
            }
#endif
            panic("ikm_validate_sig: %s signature mismatch: kmsg=0x%p, id=%d, sig=0x%zx (expected 0x%zx)",
                str, kmsg, id, sig, expected);
        }
    }
}
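/*
 * Editor's sketch (illustrative, not from the original source): ikm_sign() is
 * meant to run once a kernel-format message is fully assembled (header,
 * descriptor array, trailer), and ikm_validate_sig() recomputes the same
 * chunks later in the message's life, so in-kernel corruption of any signed
 * region trips the panic above instead of being silently copied out.
 */
#if 0
    /* hypothetical pairing, for illustration only */
    ikm_sign(kmsg);          /* after the kernel-format message is fully built */
    /* ... message waits on a port's message queue ... */
    ikm_validate_sig(kmsg);  /* before the contents are trusted again */
#endif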
#if DEBUG_MSGS_K64
extern void ipc_pset_print64(
	ipc_pset_t              pset);

extern void ipc_kmsg_print64(
	ipc_kmsg_t              kmsg,
	const char              *str);

extern void ipc_msg_print64(
	mach_msg_header_t       *msgh);

extern ipc_port_t ipc_name_to_data64(
	task_t                  task,
	mach_port_name_t        name);

/*
 * Forward declarations
 */
void ipc_msg_print_untyped64(
	mach_msg_body_t         *body);

const char * ipc_type_name64(
	int                     type_name,
	boolean_t               received);

void ipc_print_type_name64(
	int                     type_name);

const char *
msgh_bit_decode64(
	mach_msg_bits_t         bit);

const char *
mm_copy_options_string64(
	mach_msg_copy_options_t option);

void db_print_msg_uid64(mach_msg_header_t *);
static void
ipc_msg_body_print64(void *body, int size)
{
    uint32_t *word = (uint32_t *) body;
    uint32_t *end  = (uint32_t *)(((uintptr_t) body) + size
        - sizeof(mach_msg_header_t));
    int i;

    kprintf("  body(%p-%p):\n    %p: ", body, end, word);
    for (;;) {
        for (i = 0; i < 8; i++, word++) {
            if (word >= end) {
                kprintf("\n");
                return;
            }
            kprintf("%08x ", *word);
        }
        kprintf("\n    %p: ", word);
    }
}
const char *
ipc_type_name64(
    int             type_name,
    boolean_t       received)
{
    switch (type_name) {
    case MACH_MSG_TYPE_PORT_NAME:
        return "port_name";

    case MACH_MSG_TYPE_MOVE_RECEIVE:
        if (received) {
            return "port_receive";
        } else {
            return "move_receive";
        }

    case MACH_MSG_TYPE_MOVE_SEND:
        if (received) {
            return "port_send";
        } else {
            return "move_send";
        }

    case MACH_MSG_TYPE_MOVE_SEND_ONCE:
        if (received) {
            return "port_send_once";
        } else {
            return "move_send_once";
        }

    case MACH_MSG_TYPE_COPY_SEND:
        return "copy_send";

    case MACH_MSG_TYPE_MAKE_SEND:
        return "make_send";

    case MACH_MSG_TYPE_MAKE_SEND_ONCE:
        return "make_send_once";

    default:
        return (char *) 0;
    }
}
void
ipc_print_type_name64(
    int     type_name)
{
    const char *name = ipc_type_name64(type_name, TRUE);

    if (name) {
        kprintf("%s", name);
    } else {
        kprintf("type%d", type_name);
    }
}
/*
 * ipc_kmsg_print64	[ debug ]
 */
void
ipc_kmsg_print64(
    ipc_kmsg_t      kmsg,
    const char      *str)
{
    kprintf("%s kmsg=%p:\n", str, kmsg);
    kprintf("  next=%p, prev=%p, size=%d",
        kmsg->ikm_next,
        kmsg->ikm_prev,
        kmsg->ikm_size);
    kprintf("\n");
    ipc_msg_print64(kmsg->ikm_header);
}
const char *
msgh_bit_decode64(
    mach_msg_bits_t bit)
{
    switch (bit) {
    case MACH_MSGH_BITS_COMPLEX:        return "complex";
    case MACH_MSGH_BITS_CIRCULAR:       return "circular";
    default:                            return (char *) 0;
    }
}
/*
 * ipc_msg_print64	[ debug ]
 */
void
ipc_msg_print64(
    mach_msg_header_t       *msgh)
{
    mach_msg_bits_t mbits;
    unsigned int    i, bit;
    const char      *bit_name;
    int             needs_comma = 0;

    mbits = msgh->msgh_bits;
    kprintf("  msgh_bits=0x%x: l=0x%x,r=0x%x\n",
        mbits,
        MACH_MSGH_BITS_LOCAL(msgh->msgh_bits),
        MACH_MSGH_BITS_REMOTE(msgh->msgh_bits));

    mbits = MACH_MSGH_BITS_OTHER(mbits) & MACH_MSGH_BITS_USED;
    kprintf("  decoded bits:  ");
    for (i = 0, bit = 1; i < sizeof(mbits) * 8; ++i, bit <<= 1) {
        if ((mbits & bit) == 0) {
            continue;
        }
        bit_name = msgh_bit_decode64((mach_msg_bits_t)bit);
        if (bit_name) {
            kprintf("%s%s", needs_comma ? "," : "", bit_name);
        } else {
            kprintf("%sunknown(0x%x),", needs_comma ? "," : "", bit);
        }
        ++needs_comma;
    }
    if (msgh->msgh_bits & ~MACH_MSGH_BITS_USED) {
        kprintf("%sunused=0x%x,", needs_comma ? "," : "",
            msgh->msgh_bits & ~MACH_MSGH_BITS_USED);
    }
    kprintf("\n");

    needs_comma = 1;
    if (msgh->msgh_remote_port) {
        kprintf("  remote=%p(", msgh->msgh_remote_port);
        ipc_print_type_name64(MACH_MSGH_BITS_REMOTE(msgh->msgh_bits));
        kprintf(")");
    } else {
        kprintf("  remote=null");
    }

    if (msgh->msgh_local_port) {
        kprintf("%slocal=%p(", needs_comma ? "," : "",
            msgh->msgh_local_port);
        ipc_print_type_name64(MACH_MSGH_BITS_LOCAL(msgh->msgh_bits));
        kprintf(")\n");
    } else {
        kprintf("local=null\n");
    }

    kprintf("  msgh_id=%d, size=%d\n",
        msgh->msgh_id,
        msgh->msgh_size);

    if (mbits & MACH_MSGH_BITS_COMPLEX) {
        ipc_msg_print_untyped64((mach_msg_body_t *) (msgh + 1));
    }

    ipc_msg_body_print64((void *)(msgh + 1), msgh->msgh_size);
}
const char *
mm_copy_options_string64(
    mach_msg_copy_options_t option)
{
    const char      *name;

    switch (option) {
    case MACH_MSG_PHYSICAL_COPY:
        name = "PHYSICAL";
        break;
    case MACH_MSG_VIRTUAL_COPY:
        name = "VIRTUAL";
        break;
    case MACH_MSG_OVERWRITE:
        name = "OVERWRITE(DEPRECATED)";
        break;
    case MACH_MSG_ALLOCATE:
        name = "ALLOCATE";
        break;
    case MACH_MSG_KALLOC_COPY_T:
        name = "KALLOC_COPY_T";
        break;
    default:
        name = "unknown";
        break;
    }
    return name;
}
void
ipc_msg_print_untyped64(
    mach_msg_body_t         *body)
{
    mach_msg_descriptor_t      *saddr, *send;
    mach_msg_descriptor_type_t type;

    kprintf("  %d descriptors: \n", body->msgh_descriptor_count);

    saddr = (mach_msg_descriptor_t *) (body + 1);
    send = saddr + body->msgh_descriptor_count;

    for (; saddr < send; saddr++) {
        type = saddr->type.type;

        switch (type) {
        case MACH_MSG_PORT_DESCRIPTOR: {
            mach_msg_port_descriptor_t *dsc;

            dsc = &saddr->port;
            kprintf("    PORT name = %p disp = ", dsc->name);
            ipc_print_type_name64(dsc->disposition);
            kprintf("\n");
            break;
        }
        case MACH_MSG_OOL_VOLATILE_DESCRIPTOR:
        case MACH_MSG_OOL_DESCRIPTOR: {
            mach_msg_ool_descriptor_t *dsc;

            dsc = (mach_msg_ool_descriptor_t *) &saddr->out_of_line;
            kprintf("    OOL%s addr = %p size = 0x%x copy = %s %s\n",
                type == MACH_MSG_OOL_DESCRIPTOR ? "" : " VOLATILE",
                dsc->address, dsc->size,
                mm_copy_options_string64(dsc->copy),
                dsc->deallocate ? "DEALLOC" : "");
            break;
        }
        case MACH_MSG_OOL_PORTS_DESCRIPTOR: {
            mach_msg_ool_ports_descriptor_t *dsc;

            dsc = (mach_msg_ool_ports_descriptor_t *) &saddr->ool_ports;

            kprintf("    OOL_PORTS addr = %p count = 0x%x ",
                dsc->address, dsc->count);
            kprintf("disp = ");
            ipc_print_type_name64(dsc->disposition);
            kprintf(" copy = %s %s\n",
                mm_copy_options_string64(dsc->copy),
                dsc->deallocate ? "DEALLOC" : "");
            break;
        }
        case MACH_MSG_GUARDED_PORT_DESCRIPTOR: {
            mach_msg_guarded_port_descriptor_t *dsc;

            dsc = (mach_msg_guarded_port_descriptor_t *)&saddr->guarded_port;
            kprintf("    GUARDED_PORT name = %p flags = 0x%x disp = ", dsc->name, dsc->flags);
            ipc_print_type_name64(dsc->disposition);
            kprintf("\n");
            break;
        }
        default: {
            kprintf("    UNKNOWN DESCRIPTOR 0x%x\n", type);
            break;
        }
        }
    }
}
#define DEBUG_IPC_KMSG_PRINT(kmsg, string)       \
	__unreachable_ok_push   \
	if (DEBUG_KPRINT_SYSCALL_PREDICATE(DEBUG_KPRINT_SYSCALL_IPC_MASK)) {    \
	        ipc_kmsg_print64(kmsg, string); \
	}       \
	__unreachable_ok_pop

#define DEBUG_IPC_MSG_BODY_PRINT(body, size)     \
	__unreachable_ok_push   \
	if (DEBUG_KPRINT_SYSCALL_PREDICATE(DEBUG_KPRINT_SYSCALL_IPC_MASK)) {    \
	        ipc_msg_body_print64(body, size);\
	}       \
	__unreachable_ok_pop

#else /* !DEBUG_MSGS_K64 */
#define DEBUG_IPC_KMSG_PRINT(kmsg, string)
#define DEBUG_IPC_MSG_BODY_PRINT(body, size)
#endif  /* !DEBUG_MSGS_K64 */
extern vm_map_t         ipc_kernel_copy_map;
extern vm_size_t        ipc_kmsg_max_space;
extern const vm_size_t  ipc_kmsg_max_vm_space;
extern const vm_size_t  ipc_kmsg_max_body_space;
extern vm_size_t        msg_ool_size_small;

#define MSG_OOL_SIZE_SMALL      msg_ool_size_small
#if defined(__LP64__)
#define MAP_SIZE_DIFFERS(map)   (map->max_offset < MACH_VM_MAX_ADDRESS)
#define OTHER_OOL_DESCRIPTOR    mach_msg_ool_descriptor32_t
#define OTHER_OOL_PORTS_DESCRIPTOR      mach_msg_ool_ports_descriptor32_t
#else
#define MAP_SIZE_DIFFERS(map)   (map->max_offset > VM_MAX_ADDRESS)
#define OTHER_OOL_DESCRIPTOR    mach_msg_ool_descriptor64_t
#define OTHER_OOL_PORTS_DESCRIPTOR      mach_msg_ool_ports_descriptor64_t
#endif

#define DESC_SIZE_ADJUSTMENT    ((mach_msg_size_t)(sizeof(mach_msg_ool_descriptor64_t) - \
	sizeof(mach_msg_ool_descriptor32_t)))
/* scatter list macros */

#define SKIP_PORT_DESCRIPTORS(s, c)                                     \
MACRO_BEGIN                                                             \
	if ((s) != MACH_MSG_DESCRIPTOR_NULL) {                          \
	        while ((c) > 0) {                                       \
	                if ((s)->type.type != MACH_MSG_PORT_DESCRIPTOR) \
	                        break;                                  \
	                (s)++; (c)--;                                   \
	        }                                                       \
	        if (c == 0)                                             \
	                (s) = MACH_MSG_DESCRIPTOR_NULL;                 \
	}                                                               \
MACRO_END

#define INCREMENT_SCATTER(s, c, d)                                      \
MACRO_BEGIN                                                             \
	if ((s) != MACH_MSG_DESCRIPTOR_NULL) {                          \
	        s = (d) ? (mach_msg_descriptor_t *)                     \
	                ((OTHER_OOL_DESCRIPTOR *)(s) + 1) :             \
	                (s + 1);                                        \
	        (c)--;                                                  \
	}                                                               \
MACRO_END
#define KMSG_TRACE_FLAG_TRACED       0x000001
#define KMSG_TRACE_FLAG_COMPLEX      0x000002
#define KMSG_TRACE_FLAG_OOLMEM       0x000004
#define KMSG_TRACE_FLAG_VCPY         0x000008
#define KMSG_TRACE_FLAG_PCPY         0x000010
#define KMSG_TRACE_FLAG_SND64        0x000020
#define KMSG_TRACE_FLAG_RAISEIMP     0x000040
#define KMSG_TRACE_FLAG_APP_SRC      0x000080
#define KMSG_TRACE_FLAG_APP_DST      0x000100
#define KMSG_TRACE_FLAG_DAEMON_SRC   0x000200
#define KMSG_TRACE_FLAG_DAEMON_DST   0x000400
#define KMSG_TRACE_FLAG_DST_NDFLTQ   0x000800
#define KMSG_TRACE_FLAG_SRC_NDFLTQ   0x001000
#define KMSG_TRACE_FLAG_DST_SONCE    0x002000
#define KMSG_TRACE_FLAG_SRC_SONCE    0x004000
#define KMSG_TRACE_FLAG_CHECKIN      0x008000
#define KMSG_TRACE_FLAG_ONEWAY       0x010000
#define KMSG_TRACE_FLAG_IOKIT        0x020000
#define KMSG_TRACE_FLAG_SNDRCV       0x040000
#define KMSG_TRACE_FLAG_DSTQFULL     0x080000
#define KMSG_TRACE_FLAG_VOUCHER      0x100000
#define KMSG_TRACE_FLAG_TIMER        0x200000
#define KMSG_TRACE_FLAG_SEMA         0x400000
#define KMSG_TRACE_FLAG_DTMPOWNER    0x800000
#define KMSG_TRACE_FLAG_GUARDED_DESC 0x1000000

#define KMSG_TRACE_FLAGS_MASK   0x1ffffff
#define KMSG_TRACE_FLAGS_SHIFT  8

#define KMSG_TRACE_PORTS_MASK   0xff
#define KMSG_TRACE_PORTS_SHIFT  0
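/*
 * Editor's worked example (illustrative, not from the original source): the
 * last tracepoint argument emitted below packs the flag word and the carried
 * port count into one value as
 *   ((msg_flags & KMSG_TRACE_FLAGS_MASK) << KMSG_TRACE_FLAGS_SHIFT) |
 *   ((num_ports & KMSG_TRACE_PORTS_MASK) << KMSG_TRACE_PORTS_SHIFT)
 * so a traced, complex message (flags 0x000003) carrying 3 port rights
 * encodes as (0x3 << 8) | 3 = 0x00000303.
 */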
#if (KDEBUG_LEVEL >= KDEBUG_LEVEL_STANDARD)

void
ipc_kmsg_trace_send(ipc_kmsg_t kmsg,
    mach_msg_option_t option)
{
    task_t send_task = TASK_NULL;
    ipc_port_t dst_port, src_port;
    boolean_t is_task_64bit;
    mach_msg_header_t *msg;
    mach_msg_trailer_t *trailer;

    int kotype = 0;
    uint32_t msg_size = 0;
    uint64_t msg_flags = KMSG_TRACE_FLAG_TRACED;
    uint32_t num_ports = 0;
    uint32_t send_pid, dst_pid;

    /*
     * check to see not only if ktracing is enabled, but if we will
     * _actually_ emit the KMSG_INFO tracepoint. This saves us a
     * significant amount of processing (and a port lock hold) in
     * the non-tracing case.
     */
    if (__probable((kdebug_enable & KDEBUG_TRACE) == 0)) {
        return;
    }
    if (!kdebug_debugid_enabled(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_KMSG_INFO))) {
        return;
    }

    msg = kmsg->ikm_header;

    dst_port = msg->msgh_remote_port;
    if (!IPC_PORT_VALID(dst_port)) {
        return;
    }

    /*
     * Message properties / options
     */
    if ((option & (MACH_SEND_MSG | MACH_RCV_MSG)) == (MACH_SEND_MSG | MACH_RCV_MSG)) {
        msg_flags |= KMSG_TRACE_FLAG_SNDRCV;
    }

    if (msg->msgh_id >= is_iokit_subsystem.start &&
        msg->msgh_id < is_iokit_subsystem.end + 100) {
        msg_flags |= KMSG_TRACE_FLAG_IOKIT;
    }
    /* magic XPC checkin message id (XPC_MESSAGE_ID_CHECKIN) from libxpc */
    else if (msg->msgh_id == 0x77303074u /* w00t */) {
        msg_flags |= KMSG_TRACE_FLAG_CHECKIN;
    }

    if (msg->msgh_bits & MACH_MSGH_BITS_RAISEIMP) {
        msg_flags |= KMSG_TRACE_FLAG_RAISEIMP;
    }

    if (unsafe_convert_port_to_voucher(kmsg->ikm_voucher)) {
        msg_flags |= KMSG_TRACE_FLAG_VOUCHER;
    }

    /*
     * Sending task / port
     */
    send_task = current_task();
    send_pid = task_pid(send_task);

    if (task_is_daemon(send_task)) {
        msg_flags |= KMSG_TRACE_FLAG_DAEMON_SRC;
    } else if (task_is_app(send_task)) {
        msg_flags |= KMSG_TRACE_FLAG_APP_SRC;
    }

    is_task_64bit = (send_task->map->max_offset > VM_MAX_ADDRESS);
    if (is_task_64bit) {
        msg_flags |= KMSG_TRACE_FLAG_SND64;
    }

    src_port = msg->msgh_local_port;
    if (src_port) {
        if (src_port->ip_messages.imq_qlimit != MACH_PORT_QLIMIT_DEFAULT) {
            msg_flags |= KMSG_TRACE_FLAG_SRC_NDFLTQ;
        }
        switch (MACH_MSGH_BITS_LOCAL(msg->msgh_bits)) {
        case MACH_MSG_TYPE_MOVE_SEND_ONCE:
            msg_flags |= KMSG_TRACE_FLAG_SRC_SONCE;
            break;
        default:
            break;
        }
    } else {
        msg_flags |= KMSG_TRACE_FLAG_ONEWAY;
    }

    /*
     * Destination task / port
     */
    ip_lock(dst_port);
    if (!ip_active(dst_port)) {
        /* dst port is being torn down */
        dst_pid = (uint32_t)0xfffffff0;
    } else if (dst_port->ip_tempowner) {
        msg_flags |= KMSG_TRACE_FLAG_DTMPOWNER;
        if (IIT_NULL != dst_port->ip_imp_task) {
            dst_pid = task_pid(dst_port->ip_imp_task->iit_task);
        } else {
            dst_pid = (uint32_t)0xfffffff1;
        }
    } else if (dst_port->ip_receiver_name == MACH_PORT_NULL) {
        /* dst_port is otherwise in-transit */
        dst_pid = (uint32_t)0xfffffff2;
    } else {
        if (dst_port->ip_receiver == ipc_space_kernel) {
            dst_pid = 0;
        } else {
            ipc_space_t dst_space;
            dst_space = dst_port->ip_receiver;
            if (dst_space && is_active(dst_space)) {
                dst_pid = task_pid(dst_space->is_task);
                if (task_is_daemon(dst_space->is_task)) {
                    msg_flags |= KMSG_TRACE_FLAG_DAEMON_DST;
                } else if (task_is_app(dst_space->is_task)) {
                    msg_flags |= KMSG_TRACE_FLAG_APP_DST;
                }
            } else {
                /* receiving task is being torn down */
                dst_pid = (uint32_t)0xfffffff3;
            }
        }
    }

    if (dst_port->ip_messages.imq_qlimit != MACH_PORT_QLIMIT_DEFAULT) {
        msg_flags |= KMSG_TRACE_FLAG_DST_NDFLTQ;
    }
    if (imq_full(&dst_port->ip_messages)) {
        msg_flags |= KMSG_TRACE_FLAG_DSTQFULL;
    }

    kotype = ip_kotype(dst_port);

    ip_unlock(dst_port);

    switch (kotype) {
    case IKOT_SEMAPHORE:
        msg_flags |= KMSG_TRACE_FLAG_SEMA;
        break;
    case IKOT_TIMER:
    case IKOT_CLOCK:
        msg_flags |= KMSG_TRACE_FLAG_TIMER;
        break;
    case IKOT_MASTER_DEVICE:
    case IKOT_IOKIT_CONNECT:
    case IKOT_IOKIT_OBJECT:
    case IKOT_IOKIT_IDENT:
    case IKOT_UEXT_OBJECT:
        msg_flags |= KMSG_TRACE_FLAG_IOKIT;
        break;
    default:
        break;
    }

    switch (MACH_MSGH_BITS_REMOTE(msg->msgh_bits)) {
    case MACH_MSG_TYPE_PORT_SEND_ONCE:
        msg_flags |= KMSG_TRACE_FLAG_DST_SONCE;
        break;
    default:
        break;
    }

    /*
     * Message size / content
     */
    msg_size = msg->msgh_size - sizeof(mach_msg_header_t);

    if (msg->msgh_bits & MACH_MSGH_BITS_COMPLEX) {
        mach_msg_body_t *msg_body;
        mach_msg_descriptor_t *kern_dsc;
        int dsc_count;

        msg_flags |= KMSG_TRACE_FLAG_COMPLEX;

        msg_body = (mach_msg_body_t *)(kmsg->ikm_header + 1);
        dsc_count = (int)msg_body->msgh_descriptor_count;
        kern_dsc = (mach_msg_descriptor_t *)(msg_body + 1);

        /* this is gross: see ipc_kmsg_copyin_body()... */
        if (!is_task_64bit) {
            msg_size -= (dsc_count * 12);
        }

        for (int i = 0; i < dsc_count; i++) {
            switch (kern_dsc[i].type.type) {
            case MACH_MSG_PORT_DESCRIPTOR:
                num_ports++;
                if (is_task_64bit) {
                    msg_size -= 12;
                }
                break;
            case MACH_MSG_OOL_VOLATILE_DESCRIPTOR:
            case MACH_MSG_OOL_DESCRIPTOR: {
                mach_msg_ool_descriptor_t *dsc;
                dsc = (mach_msg_ool_descriptor_t *)&kern_dsc[i];
                msg_flags |= KMSG_TRACE_FLAG_OOLMEM;
                msg_size += dsc->size;
                if ((dsc->size >= MSG_OOL_SIZE_SMALL) &&
                    (dsc->copy == MACH_MSG_PHYSICAL_COPY) &&
                    !dsc->deallocate) {
                    msg_flags |= KMSG_TRACE_FLAG_PCPY;
                } else if (dsc->size <= MSG_OOL_SIZE_SMALL) {
                    msg_flags |= KMSG_TRACE_FLAG_PCPY;
                } else {
                    msg_flags |= KMSG_TRACE_FLAG_VCPY;
                }
                if (is_task_64bit) {
                    msg_size -= 16;
                }
            } break;
            case MACH_MSG_OOL_PORTS_DESCRIPTOR: {
                mach_msg_ool_ports_descriptor_t *dsc;
                dsc = (mach_msg_ool_ports_descriptor_t *)&kern_dsc[i];
                num_ports += dsc->count;
                if (is_task_64bit) {
                    msg_size -= 16;
                }
            } break;
            case MACH_MSG_GUARDED_PORT_DESCRIPTOR:
                num_ports++;
                msg_flags |= KMSG_TRACE_FLAG_GUARDED_DESC;
                if (is_task_64bit) {
                    msg_size -= 16;
                }
                break;
            default:
                break;
            }
        }
    }

    /*
     * Trailer contents
     */
    trailer = (mach_msg_trailer_t *)((vm_offset_t)msg +
        (vm_offset_t)mach_round_msg(msg->msgh_size));
    if (trailer->msgh_trailer_size <= sizeof(mach_msg_security_trailer_t)) {
        extern const security_token_t KERNEL_SECURITY_TOKEN;
        mach_msg_security_trailer_t *strailer;
        strailer = (mach_msg_security_trailer_t *)trailer;
        /*
         * verify the sender PID: replies from the kernel often look
         * like self-talk because the sending port is not reset.
         */
        if (memcmp(&strailer->msgh_sender,
            &KERNEL_SECURITY_TOKEN,
            sizeof(KERNEL_SECURITY_TOKEN)) == 0) {
            send_pid = 0;
            msg_flags &= ~(KMSG_TRACE_FLAG_APP_SRC | KMSG_TRACE_FLAG_DAEMON_SRC);
        }
    }

    KDBG(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_KMSG_INFO) | DBG_FUNC_END,
        (uintptr_t)send_pid,
        (uintptr_t)dst_pid,
        (uintptr_t)msg_size,
        (uintptr_t)(
            ((msg_flags & KMSG_TRACE_FLAGS_MASK) << KMSG_TRACE_FLAGS_SHIFT) |
            ((num_ports & KMSG_TRACE_PORTS_MASK) << KMSG_TRACE_PORTS_SHIFT)
            )
        );
}
#endif
/* zone for cached ipc_kmsg_t structures */
ZONE_DECLARE(ipc_kmsg_zone, "ipc kmsgs", IKM_SAVED_KMSG_SIZE, ZC_CACHING);
static TUNABLE(bool, enforce_strict_reply, "ipc_strict_reply", false);
/*
 * Forward declarations
 */

void ipc_kmsg_clean(
	ipc_kmsg_t              kmsg);

void ipc_kmsg_clean_body(
	ipc_kmsg_t              kmsg,
	mach_msg_type_number_t  number,
	mach_msg_descriptor_t   *desc);

void ipc_kmsg_clean_partial(
	ipc_kmsg_t              kmsg,
	mach_msg_type_number_t  number,
	mach_msg_descriptor_t   *desc,
	vm_offset_t             paddr,
	vm_size_t               length);

mach_msg_return_t ipc_kmsg_copyin_body(
	ipc_kmsg_t              kmsg,
	ipc_space_t             space,
	vm_map_t                map,
	mach_msg_option_t       *optionp);

static void
ipc_kmsg_link_reply_context_locked(
	ipc_port_t reply_port,
	ipc_port_t voucher_port);

static kern_return_t
ipc_kmsg_validate_reply_port_locked(
	ipc_port_t reply_port,
	mach_msg_option_t options);

static mach_msg_return_t
ipc_kmsg_validate_reply_context_locked(
	mach_msg_option_t option,
	ipc_port_t dest_port,
	ipc_voucher_t voucher,
	mach_port_name_t voucher_name);

/* we can't include the BSD <sys/persona.h> header here... */
#ifndef PERSONA_ID_NONE
#define PERSONA_ID_NONE ((uint32_t)-1)
#endif
/*
 * We keep a per-processor cache of kernel message buffers.
 * The cache saves the overhead/locking of using kalloc/kfree.
 * The per-processor cache seems to miss less than a per-thread cache,
 * and it also uses less memory.  Access to the cache doesn't
 * require locking.
 */

/*
 *	Routine:	ikm_set_header
 *	Purpose:
 *		Set the header (and data) pointers for a message.  If the
 *		message is small, the data pointer is NULL and all the data
 *		resides within the fixed-size kmsg buffer itself; otherwise
 *		the header is placed at the tail end of the external data
 *		buffer.
 *	Conditions:
 *		Nothing locked.
 */
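/*
 * Editor's sketch of the resulting layout (illustrative, not from the
 * original source): the header is always positioned so that the message plus
 * maximum trailer ends exactly at the end of the buffer, leaving any slack in
 * front of the header for later descriptor expansion:
 *
 *   |<--------------------- ikm_size ---------------------->|
 *   [  expansion slack  |  header  |  body  |  max trailer  ]
 *   ^ data (or kmsg + 1)           ^ ikm_header
 */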
1251 mach_msg_size_t mtsize
)
1254 kmsg
->ikm_data
= data
;
1255 kmsg
->ikm_header
= (mach_msg_header_t
*)(data
+ kmsg
->ikm_size
- mtsize
);
1257 assert(kmsg
->ikm_size
== IKM_SAVED_MSG_SIZE
);
1258 kmsg
->ikm_header
= (mach_msg_header_t
*)
1259 ((vm_offset_t
)(kmsg
+ 1) + kmsg
->ikm_size
- mtsize
);
/*
 *	Routine:	ipc_kmsg_alloc
 *	Purpose:
 *		Allocate a kernel message structure.  If we can get one from
 *		the cache, that is best.  Otherwise, allocate a new one.
 *	Conditions:
 *		Nothing locked.
 */
ipc_kmsg_t
ipc_kmsg_alloc(
    mach_msg_size_t msg_and_trailer_size)
{
    mach_msg_size_t max_expanded_size;
    ipc_kmsg_t kmsg;
    void *data = NULL;

    /*
     * Pad the allocation in case we need to expand the
     * message descriptors for user spaces with pointers larger than
     * the kernel's own, or vice versa.  We don't know how many descriptors
     * there are yet, so just assume the whole body could be
     * descriptors (if there could be any at all).
     *
     * The expansion space is left in front of the header,
     * because it is easier to pull the header and descriptors
     * forward as we process them than it is to push all the
     * data backwards.
     */
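    /*
     * Editor's worked example (illustrative, not from the original source):
     * each 12-byte 32-bit OOL descriptor can grow by DESC_SIZE_ADJUSTMENT
     * (4) bytes when rewritten in 64-bit kernel form, so a body that could
     * hold at most 4 such descriptors reserves 4 * 4 = 16 bytes of slack in
     * front of the header.
     */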
    mach_msg_size_t size = msg_and_trailer_size - MAX_TRAILER_SIZE;

    /* compare against implementation upper limit for the body */
    if (size > ipc_kmsg_max_body_space) {
        return IKM_NULL;
    }

    if (size > sizeof(mach_msg_base_t)) {
        mach_msg_size_t max_desc = (mach_msg_size_t)(((size - sizeof(mach_msg_base_t)) /
            sizeof(mach_msg_ool_descriptor32_t)) *
            DESC_SIZE_ADJUSTMENT);

        /* make sure expansion won't cause wrap */
        if (msg_and_trailer_size > MACH_MSG_SIZE_MAX - max_desc) {
            return IKM_NULL;
        }

        max_expanded_size = msg_and_trailer_size + max_desc;
    } else {
        max_expanded_size = msg_and_trailer_size;
    }

    kmsg = (ipc_kmsg_t)zalloc(ipc_kmsg_zone);

    if (max_expanded_size < IKM_SAVED_MSG_SIZE) {
        max_expanded_size = IKM_SAVED_MSG_SIZE;         /* round up for ikm_cache */
    } else if (max_expanded_size > IKM_SAVED_MSG_SIZE) {
        data = kheap_alloc(KHEAP_DATA_BUFFERS, max_expanded_size, Z_WAITOK);
    }

    if (kmsg != IKM_NULL) {
        ikm_init(kmsg, max_expanded_size);
        ikm_set_header(kmsg, data, msg_and_trailer_size);
    }

    return kmsg;
}
/*
 *	Routine:	ipc_kmsg_free
 *	Purpose:
 *		Free a kernel message buffer.  If the kmsg is preallocated
 *		to a port, just "put it back (marked unused)."  We have to
 *		do this with the port locked.  The port may have its hold
 *		on our message released.  In that case, we have to just
 *		revert the message to a traditional one and free it normally.
 *	Conditions:
 *		Nothing locked.
 */
void
ipc_kmsg_free(
    ipc_kmsg_t      kmsg)
{
    mach_msg_size_t size = kmsg->ikm_size;
    ipc_port_t port;

    assert(!IP_VALID(kmsg->ikm_voucher));

    KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_KMSG_FREE) | DBG_FUNC_NONE,
        VM_KERNEL_ADDRPERM((uintptr_t)kmsg),
        0, 0, 0, 0);

    /*
     * Check to see if the message is bound to the port.  If so,
     * mark it not in use.  If the port isn't already dead, then
     * leave the message associated with it.  Otherwise, free it.
     */
    if (size == IKM_SAVED_MSG_SIZE) {
        if ((void *)kmsg->ikm_header < (void *)(kmsg + 1) ||
            (void *)kmsg->ikm_header >= (void *)(kmsg + 1) + IKM_SAVED_MSG_SIZE) {
            panic("ipc_kmsg_free");
        }
        port = ikm_prealloc_inuse_port(kmsg);
        if (port != IP_NULL) {
            ip_lock(port);
            ikm_prealloc_clear_inuse(kmsg, port);
            if (ip_active(port) && (port->ip_premsg == kmsg)) {
                assert(IP_PREALLOC(port));
                ip_unlock(port);
                ip_release(port);
                return;
            }
            ip_unlock(port);
            ip_release(port); /* May be last reference */
        }
    } else {
        void *data = kmsg->ikm_data;
        if ((void *)kmsg->ikm_header < data ||
            (void *)kmsg->ikm_header >= data + size) {
            panic("ipc_kmsg_free");
        }
        kheap_free(KHEAP_DATA_BUFFERS, data, size);
    }
    zfree(ipc_kmsg_zone, kmsg);
}
/*
 *	Routine:	ipc_kmsg_enqueue
 *	Purpose:
 *		Enqueue a kmsg.
 */
void
ipc_kmsg_enqueue(
    ipc_kmsg_queue_t        queue,
    ipc_kmsg_t              kmsg)
{
    ipc_kmsg_t first = queue->ikmq_base;
    ipc_kmsg_t last;

    if (first == IKM_NULL) {
        queue->ikmq_base = kmsg;
        kmsg->ikm_next = kmsg;
        kmsg->ikm_prev = kmsg;
    } else {
        last = first->ikm_prev;
        kmsg->ikm_next = first;
        kmsg->ikm_prev = last;
        first->ikm_prev = kmsg;
        last->ikm_next = kmsg;
    }
}
/*
 *	Routine:	ipc_kmsg_enqueue_qos
 *	Purpose:
 *		Enqueue a kmsg, propagating qos
 *		overrides towards the head of the queue.
 *
 *	Returns:
 *		whether the head of the queue had
 *		its override-qos adjusted because
 *		of this insertion.
 */
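/*
 * Editor's worked example (illustrative, not from the original source): with
 * head-to-tail overrides [4, 2, 1], enqueueing a message whose override is 3
 * yields [4, 3, 3, 3] and returns FALSE (the head kept its override), while
 * enqueueing one with override 5 yields [5, 5, 5, 5] and returns TRUE because
 * the propagation reached the head of the queue.
 */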
boolean_t
ipc_kmsg_enqueue_qos(
    ipc_kmsg_queue_t        queue,
    ipc_kmsg_t              kmsg)
{
    ipc_kmsg_t first = queue->ikmq_base;
    ipc_kmsg_t prev;
    mach_msg_qos_t qos_ovr;

    if (first == IKM_NULL) {
        /* insert a first message */
        queue->ikmq_base = kmsg;
        kmsg->ikm_next = kmsg;
        kmsg->ikm_prev = kmsg;
        return TRUE;
    }

    /* insert at the tail */
    prev = first->ikm_prev;
    kmsg->ikm_next = first;
    kmsg->ikm_prev = prev;
    first->ikm_prev = kmsg;
    prev->ikm_next = kmsg;

    /* apply QoS overrides towards the head */
    qos_ovr = kmsg->ikm_qos_override;
    while (prev != kmsg &&
        qos_ovr > prev->ikm_qos_override) {
        prev->ikm_qos_override = qos_ovr;
        prev = prev->ikm_prev;
    }

    /* did we adjust everything? */
    return prev == kmsg;
}
/*
 *	Routine:	ipc_kmsg_override_qos
 *	Purpose:
 *		Update the override for a given kmsg already
 *		enqueued, propagating qos override adjustments
 *		towards the head of the queue.
 *
 *	Returns:
 *		whether the head of the queue had
 *		its override-qos adjusted because
 *		of this insertion.
 */
boolean_t
ipc_kmsg_override_qos(
    ipc_kmsg_queue_t    queue,
    ipc_kmsg_t          kmsg,
    mach_msg_qos_t      qos_ovr)
{
    ipc_kmsg_t first = queue->ikmq_base;
    ipc_kmsg_t cur = kmsg;

    /* apply QoS overrides towards the head */
    while (qos_ovr > cur->ikm_qos_override) {
        cur->ikm_qos_override = qos_ovr;
        if (cur == first) {
            return TRUE;
        }
        cur = cur->ikm_prev;
    }
    return FALSE;
}
/*
 *	Routine:	ipc_kmsg_dequeue
 *	Purpose:
 *		Dequeue and return a kmsg.
 */
ipc_kmsg_t
ipc_kmsg_dequeue(
    ipc_kmsg_queue_t        queue)
{
    ipc_kmsg_t first;

    first = ipc_kmsg_queue_first(queue);

    if (first != IKM_NULL) {
        ipc_kmsg_rmqueue(queue, first);
    }

    return first;
}
/*
 *	Routine:	ipc_kmsg_rmqueue
 *	Purpose:
 *		Pull a kmsg out of a queue.
 */
void
ipc_kmsg_rmqueue(
    ipc_kmsg_queue_t        queue,
    ipc_kmsg_t              kmsg)
{
    ipc_kmsg_t next, prev;

    assert(queue->ikmq_base != IKM_NULL);

    next = kmsg->ikm_next;
    prev = kmsg->ikm_prev;

    if (next == kmsg) {
        assert(prev == kmsg);
        assert(queue->ikmq_base == kmsg);

        queue->ikmq_base = IKM_NULL;
    } else {
        if (__improbable(next->ikm_prev != kmsg || prev->ikm_next != kmsg)) {
            panic("ipc_kmsg_rmqueue: inconsistent prev/next pointers. "
                "(prev->next: %p, next->prev: %p, kmsg: %p)",
                prev->ikm_next, next->ikm_prev, kmsg);
        }

        if (queue->ikmq_base == kmsg) {
            queue->ikmq_base = next;
        }

        next->ikm_prev = prev;
        prev->ikm_next = next;
    }
    /* XXX Temporary debug logic */
    assert((kmsg->ikm_next = IKM_BOGUS) == IKM_BOGUS);
    assert((kmsg->ikm_prev = IKM_BOGUS) == IKM_BOGUS);
}
/*
 *	Routine:	ipc_kmsg_queue_next
 *	Purpose:
 *		Return the kmsg following the given kmsg.
 *		(Or IKM_NULL if it is the last one in the queue.)
 */
ipc_kmsg_t
ipc_kmsg_queue_next(
    ipc_kmsg_queue_t        queue,
    ipc_kmsg_t              kmsg)
{
    ipc_kmsg_t next;

    assert(queue->ikmq_base != IKM_NULL);

    next = kmsg->ikm_next;
    if (queue->ikmq_base == next) {
        next = IKM_NULL;
    }

    return next;
}
/*
 *	Routine:	ipc_kmsg_destroy
 *	Purpose:
 *		Destroys a kernel message.  Releases all rights,
 *		references, and memory held by the message.
 *		Frees the message.
 *	Conditions:
 *		No locks held.
 */
void
ipc_kmsg_destroy(
    ipc_kmsg_t      kmsg)
{
    /*
     * Destroying a message can cause more messages to be destroyed.
     * Curtail recursion by putting messages on the deferred
     * destruction queue.  If this was the first message on the
     * queue, this instance must process the full queue.
     */
    if (ipc_kmsg_delayed_destroy(kmsg)) {
        ipc_kmsg_reap_delayed();
    }
}
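/*
 * Editor's note (illustrative, not from the original source): destroying a
 * message may release the last right to a port whose own message queue still
 * holds messages, each of which would in turn call back into
 * ipc_kmsg_destroy().  Deferring those messages onto the per-thread
 * ith_messages queue and reaping them iteratively in ipc_kmsg_reap_delayed()
 * keeps the kernel stack depth bounded no matter how deep the destruction
 * chain goes.
 */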
/*
 *	Routine:	ipc_kmsg_delayed_destroy
 *	Purpose:
 *		Enqueues a kernel message for deferred destruction.
 *	Returns:
 *		Boolean indicator that the caller is responsible to reap
 *		deferred messages.
 */
boolean_t
ipc_kmsg_delayed_destroy(
    ipc_kmsg_t kmsg)
{
    ipc_kmsg_queue_t queue = &(current_thread()->ith_messages);
    boolean_t first = ipc_kmsg_queue_empty(queue);

    ipc_kmsg_enqueue(queue, kmsg);
    return first;
}
/*
 *	Routine:	ipc_kmsg_reap_delayed
 *	Purpose:
 *		Destroys messages from the per-thread
 *		deferred reaping queue.
 *	Conditions:
 *		No locks held.
 */
void
ipc_kmsg_reap_delayed(void)
{
    ipc_kmsg_queue_t queue = &(current_thread()->ith_messages);
    ipc_kmsg_t kmsg;

    /*
     * must leave kmsg in queue while cleaning it to assure
     * no nested calls recurse into here.
     */
    while ((kmsg = ipc_kmsg_queue_first(queue)) != IKM_NULL) {
        ipc_kmsg_clean(kmsg);
        ipc_kmsg_rmqueue(queue, kmsg);
        ipc_kmsg_free(kmsg);
    }
}
/*
 *	Routine:	ipc_kmsg_clean_body
 *	Purpose:
 *		Cleans the body of a kernel message.
 *		Releases all rights, references, and memory.
 *	Conditions:
 *		No locks held.
 */
static unsigned int _ipc_kmsg_clean_invalid_desc = 0;
void
ipc_kmsg_clean_body(
    __unused ipc_kmsg_t     kmsg,
    mach_msg_type_number_t  number,
    mach_msg_descriptor_t   *saddr)
{
    mach_msg_type_number_t  i;

    for (i = 0; i < number; i++, saddr++) {
        switch (saddr->type.type) {
        case MACH_MSG_PORT_DESCRIPTOR: {
            mach_msg_port_descriptor_t *dsc;

            dsc = &saddr->port;

            /*
             * Destroy port rights carried in the message
             */
            if (!IP_VALID(dsc->name)) {
                continue;
            }
            ipc_object_destroy(ip_to_object(dsc->name), dsc->disposition);
            break;
        }
        case MACH_MSG_OOL_VOLATILE_DESCRIPTOR:
        case MACH_MSG_OOL_DESCRIPTOR: {
            mach_msg_ool_descriptor_t *dsc;

            dsc = (mach_msg_ool_descriptor_t *)&saddr->out_of_line;

            /*
             * Destroy memory carried in the message
             */
            if (dsc->size == 0) {
                assert(dsc->address == (void *) 0);
            } else {
                vm_map_copy_discard((vm_map_copy_t) dsc->address);
            }
            break;
        }
        case MACH_MSG_OOL_PORTS_DESCRIPTOR: {
            ipc_object_t                    *objects;
            mach_msg_type_number_t          j;
            mach_msg_ool_ports_descriptor_t *dsc;

            dsc = (mach_msg_ool_ports_descriptor_t *)&saddr->ool_ports;
            objects = (ipc_object_t *) dsc->address;

            if (dsc->count == 0) {
                break;
            }

            assert(objects != (ipc_object_t *) 0);

            /* destroy port rights carried in the message */

            for (j = 0; j < dsc->count; j++) {
                ipc_object_t object = objects[j];

                if (!IO_VALID(object)) {
                    continue;
                }

                ipc_object_destroy(object, dsc->disposition);
            }

            /* destroy memory carried in the message */

            assert(dsc->count != 0);

            kfree(dsc->address,
                (vm_size_t) dsc->count * sizeof(mach_port_t));
            break;
        }
        case MACH_MSG_GUARDED_PORT_DESCRIPTOR: {
            mach_msg_guarded_port_descriptor_t *dsc = (typeof(dsc))&saddr->guarded_port;

            /*
             * Destroy port rights carried in the message
             */
            if (!IP_VALID(dsc->name)) {
                continue;
            }
            ipc_object_destroy(ip_to_object(dsc->name), dsc->disposition);
            break;
        }
        default: {
            _ipc_kmsg_clean_invalid_desc++; /* don't understand this type of descriptor */
        }
        }
    }
}
/*
 *	Routine:	ipc_kmsg_clean_partial
 *	Purpose:
 *		Cleans a partially-acquired kernel message.
 *		number is the index of the type descriptor
 *		in the body of the message that contained the error.
 *		If dolast, the memory and port rights in this last
 *		type spec are also cleaned.  In that case, number
 *		specifies the number of port rights to clean.
 *	Conditions:
 *		Nothing locked.
 */
void
ipc_kmsg_clean_partial(
    ipc_kmsg_t              kmsg,
    mach_msg_type_number_t  number,
    mach_msg_descriptor_t   *desc,
    vm_offset_t             paddr,
    vm_size_t               length)
{
    ipc_object_t object;
    mach_msg_bits_t mbits = kmsg->ikm_header->msgh_bits;

    /* deal with importance chain while we still have dest and voucher references */
    ipc_importance_clean(kmsg);

    object = ip_to_object(kmsg->ikm_header->msgh_remote_port);
    assert(IO_VALID(object));
    ipc_object_destroy_dest(object, MACH_MSGH_BITS_REMOTE(mbits));

    object = ip_to_object(kmsg->ikm_header->msgh_local_port);
    if (IO_VALID(object)) {
        ipc_object_destroy(object, MACH_MSGH_BITS_LOCAL(mbits));
    }

    object = ip_to_object(kmsg->ikm_voucher);
    if (IO_VALID(object)) {
        assert(MACH_MSGH_BITS_VOUCHER(mbits) == MACH_MSG_TYPE_MOVE_SEND);
        ipc_object_destroy(object, MACH_MSG_TYPE_PORT_SEND);
        kmsg->ikm_voucher = IP_NULL;
    }

    if (paddr) {
        (void) vm_deallocate(ipc_kernel_copy_map, paddr, length);
    }

    ipc_kmsg_clean_body(kmsg, number, desc);
}
/*
 *	Routine:	ipc_kmsg_clean
 *	Purpose:
 *		Cleans a kernel message.  Releases all rights,
 *		references, and memory held by the message.
 *	Conditions:
 *		No locks held.
 */
void
ipc_kmsg_clean(
    ipc_kmsg_t      kmsg)
{
    ipc_object_t object;
    mach_msg_bits_t mbits;

    /* deal with importance chain while we still have dest and voucher references */
    ipc_importance_clean(kmsg);

    mbits = kmsg->ikm_header->msgh_bits;
    object = ip_to_object(kmsg->ikm_header->msgh_remote_port);
    if (IO_VALID(object)) {
        ipc_object_destroy_dest(object, MACH_MSGH_BITS_REMOTE(mbits));
    }

    object = ip_to_object(kmsg->ikm_header->msgh_local_port);
    if (IO_VALID(object)) {
        ipc_object_destroy(object, MACH_MSGH_BITS_LOCAL(mbits));
    }

    object = ip_to_object(kmsg->ikm_voucher);
    if (IO_VALID(object)) {
        assert(MACH_MSGH_BITS_VOUCHER(mbits) == MACH_MSG_TYPE_MOVE_SEND);
        ipc_object_destroy(object, MACH_MSG_TYPE_PORT_SEND);
        kmsg->ikm_voucher = IP_NULL;
    }

    if (mbits & MACH_MSGH_BITS_COMPLEX) {
        mach_msg_body_t *body;

        body = (mach_msg_body_t *) (kmsg->ikm_header + 1);
        ipc_kmsg_clean_body(kmsg, body->msgh_descriptor_count,
            (mach_msg_descriptor_t *)(body + 1));
    }
}
/*
 *	Routine:	ipc_kmsg_set_prealloc
 *	Purpose:
 *		Assign a kmsg as a preallocated message buffer to a port.
 *	Conditions:
 *		Nothing locked.
 */
void
ipc_kmsg_set_prealloc(
    ipc_kmsg_t              kmsg,
    ipc_port_t              port)
{
    assert(kmsg->ikm_prealloc == IP_NULL);

    kmsg->ikm_prealloc = IP_NULL;

    assert(port_send_turnstile(port) == TURNSTILE_NULL);
    kmsg->ikm_turnstile = TURNSTILE_NULL;
    IP_SET_PREALLOC(port, kmsg);
}
/*
 *	Routine:	ipc_kmsg_clear_prealloc
 *	Purpose:
 *		Release the assignment of a preallocated message buffer from a port.
 *	Conditions:
 *		Nothing locked.
 */
void
ipc_kmsg_clear_prealloc(
    ipc_kmsg_t              kmsg,
    ipc_port_t              port)
{
    /* take the mqueue lock since the turnstile is protected under it */
    imq_lock(&port->ip_messages);

    IP_CLEAR_PREALLOC(port, kmsg);
    set_port_send_turnstile(port, kmsg->ikm_turnstile);
    imq_unlock(&port->ip_messages);
}
/*
 *	Routine:	ipc_kmsg_prealloc
 *	Purpose:
 *		Wrapper around ipc_kmsg_alloc() to account for
 *		header expansion requirements.
 */
ipc_kmsg_t
ipc_kmsg_prealloc(mach_msg_size_t size)
{
#if defined(__LP64__)
    if (size > IKM_SAVED_MSG_SIZE - LEGACY_HEADER_SIZE_DELTA) {
        panic("ipc_kmsg_prealloc");
    }

    size += LEGACY_HEADER_SIZE_DELTA;
#endif
    return ipc_kmsg_alloc(size);
}
/*
 *	Routine:	ipc_kmsg_get
 *	Purpose:
 *		Allocates a kernel message buffer.
 *		Copies a user message to the message buffer.
 *	Conditions:
 *		Nothing locked.
 *	Returns:
 *		MACH_MSG_SUCCESS	Acquired a message buffer.
 *		MACH_SEND_MSG_TOO_SMALL	Message smaller than a header.
 *		MACH_SEND_MSG_TOO_SMALL	Message size not long-word multiple.
 *		MACH_SEND_TOO_LARGE	Message too large to ever be sent.
 *		MACH_SEND_NO_BUFFER	Couldn't allocate a message buffer.
 *		MACH_SEND_INVALID_DATA	Couldn't copy message data.
 */
mach_msg_return_t
ipc_kmsg_get(
    mach_vm_address_t       msg_addr,
    mach_msg_size_t         size,
    ipc_kmsg_t              *kmsgp)
{
    mach_msg_size_t         msg_and_trailer_size;
    ipc_kmsg_t              kmsg;
    mach_msg_max_trailer_t  *trailer;
    mach_msg_legacy_base_t  legacy_base;
    mach_msg_size_t         len_copied;
    legacy_base.body.msgh_descriptor_count = 0;

    if ((size < sizeof(mach_msg_legacy_header_t)) || (size & 3)) {
        return MACH_SEND_MSG_TOO_SMALL;
    }

    if (size > ipc_kmsg_max_body_space) {
        return MACH_SEND_TOO_LARGE;
    }

    if (size == sizeof(mach_msg_legacy_header_t)) {
        len_copied = sizeof(mach_msg_legacy_header_t);
    } else {
        len_copied = sizeof(mach_msg_legacy_base_t);
    }

    if (copyinmsg(msg_addr, (char *)&legacy_base, len_copied)) {
        return MACH_SEND_INVALID_DATA;
    }

    /*
     * If the message claims to be complex, it must at least
     * have the length of a "base" message (header + dsc_count).
     */
    if (len_copied < sizeof(mach_msg_legacy_base_t) &&
        (legacy_base.header.msgh_bits & MACH_MSGH_BITS_COMPLEX)) {
        return MACH_SEND_MSG_TOO_SMALL;
    }

    msg_addr += sizeof(legacy_base.header);
#if defined(__LP64__)
    size += LEGACY_HEADER_SIZE_DELTA;
#endif
    /* unreachable if !DEBUG */
    __unreachable_ok_push
    if (DEBUG_KPRINT_SYSCALL_PREDICATE(DEBUG_KPRINT_SYSCALL_IPC_MASK)) {
        unsigned int j;
        for (j = 0; j < sizeof(legacy_base.header); j++) {
            kprintf("%02x\n", ((unsigned char*)&legacy_base.header)[j]);
        }
    }
    __unreachable_ok_pop

    msg_and_trailer_size = size + MAX_TRAILER_SIZE;
    kmsg = ipc_kmsg_alloc(msg_and_trailer_size);
    if (kmsg == IKM_NULL) {
        return MACH_SEND_NO_BUFFER;
    }

    kmsg->ikm_header->msgh_size             = size;
    kmsg->ikm_header->msgh_bits             = legacy_base.header.msgh_bits;
    kmsg->ikm_header->msgh_remote_port      = CAST_MACH_NAME_TO_PORT(legacy_base.header.msgh_remote_port);
    kmsg->ikm_header->msgh_local_port       = CAST_MACH_NAME_TO_PORT(legacy_base.header.msgh_local_port);
    kmsg->ikm_header->msgh_voucher_port     = legacy_base.header.msgh_voucher_port;
    kmsg->ikm_header->msgh_id               = legacy_base.header.msgh_id;

    DEBUG_KPRINT_SYSCALL_IPC("ipc_kmsg_get header:\n"
        "  size:        0x%.8x\n"
        "  bits:        0x%.8x\n"
        "  remote_port: %p\n"
        "  local_port:  %p\n"
        "  voucher_port: 0x%.8x\n"
        "  id:          %.8d\n",
        kmsg->ikm_header->msgh_size,
        kmsg->ikm_header->msgh_bits,
        kmsg->ikm_header->msgh_remote_port,
        kmsg->ikm_header->msgh_local_port,
        kmsg->ikm_header->msgh_voucher_port,
        kmsg->ikm_header->msgh_id);

    if (copyinmsg(msg_addr, (char *)(kmsg->ikm_header + 1), size - (mach_msg_size_t)sizeof(mach_msg_header_t))) {
        ipc_kmsg_free(kmsg);
        return MACH_SEND_INVALID_DATA;
    }

    /* unreachable if !DEBUG */
    __unreachable_ok_push
    if (DEBUG_KPRINT_SYSCALL_PREDICATE(DEBUG_KPRINT_SYSCALL_IPC_MASK)) {
        kprintf("body: size: %lu\n", (size - sizeof(mach_msg_header_t)));
        uint32_t i;
        for (i = 0; i * 4 < (size - sizeof(mach_msg_header_t)); i++) {
            kprintf("%.4x\n", ((uint32_t *)(kmsg->ikm_header + 1))[i]);
        }
    }
    __unreachable_ok_pop
    DEBUG_IPC_KMSG_PRINT(kmsg, "ipc_kmsg_get()");

    /*
     * I reserve for the trailer the largest space (MAX_TRAILER_SIZE)
     * However, the internal size field of the trailer (msgh_trailer_size)
     * is initialized to the minimum (sizeof(mach_msg_trailer_t)), to optimize
     * the cases where no implicit data is requested.
     */
    trailer = (mach_msg_max_trailer_t *) ((vm_offset_t)kmsg->ikm_header + size);
    bzero(trailer, sizeof(*trailer));
    trailer->msgh_sender = current_thread()->task->sec_token;
    trailer->msgh_audit = current_thread()->task->audit_token;
    trailer->msgh_trailer_type = MACH_MSG_TRAILER_FORMAT_0;
    trailer->msgh_trailer_size = MACH_MSG_TRAILER_MINIMUM_SIZE;

#ifdef ppc
    if (trcWork.traceMask) {
        dbgTrace(0x1100, (unsigned int)kmsg->ikm_header->msgh_id,
            (unsigned int)kmsg->ikm_header->msgh_remote_port,
            (unsigned int)kmsg->ikm_header->msgh_local_port, 0);
    }
#endif

    trailer->msgh_labels.sender = 0;

    *kmsgp = kmsg;
    return MACH_MSG_SUCCESS;
}
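/*
 * Editor's worked example (illustrative, not from the original source): for a
 * 64-byte simple message copied in from user space on an LP64 kernel,
 * ipc_kmsg_get() grows the claimed size by LEGACY_HEADER_SIZE_DELTA (8) to
 * 72 bytes for the kernel-format header, then allocates 72 + MAX_TRAILER_SIZE
 * bytes so the maximal trailer can later be filled in place at msgh_size.
 */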
/*
 *	Routine:	ipc_kmsg_get_from_kernel
 *	Purpose:
 *		First checks for a preallocated message
 *		reserved for kernel clients.  If not found -
 *		allocates a new kernel message buffer.
 *		Copies a kernel message to the message buffer.
 *		Only resource errors are allowed.
 *	Conditions:
 *		Nothing locked.
 *		Ports in header are ipc_port_t.
 *	Returns:
 *		MACH_MSG_SUCCESS	Acquired a message buffer.
 *		MACH_SEND_NO_BUFFER	Couldn't allocate a message buffer.
 */
mach_msg_return_t
ipc_kmsg_get_from_kernel(
    mach_msg_header_t       *msg,
    mach_msg_size_t         size,
    ipc_kmsg_t              *kmsgp)
{
    ipc_kmsg_t              kmsg;
    mach_msg_size_t         msg_and_trailer_size;
    mach_msg_max_trailer_t  *trailer;
    ipc_port_t              dest_port;

    assert(size >= sizeof(mach_msg_header_t));
    assert((size & 3) == 0);

    dest_port = msg->msgh_remote_port;

    msg_and_trailer_size = size + MAX_TRAILER_SIZE;

    /*
     * See if the port has a pre-allocated kmsg for kernel
     * clients.  These are set up for those kernel clients
     * which cannot afford to wait.
     */
    if (IP_VALID(dest_port) && IP_PREALLOC(dest_port)) {
        mach_msg_size_t max_desc = 0;

        ip_lock(dest_port);
        if (!ip_active(dest_port)) {
            ip_unlock(dest_port);
            return MACH_SEND_NO_BUFFER;
        }
        assert(IP_PREALLOC(dest_port));
        kmsg = dest_port->ip_premsg;
        if (ikm_prealloc_inuse(kmsg)) {
            ip_unlock(dest_port);
            return MACH_SEND_NO_BUFFER;
        }
#if !defined(__LP64__)
        if (msg->msgh_bits & MACH_MSGH_BITS_COMPLEX) {
            assert(size > sizeof(mach_msg_base_t));
            max_desc = ((mach_msg_base_t *)msg)->body.msgh_descriptor_count *
                DESC_SIZE_ADJUSTMENT;
        }
#endif
        if (msg_and_trailer_size > kmsg->ikm_size - max_desc) {
            ip_unlock(dest_port);
            return MACH_SEND_TOO_LARGE;
        }
        ikm_prealloc_set_inuse(kmsg, dest_port);
        ikm_set_header(kmsg, NULL, msg_and_trailer_size);
        ip_unlock(dest_port);
    } else {
        kmsg = ipc_kmsg_alloc(msg_and_trailer_size);
        if (kmsg == IKM_NULL) {
            return MACH_SEND_NO_BUFFER;
        }
    }

    (void) memcpy((void *) kmsg->ikm_header, (const void *) msg, size);

    kmsg->ikm_header->msgh_size = size;

    /*
     * I reserve for the trailer the largest space (MAX_TRAILER_SIZE)
     * However, the internal size field of the trailer (msgh_trailer_size)
     * is initialized to the minimum (sizeof(mach_msg_trailer_t)), to
     * optimize the cases where no implicit data is requested.
     */
    trailer = (mach_msg_max_trailer_t *)
        ((vm_offset_t)kmsg->ikm_header + size);
    bzero(trailer, sizeof(*trailer));
    trailer->msgh_sender = KERNEL_SECURITY_TOKEN;
    trailer->msgh_audit = KERNEL_AUDIT_TOKEN;
    trailer->msgh_trailer_type = MACH_MSG_TRAILER_FORMAT_0;
    trailer->msgh_trailer_size = MACH_MSG_TRAILER_MINIMUM_SIZE;

    trailer->msgh_labels.sender = 0;

    *kmsgp = kmsg;
    return MACH_MSG_SUCCESS;
}
/*
 *	Routine:	ipc_kmsg_send
 *	Purpose:
 *		Send a message.  The message holds a reference
 *		for the destination port in the msgh_remote_port field.
 *
 *		If unsuccessful, the caller still has possession of
 *		the message and must do something with it.  If successful,
 *		the message is queued, given to a receiver, destroyed,
 *		or handled directly by the kernel via mach_msg.
 *	Conditions:
 *		Nothing locked.
 *	Returns:
 *		MACH_MSG_SUCCESS	The message was accepted.
 *		MACH_SEND_TIMED_OUT	Caller still has message.
 *		MACH_SEND_INTERRUPTED	Caller still has message.
 *		MACH_SEND_INVALID_DEST	Caller still has message.
 */
mach_msg_return_t
ipc_kmsg_send(
    ipc_kmsg_t              kmsg,
    mach_msg_option_t       option,
    mach_msg_timeout_t      send_timeout)
{
    ipc_port_t port;
    thread_t th = current_thread();
    mach_msg_return_t error = MACH_MSG_SUCCESS;
    boolean_t kernel_reply = FALSE;

    /* Check if honor qlimit flag is set on thread. */
    if ((th->options & TH_OPT_HONOR_QLIMIT) == TH_OPT_HONOR_QLIMIT) {
        /* Remove the MACH_SEND_ALWAYS flag to honor queue limit. */
        option &= (~MACH_SEND_ALWAYS);
        /* Add the timeout flag since the message queue might be full. */
        option |= MACH_SEND_TIMEOUT;
        th->options &= (~TH_OPT_HONOR_QLIMIT);
    }

#if IMPORTANCE_INHERITANCE
    bool did_importance = false;
#if IMPORTANCE_TRACE
    mach_msg_id_t imp_msgh_id = -1;
    int           sender_pid  = -1;
#endif /* IMPORTANCE_TRACE */
#endif /* IMPORTANCE_INHERITANCE */

    /* don't allow the creation of a circular loop */
    if (kmsg->ikm_header->msgh_bits & MACH_MSGH_BITS_CIRCULAR) {
        ipc_kmsg_destroy(kmsg);
        KDBG(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_KMSG_INFO) | DBG_FUNC_END, MACH_MSGH_BITS_CIRCULAR);
        return MACH_MSG_SUCCESS;
    }

    ipc_voucher_send_preprocessing(kmsg);

    port = kmsg->ikm_header->msgh_remote_port;
    assert(IP_VALID(port));
    ip_lock(port);

    /*
     * If the destination has been guarded with a reply context, and the
     * sender is consuming a send-once right, then assume this is a reply
     * to an RPC and we need to validate that this sender is currently in
     * the correct context.
     */
    if (enforce_strict_reply && port->ip_reply_context != 0 &&
        ((option & MACH_SEND_KERNEL) == 0) &&
        MACH_MSGH_BITS_REMOTE(kmsg->ikm_header->msgh_bits) == MACH_MSG_TYPE_PORT_SEND_ONCE) {
        error = ipc_kmsg_validate_reply_context_locked(option, port, th->ith_voucher, th->ith_voucher_name);
        if (error != MACH_MSG_SUCCESS) {
            ip_unlock(port);
            return error;
        }
    }

#if IMPORTANCE_INHERITANCE
retry:
#endif /* IMPORTANCE_INHERITANCE */
    /*
     * Can't deliver to a dead port.
     * However, we can pretend it got sent
     * and was then immediately destroyed.
     */
    if (!ip_active(port)) {
        ip_unlock(port);
        if (MACH_NODE_VALID(kmsg->ikm_node) && FPORT_VALID(port->ip_messages.imq_fport)) {
            flipc_msg_ack(kmsg->ikm_node, &port->ip_messages, FALSE);
        }
        if (did_importance) {
            /*
             * We're going to pretend we delivered this message
             * successfully, and just eat the kmsg. However, the
             * kmsg is actually visible via the importance_task!
             * We need to cleanup this linkage before we destroy
             * the message, and more importantly before we set the
             * msgh_remote_port to NULL. See: 34302571
             */
            ipc_importance_clean(kmsg);
        }
        ip_release(port); /* JMM - Future: release right, not just ref */
        kmsg->ikm_header->msgh_remote_port = MACH_PORT_NULL;
        ipc_kmsg_destroy(kmsg);
        KDBG(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_KMSG_INFO) | DBG_FUNC_END, MACH_SEND_INVALID_DEST);
        return MACH_MSG_SUCCESS;
    }

    if (port->ip_receiver == ipc_space_kernel) {
        /*
         * We can check ip_receiver == ipc_space_kernel
         * before checking that the port is active because
         * ipc_port_dealloc_kernel clears ip_receiver
         * before destroying a kernel port.
         */
        require_ip_active(port);
        port->ip_messages.imq_seqno++;
        ip_unlock(port);

        current_task()->messages_sent++;

        /*
         * Call the server routine, and get the reply message to send.
         */
        kmsg = ipc_kobject_server(kmsg, option);
        if (kmsg == IKM_NULL) {
            return MACH_MSG_SUCCESS;
        }

        /* sign the reply message */
        ikm_sign(kmsg);

        /* restart the KMSG_INFO tracing for the reply message */
        KDBG(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_KMSG_INFO) | DBG_FUNC_START);
        port = kmsg->ikm_header->msgh_remote_port;
        assert(IP_VALID(port));
        ip_lock(port);
        /* fall thru with reply - same options */
        kernel_reply = TRUE;
        if (!ip_active(port)) {
            error = MACH_SEND_INVALID_DEST;
        }
    }

#if IMPORTANCE_INHERITANCE
    /*
     * Need to see if this message needs importance donation and/or
     * propagation.  That routine can drop the port lock temporarily.
     * If it does we'll have to revalidate the destination.
     */
    if (!did_importance) {
        did_importance = true;
        if (ipc_importance_send(kmsg, option)) {
            goto retry;
        }
    }
#endif /* IMPORTANCE_INHERITANCE */

    if (error != MACH_MSG_SUCCESS) {
        ip_unlock(port);
    } else {
        /*
         * We have a valid message and a valid reference on the port.
         * we can unlock the port and call mqueue_send() on its message
         * queue. Lock message queue while port is locked.
         */
        imq_lock(&port->ip_messages);

        ipc_special_reply_port_msg_sent(port);

        ip_unlock(port);

        error = ipc_mqueue_send(&port->ip_messages, kmsg, option,
            send_timeout);
    }

#if IMPORTANCE_INHERITANCE
    if (did_importance) {
        __unused int importance_cleared = 0;
        switch (error) {
        case MACH_SEND_TIMED_OUT:
        case MACH_SEND_NO_BUFFER:
        case MACH_SEND_INTERRUPTED:
        case MACH_SEND_INVALID_DEST:
            /*
             * We still have the kmsg and its
             * reference on the port.  But we
             * have to back out the importance
             * boost.
             *
             * The port could have changed hands,
             * be inflight to another destination,
             * etc...  But in those cases our
             * back-out will find the new owner
             * (and all the operations that
             * transferred the right should have
             * applied their own boost adjustments
             * to the old owner(s)).
             */
            importance_cleared = 1;
            ipc_importance_clean(kmsg);
            break;

        case MACH_MSG_SUCCESS:
        default:
            break;
        }
#if IMPORTANCE_TRACE
        KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_MSG, IMP_MSG_SEND)) | DBG_FUNC_END,
            task_pid(current_task()), sender_pid, imp_msgh_id, importance_cleared, 0);
#endif /* IMPORTANCE_TRACE */
    }
#endif /* IMPORTANCE_INHERITANCE */

    /*
     * If the port has been destroyed while we wait, treat the message
     * as a successful delivery (like we do for an inactive port).
     */
    if (error == MACH_SEND_INVALID_DEST) {
        if (MACH_NODE_VALID(kmsg->ikm_node) && FPORT_VALID(port->ip_messages.imq_fport)) {
            flipc_msg_ack(kmsg->ikm_node, &port->ip_messages, FALSE);
        }
        ip_release(port); /* JMM - Future: release right, not just ref */
        kmsg->ikm_header->msgh_remote_port = MACH_PORT_NULL;
        ipc_kmsg_destroy(kmsg);
        KDBG(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_KMSG_INFO) | DBG_FUNC_END, MACH_SEND_INVALID_DEST);
        return MACH_MSG_SUCCESS;
    }

    if (error != MACH_MSG_SUCCESS && kernel_reply) {
        /*
         * Kernel reply messages that fail can't be allowed to
         * pseudo-receive on error conditions. We need to just treat
         * the message as a successful delivery.
         */
        if (MACH_NODE_VALID(kmsg->ikm_node) && FPORT_VALID(port->ip_messages.imq_fport)) {
            flipc_msg_ack(kmsg->ikm_node, &port->ip_messages, FALSE);
        }
        ip_release(port); /* JMM - Future: release right, not just ref */
        kmsg->ikm_header->msgh_remote_port = MACH_PORT_NULL;
        ipc_kmsg_destroy(kmsg);
        KDBG(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_KMSG_INFO) | DBG_FUNC_END, error);
        return MACH_MSG_SUCCESS;
    }
    return error;
}
/*
 *	Routine:	ipc_kmsg_put
 *	Purpose:
 *		Copies a message buffer to a user message.
 *		Copies only the specified number of bytes.
 *		Frees the message buffer.
 *	Conditions:
 *		Nothing locked.  The message buffer must have clean
 *		header fields.
 *	Returns:
 *		MACH_MSG_SUCCESS	Copied data out of message buffer.
 *		MACH_RCV_INVALID_DATA	Couldn't copy to user message.
 */
mach_msg_return_t
ipc_kmsg_put(
    ipc_kmsg_t              kmsg,
    mach_msg_option_t       option,
    mach_vm_address_t       rcv_addr,
    mach_msg_size_t         rcv_size,
    mach_msg_size_t         trailer_size,
    mach_msg_size_t         *sizep)
{
    mach_msg_size_t size = kmsg->ikm_header->msgh_size + trailer_size;
    mach_msg_return_t mr;

    DEBUG_IPC_KMSG_PRINT(kmsg, "ipc_kmsg_put()");

    DEBUG_KPRINT_SYSCALL_IPC("ipc_kmsg_put header:\n"
        "  size:        0x%.8x\n"
        "  bits:        0x%.8x\n"
        "  remote_port: %p\n"
        "  local_port:  %p\n"
        "  voucher_port: 0x%.8x\n"
        "  id:          %.8d\n",
        kmsg->ikm_header->msgh_size,
        kmsg->ikm_header->msgh_bits,
        kmsg->ikm_header->msgh_remote_port,
        kmsg->ikm_header->msgh_local_port,
        kmsg->ikm_header->msgh_voucher_port,
        kmsg->ikm_header->msgh_id);

#if defined(__LP64__)
    if (current_task() != kernel_task) { /* don't if receiver expects fully-cooked in-kernel msg */
        mach_msg_legacy_header_t *legacy_header =
            (mach_msg_legacy_header_t *)((vm_offset_t)(kmsg->ikm_header) + LEGACY_HEADER_SIZE_DELTA);

        mach_msg_bits_t         bits            = kmsg->ikm_header->msgh_bits;
        mach_msg_size_t         msg_size        = kmsg->ikm_header->msgh_size;
        mach_port_name_t        remote_port     = CAST_MACH_PORT_TO_NAME(kmsg->ikm_header->msgh_remote_port);
        mach_port_name_t        local_port      = CAST_MACH_PORT_TO_NAME(kmsg->ikm_header->msgh_local_port);
        mach_port_name_t        voucher_port    = kmsg->ikm_header->msgh_voucher_port;
        mach_msg_id_t           id              = kmsg->ikm_header->msgh_id;

        legacy_header->msgh_id                  = id;
        legacy_header->msgh_local_port          = local_port;
        legacy_header->msgh_remote_port         = remote_port;
        legacy_header->msgh_voucher_port        = voucher_port;
        legacy_header->msgh_size                = msg_size - LEGACY_HEADER_SIZE_DELTA;
        legacy_header->msgh_bits                = bits;

        size -= LEGACY_HEADER_SIZE_DELTA;
        kmsg->ikm_header = (mach_msg_header_t *)legacy_header;
    }
#endif

    /* unreachable if !DEBUG */
    __unreachable_ok_push
    if (DEBUG_KPRINT_SYSCALL_PREDICATE(DEBUG_KPRINT_SYSCALL_IPC_MASK)) {
        kprintf("ipc_kmsg_put header+body: %d\n", (size));
        uint32_t i;
        for (i = 0; i * 4 < size; i++) {
            kprintf("%.4x\n", ((uint32_t *)kmsg->ikm_header)[i]);
        }
        kprintf("type: %d\n", ((mach_msg_type_descriptor_t *)(((mach_msg_base_t *)kmsg->ikm_header) + 1))->type);
    }
    __unreachable_ok_pop

    /* Re-Compute target address if using stack-style delivery */
    if (option & MACH_RCV_STACK) {
        rcv_addr += rcv_size - size;
    }

    if (copyoutmsg((const char *) kmsg->ikm_header, rcv_addr, size)) {
        mr = MACH_RCV_INVALID_DATA;
        size = 0;
    } else {
        mr = MACH_MSG_SUCCESS;
    }

    KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_KMSG_LINK) | DBG_FUNC_NONE,
        (rcv_addr >= VM_MIN_KERNEL_AND_KEXT_ADDRESS ||
        rcv_addr + size >= VM_MIN_KERNEL_AND_KEXT_ADDRESS) ? (uintptr_t)0 : (uintptr_t)rcv_addr,
        VM_KERNEL_ADDRPERM((uintptr_t)kmsg),
        1 /* this is on the receive/copyout path */,
        0, 0);

    ipc_kmsg_free(kmsg);

    if (sizep) {
        *sizep = size;
    }
    return mr;
}
/*
 * Routine: ipc_kmsg_put_to_kernel
 *
 *  Copies a message buffer to a kernel message.
 *  Frees the message buffer.
 *  No errors allowed.
 */
void
ipc_kmsg_put_to_kernel(
    mach_msg_header_t   *msg,
    ipc_kmsg_t          kmsg,
    mach_msg_size_t     size)
{
    (void) memcpy((void *) msg, (const void *) kmsg->ikm_header, size);

    ipc_kmsg_free(kmsg);
}
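
/*
 * Illustrative sketch, compiled out, assuming the header-contraction step in
 * ipc_kmsg_put() above: the 64-bit kernel header is rewritten in place as a
 * legacy (32-bit port name) header, so the user-visible size shrinks by the
 * difference between the two layouts (LEGACY_HEADER_SIZE_DELTA). The helper
 * name below is hypothetical; the types come from <mach/message.h>, which
 * this file already includes.
 */
#if 0 /* example only */
static mach_msg_size_t
example_user_visible_size(mach_msg_size_t kernel_msgh_size,
    mach_msg_size_t trailer_size)
{
    /* delta between the in-kernel header and the legacy user header */
    const mach_msg_size_t delta =
        sizeof(mach_msg_header_t) - sizeof(mach_msg_legacy_header_t);

    /* mirrors "size -= LEGACY_HEADER_SIZE_DELTA" above */
    return kernel_msgh_size + trailer_size - delta;
}
#endif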
static pthread_priority_compact_t
ipc_get_current_thread_priority(void)
{
    thread_t thread = current_thread();
    thread_qos_t qos;
    int relpri;

    qos = thread_get_requested_qos(thread, &relpri);
    if (!qos) {
        qos = thread_user_promotion_qos_for_pri(thread->base_pri);
        relpri = 0;
    }
    return _pthread_priority_make_from_thread_qos(qos, relpri, 0);
}
static kern_return_t
ipc_kmsg_set_qos(
    ipc_kmsg_t kmsg,
    mach_msg_option_t options,
    mach_msg_priority_t priority)
{
    kern_return_t kr;
    ipc_port_t special_reply_port = kmsg->ikm_header->msgh_local_port;
    ipc_port_t dest_port = kmsg->ikm_header->msgh_remote_port;

    if ((options & MACH_SEND_OVERRIDE) &&
        !mach_msg_priority_is_pthread_priority(priority)) {
        mach_msg_qos_t qos = mach_msg_priority_qos(priority);
        int relpri = mach_msg_priority_relpri(priority);
        mach_msg_qos_t ovr = mach_msg_priority_overide_qos(priority);

        kmsg->ikm_ppriority = _pthread_priority_make_from_thread_qos(qos, relpri, 0);
        kmsg->ikm_qos_override = MAX(qos, ovr);
    } else {
        kr = ipc_get_pthpriority_from_kmsg_voucher(kmsg, &kmsg->ikm_ppriority);
        if (kr != KERN_SUCCESS) {
            if (options & MACH_SEND_PROPAGATE_QOS) {
                kmsg->ikm_ppriority = ipc_get_current_thread_priority();
            } else {
                kmsg->ikm_ppriority = MACH_MSG_PRIORITY_UNSPECIFIED;
            }
        }

        if (options & MACH_SEND_OVERRIDE) {
            mach_msg_qos_t qos = _pthread_priority_thread_qos(kmsg->ikm_ppriority);
            mach_msg_qos_t ovr = _pthread_priority_thread_qos(priority);
            kmsg->ikm_qos_override = MAX(qos, ovr);
        } else {
            kmsg->ikm_qos_override = _pthread_priority_thread_qos(kmsg->ikm_ppriority);
        }
    }

    kr = KERN_SUCCESS;

    if (IP_VALID(special_reply_port) &&
        MACH_MSGH_BITS_LOCAL(kmsg->ikm_header->msgh_bits) == MACH_MSG_TYPE_PORT_SEND_ONCE) {
        if ((options & MACH_SEND_SYNC_OVERRIDE)) {
            boolean_t sync_bootstrap_checkin = !!(options & MACH_SEND_SYNC_BOOTSTRAP_CHECKIN);
            /*
             * Link the destination port to special reply port and make sure that
             * dest port has a send turnstile, else allocate one.
             */
            ipc_port_link_special_reply_port(special_reply_port, dest_port, sync_bootstrap_checkin);
        }
    }
    return kr;
}
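
/*
 * Illustrative sketch, compiled out: the effective override recorded on the
 * kmsg above is simply the larger of the base QoS and the override QoS
 * carried in the priority value. The helper name is hypothetical.
 */
#if 0 /* example only */
static mach_msg_qos_t
example_effective_override(mach_msg_qos_t base_qos, mach_msg_qos_t override_qos)
{
    /* mirrors "kmsg->ikm_qos_override = MAX(qos, ovr)" in ipc_kmsg_set_qos() */
    return (override_qos > base_qos) ? override_qos : base_qos;
}
#endif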
static void
ipc_kmsg_allow_immovable_send(
    ipc_kmsg_t kmsg,
    ipc_entry_t dest_entry)
{
    ipc_object_t object = dest_entry->ie_object;
    /*
     * If the dest port is a kobject, allow copyin of immovable send
     * rights in the message body to succeed
     */
    if (IO_VALID(object) && io_is_kobject(object)) {
        kmsg->ikm_flags |= IPC_KMSG_FLAGS_ALLOW_IMMOVABLE_SEND;
    }
}
/*
 * Routine: ipc_kmsg_link_reply_context_locked
 *
 *  Link any required context from the sending voucher
 *  to the reply port. The ipc_kmsg_copyin function will
 *  enforce that the sender calls mach_msg in this context.
 *
 *  reply port is locked
 */
static void
ipc_kmsg_link_reply_context_locked(
    ipc_port_t reply_port,
    ipc_port_t voucher_port)
{
    kern_return_t __assert_only kr;
    uint32_t persona_id = 0;
    ipc_voucher_t voucher;

    ip_lock_held(reply_port);

    if (!ip_active(reply_port)) {
        return;
    }

    voucher = convert_port_to_voucher(voucher_port);

    kr = bank_get_bank_ledger_thread_group_and_persona(voucher, NULL, NULL, &persona_id);
    assert(kr == KERN_SUCCESS);
    ipc_voucher_release(voucher);

    if (persona_id == 0 || persona_id == PERSONA_ID_NONE) {
        /* there was no persona context to record */
        return;
    }

    /*
     * Set the persona_id as the context on the reply port.
     * This will force the thread that replies to have adopted a voucher
     * with a matching persona.
     */
    reply_port->ip_reply_context = persona_id;
}
static kern_return_t
ipc_kmsg_validate_reply_port_locked(ipc_port_t reply_port, mach_msg_option_t options)
{
    ip_lock_held(reply_port);

    if (!ip_active(reply_port)) {
        /*
         * Ideally, we would enforce that the reply receive right is
         * active, but asynchronous XPC cancellation destroys the
         * receive right, so we just have to return success here.
         */
        return KERN_SUCCESS;
    }

    if (options & MACH_SEND_MSG) {
        /*
         * If the reply port is active, then it should not be
         * in-transit, and the receive right should be in the caller's
         * IPC space.
         */
        if (!reply_port->ip_receiver_name || reply_port->ip_receiver != current_task()->itk_space) {
            return KERN_INVALID_CAPABILITY;
        }

        /*
         * A port used as a reply port in an RPC should have exactly 1
         * extant send-once right which we either just made or are
         * moving as part of the IPC.
         */
        if (reply_port->ip_sorights != 1) {
            return KERN_INVALID_CAPABILITY;
        }

        /*
         * XPC uses an extra send-right to keep the name of the reply
         * right around through cancellation. That makes it harder to
         * enforce a particular semantic here, so for now, we say that
         * you can have a maximum of 1 send right (in addition to your
         * send once right). In the future, it would be great to lock
         * this down even further.
         */
        if (reply_port->ip_srights > 1) {
            return KERN_INVALID_CAPABILITY;
        }

        /*
         * The sender can also specify that the receive right should
         * be immovable. Note that this check only applies to
         * send-only operations. Combined send/receive or rcv-only
         * operations can specify an immovable receive right by
         * opting into guarded descriptors (MACH_RCV_GUARDED_DESC)
         * and using the MACH_MSG_STRICT_REPLY options flag.
         */
        if (MACH_SEND_REPLY_IS_IMMOVABLE(options)) {
            if (!reply_port->ip_immovable_receive) {
                return KERN_INVALID_CAPABILITY;
            }
        }
    }

    /*
     * don't enforce this yet: need a better way of indicating the
     * receiver wants this...
     */
    if (MACH_RCV_WITH_IMMOVABLE_REPLY(options)) {
        if (!reply_port->ip_immovable_receive) {
            return KERN_INVALID_CAPABILITY;
        }
    }

    return KERN_SUCCESS;
}
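
/*
 * Illustrative summary, compiled out: on the send path, an active reply port
 * passes the validation above only if its receive right is still in the
 * caller's space and it carries exactly one send-once right and at most one
 * extra send right. Field names follow the code above; the helper name and
 * the caller_space parameter are hypothetical.
 */
#if 0 /* example only */
static boolean_t
example_reply_port_is_acceptable(ipc_port_t reply_port, ipc_space_t caller_space)
{
    if (!ip_active(reply_port)) {
        return TRUE; /* tolerated: asynchronous cancellation may have destroyed it */
    }
    return reply_port->ip_receiver_name != MACH_PORT_NULL &&
           reply_port->ip_receiver == caller_space &&
           reply_port->ip_sorights == 1 &&
           reply_port->ip_srights <= 1;
}
#endif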
/*
 * Routine: ipc_kmsg_validate_reply_context_locked
 *
 *  Validate that the current thread is running in the context
 *  required by the destination port.
 *
 *  dest_port is locked
 *
 *  MACH_MSG_SUCCESS on success.
 *  On error, an EXC_GUARD exception is also raised.
 *  This function *always* resets the port reply context.
 */
static mach_msg_return_t
ipc_kmsg_validate_reply_context_locked(
    mach_msg_option_t option,
    ipc_port_t dest_port,
    ipc_voucher_t voucher,
    mach_port_name_t voucher_name)
{
    uint32_t dest_ctx = dest_port->ip_reply_context;
    dest_port->ip_reply_context = 0;

    if (!ip_active(dest_port)) {
        return MACH_MSG_SUCCESS;
    }

    if (voucher == IPC_VOUCHER_NULL || !MACH_PORT_VALID(voucher_name)) {
        if ((option & MACH_SEND_KERNEL) == 0) {
            mach_port_guard_exception(voucher_name, 0,
                (MPG_FLAGS_STRICT_REPLY_INVALID_VOUCHER | dest_ctx),
                kGUARD_EXC_STRICT_REPLY);
        }
        return MACH_SEND_INVALID_CONTEXT;
    }

    kern_return_t __assert_only kr;
    uint32_t persona_id = 0;
    kr = bank_get_bank_ledger_thread_group_and_persona(voucher, NULL, NULL, &persona_id);
    assert(kr == KERN_SUCCESS);

    if (dest_ctx != persona_id) {
        if ((option & MACH_SEND_KERNEL) == 0) {
            mach_port_guard_exception(voucher_name, 0,
                (MPG_FLAGS_STRICT_REPLY_MISMATCHED_PERSONA | ((((uint64_t)persona_id << 32) & MPG_FLAGS_STRICT_REPLY_MASK) | dest_ctx)),
                kGUARD_EXC_STRICT_REPLY);
        }
        return MACH_SEND_INVALID_CONTEXT;
    }

    return MACH_MSG_SUCCESS;
}
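
/*
 * Illustrative sketch, compiled out: how the strict-reply guard payload above
 * packs both the offending persona and the expected context into a single
 * 64-bit value handed to mach_port_guard_exception(). The helper name is
 * hypothetical; the flag constants are the ones used above.
 */
#if 0 /* example only */
static uint64_t
example_strict_reply_payload(uint64_t flag, uint32_t persona_id, uint32_t dest_ctx)
{
    /* high word: persona (masked), low word: the context the port demanded */
    return flag | (((uint64_t)persona_id << 32) & MPG_FLAGS_STRICT_REPLY_MASK) | dest_ctx;
}
#endif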
/*
 * Routine: ipc_kmsg_copyin_header
 *
 *  "Copy-in" port rights in the header of a message.
 *  Operates atomically; if it doesn't succeed the
 *  message header and the space are left untouched.
 *  If it does succeed the remote/local port fields
 *  contain object pointers instead of port names,
 *  and the bits field is updated. The destination port
 *  will be a valid port pointer.
 *
 *  MACH_MSG_SUCCESS        Successful copyin.
 *  MACH_SEND_INVALID_HEADER
 *      Illegal value in the message header bits.
 *  MACH_SEND_INVALID_DEST  The space is dead.
 *  MACH_SEND_INVALID_DEST  Can't copyin destination port.
 *      (Either KERN_INVALID_NAME or KERN_INVALID_RIGHT.)
 *  MACH_SEND_INVALID_REPLY Can't copyin reply port.
 *      (Either KERN_INVALID_NAME or KERN_INVALID_RIGHT.)
 */
static mach_msg_return_t
ipc_kmsg_copyin_header(
    ipc_kmsg_t kmsg,
    ipc_space_t space,
    mach_msg_priority_t priority,
    mach_msg_option_t *optionp)
{
    mach_msg_header_t *msg = kmsg->ikm_header;
    mach_msg_bits_t mbits = msg->msgh_bits & MACH_MSGH_BITS_USER;
    mach_port_name_t dest_name = CAST_MACH_PORT_TO_NAME(msg->msgh_remote_port);
    mach_port_name_t reply_name = CAST_MACH_PORT_TO_NAME(msg->msgh_local_port);
    mach_port_name_t voucher_name = MACH_PORT_NULL;
    mach_msg_type_name_t dest_type = MACH_MSGH_BITS_REMOTE(mbits);
    mach_msg_type_name_t reply_type = MACH_MSGH_BITS_LOCAL(mbits);
    mach_msg_type_name_t voucher_type = MACH_MSGH_BITS_VOUCHER(mbits);
    ipc_object_t dest_port = IO_NULL;
    ipc_object_t reply_port = IO_NULL;
    ipc_port_t dest_soright = IP_NULL;
    ipc_port_t reply_soright = IP_NULL;
    ipc_port_t voucher_soright = IP_NULL;
    ipc_port_t release_port = IP_NULL;
    ipc_port_t voucher_port = IP_NULL;
    ipc_port_t voucher_release_port = IP_NULL;
    ipc_entry_t dest_entry = IE_NULL;
    ipc_entry_t reply_entry = IE_NULL;
    ipc_entry_t voucher_entry = IE_NULL;

#if IMPORTANCE_INHERITANCE
    boolean_t needboost = FALSE;
#endif /* IMPORTANCE_INHERITANCE */
    if ((mbits != msg->msgh_bits) ||
        (!MACH_MSG_TYPE_PORT_ANY_SEND(dest_type)) ||
        ((reply_type == 0) ?
        (reply_name != MACH_PORT_NULL) :
        !MACH_MSG_TYPE_PORT_ANY_SEND(reply_type))) {
        return MACH_SEND_INVALID_HEADER;
    }

    if (!MACH_PORT_VALID(dest_name)) {
        return MACH_SEND_INVALID_DEST;
    }

    is_write_lock(space);
    if (!is_active(space)) {
        is_write_unlock(space);
        return MACH_SEND_INVALID_DEST;
    }
    /* space locked and active */

    /*
     * If there is a voucher specified, make sure the disposition is
     * valid and the entry actually refers to a voucher port. Don't
     * actually copy in until we validate destination and reply.
     */
    if (voucher_type != MACH_MSGH_BITS_ZERO) {
        voucher_name = msg->msgh_voucher_port;

        if (voucher_name == MACH_PORT_DEAD ||
            (voucher_type != MACH_MSG_TYPE_MOVE_SEND &&
            voucher_type != MACH_MSG_TYPE_COPY_SEND)) {
            is_write_unlock(space);
            if ((*optionp & MACH_SEND_KERNEL) == 0) {
                mach_port_guard_exception(voucher_name, 0, 0, kGUARD_EXC_SEND_INVALID_VOUCHER);
            }
            return MACH_SEND_INVALID_VOUCHER;
        }

        if (voucher_name != MACH_PORT_NULL) {
            voucher_entry = ipc_entry_lookup(space, voucher_name);
            if (voucher_entry == IE_NULL ||
                (voucher_entry->ie_bits & MACH_PORT_TYPE_SEND) == 0 ||
                io_kotype(voucher_entry->ie_object) != IKOT_VOUCHER) {
                is_write_unlock(space);
                if ((*optionp & MACH_SEND_KERNEL) == 0) {
                    mach_port_guard_exception(voucher_name, 0, 0, kGUARD_EXC_SEND_INVALID_VOUCHER);
                }
                return MACH_SEND_INVALID_VOUCHER;
            }
            voucher_type = MACH_MSG_TYPE_MOVE_SEND;
        }
    }

    if (enforce_strict_reply && MACH_SEND_WITH_STRICT_REPLY(*optionp) &&
        (!MACH_PORT_VALID(reply_name) ||
        ((reply_type != MACH_MSG_TYPE_MAKE_SEND_ONCE) && (reply_type != MACH_MSG_TYPE_MOVE_SEND_ONCE))
        )) {
        /*
         * The caller cannot enforce a reply context with an invalid
         * reply port name, or a non-send_once reply disposition.
         */
        is_write_unlock(space);
        if ((*optionp & MACH_SEND_KERNEL) == 0) {
            mach_port_guard_exception(reply_name, 0,
                (MPG_FLAGS_STRICT_REPLY_INVALID_REPLY_DISP | reply_type),
                kGUARD_EXC_STRICT_REPLY);
        }
        return MACH_SEND_INVALID_REPLY;
    }
    /*
     * Handle combinations of validating destination and reply; along
     * with copying in destination, reply, and voucher in an atomic way.
     */
    if (dest_name == voucher_name) {
        /*
         * If the destination name is the same as the voucher name,
         * the voucher_entry must already be known. Either that or
         * the destination name is MACH_PORT_NULL (i.e. invalid).
         */
        dest_entry = voucher_entry;
        if (dest_entry == IE_NULL) {
            goto invalid_dest;
        }

        /* Check if dest port allows immovable send rights to be sent in the kmsg body */
        ipc_kmsg_allow_immovable_send(kmsg, dest_entry);

        /*
         * Make sure a future copyin of the reply port will succeed.
         * Once we start copying in the dest/voucher pair, we can't
         */
        if (MACH_PORT_VALID(reply_name)) {
            assert(reply_type != 0); /* because reply_name not null */

            /* It is just WRONG if dest, voucher, and reply are all the same. */
            if (voucher_name == reply_name) {
                goto invalid_reply;
            }
            reply_entry = ipc_entry_lookup(space, reply_name);
            if (reply_entry == IE_NULL) {
                goto invalid_reply;
            }
            assert(dest_entry != reply_entry); /* names are not equal */
            if (!ipc_right_copyin_check_reply(space, reply_name, reply_entry, reply_type)) {
                goto invalid_reply;
            }
        }

        /*
         * Do the joint copyin of the dest disposition and
         * voucher disposition from the one entry/port. We
         * already validated that the voucher copyin would
         * succeed (above). So, any failure in combining
         * the copyins can be blamed on the destination.
         */
        kr = ipc_right_copyin_two(space, dest_name, dest_entry,
            dest_type, voucher_type, &dest_port, &dest_soright,
            &release_port);
        if (kr != KERN_SUCCESS) {
            assert(kr != KERN_INVALID_CAPABILITY);
            goto invalid_dest;
        }
        voucher_port = ip_object_to_port(dest_port);

        /*
         * could not have been one of these dispositions,
         * validated the port was a true kernel voucher port above,
         * AND was successfully able to copyin both dest and voucher.
         */
        assert(dest_type != MACH_MSG_TYPE_MAKE_SEND);
        assert(dest_type != MACH_MSG_TYPE_MAKE_SEND_ONCE);
        assert(dest_type != MACH_MSG_TYPE_MOVE_SEND_ONCE);

        /*
         * Perform the delayed reply right copyin (guaranteed success).
         */
        if (reply_entry != IE_NULL) {
            kr = ipc_right_copyin(space, reply_name, reply_entry,
                reply_type, IPC_RIGHT_COPYIN_FLAGS_DEADOK,
                &reply_port, &reply_soright,
                &release_port, &assertcnt, 0, NULL);
            assert(assertcnt == 0);
            assert(kr == KERN_SUCCESS);
        }
    } else {
        if (dest_name == reply_name) {
            /*
             * Destination and reply ports are the same!
             * This is very similar to the case where the
             * destination and voucher ports were the same
             * (except the reply port disposition is not
             * previously validated).
             */
            dest_entry = ipc_entry_lookup(space, dest_name);
            if (dest_entry == IE_NULL) {
                goto invalid_dest;
            }

            ipc_kmsg_allow_immovable_send(kmsg, dest_entry);

            reply_entry = dest_entry;
            assert(reply_type != 0); /* because name not null */

            /*
             * Pre-validate that the reply right can be copied in by itself
             */
            if (!ipc_right_copyin_check_reply(space, reply_name, reply_entry, reply_type)) {
                goto invalid_reply;
            }

            /*
             * Do the joint copyin of the dest disposition and
             * reply disposition from the one entry/port.
             */
            kr = ipc_right_copyin_two(space, dest_name, dest_entry,
                dest_type, reply_type, &dest_port, &dest_soright,
                &release_port);
            if (kr == KERN_INVALID_CAPABILITY) {
                goto invalid_reply;
            } else if (kr != KERN_SUCCESS) {
                goto invalid_dest;
            }
            reply_port = dest_port;
        } else {
            /*
             * Handle destination and reply independently, as
             * they are independent entries (even if the entries
             * refer to the same port).
             *
             * This can be the tough case to make atomic.
             *
             * The difficult problem is serializing with port death.
             * The bad case is when dest_port dies after its copyin,
             * reply_port dies before its copyin, and dest_port dies before
             * reply_port. Then the copyins operated as if dest_port was
             * alive and reply_port was dead, which shouldn't have happened
             * because they died in the other order.
             *
             * Note that it is easy for a user task to tell if
             * a copyin happened before or after a port died.
             * If a port dies before copyin, a dead-name notification
             * is generated and the dead name's urefs are incremented,
             * and if the copyin happens first, a port-deleted
             * notification is generated.
             *
             * Even so, avoiding that potentially detectable race is too
             * expensive - and no known code cares about it. So, we just
             * do the expedient thing and copy them in one after the other.
             */

            dest_entry = ipc_entry_lookup(space, dest_name);
            if (dest_entry == IE_NULL) {
                goto invalid_dest;
            }
            assert(dest_entry != voucher_entry);

            ipc_kmsg_allow_immovable_send(kmsg, dest_entry);

            /*
             * Make sure reply port entry is valid before dest copyin.
             */
            if (MACH_PORT_VALID(reply_name)) {
                if (reply_name == voucher_name) {
                    goto invalid_reply;
                }
                reply_entry = ipc_entry_lookup(space, reply_name);
                if (reply_entry == IE_NULL) {
                    goto invalid_reply;
                }
                assert(dest_entry != reply_entry); /* names are not equal */
                assert(reply_type != 0); /* because reply_name not null */

                if (!ipc_right_copyin_check_reply(space, reply_name, reply_entry, reply_type)) {
                    goto invalid_reply;
                }
            }

            /*
             * copyin the destination.
             */
            kr = ipc_right_copyin(space, dest_name, dest_entry,
                dest_type, (IPC_RIGHT_COPYIN_FLAGS_ALLOW_IMMOVABLE_SEND |
                IPC_RIGHT_COPYIN_FLAGS_ALLOW_DEAD_SEND_ONCE),
                &dest_port, &dest_soright,
                &release_port, &assertcnt, 0, NULL);
            assert(assertcnt == 0);
            if (kr != KERN_SUCCESS) {
                goto invalid_dest;
            }
            assert(IO_VALID(dest_port));
            assert(!IP_VALID(release_port));

            /*
             * Copyin the pre-validated reply right.
             * It's OK if the reply right has gone dead in the meantime.
             */
            if (MACH_PORT_VALID(reply_name)) {
                kr = ipc_right_copyin(space, reply_name, reply_entry,
                    reply_type, IPC_RIGHT_COPYIN_FLAGS_DEADOK,
                    &reply_port, &reply_soright,
                    &release_port, &assertcnt, 0, NULL);
                assert(assertcnt == 0);
                assert(kr == KERN_SUCCESS);
            } else {
                /* convert invalid name to equivalent ipc_object type */
                reply_port = ip_to_object(CAST_MACH_NAME_TO_PORT(reply_name));
            }
        }
    }
    /*
     * Finally can copyin the voucher right now that dest and reply
     * are fully copied in (guaranteed success).
     */
    if (IE_NULL != voucher_entry) {
        kr = ipc_right_copyin(space, voucher_name, voucher_entry,
            voucher_type, IPC_RIGHT_COPYIN_FLAGS_NONE,
            (ipc_object_t *)&voucher_port,
            &voucher_soright,
            &voucher_release_port,
            &assertcnt, 0, NULL);
        assert(assertcnt == 0);
        assert(KERN_SUCCESS == kr);
        assert(IP_VALID(voucher_port));
        require_ip_active(voucher_port);
    }
    /*
     * The entries might need to be deallocated.
     *
     * Each entry should be deallocated only once,
     * even if it was specified in more than one slot in the header.
     * Note that dest can be the same entry as reply or voucher,
     * but reply and voucher must be distinct entries.
     */
    assert(IE_NULL != dest_entry);
    if (IE_NULL != reply_entry) {
        assert(reply_entry != voucher_entry);
    }

    if (IE_BITS_TYPE(dest_entry->ie_bits) == MACH_PORT_TYPE_NONE) {
        ipc_entry_dealloc(space, dest_name, dest_entry);

        if (dest_entry == reply_entry) {
            reply_entry = IE_NULL;
        }

        if (dest_entry == voucher_entry) {
            voucher_entry = IE_NULL;
        }

        dest_entry = IE_NULL;
    }
    if (IE_NULL != reply_entry &&
        IE_BITS_TYPE(reply_entry->ie_bits) == MACH_PORT_TYPE_NONE) {
        ipc_entry_dealloc(space, reply_name, reply_entry);
        reply_entry = IE_NULL;
    }
    if (IE_NULL != voucher_entry &&
        IE_BITS_TYPE(voucher_entry->ie_bits) == MACH_PORT_TYPE_NONE) {
        ipc_entry_dealloc(space, voucher_name, voucher_entry);
        voucher_entry = IE_NULL;
    }

    dest_type = ipc_object_copyin_type(dest_type);
    reply_type = ipc_object_copyin_type(reply_type);
    /*
     * JMM - Without rdar://problem/6275821, this is the last place we can
     * re-arm the send-possible notifications. It may trigger unexpectedly
     * early (send may NOT have failed), but better than missing. We assure
     * we won't miss by forcing MACH_SEND_ALWAYS if we got past arming.
     */
    if (((*optionp & MACH_SEND_NOTIFY) != 0) &&
        dest_type != MACH_MSG_TYPE_PORT_SEND_ONCE &&
        dest_entry != IE_NULL && dest_entry->ie_request != IE_REQ_NONE) {
        ipc_port_t dport = ip_object_to_port(dest_port);

        assert(dport != IP_NULL);

        if (ip_active(dport) && dport->ip_receiver != ipc_space_kernel) {
            if (ip_full(dport)) {
#if IMPORTANCE_INHERITANCE
                needboost = ipc_port_request_sparm(dport, dest_name,
                    dest_entry->ie_request,
                    *optionp, priority);
                if (needboost == FALSE) {
                    ip_unlock(dport);
                }
#else
                ipc_port_request_sparm(dport, dest_name,
                    dest_entry->ie_request,
                    *optionp, priority);
                ip_unlock(dport);
#endif /* IMPORTANCE_INHERITANCE */
            } else {
                *optionp |= MACH_SEND_ALWAYS;
            }
        }
    }

    is_write_unlock(space);

#if IMPORTANCE_INHERITANCE
    /*
     * If our request is the first boosting send-possible
     * notification this cycle, push the boost down the
     * destination port.
     */
    if (needboost == TRUE) {
        ipc_port_t dport = ip_object_to_port(dest_port);

        /* dport still locked from above */
        if (ipc_port_importance_delta(dport, IPID_OPTION_SENDPOSSIBLE, 1) == FALSE) {
            ip_unlock(dport);
        }
    }
#endif /* IMPORTANCE_INHERITANCE */
    if (dest_soright != IP_NULL) {
        ipc_notify_port_deleted(dest_soright, dest_name);
    }
    if (reply_soright != IP_NULL) {
        ipc_notify_port_deleted(reply_soright, reply_name);
    }
    if (voucher_soright != IP_NULL) {
        ipc_notify_port_deleted(voucher_soright, voucher_name);
    }

    /*
     * No room to store voucher port in in-kernel msg header,
     * so we store it back in the kmsg itself. Extract the
     * qos, and apply any override before we enqueue the kmsg.
     */
    if (IP_VALID(voucher_port)) {
        kmsg->ikm_voucher = voucher_port;
        voucher_type = MACH_MSG_TYPE_MOVE_SEND;
    }

    msg->msgh_bits = MACH_MSGH_BITS_SET(dest_type, reply_type, voucher_type, mbits);
    msg->msgh_remote_port = ip_object_to_port(dest_port);
    msg->msgh_local_port = ip_object_to_port(reply_port);

    /* capture the qos value(s) for the kmsg */
    ipc_kmsg_set_qos(kmsg, *optionp, priority);
    if (release_port != IP_NULL) {
        ip_release(release_port);
    }

    if (voucher_release_port != IP_NULL) {
        ip_release(voucher_release_port);
    }

    if (enforce_strict_reply && MACH_SEND_WITH_STRICT_REPLY(*optionp) && IP_VALID(msg->msgh_local_port)) {
        /*
         * We've already validated that the reply disposition is a
         * [make/move] send-once. Ideally, we should enforce that the
         * reply port is also not dead, but XPC asynchronous
         * cancellation can make the reply port dead before we
         * actually make it to the mach_msg send.
         *
         * Here, we ensure that if we have a non-dead reply port, then
         * the reply port's receive right should not be in-transit,
         * and should live in the caller's IPC space.
         */
        ipc_port_t rport = msg->msgh_local_port;

        kr = ipc_kmsg_validate_reply_port_locked(rport, *optionp);

        if (kr != KERN_SUCCESS) {
            /*
             * no descriptors have been copied in yet, but the
             * full header has been copied in: clean it up
             */
            ipc_kmsg_clean_partial(kmsg, 0, NULL, 0, 0);
            if ((*optionp & MACH_SEND_KERNEL) == 0) {
                mach_port_guard_exception(reply_name, 0,
                    (MPG_FLAGS_STRICT_REPLY_INVALID_REPLY_PORT | kr),
                    kGUARD_EXC_STRICT_REPLY);
            }
            return MACH_SEND_INVALID_REPLY;
        }
    }

    return MACH_MSG_SUCCESS;
invalid_reply:
    is_write_unlock(space);

    if (release_port != IP_NULL) {
        ip_release(release_port);
    }

    assert(voucher_port == IP_NULL);
    assert(voucher_soright == IP_NULL);

    if ((*optionp & MACH_SEND_KERNEL) == 0) {
        mach_port_guard_exception(reply_name, 0, 0, kGUARD_EXC_SEND_INVALID_REPLY);
    }
    return MACH_SEND_INVALID_REPLY;

invalid_dest:
    is_write_unlock(space);

    if (release_port != IP_NULL) {
        ip_release(release_port);
    }

    if (reply_soright != IP_NULL) {
        ipc_notify_port_deleted(reply_soright, reply_name);
    }

    assert(voucher_port == IP_NULL);
    assert(voucher_soright == IP_NULL);

    return MACH_SEND_INVALID_DEST;
}
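
/*
 * Illustrative sketch, compiled out: a user-space header that satisfies the
 * validation at the top of ipc_kmsg_copyin_header() -- the remote disposition
 * must be a send-type right and the local disposition, if present, must also
 * be a send-type right. The helper name, port arguments, and msgh_id value
 * are placeholders; types and macros come from <mach/message.h>, already
 * included by this file.
 */
#if 0 /* example only */
static void
example_fill_header(mach_msg_header_t *hdr, mach_port_t dest, mach_port_t reply)
{
    hdr->msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND,
        MACH_MSG_TYPE_MAKE_SEND_ONCE);
    hdr->msgh_remote_port = dest;       /* copied in as the destination */
    hdr->msgh_local_port = reply;       /* copied in as the reply port */
    hdr->msgh_voucher_port = MACH_PORT_NULL;
    hdr->msgh_id = 500;                 /* arbitrary message id */
    hdr->msgh_size = sizeof(*hdr);
}
#endif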
static mach_msg_descriptor_t *
ipc_kmsg_copyin_port_descriptor(
    mach_msg_port_descriptor_t *dsc,
    mach_msg_legacy_port_descriptor_t *user_dsc_in,
    ipc_space_t space,
    ipc_object_t dest,
    ipc_kmsg_t kmsg,
    mach_msg_option_t *optionp,
    mach_msg_return_t *mr)
{
    mach_msg_legacy_port_descriptor_t user_dsc = *user_dsc_in;
    mach_msg_type_name_t user_disp;
    mach_msg_type_name_t result_disp;
    mach_port_name_t name;
    ipc_object_t object;

    user_disp = user_dsc.disposition;
    result_disp = ipc_object_copyin_type(user_disp);

    name = (mach_port_name_t)user_dsc.name;
    if (MACH_PORT_VALID(name)) {
        kern_return_t kr = ipc_object_copyin(space, name, user_disp, &object, 0, NULL, kmsg->ikm_flags);
        if (kr != KERN_SUCCESS) {
            if (((*optionp & MACH_SEND_KERNEL) == 0) && (kr == KERN_INVALID_RIGHT)) {
                mach_port_guard_exception(name, 0, 0, kGUARD_EXC_SEND_INVALID_RIGHT);
            }
            *mr = MACH_SEND_INVALID_RIGHT;
            return NULL;
        }

        if ((result_disp == MACH_MSG_TYPE_PORT_RECEIVE) &&
            ipc_port_check_circularity(ip_object_to_port(object),
            ip_object_to_port(dest))) {
            kmsg->ikm_header->msgh_bits |= MACH_MSGH_BITS_CIRCULAR;
        }
        dsc->name = ip_object_to_port(object);
    } else {
        dsc->name = CAST_MACH_NAME_TO_PORT(name);
    }
    dsc->disposition = result_disp;
    dsc->type = MACH_MSG_PORT_DESCRIPTOR;

    dsc->pad_end = 0; // debug, unnecessary

    return (mach_msg_descriptor_t *)(user_dsc_in + 1);
}
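
/*
 * Illustrative sketch, compiled out: the user-space layout that the port
 * descriptor copyin above consumes -- a complex message whose body holds a
 * single mach_msg_port_descriptor_t. The struct and helper names and the
 * port arguments are placeholders; the types and macros come from
 * <mach/message.h>, already included by this file.
 */
#if 0 /* example only */
struct example_port_msg {
    mach_msg_header_t          header;
    mach_msg_body_t            body;
    mach_msg_port_descriptor_t port;
};

static void
example_fill_port_msg(struct example_port_msg *m, mach_port_t dest, mach_port_t payload)
{
    m->header.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, 0) |
        MACH_MSGH_BITS_COMPLEX;            /* body contains descriptors */
    m->header.msgh_remote_port = dest;
    m->header.msgh_local_port = MACH_PORT_NULL;
    m->header.msgh_size = sizeof(*m);
    m->body.msgh_descriptor_count = 1;
    m->port.name = payload;                /* right being transferred */
    m->port.disposition = MACH_MSG_TYPE_COPY_SEND;
    m->port.type = MACH_MSG_PORT_DESCRIPTOR;
}
#endif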
static mach_msg_descriptor_t *
ipc_kmsg_copyin_ool_descriptor(
    mach_msg_ool_descriptor_t *dsc,
    mach_msg_descriptor_t *user_dsc,
    int is_64bit,
    vm_offset_t *paddr,
    vm_map_copy_t *copy,
    vm_size_t *space_needed,
    vm_map_t map,
    __unused mach_msg_option_t *optionp,
    mach_msg_return_t *mr)
{
    vm_size_t length;
    boolean_t dealloc;
    mach_msg_copy_options_t copy_options;
    mach_vm_offset_t addr;
    mach_msg_descriptor_type_t dsc_type;

    if (is_64bit) {
        mach_msg_ool_descriptor64_t *user_ool_dsc = (typeof(user_ool_dsc))user_dsc;

        addr = (mach_vm_offset_t) user_ool_dsc->address;
        length = user_ool_dsc->size;
        dealloc = user_ool_dsc->deallocate;
        copy_options = user_ool_dsc->copy;
        dsc_type = user_ool_dsc->type;

        user_dsc = (typeof(user_dsc))(user_ool_dsc + 1);
    } else {
        mach_msg_ool_descriptor32_t *user_ool_dsc = (typeof(user_ool_dsc))user_dsc;

        addr = CAST_USER_ADDR_T(user_ool_dsc->address);
        dealloc = user_ool_dsc->deallocate;
        copy_options = user_ool_dsc->copy;
        dsc_type = user_ool_dsc->type;
        length = user_ool_dsc->size;

        user_dsc = (typeof(user_dsc))(user_ool_dsc + 1);
    }

    dsc->size = (mach_msg_size_t)length;
    dsc->deallocate = dealloc;
    dsc->copy = copy_options;
    dsc->type = dsc_type;

    if (length == 0) {
        dsc->address = NULL;
    } else if ((length >= MSG_OOL_SIZE_SMALL) &&
        (copy_options == MACH_MSG_PHYSICAL_COPY) && !dealloc) {
        /*
         * If the request is a physical copy and the source
         * is not being deallocated, then allocate space
         * in the kernel's pageable ipc copy map and copy
         * the data in. The semantics guarantee that the
         * data will have been physically copied before
         * the send operation terminates. Thus if the data
         * is not being deallocated, we must be prepared
         * to page if the region is sufficiently large.
         */
        if (copyin(addr, (char *)*paddr, length)) {
            *mr = MACH_SEND_INVALID_MEMORY;
            return NULL;
        }

        /*
         * The kernel ipc copy map is marked no_zero_fill.
         * If the transfer is not a page multiple, we need
         * to zero fill the balance.
         */
        if (!page_aligned(length)) {
            (void) memset((void *) (*paddr + length), 0,
                round_page(length) - length);
        }
        if (vm_map_copyin(ipc_kernel_copy_map, (vm_map_address_t)*paddr,
            (vm_map_size_t)length, TRUE, copy) != KERN_SUCCESS) {
            *mr = MACH_MSG_VM_KERNEL;
            return NULL;
        }
        dsc->address = (void *)*copy;
        *paddr += round_page(length);
        *space_needed -= round_page(length);
    } else {
        /*
         * Make a vm_map_copy_t of the data. If the
         * data is small, this will do an optimized physical
         * copy. Otherwise, it will do a virtual copy.
         *
         * NOTE: A virtual copy is OK if the original is being
         * deallocated, even if a physical copy was requested.
         */
        kern_return_t kr = vm_map_copyin(map, addr,
            (vm_map_size_t)length, dealloc, copy);
        if (kr != KERN_SUCCESS) {
            *mr = (kr == KERN_RESOURCE_SHORTAGE) ?
                MACH_MSG_VM_KERNEL :
                MACH_SEND_INVALID_MEMORY;
            return NULL;
        }
        dsc->address = (void *)*copy;
    }

    return user_dsc;
}
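
/*
 * Illustrative sketch, compiled out: an out-of-line descriptor as a sender
 * would fill it in. With MACH_MSG_VIRTUAL_COPY and deallocate left FALSE, the
 * copyin above takes the vm_map_copyin() path rather than the physical copy
 * into ipc_kernel_copy_map. The helper name and parameters are placeholders;
 * the descriptor type comes from <mach/message.h>, already included here.
 */
#if 0 /* example only */
static void
example_fill_ool_descriptor(mach_msg_ool_descriptor_t *dsc, void *buffer, mach_msg_size_t size)
{
    dsc->address = buffer;                  /* sender's buffer */
    dsc->size = size;
    dsc->deallocate = FALSE;                /* keep the sender's copy */
    dsc->copy = MACH_MSG_VIRTUAL_COPY;      /* copy-on-write is acceptable */
    dsc->type = MACH_MSG_OOL_DESCRIPTOR;
}
#endif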
static mach_msg_descriptor_t *
ipc_kmsg_copyin_ool_ports_descriptor(
    mach_msg_ool_ports_descriptor_t *dsc,
    mach_msg_descriptor_t *user_dsc,
    int is_64bit,
    vm_map_t map,
    ipc_space_t space,
    ipc_object_t dest,
    ipc_kmsg_t kmsg,
    mach_msg_option_t *optionp,
    mach_msg_return_t *mr)
{
    void *data;
    ipc_object_t *objects;
    unsigned int i;
    mach_vm_offset_t addr;
    mach_msg_type_name_t user_disp;
    mach_msg_type_name_t result_disp;
    mach_msg_type_number_t count;
    mach_msg_copy_options_t copy_option;
    boolean_t deallocate;
    mach_msg_descriptor_type_t type;
    vm_size_t ports_length, names_length;

    if (is_64bit) {
        mach_msg_ool_ports_descriptor64_t *user_ool_dsc = (typeof(user_ool_dsc))user_dsc;

        addr = (mach_vm_offset_t)user_ool_dsc->address;
        count = user_ool_dsc->count;
        deallocate = user_ool_dsc->deallocate;
        copy_option = user_ool_dsc->copy;
        user_disp = user_ool_dsc->disposition;
        type = user_ool_dsc->type;

        user_dsc = (typeof(user_dsc))(user_ool_dsc + 1);
    } else {
        mach_msg_ool_ports_descriptor32_t *user_ool_dsc = (typeof(user_ool_dsc))user_dsc;

        addr = CAST_USER_ADDR_T(user_ool_dsc->address);
        count = user_ool_dsc->count;
        deallocate = user_ool_dsc->deallocate;
        copy_option = user_ool_dsc->copy;
        user_disp = user_ool_dsc->disposition;
        type = user_ool_dsc->type;

        user_dsc = (typeof(user_dsc))(user_ool_dsc + 1);
    }

    dsc->deallocate = deallocate;
    dsc->copy = copy_option;
    dsc->type = type;
    dsc->count = count;
    dsc->address = NULL; /* for now */

    result_disp = ipc_object_copyin_type(user_disp);
    dsc->disposition = result_disp;

    /* We always do a 'physical copy', but you have to specify something valid */
    if (copy_option != MACH_MSG_PHYSICAL_COPY &&
        copy_option != MACH_MSG_VIRTUAL_COPY) {
        *mr = MACH_SEND_INVALID_TYPE;
        return NULL;
    }

    /* calculate length of data in bytes, rounding up */

    if (os_mul_overflow(count, sizeof(mach_port_t), &ports_length)) {
        *mr = MACH_SEND_TOO_LARGE;
        return NULL;
    }

    if (os_mul_overflow(count, sizeof(mach_port_name_t), &names_length)) {
        *mr = MACH_SEND_TOO_LARGE;
        return NULL;
    }

    if (ports_length == 0) {
        return user_dsc;
    }

    data = kalloc(ports_length);

    if (data == NULL) {
        *mr = MACH_SEND_NO_BUFFER;
        return NULL;
    }

#if defined(__LP64__)
    mach_port_name_t *names = &((mach_port_name_t *)data)[count];
#else
    mach_port_name_t *names = ((mach_port_name_t *)data);
#endif

    if (copyinmap(map, addr, names, names_length) != KERN_SUCCESS) {
        kfree(data, ports_length);
        *mr = MACH_SEND_INVALID_MEMORY;
        return NULL;
    }

    if (deallocate) {
        (void) mach_vm_deallocate(map, addr, (mach_vm_size_t)names_length);
    }

    objects = (ipc_object_t *) data;
    dsc->address = data;

    for (i = 0; i < count; i++) {
        mach_port_name_t name = names[i];
        ipc_object_t object;

        if (!MACH_PORT_VALID(name)) {
            objects[i] = ip_to_object(CAST_MACH_NAME_TO_PORT(name));
            continue;
        }

        kern_return_t kr = ipc_object_copyin(space, name, user_disp, &object, 0, NULL, kmsg->ikm_flags);

        if (kr != KERN_SUCCESS) {
            unsigned int j;

            for (j = 0; j < i; j++) {
                object = objects[j];
                if (IPC_OBJECT_VALID(object)) {
                    ipc_object_destroy(object, result_disp);
                }
            }
            kfree(data, ports_length);
            dsc->address = NULL;
            if (((*optionp & MACH_SEND_KERNEL) == 0) && (kr == KERN_INVALID_RIGHT)) {
                mach_port_guard_exception(name, 0, 0, kGUARD_EXC_SEND_INVALID_RIGHT);
            }
            *mr = MACH_SEND_INVALID_RIGHT;
            return NULL;
        }

        if ((dsc->disposition == MACH_MSG_TYPE_PORT_RECEIVE) &&
            ipc_port_check_circularity(ip_object_to_port(object),
            ip_object_to_port(dest))) {
            kmsg->ikm_header->msgh_bits |= MACH_MSGH_BITS_CIRCULAR;
        }

        objects[i] = object;
    }

    return user_dsc;
}
static mach_msg_descriptor_t *
ipc_kmsg_copyin_guarded_port_descriptor(
    mach_msg_guarded_port_descriptor_t *dsc,
    mach_msg_descriptor_t *user_addr,
    int is_64bit,
    ipc_space_t space,
    ipc_object_t dest,
    ipc_kmsg_t kmsg,
    mach_msg_option_t *optionp,
    mach_msg_return_t *mr)
{
    mach_msg_descriptor_t *user_dsc;
    mach_msg_type_name_t disp;
    mach_msg_type_name_t result_disp;
    mach_port_name_t name;
    mach_msg_guard_flags_t guard_flags;
    ipc_object_t object;
    mach_port_context_t context;

    if (!is_64bit) {
        mach_msg_guarded_port_descriptor32_t *user_gp_dsc = (typeof(user_gp_dsc))user_addr;
        name = user_gp_dsc->name;
        guard_flags = user_gp_dsc->flags;
        disp = user_gp_dsc->disposition;
        context = user_gp_dsc->context;
        user_dsc = (mach_msg_descriptor_t *)(user_gp_dsc + 1);
    } else {
        mach_msg_guarded_port_descriptor64_t *user_gp_dsc = (typeof(user_gp_dsc))user_addr;
        name = user_gp_dsc->name;
        guard_flags = user_gp_dsc->flags;
        disp = user_gp_dsc->disposition;
        context = user_gp_dsc->context;
        user_dsc = (mach_msg_descriptor_t *)(user_gp_dsc + 1);
    }

    guard_flags &= MACH_MSG_GUARD_FLAGS_MASK;
    result_disp = ipc_object_copyin_type(disp);

    if (MACH_PORT_VALID(name)) {
        kern_return_t kr = ipc_object_copyin(space, name, disp, &object, context, &guard_flags, kmsg->ikm_flags);
        if (kr != KERN_SUCCESS) {
            if (((*optionp & MACH_SEND_KERNEL) == 0) && (kr == KERN_INVALID_RIGHT)) {
                mach_port_guard_exception(name, 0, 0, kGUARD_EXC_SEND_INVALID_RIGHT);
            }
            *mr = MACH_SEND_INVALID_RIGHT;
            return NULL;
        }

        if ((result_disp == MACH_MSG_TYPE_PORT_RECEIVE) &&
            ipc_port_check_circularity(ip_object_to_port(object),
            ip_object_to_port(dest))) {
            kmsg->ikm_header->msgh_bits |= MACH_MSGH_BITS_CIRCULAR;
        }
        dsc->name = ip_object_to_port(object);
    } else {
        dsc->name = CAST_MACH_NAME_TO_PORT(name);
    }
    dsc->flags = guard_flags;
    dsc->disposition = result_disp;
    dsc->type = MACH_MSG_GUARDED_PORT_DESCRIPTOR;

    dsc->pad_end = 0; // debug, unnecessary

    return user_dsc;
}
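
/*
 * Illustrative sketch, compiled out: a guarded-port descriptor as a sender
 * might fill it in. Only MACH_MSG_TYPE_MOVE_RECEIVE is accepted, and
 * MACH_MSG_GUARD_FLAGS_UNGUARDED_ON_SEND requires a zero context (see the
 * checks in ipc_kmsg_copyin_body() below). The helper name and parameters are
 * placeholders, and the choice of MACH_MSG_GUARD_FLAGS_IMMOVABLE_RECEIVE as
 * the guard flag is an assumption about a typical caller.
 */
#if 0 /* example only */
static void
example_fill_guarded_descriptor(mach_msg_guarded_port_descriptor_t *dsc,
    mach_port_t recv_right, mach_port_context_t guard)
{
    dsc->name = recv_right;                        /* receive right to move */
    dsc->context = guard;                          /* current guard value */
    dsc->flags = MACH_MSG_GUARD_FLAGS_IMMOVABLE_RECEIVE;
    dsc->disposition = MACH_MSG_TYPE_MOVE_RECEIVE;
    dsc->type = MACH_MSG_GUARDED_PORT_DESCRIPTOR;
}
#endif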
/*
 * Routine: ipc_kmsg_copyin_body
 *
 *  "Copy-in" port rights and out-of-line memory
 *  in the message body.
 *
 *  In all failure cases, the message is left holding
 *  no rights or memory. However, the message buffer
 *  is not deallocated. If successful, the message
 *  contains a valid destination port.
 *
 *  MACH_MSG_SUCCESS        Successful copyin.
 *  MACH_SEND_INVALID_MEMORY    Can't grab out-of-line memory.
 *  MACH_SEND_INVALID_RIGHT Can't copyin port right in body.
 *  MACH_SEND_INVALID_TYPE  Bad type specification.
 *  MACH_SEND_MSG_TOO_SMALL Body is too small for types/data.
 *  MACH_SEND_INVALID_RT_OOL_SIZE   OOL Buffer too large for RT
 *  MACH_MSG_INVALID_RT_DESCRIPTOR  Dealloc and RT are incompatible
 *  MACH_SEND_NO_GRANT_DEST Dest port doesn't accept ports in body
 */
mach_msg_return_t
ipc_kmsg_copyin_body(
    ipc_kmsg_t kmsg,
    ipc_space_t space,
    vm_map_t map,
    mach_msg_option_t *optionp)
{
    ipc_object_t dest;
    mach_msg_body_t *body;
    mach_msg_descriptor_t *daddr, *naddr, *end;
    mach_msg_descriptor_t *user_addr, *kern_addr;
    mach_msg_type_number_t dsc_count;
    boolean_t is_task_64bit = (map->max_offset > VM_MAX_ADDRESS);
    boolean_t complex = FALSE;
    boolean_t contains_port_desc = FALSE;
    vm_size_t space_needed = 0;
    vm_offset_t paddr = 0;
    vm_map_copy_t copy = VM_MAP_COPY_NULL;
    mach_msg_type_number_t i;
    mach_msg_return_t mr = MACH_MSG_SUCCESS;
    ipc_port_t remote_port = kmsg->ikm_header->msgh_remote_port;

    vm_size_t descriptor_size = 0;

    mach_msg_type_number_t total_ool_port_count = 0;
    mach_msg_guard_flags_t guard_flags = 0;
    mach_port_context_t context;
    mach_msg_type_name_t disp;

    /*
     * Determine if the target is a kernel port.
     */
    dest = ip_to_object(remote_port);
    body = (mach_msg_body_t *) (kmsg->ikm_header + 1);
    naddr = (mach_msg_descriptor_t *) (body + 1);
    end = (mach_msg_descriptor_t *) ((vm_offset_t)kmsg->ikm_header + kmsg->ikm_header->msgh_size);

    dsc_count = body->msgh_descriptor_count;
    if (dsc_count == 0) {
        return MACH_MSG_SUCCESS;
    }
    /*
     * Make an initial pass to determine kernel VM space requirements for
     * physical copies and possible contraction of the descriptors from
     * processes with pointers larger than the kernel's.
     */
    for (i = 0; i < dsc_count; i++) {
        mach_msg_size_t size;
        mach_msg_type_number_t ool_port_count = 0;

        daddr = naddr;

        /* make sure the descriptor fits in the message */
        if (is_task_64bit) {
            if ((mach_msg_descriptor_t *)((vm_offset_t)daddr + 12) > end) {
                mr = MACH_SEND_MSG_TOO_SMALL;
                goto clean_message;
            }

            switch (daddr->type.type) {
            case MACH_MSG_OOL_DESCRIPTOR:
            case MACH_MSG_OOL_VOLATILE_DESCRIPTOR:
            case MACH_MSG_OOL_PORTS_DESCRIPTOR:
            case MACH_MSG_GUARDED_PORT_DESCRIPTOR:
                descriptor_size += 16;
                naddr = (typeof(naddr))((vm_offset_t)daddr + 16);
                break;
            default:
                descriptor_size += 12;
                naddr = (typeof(naddr))((vm_offset_t)daddr + 12);
                break;
            }
        } else {
            descriptor_size += 12;
            naddr = (typeof(naddr))((vm_offset_t)daddr + 12);
        }

        if (naddr > end) {
            mr = MACH_SEND_MSG_TOO_SMALL;
            goto clean_message;
        }

        switch (daddr->type.type) {
        case MACH_MSG_OOL_DESCRIPTOR:
        case MACH_MSG_OOL_VOLATILE_DESCRIPTOR:
            size = (is_task_64bit) ?
                ((mach_msg_ool_descriptor64_t *)daddr)->size :
                daddr->out_of_line.size;

            if (daddr->out_of_line.copy != MACH_MSG_PHYSICAL_COPY &&
                daddr->out_of_line.copy != MACH_MSG_VIRTUAL_COPY) {
                /*
                 * Invalid copy option
                 */
                mr = MACH_SEND_INVALID_TYPE;
                goto clean_message;
            }

            if ((size >= MSG_OOL_SIZE_SMALL) &&
                (daddr->out_of_line.copy == MACH_MSG_PHYSICAL_COPY) &&
                !(daddr->out_of_line.deallocate)) {
                /*
                 * Out-of-line memory descriptor, accumulate kernel
                 * memory requirements
                 */
                if (space_needed + round_page(size) <= space_needed) {
                    /* Overflow detected */
                    mr = MACH_MSG_VM_KERNEL;
                    goto clean_message;
                }

                space_needed += round_page(size);
                if (space_needed > ipc_kmsg_max_vm_space) {
                    /* Per message kernel memory limit exceeded */
                    mr = MACH_MSG_VM_KERNEL;
                    goto clean_message;
                }
            }
            break;
        case MACH_MSG_PORT_DESCRIPTOR:
            if (os_add_overflow(total_ool_port_count, 1, &total_ool_port_count)) {
                /* Overflow detected */
                mr = MACH_SEND_TOO_LARGE;
                goto clean_message;
            }
            contains_port_desc = TRUE;
            break;
        case MACH_MSG_OOL_PORTS_DESCRIPTOR:
            ool_port_count = (is_task_64bit) ?
                ((mach_msg_ool_ports_descriptor64_t *)daddr)->count :
                daddr->ool_ports.count;

            if (os_add_overflow(total_ool_port_count, ool_port_count, &total_ool_port_count)) {
                /* Overflow detected */
                mr = MACH_SEND_TOO_LARGE;
                goto clean_message;
            }

            if (ool_port_count > (ipc_kmsg_max_vm_space / sizeof(mach_port_t))) {
                /* Per message kernel memory limit exceeded */
                mr = MACH_SEND_TOO_LARGE;
                goto clean_message;
            }
            contains_port_desc = TRUE;
            break;
        case MACH_MSG_GUARDED_PORT_DESCRIPTOR:
            guard_flags = (is_task_64bit) ?
                ((mach_msg_guarded_port_descriptor64_t *)daddr)->flags :
                ((mach_msg_guarded_port_descriptor32_t *)daddr)->flags;
            context = (is_task_64bit) ?
                ((mach_msg_guarded_port_descriptor64_t *)daddr)->context :
                ((mach_msg_guarded_port_descriptor32_t *)daddr)->context;
            disp = (is_task_64bit) ?
                ((mach_msg_guarded_port_descriptor64_t *)daddr)->disposition :
                ((mach_msg_guarded_port_descriptor32_t *)daddr)->disposition;

            /* Only MACH_MSG_TYPE_MOVE_RECEIVE is supported for now */
            if (!guard_flags || ((guard_flags & ~MACH_MSG_GUARD_FLAGS_MASK) != 0) ||
                ((guard_flags & MACH_MSG_GUARD_FLAGS_UNGUARDED_ON_SEND) && (context != 0)) ||
                (disp != MACH_MSG_TYPE_MOVE_RECEIVE)) {
                /*
                 * Invalid guard flags, context or disposition
                 */
                mr = MACH_SEND_INVALID_TYPE;
                goto clean_message;
            }
            if (os_add_overflow(total_ool_port_count, 1, &total_ool_port_count)) {
                /* Overflow detected */
                mr = MACH_SEND_TOO_LARGE;
                goto clean_message;
            }
            contains_port_desc = TRUE;
            break;
        }
    }

    /* Sending more than 16383 rights in one message seems crazy */
    if (total_ool_port_count >= (MACH_PORT_UREFS_MAX / 4)) {
        mr = MACH_SEND_TOO_LARGE;
        goto clean_message;
    }

    /*
     * Check if dest is a no-grant port; Since this bit is set only on
     * port construction and cannot be unset later, we can peek at the
     * bit without paying the cost of locking the port.
     */
    if (contains_port_desc && remote_port->ip_no_grant) {
        mr = MACH_SEND_NO_GRANT_DEST;
        goto clean_message;
    }
    /*
     * Allocate space in the pageable kernel ipc copy map for all the
     * ool data that is to be physically copied. Map is marked wait for
     * space.
     */
    if (space_needed) {
        if (vm_allocate_kernel(ipc_kernel_copy_map, &paddr, space_needed,
            VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_IPC) != KERN_SUCCESS) {
            mr = MACH_MSG_VM_KERNEL;
            goto clean_message;
        }
    }

    /* user_addr = just after base as it was copied in */
    user_addr = (mach_msg_descriptor_t *)((vm_offset_t)kmsg->ikm_header + sizeof(mach_msg_base_t));

    /*
     * Shift the mach_msg_base_t down to make room for dsc_count*16bytes of descriptors on 64 bit kernels
     */
    if (descriptor_size != 16 * dsc_count) {
        vm_offset_t dsc_adjust = 16 * dsc_count - descriptor_size;

        memmove((char *)(((vm_offset_t)kmsg->ikm_header) - dsc_adjust), kmsg->ikm_header, sizeof(mach_msg_base_t));
        kmsg->ikm_header = (mach_msg_header_t *)((vm_offset_t)kmsg->ikm_header - dsc_adjust);

        /* Update the message size for the larger in-kernel representation */
        kmsg->ikm_header->msgh_size += (mach_msg_size_t)dsc_adjust;
    }

    /* kern_addr = just after base after it has been (conditionally) moved */
    kern_addr = (mach_msg_descriptor_t *)((vm_offset_t)kmsg->ikm_header + sizeof(mach_msg_base_t));
    /* handle the OOL regions and port descriptors. */
    for (i = 0; i < dsc_count; i++) {
        switch (user_addr->type.type) {
        case MACH_MSG_PORT_DESCRIPTOR:
            user_addr = ipc_kmsg_copyin_port_descriptor((mach_msg_port_descriptor_t *)kern_addr,
                (mach_msg_legacy_port_descriptor_t *)user_addr, space, dest, kmsg, optionp, &mr);
            kern_addr++;
            complex = TRUE;
            break;
        case MACH_MSG_OOL_VOLATILE_DESCRIPTOR:
        case MACH_MSG_OOL_DESCRIPTOR:
            user_addr = ipc_kmsg_copyin_ool_descriptor((mach_msg_ool_descriptor_t *)kern_addr,
                user_addr, is_task_64bit, &paddr, &copy, &space_needed, map, optionp, &mr);
            kern_addr++;
            complex = TRUE;
            break;
        case MACH_MSG_OOL_PORTS_DESCRIPTOR:
            user_addr = ipc_kmsg_copyin_ool_ports_descriptor((mach_msg_ool_ports_descriptor_t *)kern_addr,
                user_addr, is_task_64bit, map, space, dest, kmsg, optionp, &mr);
            kern_addr++;
            complex = TRUE;
            break;
        case MACH_MSG_GUARDED_PORT_DESCRIPTOR:
            user_addr = ipc_kmsg_copyin_guarded_port_descriptor((mach_msg_guarded_port_descriptor_t *)kern_addr,
                user_addr, is_task_64bit, space, dest, kmsg, optionp, &mr);
            kern_addr++;
            complex = TRUE;
            break;
        default:
            /* Invalid descriptor */
            mr = MACH_SEND_INVALID_TYPE;
            break;
        }

        if (MACH_MSG_SUCCESS != mr) {
            /* clean from start of message descriptors to i */
            ipc_kmsg_clean_partial(kmsg, i,
                (mach_msg_descriptor_t *)((mach_msg_base_t *)kmsg->ikm_header + 1),
                paddr, space_needed);
            goto out;
        }
    }

    if (!complex) {
        kmsg->ikm_header->msgh_bits &= ~MACH_MSGH_BITS_COMPLEX;
    }
out:
    return mr;

clean_message:
    /* no descriptors have been copied in yet */
    ipc_kmsg_clean_partial(kmsg, 0, NULL, 0, 0);
    return mr;
}
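
/*
 * Illustrative sketch, compiled out: the descriptor expansion arithmetic used
 * above. A 32-bit sender's descriptors occupy 12 bytes each, while the
 * in-kernel form always uses 16 bytes per descriptor, so the header is slid
 * down by the difference before the descriptors are rewritten in place. The
 * helper name and parameter names are hypothetical.
 */
#if 0 /* example only */
static vm_offset_t
example_descriptor_adjustment(mach_msg_type_number_t dsc_count,
    vm_size_t user_descriptor_size)
{
    /* mirrors "dsc_adjust = 16 * dsc_count - descriptor_size" above */
    return (vm_offset_t)(16 * dsc_count - user_descriptor_size);
}
#endif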
/*
 * Routine: ipc_kmsg_copyin
 *
 *  "Copy-in" port rights and out-of-line memory
 *
 *  In all failure cases, the message is left holding
 *  no rights or memory. However, the message buffer
 *  is not deallocated. If successful, the message
 *  contains a valid destination port.
 *
 *  MACH_MSG_SUCCESS        Successful copyin.
 *  MACH_SEND_INVALID_HEADER    Illegal value in the message header bits.
 *  MACH_SEND_INVALID_DEST  Can't copyin destination port.
 *  MACH_SEND_INVALID_REPLY Can't copyin reply port.
 *  MACH_SEND_INVALID_MEMORY    Can't grab out-of-line memory.
 *  MACH_SEND_INVALID_RIGHT Can't copyin port right in body.
 *  MACH_SEND_INVALID_TYPE  Bad type specification.
 *  MACH_SEND_MSG_TOO_SMALL Body is too small for types/data.
 */
mach_msg_return_t
ipc_kmsg_copyin(
    ipc_kmsg_t kmsg,
    ipc_space_t space,
    vm_map_t map,
    mach_msg_priority_t priority,
    mach_msg_option_t *optionp)
{
    mach_msg_return_t mr;
    mach_port_name_t dest_name = CAST_MACH_PORT_TO_NAME(kmsg->ikm_header->msgh_remote_port);

    kmsg->ikm_header->msgh_bits &= MACH_MSGH_BITS_USER;

    mr = ipc_kmsg_copyin_header(kmsg, space, priority, optionp);

    if (mr != MACH_MSG_SUCCESS) {
        return mr;
    }

    /* Get the message filter policy if the task and port support filtering */
    mach_msg_filter_id fid = 0;
    if (ip_enforce_msg_filtering(kmsg->ikm_header->msgh_remote_port) &&
        task_get_filter_msg_flag(current_task())) {
        /* port label is yet to be supported */
        boolean_t allow_kmsg = mach_msg_fetch_filter_policy(NULL, kmsg->ikm_header->msgh_id, &fid);
        if (!allow_kmsg) {
            mach_port_guard_exception(dest_name, 0, 0, kGUARD_EXC_MSG_FILTERED);
            /* no descriptors have been copied in yet */
            ipc_kmsg_clean_partial(kmsg, 0, NULL, 0, 0);
            return MACH_SEND_MSG_FILTERED;
        }
        kmsg->ikm_filter_policy_id = fid;
    }

    KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_MSG_SEND) | DBG_FUNC_NONE,
        VM_KERNEL_ADDRPERM((uintptr_t)kmsg),
        (uintptr_t)kmsg->ikm_header->msgh_bits,
        (uintptr_t)kmsg->ikm_header->msgh_id,
        VM_KERNEL_ADDRPERM((uintptr_t)unsafe_convert_port_to_voucher(kmsg->ikm_voucher)),
        0);

    DEBUG_KPRINT_SYSCALL_IPC("ipc_kmsg_copyin header:\n%.8x\n%.8x\n%p\n%p\n%p\n%.8x\n",
        kmsg->ikm_header->msgh_size,
        kmsg->ikm_header->msgh_bits,
        kmsg->ikm_header->msgh_remote_port,
        kmsg->ikm_header->msgh_local_port,
        kmsg->ikm_header->msgh_voucher_port,
        kmsg->ikm_header->msgh_id);

    if (kmsg->ikm_header->msgh_bits & MACH_MSGH_BITS_COMPLEX) {
        mr = ipc_kmsg_copyin_body(kmsg, space, map, optionp);
    }

    /* unreachable if !DEBUG */
    __unreachable_ok_push
    if (DEBUG_KPRINT_SYSCALL_PREDICATE(DEBUG_KPRINT_SYSCALL_IPC_MASK)) {
        uint32_t i;
        for (i = 0; i * 4 < (kmsg->ikm_header->msgh_size - sizeof(mach_msg_header_t)); i++) {
            kprintf("%.4x\n", ((uint32_t *)(kmsg->ikm_header + 1))[i]);
        }
    }
    __unreachable_ok_pop

    /* Sign the message contents */
    if (mr == MACH_MSG_SUCCESS) {
/*
 * Routine: ipc_kmsg_copyin_from_kernel
 *
 *  "Copy-in" port rights and out-of-line memory
 *  in a message sent from the kernel.
 *
 *  Because the message comes from the kernel,
 *  the implementation assumes there are no errors
 *  or peculiarities in the message.
 */
mach_msg_return_t
ipc_kmsg_copyin_from_kernel(
    ipc_kmsg_t kmsg)
{
    mach_msg_bits_t bits = kmsg->ikm_header->msgh_bits;
    mach_msg_type_name_t rname = MACH_MSGH_BITS_REMOTE(bits);
    mach_msg_type_name_t lname = MACH_MSGH_BITS_LOCAL(bits);
    ipc_object_t remote = ip_to_object(kmsg->ikm_header->msgh_remote_port);
    ipc_object_t local = ip_to_object(kmsg->ikm_header->msgh_local_port);
    ipc_port_t dest = kmsg->ikm_header->msgh_remote_port;
    /* translate the destination and reply ports */
    if (!IO_VALID(remote)) {
        return MACH_SEND_INVALID_DEST;
    }

    ipc_object_copyin_from_kernel(remote, rname);
    if (IO_VALID(local)) {
        ipc_object_copyin_from_kernel(local, lname);
    }

    /*
     * The common case is a complex message with no reply port,
     * because that is what the memory_object interface uses.
     */
    if (bits == (MACH_MSGH_BITS_COMPLEX |
        MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, 0))) {
        bits = (MACH_MSGH_BITS_COMPLEX |
            MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND, 0));

        kmsg->ikm_header->msgh_bits = bits;
    } else {
        bits = (MACH_MSGH_BITS_OTHER(bits) |
            MACH_MSGH_BITS(ipc_object_copyin_type(rname),
            ipc_object_copyin_type(lname)));

        kmsg->ikm_header->msgh_bits = bits;
    }

    if (bits & MACH_MSGH_BITS_COMPLEX) {
        /*
         * Check if the remote port accepts ports in the body.
         */
        if (dest->ip_no_grant) {
            mach_msg_descriptor_t *saddr;
            mach_msg_body_t *body;
            mach_msg_type_number_t i, count;

            body = (mach_msg_body_t *) (kmsg->ikm_header + 1);
            saddr = (mach_msg_descriptor_t *) (body + 1);
            count = body->msgh_descriptor_count;

            for (i = 0; i < count; i++, saddr++) {
                switch (saddr->type.type) {
                case MACH_MSG_PORT_DESCRIPTOR:
                case MACH_MSG_OOL_PORTS_DESCRIPTOR:
                case MACH_MSG_GUARDED_PORT_DESCRIPTOR:
                    /* no descriptors have been copied in yet */
                    ipc_kmsg_clean_partial(kmsg, 0, NULL, 0, 0);
                    return MACH_SEND_NO_GRANT_DEST;
                }
            }
        }

        mach_msg_descriptor_t *saddr;
        mach_msg_body_t *body;
        mach_msg_type_number_t i, count;

        body = (mach_msg_body_t *) (kmsg->ikm_header + 1);
        saddr = (mach_msg_descriptor_t *) (body + 1);
        count = body->msgh_descriptor_count;

        for (i = 0; i < count; i++, saddr++) {
            switch (saddr->type.type) {
            case MACH_MSG_PORT_DESCRIPTOR: {
                mach_msg_type_name_t name;
                ipc_object_t object;
                mach_msg_port_descriptor_t *dsc;

                dsc = &saddr->port;

                /* this is really the type SEND, SEND_ONCE, etc. */
                name = dsc->disposition;
                object = ip_to_object(dsc->name);
                dsc->disposition = ipc_object_copyin_type(name);

                if (!IO_VALID(object)) {
                    break;
                }

                ipc_object_copyin_from_kernel(object, name);

                /* CDY avoid circularity when the destination is also */
                /* the kernel. This check should be changed into an */
                /* assert when the new kobject model is in place since*/
                /* ports will not be used in kernel to kernel chats */

                if (ip_object_to_port(remote)->ip_receiver != ipc_space_kernel) {
                    if ((dsc->disposition == MACH_MSG_TYPE_PORT_RECEIVE) &&
                        ipc_port_check_circularity(ip_object_to_port(object),
                        ip_object_to_port(remote))) {
                        kmsg->ikm_header->msgh_bits |=
                            MACH_MSGH_BITS_CIRCULAR;
                    }
                }
                break;
            }
            case MACH_MSG_OOL_VOLATILE_DESCRIPTOR:
            case MACH_MSG_OOL_DESCRIPTOR: {
                /*
                 * The sender should supply ready-made memory, i.e.
                 * a vm_map_copy_t, so we don't need to do anything.
                 */
                break;
            }
            case MACH_MSG_OOL_PORTS_DESCRIPTOR: {
                ipc_object_t *objects;
                unsigned int j;
                mach_msg_type_name_t name;
                mach_msg_ool_ports_descriptor_t *dsc;

                dsc = (mach_msg_ool_ports_descriptor_t *)&saddr->ool_ports;

                /* this is really the type SEND, SEND_ONCE, etc. */
                name = dsc->disposition;
                dsc->disposition = ipc_object_copyin_type(name);

                objects = (ipc_object_t *) dsc->address;

                for (j = 0; j < dsc->count; j++) {
                    ipc_object_t object = objects[j];

                    if (!IO_VALID(object)) {
                        continue;
                    }

                    ipc_object_copyin_from_kernel(object, name);

                    if ((dsc->disposition == MACH_MSG_TYPE_PORT_RECEIVE) &&
                        ipc_port_check_circularity(ip_object_to_port(object),
                        ip_object_to_port(remote))) {
                        kmsg->ikm_header->msgh_bits |= MACH_MSGH_BITS_CIRCULAR;
                    }
                }
                break;
            }
            case MACH_MSG_GUARDED_PORT_DESCRIPTOR: {
                mach_msg_guarded_port_descriptor_t *dsc = (typeof(dsc)) & saddr->guarded_port;
                mach_msg_type_name_t disp = dsc->disposition;
                ipc_object_t object = ip_to_object(dsc->name);
                dsc->disposition = ipc_object_copyin_type(disp);
                assert(dsc->flags == 0);

                if (!IO_VALID(object)) {
                    break;
                }

                ipc_object_copyin_from_kernel(object, disp);

                /*
                 * avoid circularity when the destination is also
                 * the kernel. This check should be changed into an
                 * assert when the new kobject model is in place since
                 * ports will not be used in kernel to kernel chats
                 */
                if (ip_object_to_port(remote)->ip_receiver != ipc_space_kernel) {
                    if ((dsc->disposition == MACH_MSG_TYPE_PORT_RECEIVE) &&
                        ipc_port_check_circularity(ip_object_to_port(object),
                        ip_object_to_port(remote))) {
                        kmsg->ikm_header->msgh_bits |= MACH_MSGH_BITS_CIRCULAR;
                    }
                }
                break;
            }
#if MACH_ASSERT
            default:
                panic("ipc_kmsg_copyin_from_kernel: bad descriptor");
#endif /* MACH_ASSERT */
            }
        }
    }

    /* Add the signature to the message */

    return MACH_MSG_SUCCESS;
}
#if IKM_SUPPORT_LEGACY
mach_msg_return_t
ipc_kmsg_copyin_from_kernel_legacy(
    ipc_kmsg_t kmsg)
{
    mach_msg_bits_t bits = kmsg->ikm_header->msgh_bits;
    mach_msg_type_name_t rname = MACH_MSGH_BITS_REMOTE(bits);
    mach_msg_type_name_t lname = MACH_MSGH_BITS_LOCAL(bits);
    ipc_object_t remote = ip_to_object(kmsg->ikm_header->msgh_remote_port);
    ipc_object_t local = ip_to_object(kmsg->ikm_header->msgh_local_port);
    ipc_port_t dest = kmsg->ikm_header->msgh_remote_port;
4318 /* translate the destination and reply ports */
4319 if (!IO_VALID(remote
)) {
4320 return MACH_SEND_INVALID_DEST
;
4323 ipc_object_copyin_from_kernel(remote
, rname
);
4324 if (IO_VALID(local
)) {
4325 ipc_object_copyin_from_kernel(local
, lname
);
4329 * The common case is a complex message with no reply port,
4330 * because that is what the memory_object interface uses.
4333 if (bits
== (MACH_MSGH_BITS_COMPLEX
|
4334 MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND
, 0))) {
4335 bits
= (MACH_MSGH_BITS_COMPLEX
|
4336 MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND
, 0));
4338 kmsg
->ikm_header
->msgh_bits
= bits
;
4340 bits
= (MACH_MSGH_BITS_OTHER(bits
) |
4341 MACH_MSGH_BITS(ipc_object_copyin_type(rname
),
4342 ipc_object_copyin_type(lname
)));
4344 kmsg
->ikm_header
->msgh_bits
= bits
;
4347 if (bits
& MACH_MSGH_BITS_COMPLEX
) {
4348 if (dest
->ip_no_grant
) {
4349 mach_msg_descriptor_t
*saddr
;
4350 mach_msg_body_t
*body
;
4351 mach_msg_type_number_t i
, count
;
4353 body
= (mach_msg_body_t
*) (kmsg
->ikm_header
+ 1);
4354 saddr
= (mach_msg_descriptor_t
*) (body
+ 1);
4355 count
= body
->msgh_descriptor_count
;
4357 for (i
= 0; i
< count
; i
++, saddr
++) {
4358 switch (saddr
->type
.type
) {
4359 case MACH_MSG_PORT_DESCRIPTOR
:
4360 case MACH_MSG_OOL_PORTS_DESCRIPTOR
:
4361 case MACH_MSG_GUARDED_PORT_DESCRIPTOR
:
4362 /* no descriptors have been copied in yet */
4363 ipc_kmsg_clean_partial(kmsg
, 0, NULL
, 0, 0);
4364 return MACH_SEND_NO_GRANT_DEST
;
        mach_msg_legacy_descriptor_t *saddr;
        mach_msg_descriptor_t   *daddr;
        mach_msg_body_t         *body;
        mach_msg_type_number_t  i, count;

        body = (mach_msg_body_t *) (kmsg->ikm_header + 1);
        saddr = (typeof(saddr))(body + 1);
        count = body->msgh_descriptor_count;

        if (count) {
            vm_offset_t dsc_adjust = 4 * count;
            memmove((char *)(((vm_offset_t)kmsg->ikm_header) - dsc_adjust), kmsg->ikm_header, sizeof(mach_msg_base_t));
            kmsg->ikm_header = (mach_msg_header_t *)((vm_offset_t)kmsg->ikm_header - dsc_adjust);
            /* Update the message size for the larger in-kernel representation */
            kmsg->ikm_header->msgh_size += dsc_adjust;
        }

        daddr = (mach_msg_descriptor_t *)((vm_offset_t)kmsg->ikm_header + sizeof(mach_msg_base_t));
        for (i = 0; i < count; i++, saddr++, daddr++) {
            switch (saddr->type.type) {
            case MACH_MSG_PORT_DESCRIPTOR: {
                mach_msg_type_name_t            name;
                ipc_object_t                    object;
                mach_msg_legacy_port_descriptor_t *dsc;
                mach_msg_port_descriptor_t      *dest_dsc;

                dsc = (typeof(dsc)) &saddr->port;
                dest_dsc = &daddr->port;

                /* this is really the type SEND, SEND_ONCE, etc. */
                name = dsc->disposition;
                object = ip_to_object(CAST_MACH_NAME_TO_PORT(dsc->name));
                dest_dsc->disposition = ipc_object_copyin_type(name);
                dest_dsc->name = ip_object_to_port(object);
                dest_dsc->type = MACH_MSG_PORT_DESCRIPTOR;

                if (!IO_VALID(object)) {
                    break;
                }

                ipc_object_copyin_from_kernel(object, name);

                /* CDY avoid circularity when the destination is also */
                /* the kernel.  This check should be changed into an  */
                /* assert when the new kobject model is in place since*/
                /* ports will not be used in kernel to kernel chats   */

                if (ip_object_to_port(remote)->ip_receiver != ipc_space_kernel) {
                    if ((dest_dsc->disposition == MACH_MSG_TYPE_PORT_RECEIVE) &&
                        ipc_port_check_circularity(ip_object_to_port(object),
                        ip_object_to_port(remote))) {
                        kmsg->ikm_header->msgh_bits |=
                            MACH_MSGH_BITS_CIRCULAR;
                    }
                }
                break;
            }
            case MACH_MSG_OOL_VOLATILE_DESCRIPTOR:
            case MACH_MSG_OOL_DESCRIPTOR: {
                /* The sender should supply ready-made memory, i.e. a vm_map_copy_t
                 * so we don't need to do anything special. */

                mach_msg_ool_descriptor32_t *source_dsc = &saddr->out_of_line32;
                mach_msg_ool_descriptor_t   *dest_dsc = (typeof(dest_dsc)) &daddr->out_of_line;

                vm_offset_t                 address = source_dsc->address;
                vm_size_t                   size = source_dsc->size;
                boolean_t                   deallocate = source_dsc->deallocate;
                mach_msg_copy_options_t     copy = source_dsc->copy;
                mach_msg_descriptor_type_t  type = source_dsc->type;

                dest_dsc->address = (void *)address;
                dest_dsc->size = size;
                dest_dsc->deallocate = deallocate;
                dest_dsc->copy = copy;
                dest_dsc->type = type;
                break;
            }
            case MACH_MSG_OOL_PORTS_DESCRIPTOR: {
                ipc_object_t            *objects;
                unsigned int            j;
                mach_msg_type_name_t    name;
                mach_msg_ool_ports_descriptor_t *dest_dsc;

                mach_msg_ool_ports_descriptor32_t *source_dsc = &saddr->ool_ports32;
                dest_dsc = (typeof(dest_dsc)) &daddr->ool_ports;

                boolean_t deallocate = source_dsc->deallocate;
                mach_msg_copy_options_t copy = source_dsc->copy;
                mach_msg_size_t port_count = source_dsc->count;
                mach_msg_type_name_t disposition = source_dsc->disposition;

                /* this is really the type SEND, SEND_ONCE, etc. */
                name = disposition;
                disposition = ipc_object_copyin_type(name);

                objects = (ipc_object_t *) (uintptr_t)source_dsc->address;

                for (j = 0; j < port_count; j++) {
                    ipc_object_t object = objects[j];

                    if (!IO_VALID(object)) {
                        continue;
                    }

                    ipc_object_copyin_from_kernel(object, name);

                    if ((disposition == MACH_MSG_TYPE_PORT_RECEIVE) &&
                        ipc_port_check_circularity(ip_object_to_port(object),
                        ip_object_to_port(remote))) {
                        kmsg->ikm_header->msgh_bits |= MACH_MSGH_BITS_CIRCULAR;
                    }
                }

                dest_dsc->address = objects;
                dest_dsc->deallocate = deallocate;
                dest_dsc->copy = copy;
                dest_dsc->disposition = disposition;
                dest_dsc->type = MACH_MSG_OOL_PORTS_DESCRIPTOR;
                dest_dsc->count = port_count;
                break;
            }
            case MACH_MSG_GUARDED_PORT_DESCRIPTOR: {
                mach_msg_type_name_t    disp;
                ipc_object_t            object;
                mach_msg_guarded_port_descriptor32_t *dsc;
                mach_msg_guarded_port_descriptor_t   *dest_dsc;

                dsc = (typeof(dsc)) &saddr->guarded_port32;
                dest_dsc = &daddr->guarded_port;

                disp = dsc->disposition;
                object = ip_to_object(CAST_MACH_NAME_TO_PORT(dsc->name));
                assert(dsc->flags == 0);
                assert(dsc->context == 0);

                dest_dsc->disposition = ipc_object_copyin_type(disp);
                dest_dsc->name = ip_object_to_port(object);
                dest_dsc->type = MACH_MSG_GUARDED_PORT_DESCRIPTOR;
                dest_dsc->flags = 0;

                if (!IO_VALID(object)) {
                    break;
                }

                ipc_object_copyin_from_kernel(object, disp);

                /* CDY avoid circularity when the destination is also */
                /* the kernel.  This check should be changed into an  */
                /* assert when the new kobject model is in place since*/
                /* ports will not be used in kernel to kernel chats   */

                if (ip_object_to_port(remote)->ip_receiver != ipc_space_kernel) {
                    if ((dest_dsc->disposition == MACH_MSG_TYPE_PORT_RECEIVE) &&
                        ipc_port_check_circularity(ip_object_to_port(object),
                        ip_object_to_port(remote))) {
                        kmsg->ikm_header->msgh_bits |=
                            MACH_MSGH_BITS_CIRCULAR;
                    }
                }
                break;
            }
#if MACH_ASSERT
            default:
                panic("ipc_kmsg_copyin_from_kernel: bad descriptor");
#endif /* MACH_ASSERT */
            }
        }
    }

    /* Add the signature to the message */
    ikm_sign(kmsg);

    return MACH_MSG_SUCCESS;
}
#endif /* IKM_SUPPORT_LEGACY */
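/*
 * Note on the legacy copyin above (explanatory, hedged): the
 * "dsc_adjust = 4 * count" expansion reflects that the kernel-internal
 * descriptor layout is 4 bytes larger per descriptor than the legacy
 * user-visible layout.  For example, a message carrying 3 descriptors has
 * its header slid down by 12 bytes and its msgh_size grown by 12 so that
 * the kernel-format descriptors can be rewritten in place.
 */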
/*
 *  Routine:    ipc_kmsg_copyout_header
 *  Purpose:
 *      "Copy-out" port rights in the header of a message.
 *      Operates atomically; if it doesn't succeed the
 *      message header and the space are left untouched.
 *      If it does succeed the remote/local port fields
 *      contain port names instead of object pointers,
 *      and the bits field is updated.
 *  Conditions:
 *      Nothing locked.
 *  Returns:
 *      MACH_MSG_SUCCESS    Copied out port rights.
 *      MACH_RCV_INVALID_NOTIFY
 *          Notify is non-null and doesn't name a receive right.
 *          (Either KERN_INVALID_NAME or KERN_INVALID_RIGHT.)
 *      MACH_RCV_HEADER_ERROR|MACH_MSG_IPC_SPACE
 *          The space is dead.
 *      MACH_RCV_HEADER_ERROR|MACH_MSG_IPC_SPACE
 *          No room in space for another name.
 *      MACH_RCV_HEADER_ERROR|MACH_MSG_IPC_KERNEL
 *          Couldn't allocate memory for the reply port.
 *      MACH_RCV_HEADER_ERROR|MACH_MSG_IPC_KERNEL
 *          Couldn't allocate memory for the dead-name request.
 */

mach_msg_return_t
ipc_kmsg_copyout_header(
    ipc_kmsg_t              kmsg,
    ipc_space_t             space,
    mach_msg_option_t       option)
{
    mach_msg_header_t *msg = kmsg->ikm_header;
    mach_msg_bits_t mbits = msg->msgh_bits;
    ipc_port_t dest = msg->msgh_remote_port;

    assert(IP_VALID(dest));

    /*
     * While we still hold a reference on the received-from port,
     * process all send-possible notifications we received along with
     * the message.
     */
    ipc_port_spnotify(dest);

    mach_msg_type_name_t dest_type = MACH_MSGH_BITS_REMOTE(mbits);
    mach_msg_type_name_t reply_type = MACH_MSGH_BITS_LOCAL(mbits);
    mach_msg_type_name_t voucher_type = MACH_MSGH_BITS_VOUCHER(mbits);
    ipc_port_t reply = msg->msgh_local_port;
    ipc_port_t release_reply_port = IP_NULL;
    mach_port_name_t dest_name, reply_name;

    ipc_port_t voucher = kmsg->ikm_voucher;
    ipc_port_t release_voucher_port = IP_NULL;
    mach_port_name_t voucher_name;

    uint32_t entries_held = 0;
    boolean_t need_write_lock = FALSE;
    kern_return_t kr;
    /*
     * Reserve any potentially needed entries in the target space.
     * We'll free any unused before unlocking the space.
     */
    if (IP_VALID(reply)) {
        entries_held++;
        need_write_lock = TRUE;
    }
    if (IP_VALID(voucher)) {
        assert(voucher_type == MACH_MSG_TYPE_MOVE_SEND);

        if ((option & MACH_RCV_VOUCHER) != 0) {
            entries_held++;
        }
        need_write_lock = TRUE;
    }

    if (need_write_lock) {
        is_write_lock(space);

        while (entries_held) {
            if (!is_active(space)) {
                is_write_unlock(space);
                return MACH_RCV_HEADER_ERROR |
                    MACH_MSG_IPC_SPACE;
            }

            kr = ipc_entries_hold(space, entries_held);
            if (KERN_SUCCESS == kr) {
                break;
            }

            kr = ipc_entry_grow_table(space, ITS_SIZE_NONE);
            if (KERN_SUCCESS != kr) {
                return MACH_RCV_HEADER_ERROR |
                    MACH_MSG_IPC_SPACE;
            }
            /* space was unlocked and relocked - retry */
        }
        /* Handle reply port. */
        if (IP_VALID(reply)) {
            ipc_entry_t entry;

            /* Is there already an entry we can use? */
            if ((reply_type != MACH_MSG_TYPE_PORT_SEND_ONCE) &&
                ipc_right_reverse(space, ip_to_object(reply), &reply_name, &entry)) {
                /* reply port is locked and active */
                assert(entry->ie_bits & MACH_PORT_TYPE_SEND_RECEIVE);
            } else {
                ip_lock(reply);

                /* Is the reply port still active and allowed to be copied out? */
                if (!ip_active(reply) || !ip_label_check(space, reply, reply_type)) {
                    /* clear the context value */
                    reply->ip_reply_context = 0;
                    ip_unlock(reply);

                    release_reply_port = reply;
                    reply_name = MACH_PORT_DEAD;
                    goto done_with_reply;
                }

                /* claim a held entry for the reply port */
                assert(entries_held > 0);
                entries_held--;
                ipc_entry_claim(space, &reply_name, &entry);
                assert(IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_NONE);
                assert(entry->ie_object == IO_NULL);
                entry->ie_object = ip_to_object(reply);
            }

            /* space and reply port are locked and active */
            ip_reference(reply);    /* hold onto the reply port */

            /*
             * If the receiver would like to enforce strict reply
             * semantics, and the message looks like it expects a reply,
             * and contains a voucher, then link the context in the
             * voucher with the reply port so that the next message sent
             * to the reply port must come from a thread that has a
             * matching context (voucher).
             */
            if (enforce_strict_reply && MACH_RCV_WITH_STRICT_REPLY(option) && IP_VALID(voucher)) {
                if (ipc_kmsg_validate_reply_port_locked(reply, option) != KERN_SUCCESS) {
                    /* if the receiver isn't happy with the reply port: fail the receive. */
                    ip_unlock(reply);
                    ipc_entry_dealloc(space, reply_name, entry);
                    is_write_unlock(space);
                    ip_release(reply);
                    return MACH_RCV_INVALID_REPLY;
                }
                ipc_kmsg_link_reply_context_locked(reply, voucher);
            } else {
                /*
                 * if the receive did not choose to participate
                 * in the strict reply/RPC, then don't enforce
                 * anything (as this could lead to booby-trapped
                 * messages that kill the server).
                 */
                reply->ip_reply_context = 0;
            }

            kr = ipc_right_copyout(space, reply_name, entry,
                reply_type, NULL, NULL, ip_to_object(reply));
            assert(kr == KERN_SUCCESS);
            /* reply port is unlocked */
        } else {
            reply_name = CAST_MACH_PORT_TO_NAME(reply);
        }

done_with_reply:

        /* Handle voucher port. */
        if (voucher_type != MACH_MSGH_BITS_ZERO) {
            assert(voucher_type == MACH_MSG_TYPE_MOVE_SEND);

            if (!IP_VALID(voucher)) {
                if ((option & MACH_RCV_VOUCHER) == 0) {
                    voucher_type = MACH_MSGH_BITS_ZERO;
                }
                voucher_name = MACH_PORT_NULL;
                goto done_with_voucher;
            }

            /* clear voucher from its hiding place back in the kmsg */
            kmsg->ikm_voucher = IP_NULL;

            if ((option & MACH_RCV_VOUCHER) != 0) {
                ipc_entry_t entry;

                if (ipc_right_reverse(space, ip_to_object(voucher),
                    &voucher_name, &entry)) {
                    /* voucher port locked */
                    assert(entry->ie_bits & MACH_PORT_TYPE_SEND);
                } else {
                    assert(entries_held > 0);
                    entries_held--;
                    ipc_entry_claim(space, &voucher_name, &entry);
                    assert(IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_NONE);
                    assert(entry->ie_object == IO_NULL);
                    entry->ie_object = ip_to_object(voucher);
                    ip_lock(voucher);
                }
                /* space is locked and active */
                require_ip_active(voucher);
                assert(ip_kotype(voucher) == IKOT_VOUCHER);
                kr = ipc_right_copyout(space, voucher_name, entry,
                    MACH_MSG_TYPE_MOVE_SEND, NULL, NULL,
                    ip_to_object(voucher));
                /* voucher port is unlocked */
            } else {
                voucher_type = MACH_MSGH_BITS_ZERO;
                release_voucher_port = voucher;
                voucher_name = MACH_PORT_NULL;
            }
        } else {
            voucher_name = msg->msgh_voucher_port;
        }

done_with_voucher:

        ip_lock(dest);
        is_write_unlock(space);
    } else {
        /*
         * No reply or voucher port!  This is an easy case.
         * We only need to have the space locked
         * when locking the destination.
         */

        is_read_lock(space);
        if (!is_active(space)) {
            is_read_unlock(space);
            return MACH_RCV_HEADER_ERROR | MACH_MSG_IPC_SPACE;
        }

        ip_lock(dest);
        is_read_unlock(space);

        reply_name = CAST_MACH_PORT_TO_NAME(reply);

        if (voucher_type != MACH_MSGH_BITS_ZERO) {
            assert(voucher_type == MACH_MSG_TYPE_MOVE_SEND);
            if ((option & MACH_RCV_VOUCHER) == 0) {
                voucher_type = MACH_MSGH_BITS_ZERO;
            }
            voucher_name = MACH_PORT_NULL;
        } else {
            voucher_name = msg->msgh_voucher_port;
        }
    }
    /*
     *  At this point, the space is unlocked and the destination
     *  port is locked.  (Lock taken while space was locked.)
     *  reply_name is taken care of; we still need dest_name.
     *  We still hold a ref for reply (if it is valid).
     *
     *  If the space holds receive rights for the destination,
     *  we return its name for the right.  Otherwise the task
     *  managed to destroy or give away the receive right between
     *  receiving the message and this copyout.  If the destination
     *  is dead, return MACH_PORT_DEAD, and if the receive right
     *  exists somewhere else (another space, in transit)
     *  return MACH_PORT_NULL.
     *
     *  Making this copyout operation atomic with the previous
     *  copyout of the reply port is a bit tricky.  If there was
     *  no real reply port (it wasn't IP_VALID) then this isn't
     *  an issue.  If the reply port was dead at copyout time,
     *  then we are OK, because if dest is dead we serialize
     *  after the death of both ports and if dest is alive
     *  we serialize after reply died but before dest's (later) death.
     *  So assume reply was alive when we copied it out.  If dest
     *  is alive, then we are OK because we serialize before
     *  the ports' deaths.  So assume dest is dead when we look at it.
     *  If reply dies/died after dest, then we are OK because
     *  we serialize after dest died but before reply dies.
     *  So the hard case is when reply is alive at copyout,
     *  dest is dead at copyout, and reply died before dest died.
     *  In this case pretend that dest is still alive, so
     *  we serialize while both ports are alive.
     *
     *  Because the space lock is held across the copyout of reply
     *  and locking dest, the receive right for dest can't move
     *  in or out of the space while the copyouts happen, so
     *  that isn't an atomicity problem.  In the last hard case
     *  above, this implies that when dest is dead that the
     *  space couldn't have had receive rights for dest at
     *  the time reply was copied-out, so when we pretend
     *  that dest is still alive, we can return MACH_PORT_NULL.
     *
     *  If dest == reply, then we have to make it look like
     *  either both copyouts happened before the port died,
     *  or both happened after the port died.  This special
     *  case works naturally if the timestamp comparison
     *  is done correctly.
     */
    if (ip_active(dest)) {
        ipc_object_copyout_dest(space, ip_to_object(dest),
            dest_type, &dest_name);
        /* dest is unlocked */
    } else {
        ipc_port_timestamp_t timestamp;

        timestamp = dest->ip_timestamp;
        ip_unlock(dest);
        ip_release(dest);

        if (IP_VALID(reply)) {
            ip_lock(reply);
            if (ip_active(reply) ||
                IP_TIMESTAMP_ORDER(timestamp,
                reply->ip_timestamp)) {
                dest_name = MACH_PORT_DEAD;
            } else {
                dest_name = MACH_PORT_NULL;
            }
            ip_unlock(reply);
        } else {
            dest_name = MACH_PORT_DEAD;
        }
    }
    if (IP_VALID(reply)) {
        ip_release(reply);
    }

    if (IP_VALID(release_reply_port)) {
        if (reply_type == MACH_MSG_TYPE_PORT_SEND_ONCE) {
            ipc_port_release_sonce(release_reply_port);
        } else {
            ipc_port_release_send(release_reply_port);
        }
    }

    if ((option & MACH_RCV_VOUCHER) != 0) {
        KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_MSG_RECV) | DBG_FUNC_NONE,
            VM_KERNEL_ADDRPERM((uintptr_t)kmsg),
            (uintptr_t)kmsg->ikm_header->msgh_bits,
            (uintptr_t)kmsg->ikm_header->msgh_id,
            VM_KERNEL_ADDRPERM((uintptr_t)unsafe_convert_port_to_voucher(voucher)),
            0);
    } else {
        KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_MSG_RECV_VOUCHER_REFUSED) | DBG_FUNC_NONE,
            VM_KERNEL_ADDRPERM((uintptr_t)kmsg),
            (uintptr_t)kmsg->ikm_header->msgh_bits,
            (uintptr_t)kmsg->ikm_header->msgh_id,
            VM_KERNEL_ADDRPERM((uintptr_t)unsafe_convert_port_to_voucher(voucher)),
            0);
    }

    if (IP_VALID(release_voucher_port)) {
        ipc_port_release_send(release_voucher_port);
    }

    msg->msgh_bits = MACH_MSGH_BITS_SET(reply_type, dest_type,
        voucher_type, mbits);
    msg->msgh_local_port = CAST_MACH_NAME_TO_PORT(dest_name);
    msg->msgh_remote_port = CAST_MACH_NAME_TO_PORT(reply_name);
    msg->msgh_voucher_port = voucher_name;

    return MACH_MSG_SUCCESS;
}
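/*
 * Explanatory note: on successful return from ipc_kmsg_copyout_header()
 * the header has been rewritten for the receiver, roughly:
 *
 *	msgh_remote_port   <- name of the reply right in the receiving space
 *	msgh_local_port    <- name of the destination right (or MACH_PORT_DEAD/NULL)
 *	msgh_voucher_port  <- voucher name if MACH_RCV_VOUCHER was requested
 *	msgh_bits          <- MACH_MSGH_BITS_SET(reply_type, dest_type, voucher_type, mbits)
 *
 * i.e. remote/local are swapped relative to how the sender built the
 * message, which is what mach_msg() receivers expect.
 */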
/*
 *  Routine:    ipc_kmsg_copyout_object
 *  Purpose:
 *      Copy-out a port right.  Always returns a name,
 *      even for unsuccessful return codes.  Always
 *      consumes the supplied object.
 *  Conditions:
 *      Nothing locked.
 *  Returns:
 *      MACH_MSG_SUCCESS    The space acquired the right
 *          (name is valid) or the object is dead (MACH_PORT_DEAD).
 *      MACH_MSG_IPC_SPACE  No room in space for the right,
 *          or the space is dead. (Name is MACH_PORT_NULL.)
 *      MACH_MSG_IPC_KERNEL Kernel resource shortage.
 *          (Name is MACH_PORT_NULL.)
 */
mach_msg_return_t
ipc_kmsg_copyout_object(
    ipc_space_t             space,
    ipc_object_t            object,
    mach_msg_type_name_t    msgt_name,
    mach_port_context_t     *context,
    mach_msg_guard_flags_t  *guard_flags,
    mach_port_name_t        *namep)
{
    kern_return_t kr;

    if (!IO_VALID(object)) {
        *namep = CAST_MACH_PORT_TO_NAME(object);
        return MACH_MSG_SUCCESS;
    }

    kr = ipc_object_copyout(space, object, msgt_name, context, guard_flags, namep);
    if (kr != KERN_SUCCESS) {
        ipc_object_destroy(object, msgt_name);

        if (kr == KERN_INVALID_CAPABILITY) {
            *namep = MACH_PORT_DEAD;
        } else {
            *namep = MACH_PORT_NULL;

            if (kr == KERN_RESOURCE_SHORTAGE) {
                return MACH_MSG_IPC_KERNEL;
            } else {
                return MACH_MSG_IPC_SPACE;
            }
        }
    }

    return MACH_MSG_SUCCESS;
}
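/*
 * Illustrative sketch (hedged, not from any particular caller): a caller
 * that copies out a single right and only cares about the name/error
 * classification could use the routine above like this; "right" and
 * "space" are assumed to be an ipc_object_t / ipc_space_t the caller
 * already holds.
 *
 *	mach_port_name_t name;
 *	mach_msg_return_t mr;
 *
 *	mr = ipc_kmsg_copyout_object(space, right,
 *	    MACH_MSG_TYPE_PORT_SEND, NULL, NULL, &name);
 *	if (mr == MACH_MSG_SUCCESS) {
 *		// name is valid, or MACH_PORT_DEAD if the object was dead
 *	} else {
 *		// MACH_MSG_IPC_SPACE or MACH_MSG_IPC_KERNEL; name is MACH_PORT_NULL
 *	}
 */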
static mach_msg_descriptor_t *
ipc_kmsg_copyout_port_descriptor(mach_msg_descriptor_t *dsc,
    mach_msg_descriptor_t *dest_dsc,
    ipc_space_t space,
    mach_msg_return_t *mr)
{
    mach_port_t             port;
    mach_port_name_t        name;
    mach_msg_type_name_t    disp;

    /* Copyout port right carried in the message */
    port = dsc->port.name;
    disp = dsc->port.disposition;
    *mr |= ipc_kmsg_copyout_object(space,
        ip_to_object(port), disp, NULL, NULL, &name);

    if (current_task() == kernel_task) {
        mach_msg_port_descriptor_t *user_dsc = (typeof(user_dsc))dest_dsc;
        user_dsc--; // point to the start of this port descriptor
        bzero((void *)user_dsc, sizeof(*user_dsc));
        user_dsc->name = CAST_MACH_NAME_TO_PORT(name);
        user_dsc->disposition = disp;
        user_dsc->type = MACH_MSG_PORT_DESCRIPTOR;
        dest_dsc = (typeof(dest_dsc))user_dsc;
    } else {
        mach_msg_legacy_port_descriptor_t *user_dsc = (typeof(user_dsc))dest_dsc;
        user_dsc--; // point to the start of this port descriptor
        bzero((void *)user_dsc, sizeof(*user_dsc));
        user_dsc->name = CAST_MACH_PORT_TO_NAME(name);
        user_dsc->disposition = disp;
        user_dsc->type = MACH_MSG_PORT_DESCRIPTOR;
        dest_dsc = (typeof(dest_dsc))user_dsc;
    }

    return (mach_msg_descriptor_t *)dest_dsc;
}
mach_msg_descriptor_t *
ipc_kmsg_copyout_ool_descriptor(mach_msg_ool_descriptor_t *dsc, mach_msg_descriptor_t *user_dsc, int is_64bit, vm_map_t map, mach_msg_return_t *mr);
mach_msg_descriptor_t *
ipc_kmsg_copyout_ool_descriptor(mach_msg_ool_descriptor_t *dsc, mach_msg_descriptor_t *user_dsc, int is_64bit, vm_map_t map, mach_msg_return_t *mr)
{
    vm_map_copy_t               copy;
    vm_map_address_t            rcv_addr;
    mach_msg_copy_options_t     copy_options;
    vm_map_size_t               size;
    mach_msg_descriptor_type_t  dsc_type;
    boolean_t                   misaligned = FALSE;

    //SKIP_PORT_DESCRIPTORS(saddr, sdsc_count);

    copy = (vm_map_copy_t)dsc->address;
    size = (vm_map_size_t)dsc->size;
    copy_options = dsc->copy;
    assert(copy_options != MACH_MSG_KALLOC_COPY_T);
    dsc_type = dsc->type;
    if (copy != VM_MAP_COPY_NULL) {
        kern_return_t kr;

        rcv_addr = 0;
        if (vm_map_copy_validate_size(map, copy, &size) == FALSE) {
            panic("Inconsistent OOL/copyout size on %p: expected %d, got %lld @%p",
                dsc, dsc->size, (unsigned long long)copy->size, copy);
        }

        if ((copy->type == VM_MAP_COPY_ENTRY_LIST) &&
            (trunc_page(copy->offset) != copy->offset ||
            round_page(dsc->size) != dsc->size)) {
            misaligned = TRUE;
        }

        if (misaligned) {
            vm_map_address_t    rounded_addr;
            vm_map_size_t       rounded_size;
            vm_map_offset_t     effective_page_mask, effective_page_size;

            effective_page_mask = VM_MAP_PAGE_MASK(map);
            effective_page_size = effective_page_mask + 1;

            rounded_size = vm_map_round_page(copy->offset + size, effective_page_mask) - vm_map_trunc_page(copy->offset, effective_page_mask);

            kr = vm_allocate_kernel(map, (vm_offset_t *)&rounded_addr, rounded_size, VM_FLAGS_ANYWHERE, 0);

            if (kr == KERN_SUCCESS) {
                /*
                 * vm_map_copy_overwrite does a full copy
                 * if size is too small to optimize.
                 * So we tried skipping the offset adjustment
                 * if we fail the 'size' test.
                 *
                 * if (size >= VM_MAP_COPY_OVERWRITE_OPTIMIZATION_THRESHOLD_PAGES * effective_page_size) {
                 *
                 * This resulted in leaked memory especially on the
                 * older watches (16k user - 4k kernel) because we
                 * would do a physical copy into the start of this
                 * rounded range but could leak part of it
                 * on deallocation if the 'size' being deallocated
                 * does not cover the full range. So instead we do
                 * the misalignment adjustment always so that on
                 * deallocation we will remove the full range.
                 */
                if ((rounded_addr & effective_page_mask) !=
                    (copy->offset & effective_page_mask)) {
                    /*
                     * Need similar mis-alignment of source and destination...
                     */
                    rounded_addr += (copy->offset & effective_page_mask);

                    assert((rounded_addr & effective_page_mask) == (copy->offset & effective_page_mask));
                }
                rcv_addr = rounded_addr;

                kr = vm_map_copy_overwrite(map, rcv_addr, copy, size, FALSE);
            }
        } else {
            kr = vm_map_copyout_size(map, &rcv_addr, copy, size);
        }
        if (kr != KERN_SUCCESS) {
            if (kr == KERN_RESOURCE_SHORTAGE) {
                *mr |= MACH_MSG_VM_KERNEL;
            } else {
                *mr |= MACH_MSG_VM_SPACE;
            }
            vm_map_copy_discard(copy);
            rcv_addr = 0;
            size = 0;
        }
    } else {
        rcv_addr = 0;
        size = 0;
    }

    /*
     * Now update the descriptor as the user would see it.
     * This may require expanding the descriptor to the user
     * visible size.  There is already space allocated for
     * this in what naddr points to.
     */
    if (current_task() == kernel_task) {
        mach_msg_ool_descriptor_t *user_ool_dsc = (typeof(user_ool_dsc))user_dsc;
        user_ool_dsc--;
        bzero((void *)user_ool_dsc, sizeof(*user_ool_dsc));

        user_ool_dsc->address = (void *)(uintptr_t)rcv_addr;
        user_ool_dsc->deallocate = (copy_options == MACH_MSG_VIRTUAL_COPY) ?
            TRUE : FALSE;
        user_ool_dsc->copy = copy_options;
        user_ool_dsc->type = dsc_type;
        user_ool_dsc->size = (mach_msg_size_t)size;

        user_dsc = (typeof(user_dsc))user_ool_dsc;
    } else if (is_64bit) {
        mach_msg_ool_descriptor64_t *user_ool_dsc = (typeof(user_ool_dsc))user_dsc;
        user_ool_dsc--;
        bzero((void *)user_ool_dsc, sizeof(*user_ool_dsc));

        user_ool_dsc->address = rcv_addr;
        user_ool_dsc->deallocate = (copy_options == MACH_MSG_VIRTUAL_COPY) ?
            TRUE : FALSE;
        user_ool_dsc->copy = copy_options;
        user_ool_dsc->type = dsc_type;
        user_ool_dsc->size = (mach_msg_size_t)size;

        user_dsc = (typeof(user_dsc))user_ool_dsc;
    } else {
        mach_msg_ool_descriptor32_t *user_ool_dsc = (typeof(user_ool_dsc))user_dsc;
        user_ool_dsc--;
        bzero((void *)user_ool_dsc, sizeof(*user_ool_dsc));

        user_ool_dsc->address = CAST_DOWN_EXPLICIT(uint32_t, rcv_addr);
        user_ool_dsc->size = (mach_msg_size_t)size;
        user_ool_dsc->deallocate = (copy_options == MACH_MSG_VIRTUAL_COPY) ?
            TRUE : FALSE;
        user_ool_dsc->copy = copy_options;
        user_ool_dsc->type = dsc_type;

        user_dsc = (typeof(user_dsc))user_ool_dsc;
    }

    return user_dsc;
}
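/*
 * Worked example for the misaligned-copy path above (hypothetical numbers):
 * with a 16K destination map (effective_page_mask = 0x3fff), a copy with
 * copy->offset = 0x1100 and size = 0x2000 gives
 *
 *	rounded_size = round(0x1100 + 0x2000) - trunc(0x1100)
 *	             = 0x4000 - 0x0000 = 0x4000
 *
 * and, after vm_allocate_kernel(), rcv_addr is nudged by
 * (copy->offset & effective_page_mask) = 0x1100 so that source and
 * destination share the same sub-page alignment before
 * vm_map_copy_overwrite() runs.
 */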
mach_msg_descriptor_t *
ipc_kmsg_copyout_ool_ports_descriptor(mach_msg_ool_ports_descriptor_t *dsc,
    mach_msg_descriptor_t *user_dsc,
    int is_64bit,
    vm_map_t map,
    ipc_space_t space,
    ipc_kmsg_t kmsg,
    mach_msg_return_t *mr);
mach_msg_descriptor_t *
ipc_kmsg_copyout_ool_ports_descriptor(mach_msg_ool_ports_descriptor_t *dsc,
    mach_msg_descriptor_t *user_dsc,
    int is_64bit,
    vm_map_t map,
    ipc_space_t space,
    ipc_kmsg_t kmsg,
    mach_msg_return_t *mr)
{
    mach_vm_offset_t        rcv_addr = 0;
    mach_msg_type_name_t    disp;
    mach_msg_type_number_t  count, i;
    vm_size_t               ports_length, names_length;

    mach_msg_copy_options_t copy_options = MACH_MSG_VIRTUAL_COPY;
    //SKIP_PORT_DESCRIPTORS(saddr, sdsc_count);

    count = dsc->count;
    disp = dsc->disposition;
    ports_length = count * sizeof(mach_port_t);
    names_length = count * sizeof(mach_port_name_t);

    if (ports_length != 0 && dsc->address != 0) {
        /*
         * Check to see if there is an overwrite descriptor
         * specified in the scatter list for this ool data.
         * The descriptor has already been verified.
         */
#if 0
        if (saddr != MACH_MSG_DESCRIPTOR_NULL) {
            if (differs) {
                OTHER_OOL_DESCRIPTOR *scatter_dsc;

                scatter_dsc = (OTHER_OOL_DESCRIPTOR *)saddr;
                rcv_addr = (mach_vm_offset_t) scatter_dsc->address;
                copy_options = scatter_dsc->copy;
            } else {
                mach_msg_ool_descriptor_t *scatter_dsc;

                scatter_dsc = &saddr->out_of_line;
                rcv_addr = CAST_USER_ADDR_T(scatter_dsc->address);
                copy_options = scatter_dsc->copy;
            }
            INCREMENT_SCATTER(saddr, sdsc_count, differs);
        }
#endif

        if (copy_options == MACH_MSG_VIRTUAL_COPY) {
            /*
             * Dynamically allocate the region
             */
            vm_tag_t tag;
            if (vm_kernel_map_is_kernel(map)) {
                tag = VM_KERN_MEMORY_IPC;
            } else {
                tag = VM_MEMORY_MACH_MSG;
            }

            kern_return_t kr;
            if ((kr = mach_vm_allocate_kernel(map, &rcv_addr,
                (mach_vm_size_t)names_length,
                VM_FLAGS_ANYWHERE, tag)) != KERN_SUCCESS) {
                ipc_kmsg_clean_body(kmsg, 1, (mach_msg_descriptor_t *)dsc);
                rcv_addr = 0;

                if (kr == KERN_RESOURCE_SHORTAGE) {
                    *mr |= MACH_MSG_VM_KERNEL;
                } else {
                    *mr |= MACH_MSG_VM_SPACE;
                }
            }
        }
        /*
         * Handle the port rights and copy out the names
         * for those rights out to user-space.
         */
        if (rcv_addr != 0) {
            ipc_object_t *objects = (ipc_object_t *) dsc->address;
            mach_port_name_t *names = (mach_port_name_t *) dsc->address;

            /* copyout port rights carried in the message */

            for (i = 0; i < count; i++) {
                ipc_object_t object = objects[i];

                *mr |= ipc_kmsg_copyout_object(space, object,
                    disp, NULL, NULL, &names[i]);
            }

            /* copyout to memory allocated above */
            void *data = dsc->address;
            if (copyoutmap(map, data, rcv_addr, names_length) != KERN_SUCCESS) {
                *mr |= MACH_MSG_VM_SPACE;
            }
            kfree(data, ports_length);
        }
    }
    /*
     * Now update the descriptor based on the information
     * calculated above.
     */
    if (current_task() == kernel_task) {
        mach_msg_ool_ports_descriptor_t *user_ool_dsc = (typeof(user_ool_dsc))user_dsc;
        user_ool_dsc--;
        bzero((void *)user_ool_dsc, sizeof(*user_ool_dsc));

        user_ool_dsc->address = (void *)(uintptr_t)rcv_addr;
        user_ool_dsc->deallocate = (copy_options == MACH_MSG_VIRTUAL_COPY) ?
            TRUE : FALSE;
        user_ool_dsc->copy = copy_options;
        user_ool_dsc->disposition = disp;
        user_ool_dsc->type = MACH_MSG_OOL_PORTS_DESCRIPTOR;
        user_ool_dsc->count = count;

        user_dsc = (typeof(user_dsc))user_ool_dsc;
    } else if (is_64bit) {
        mach_msg_ool_ports_descriptor64_t *user_ool_dsc = (typeof(user_ool_dsc))user_dsc;
        user_ool_dsc--;
        bzero((void *)user_ool_dsc, sizeof(*user_ool_dsc));

        user_ool_dsc->address = rcv_addr;
        user_ool_dsc->deallocate = (copy_options == MACH_MSG_VIRTUAL_COPY) ?
            TRUE : FALSE;
        user_ool_dsc->copy = copy_options;
        user_ool_dsc->disposition = disp;
        user_ool_dsc->type = MACH_MSG_OOL_PORTS_DESCRIPTOR;
        user_ool_dsc->count = count;

        user_dsc = (typeof(user_dsc))user_ool_dsc;
    } else {
        mach_msg_ool_ports_descriptor32_t *user_ool_dsc = (typeof(user_ool_dsc))user_dsc;
        user_ool_dsc--;
        bzero((void *)user_ool_dsc, sizeof(*user_ool_dsc));

        user_ool_dsc->address = CAST_DOWN_EXPLICIT(uint32_t, rcv_addr);
        user_ool_dsc->count = count;
        user_ool_dsc->deallocate = (copy_options == MACH_MSG_VIRTUAL_COPY) ?
            TRUE : FALSE;
        user_ool_dsc->copy = copy_options;
        user_ool_dsc->disposition = disp;
        user_ool_dsc->type = MACH_MSG_OOL_PORTS_DESCRIPTOR;

        user_dsc = (typeof(user_dsc))user_ool_dsc;
    }

    return user_dsc;
}
static mach_msg_descriptor_t *
ipc_kmsg_copyout_guarded_port_descriptor(
    mach_msg_guarded_port_descriptor_t *dsc,
    mach_msg_descriptor_t *dest_dsc,
    int is_64bit,
    __unused ipc_kmsg_t kmsg,
    ipc_space_t space,
    mach_msg_option_t option,
    mach_msg_return_t *mr)
{
    mach_port_t             port;
    mach_port_name_t        name = MACH_PORT_NULL;
    mach_msg_type_name_t    disp;
    mach_msg_guard_flags_t  guard_flags;
    mach_port_context_t     context;

    /* Copyout port right carried in the message */
    port = dsc->name;
    disp = dsc->disposition;
    guard_flags = dsc->flags;
    /* Currently kernel_task doesn't support receiving guarded port descriptors */
    struct knote *kn = current_thread()->ith_knote;
    if ((kn != ITH_KNOTE_PSEUDO) && (((option & MACH_RCV_GUARDED_DESC) == 0) ||
        (current_task() == kernel_task))) {
#if DEVELOPMENT || DEBUG
        if (current_task() != kernel_task) {
            /*
             * Simulated crash needed for debugging, notifies the receiver to opt into receiving
             * guarded descriptors.
             */
            mach_port_guard_exception(current_thread()->ith_receiver_name, 0, 0, kGUARD_EXC_RCV_GUARDED_DESC);
        }
#endif
        KDBG(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_DESTROY_GUARDED_DESC), current_thread()->ith_receiver_name,
            VM_KERNEL_ADDRPERM(port), disp, guard_flags);
        ipc_object_destroy(ip_to_object(port), disp);
        mach_msg_legacy_port_descriptor_t *user_dsc = (typeof(user_dsc))dest_dsc;
        user_dsc--; // point to the start of this port descriptor
        bzero((void *)user_dsc, sizeof(*user_dsc));
        user_dsc->name = name;
        user_dsc->disposition = disp;
        user_dsc->type = MACH_MSG_PORT_DESCRIPTOR;
        dest_dsc = (typeof(dest_dsc))user_dsc;
    } else {
        *mr |= ipc_kmsg_copyout_object(space,
            ip_to_object(port), disp, &context, &guard_flags, &name);

        if (!is_64bit) {
            mach_msg_guarded_port_descriptor32_t *user_dsc = (typeof(user_dsc))dest_dsc;
            user_dsc--; // point to the start of this port descriptor
            bzero((void *)user_dsc, sizeof(*user_dsc));
            user_dsc->name = name;
            user_dsc->flags = guard_flags;
            user_dsc->disposition = disp;
            user_dsc->type = MACH_MSG_GUARDED_PORT_DESCRIPTOR;
            user_dsc->context = CAST_DOWN_EXPLICIT(uint32_t, context);
            dest_dsc = (typeof(dest_dsc))user_dsc;
        } else {
            mach_msg_guarded_port_descriptor64_t *user_dsc = (typeof(user_dsc))dest_dsc;
            user_dsc--; // point to the start of this port descriptor
            bzero((void *)user_dsc, sizeof(*user_dsc));
            user_dsc->name = name;
            user_dsc->flags = guard_flags;
            user_dsc->disposition = disp;
            user_dsc->type = MACH_MSG_GUARDED_PORT_DESCRIPTOR;
            user_dsc->context = context;
            dest_dsc = (typeof(dest_dsc))user_dsc;
        }
    }

    return (mach_msg_descriptor_t *)dest_dsc;
}
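/*
 * Explanatory note: the fallback branch above is why receivers that do not
 * pass MACH_RCV_GUARDED_DESC never see a guarded-port descriptor -- the
 * right is destroyed and the slot is rewritten as a plain
 * MACH_MSG_PORT_DESCRIPTOR whose name is MACH_PORT_NULL, while opted-in
 * receivers get the guard flags and context copied out alongside the name.
 */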
/*
 *  Routine:    ipc_kmsg_copyout_body
 *  Purpose:
 *      "Copy-out" port rights and out-of-line memory
 *      in the body of a message.
 *
 *      The error codes are a combination of special bits.
 *      The copyout proceeds despite errors.
 *  Conditions:
 *      Nothing locked.
 *  Returns:
 *      MACH_MSG_SUCCESS    Successful copyout.
 *      MACH_MSG_IPC_SPACE  No room for port right in name space.
 *      MACH_MSG_VM_SPACE   No room for memory in address space.
 *      MACH_MSG_IPC_KERNEL Resource shortage handling port right.
 *      MACH_MSG_VM_KERNEL  Resource shortage handling memory.
 *      MACH_MSG_INVALID_RT_DESCRIPTOR Descriptor incompatible with RT
 */

mach_msg_return_t
ipc_kmsg_copyout_body(
    ipc_kmsg_t              kmsg,
    ipc_space_t             space,
    vm_map_t                map,
    mach_msg_option_t       option,
    mach_msg_body_t         *slist)
{
    mach_msg_body_t         *body;
    mach_msg_descriptor_t   *kern_dsc, *user_dsc;
    mach_msg_descriptor_t   *saddr;
    mach_msg_type_number_t  dsc_count, sdsc_count;
    int                     i;
    mach_msg_return_t       mr = MACH_MSG_SUCCESS;
    boolean_t               is_task_64bit = (map->max_offset > VM_MAX_ADDRESS);

    body = (mach_msg_body_t *) (kmsg->ikm_header + 1);
    dsc_count = body->msgh_descriptor_count;
    kern_dsc = (mach_msg_descriptor_t *) (body + 1);
    /* Point user_dsc just after the end of all the descriptors */
    user_dsc = &kern_dsc[dsc_count];

    /* Do scatter list setup */
    if (slist != MACH_MSG_BODY_NULL) {
        panic("Scatter lists disabled");
        saddr = (mach_msg_descriptor_t *) (slist + 1);
        sdsc_count = slist->msgh_descriptor_count;
    } else {
        saddr = MACH_MSG_DESCRIPTOR_NULL;
        sdsc_count = 0;
    }
    /* Now process the descriptors - in reverse order */
    for (i = dsc_count - 1; i >= 0; i--) {
        switch (kern_dsc[i].type.type) {
        case MACH_MSG_PORT_DESCRIPTOR:
            user_dsc = ipc_kmsg_copyout_port_descriptor(&kern_dsc[i], user_dsc, space, &mr);
            break;
        case MACH_MSG_OOL_VOLATILE_DESCRIPTOR:
        case MACH_MSG_OOL_DESCRIPTOR:
            user_dsc = ipc_kmsg_copyout_ool_descriptor(
                (mach_msg_ool_descriptor_t *)&kern_dsc[i], user_dsc, is_task_64bit, map, &mr);
            break;
        case MACH_MSG_OOL_PORTS_DESCRIPTOR:
            user_dsc = ipc_kmsg_copyout_ool_ports_descriptor(
                (mach_msg_ool_ports_descriptor_t *)&kern_dsc[i], user_dsc, is_task_64bit, map, space, kmsg, &mr);
            break;
        case MACH_MSG_GUARDED_PORT_DESCRIPTOR:
            user_dsc = ipc_kmsg_copyout_guarded_port_descriptor(
                (mach_msg_guarded_port_descriptor_t *)&kern_dsc[i], user_dsc, is_task_64bit, kmsg, space, option, &mr);
            break;
        default:
            panic("untyped IPC copyout body: invalid message descriptor");
        }
    }

    if (user_dsc != kern_dsc) {
        vm_offset_t dsc_adjust = (vm_offset_t)user_dsc - (vm_offset_t)kern_dsc;
        memmove((char *)((vm_offset_t)kmsg->ikm_header + dsc_adjust), kmsg->ikm_header, sizeof(mach_msg_base_t));
        kmsg->ikm_header = (mach_msg_header_t *)((vm_offset_t)kmsg->ikm_header + dsc_adjust);
        /* Update the message size for the smaller user representation */
        kmsg->ikm_header->msgh_size -= (mach_msg_size_t)dsc_adjust;
    }

    return mr;
}
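/*
 * Explanatory note: because descriptors shrink (or keep their size) on
 * copyout, the loop above walks them in reverse and packs the user-format
 * descriptors towards the end of the kernel descriptor area; the final
 * memmove then slides the header/body up against them and msgh_size is
 * reduced by the accumulated difference (dsc_adjust).
 */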
/*
 *  Routine:    ipc_kmsg_copyout_size
 *  Purpose:
 *      Compute the size of the message as copied out to the given
 *      map. If the destination map's pointers are a different size
 *      than the kernel's, we have to allow for expansion/
 *      contraction of the descriptors as appropriate.
 *  Conditions:
 *      Nothing locked.
 *  Returns:
 *      size of the message as it would be received.
 */

mach_msg_size_t
ipc_kmsg_copyout_size(
    ipc_kmsg_t              kmsg,
    vm_map_t                map)
{
    mach_msg_size_t         send_size;

    send_size = kmsg->ikm_header->msgh_size;

    boolean_t is_task_64bit = (map->max_offset > VM_MAX_ADDRESS);

#if defined(__LP64__)
    send_size -= LEGACY_HEADER_SIZE_DELTA;
#endif

    if (kmsg->ikm_header->msgh_bits & MACH_MSGH_BITS_COMPLEX) {
        mach_msg_body_t *body;
        mach_msg_descriptor_t *saddr, *eaddr;

        body = (mach_msg_body_t *) (kmsg->ikm_header + 1);
        saddr = (mach_msg_descriptor_t *) (body + 1);
        eaddr = saddr + body->msgh_descriptor_count;

        for (; saddr < eaddr; saddr++) {
            switch (saddr->type.type) {
            case MACH_MSG_OOL_DESCRIPTOR:
            case MACH_MSG_OOL_VOLATILE_DESCRIPTOR:
            case MACH_MSG_OOL_PORTS_DESCRIPTOR:
            case MACH_MSG_GUARDED_PORT_DESCRIPTOR:
                if (!is_task_64bit) {
                    send_size -= DESC_SIZE_ADJUSTMENT;
                }
                break;
            case MACH_MSG_PORT_DESCRIPTOR:
                send_size -= DESC_SIZE_ADJUSTMENT;
                break;
            default:
                break;
            }
        }
    }
    return send_size;
}
/*
 *  Routine:    ipc_kmsg_copyout
 *  Purpose:
 *      "Copy-out" port rights and out-of-line memory
 *      in the message.
 *  Conditions:
 *      Nothing locked.
 *  Returns:
 *      MACH_MSG_SUCCESS    Copied out all rights and memory.
 *      MACH_RCV_HEADER_ERROR + special bits
 *          Rights and memory in the message are intact.
 *      MACH_RCV_BODY_ERROR + special bits
 *          The message header was successfully copied out.
 *          As much of the body was handled as possible.
 */

mach_msg_return_t
ipc_kmsg_copyout(
    ipc_kmsg_t              kmsg,
    ipc_space_t             space,
    vm_map_t                map,
    mach_msg_body_t         *slist,
    mach_msg_option_t       option)
{
    mach_msg_return_t mr;

    ikm_validate_sig(kmsg);

    mr = ipc_kmsg_copyout_header(kmsg, space, option);
    if (mr != MACH_MSG_SUCCESS) {
        return mr;
    }

    if (kmsg->ikm_header->msgh_bits & MACH_MSGH_BITS_COMPLEX) {
        mr = ipc_kmsg_copyout_body(kmsg, space, map, option, slist);

        if (mr != MACH_MSG_SUCCESS) {
            mr |= MACH_RCV_BODY_ERROR;
        }
    }

    return mr;
}
/*
 *  Routine:    ipc_kmsg_copyout_pseudo
 *  Purpose:
 *      Does a pseudo-copyout of the message.
 *      This is like a regular copyout, except
 *      that the ports in the header are handled
 *      as if they are in the body.  They aren't reversed.
 *
 *      The error codes are a combination of special bits.
 *      The copyout proceeds despite errors.
 *  Conditions:
 *      Nothing locked.
 *  Returns:
 *      MACH_MSG_SUCCESS    Successful copyout.
 *      MACH_MSG_IPC_SPACE  No room for port right in name space.
 *      MACH_MSG_VM_SPACE   No room for memory in address space.
 *      MACH_MSG_IPC_KERNEL Resource shortage handling port right.
 *      MACH_MSG_VM_KERNEL  Resource shortage handling memory.
 */

mach_msg_return_t
ipc_kmsg_copyout_pseudo(
    ipc_kmsg_t              kmsg,
    ipc_space_t             space,
    vm_map_t                map,
    mach_msg_body_t         *slist)
{
    mach_msg_bits_t mbits = kmsg->ikm_header->msgh_bits;
    ipc_object_t dest = ip_to_object(kmsg->ikm_header->msgh_remote_port);
    ipc_object_t reply = ip_to_object(kmsg->ikm_header->msgh_local_port);
    ipc_object_t voucher = ip_to_object(kmsg->ikm_voucher);
    mach_msg_type_name_t dest_type = MACH_MSGH_BITS_REMOTE(mbits);
    mach_msg_type_name_t reply_type = MACH_MSGH_BITS_LOCAL(mbits);
    mach_msg_type_name_t voucher_type = MACH_MSGH_BITS_VOUCHER(mbits);
    mach_port_name_t voucher_name = kmsg->ikm_header->msgh_voucher_port;
    mach_port_name_t dest_name, reply_name;
    mach_msg_return_t mr;

    /* Set ith_knote to ITH_KNOTE_PSEUDO */
    current_thread()->ith_knote = ITH_KNOTE_PSEUDO;

    ikm_validate_sig(kmsg);

    assert(IO_VALID(dest));

#if 0
    /*
     * If we did this here, it looks like we wouldn't need the undo logic
     * at the end of ipc_kmsg_send() in the error cases.  Not sure which
     * would be more elegant to keep.
     */
    ipc_importance_clean(kmsg);
#else
    /* just assert it is already clean */
    ipc_importance_assert_clean(kmsg);
#endif

    mr = (ipc_kmsg_copyout_object(space, dest, dest_type, NULL, NULL, &dest_name) |
        ipc_kmsg_copyout_object(space, reply, reply_type, NULL, NULL, &reply_name));

    kmsg->ikm_header->msgh_bits = mbits & MACH_MSGH_BITS_USER;
    kmsg->ikm_header->msgh_remote_port = CAST_MACH_NAME_TO_PORT(dest_name);
    kmsg->ikm_header->msgh_local_port = CAST_MACH_NAME_TO_PORT(reply_name);

    if (IO_VALID(voucher)) {
        assert(voucher_type == MACH_MSG_TYPE_MOVE_SEND);

        kmsg->ikm_voucher = IP_NULL;
        mr |= ipc_kmsg_copyout_object(space, voucher, voucher_type, NULL, NULL, &voucher_name);
        kmsg->ikm_header->msgh_voucher_port = voucher_name;
    }

    if (mbits & MACH_MSGH_BITS_COMPLEX) {
        mr |= ipc_kmsg_copyout_body(kmsg, space, map, 0, slist);
    }

    return mr;
}
/*
 *  Routine:    ipc_kmsg_copyout_dest
 *  Purpose:
 *      Copies out the destination port in the message.
 *      Destroys all other rights and memory in the message.
 *  Conditions:
 *      Nothing locked.
 */

void
ipc_kmsg_copyout_dest(
    ipc_kmsg_t      kmsg,
    ipc_space_t     space)
{
    mach_msg_bits_t mbits;
    ipc_object_t dest;
    ipc_object_t reply;
    ipc_object_t voucher;
    mach_msg_type_name_t dest_type;
    mach_msg_type_name_t reply_type;
    mach_msg_type_name_t voucher_type;
    mach_port_name_t dest_name, reply_name, voucher_name;

    ikm_validate_sig(kmsg);

    mbits = kmsg->ikm_header->msgh_bits;
    dest = ip_to_object(kmsg->ikm_header->msgh_remote_port);
    reply = ip_to_object(kmsg->ikm_header->msgh_local_port);
    voucher = ip_to_object(kmsg->ikm_voucher);
    voucher_name = kmsg->ikm_header->msgh_voucher_port;
    dest_type = MACH_MSGH_BITS_REMOTE(mbits);
    reply_type = MACH_MSGH_BITS_LOCAL(mbits);
    voucher_type = MACH_MSGH_BITS_VOUCHER(mbits);

    assert(IO_VALID(dest));

    ipc_importance_assert_clean(kmsg);

    io_lock(dest);

    if (io_active(dest)) {
        ipc_object_copyout_dest(space, dest, dest_type, &dest_name);
        /* dest is unlocked */
    } else {
        io_unlock(dest);
        io_release(dest);
        dest_name = MACH_PORT_DEAD;
    }

    if (IO_VALID(reply)) {
        ipc_object_destroy(reply, reply_type);
        reply_name = MACH_PORT_NULL;
    } else {
        reply_name = CAST_MACH_PORT_TO_NAME(reply);
    }

    if (IO_VALID(voucher)) {
        assert(voucher_type == MACH_MSG_TYPE_MOVE_SEND);

        kmsg->ikm_voucher = IP_NULL;
        ipc_object_destroy(voucher, voucher_type);
        voucher_name = MACH_PORT_NULL;
    }

    kmsg->ikm_header->msgh_bits = MACH_MSGH_BITS_SET(reply_type, dest_type,
        voucher_type, mbits);
    kmsg->ikm_header->msgh_local_port = CAST_MACH_NAME_TO_PORT(dest_name);
    kmsg->ikm_header->msgh_remote_port = CAST_MACH_NAME_TO_PORT(reply_name);
    kmsg->ikm_header->msgh_voucher_port = voucher_name;

    if (mbits & MACH_MSGH_BITS_COMPLEX) {
        mach_msg_body_t *body;

        body = (mach_msg_body_t *) (kmsg->ikm_header + 1);
        ipc_kmsg_clean_body(kmsg, body->msgh_descriptor_count,
            (mach_msg_descriptor_t *)(body + 1));
    }
}
/*
 *  Routine:    ipc_kmsg_copyout_to_kernel
 *  Purpose:
 *      Copies out the destination and reply ports in the message.
 *      Leaves all other rights and memory in the message alone.
 *  Conditions:
 *      Nothing locked.
 *
 *  Derived from ipc_kmsg_copyout_dest.
 *  Use by mach_msg_rpc_from_kernel (which used to use copyout_dest).
 *  We really do want to save rights and memory.
 */

void
ipc_kmsg_copyout_to_kernel(
    ipc_kmsg_t      kmsg,
    ipc_space_t     space)
{
    ipc_object_t dest;
    mach_port_t reply;
    mach_msg_type_name_t dest_type;
    mach_msg_type_name_t reply_type;
    mach_port_name_t dest_name;

    ikm_validate_sig(kmsg);

    dest = ip_to_object(kmsg->ikm_header->msgh_remote_port);
    reply = kmsg->ikm_header->msgh_local_port;
    dest_type = MACH_MSGH_BITS_REMOTE(kmsg->ikm_header->msgh_bits);
    reply_type = MACH_MSGH_BITS_LOCAL(kmsg->ikm_header->msgh_bits);

    assert(IO_VALID(dest));

    io_lock(dest);

    if (io_active(dest)) {
        ipc_object_copyout_dest(space, dest, dest_type, &dest_name);
        /* dest is unlocked */
    } else {
        io_unlock(dest);
        io_release(dest);
        dest_name = MACH_PORT_DEAD;
    }

    /*
     * While MIG kernel users don't receive vouchers, the
     * msgh_voucher_port field is intended to be round-tripped through the
     * kernel if there is no voucher disposition set. Here we check for a
     * non-zero voucher disposition, and consume the voucher send right as
     * there is no possible way to specify MACH_RCV_VOUCHER semantics.
     */
    mach_msg_type_name_t voucher_type;
    voucher_type = MACH_MSGH_BITS_VOUCHER(kmsg->ikm_header->msgh_bits);
    if (voucher_type != MACH_MSGH_BITS_ZERO) {
        assert(voucher_type == MACH_MSG_TYPE_MOVE_SEND);
        /*
         * someone managed to send this kernel routine a message with
         * a voucher in it. Cleanup the reference in
         * kmsg->ikm_voucher.
         */
        if (IP_VALID(kmsg->ikm_voucher)) {
            ipc_port_release_send(kmsg->ikm_voucher);
        }
        kmsg->ikm_voucher = IP_NULL;
        kmsg->ikm_header->msgh_voucher_port = 0;
    }

    kmsg->ikm_header->msgh_bits =
        (MACH_MSGH_BITS_OTHER(kmsg->ikm_header->msgh_bits) |
        MACH_MSGH_BITS(reply_type, dest_type));
    kmsg->ikm_header->msgh_local_port = CAST_MACH_NAME_TO_PORT(dest_name);
    kmsg->ikm_header->msgh_remote_port = reply;
}
#if IKM_SUPPORT_LEGACY
void
ipc_kmsg_copyout_to_kernel_legacy(
    ipc_kmsg_t      kmsg,
    ipc_space_t     space)
{
    ipc_object_t dest;
    mach_port_t reply;
    mach_msg_type_name_t dest_type;
    mach_msg_type_name_t reply_type;
    mach_port_name_t dest_name;

    ikm_validate_sig(kmsg);

    dest = ip_to_object(kmsg->ikm_header->msgh_remote_port);
    reply = kmsg->ikm_header->msgh_local_port;
    dest_type = MACH_MSGH_BITS_REMOTE(kmsg->ikm_header->msgh_bits);
    reply_type = MACH_MSGH_BITS_LOCAL(kmsg->ikm_header->msgh_bits);

    assert(IO_VALID(dest));

    io_lock(dest);

    if (io_active(dest)) {
        ipc_object_copyout_dest(space, dest, dest_type, &dest_name);
        /* dest is unlocked */
    } else {
        io_unlock(dest);
        io_release(dest);
        dest_name = MACH_PORT_DEAD;
    }

    mach_msg_type_name_t voucher_type;
    voucher_type = MACH_MSGH_BITS_VOUCHER(kmsg->ikm_header->msgh_bits);
    if (voucher_type != MACH_MSGH_BITS_ZERO) {
        assert(voucher_type == MACH_MSG_TYPE_MOVE_SEND);
        assert(IP_VALID(kmsg->ikm_voucher));
        /*
         * someone managed to send this kernel routine a message with
         * a voucher in it. Cleanup the reference in
         * kmsg->ikm_voucher.
         */
        ipc_port_release_send(kmsg->ikm_voucher);
        kmsg->ikm_voucher = IP_NULL;
        kmsg->ikm_header->msgh_voucher_port = 0;
    }

    kmsg->ikm_header->msgh_bits =
        (MACH_MSGH_BITS_OTHER(kmsg->ikm_header->msgh_bits) |
        MACH_MSGH_BITS(reply_type, dest_type));
    kmsg->ikm_header->msgh_local_port = CAST_MACH_NAME_TO_PORT(dest_name);
    kmsg->ikm_header->msgh_remote_port = reply;
    mach_msg_descriptor_t *saddr;
    mach_msg_legacy_descriptor_t *daddr;
    mach_msg_type_number_t i, count = ((mach_msg_base_t *)kmsg->ikm_header)->body.msgh_descriptor_count;
    saddr = (mach_msg_descriptor_t *) (((mach_msg_base_t *)kmsg->ikm_header) + 1);
    saddr = &saddr[count - 1];
    daddr = (mach_msg_legacy_descriptor_t *)&saddr[count];
    daddr--;

    vm_offset_t dsc_adjust = 0;

    for (i = 0; i < count; i++, saddr--, daddr--) {
        switch (saddr->type.type) {
        case MACH_MSG_PORT_DESCRIPTOR: {
            mach_msg_port_descriptor_t *dsc = &saddr->port;
            mach_msg_legacy_port_descriptor_t *dest_dsc = &daddr->port;

            mach_port_t name = dsc->name;
            mach_msg_type_name_t disposition = dsc->disposition;

            dest_dsc->name = CAST_MACH_PORT_TO_NAME(name);
            dest_dsc->disposition = disposition;
            dest_dsc->type = MACH_MSG_PORT_DESCRIPTOR;
            break;
        }
        case MACH_MSG_OOL_VOLATILE_DESCRIPTOR:
        case MACH_MSG_OOL_DESCRIPTOR: {
            /* The sender should supply ready-made memory, i.e. a vm_map_copy_t
             * so we don't need to do anything special. */

            mach_msg_ool_descriptor_t *source_dsc = (typeof(source_dsc)) &saddr->out_of_line;

            mach_msg_ool_descriptor32_t *dest_dsc = &daddr->out_of_line32;

            vm_offset_t                 address = (vm_offset_t)source_dsc->address;
            vm_size_t                   size = source_dsc->size;
            boolean_t                   deallocate = source_dsc->deallocate;
            mach_msg_copy_options_t     copy = source_dsc->copy;
            mach_msg_descriptor_type_t  type = source_dsc->type;

            dest_dsc->address = address;
            dest_dsc->size = size;
            dest_dsc->deallocate = deallocate;
            dest_dsc->copy = copy;
            dest_dsc->type = type;
            break;
        }
        case MACH_MSG_OOL_PORTS_DESCRIPTOR: {
            mach_msg_ool_ports_descriptor_t *source_dsc = (typeof(source_dsc)) &saddr->ool_ports;

            mach_msg_ool_ports_descriptor32_t *dest_dsc = &daddr->ool_ports32;

            vm_offset_t                 address = (vm_offset_t)source_dsc->address;
            vm_size_t                   port_count = source_dsc->count;
            boolean_t                   deallocate = source_dsc->deallocate;
            mach_msg_copy_options_t     copy = source_dsc->copy;
            mach_msg_descriptor_type_t  type = source_dsc->type;

            dest_dsc->address = address;
            dest_dsc->count = port_count;
            dest_dsc->deallocate = deallocate;
            dest_dsc->copy = copy;
            dest_dsc->type = type;
            break;
        }
        case MACH_MSG_GUARDED_PORT_DESCRIPTOR: {
            mach_msg_guarded_port_descriptor_t *source_dsc = (typeof(source_dsc)) &saddr->guarded_port;
            mach_msg_guarded_port_descriptor32_t *dest_dsc = &daddr->guarded_port32;

            dest_dsc->name = CAST_MACH_PORT_TO_NAME(source_dsc->name);
            dest_dsc->disposition = source_dsc->disposition;
            dest_dsc->flags = 0;
            dest_dsc->type = MACH_MSG_GUARDED_PORT_DESCRIPTOR;
            dest_dsc->context = 0;
            break;
        }
#if MACH_ASSERT
        default:
            panic("ipc_kmsg_copyout_to_kernel_legacy: bad descriptor");
#endif /* MACH_ASSERT */
        }
    }

    if (count) {
        dsc_adjust = 4 * count;
        memmove((char *)((vm_offset_t)kmsg->ikm_header + dsc_adjust), kmsg->ikm_header, sizeof(mach_msg_base_t));
        kmsg->ikm_header = (mach_msg_header_t *)((vm_offset_t)kmsg->ikm_header + dsc_adjust);
        /* Update the message size for the smaller user representation */
        kmsg->ikm_header->msgh_size -= dsc_adjust;
    }
}
#endif /* IKM_SUPPORT_LEGACY */
#ifdef __arm64__
/*
 * Just sets those parts of the trailer that aren't set up at allocation time.
 */
static void
ipc_kmsg_munge_trailer(mach_msg_max_trailer_t *in, void *_out, boolean_t is64bit)
{
    if (is64bit) {
        mach_msg_max_trailer64_t *out = (mach_msg_max_trailer64_t *)_out;
        out->msgh_seqno = in->msgh_seqno;
        out->msgh_context = in->msgh_context;
        out->msgh_trailer_size = in->msgh_trailer_size;
        out->msgh_ad = in->msgh_ad;
    } else {
        mach_msg_max_trailer32_t *out = (mach_msg_max_trailer32_t *)_out;
        out->msgh_seqno = in->msgh_seqno;
        out->msgh_context = (mach_port_context32_t)in->msgh_context;
        out->msgh_trailer_size = in->msgh_trailer_size;
        out->msgh_ad = in->msgh_ad;
    }
}
#endif /* __arm64__ */
mach_msg_trailer_size_t
ipc_kmsg_trailer_size(
    mach_msg_option_t option,
    __unused thread_t thread)
{
    if (!(option & MACH_RCV_TRAILER_MASK)) {
        return MACH_MSG_TRAILER_MINIMUM_SIZE;
    } else {
        return REQUESTED_TRAILER_SIZE(thread_is_64bit_addr(thread), option);
    }
}
void
ipc_kmsg_add_trailer(ipc_kmsg_t kmsg, ipc_space_t space __unused,
    mach_msg_option_t option, __unused thread_t thread,
    mach_port_seqno_t seqno, boolean_t minimal_trailer,
    mach_vm_offset_t context)
{
    mach_msg_max_trailer_t *trailer;

#ifdef __arm64__
    mach_msg_max_trailer_t tmp_trailer; /* This accommodates U64, and we'll munge */
    void *real_trailer_out = (void*)(mach_msg_max_trailer_t *)
        ((vm_offset_t)kmsg->ikm_header +
        mach_round_msg(kmsg->ikm_header->msgh_size));

    /*
     * Populate scratch with initial values set up at message allocation time.
     * After, we reinterpret the space in the message as the right type
     * of trailer for the address space in question.
     */
    bcopy(real_trailer_out, &tmp_trailer, MAX_TRAILER_SIZE);
    trailer = &tmp_trailer;
#else /* __arm64__ */
    trailer = (mach_msg_max_trailer_t *)
        ((vm_offset_t)kmsg->ikm_header +
        mach_round_msg(kmsg->ikm_header->msgh_size));
#endif /* __arm64__ */

    if (!(option & MACH_RCV_TRAILER_MASK)) {
        return;
    }

    trailer->msgh_seqno = seqno;
    trailer->msgh_context = context;
    trailer->msgh_trailer_size = REQUESTED_TRAILER_SIZE(thread_is_64bit_addr(thread), option);

    if (minimal_trailer) {
        goto done;
    }

    if (GET_RCV_ELEMENTS(option) >= MACH_RCV_TRAILER_AV) {
        trailer->msgh_ad = kmsg->ikm_filter_policy_id;
    }

    /*
     * The ipc_kmsg_t holds a reference to the label of a label
     * handle, not the port. We must get a reference to the port
     * and a send right to copyout to the receiver.
     */

    if (option & MACH_RCV_TRAILER_ELEMENTS(MACH_RCV_TRAILER_LABELS)) {
        trailer->msgh_labels.sender = 0;
    }

done:
#ifdef __arm64__
    ipc_kmsg_munge_trailer(trailer, real_trailer_out, thread_is_64bit_addr(thread));
#endif /* __arm64__ */
    return;
}
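/*
 * Explanatory note: on arm64 the trailer is staged in tmp_trailer (always in
 * the kernel's 64-bit layout) and only converted to the receiver's 32- or
 * 64-bit layout by ipc_kmsg_munge_trailer() at "done:"; on other
 * architectures the trailer template that was placed after the message at
 * allocation time is edited in place.
 */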
mach_msg_header_t *
ipc_kmsg_msg_header(ipc_kmsg_t kmsg)
{
    if (NULL == kmsg) {
        return NULL;
    }
    return kmsg->ikm_header;
}