]> git.saurik.com Git - apple/xnu.git/blob - osfmk/ipc/ipc_kmsg.c
xnu-7195.60.75.tar.gz
[apple/xnu.git] / osfmk / ipc / ipc_kmsg.c
1 /*
2 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
58 * support for mandatory and extensible security protections. This notice
59 * is included in support of clause 2.2 (b) of the Apple Public License,
60 * Version 2.0.
61 * Copyright (c) 2005 SPARTA, Inc.
62 */
63 /*
64 */
65 /*
66 * File: ipc/ipc_kmsg.c
67 * Author: Rich Draves
68 * Date: 1989
69 *
70 * Operations on kernel messages.
71 */
72
73
74 #include <mach/mach_types.h>
75 #include <mach/boolean.h>
76 #include <mach/kern_return.h>
77 #include <mach/message.h>
78 #include <mach/port.h>
79 #include <mach/vm_map.h>
80 #include <mach/mach_vm.h>
81 #include <mach/vm_statistics.h>
82
83 #include <kern/kern_types.h>
84 #include <kern/assert.h>
85 #include <kern/debug.h>
86 #include <kern/ipc_kobject.h>
87 #include <kern/kalloc.h>
88 #include <kern/zalloc.h>
89 #include <kern/processor.h>
90 #include <kern/thread.h>
91 #include <kern/sched_prim.h>
92 #include <kern/misc_protos.h>
93 #include <kern/counters.h>
94 #include <kern/cpu_data.h>
95 #include <kern/policy_internal.h>
96 #include <kern/mach_filter.h>
97
98 #include <pthread/priority_private.h>
99
100 #include <machine/limits.h>
101
102 #include <vm/vm_map.h>
103 #include <vm/vm_object.h>
104 #include <vm/vm_kern.h>
105
106 #include <ipc/port.h>
107 #include <ipc/ipc_types.h>
108 #include <ipc/ipc_entry.h>
109 #include <ipc/ipc_kmsg.h>
110 #include <ipc/ipc_notify.h>
111 #include <ipc/ipc_object.h>
112 #include <ipc/ipc_space.h>
113 #include <ipc/ipc_port.h>
114 #include <ipc/ipc_right.h>
115 #include <ipc/ipc_hash.h>
116 #include <ipc/ipc_table.h>
117 #include <ipc/ipc_importance.h>
118 #if MACH_FLIPC
119 #include <kern/mach_node.h>
120 #include <ipc/flipc.h>
121 #endif
122
123 #include <os/overflow.h>
124
125 #include <security/mac_mach_internal.h>
126
127 #include <device/device_server.h>
128
129 #include <string.h>
130
131 #ifdef ppc
132 #include <ppc/Firmware.h>
133 #include <ppc/low_trace.h>
134 #endif
135
136 #if DEBUG
137 #define DEBUG_MSGS_K64 1
138 #endif
139
140 #include <sys/kdebug.h>
141 #include <libkern/OSAtomic.h>
142
143 #include <libkern/crypto/sha2.h>
144
145 #include <ptrauth.h>
146 #if __has_feature(ptrauth_calls)
147 #include <libkern/ptrauth_utils.h>
148 #endif
149
#pragma pack(4)

/*
 * Message header as seen by 32-bit user tasks: every port field is a
 * 32-bit mach_port_name_t rather than a kernel port pointer, hence
 * "legacy" relative to the kernel's mach_msg_header_t.
 */
typedef struct{
	mach_msg_bits_t msgh_bits;
	mach_msg_size_t msgh_size;
	mach_port_name_t msgh_remote_port;
	mach_port_name_t msgh_local_port;
	mach_port_name_t msgh_voucher_port;
	mach_msg_id_t msgh_id;
} mach_msg_legacy_header_t;

/* Legacy header plus descriptor count: the start of a complex message. */
typedef struct{
	mach_msg_legacy_header_t header;
	mach_msg_body_t body;
} mach_msg_legacy_base_t;

/* 32-bit user-visible layout of a port descriptor. */
typedef struct{
	mach_port_name_t name;
	mach_msg_size_t pad1;
	uint32_t pad2 : 16;
	mach_msg_type_name_t disposition : 8;
	mach_msg_descriptor_type_t type : 8;
} mach_msg_legacy_port_descriptor_t;


/* Union of the 32-bit user descriptor shapes; 'type' overlays the tag. */
typedef union{
	mach_msg_legacy_port_descriptor_t port;
	mach_msg_ool_descriptor32_t out_of_line32;
	mach_msg_ool_ports_descriptor32_t ool_ports32;
	mach_msg_guarded_port_descriptor32_t guarded_port32;
	mach_msg_type_descriptor_t type;
} mach_msg_legacy_descriptor_t;

#pragma pack()

/* Size difference between the kernel header and the 32-bit user header. */
#define LEGACY_HEADER_SIZE_DELTA ((mach_msg_size_t)(sizeof(mach_msg_header_t) - sizeof(mach_msg_legacy_header_t)))

// END LP64 fixes
188
#if __has_feature(ptrauth_calls)
/*
 * Pointer-authentication backed message signing: the scratch value is
 * the running signature itself, chained through each signed chunk.
 */
typedef uintptr_t ikm_sig_scratch_t;

/* Seed the running signature with a fixed discriminator for kmsg signatures. */
static void
ikm_init_sig(
	__unused ipc_kmsg_t kmsg,
	ikm_sig_scratch_t *scratchp)
{
	*scratchp = OS_PTRAUTH_DISCRIMINATOR("kmsg.ikm_signature");
}

/* Fold one chunk of message data into the running signature. */
static void
ikm_chunk_sig(
	ipc_kmsg_t kmsg,
	void *data,
	size_t len,
	ikm_sig_scratch_t *scratchp)
{
	int ptrauth_flags;
	void *trailerp;

	/*
	 * if we happen to be doing the trailer chunk,
	 * diversify with the ptrauth-ed trailer pointer -
	 * as that is unchanging for the kmsg
	 */
	trailerp = (void *)
	    ((vm_offset_t)kmsg->ikm_header +
	    mach_round_msg(kmsg->ikm_header->msgh_size));

	ptrauth_flags = (data == trailerp) ? PTRAUTH_ADDR_DIVERSIFY : 0;
	*scratchp = ptrauth_utils_sign_blob_generic(data, len, *scratchp, ptrauth_flags);
}

/* The final signature is just the chained scratch value. */
static uintptr_t
ikm_finalize_sig(
	__unused ipc_kmsg_t kmsg,
	ikm_sig_scratch_t *scratchp)
{
	return *scratchp;
}

230
#elif defined(CRYPTO_SHA2) && !defined(__x86_64__) && !defined(__arm__)

/* SHA-256 backed signing for platforms without ptrauth but with SHA-2. */
typedef SHA256_CTX ikm_sig_scratch_t;

/* Start a SHA-256 computation, salted with the kernel addrhash salt. */
static void
ikm_init_sig(
	__unused ipc_kmsg_t kmsg,
	ikm_sig_scratch_t *scratchp)
{
	SHA256_Init(scratchp);
	SHA256_Update(scratchp, &vm_kernel_addrhash_salt_ext, sizeof(uint64_t));
}

/* Absorb one chunk of message data into the running hash. */
static void
ikm_chunk_sig(
	__unused ipc_kmsg_t kmsg,
	void *data,
	size_t len,
	ikm_sig_scratch_t *scratchp)
{
	SHA256_Update(scratchp, data, len);
}

/* Finish the hash and truncate the digest to a single uintptr_t. */
static uintptr_t
ikm_finalize_sig(
	__unused ipc_kmsg_t kmsg,
	ikm_sig_scratch_t *scratchp)
{
	uintptr_t sha_digest[SHA256_DIGEST_LENGTH / sizeof(uintptr_t)];

	SHA256_Final((uint8_t *)sha_digest, scratchp);

	/*
	 * Only use one uintptr_t sized part of result for space and compat reasons.
	 * Truncation is better than XOR'ing the chunks together in hopes of higher
	 * entropy - because of its lower risk of collisions.
	 */
	return *sha_digest;
}

270
#else
/* Stubbed out implementation (for __x86_64__, __arm__ for now) */

typedef uintptr_t ikm_sig_scratch_t;

/* No-op signing: the signature is always zero on these platforms. */
static void
ikm_init_sig(
	__unused ipc_kmsg_t kmsg,
	ikm_sig_scratch_t *scratchp)
{
	*scratchp = 0;
}

static void
ikm_chunk_sig(
	__unused ipc_kmsg_t kmsg,
	__unused void *data,
	__unused size_t len,
	__unused ikm_sig_scratch_t *scratchp)
{
	return;
}

static uintptr_t
ikm_finalize_sig(
	__unused ipc_kmsg_t kmsg,
	ikm_sig_scratch_t *scratchp)
{
	return *scratchp;
}

#endif
303
/*
 * Fold a sanitized snapshot of the message header (and, for complex
 * messages, the descriptor count) into the running signature.  Bits
 * that may legitimately change while the kmsg is queued (non-user
 * msgh_bits and the voucher disposition bits) are masked off so a
 * later validation of the same message still matches.
 */
static void
ikm_header_sig(
	ipc_kmsg_t kmsg,
	ikm_sig_scratch_t *scratchp)
{
	mach_msg_size_t dsc_count;
	mach_msg_base_t base;
	boolean_t complex;

	/* take a snapshot of the message header/body-count */
	base.header = *kmsg->ikm_header;
	complex = ((base.header.msgh_bits & MACH_MSGH_BITS_COMPLEX) != 0);
	if (complex) {
		dsc_count = ((mach_msg_body_t *)(kmsg->ikm_header + 1))->msgh_descriptor_count;
	} else {
		dsc_count = 0;
	}
	base.body.msgh_descriptor_count = dsc_count;

	/* compute sig of a copy of the header with all varying bits masked off */
	base.header.msgh_bits &= MACH_MSGH_BITS_USER;
	base.header.msgh_bits &= ~MACH_MSGH_BITS_VOUCHER_MASK;
	ikm_chunk_sig(kmsg, &base, sizeof(mach_msg_base_t), scratchp);
}
328
/*
 * Fold the message trailer into the running signature.  The trailer
 * sits immediately after the (rounded-up) message body.
 */
static void
ikm_trailer_sig(
	ipc_kmsg_t kmsg,
	ikm_sig_scratch_t *scratchp)
{
	mach_msg_max_trailer_t *trailerp;

	/* Add sig of the trailer contents */
	trailerp = (mach_msg_max_trailer_t *)
	    ((vm_offset_t)kmsg->ikm_header +
	    mach_round_msg(kmsg->ikm_header->msgh_size));
	ikm_chunk_sig(kmsg, trailerp, sizeof(*trailerp), scratchp);
}
342
/* Compute the signature for the body bits of a message */
static void
ikm_body_sig(
	ipc_kmsg_t kmsg,
	ikm_sig_scratch_t *scratchp)
{
	mach_msg_descriptor_t *kern_dsc;
	mach_msg_size_t dsc_count;
	mach_msg_body_t *body;
	mach_msg_size_t i;

	/* simple (non-complex) messages have no descriptors to sign */
	if ((kmsg->ikm_header->msgh_bits & MACH_MSGH_BITS_COMPLEX) == 0) {
		return;
	}
	body = (mach_msg_body_t *) (kmsg->ikm_header + 1);
	dsc_count = body->msgh_descriptor_count;

	if (dsc_count == 0) {
		return;
	}

	kern_dsc = (mach_msg_descriptor_t *) (body + 1);

	/* Compute the signature for the whole descriptor array */
	ikm_chunk_sig(kmsg, kern_dsc, sizeof(*kern_dsc) * dsc_count, scratchp);

	/* look for descriptor contents that need a signature */
	for (i = 0; i < dsc_count; i++) {
		switch (kern_dsc[i].type.type) {
		/* these carry no out-of-line pointer arrays; array sig above suffices */
		case MACH_MSG_PORT_DESCRIPTOR:
		case MACH_MSG_GUARDED_PORT_DESCRIPTOR:
		case MACH_MSG_OOL_VOLATILE_DESCRIPTOR:
		case MACH_MSG_OOL_DESCRIPTOR:
			break;

		case MACH_MSG_OOL_PORTS_DESCRIPTOR: {
			mach_msg_ool_ports_descriptor_t *ports_dsc;

			/* Compute sig for the port/object pointers */
			ports_dsc = (mach_msg_ool_ports_descriptor_t *)&kern_dsc[i];
			ikm_chunk_sig(kmsg, ports_dsc->address, ports_dsc->count * sizeof(ipc_object_t), scratchp);
			break;
		}
		default: {
			/* descriptor types were validated at copyin; anything else is corruption */
			panic("ipc_kmsg_body_sig: invalid message descriptor");
		}
		}
	}
}
392
/*
 * Compute and store the integrity signature(s) for a kmsg: header,
 * then trailer, then body are chained into one signature.  With
 * IKM_PARTIAL_SIG builds, the intermediate header-only and
 * header+trailer signatures are also saved so that a later mismatch
 * can be localized to the region that changed.
 */
static void
ikm_sign(
	ipc_kmsg_t kmsg)
{
	ikm_sig_scratch_t scratch;
	uintptr_t sig;

	/* guard against signing something that is not a kmsg zone element */
	zone_require(ipc_kmsg_zone, kmsg);

	ikm_init_sig(kmsg, &scratch);

	ikm_header_sig(kmsg, &scratch);
#if IKM_PARTIAL_SIG
	/* save off partial signature for just header */
	sig = ikm_finalize_sig(kmsg, &scratch);
	kmsg->ikm_header_sig = sig;
#endif

	ikm_trailer_sig(kmsg, &scratch);
#if IKM_PARTIAL_SIG
	/* save off partial signature for header+trailer */
	sig = ikm_finalize_sig(kmsg, &scratch);
	kmsg->ikm_headtrail_sig = sig;
#endif

	ikm_body_sig(kmsg, &scratch);
	sig = ikm_finalize_sig(kmsg, &scratch);
	kmsg->ikm_signature = sig;
}
422
/* diagnostics for kmsg signature validation failures */
unsigned int ikm_signature_failures;         /* full-signature mismatches seen */
unsigned int ikm_signature_failure_id;       /* msgh_id of the last failing message */
#if (DEVELOPMENT || DEBUG)
unsigned int ikm_signature_panic_disable;    /* nonzero: count failures instead of panicking */
unsigned int ikm_signature_header_failures;  /* partial (header-only) sig mismatches */
unsigned int ikm_signature_trailer_failures; /* partial (header+trailer) sig mismatches */
#endif
430
/*
 * Recompute the signature of a kmsg and compare it against the value
 * stored by ikm_sign().  A mismatch means the message was modified
 * while queued and results in a panic (suppressible on DEVELOPMENT /
 * DEBUG kernels via ikm_signature_panic_disable).  IKM_PARTIAL_SIG
 * builds additionally check the intermediate header and
 * header+trailer signatures to localize the corruption.
 */
static void
ikm_validate_sig(
	ipc_kmsg_t kmsg)
{
	ikm_sig_scratch_t scratch;
	uintptr_t expected;
	uintptr_t sig;
	char *str;

	/* guard against validating something that is not a kmsg zone element */
	zone_require(ipc_kmsg_zone, kmsg);

	ikm_init_sig(kmsg, &scratch);

	ikm_header_sig(kmsg, &scratch);
#if IKM_PARTIAL_SIG
	/* Do partial evaluation of just the header signature */
	sig = ikm_finalize_sig(kmsg, &scratch);
	expected = kmsg->ikm_header_sig;
	if (sig != expected) {
		ikm_signature_header_failures++;
		str = "header";
		goto failure;
	}
#endif

	ikm_trailer_sig(kmsg, &scratch);
#if IKM_PARTIAL_SIG
	/* Do partial evaluation of header+trailer signature */
	sig = ikm_finalize_sig(kmsg, &scratch);
	expected = kmsg->ikm_headtrail_sig;
	if (sig != expected) {
		ikm_signature_trailer_failures++;
		str = "trailer";
		goto failure;
	}
#endif

	ikm_body_sig(kmsg, &scratch);
	sig = ikm_finalize_sig(kmsg, &scratch);

	expected = kmsg->ikm_signature;
	if (sig != expected) {
		ikm_signature_failures++;
		str = "full";

#if IKM_PARTIAL_SIG
failure:
#endif
		{
			mach_msg_id_t id = kmsg->ikm_header->msgh_id;

			/* record the offending message id for post-mortem triage */
			ikm_signature_failure_id = id;
#if (DEVELOPMENT || DEBUG)
			if (ikm_signature_panic_disable) {
				return;
			}
#endif
			panic("ikm_validate_sig: %s signature mismatch: kmsg=0x%p, id=%d, sig=0x%zx (expected 0x%zx)",
			    str, kmsg, id, sig, expected);
		}
	}
}
493
494 #if DEBUG_MSGS_K64
495 extern void ipc_pset_print64(
496 ipc_pset_t pset);
497
498 extern void ipc_kmsg_print64(
499 ipc_kmsg_t kmsg,
500 const char *str);
501
502 extern void ipc_msg_print64(
503 mach_msg_header_t *msgh);
504
505 extern ipc_port_t ipc_name_to_data64(
506 task_t task,
507 mach_port_name_t name);
508
509 /*
510 * Forward declarations
511 */
512 void ipc_msg_print_untyped64(
513 mach_msg_body_t *body);
514
515 const char * ipc_type_name64(
516 int type_name,
517 boolean_t received);
518
519 void ipc_print_type_name64(
520 int type_name);
521
522 const char *
523 msgh_bit_decode64(
524 mach_msg_bits_t bit);
525
526 const char *
527 mm_copy_options_string64(
528 mach_msg_copy_options_t option);
529
530 void db_print_msg_uid64(mach_msg_header_t *);
531
/*
 * Hex-dump a message body, eight 32-bit words per line.  'size' is the
 * total message size; the header size is subtracted so the dump stops
 * at the end of the body.
 */
static void
ipc_msg_body_print64(void *body, int size)
{
	uint32_t *word = (uint32_t *) body;
	uint32_t *end = (uint32_t *)(((uintptr_t) body) + size
	    - sizeof(mach_msg_header_t));
	int i;

	kprintf(" body(%p-%p):\n %p: ", body, end, word);
	for (;;) {
		for (i = 0; i < 8; i++, word++) {
			/* bounds check before dereference: may end mid-row */
			if (word >= end) {
				kprintf("\n");
				return;
			}
			kprintf("%08x ", *word);
		}
		kprintf("\n %p: ", word);
	}
}
552
553
554 const char *
555 ipc_type_name64(
556 int type_name,
557 boolean_t received)
558 {
559 switch (type_name) {
560 case MACH_MSG_TYPE_PORT_NAME:
561 return "port_name";
562
563 case MACH_MSG_TYPE_MOVE_RECEIVE:
564 if (received) {
565 return "port_receive";
566 } else {
567 return "move_receive";
568 }
569
570 case MACH_MSG_TYPE_MOVE_SEND:
571 if (received) {
572 return "port_send";
573 } else {
574 return "move_send";
575 }
576
577 case MACH_MSG_TYPE_MOVE_SEND_ONCE:
578 if (received) {
579 return "port_send_once";
580 } else {
581 return "move_send_once";
582 }
583
584 case MACH_MSG_TYPE_COPY_SEND:
585 return "copy_send";
586
587 case MACH_MSG_TYPE_MAKE_SEND:
588 return "make_send";
589
590 case MACH_MSG_TYPE_MAKE_SEND_ONCE:
591 return "make_send_once";
592
593 default:
594 return (char *) 0;
595 }
596 }
597
598 void
599 ipc_print_type_name64(
600 int type_name)
601 {
602 const char *name = ipc_type_name64(type_name, TRUE);
603 if (name) {
604 kprintf("%s", name);
605 } else {
606 kprintf("type%d", type_name);
607 }
608 }
609
/*
 *	ipc_kmsg_print64	[ debug ]
 *
 * Print a kmsg's queue linkage and allocation size, then dump the
 * message itself via ipc_msg_print64().
 */
void
ipc_kmsg_print64(
	ipc_kmsg_t kmsg,
	const char *str)
{
	kprintf("%s kmsg=%p:\n", str, kmsg);
	kprintf(" next=%p, prev=%p, size=%d",
	    kmsg->ikm_next,
	    kmsg->ikm_prev,
	    kmsg->ikm_size);
	kprintf("\n");
	ipc_msg_print64(kmsg->ikm_header);
}
626
627 const char *
628 msgh_bit_decode64(
629 mach_msg_bits_t bit)
630 {
631 switch (bit) {
632 case MACH_MSGH_BITS_COMPLEX: return "complex";
633 case MACH_MSGH_BITS_CIRCULAR: return "circular";
634 default: return (char *) 0;
635 }
636 }
637
/*
 *	ipc_msg_print64	[ debug ]
 *
 * Decode and print a message header: raw and decoded msgh_bits, the
 * remote/local ports with their dispositions, id and size; then the
 * untyped descriptor dump (complex messages) and a raw body hex dump.
 */
void
ipc_msg_print64(
	mach_msg_header_t *msgh)
{
	mach_msg_bits_t mbits;
	unsigned int bit, i;
	const char *bit_name;
	int needs_comma;

	mbits = msgh->msgh_bits;
	kprintf(" msgh_bits=0x%x: l=0x%x,r=0x%x\n",
	    mbits,
	    MACH_MSGH_BITS_LOCAL(msgh->msgh_bits),
	    MACH_MSGH_BITS_REMOTE(msgh->msgh_bits));

	/* walk each "other" bit and print its symbolic name if known */
	mbits = MACH_MSGH_BITS_OTHER(mbits) & MACH_MSGH_BITS_USED;
	kprintf(" decoded bits: ");
	needs_comma = 0;
	for (i = 0, bit = 1; i < sizeof(mbits) * 8; ++i, bit <<= 1) {
		if ((mbits & bit) == 0) {
			continue;
		}
		bit_name = msgh_bit_decode64((mach_msg_bits_t)bit);
		if (bit_name) {
			kprintf("%s%s", needs_comma ? "," : "", bit_name);
		} else {
			kprintf("%sunknown(0x%x),", needs_comma ? "," : "", bit);
		}
		++needs_comma;
	}
	/* any bits outside the defined set are reported as "unused" */
	if (msgh->msgh_bits & ~MACH_MSGH_BITS_USED) {
		kprintf("%sunused=0x%x,", needs_comma ? "," : "",
		    msgh->msgh_bits & ~MACH_MSGH_BITS_USED);
	}
	kprintf("\n");

	needs_comma = 1;
	if (msgh->msgh_remote_port) {
		kprintf(" remote=%p(", msgh->msgh_remote_port);
		ipc_print_type_name64(MACH_MSGH_BITS_REMOTE(msgh->msgh_bits));
		kprintf(")");
	} else {
		kprintf(" remote=null");
	}

	if (msgh->msgh_local_port) {
		kprintf("%slocal=%p(", needs_comma ? "," : "",
		    msgh->msgh_local_port);
		ipc_print_type_name64(MACH_MSGH_BITS_LOCAL(msgh->msgh_bits));
		kprintf(")\n");
	} else {
		kprintf("local=null\n");
	}

	kprintf(" msgh_id=%d, size=%d\n",
	    msgh->msgh_id,
	    msgh->msgh_size);

	/* complex message: print the typed descriptors first */
	if (mbits & MACH_MSGH_BITS_COMPLEX) {
		ipc_msg_print_untyped64((mach_msg_body_t *) (msgh + 1));
	}

	ipc_msg_body_print64((void *)(msgh + 1), msgh->msgh_size);
}
705
706
707 const char *
708 mm_copy_options_string64(
709 mach_msg_copy_options_t option)
710 {
711 const char *name;
712
713 switch (option) {
714 case MACH_MSG_PHYSICAL_COPY:
715 name = "PHYSICAL";
716 break;
717 case MACH_MSG_VIRTUAL_COPY:
718 name = "VIRTUAL";
719 break;
720 case MACH_MSG_OVERWRITE:
721 name = "OVERWRITE(DEPRECATED)";
722 break;
723 case MACH_MSG_ALLOCATE:
724 name = "ALLOCATE";
725 break;
726 case MACH_MSG_KALLOC_COPY_T:
727 name = "KALLOC_COPY_T";
728 break;
729 default:
730 name = "unknown";
731 break;
732 }
733 return name;
734 }
735
/*
 * Print every descriptor in a complex message body: ports, OOL memory
 * regions, OOL port arrays, and guarded ports, with their dispositions
 * and copy options.
 */
void
ipc_msg_print_untyped64(
	mach_msg_body_t *body)
{
	mach_msg_descriptor_t *saddr, *send;
	mach_msg_descriptor_type_t type;

	kprintf(" %d descriptors: \n", body->msgh_descriptor_count);

	/* descriptor array starts immediately after the body count */
	saddr = (mach_msg_descriptor_t *) (body + 1);
	send = saddr + body->msgh_descriptor_count;

	for (; saddr < send; saddr++) {
		type = saddr->type.type;

		switch (type) {
		case MACH_MSG_PORT_DESCRIPTOR: {
			mach_msg_port_descriptor_t *dsc;

			dsc = &saddr->port;
			kprintf(" PORT name = %p disp = ", dsc->name);
			ipc_print_type_name64(dsc->disposition);
			kprintf("\n");
			break;
		}
		case MACH_MSG_OOL_VOLATILE_DESCRIPTOR:
		case MACH_MSG_OOL_DESCRIPTOR: {
			mach_msg_ool_descriptor_t *dsc;

			dsc = (mach_msg_ool_descriptor_t *) &saddr->out_of_line;
			kprintf(" OOL%s addr = %p size = 0x%x copy = %s %s\n",
			    type == MACH_MSG_OOL_DESCRIPTOR ? "" : " VOLATILE",
			    dsc->address, dsc->size,
			    mm_copy_options_string64(dsc->copy),
			    dsc->deallocate ? "DEALLOC" : "");
			break;
		}
		case MACH_MSG_OOL_PORTS_DESCRIPTOR: {
			mach_msg_ool_ports_descriptor_t *dsc;

			dsc = (mach_msg_ool_ports_descriptor_t *) &saddr->ool_ports;

			kprintf(" OOL_PORTS addr = %p count = 0x%x ",
			    dsc->address, dsc->count);
			kprintf("disp = ");
			ipc_print_type_name64(dsc->disposition);
			kprintf(" copy = %s %s\n",
			    mm_copy_options_string64(dsc->copy),
			    dsc->deallocate ? "DEALLOC" : "");
			break;
		}
		case MACH_MSG_GUARDED_PORT_DESCRIPTOR: {
			mach_msg_guarded_port_descriptor_t *dsc;

			dsc = (mach_msg_guarded_port_descriptor_t *)&saddr->guarded_port;
			kprintf(" GUARDED_PORT name = %p flags = 0x%x disp = ", dsc->name, dsc->flags);
			ipc_print_type_name64(dsc->disposition);
			kprintf("\n");
			break;
		}
		default: {
			kprintf(" UNKNOWN DESCRIPTOR 0x%x\n", type);
			break;
		}
		}
	}
}
803
/*
 * Conditionally dump a kmsg / raw message body when IPC syscall
 * kprintf debugging is enabled.  Compiled to nothing on
 * non-DEBUG_MSGS_K64 builds.
 */
#define DEBUG_IPC_KMSG_PRINT(kmsg, string) \
__unreachable_ok_push \
	if (DEBUG_KPRINT_SYSCALL_PREDICATE(DEBUG_KPRINT_SYSCALL_IPC_MASK)) { \
	        ipc_kmsg_print64(kmsg, string); \
	} \
__unreachable_ok_pop

#define DEBUG_IPC_MSG_BODY_PRINT(body, size) \
__unreachable_ok_push \
	if (DEBUG_KPRINT_SYSCALL_PREDICATE(DEBUG_KPRINT_SYSCALL_IPC_MASK)) { \
	        ipc_msg_body_print64(body,size);\
	} \
__unreachable_ok_pop
#else /* !DEBUG_MSGS_K64 */
#define DEBUG_IPC_KMSG_PRINT(kmsg, string)
#define DEBUG_IPC_MSG_BODY_PRINT(body, size)
#endif /* !DEBUG_MSGS_K64 */
821
/* kmsg size limits and copy map, defined in ipc_init / mach_msg */
extern vm_map_t ipc_kernel_copy_map;
extern vm_size_t ipc_kmsg_max_space;
extern const vm_size_t ipc_kmsg_max_vm_space;
extern const vm_size_t ipc_kmsg_max_body_space;
extern vm_size_t msg_ool_size_small;

#define MSG_OOL_SIZE_SMALL      msg_ool_size_small

/*
 * "OTHER" descriptors are the user descriptor layout of the opposite
 * bitness from the kernel: 32-bit layouts on LP64 kernels and vice
 * versa.  MAP_SIZE_DIFFERS is true when a task's address-space size
 * differs from the kernel's.
 */
#if defined(__LP64__)
#define MAP_SIZE_DIFFERS(map)   (map->max_offset < MACH_VM_MAX_ADDRESS)
#define OTHER_OOL_DESCRIPTOR    mach_msg_ool_descriptor32_t
#define OTHER_OOL_PORTS_DESCRIPTOR      mach_msg_ool_ports_descriptor32_t
#else
#define MAP_SIZE_DIFFERS(map)   (map->max_offset > VM_MAX_ADDRESS)
#define OTHER_OOL_DESCRIPTOR    mach_msg_ool_descriptor64_t
#define OTHER_OOL_PORTS_DESCRIPTOR      mach_msg_ool_ports_descriptor64_t
#endif

/* bytes an OOL descriptor grows by between the 32- and 64-bit layouts */
#define DESC_SIZE_ADJUSTMENT    ((mach_msg_size_t)(sizeof(mach_msg_ool_descriptor64_t) - \
	sizeof(mach_msg_ool_descriptor32_t)))

/* scatter list macros */

/* Advance (s) past leading port descriptors; null it out if (c) hits zero. */
#define SKIP_PORT_DESCRIPTORS(s, c) \
MACRO_BEGIN \
	if ((s) != MACH_MSG_DESCRIPTOR_NULL) { \
	        while ((c) > 0) { \
	                if ((s)->type.type != MACH_MSG_PORT_DESCRIPTOR) \
	                        break; \
	                (s)++; (c)--; \
	        } \
	        if (c == 0) \
	                (s) = MACH_MSG_DESCRIPTOR_NULL; \
	} \
MACRO_END

/* Step to the next scatter descriptor; (d) selects the other-bitness stride. */
#define INCREMENT_SCATTER(s, c, d) \
MACRO_BEGIN \
	if ((s) != MACH_MSG_DESCRIPTOR_NULL) { \
	    s = (d) ? (mach_msg_descriptor_t *) \
	        ((OTHER_OOL_DESCRIPTOR *)(s) + 1) : \
	        (s + 1); \
	        (c)--; \
	} \
MACRO_END

/* bit flags packed into the MACH_IPC_KMSG_INFO tracepoint's last argument */
#define KMSG_TRACE_FLAG_TRACED     0x000001
#define KMSG_TRACE_FLAG_COMPLEX    0x000002
#define KMSG_TRACE_FLAG_OOLMEM     0x000004
#define KMSG_TRACE_FLAG_VCPY       0x000008
#define KMSG_TRACE_FLAG_PCPY       0x000010
#define KMSG_TRACE_FLAG_SND64      0x000020
#define KMSG_TRACE_FLAG_RAISEIMP   0x000040
#define KMSG_TRACE_FLAG_APP_SRC    0x000080
#define KMSG_TRACE_FLAG_APP_DST    0x000100
#define KMSG_TRACE_FLAG_DAEMON_SRC 0x000200
#define KMSG_TRACE_FLAG_DAEMON_DST 0x000400
#define KMSG_TRACE_FLAG_DST_NDFLTQ 0x000800
#define KMSG_TRACE_FLAG_SRC_NDFLTQ 0x001000
#define KMSG_TRACE_FLAG_DST_SONCE  0x002000
#define KMSG_TRACE_FLAG_SRC_SONCE  0x004000
#define KMSG_TRACE_FLAG_CHECKIN    0x008000
#define KMSG_TRACE_FLAG_ONEWAY     0x010000
#define KMSG_TRACE_FLAG_IOKIT      0x020000
#define KMSG_TRACE_FLAG_SNDRCV     0x040000
#define KMSG_TRACE_FLAG_DSTQFULL   0x080000
#define KMSG_TRACE_FLAG_VOUCHER    0x100000
#define KMSG_TRACE_FLAG_TIMER      0x200000
#define KMSG_TRACE_FLAG_SEMA       0x400000
#define KMSG_TRACE_FLAG_DTMPOWNER  0x800000
#define KMSG_TRACE_FLAG_GUARDED_DESC 0x1000000

#define KMSG_TRACE_FLAGS_MASK      0x1ffffff
#define KMSG_TRACE_FLAGS_SHIFT     8

/* low byte of the same argument carries the (saturating) port count */
#define KMSG_TRACE_PORTS_MASK      0xff
#define KMSG_TRACE_PORTS_SHIFT     0
899
900 #if (KDEBUG_LEVEL >= KDEBUG_LEVEL_STANDARD)
901 #include <stdint.h>
902
/*
 * Emit the MACH_IPC_KMSG_INFO tracepoint for a message being sent:
 * sender/destination pids, an adjusted message size, and a packed word
 * of KMSG_TRACE_FLAG_* bits plus the number of ports carried.  Bails
 * out early (cheaply) when the tracepoint would not actually fire.
 * Briefly takes the destination port lock.
 */
void
ipc_kmsg_trace_send(ipc_kmsg_t kmsg,
    mach_msg_option_t option)
{
	task_t send_task = TASK_NULL;
	ipc_port_t dst_port, src_port;
	boolean_t is_task_64bit;
	mach_msg_header_t *msg;
	mach_msg_trailer_t *trailer;

	int kotype = 0;
	uint32_t msg_size = 0;
	uint64_t msg_flags = KMSG_TRACE_FLAG_TRACED;
	uint32_t num_ports = 0;
	uint32_t send_pid, dst_pid;

	/*
	 * check to see not only if ktracing is enabled, but if we will
	 * _actually_ emit the KMSG_INFO tracepoint. This saves us a
	 * significant amount of processing (and a port lock hold) in
	 * the non-tracing case.
	 */
	if (__probable((kdebug_enable & KDEBUG_TRACE) == 0)) {
		return;
	}
	if (!kdebug_debugid_enabled(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_KMSG_INFO))) {
		return;
	}

	msg = kmsg->ikm_header;

	dst_port = msg->msgh_remote_port;
	if (!IPC_PORT_VALID(dst_port)) {
		return;
	}

	/*
	 * Message properties / options
	 */
	if ((option & (MACH_SEND_MSG | MACH_RCV_MSG)) == (MACH_SEND_MSG | MACH_RCV_MSG)) {
		msg_flags |= KMSG_TRACE_FLAG_SNDRCV;
	}

	if (msg->msgh_id >= is_iokit_subsystem.start &&
	    msg->msgh_id < is_iokit_subsystem.end + 100) {
		msg_flags |= KMSG_TRACE_FLAG_IOKIT;
	}
	/* magic XPC checkin message id (XPC_MESSAGE_ID_CHECKIN) from libxpc */
	else if (msg->msgh_id == 0x77303074u /* w00t */) {
		msg_flags |= KMSG_TRACE_FLAG_CHECKIN;
	}

	if (msg->msgh_bits & MACH_MSGH_BITS_RAISEIMP) {
		msg_flags |= KMSG_TRACE_FLAG_RAISEIMP;
	}

	if (unsafe_convert_port_to_voucher(kmsg->ikm_voucher)) {
		msg_flags |= KMSG_TRACE_FLAG_VOUCHER;
	}

	/*
	 * Sending task / port
	 */
	send_task = current_task();
	send_pid = task_pid(send_task);

	if (send_pid != 0) {
		if (task_is_daemon(send_task)) {
			msg_flags |= KMSG_TRACE_FLAG_DAEMON_SRC;
		} else if (task_is_app(send_task)) {
			msg_flags |= KMSG_TRACE_FLAG_APP_SRC;
		}
	}

	is_task_64bit = (send_task->map->max_offset > VM_MAX_ADDRESS);
	if (is_task_64bit) {
		msg_flags |= KMSG_TRACE_FLAG_SND64;
	}

	/* a null local port means no reply is expected: one-way message */
	src_port = msg->msgh_local_port;
	if (src_port) {
		if (src_port->ip_messages.imq_qlimit != MACH_PORT_QLIMIT_DEFAULT) {
			msg_flags |= KMSG_TRACE_FLAG_SRC_NDFLTQ;
		}
		switch (MACH_MSGH_BITS_LOCAL(msg->msgh_bits)) {
		case MACH_MSG_TYPE_MOVE_SEND_ONCE:
			msg_flags |= KMSG_TRACE_FLAG_SRC_SONCE;
			break;
		default:
			break;
		}
	} else {
		msg_flags |= KMSG_TRACE_FLAG_ONEWAY;
	}


	/*
	 * Destination task / port
	 *
	 * Unresolvable destinations get sentinel pids 0xfffffff0..3
	 * depending on why they could not be resolved.
	 */
	ip_lock(dst_port);
	if (!ip_active(dst_port)) {
		/* dst port is being torn down */
		dst_pid = (uint32_t)0xfffffff0;
	} else if (dst_port->ip_tempowner) {
		msg_flags |= KMSG_TRACE_FLAG_DTMPOWNER;
		if (IIT_NULL != dst_port->ip_imp_task) {
			dst_pid = task_pid(dst_port->ip_imp_task->iit_task);
		} else {
			dst_pid = (uint32_t)0xfffffff1;
		}
	} else if (dst_port->ip_receiver_name == MACH_PORT_NULL) {
		/* dst_port is otherwise in-transit */
		dst_pid = (uint32_t)0xfffffff2;
	} else {
		if (dst_port->ip_receiver == ipc_space_kernel) {
			dst_pid = 0;
		} else {
			ipc_space_t dst_space;
			dst_space = dst_port->ip_receiver;
			if (dst_space && is_active(dst_space)) {
				dst_pid = task_pid(dst_space->is_task);
				if (task_is_daemon(dst_space->is_task)) {
					msg_flags |= KMSG_TRACE_FLAG_DAEMON_DST;
				} else if (task_is_app(dst_space->is_task)) {
					msg_flags |= KMSG_TRACE_FLAG_APP_DST;
				}
			} else {
				/* receiving task is being torn down */
				dst_pid = (uint32_t)0xfffffff3;
			}
		}
	}

	if (dst_port->ip_messages.imq_qlimit != MACH_PORT_QLIMIT_DEFAULT) {
		msg_flags |= KMSG_TRACE_FLAG_DST_NDFLTQ;
	}
	if (imq_full(&dst_port->ip_messages)) {
		msg_flags |= KMSG_TRACE_FLAG_DSTQFULL;
	}

	kotype = ip_kotype(dst_port);

	ip_unlock(dst_port);

	/* classify kernel-object destinations */
	switch (kotype) {
	case IKOT_SEMAPHORE:
		msg_flags |= KMSG_TRACE_FLAG_SEMA;
		break;
	case IKOT_TIMER:
	case IKOT_CLOCK:
		msg_flags |= KMSG_TRACE_FLAG_TIMER;
		break;
	case IKOT_MASTER_DEVICE:
	case IKOT_IOKIT_CONNECT:
	case IKOT_IOKIT_OBJECT:
	case IKOT_IOKIT_IDENT:
	case IKOT_UEXT_OBJECT:
		msg_flags |= KMSG_TRACE_FLAG_IOKIT;
		break;
	default:
		break;
	}

	switch (MACH_MSGH_BITS_REMOTE(msg->msgh_bits)) {
	case MACH_MSG_TYPE_PORT_SEND_ONCE:
		msg_flags |= KMSG_TRACE_FLAG_DST_SONCE;
		break;
	default:
		break;
	}


	/*
	 * Message size / content
	 *
	 * The reported size approximates what the *user* sent: kernel
	 * descriptor-size growth is subtracted and OOL payload sizes
	 * are added back in.
	 */
	msg_size = msg->msgh_size - sizeof(mach_msg_header_t);

	if (msg->msgh_bits & MACH_MSGH_BITS_COMPLEX) {
		mach_msg_body_t *msg_body;
		mach_msg_descriptor_t *kern_dsc;
		int dsc_count;

		msg_flags |= KMSG_TRACE_FLAG_COMPLEX;

		msg_body = (mach_msg_body_t *)(kmsg->ikm_header + 1);
		dsc_count = (int)msg_body->msgh_descriptor_count;
		kern_dsc = (mach_msg_descriptor_t *)(msg_body + 1);

		/* this is gross: see ipc_kmsg_copyin_body()... */
		if (!is_task_64bit) {
			msg_size -= (dsc_count * 12);
		}

		for (int i = 0; i < dsc_count; i++) {
			switch (kern_dsc[i].type.type) {
			case MACH_MSG_PORT_DESCRIPTOR:
				num_ports++;
				if (is_task_64bit) {
					msg_size -= 12;
				}
				break;
			case MACH_MSG_OOL_VOLATILE_DESCRIPTOR:
			case MACH_MSG_OOL_DESCRIPTOR: {
				mach_msg_ool_descriptor_t *dsc;
				dsc = (mach_msg_ool_descriptor_t *)&kern_dsc[i];
				msg_flags |= KMSG_TRACE_FLAG_OOLMEM;
				msg_size += dsc->size;
				/* classify how the OOL memory was copied in */
				if ((dsc->size >= MSG_OOL_SIZE_SMALL) &&
				    (dsc->copy == MACH_MSG_PHYSICAL_COPY) &&
				    !dsc->deallocate) {
					msg_flags |= KMSG_TRACE_FLAG_PCPY;
				} else if (dsc->size <= MSG_OOL_SIZE_SMALL) {
					msg_flags |= KMSG_TRACE_FLAG_PCPY;
				} else {
					msg_flags |= KMSG_TRACE_FLAG_VCPY;
				}
				if (is_task_64bit) {
					msg_size -= 16;
				}
			} break;
			case MACH_MSG_OOL_PORTS_DESCRIPTOR: {
				mach_msg_ool_ports_descriptor_t *dsc;
				dsc = (mach_msg_ool_ports_descriptor_t *)&kern_dsc[i];
				num_ports += dsc->count;
				if (is_task_64bit) {
					msg_size -= 16;
				}
			} break;
			case MACH_MSG_GUARDED_PORT_DESCRIPTOR:
				num_ports++;
				msg_flags |= KMSG_TRACE_FLAG_GUARDED_DESC;
				if (is_task_64bit) {
					msg_size -= 16;
				}
				break;
			default:
				break;
			}
		}
	}

	/*
	 * Trailer contents
	 *
	 * NOTE(review): the '<=' admits trailers smaller than a security
	 * trailer; kmsg trailers are allocated as mach_msg_max_trailer_t
	 * so the read below stays in-bounds — confirm the comparison is
	 * intentional.
	 */
	trailer = (mach_msg_trailer_t *)((vm_offset_t)msg +
	    (vm_offset_t)mach_round_msg(msg->msgh_size));
	if (trailer->msgh_trailer_size <= sizeof(mach_msg_security_trailer_t)) {
		extern const security_token_t KERNEL_SECURITY_TOKEN;
		mach_msg_security_trailer_t *strailer;
		strailer = (mach_msg_security_trailer_t *)trailer;
		/*
		 * verify the sender PID: replies from the kernel often look
		 * like self-talk because the sending port is not reset.
		 */
		if (memcmp(&strailer->msgh_sender,
		    &KERNEL_SECURITY_TOKEN,
		    sizeof(KERNEL_SECURITY_TOKEN)) == 0) {
			send_pid = 0;
			msg_flags &= ~(KMSG_TRACE_FLAG_APP_SRC | KMSG_TRACE_FLAG_DAEMON_SRC);
		}
	}

	KDBG(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_KMSG_INFO) | DBG_FUNC_END,
	    (uintptr_t)send_pid,
	    (uintptr_t)dst_pid,
	    (uintptr_t)msg_size,
	    (uintptr_t)(
		    ((msg_flags & KMSG_TRACE_FLAGS_MASK) << KMSG_TRACE_FLAGS_SHIFT) |
		    ((num_ports & KMSG_TRACE_PORTS_MASK) << KMSG_TRACE_PORTS_SHIFT)
		    )
	    );
}
1175 #endif
1176
/* zone for cached ipc_kmsg_t structures */
ZONE_DECLARE(ipc_kmsg_zone, "ipc kmsgs", IKM_SAVED_KMSG_SIZE,
    ZC_CACHING | ZC_ZFREE_CLEARMEM);

/* boot-arg "ipc_strict_reply": when true, enforce reply-port/reply-context
 * validation on message send (see ipc_kmsg_validate_reply_*_locked below) */
static TUNABLE(bool, enforce_strict_reply, "ipc_strict_reply", false);

/*
 * Forward declarations
 */

/* release all rights, references and memory held by a kmsg */
void ipc_kmsg_clean(
	ipc_kmsg_t kmsg);

/* release rights/memory held by `number` descriptors starting at `desc` */
void ipc_kmsg_clean_body(
	ipc_kmsg_t kmsg,
	mach_msg_type_number_t number,
	mach_msg_descriptor_t *desc);

/* clean up a kmsg whose copyin was aborted partway through */
void ipc_kmsg_clean_partial(
	ipc_kmsg_t kmsg,
	mach_msg_type_number_t number,
	mach_msg_descriptor_t *desc,
	vm_offset_t paddr,
	vm_size_t length);

/* copy in the descriptors of a complex message from user space */
mach_msg_return_t ipc_kmsg_copyin_body(
	ipc_kmsg_t kmsg,
	ipc_space_t space,
	vm_map_t map,
	mach_msg_option_t *optionp);


static void
ipc_kmsg_link_reply_context_locked(
	ipc_port_t reply_port,
	ipc_port_t voucher_port);

static kern_return_t
ipc_kmsg_validate_reply_port_locked(
	ipc_port_t reply_port,
	mach_msg_option_t options);

static mach_msg_return_t
ipc_kmsg_validate_reply_context_locked(
	mach_msg_option_t option,
	ipc_port_t dest_port,
	ipc_voucher_t voucher,
	mach_port_name_t voucher_name);

/* we can't include the BSD <sys/persona.h> header here... */
#ifndef PERSONA_ID_NONE
#define PERSONA_ID_NONE ((uint32_t)-1)
#endif
1229
1230 /*
1231 * We keep a per-processor cache of kernel message buffers.
1232 * The cache saves the overhead/locking of using kalloc/kfree.
1233 * The per-processor cache seems to miss less than a per-thread cache,
1234 * and it also uses less memory. Access to the cache doesn't
1235 * require locking.
1236 */
1237
/*
 * Routine:	ikm_set_header
 * Purpose:
 *	Set the header (and data) pointers for a message.  If the
 *	message is small, the data pointer is NULL and the header
 *	lives at the tail of the fixed-size buffer embedded in the
 *	kmsg itself.  Otherwise the header is placed at the tail of
 *	the supplied out-of-line data buffer, leaving the unused
 *	(descriptor-expansion) space in front of it.
 * Conditions:
 *	Nothing locked.
 */
static void
ikm_set_header(
	ipc_kmsg_t kmsg,
	void *data,
	mach_msg_size_t mtsize)
{
	if (data) {
		/* out-of-line buffer: header at the end, slack in front */
		kmsg->ikm_data = data;
		kmsg->ikm_header = (mach_msg_header_t *)(data + kmsg->ikm_size - mtsize);
	} else {
		/* inline case: message storage follows the kmsg structure */
		assert(kmsg->ikm_size == IKM_SAVED_MSG_SIZE);
		kmsg->ikm_header = (mach_msg_header_t *)
		    ((vm_offset_t)(kmsg + 1) + kmsg->ikm_size - mtsize);
	}
}
1263
/*
 * Routine:	ipc_kmsg_alloc
 * Purpose:
 *	Allocate a kernel message structure.  If we can get one from
 *	the cache, that is best.  Otherwise, allocate a new one.
 * Conditions:
 *	Nothing locked.
 * Returns:
 *	The new kmsg, or IKM_NULL if the requested size exceeds the
 *	implementation limit, would overflow when padded, or the
 *	data-buffer allocation fails.
 */
ipc_kmsg_t
ipc_kmsg_alloc(
	mach_msg_size_t msg_and_trailer_size)
{
	mach_msg_size_t max_expanded_size;
	ipc_kmsg_t kmsg;
	void *data;

	/*
	 * LP64support -
	 * Pad the allocation in case we need to expand the
	 * message descriptors for user spaces with pointers larger than
	 * the kernel's own, or vice versa.  We don't know how many descriptors
	 * there are yet, so just assume the whole body could be
	 * descriptors (if there could be any at all).
	 *
	 * The expansion space is left in front of the header,
	 * because it is easier to pull the header and descriptors
	 * forward as we process them than it is to push all the
	 * data backwards.
	 */
	mach_msg_size_t size = msg_and_trailer_size - MAX_TRAILER_SIZE;

	/* compare against implementation upper limit for the body */
	if (size > ipc_kmsg_max_body_space) {
		return IKM_NULL;
	}

	if (size > sizeof(mach_msg_base_t)) {
		/* worst case: the entire body is 32-bit OOL descriptors,
		 * each of which may grow by DESC_SIZE_ADJUSTMENT bytes */
		mach_msg_size_t max_desc = (mach_msg_size_t)(((size - sizeof(mach_msg_base_t)) /
		    sizeof(mach_msg_ool_descriptor32_t)) *
		    DESC_SIZE_ADJUSTMENT);

		/* make sure expansion won't cause wrap */
		if (msg_and_trailer_size > MACH_MSG_SIZE_MAX - max_desc) {
			return IKM_NULL;
		}

		max_expanded_size = msg_and_trailer_size + max_desc;
	} else {
		max_expanded_size = msg_and_trailer_size;
	}

	if (max_expanded_size > IKM_SAVED_MSG_SIZE) {
		/* too big for the inline buffer: take a heap data buffer */
		data = kheap_alloc(KHEAP_DATA_BUFFERS, max_expanded_size, Z_WAITOK);
		if (data == NULL) {
			return IKM_NULL;
		}
	} else {
		/* small message: stored inline, right after the kmsg struct */
		data = NULL;
		max_expanded_size = IKM_SAVED_MSG_SIZE;
	}

	kmsg = zalloc_flags(ipc_kmsg_zone, Z_WAITOK | Z_ZERO | Z_NOFAIL);
	kmsg->ikm_size = max_expanded_size;
	ikm_qos_init(kmsg);
	ikm_set_header(kmsg, data, msg_and_trailer_size);
	/* poison the queue links; the assignment only happens under DEBUG */
	assert((kmsg->ikm_prev = kmsg->ikm_next = IKM_BOGUS));

	return kmsg;
}
1333
/*
 * Routine:	ipc_kmsg_free
 * Purpose:
 *	Free a kernel message buffer.  If the kmsg is preallocated
 *	to a port, just "put it back (marked unused)."  We have to
 *	do this with the port locked.  The port may have its hold
 *	on our message released.  In that case, we have to just
 *	revert the message to a traditional one and free it normally.
 * Conditions:
 *	Nothing locked.
 */

void
ipc_kmsg_free(
	ipc_kmsg_t kmsg)
{
	mach_msg_size_t size = kmsg->ikm_size;
	ipc_port_t port;

	/* any voucher must have been released/moved before freeing */
	assert(!IP_VALID(kmsg->ikm_voucher));

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_KMSG_FREE) | DBG_FUNC_NONE,
	    VM_KERNEL_ADDRPERM((uintptr_t)kmsg),
	    0, 0, 0, 0);

	/*
	 * Check to see if the message is bound to the port.  If so,
	 * mark it not in use.  If the port isn't already dead, then
	 * leave the message associated with it.  Otherwise, free it.
	 */
	if (size == IKM_SAVED_MSG_SIZE) {
		/* sanity: an inline header must lie within the kmsg itself */
		if ((void *)kmsg->ikm_header < (void *)(kmsg + 1) ||
		    (void *)kmsg->ikm_header >= (void *)(kmsg + 1) + IKM_SAVED_MSG_SIZE) {
			panic("ipc_kmsg_free");
		}
		port = ikm_prealloc_inuse_port(kmsg);
		if (port != IP_NULL) {
			ip_lock(port);
			ikm_prealloc_clear_inuse(kmsg, port);
			if (ip_active(port) && (port->ip_premsg == kmsg)) {
				/* port still owns the buffer: hand it back */
				assert(IP_PREALLOC(port));
				ip_unlock(port);
				ip_release(port);
				return;
			}
			ip_unlock(port);
			ip_release(port); /* May be last reference */
		}
	} else {
		/* sanity: an out-of-line header must lie within its buffer */
		void *data = kmsg->ikm_data;
		if ((void *)kmsg->ikm_header < data ||
		    (void *)kmsg->ikm_header >= data + size) {
			panic("ipc_kmsg_free");
		}
		kheap_free(KHEAP_DATA_BUFFERS, data, size);
	}
	zfree(ipc_kmsg_zone, kmsg);
}
1392
1393
1394 /*
1395 * Routine: ipc_kmsg_enqueue
1396 * Purpose:
1397 * Enqueue a kmsg.
1398 */
1399
1400 void
1401 ipc_kmsg_enqueue(
1402 ipc_kmsg_queue_t queue,
1403 ipc_kmsg_t kmsg)
1404 {
1405 ipc_kmsg_t first = queue->ikmq_base;
1406 ipc_kmsg_t last;
1407
1408 if (first == IKM_NULL) {
1409 queue->ikmq_base = kmsg;
1410 kmsg->ikm_next = kmsg;
1411 kmsg->ikm_prev = kmsg;
1412 } else {
1413 last = first->ikm_prev;
1414 kmsg->ikm_next = first;
1415 kmsg->ikm_prev = last;
1416 first->ikm_prev = kmsg;
1417 last->ikm_next = kmsg;
1418 }
1419 }
1420
/*
 * Routine:	ipc_kmsg_enqueue_qos
 * Purpose:
 *	Enqueue a kmsg, propagating qos
 *	overrides towards the head of the queue.
 *
 * Returns:
 *	whether the head of the queue had
 *	its override-qos adjusted because
 *	of this insertion.
 */

boolean_t
ipc_kmsg_enqueue_qos(
	ipc_kmsg_queue_t queue,
	ipc_kmsg_t kmsg)
{
	ipc_kmsg_t first = queue->ikmq_base;
	ipc_kmsg_t prev;
	mach_msg_qos_t qos_ovr;

	if (first == IKM_NULL) {
		/* insert a first message */
		queue->ikmq_base = kmsg;
		kmsg->ikm_next = kmsg;
		kmsg->ikm_prev = kmsg;
		return TRUE;
	}

	/* insert at the tail */
	prev = first->ikm_prev;
	kmsg->ikm_next = first;
	kmsg->ikm_prev = prev;
	first->ikm_prev = kmsg;
	prev->ikm_next = kmsg;

	/* apply QoS overrides towards the head, stopping at the first
	 * message already carrying an equal or higher override */
	qos_ovr = kmsg->ikm_qos_override;
	while (prev != kmsg &&
	    qos_ovr > prev->ikm_qos_override) {
		prev->ikm_qos_override = qos_ovr;
		prev = prev->ikm_prev;
	}

	/* did we adjust everything (walked all the way around to kmsg,
	 * meaning the head's override was raised too)? */
	return prev == kmsg;
}
1468
1469 /*
1470 * Routine: ipc_kmsg_override_qos
1471 * Purpose:
1472 * Update the override for a given kmsg already
1473 * enqueued, propagating qos override adjustments
1474 * towards the head of the queue.
1475 *
1476 * Returns:
1477 * whether the head of the queue had
1478 * it's override-qos adjusted because
1479 * of this insertion.
1480 */
1481
1482 boolean_t
1483 ipc_kmsg_override_qos(
1484 ipc_kmsg_queue_t queue,
1485 ipc_kmsg_t kmsg,
1486 mach_msg_qos_t qos_ovr)
1487 {
1488 ipc_kmsg_t first = queue->ikmq_base;
1489 ipc_kmsg_t cur = kmsg;
1490
1491 /* apply QoS overrides towards the head */
1492 while (qos_ovr > cur->ikm_qos_override) {
1493 cur->ikm_qos_override = qos_ovr;
1494 if (cur == first) {
1495 return TRUE;
1496 }
1497 cur = cur->ikm_prev;
1498 }
1499 return FALSE;
1500 }
1501
1502 /*
1503 * Routine: ipc_kmsg_dequeue
1504 * Purpose:
1505 * Dequeue and return a kmsg.
1506 */
1507
1508 ipc_kmsg_t
1509 ipc_kmsg_dequeue(
1510 ipc_kmsg_queue_t queue)
1511 {
1512 ipc_kmsg_t first;
1513
1514 first = ipc_kmsg_queue_first(queue);
1515
1516 if (first != IKM_NULL) {
1517 ipc_kmsg_rmqueue(queue, first);
1518 }
1519
1520 return first;
1521 }
1522
/*
 * Routine:	ipc_kmsg_rmqueue
 * Purpose:
 *	Pull a kmsg out of a queue.
 * Conditions:
 *	The queue must be non-empty and must contain kmsg.
 */

void
ipc_kmsg_rmqueue(
	ipc_kmsg_queue_t queue,
	ipc_kmsg_t kmsg)
{
	ipc_kmsg_t next, prev;

	assert(queue->ikmq_base != IKM_NULL);

	next = kmsg->ikm_next;
	prev = kmsg->ikm_prev;

	if (next == kmsg) {
		/* kmsg is the only element: the queue becomes empty */
		assert(prev == kmsg);
		assert(queue->ikmq_base == kmsg);

		queue->ikmq_base = IKM_NULL;
	} else {
		/* detect linkage corruption before unsplicing */
		if (__improbable(next->ikm_prev != kmsg || prev->ikm_next != kmsg)) {
			panic("ipc_kmsg_rmqueue: inconsistent prev/next pointers. "
			    "(prev->next: %p, next->prev: %p, kmsg: %p)",
			    prev->ikm_next, next->ikm_prev, kmsg);
		}

		/* removing the head: advance the base pointer */
		if (queue->ikmq_base == kmsg) {
			queue->ikmq_base = next;
		}

		next->ikm_prev = prev;
		prev->ikm_next = next;
	}
	/* XXX Temporary debug logic */
	assert((kmsg->ikm_next = IKM_BOGUS) == IKM_BOGUS);
	assert((kmsg->ikm_prev = IKM_BOGUS) == IKM_BOGUS);
}
1564
1565 /*
1566 * Routine: ipc_kmsg_queue_next
1567 * Purpose:
1568 * Return the kmsg following the given kmsg.
1569 * (Or IKM_NULL if it is the last one in the queue.)
1570 */
1571
1572 ipc_kmsg_t
1573 ipc_kmsg_queue_next(
1574 ipc_kmsg_queue_t queue,
1575 ipc_kmsg_t kmsg)
1576 {
1577 ipc_kmsg_t next;
1578
1579 assert(queue->ikmq_base != IKM_NULL);
1580
1581 next = kmsg->ikm_next;
1582 if (queue->ikmq_base == next) {
1583 next = IKM_NULL;
1584 }
1585
1586 return next;
1587 }
1588
1589 /*
1590 * Routine: ipc_kmsg_destroy
1591 * Purpose:
1592 * Destroys a kernel message. Releases all rights,
1593 * references, and memory held by the message.
1594 * Frees the message.
1595 * Conditions:
1596 * No locks held.
1597 */
1598
1599 void
1600 ipc_kmsg_destroy(
1601 ipc_kmsg_t kmsg)
1602 {
1603 /*
1604 * Destroying a message can cause more messages to be destroyed.
1605 * Curtail recursion by putting messages on the deferred
1606 * destruction queue. If this was the first message on the
1607 * queue, this instance must process the full queue.
1608 */
1609 if (ipc_kmsg_delayed_destroy(kmsg)) {
1610 ipc_kmsg_reap_delayed();
1611 }
1612 }
1613
1614 /*
1615 * Routine: ipc_kmsg_delayed_destroy
1616 * Purpose:
1617 * Enqueues a kernel message for deferred destruction.
1618 * Returns:
1619 * Boolean indicator that the caller is responsible to reap
1620 * deferred messages.
1621 */
1622
1623 boolean_t
1624 ipc_kmsg_delayed_destroy(
1625 ipc_kmsg_t kmsg)
1626 {
1627 ipc_kmsg_queue_t queue = &(current_thread()->ith_messages);
1628 boolean_t first = ipc_kmsg_queue_empty(queue);
1629
1630 ipc_kmsg_enqueue(queue, kmsg);
1631 return first;
1632 }
1633
/*
 * Routine:	ipc_kmsg_reap_delayed
 * Purpose:
 *	Destroys messages from the per-thread
 *	deferred reaping queue.
 * Conditions:
 *	No locks held.
 */

void
ipc_kmsg_reap_delayed(void)
{
	ipc_kmsg_queue_t queue = &(current_thread()->ith_messages);
	ipc_kmsg_t kmsg;

	/*
	 * must leave kmsg in queue while cleaning it to assure
	 * no nested calls recurse into here.
	 */
	while ((kmsg = ipc_kmsg_queue_first(queue)) != IKM_NULL) {
		ipc_kmsg_clean(kmsg);
		ipc_kmsg_rmqueue(queue, kmsg);
		ipc_kmsg_free(kmsg);
	}
}
1659
/*
 * Routine:	ipc_kmsg_clean_body
 * Purpose:
 *	Cleans the body of a kernel message.
 *	Releases all rights, references, and memory.
 *
 * Conditions:
 *	No locks held.
 */
/* counter of descriptors with an unrecognized type seen during cleanup */
static unsigned int _ipc_kmsg_clean_invalid_desc = 0;
void
ipc_kmsg_clean_body(
	__unused ipc_kmsg_t kmsg,
	mach_msg_type_number_t number,
	mach_msg_descriptor_t *saddr)
{
	mach_msg_type_number_t i;

	if (number == 0) {
		return;
	}

	for (i = 0; i < number; i++, saddr++) {
		switch (saddr->type.type) {
		case MACH_MSG_PORT_DESCRIPTOR: {
			mach_msg_port_descriptor_t *dsc;

			dsc = &saddr->port;

			/*
			 * Destroy port rights carried in the message
			 */
			if (!IP_VALID(dsc->name)) {
				continue;
			}
			ipc_object_destroy(ip_to_object(dsc->name), dsc->disposition);
			break;
		}
		case MACH_MSG_OOL_VOLATILE_DESCRIPTOR:
		case MACH_MSG_OOL_DESCRIPTOR: {
			mach_msg_ool_descriptor_t *dsc;

			dsc = (mach_msg_ool_descriptor_t *)&saddr->out_of_line;

			/*
			 * Destroy memory carried in the message
			 */
			if (dsc->size == 0) {
				assert(dsc->address == (void *) 0);
			} else {
				/* address holds a vm_map_copy_t while in transit */
				vm_map_copy_discard((vm_map_copy_t) dsc->address);
			}
			break;
		}
		case MACH_MSG_OOL_PORTS_DESCRIPTOR: {
			ipc_object_t *objects;
			mach_msg_type_number_t j;
			mach_msg_ool_ports_descriptor_t *dsc;

			dsc = (mach_msg_ool_ports_descriptor_t *)&saddr->ool_ports;
			objects = (ipc_object_t *) dsc->address;

			if (dsc->count == 0) {
				break;
			}

			assert(objects != (ipc_object_t *) 0);

			/* destroy port rights carried in the message */

			for (j = 0; j < dsc->count; j++) {
				ipc_object_t object = objects[j];

				if (!IO_VALID(object)) {
					continue;
				}

				ipc_object_destroy(object, dsc->disposition);
			}

			/* destroy memory carried in the message */

			assert(dsc->count != 0);

			/* the in-transit port array was kalloc'ed at copyin */
			kfree(dsc->address,
			    (vm_size_t) dsc->count * sizeof(mach_port_t));
			break;
		}
		case MACH_MSG_GUARDED_PORT_DESCRIPTOR: {
			mach_msg_guarded_port_descriptor_t *dsc = (typeof(dsc)) & saddr->guarded_port;

			/*
			 * Destroy port rights carried in the message
			 */
			if (!IP_VALID(dsc->name)) {
				continue;
			}
			ipc_object_destroy(ip_to_object(dsc->name), dsc->disposition);
			break;
		}
		default: {
			_ipc_kmsg_clean_invalid_desc++; /* don't understand this type of descriptor */
		}
		}
	}
}
1766
/*
 * Routine:	ipc_kmsg_clean_partial
 * Purpose:
 *	Cleans a partially-acquired kernel message.
 *	number is the index of the type descriptor
 *	in the body of the message that contained the error.
 *	If dolast, the memory and port rights in this last
 *	type spec are also cleaned.  In that case, number
 *	specifies the number of port rights to clean.
 * Conditions:
 *	Nothing locked.
 */

void
ipc_kmsg_clean_partial(
	ipc_kmsg_t kmsg,
	mach_msg_type_number_t number,
	mach_msg_descriptor_t *desc,
	vm_offset_t paddr,
	vm_size_t length)
{
	ipc_object_t object;
	mach_msg_bits_t mbits = kmsg->ikm_header->msgh_bits;

	/* deal with importance chain while we still have dest and voucher references */
	ipc_importance_clean(kmsg);

	/* destination of a partially copied-in message is always valid */
	object = ip_to_object(kmsg->ikm_header->msgh_remote_port);
	assert(IO_VALID(object));
	ipc_object_destroy_dest(object, MACH_MSGH_BITS_REMOTE(mbits));

	object = ip_to_object(kmsg->ikm_header->msgh_local_port);
	if (IO_VALID(object)) {
		ipc_object_destroy(object, MACH_MSGH_BITS_LOCAL(mbits));
	}

	object = ip_to_object(kmsg->ikm_voucher);
	if (IO_VALID(object)) {
		/* a voucher in transit always holds a plain send right */
		assert(MACH_MSGH_BITS_VOUCHER(mbits) == MACH_MSG_TYPE_MOVE_SEND);
		ipc_object_destroy(object, MACH_MSG_TYPE_PORT_SEND);
		kmsg->ikm_voucher = IP_NULL;
	}

	/* release any physical-copy staging buffer acquired so far */
	if (paddr) {
		(void) vm_deallocate(ipc_kernel_copy_map, paddr, length);
	}

	ipc_kmsg_clean_body(kmsg, number, desc);
}
1816
/*
 * Routine:	ipc_kmsg_clean
 * Purpose:
 *	Cleans a kernel message.  Releases all rights,
 *	references, and memory held by the message.
 * Conditions:
 *	No locks held.
 */

void
ipc_kmsg_clean(
	ipc_kmsg_t kmsg)
{
	ipc_object_t object;
	mach_msg_bits_t mbits;

	/* deal with importance chain while we still have dest and voucher references */
	ipc_importance_clean(kmsg);

	mbits = kmsg->ikm_header->msgh_bits;
	object = ip_to_object(kmsg->ikm_header->msgh_remote_port);
	if (IO_VALID(object)) {
		ipc_object_destroy_dest(object, MACH_MSGH_BITS_REMOTE(mbits));
	}

	object = ip_to_object(kmsg->ikm_header->msgh_local_port);
	if (IO_VALID(object)) {
		ipc_object_destroy(object, MACH_MSGH_BITS_LOCAL(mbits));
	}

	object = ip_to_object(kmsg->ikm_voucher);
	if (IO_VALID(object)) {
		/* a voucher in transit always holds a plain send right */
		assert(MACH_MSGH_BITS_VOUCHER(mbits) == MACH_MSG_TYPE_MOVE_SEND);
		ipc_object_destroy(object, MACH_MSG_TYPE_PORT_SEND);
		kmsg->ikm_voucher = IP_NULL;
	}

	/* complex messages also carry descriptors in the body */
	if (mbits & MACH_MSGH_BITS_COMPLEX) {
		mach_msg_body_t *body;

		body = (mach_msg_body_t *) (kmsg->ikm_header + 1);
		ipc_kmsg_clean_body(kmsg, body->msgh_descriptor_count,
		    (mach_msg_descriptor_t *)(body + 1));
	}
}
1862
/*
 * Routine:	ipc_kmsg_set_prealloc
 * Purpose:
 *	Assign a kmsg as a preallocated message buffer to a port.
 * Conditions:
 *	port locked.
 */

void
ipc_kmsg_set_prealloc(
	ipc_kmsg_t kmsg,
	ipc_port_t port)
{
	assert(kmsg->ikm_prealloc == IP_NULL);

	/* NOTE(review): redundant given the assert above — presumably kept
	 * defensively for release builds where assert compiles out; confirm */
	kmsg->ikm_prealloc = IP_NULL;

	assert(port_send_turnstile(port) == TURNSTILE_NULL);
	kmsg->ikm_turnstile = TURNSTILE_NULL;
	IP_SET_PREALLOC(port, kmsg);
}
1884
/*
 * Routine:	ipc_kmsg_clear_prealloc
 * Purpose:
 *	Release the assignment of a preallocated message buffer from a port.
 * Conditions:
 *	port locked.
 */
void
ipc_kmsg_clear_prealloc(
	ipc_kmsg_t kmsg,
	ipc_port_t port)
{
	/* take the mqueue lock since the turnstile is protected under it */
	imq_lock(&port->ip_messages);

	IP_CLEAR_PREALLOC(port, kmsg);
	/* the turnstile stashed in the kmsg moves back to the port */
	set_port_send_turnstile(port, kmsg->ikm_turnstile);
	imq_unlock(&port->ip_messages);
}
1904
/*
 * Routine:	ipc_kmsg_prealloc
 * Purpose:
 *	Wrapper to ipc_kmsg_alloc() to account for
 *	header expansion requirements.
 */
ipc_kmsg_t
ipc_kmsg_prealloc(mach_msg_size_t size)
{
#if defined(__LP64__)
	/* a prealloc'ed kmsg must fit in the inline buffer even after
	 * the legacy (32-bit user) header is expanded to kernel layout */
	if (size > IKM_SAVED_MSG_SIZE - LEGACY_HEADER_SIZE_DELTA) {
		panic("ipc_kmsg_prealloc");
	}

	size += LEGACY_HEADER_SIZE_DELTA;
#endif
	return ipc_kmsg_alloc(size);
}
1923
1924
/*
 * Routine:	ipc_kmsg_get
 * Purpose:
 *	Allocates a kernel message buffer.
 *	Copies a user message to the message buffer.
 * Conditions:
 *	Nothing locked.
 * Returns:
 *	MACH_MSG_SUCCESS	Acquired a message buffer.
 *	MACH_SEND_MSG_TOO_SMALL	Message smaller than a header.
 *	MACH_SEND_MSG_TOO_SMALL	Message size not long-word multiple.
 *	MACH_SEND_TOO_LARGE	Message too large to ever be sent.
 *	MACH_SEND_NO_BUFFER	Couldn't allocate a message buffer.
 *	MACH_SEND_INVALID_DATA	Couldn't copy message data.
 */

mach_msg_return_t
ipc_kmsg_get(
	mach_vm_address_t msg_addr,
	mach_msg_size_t size,
	ipc_kmsg_t *kmsgp)
{
	mach_msg_size_t msg_and_trailer_size;
	ipc_kmsg_t kmsg;
	mach_msg_max_trailer_t *trailer;
	mach_msg_legacy_base_t legacy_base;
	mach_msg_size_t len_copied;
	legacy_base.body.msgh_descriptor_count = 0;

	/* must hold at least a (legacy) header and be 4-byte aligned */
	if ((size < sizeof(mach_msg_legacy_header_t)) || (size & 3)) {
		return MACH_SEND_MSG_TOO_SMALL;
	}

	if (size > ipc_kmsg_max_body_space) {
		return MACH_SEND_TOO_LARGE;
	}

	/* copy in the header alone, or header + descriptor count */
	if (size == sizeof(mach_msg_legacy_header_t)) {
		len_copied = sizeof(mach_msg_legacy_header_t);
	} else {
		len_copied = sizeof(mach_msg_legacy_base_t);
	}

	if (copyinmsg(msg_addr, (char *)&legacy_base, len_copied)) {
		return MACH_SEND_INVALID_DATA;
	}

	/*
	 * If the message claims to be complex, it must at least
	 * have the length of a "base" message (header + dsc_count).
	 */
	if (len_copied < sizeof(mach_msg_legacy_base_t) &&
	    (legacy_base.header.msgh_bits & MACH_MSGH_BITS_COMPLEX)) {
		return MACH_SEND_MSG_TOO_SMALL;
	}

	msg_addr += sizeof(legacy_base.header);
#if defined(__LP64__)
	/* the kernel header is larger than the 32-bit user header */
	size += LEGACY_HEADER_SIZE_DELTA;
#endif
	/* unreachable if !DEBUG */
	__unreachable_ok_push
	if (DEBUG_KPRINT_SYSCALL_PREDICATE(DEBUG_KPRINT_SYSCALL_IPC_MASK)) {
		unsigned int j;
		for (j = 0; j < sizeof(legacy_base.header); j++) {
			kprintf("%02x\n", ((unsigned char*)&legacy_base.header)[j]);
		}
	}
	__unreachable_ok_pop

	msg_and_trailer_size = size + MAX_TRAILER_SIZE;
	kmsg = ipc_kmsg_alloc(msg_and_trailer_size);
	if (kmsg == IKM_NULL) {
		return MACH_SEND_NO_BUFFER;
	}

	/* rebuild the header in kernel layout (names become port pointers) */
	kmsg->ikm_header->msgh_size = size;
	kmsg->ikm_header->msgh_bits = legacy_base.header.msgh_bits;
	kmsg->ikm_header->msgh_remote_port = CAST_MACH_NAME_TO_PORT(legacy_base.header.msgh_remote_port);
	kmsg->ikm_header->msgh_local_port = CAST_MACH_NAME_TO_PORT(legacy_base.header.msgh_local_port);
	kmsg->ikm_header->msgh_voucher_port = legacy_base.header.msgh_voucher_port;
	kmsg->ikm_header->msgh_id = legacy_base.header.msgh_id;

	DEBUG_KPRINT_SYSCALL_IPC("ipc_kmsg_get header:\n"
	    "  size:		0x%.8x\n"
	    "  bits:		0x%.8x\n"
	    "  remote_port:	%p\n"
	    "  local_port:	%p\n"
	    "  voucher_port:	0x%.8x\n"
	    "  id:		%.8d\n",
	    kmsg->ikm_header->msgh_size,
	    kmsg->ikm_header->msgh_bits,
	    kmsg->ikm_header->msgh_remote_port,
	    kmsg->ikm_header->msgh_local_port,
	    kmsg->ikm_header->msgh_voucher_port,
	    kmsg->ikm_header->msgh_id);

	/* copy in the rest of the message body after the (expanded) header */
	if (copyinmsg(msg_addr, (char *)(kmsg->ikm_header + 1), size - (mach_msg_size_t)sizeof(mach_msg_header_t))) {
		ipc_kmsg_free(kmsg);
		return MACH_SEND_INVALID_DATA;
	}

	/* unreachable if !DEBUG */
	__unreachable_ok_push
	if (DEBUG_KPRINT_SYSCALL_PREDICATE(DEBUG_KPRINT_SYSCALL_IPC_MASK)) {
		kprintf("body: size: %lu\n", (size - sizeof(mach_msg_header_t)));
		uint32_t i;
		for (i = 0; i * 4 < (size - sizeof(mach_msg_header_t)); i++) {
			kprintf("%.4x\n", ((uint32_t *)(kmsg->ikm_header + 1))[i]);
		}
	}
	__unreachable_ok_pop
	DEBUG_IPC_KMSG_PRINT(kmsg, "ipc_kmsg_get()");

	/*
	 * I reserve for the trailer the largest space (MAX_TRAILER_SIZE)
	 * However, the internal size field of the trailer (msgh_trailer_size)
	 * is initialized to the minimum (sizeof(mach_msg_trailer_t)), to optimize
	 * the cases where no implicit data is requested.
	 */
	trailer = (mach_msg_max_trailer_t *) ((vm_offset_t)kmsg->ikm_header + size);
	bzero(trailer, sizeof(*trailer));
	trailer->msgh_sender = current_thread()->task->sec_token;
	trailer->msgh_audit = current_thread()->task->audit_token;
	trailer->msgh_trailer_type = MACH_MSG_TRAILER_FORMAT_0;
	trailer->msgh_trailer_size = MACH_MSG_TRAILER_MINIMUM_SIZE;

#ifdef ppc
	if (trcWork.traceMask) {
		dbgTrace(0x1100, (unsigned int)kmsg->ikm_header->msgh_id,
		    (unsigned int)kmsg->ikm_header->msgh_remote_port,
		    (unsigned int)kmsg->ikm_header->msgh_local_port, 0);
	}
#endif

	trailer->msgh_labels.sender = 0;
	*kmsgp = kmsg;
	return MACH_MSG_SUCCESS;
}
2064
/*
 * Routine:	ipc_kmsg_get_from_kernel
 * Purpose:
 *	First checks for a preallocated message
 *	reserved for kernel clients.  If not found -
 *	allocates a new kernel message buffer.
 *	Copies a kernel message to the message buffer.
 *	Only resource errors are allowed.
 * Conditions:
 *	Nothing locked.
 *	Ports in header are ipc_port_t.
 * Returns:
 *	MACH_MSG_SUCCESS	Acquired a message buffer.
 *	MACH_SEND_NO_BUFFER	Couldn't allocate a message buffer.
 */

mach_msg_return_t
ipc_kmsg_get_from_kernel(
	mach_msg_header_t *msg,
	mach_msg_size_t size,
	ipc_kmsg_t *kmsgp)
{
	ipc_kmsg_t kmsg;
	mach_msg_size_t msg_and_trailer_size;
	mach_msg_max_trailer_t *trailer;
	ipc_port_t dest_port;

	assert(size >= sizeof(mach_msg_header_t));
	assert((size & 3) == 0);

	dest_port = msg->msgh_remote_port;

	msg_and_trailer_size = size + MAX_TRAILER_SIZE;

	/*
	 * See if the port has a pre-allocated kmsg for kernel
	 * clients.  These are set up for those kernel clients
	 * which cannot afford to wait.
	 */
	if (IP_VALID(dest_port) && IP_PREALLOC(dest_port)) {
		mach_msg_size_t max_desc = 0;

		ip_lock(dest_port);
		if (!ip_active(dest_port)) {
			ip_unlock(dest_port);
			return MACH_SEND_NO_BUFFER;
		}
		assert(IP_PREALLOC(dest_port));
		kmsg = dest_port->ip_premsg;
		if (ikm_prealloc_inuse(kmsg)) {
			/* the single preallocated buffer is busy */
			ip_unlock(dest_port);
			return MACH_SEND_NO_BUFFER;
		}
#if !defined(__LP64__)
		/* on ILP32, descriptors may grow during copyout to an
		 * LP64 user task; reserve room for that expansion */
		if (msg->msgh_bits & MACH_MSGH_BITS_COMPLEX) {
			assert(size > sizeof(mach_msg_base_t));
			max_desc = ((mach_msg_base_t *)msg)->body.msgh_descriptor_count *
			    DESC_SIZE_ADJUSTMENT;
		}
#endif
		if (msg_and_trailer_size > kmsg->ikm_size - max_desc) {
			ip_unlock(dest_port);
			return MACH_SEND_TOO_LARGE;
		}
		ikm_prealloc_set_inuse(kmsg, dest_port);
		ikm_set_header(kmsg, NULL, msg_and_trailer_size);
		ip_unlock(dest_port);
	} else {
		kmsg = ipc_kmsg_alloc(msg_and_trailer_size);
		if (kmsg == IKM_NULL) {
			return MACH_SEND_NO_BUFFER;
		}
	}

	(void) memcpy((void *) kmsg->ikm_header, (const void *) msg, size);

	ikm_qos_init(kmsg);

	kmsg->ikm_header->msgh_size = size;

	/*
	 * I reserve for the trailer the largest space (MAX_TRAILER_SIZE)
	 * However, the internal size field of the trailer (msgh_trailer_size)
	 * is initialized to the minimum (sizeof(mach_msg_trailer_t)), to
	 * optimize the cases where no implicit data is requested.
	 */
	trailer = (mach_msg_max_trailer_t *)
	    ((vm_offset_t)kmsg->ikm_header + size);
	bzero(trailer, sizeof(*trailer));
	/* kernel-originated messages carry the kernel's tokens */
	trailer->msgh_sender = KERNEL_SECURITY_TOKEN;
	trailer->msgh_audit = KERNEL_AUDIT_TOKEN;
	trailer->msgh_trailer_type = MACH_MSG_TRAILER_FORMAT_0;
	trailer->msgh_trailer_size = MACH_MSG_TRAILER_MINIMUM_SIZE;

	trailer->msgh_labels.sender = 0;

	*kmsgp = kmsg;
	return MACH_MSG_SUCCESS;
}
2164
2165 /*
2166 * Routine: ipc_kmsg_send
2167 * Purpose:
2168 * Send a message. The message holds a reference
2169 * for the destination port in the msgh_remote_port field.
2170 *
2171 * If unsuccessful, the caller still has possession of
2172 * the message and must do something with it. If successful,
2173 * the message is queued, given to a receiver, destroyed,
2174 * or handled directly by the kernel via mach_msg.
2175 * Conditions:
2176 * Nothing locked.
2177 * Returns:
2178 * MACH_MSG_SUCCESS The message was accepted.
2179 * MACH_SEND_TIMED_OUT Caller still has message.
2180 * MACH_SEND_INTERRUPTED Caller still has message.
2181 * MACH_SEND_INVALID_DEST Caller still has message.
2182 */
2183 mach_msg_return_t
2184 ipc_kmsg_send(
2185 ipc_kmsg_t kmsg,
2186 mach_msg_option_t option,
2187 mach_msg_timeout_t send_timeout)
2188 {
2189 ipc_port_t port;
2190 thread_t th = current_thread();
2191 mach_msg_return_t error = MACH_MSG_SUCCESS;
2192 boolean_t kernel_reply = FALSE;
2193
2194 /* Check if honor qlimit flag is set on thread. */
2195 if ((th->options & TH_OPT_HONOR_QLIMIT) == TH_OPT_HONOR_QLIMIT) {
2196 /* Remove the MACH_SEND_ALWAYS flag to honor queue limit. */
2197 option &= (~MACH_SEND_ALWAYS);
2198 /* Add the timeout flag since the message queue might be full. */
2199 option |= MACH_SEND_TIMEOUT;
2200 th->options &= (~TH_OPT_HONOR_QLIMIT);
2201 }
2202
2203 #if IMPORTANCE_INHERITANCE
2204 bool did_importance = false;
2205 #if IMPORTANCE_TRACE
2206 mach_msg_id_t imp_msgh_id = -1;
2207 int sender_pid = -1;
2208 #endif /* IMPORTANCE_TRACE */
2209 #endif /* IMPORTANCE_INHERITANCE */
2210
2211 /* don't allow the creation of a circular loop */
2212 if (kmsg->ikm_header->msgh_bits & MACH_MSGH_BITS_CIRCULAR) {
2213 ipc_kmsg_destroy(kmsg);
2214 KDBG(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_KMSG_INFO) | DBG_FUNC_END, MACH_MSGH_BITS_CIRCULAR);
2215 return MACH_MSG_SUCCESS;
2216 }
2217
2218 ipc_voucher_send_preprocessing(kmsg);
2219
2220 port = kmsg->ikm_header->msgh_remote_port;
2221 assert(IP_VALID(port));
2222 ip_lock(port);
2223
2224 /*
2225 * If the destination has been guarded with a reply context, and the
2226 * sender is consuming a send-once right, then assume this is a reply
2227 * to an RPC and we need to validate that this sender is currently in
2228 * the correct context.
2229 */
2230 if (enforce_strict_reply && port->ip_reply_context != 0 &&
2231 ((option & MACH_SEND_KERNEL) == 0) &&
2232 MACH_MSGH_BITS_REMOTE(kmsg->ikm_header->msgh_bits) == MACH_MSG_TYPE_PORT_SEND_ONCE) {
2233 error = ipc_kmsg_validate_reply_context_locked(option, port, th->ith_voucher, th->ith_voucher_name);
2234 if (error != MACH_MSG_SUCCESS) {
2235 ip_unlock(port);
2236 return error;
2237 }
2238 }
2239
2240 #if IMPORTANCE_INHERITANCE
2241 retry:
2242 #endif /* IMPORTANCE_INHERITANCE */
2243 /*
2244 * Can't deliver to a dead port.
2245 * However, we can pretend it got sent
2246 * and was then immediately destroyed.
2247 */
2248 if (!ip_active(port)) {
2249 ip_unlock(port);
2250 #if MACH_FLIPC
2251 if (MACH_NODE_VALID(kmsg->ikm_node) && FPORT_VALID(port->ip_messages.imq_fport)) {
2252 flipc_msg_ack(kmsg->ikm_node, &port->ip_messages, FALSE);
2253 }
2254 #endif
2255 if (did_importance) {
2256 /*
2257 * We're going to pretend we delivered this message
2258 * successfully, and just eat the kmsg. However, the
2259 * kmsg is actually visible via the importance_task!
2260 * We need to cleanup this linkage before we destroy
2261 * the message, and more importantly before we set the
2262 * msgh_remote_port to NULL. See: 34302571
2263 */
2264 ipc_importance_clean(kmsg);
2265 }
2266 ip_release(port); /* JMM - Future: release right, not just ref */
2267 kmsg->ikm_header->msgh_remote_port = MACH_PORT_NULL;
2268 ipc_kmsg_destroy(kmsg);
2269 KDBG(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_KMSG_INFO) | DBG_FUNC_END, MACH_SEND_INVALID_DEST);
2270 return MACH_MSG_SUCCESS;
2271 }
2272
2273 if (port->ip_receiver == ipc_space_kernel) {
2274 /*
2275 * We can check ip_receiver == ipc_space_kernel
2276 * before checking that the port is active because
2277 * ipc_port_dealloc_kernel clears ip_receiver
2278 * before destroying a kernel port.
2279 */
2280 require_ip_active(port);
2281 port->ip_messages.imq_seqno++;
2282 ip_unlock(port);
2283
2284 current_task()->messages_sent++;
2285
2286 /*
2287 * Call the server routine, and get the reply message to send.
2288 */
2289 kmsg = ipc_kobject_server(kmsg, option);
2290 if (kmsg == IKM_NULL) {
2291 return MACH_MSG_SUCCESS;
2292 }
2293
2294 /* sign the reply message */
2295 ikm_sign(kmsg);
2296
2297 /* restart the KMSG_INFO tracing for the reply message */
2298 KDBG(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_KMSG_INFO) | DBG_FUNC_START);
2299 port = kmsg->ikm_header->msgh_remote_port;
2300 assert(IP_VALID(port));
2301 ip_lock(port);
2302 /* fall thru with reply - same options */
2303 kernel_reply = TRUE;
2304 if (!ip_active(port)) {
2305 error = MACH_SEND_INVALID_DEST;
2306 }
2307 }
2308
2309 #if IMPORTANCE_INHERITANCE
2310 /*
2311 * Need to see if this message needs importance donation and/or
2312 * propagation. That routine can drop the port lock temporarily.
2313 * If it does we'll have to revalidate the destination.
2314 */
2315 if (!did_importance) {
2316 did_importance = true;
2317 if (ipc_importance_send(kmsg, option)) {
2318 goto retry;
2319 }
2320 }
2321 #endif /* IMPORTANCE_INHERITANCE */
2322
2323 if (error != MACH_MSG_SUCCESS) {
2324 ip_unlock(port);
2325 } else {
2326 /*
2327 * We have a valid message and a valid reference on the port.
2328 * we can unlock the port and call mqueue_send() on its message
2329 * queue. Lock message queue while port is locked.
2330 */
2331 imq_lock(&port->ip_messages);
2332
2333 ipc_special_reply_port_msg_sent(port);
2334
2335 ip_unlock(port);
2336
2337 error = ipc_mqueue_send(&port->ip_messages, kmsg, option,
2338 send_timeout);
2339 }
2340
2341 #if IMPORTANCE_INHERITANCE
2342 if (did_importance) {
2343 __unused int importance_cleared = 0;
2344 switch (error) {
2345 case MACH_SEND_TIMED_OUT:
2346 case MACH_SEND_NO_BUFFER:
2347 case MACH_SEND_INTERRUPTED:
2348 case MACH_SEND_INVALID_DEST:
2349 /*
2350 * We still have the kmsg and its
2351 * reference on the port. But we
2352 * have to back out the importance
2353 * boost.
2354 *
2355 * The port could have changed hands,
2356 * be inflight to another destination,
2357 * etc... But in those cases our
2358 * back-out will find the new owner
2359 * (and all the operations that
2360 * transferred the right should have
2361 * applied their own boost adjustments
2362 * to the old owner(s)).
2363 */
2364 importance_cleared = 1;
2365 ipc_importance_clean(kmsg);
2366 break;
2367
2368 case MACH_MSG_SUCCESS:
2369 default:
2370 break;
2371 }
2372 #if IMPORTANCE_TRACE
2373 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_MSG, IMP_MSG_SEND)) | DBG_FUNC_END,
2374 task_pid(current_task()), sender_pid, imp_msgh_id, importance_cleared, 0);
2375 #endif /* IMPORTANCE_TRACE */
2376 }
2377 #endif /* IMPORTANCE_INHERITANCE */
2378
2379 /*
2380 * If the port has been destroyed while we wait, treat the message
2381 * as a successful delivery (like we do for an inactive port).
2382 */
2383 if (error == MACH_SEND_INVALID_DEST) {
2384 #if MACH_FLIPC
2385 if (MACH_NODE_VALID(kmsg->ikm_node) && FPORT_VALID(port->ip_messages.imq_fport)) {
2386 flipc_msg_ack(kmsg->ikm_node, &port->ip_messages, FALSE);
2387 }
2388 #endif
2389 ip_release(port); /* JMM - Future: release right, not just ref */
2390 kmsg->ikm_header->msgh_remote_port = MACH_PORT_NULL;
2391 ipc_kmsg_destroy(kmsg);
2392 KDBG(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_KMSG_INFO) | DBG_FUNC_END, MACH_SEND_INVALID_DEST);
2393 return MACH_MSG_SUCCESS;
2394 }
2395
2396 if (error != MACH_MSG_SUCCESS && kernel_reply) {
2397 /*
2398 * Kernel reply messages that fail can't be allowed to
2399 * pseudo-receive on error conditions. We need to just treat
2400 * the message as a successful delivery.
2401 */
2402 #if MACH_FLIPC
2403 if (MACH_NODE_VALID(kmsg->ikm_node) && FPORT_VALID(port->ip_messages.imq_fport)) {
2404 flipc_msg_ack(kmsg->ikm_node, &port->ip_messages, FALSE);
2405 }
2406 #endif
2407 ip_release(port); /* JMM - Future: release right, not just ref */
2408 kmsg->ikm_header->msgh_remote_port = MACH_PORT_NULL;
2409 ipc_kmsg_destroy(kmsg);
2410 KDBG(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_KMSG_INFO) | DBG_FUNC_END, error);
2411 return MACH_MSG_SUCCESS;
2412 }
2413 return error;
2414 }
2415
2416 /*
2417 * Routine: ipc_kmsg_put
2418 * Purpose:
2419 * Copies a message buffer to a user message.
2420 * Copies only the specified number of bytes.
2421 * Frees the message buffer.
2422 * Conditions:
2423 * Nothing locked. The message buffer must have clean
2424 * header fields.
2425 * Returns:
2426 * MACH_MSG_SUCCESS Copied data out of message buffer.
2427 * MACH_RCV_INVALID_DATA Couldn't copy to user message.
2428 */
2429
mach_msg_return_t
ipc_kmsg_put(
	ipc_kmsg_t kmsg,
	mach_msg_option_t option,
	mach_vm_address_t rcv_addr,
	mach_msg_size_t rcv_size,
	mach_msg_size_t trailer_size,
	mach_msg_size_t *sizep)
{
	/* total bytes to copy out: the message proper plus its appended trailer */
	mach_msg_size_t size = kmsg->ikm_header->msgh_size + trailer_size;
	mach_msg_return_t mr;

	DEBUG_IPC_KMSG_PRINT(kmsg, "ipc_kmsg_put()");


	DEBUG_KPRINT_SYSCALL_IPC("ipc_kmsg_put header:\n"
	    " size: 0x%.8x\n"
	    " bits: 0x%.8x\n"
	    " remote_port: %p\n"
	    " local_port: %p\n"
	    " voucher_port: 0x%.8x\n"
	    " id: %.8d\n",
	    kmsg->ikm_header->msgh_size,
	    kmsg->ikm_header->msgh_bits,
	    kmsg->ikm_header->msgh_remote_port,
	    kmsg->ikm_header->msgh_local_port,
	    kmsg->ikm_header->msgh_voucher_port,
	    kmsg->ikm_header->msgh_id);

#if defined(__LP64__)
	if (current_task() != kernel_task) { /* don't if receiver expects fully-cooked in-kernel msg; */
		/*
		 * A user receiver expects the 32-bit "legacy" header layout,
		 * which is LEGACY_HEADER_SIZE_DELTA bytes smaller than the
		 * in-kernel header.  The legacy header is built in place,
		 * starting that many bytes into the kernel header, so all
		 * fields are snapshotted into locals first and then written
		 * back at the new (overlapping) location.
		 */
		mach_msg_legacy_header_t *legacy_header =
		    (mach_msg_legacy_header_t *)((vm_offset_t)(kmsg->ikm_header) + LEGACY_HEADER_SIZE_DELTA);

		mach_msg_bits_t bits = kmsg->ikm_header->msgh_bits;
		mach_msg_size_t msg_size = kmsg->ikm_header->msgh_size;
		mach_port_name_t remote_port = CAST_MACH_PORT_TO_NAME(kmsg->ikm_header->msgh_remote_port);
		mach_port_name_t local_port = CAST_MACH_PORT_TO_NAME(kmsg->ikm_header->msgh_local_port);
		mach_port_name_t voucher_port = kmsg->ikm_header->msgh_voucher_port;
		mach_msg_id_t id = kmsg->ikm_header->msgh_id;

		legacy_header->msgh_id = id;
		legacy_header->msgh_local_port = local_port;
		legacy_header->msgh_remote_port = remote_port;
		legacy_header->msgh_voucher_port = voucher_port;
		/* the advertised size shrinks along with the header */
		legacy_header->msgh_size = msg_size - LEGACY_HEADER_SIZE_DELTA;
		legacy_header->msgh_bits = bits;

		size -= LEGACY_HEADER_SIZE_DELTA;
		kmsg->ikm_header = (mach_msg_header_t *)legacy_header;
	}
#endif

	/* unreachable if !DEBUG */
	__unreachable_ok_push
	if (DEBUG_KPRINT_SYSCALL_PREDICATE(DEBUG_KPRINT_SYSCALL_IPC_MASK)) {
		/* debug dump of the outgoing message, one 32-bit word per line */
		kprintf("ipc_kmsg_put header+body: %d\n", (size));
		uint32_t i;
		for (i = 0; i * 4 < size; i++) {
			kprintf("%.4x\n", ((uint32_t *)kmsg->ikm_header)[i]);
		}
		kprintf("type: %d\n", ((mach_msg_type_descriptor_t *)(((mach_msg_base_t *)kmsg->ikm_header) + 1))->type);
	}
	__unreachable_ok_pop

	/* Re-Compute target address if using stack-style delivery */
	if (option & MACH_RCV_STACK) {
		/* place the message flush against the end of the receive buffer */
		rcv_addr += rcv_size - size;
	}

	if (copyoutmsg((const char *) kmsg->ikm_header, rcv_addr, size)) {
		/* copyout failed: report no data delivered */
		mr = MACH_RCV_INVALID_DATA;
		size = 0;
	} else {
		mr = MACH_MSG_SUCCESS;
	}

	/* trace the kmsg/user-buffer link; suppress addresses in kernel range */
	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_KMSG_LINK) | DBG_FUNC_NONE,
	    (rcv_addr >= VM_MIN_KERNEL_AND_KEXT_ADDRESS ||
	    rcv_addr + size >= VM_MIN_KERNEL_AND_KEXT_ADDRESS) ? (uintptr_t)0 : (uintptr_t)rcv_addr,
	    VM_KERNEL_ADDRPERM((uintptr_t)kmsg),
	    1 /* this is on the receive/copyout path */,
	    0,
	    0);
	/* the kmsg buffer is consumed whether or not the copyout succeeded */
	ipc_kmsg_free(kmsg);

	if (sizep) {
		*sizep = size;
	}
	return mr;
}
2521
2522 /*
2523 * Routine: ipc_kmsg_put_to_kernel
2524 * Purpose:
2525 * Copies a message buffer to a kernel message.
2526 * Frees the message buffer.
2527 * No errors allowed.
2528 * Conditions:
2529 * Nothing locked.
2530 */
2531
2532 void
2533 ipc_kmsg_put_to_kernel(
2534 mach_msg_header_t *msg,
2535 ipc_kmsg_t kmsg,
2536 mach_msg_size_t size)
2537 {
2538 (void) memcpy((void *) msg, (const void *) kmsg->ikm_header, size);
2539
2540 ipc_kmsg_free(kmsg);
2541 }
2542
2543 static pthread_priority_compact_t
2544 ipc_get_current_thread_priority(void)
2545 {
2546 thread_t thread = current_thread();
2547 thread_qos_t qos;
2548 int relpri;
2549
2550 qos = thread_get_requested_qos(thread, &relpri);
2551 if (!qos) {
2552 qos = thread_user_promotion_qos_for_pri(thread->base_pri);
2553 relpri = 0;
2554 }
2555 return _pthread_priority_make_from_thread_qos(qos, relpri, 0);
2556 }
2557
/*
 * Capture the pthread priority and QoS override to carry on the kmsg,
 * and (for sync IPC) link the special reply port to the destination.
 */
static kern_return_t
ipc_kmsg_set_qos(
	ipc_kmsg_t kmsg,
	mach_msg_option_t options,
	mach_msg_priority_t priority)
{
	kern_return_t kr;
	ipc_port_t special_reply_port = kmsg->ikm_header->msgh_local_port;
	ipc_port_t dest_port = kmsg->ikm_header->msgh_remote_port;

	/*
	 * If the sender passed an explicit QoS-encoded priority with
	 * MACH_SEND_OVERRIDE, take qos/relpri/override straight from it.
	 */
	if ((options & MACH_SEND_OVERRIDE) &&
	    !mach_msg_priority_is_pthread_priority(priority)) {
		mach_msg_qos_t qos = mach_msg_priority_qos(priority);
		int relpri = mach_msg_priority_relpri(priority);
		/* NB: "overide" spelling matches the exported API name */
		mach_msg_qos_t ovr = mach_msg_priority_overide_qos(priority);

		kmsg->ikm_ppriority = _pthread_priority_make_from_thread_qos(qos, relpri, 0);
		kmsg->ikm_qos_override = MAX(qos, ovr);
	} else {
		/*
		 * Otherwise derive the priority from the message's voucher;
		 * failing that, from the sending thread (when
		 * MACH_SEND_PROPAGATE_QOS), or leave it unspecified.
		 */
		kr = ipc_get_pthpriority_from_kmsg_voucher(kmsg, &kmsg->ikm_ppriority);
		if (kr != KERN_SUCCESS) {
			if (options & MACH_SEND_PROPAGATE_QOS) {
				kmsg->ikm_ppriority = ipc_get_current_thread_priority();
			} else {
				kmsg->ikm_ppriority = MACH_MSG_PRIORITY_UNSPECIFIED;
			}
		}

		if (options & MACH_SEND_OVERRIDE) {
			/* in this branch, priority is pthread-priority encoded */
			mach_msg_qos_t qos = _pthread_priority_thread_qos(kmsg->ikm_ppriority);
			mach_msg_qos_t ovr = _pthread_priority_thread_qos(priority);
			kmsg->ikm_qos_override = MAX(qos, ovr);
		} else {
			kmsg->ikm_qos_override = _pthread_priority_thread_qos(kmsg->ikm_ppriority);
		}
	}

	kr = KERN_SUCCESS;

	/*
	 * For a sync-override send carrying a send-once reply right, link
	 * the special reply port to the destination port.
	 */
	if (IP_VALID(special_reply_port) &&
	    MACH_MSGH_BITS_LOCAL(kmsg->ikm_header->msgh_bits) == MACH_MSG_TYPE_PORT_SEND_ONCE) {
		if ((options & MACH_SEND_SYNC_OVERRIDE)) {
			boolean_t sync_bootstrap_checkin = !!(options & MACH_SEND_SYNC_BOOTSTRAP_CHECKIN);
			/*
			 * Link the destination port to special reply port and make sure that
			 * dest port has a send turnstile, else allocate one.
			 */
			ipc_port_link_special_reply_port(special_reply_port, dest_port, sync_bootstrap_checkin);
		}
	}
	return kr;
}
2610
2611 static inline void
2612 ipc_kmsg_allow_immovable_send(
2613 ipc_kmsg_t kmsg,
2614 ipc_entry_t dest_entry)
2615 {
2616 ipc_object_t object = dest_entry->ie_object;
2617 /*
2618 * If the dest port is a kobject, allow copyin of immovable send
2619 * rights in the message body to succeed
2620 */
2621 if (IO_VALID(object) && io_is_kobject(object)) {
2622 kmsg->ikm_flags |= IPC_KMSG_FLAGS_ALLOW_IMMOVABLE_SEND;
2623 }
2624 }
2625
2626 /*
2627 * Routine: ipc_kmsg_link_reply_context_locked
2628 * Purpose:
2629 * Link any required context from the sending voucher
2630 * to the reply port. The ipc_kmsg_copyin function will
2631 * enforce that the sender calls mach_msg in this context.
2632 * Conditions:
2633 * reply port is locked
2634 */
2635 static void
2636 ipc_kmsg_link_reply_context_locked(
2637 ipc_port_t reply_port,
2638 ipc_port_t voucher_port)
2639 {
2640 kern_return_t __assert_only kr;
2641 uint32_t persona_id = 0;
2642 ipc_voucher_t voucher;
2643
2644 ip_lock_held(reply_port);
2645
2646 if (!ip_active(reply_port)) {
2647 return;
2648 }
2649
2650 voucher = convert_port_to_voucher(voucher_port);
2651
2652 kr = bank_get_bank_ledger_thread_group_and_persona(voucher, NULL, NULL, &persona_id);
2653 assert(kr == KERN_SUCCESS);
2654 ipc_voucher_release(voucher);
2655
2656 if (persona_id == 0 || persona_id == PERSONA_ID_NONE) {
2657 /* there was no persona context to record */
2658 return;
2659 }
2660
2661 /*
2662 * Set the persona_id as the context on the reply port.
2663 * This will force the thread that replies to have adopted a voucher
2664 * with a matching persona.
2665 */
2666 reply_port->ip_reply_context = persona_id;
2667
2668 return;
2669 }
2670
/*
 *	Routine:	ipc_kmsg_validate_reply_port_locked
 *	Purpose:
 *		Validate that a port used as a strict reply port satisfies
 *		the invariants expected of an RPC reply right.
 *	Conditions:
 *		reply_port is locked.
 *	Returns:
 *		KERN_SUCCESS or KERN_INVALID_CAPABILITY.
 */
static kern_return_t
ipc_kmsg_validate_reply_port_locked(ipc_port_t reply_port, mach_msg_option_t options)
{
	ip_lock_held(reply_port);

	if (!ip_active(reply_port)) {
		/*
		 * Ideally, we would enforce that the reply receive right is
		 * active, but asynchronous XPC cancellation destroys the
		 * receive right, so we just have to return success here.
		 */
		return KERN_SUCCESS;
	}

	if (options & MACH_SEND_MSG) {
		/*
		 * If the reply port is active, then it should not be
		 * in-transit, and the receive right should be in the caller's
		 * IPC space.
		 */
		if (!reply_port->ip_receiver_name || reply_port->ip_receiver != current_task()->itk_space) {
			return KERN_INVALID_CAPABILITY;
		}

		/*
		 * A port used as a reply port in an RPC should have exactly 1
		 * extant send-once right which we either just made or are
		 * moving as part of the IPC.
		 */
		if (reply_port->ip_sorights != 1) {
			return KERN_INVALID_CAPABILITY;
		}
		/*
		 * XPC uses an extra send-right to keep the name of the reply
		 * right around through cancellation. That makes it harder to
		 * enforce a particular semantic here, so for now, we say that
		 * you can have a maximum of 1 send right (in addition to your
		 * send once right). In the future, it would be great to lock
		 * this down even further.
		 */
		if (reply_port->ip_srights > 1) {
			return KERN_INVALID_CAPABILITY;
		}

		/*
		 * The sender can also specify that the receive right should
		 * be immovable. Note that this check only applies to
		 * send-only operations. Combined send/receive or rcv-only
		 * operations can specify an immovable receive right by
		 * opt-ing into guarded descriptors (MACH_RCV_GUARDED_DESC)
		 * and using the MACH_MSG_STRICT_REPLY options flag.
		 */
		if (MACH_SEND_REPLY_IS_IMMOVABLE(options)) {
			if (!reply_port->ip_immovable_receive) {
				return KERN_INVALID_CAPABILITY;
			}
		}
	}

	/*
	 * don't enforce this yet: need a better way of indicating the
	 * receiver wants this...
	 */
#if 0
	if (MACH_RCV_WITH_IMMOVABLE_REPLY(options)) {
		if (!reply_port->ip_immovable_receive) {
			return KERN_INVALID_CAPABILITY;
		}
	}
#endif /* 0 */

	return KERN_SUCCESS;
}
2744
2745 /*
2746 * Routine: ipc_kmsg_validate_reply_context_locked
2747 * Purpose:
2748 * Validate that the current thread is running in the context
2749 * required by the destination port.
2750 * Conditions:
2751 * dest_port is locked
2752 * Returns:
2753 * MACH_MSG_SUCCESS on success.
2754 * On error, an EXC_GUARD exception is also raised.
2755 * This function *always* resets the port reply context.
2756 */
static mach_msg_return_t
ipc_kmsg_validate_reply_context_locked(
	mach_msg_option_t option,
	ipc_port_t dest_port,
	ipc_voucher_t voucher,
	mach_port_name_t voucher_name)
{
	/* unconditionally consume (reset) the port's reply context */
	uint32_t dest_ctx = dest_port->ip_reply_context;
	dest_port->ip_reply_context = 0;

	if (!ip_active(dest_port)) {
		/* dead destination: nothing to enforce */
		return MACH_MSG_SUCCESS;
	}

	/* a reply in a strict context requires a valid adopted voucher */
	if (voucher == IPC_VOUCHER_NULL || !MACH_PORT_VALID(voucher_name)) {
		if ((option & MACH_SEND_KERNEL) == 0) {
			/* raise EXC_GUARD for user senders only */
			mach_port_guard_exception(voucher_name, 0,
			    (MPG_FLAGS_STRICT_REPLY_INVALID_VOUCHER | dest_ctx),
			    kGUARD_EXC_STRICT_REPLY);
		}
		return MACH_SEND_INVALID_CONTEXT;
	}

	kern_return_t __assert_only kr;
	uint32_t persona_id = 0;
	kr = bank_get_bank_ledger_thread_group_and_persona(voucher, NULL, NULL, &persona_id);
	assert(kr == KERN_SUCCESS);

	/* the sender's voucher persona must match the recorded reply context */
	if (dest_ctx != persona_id) {
		if ((option & MACH_SEND_KERNEL) == 0) {
			mach_port_guard_exception(voucher_name, 0,
			    (MPG_FLAGS_STRICT_REPLY_MISMATCHED_PERSONA | ((((uint64_t)persona_id << 32) & MPG_FLAGS_STRICT_REPLY_MASK) | dest_ctx)),
			    kGUARD_EXC_STRICT_REPLY);
		}
		return MACH_SEND_INVALID_CONTEXT;
	}

	return MACH_MSG_SUCCESS;
}
2796
2797 /*
2798 * Routine: ipc_kmsg_copyin_header
2799 * Purpose:
2800 * "Copy-in" port rights in the header of a message.
2801 * Operates atomically; if it doesn't succeed the
2802 * message header and the space are left untouched.
2803 * If it does succeed the remote/local port fields
2804 * contain object pointers instead of port names,
2805 * and the bits field is updated. The destination port
2806 * will be a valid port pointer.
2807 *
2808 * Conditions:
2809 * Nothing locked.
2810 * Returns:
2811 * MACH_MSG_SUCCESS Successful copyin.
2812 * MACH_SEND_INVALID_HEADER
2813 * Illegal value in the message header bits.
2814 * MACH_SEND_INVALID_DEST The space is dead.
2815 * MACH_SEND_INVALID_DEST Can't copyin destination port.
2816 * (Either KERN_INVALID_NAME or KERN_INVALID_RIGHT.)
2817 * MACH_SEND_INVALID_REPLY Can't copyin reply port.
2818 * (Either KERN_INVALID_NAME or KERN_INVALID_RIGHT.)
2819 */
2820
2821 mach_msg_return_t
2822 ipc_kmsg_copyin_header(
2823 ipc_kmsg_t kmsg,
2824 ipc_space_t space,
2825 mach_msg_priority_t priority,
2826 mach_msg_option_t *optionp)
2827 {
2828 mach_msg_header_t *msg = kmsg->ikm_header;
2829 mach_msg_bits_t mbits = msg->msgh_bits & MACH_MSGH_BITS_USER;
2830 mach_port_name_t dest_name = CAST_MACH_PORT_TO_NAME(msg->msgh_remote_port);
2831 mach_port_name_t reply_name = CAST_MACH_PORT_TO_NAME(msg->msgh_local_port);
2832 mach_port_name_t voucher_name = MACH_PORT_NULL;
2833 kern_return_t kr;
2834
2835 mach_msg_type_name_t dest_type = MACH_MSGH_BITS_REMOTE(mbits);
2836 mach_msg_type_name_t reply_type = MACH_MSGH_BITS_LOCAL(mbits);
2837 mach_msg_type_name_t voucher_type = MACH_MSGH_BITS_VOUCHER(mbits);
2838 ipc_object_t dest_port = IO_NULL;
2839 ipc_object_t reply_port = IO_NULL;
2840 ipc_port_t dest_soright = IP_NULL;
2841 ipc_port_t reply_soright = IP_NULL;
2842 ipc_port_t voucher_soright = IP_NULL;
2843 ipc_port_t release_port = IP_NULL;
2844 ipc_port_t voucher_port = IP_NULL;
2845 ipc_port_t voucher_release_port = IP_NULL;
2846 ipc_entry_t dest_entry = IE_NULL;
2847 ipc_entry_t reply_entry = IE_NULL;
2848 ipc_entry_t voucher_entry = IE_NULL;
2849
2850 int assertcnt = 0;
2851 #if IMPORTANCE_INHERITANCE
2852 boolean_t needboost = FALSE;
2853 #endif /* IMPORTANCE_INHERITANCE */
2854
2855 if ((mbits != msg->msgh_bits) ||
2856 (!MACH_MSG_TYPE_PORT_ANY_SEND(dest_type)) ||
2857 ((reply_type == 0) ?
2858 (reply_name != MACH_PORT_NULL) :
2859 !MACH_MSG_TYPE_PORT_ANY_SEND(reply_type))) {
2860 return MACH_SEND_INVALID_HEADER;
2861 }
2862
2863 if (!MACH_PORT_VALID(dest_name)) {
2864 return MACH_SEND_INVALID_DEST;
2865 }
2866
2867 is_write_lock(space);
2868 if (!is_active(space)) {
2869 is_write_unlock(space);
2870 return MACH_SEND_INVALID_DEST;
2871 }
2872 /* space locked and active */
2873
2874 /*
2875 * If there is a voucher specified, make sure the disposition is
2876 * valid and the entry actually refers to a voucher port. Don't
2877 * actually copy in until we validate destination and reply.
2878 */
2879 if (voucher_type != MACH_MSGH_BITS_ZERO) {
2880 voucher_name = msg->msgh_voucher_port;
2881
2882 if (voucher_name == MACH_PORT_DEAD ||
2883 (voucher_type != MACH_MSG_TYPE_MOVE_SEND &&
2884 voucher_type != MACH_MSG_TYPE_COPY_SEND)) {
2885 is_write_unlock(space);
2886 if ((*optionp & MACH_SEND_KERNEL) == 0) {
2887 mach_port_guard_exception(voucher_name, 0, 0, kGUARD_EXC_SEND_INVALID_VOUCHER);
2888 }
2889 return MACH_SEND_INVALID_VOUCHER;
2890 }
2891
2892 if (voucher_name != MACH_PORT_NULL) {
2893 voucher_entry = ipc_entry_lookup(space, voucher_name);
2894 if (voucher_entry == IE_NULL ||
2895 (voucher_entry->ie_bits & MACH_PORT_TYPE_SEND) == 0 ||
2896 io_kotype(voucher_entry->ie_object) != IKOT_VOUCHER) {
2897 is_write_unlock(space);
2898 if ((*optionp & MACH_SEND_KERNEL) == 0) {
2899 mach_port_guard_exception(voucher_name, 0, 0, kGUARD_EXC_SEND_INVALID_VOUCHER);
2900 }
2901 return MACH_SEND_INVALID_VOUCHER;
2902 }
2903 } else {
2904 voucher_type = MACH_MSG_TYPE_MOVE_SEND;
2905 }
2906 }
2907
2908 if (enforce_strict_reply && MACH_SEND_WITH_STRICT_REPLY(*optionp) &&
2909 (!MACH_PORT_VALID(reply_name) ||
2910 ((reply_type != MACH_MSG_TYPE_MAKE_SEND_ONCE) && (reply_type != MACH_MSG_TYPE_MOVE_SEND_ONCE))
2911 )) {
2912 /*
2913 * The caller cannot enforce a reply context with an invalid
2914 * reply port name, or a non-send_once reply disposition.
2915 */
2916 is_write_unlock(space);
2917 if ((*optionp & MACH_SEND_KERNEL) == 0) {
2918 mach_port_guard_exception(reply_name, 0,
2919 (MPG_FLAGS_STRICT_REPLY_INVALID_REPLY_DISP | reply_type),
2920 kGUARD_EXC_STRICT_REPLY);
2921 }
2922 return MACH_SEND_INVALID_REPLY;
2923 }
2924
2925 /*
2926 * Handle combinations of validating destination and reply; along
2927 * with copying in destination, reply, and voucher in an atomic way.
2928 */
2929
2930 if (dest_name == voucher_name) {
2931 /*
2932 * If the destination name is the same as the voucher name,
2933 * the voucher_entry must already be known. Either that or
2934 * the destination name is MACH_PORT_NULL (i.e. invalid).
2935 */
2936 dest_entry = voucher_entry;
2937 if (dest_entry == IE_NULL) {
2938 goto invalid_dest;
2939 }
2940 /* Check if dest port allows immovable send rights to be sent in the kmsg body */
2941 ipc_kmsg_allow_immovable_send(kmsg, dest_entry);
2942
2943 /*
2944 * Make sure a future copyin of the reply port will succeed.
2945 * Once we start copying in the dest/voucher pair, we can't
2946 * back out.
2947 */
2948 if (MACH_PORT_VALID(reply_name)) {
2949 assert(reply_type != 0); /* because reply_name not null */
2950
2951 /* It is just WRONG if dest, voucher, and reply are all the same. */
2952 if (voucher_name == reply_name) {
2953 goto invalid_reply;
2954 }
2955 reply_entry = ipc_entry_lookup(space, reply_name);
2956 if (reply_entry == IE_NULL) {
2957 goto invalid_reply;
2958 }
2959 assert(dest_entry != reply_entry); /* names are not equal */
2960 if (!ipc_right_copyin_check_reply(space, reply_name, reply_entry, reply_type)) {
2961 goto invalid_reply;
2962 }
2963 }
2964
2965 /*
2966 * Do the joint copyin of the dest disposition and
2967 * voucher disposition from the one entry/port. We
2968 * already validated that the voucher copyin would
2969 * succeed (above). So, any failure in combining
2970 * the copyins can be blamed on the destination.
2971 */
2972 kr = ipc_right_copyin_two(space, dest_name, dest_entry,
2973 dest_type, voucher_type, &dest_port, &dest_soright,
2974 &release_port);
2975 if (kr != KERN_SUCCESS) {
2976 assert(kr != KERN_INVALID_CAPABILITY);
2977 goto invalid_dest;
2978 }
2979 voucher_port = ip_object_to_port(dest_port);
2980
2981 /*
2982 * could not have been one of these dispositions,
2983 * validated the port was a true kernel voucher port above,
2984 * AND was successfully able to copyin both dest and voucher.
2985 */
2986 assert(dest_type != MACH_MSG_TYPE_MAKE_SEND);
2987 assert(dest_type != MACH_MSG_TYPE_MAKE_SEND_ONCE);
2988 assert(dest_type != MACH_MSG_TYPE_MOVE_SEND_ONCE);
2989
2990 /*
2991 * Perform the delayed reply right copyin (guaranteed success).
2992 */
2993 if (reply_entry != IE_NULL) {
2994 kr = ipc_right_copyin(space, reply_name, reply_entry,
2995 reply_type, IPC_RIGHT_COPYIN_FLAGS_DEADOK,
2996 &reply_port, &reply_soright,
2997 &release_port, &assertcnt, 0, NULL);
2998 assert(assertcnt == 0);
2999 assert(kr == KERN_SUCCESS);
3000 }
3001 } else {
3002 if (dest_name == reply_name) {
3003 /*
3004 * Destination and reply ports are the same!
3005 * This is very similar to the case where the
3006 * destination and voucher ports were the same
3007 * (except the reply port disposition is not
3008 * previously validated).
3009 */
3010 dest_entry = ipc_entry_lookup(space, dest_name);
3011 if (dest_entry == IE_NULL) {
3012 goto invalid_dest;
3013 }
3014 ipc_kmsg_allow_immovable_send(kmsg, dest_entry);
3015
3016 reply_entry = dest_entry;
3017 assert(reply_type != 0); /* because name not null */
3018
3019 /*
3020 * Pre-validate that the reply right can be copied in by itself
3021 */
3022 if (!ipc_right_copyin_check_reply(space, reply_name, reply_entry, reply_type)) {
3023 goto invalid_reply;
3024 }
3025
3026 /*
3027 * Do the joint copyin of the dest disposition and
3028 * reply disposition from the one entry/port.
3029 */
3030 kr = ipc_right_copyin_two(space, dest_name, dest_entry,
3031 dest_type, reply_type, &dest_port, &dest_soright,
3032 &release_port);
3033 if (kr == KERN_INVALID_CAPABILITY) {
3034 goto invalid_reply;
3035 } else if (kr != KERN_SUCCESS) {
3036 goto invalid_dest;
3037 }
3038 reply_port = dest_port;
3039 } else {
3040 /*
3041 * Handle destination and reply independently, as
3042 * they are independent entries (even if the entries
3043 * refer to the same port).
3044 *
3045 * This can be the tough case to make atomic.
3046 *
3047 * The difficult problem is serializing with port death.
3048 * The bad case is when dest_port dies after its copyin,
3049 * reply_port dies before its copyin, and dest_port dies before
3050 * reply_port. Then the copyins operated as if dest_port was
3051 * alive and reply_port was dead, which shouldn't have happened
3052 * because they died in the other order.
3053 *
3054 * Note that it is easy for a user task to tell if
3055 * a copyin happened before or after a port died.
3056 * If a port dies before copyin, a dead-name notification
3057 * is generated and the dead name's urefs are incremented,
3058 * and if the copyin happens first, a port-deleted
3059 * notification is generated.
3060 *
3061 * Even so, avoiding that potentially detectable race is too
3062 * expensive - and no known code cares about it. So, we just
3063 * do the expedient thing and copy them in one after the other.
3064 */
3065
3066 dest_entry = ipc_entry_lookup(space, dest_name);
3067 if (dest_entry == IE_NULL) {
3068 goto invalid_dest;
3069 }
3070 assert(dest_entry != voucher_entry);
3071 ipc_kmsg_allow_immovable_send(kmsg, dest_entry);
3072
3073 /*
3074 * Make sure reply port entry is valid before dest copyin.
3075 */
3076 if (MACH_PORT_VALID(reply_name)) {
3077 if (reply_name == voucher_name) {
3078 goto invalid_reply;
3079 }
3080 reply_entry = ipc_entry_lookup(space, reply_name);
3081 if (reply_entry == IE_NULL) {
3082 goto invalid_reply;
3083 }
3084 assert(dest_entry != reply_entry); /* names are not equal */
3085 assert(reply_type != 0); /* because reply_name not null */
3086
3087 if (!ipc_right_copyin_check_reply(space, reply_name, reply_entry, reply_type)) {
3088 goto invalid_reply;
3089 }
3090 }
3091
3092 /*
3093 * copyin the destination.
3094 */
3095 kr = ipc_right_copyin(space, dest_name, dest_entry,
3096 dest_type, (IPC_RIGHT_COPYIN_FLAGS_ALLOW_IMMOVABLE_SEND |
3097 IPC_RIGHT_COPYIN_FLAGS_ALLOW_DEAD_SEND_ONCE),
3098 &dest_port, &dest_soright,
3099 &release_port, &assertcnt, 0, NULL);
3100 assert(assertcnt == 0);
3101 if (kr != KERN_SUCCESS) {
3102 goto invalid_dest;
3103 }
3104 assert(IO_VALID(dest_port));
3105 assert(!IP_VALID(release_port));
3106
3107 /*
3108 * Copyin the pre-validated reply right.
3109 * It's OK if the reply right has gone dead in the meantime.
3110 */
3111 if (MACH_PORT_VALID(reply_name)) {
3112 kr = ipc_right_copyin(space, reply_name, reply_entry,
3113 reply_type, IPC_RIGHT_COPYIN_FLAGS_DEADOK,
3114 &reply_port, &reply_soright,
3115 &release_port, &assertcnt, 0, NULL);
3116 assert(assertcnt == 0);
3117 assert(kr == KERN_SUCCESS);
3118 } else {
3119 /* convert invalid name to equivalent ipc_object type */
3120 reply_port = ip_to_object(CAST_MACH_NAME_TO_PORT(reply_name));
3121 }
3122 }
3123
3124 /*
3125 * Finally can copyin the voucher right now that dest and reply
3126 * are fully copied in (guaranteed success).
3127 */
3128 if (IE_NULL != voucher_entry) {
3129 kr = ipc_right_copyin(space, voucher_name, voucher_entry,
3130 voucher_type, IPC_RIGHT_COPYIN_FLAGS_NONE,
3131 (ipc_object_t *)&voucher_port,
3132 &voucher_soright,
3133 &voucher_release_port,
3134 &assertcnt, 0, NULL);
3135 assert(assertcnt == 0);
3136 assert(KERN_SUCCESS == kr);
3137 assert(IP_VALID(voucher_port));
3138 require_ip_active(voucher_port);
3139 }
3140 }
3141
3142 /*
3143 * The entries might need to be deallocated.
3144 *
3145 * Each entry should be deallocated only once,
3146 * even if it was specified in more than one slot in the header.
3147 * Note that dest can be the same entry as reply or voucher,
3148 * but reply and voucher must be distinct entries.
3149 */
3150 assert(IE_NULL != dest_entry);
3151 if (IE_NULL != reply_entry) {
3152 assert(reply_entry != voucher_entry);
3153 }
3154
3155 if (IE_BITS_TYPE(dest_entry->ie_bits) == MACH_PORT_TYPE_NONE) {
3156 ipc_entry_dealloc(space, dest_name, dest_entry);
3157
3158 if (dest_entry == reply_entry) {
3159 reply_entry = IE_NULL;
3160 }
3161
3162 if (dest_entry == voucher_entry) {
3163 voucher_entry = IE_NULL;
3164 }
3165
3166 dest_entry = IE_NULL;
3167 }
3168 if (IE_NULL != reply_entry &&
3169 IE_BITS_TYPE(reply_entry->ie_bits) == MACH_PORT_TYPE_NONE) {
3170 ipc_entry_dealloc(space, reply_name, reply_entry);
3171 reply_entry = IE_NULL;
3172 }
3173 if (IE_NULL != voucher_entry &&
3174 IE_BITS_TYPE(voucher_entry->ie_bits) == MACH_PORT_TYPE_NONE) {
3175 ipc_entry_dealloc(space, voucher_name, voucher_entry);
3176 voucher_entry = IE_NULL;
3177 }
3178
3179 dest_type = ipc_object_copyin_type(dest_type);
3180 reply_type = ipc_object_copyin_type(reply_type);
3181
3182 /*
3183 * JMM - Without rdar://problem/6275821, this is the last place we can
3184 * re-arm the send-possible notifications. It may trigger unexpectedly
3185 * early (send may NOT have failed), but better than missing. We assure
3186 * we won't miss by forcing MACH_SEND_ALWAYS if we got past arming.
3187 */
3188 if (((*optionp & MACH_SEND_NOTIFY) != 0) &&
3189 dest_type != MACH_MSG_TYPE_PORT_SEND_ONCE &&
3190 dest_entry != IE_NULL && dest_entry->ie_request != IE_REQ_NONE) {
3191 ipc_port_t dport = ip_object_to_port(dest_port);
3192
3193 assert(dport != IP_NULL);
3194 ip_lock(dport);
3195 if (ip_active(dport) && dport->ip_receiver != ipc_space_kernel) {
3196 if (ip_full(dport)) {
3197 #if IMPORTANCE_INHERITANCE
3198 needboost = ipc_port_request_sparm(dport, dest_name,
3199 dest_entry->ie_request,
3200 *optionp,
3201 priority);
3202 if (needboost == FALSE) {
3203 ip_unlock(dport);
3204 }
3205 #else
3206 ipc_port_request_sparm(dport, dest_name,
3207 dest_entry->ie_request,
3208 *optionp,
3209 priority);
3210 ip_unlock(dport);
3211 #endif /* IMPORTANCE_INHERITANCE */
3212 } else {
3213 *optionp |= MACH_SEND_ALWAYS;
3214 ip_unlock(dport);
3215 }
3216 } else {
3217 ip_unlock(dport);
3218 }
3219 }
3220
3221 is_write_unlock(space);
3222
3223 #if IMPORTANCE_INHERITANCE
3224 /*
3225 * If our request is the first boosting send-possible
3226 * notification this cycle, push the boost down the
3227 * destination port.
3228 */
3229 if (needboost == TRUE) {
3230 ipc_port_t dport = ip_object_to_port(dest_port);
3231
3232 /* dport still locked from above */
3233 if (ipc_port_importance_delta(dport, IPID_OPTION_SENDPOSSIBLE, 1) == FALSE) {
3234 ip_unlock(dport);
3235 }
3236 }
3237 #endif /* IMPORTANCE_INHERITANCE */
3238
3239 if (dest_soright != IP_NULL) {
3240 ipc_notify_port_deleted(dest_soright, dest_name);
3241 }
3242 if (reply_soright != IP_NULL) {
3243 ipc_notify_port_deleted(reply_soright, reply_name);
3244 }
3245 if (voucher_soright != IP_NULL) {
3246 ipc_notify_port_deleted(voucher_soright, voucher_name);
3247 }
3248
3249 /*
3250 * No room to store voucher port in in-kernel msg header,
3251 * so we store it back in the kmsg itself. Extract the
3252 * qos, and apply any override before we enqueue the kmsg.
3253 */
3254 if (IP_VALID(voucher_port)) {
3255 kmsg->ikm_voucher = voucher_port;
3256 voucher_type = MACH_MSG_TYPE_MOVE_SEND;
3257 }
3258
3259 msg->msgh_bits = MACH_MSGH_BITS_SET(dest_type, reply_type, voucher_type, mbits);
3260 msg->msgh_remote_port = ip_object_to_port(dest_port);
3261 msg->msgh_local_port = ip_object_to_port(reply_port);
3262
3263 /* capture the qos value(s) for the kmsg */
3264 ipc_kmsg_set_qos(kmsg, *optionp, priority);
3265
3266 if (release_port != IP_NULL) {
3267 ip_release(release_port);
3268 }
3269
3270 if (voucher_release_port != IP_NULL) {
3271 ip_release(voucher_release_port);
3272 }
3273
3274 if (enforce_strict_reply && MACH_SEND_WITH_STRICT_REPLY(*optionp) && IP_VALID(msg->msgh_local_port)) {
3275 /*
3276 * We've already validated that the reply disposition is a
3277 * [make/move] send-once. Ideally, we should enforce that the
3278 * reply port is also not dead, but XPC asynchronous
3279 * cancellation can make the reply port dead before we
3280 * actually make it to the mach_msg send.
3281 *
3282 * Here, we ensure that if we have a non-dead reply port, then
3283 * the reply port's receive right should not be in-transit,
3284 * and should live in the caller's IPC space.
3285 */
3286 ipc_port_t rport = msg->msgh_local_port;
3287 ip_lock(rport);
3288 kr = ipc_kmsg_validate_reply_port_locked(rport, *optionp);
3289 ip_unlock(rport);
3290 if (kr != KERN_SUCCESS) {
3291 /*
3292 * no descriptors have been copied in yet, but the
3293 * full header has been copied in: clean it up
3294 */
3295 ipc_kmsg_clean_partial(kmsg, 0, NULL, 0, 0);
3296 if ((*optionp & MACH_SEND_KERNEL) == 0) {
3297 mach_port_guard_exception(reply_name, 0,
3298 (MPG_FLAGS_STRICT_REPLY_INVALID_REPLY_PORT | kr),
3299 kGUARD_EXC_STRICT_REPLY);
3300 }
3301 return MACH_SEND_INVALID_REPLY;
3302 }
3303 }
3304
3305 return MACH_MSG_SUCCESS;
3306
3307 invalid_reply:
3308 is_write_unlock(space);
3309
3310 if (release_port != IP_NULL) {
3311 ip_release(release_port);
3312 }
3313
3314 assert(voucher_port == IP_NULL);
3315 assert(voucher_soright == IP_NULL);
3316
3317 if ((*optionp & MACH_SEND_KERNEL) == 0) {
3318 mach_port_guard_exception(reply_name, 0, 0, kGUARD_EXC_SEND_INVALID_REPLY);
3319 }
3320 return MACH_SEND_INVALID_REPLY;
3321
3322 invalid_dest:
3323 is_write_unlock(space);
3324
3325 if (release_port != IP_NULL) {
3326 ip_release(release_port);
3327 }
3328
3329 if (reply_soright != IP_NULL) {
3330 ipc_notify_port_deleted(reply_soright, reply_name);
3331 }
3332
3333 assert(voucher_port == IP_NULL);
3334 assert(voucher_soright == IP_NULL);
3335
3336 return MACH_SEND_INVALID_DEST;
3337 }
3338
/*
 *	Routine:	ipc_kmsg_copyin_port_descriptor
 *	Purpose:
 *		Copy in one user port descriptor (legacy 32-bit user layout)
 *		and convert it to the kernel's internal representation,
 *		moving/copying the named port right into the message.
 *	Conditions:
 *		Nothing locked.
 *	Returns:
 *		Pointer to the next user descriptor on success.
 *		NULL on failure, with *mr set to MACH_SEND_INVALID_RIGHT.
 */
static mach_msg_descriptor_t *
ipc_kmsg_copyin_port_descriptor(
	mach_msg_port_descriptor_t *dsc,
	mach_msg_legacy_port_descriptor_t *user_dsc_in,
	ipc_space_t space,
	ipc_object_t dest,
	ipc_kmsg_t kmsg,
	mach_msg_option_t *optionp,
	mach_msg_return_t *mr)
{
	mach_msg_legacy_port_descriptor_t user_dsc = *user_dsc_in;
	mach_msg_type_name_t user_disp;
	mach_msg_type_name_t result_disp;
	mach_port_name_t name;
	ipc_object_t object;

	/*
	 * Translate the user-requested disposition (MOVE/COPY/MAKE
	 * variants) into the right type the kmsg will actually carry.
	 */
	user_disp = user_dsc.disposition;
	result_disp = ipc_object_copyin_type(user_disp);

	name = (mach_port_name_t)user_dsc.name;
	if (MACH_PORT_VALID(name)) {
		kern_return_t kr = ipc_object_copyin(space, name, user_disp, &object, 0, NULL, kmsg->ikm_flags);
		if (kr != KERN_SUCCESS) {
			/* guard exception only for user senders that named a right they don't hold */
			if (((*optionp & MACH_SEND_KERNEL) == 0) && (kr == KERN_INVALID_RIGHT)) {
				mach_port_guard_exception(name, 0, 0, kGUARD_EXC_SEND_INVALID_RIGHT);
			}
			*mr = MACH_SEND_INVALID_RIGHT;
			return NULL;
		}

		/*
		 * Moving a receive right: mark the message circular if this
		 * would make the destination reachable from itself.
		 */
		if ((result_disp == MACH_MSG_TYPE_PORT_RECEIVE) &&
		    ipc_port_check_circularity(ip_object_to_port(object),
		    ip_object_to_port(dest))) {
			kmsg->ikm_header->msgh_bits |= MACH_MSGH_BITS_CIRCULAR;
		}
		dsc->name = ip_object_to_port(object);
	} else {
		/* MACH_PORT_NULL / MACH_PORT_DEAD: pass the pseudo-name through unchanged */
		dsc->name = CAST_MACH_NAME_TO_PORT(name);
	}
	dsc->disposition = result_disp;
	dsc->type = MACH_MSG_PORT_DESCRIPTOR;

	dsc->pad_end = 0; // debug, unnecessary

	/* user descriptors are contiguous; next one immediately follows */
	return (mach_msg_descriptor_t *)(user_dsc_in + 1);
}
3385
/*
 *	Routine:	ipc_kmsg_copyin_ool_descriptor
 *	Purpose:
 *		Copy in one out-of-line memory descriptor, capturing the
 *		user data as a vm_map_copy_t.  Large physical copies with
 *		no deallocate are copied into the pre-reserved pageable
 *		ipc copy map region (*paddr); everything else goes through
 *		vm_map_copyin() (optimized physical copy for small data,
 *		virtual copy otherwise).
 *	Conditions:
 *		Nothing locked.  *paddr/*space_needed track the caller's
 *		pre-allocated ipc_kernel_copy_map reservation.
 *	Returns:
 *		Pointer to the next user descriptor on success.
 *		NULL on failure, with *mr set (MACH_SEND_INVALID_MEMORY or
 *		MACH_MSG_VM_KERNEL).
 */
static mach_msg_descriptor_t *
ipc_kmsg_copyin_ool_descriptor(
	mach_msg_ool_descriptor_t *dsc,
	mach_msg_descriptor_t *user_dsc,
	int is_64bit,
	vm_offset_t *paddr,
	vm_map_copy_t *copy,
	vm_size_t *space_needed,
	vm_map_t map,
	__unused mach_msg_option_t *optionp,
	mach_msg_return_t *mr)
{
	vm_size_t length;
	boolean_t dealloc;
	mach_msg_copy_options_t copy_options;
	mach_vm_offset_t addr;
	mach_msg_descriptor_type_t dsc_type;

	/* unpack the user descriptor; 64- and 32-bit layouts differ */
	if (is_64bit) {
		mach_msg_ool_descriptor64_t *user_ool_dsc = (typeof(user_ool_dsc))user_dsc;

		addr = (mach_vm_offset_t) user_ool_dsc->address;
		length = user_ool_dsc->size;
		dealloc = user_ool_dsc->deallocate;
		copy_options = user_ool_dsc->copy;
		dsc_type = user_ool_dsc->type;

		user_dsc = (typeof(user_dsc))(user_ool_dsc + 1);
	} else {
		mach_msg_ool_descriptor32_t *user_ool_dsc = (typeof(user_ool_dsc))user_dsc;

		addr = CAST_USER_ADDR_T(user_ool_dsc->address);
		dealloc = user_ool_dsc->deallocate;
		copy_options = user_ool_dsc->copy;
		dsc_type = user_ool_dsc->type;
		length = user_ool_dsc->size;

		user_dsc = (typeof(user_dsc))(user_ool_dsc + 1);
	}

	dsc->size = (mach_msg_size_t)length;
	dsc->deallocate = dealloc;
	dsc->copy = copy_options;
	dsc->type = dsc_type;

	if (length == 0) {
		dsc->address = NULL;
	} else if ((length >= MSG_OOL_SIZE_SMALL) &&
	    (copy_options == MACH_MSG_PHYSICAL_COPY) && !dealloc) {
		/*
		 * If the request is a physical copy and the source
		 * is not being deallocated, then allocate space
		 * in the kernel's pageable ipc copy map and copy
		 * the data in. The semantics guarantee that the
		 * data will have been physically copied before
		 * the send operation terminates. Thus if the data
		 * is not being deallocated, we must be prepared
		 * to page if the region is sufficiently large.
		 */
		if (copyin(addr, (char *)*paddr, length)) {
			*mr = MACH_SEND_INVALID_MEMORY;
			return NULL;
		}

		/*
		 * The kernel ipc copy map is marked no_zero_fill.
		 * If the transfer is not a page multiple, we need
		 * to zero fill the balance.
		 */
		if (!page_aligned(length)) {
			(void) memset((void *) (*paddr + length), 0,
			    round_page(length) - length);
		}
		/* steal the just-filled pages out of the copy map as a vm_map_copy_t */
		if (vm_map_copyin(ipc_kernel_copy_map, (vm_map_address_t)*paddr,
		    (vm_map_size_t)length, TRUE, copy) != KERN_SUCCESS) {
			*mr = MACH_MSG_VM_KERNEL;
			return NULL;
		}
		dsc->address = (void *)*copy;
		/* consume this descriptor's share of the reservation */
		*paddr += round_page(length);
		*space_needed -= round_page(length);
	} else {
		/*
		 * Make a vm_map_copy_t of the data. If the
		 * data is small, this will do an optimized physical
		 * copy. Otherwise, it will do a virtual copy.
		 *
		 * NOTE: A virtual copy is OK if the original is being
		 * deallocated, even if a physical copy was requested.
		 */
		kern_return_t kr = vm_map_copyin(map, addr,
		    (vm_map_size_t)length, dealloc, copy);
		if (kr != KERN_SUCCESS) {
			*mr = (kr == KERN_RESOURCE_SHORTAGE) ?
			    MACH_MSG_VM_KERNEL :
			    MACH_SEND_INVALID_MEMORY;
			return NULL;
		}
		dsc->address = (void *)*copy;
	}

	return user_dsc;
}
3489
/*
 *	Routine:	ipc_kmsg_copyin_ool_ports_descriptor
 *	Purpose:
 *		Copy in one out-of-line ports descriptor: fetch the array
 *		of port names from user memory, copy in each named right,
 *		and store the resulting ipc_object_t array in kalloc'd
 *		kernel memory attached to the descriptor.
 *	Conditions:
 *		Nothing locked.
 *	Returns:
 *		Pointer to the next user descriptor on success.
 *		NULL on failure, with *mr set and any partially copied-in
 *		rights destroyed and the kalloc'd buffer freed.
 */
static mach_msg_descriptor_t *
ipc_kmsg_copyin_ool_ports_descriptor(
	mach_msg_ool_ports_descriptor_t *dsc,
	mach_msg_descriptor_t *user_dsc,
	int is_64bit,
	vm_map_t map,
	ipc_space_t space,
	ipc_object_t dest,
	ipc_kmsg_t kmsg,
	mach_msg_option_t *optionp,
	mach_msg_return_t *mr)
{
	void *data;
	ipc_object_t *objects;
	unsigned int i;
	mach_vm_offset_t addr;
	mach_msg_type_name_t user_disp;
	mach_msg_type_name_t result_disp;
	mach_msg_type_number_t count;
	mach_msg_copy_options_t copy_option;
	boolean_t deallocate;
	mach_msg_descriptor_type_t type;
	vm_size_t ports_length, names_length;

	/* unpack the user descriptor; 64- and 32-bit layouts differ */
	if (is_64bit) {
		mach_msg_ool_ports_descriptor64_t *user_ool_dsc = (typeof(user_ool_dsc))user_dsc;

		addr = (mach_vm_offset_t)user_ool_dsc->address;
		count = user_ool_dsc->count;
		deallocate = user_ool_dsc->deallocate;
		copy_option = user_ool_dsc->copy;
		user_disp = user_ool_dsc->disposition;
		type = user_ool_dsc->type;

		user_dsc = (typeof(user_dsc))(user_ool_dsc + 1);
	} else {
		mach_msg_ool_ports_descriptor32_t *user_ool_dsc = (typeof(user_ool_dsc))user_dsc;

		addr = CAST_USER_ADDR_T(user_ool_dsc->address);
		count = user_ool_dsc->count;
		deallocate = user_ool_dsc->deallocate;
		copy_option = user_ool_dsc->copy;
		user_disp = user_ool_dsc->disposition;
		type = user_ool_dsc->type;

		user_dsc = (typeof(user_dsc))(user_ool_dsc + 1);
	}

	dsc->deallocate = deallocate;
	dsc->copy = copy_option;
	dsc->type = type;
	dsc->count = count;
	dsc->address = NULL; /* for now */

	result_disp = ipc_object_copyin_type(user_disp);
	dsc->disposition = result_disp;

	/* We always do a 'physical copy', but you have to specify something valid */
	if (copy_option != MACH_MSG_PHYSICAL_COPY &&
	    copy_option != MACH_MSG_VIRTUAL_COPY) {
		*mr = MACH_SEND_INVALID_TYPE;
		return NULL;
	}

	/* calculate length of data in bytes, rounding up */

	/*
	 * ports_length sizes the kernel array of (pointer-width)
	 * ipc_object_t's; names_length sizes the user array of
	 * 32-bit names.  Both products are overflow-checked.
	 */
	if (os_mul_overflow(count, sizeof(mach_port_t), &ports_length)) {
		*mr = MACH_SEND_TOO_LARGE;
		return NULL;
	}

	if (os_mul_overflow(count, sizeof(mach_port_name_t), &names_length)) {
		*mr = MACH_SEND_TOO_LARGE;
		return NULL;
	}

	/* count == 0: nothing to copy in; descriptor keeps NULL address */
	if (ports_length == 0) {
		return user_dsc;
	}

	data = kalloc(ports_length);

	if (data == NULL) {
		*mr = MACH_SEND_NO_BUFFER;
		return NULL;
	}

	/*
	 * On LP64 the names (4 bytes each) fit in the tail half of the
	 * 8-byte-per-entry buffer, so they can be copied in first and
	 * overwritten in place by the growing objects array below.
	 */
#ifdef __LP64__
	mach_port_name_t *names = &((mach_port_name_t *)data)[count];
#else
	mach_port_name_t *names = ((mach_port_name_t *)data);
#endif

	if (copyinmap(map, addr, names, names_length) != KERN_SUCCESS) {
		kfree(data, ports_length);
		*mr = MACH_SEND_INVALID_MEMORY;
		return NULL;
	}

	/* caller asked us to release the user's copy of the name array */
	if (deallocate) {
		(void) mach_vm_deallocate(map, addr, (mach_vm_size_t)names_length);
	}

	objects = (ipc_object_t *) data;
	dsc->address = data;

	for (i = 0; i < count; i++) {
		mach_port_name_t name = names[i];
		ipc_object_t object;

		if (!MACH_PORT_VALID(name)) {
			/* NULL/DEAD pseudo-names pass through as-is */
			objects[i] = ip_to_object(CAST_MACH_NAME_TO_PORT(name));
			continue;
		}

		kern_return_t kr = ipc_object_copyin(space, name, user_disp, &object, 0, NULL, kmsg->ikm_flags);

		if (kr != KERN_SUCCESS) {
			unsigned int j;

			/* unwind: destroy the rights already copied in */
			for (j = 0; j < i; j++) {
				object = objects[j];
				if (IPC_OBJECT_VALID(object)) {
					ipc_object_destroy(object, result_disp);
				}
			}
			kfree(data, ports_length);
			dsc->address = NULL;
			if (((*optionp & MACH_SEND_KERNEL) == 0) && (kr == KERN_INVALID_RIGHT)) {
				mach_port_guard_exception(name, 0, 0, kGUARD_EXC_SEND_INVALID_RIGHT);
			}
			*mr = MACH_SEND_INVALID_RIGHT;
			return NULL;
		}

		/* same circularity check as for inline port descriptors */
		if ((dsc->disposition == MACH_MSG_TYPE_PORT_RECEIVE) &&
		    ipc_port_check_circularity(ip_object_to_port(object),
		    ip_object_to_port(dest))) {
			kmsg->ikm_header->msgh_bits |= MACH_MSGH_BITS_CIRCULAR;
		}

		objects[i] = object;
	}

	return user_dsc;
}
3636
/*
 *	Routine:	ipc_kmsg_copyin_guarded_port_descriptor
 *	Purpose:
 *		Copy in one guarded port descriptor, validating/consuming
 *		the guard (context + flags) as part of the copyin.  The
 *		caller (ipc_kmsg_copyin_body) has already verified that
 *		the disposition is MOVE_RECEIVE and the guard flags are
 *		within MACH_MSG_GUARD_FLAGS_MASK.
 *	Conditions:
 *		Nothing locked.
 *	Returns:
 *		Pointer to the next user descriptor on success.
 *		NULL on failure, with *mr set to MACH_SEND_INVALID_RIGHT.
 */
static mach_msg_descriptor_t *
ipc_kmsg_copyin_guarded_port_descriptor(
	mach_msg_guarded_port_descriptor_t *dsc,
	mach_msg_descriptor_t *user_addr,
	int is_64bit,
	ipc_space_t space,
	ipc_object_t dest,
	ipc_kmsg_t kmsg,
	mach_msg_option_t *optionp,
	mach_msg_return_t *mr)
{
	mach_msg_descriptor_t *user_dsc;
	mach_msg_type_name_t disp;
	mach_msg_type_name_t result_disp;
	mach_port_name_t name;
	mach_msg_guard_flags_t guard_flags;
	ipc_object_t object;
	mach_port_context_t context;

	/* unpack the user descriptor; 64- and 32-bit layouts differ */
	if (!is_64bit) {
		mach_msg_guarded_port_descriptor32_t *user_gp_dsc = (typeof(user_gp_dsc))user_addr;
		name = user_gp_dsc->name;
		guard_flags = user_gp_dsc->flags;
		disp = user_gp_dsc->disposition;
		context = user_gp_dsc->context;
		user_dsc = (mach_msg_descriptor_t *)(user_gp_dsc + 1);
	} else {
		mach_msg_guarded_port_descriptor64_t *user_gp_dsc = (typeof(user_gp_dsc))user_addr;
		name = user_gp_dsc->name;
		guard_flags = user_gp_dsc->flags;
		disp = user_gp_dsc->disposition;
		context = user_gp_dsc->context;
		user_dsc = (mach_msg_descriptor_t *)(user_gp_dsc + 1);
	}

	guard_flags &= MACH_MSG_GUARD_FLAGS_MASK;
	result_disp = ipc_object_copyin_type(disp);

	if (MACH_PORT_VALID(name)) {
		/* guard context/flags are handed to copyin for validation/unguarding */
		kern_return_t kr = ipc_object_copyin(space, name, disp, &object, context, &guard_flags, kmsg->ikm_flags);
		if (kr != KERN_SUCCESS) {
			if (((*optionp & MACH_SEND_KERNEL) == 0) && (kr == KERN_INVALID_RIGHT)) {
				mach_port_guard_exception(name, 0, 0, kGUARD_EXC_SEND_INVALID_RIGHT);
			}
			*mr = MACH_SEND_INVALID_RIGHT;
			return NULL;
		}

		/* moving a receive right: check for destination circularity */
		if ((result_disp == MACH_MSG_TYPE_PORT_RECEIVE) &&
		    ipc_port_check_circularity(ip_object_to_port(object),
		    ip_object_to_port(dest))) {
			kmsg->ikm_header->msgh_bits |= MACH_MSGH_BITS_CIRCULAR;
		}
		dsc->name = ip_object_to_port(object);
	} else {
		/* MACH_PORT_NULL / MACH_PORT_DEAD: pass the pseudo-name through */
		dsc->name = CAST_MACH_NAME_TO_PORT(name);
	}
	/* guard_flags may have been updated by ipc_object_copyin (e.g. unguard-on-send) */
	dsc->flags = guard_flags;
	dsc->disposition = result_disp;
	dsc->type = MACH_MSG_GUARDED_PORT_DESCRIPTOR;

#if __LP64__
	dsc->pad_end = 0; // debug, unnecessary
#endif

	return user_dsc;
}
3704
3705
3706 /*
3707 * Routine: ipc_kmsg_copyin_body
3708 * Purpose:
3709 * "Copy-in" port rights and out-of-line memory
3710 * in the message body.
3711 *
3712 * In all failure cases, the message is left holding
3713 * no rights or memory. However, the message buffer
3714 * is not deallocated. If successful, the message
3715 * contains a valid destination port.
3716 * Conditions:
3717 * Nothing locked.
3718 * Returns:
3719 * MACH_MSG_SUCCESS Successful copyin.
3720 * MACH_SEND_INVALID_MEMORY Can't grab out-of-line memory.
3721 * MACH_SEND_INVALID_RIGHT Can't copyin port right in body.
3722 * MACH_SEND_INVALID_TYPE Bad type specification.
3723 * MACH_SEND_MSG_TOO_SMALL Body is too small for types/data.
3724 * MACH_SEND_INVALID_RT_OOL_SIZE OOL Buffer too large for RT
3725 * MACH_MSG_INVALID_RT_DESCRIPTOR Dealloc and RT are incompatible
3726 * MACH_SEND_NO_GRANT_DEST Dest port doesn't accept ports in body
3727 */
3728
mach_msg_return_t
ipc_kmsg_copyin_body(
	ipc_kmsg_t kmsg,
	ipc_space_t space,
	vm_map_t map,
	mach_msg_option_t *optionp)
{
	ipc_object_t dest;
	mach_msg_body_t *body;
	mach_msg_descriptor_t *daddr, *naddr, *end;
	mach_msg_descriptor_t *user_addr, *kern_addr;
	mach_msg_type_number_t dsc_count;
	boolean_t is_task_64bit = (map->max_offset > VM_MAX_ADDRESS);
	boolean_t complex = FALSE;
	boolean_t contains_port_desc = FALSE;
	vm_size_t space_needed = 0;
	vm_offset_t paddr = 0;
	vm_map_copy_t copy = VM_MAP_COPY_NULL;
	mach_msg_type_number_t i;
	mach_msg_return_t mr = MACH_MSG_SUCCESS;
	ipc_port_t remote_port = kmsg->ikm_header->msgh_remote_port;

	vm_size_t descriptor_size = 0;

	mach_msg_type_number_t total_ool_port_count = 0;
	mach_msg_guard_flags_t guard_flags = 0;
	mach_port_context_t context;
	mach_msg_type_name_t disp;

	/*
	 * Determine if the target is a kernel port.
	 */
	dest = ip_to_object(remote_port);
	body = (mach_msg_body_t *) (kmsg->ikm_header + 1);
	naddr = (mach_msg_descriptor_t *) (body + 1);
	end = (mach_msg_descriptor_t *) ((vm_offset_t)kmsg->ikm_header + kmsg->ikm_header->msgh_size);

	dsc_count = body->msgh_descriptor_count;
	if (dsc_count == 0) {
		return MACH_MSG_SUCCESS;
	}

	/*
	 * Make an initial pass to determine kernel VM space requirements for
	 * physical copies and possible contraction of the descriptors from
	 * processes with pointers larger than the kernel's.
	 */
	daddr = NULL;
	for (i = 0; i < dsc_count; i++) {
		mach_msg_size_t size;
		mach_msg_type_number_t ool_port_count = 0;

		daddr = naddr;

		/* make sure the descriptor fits in the message */
		if (is_task_64bit) {
			/*
			 * Every descriptor is at least 12 bytes; the type
			 * field must be readable before we can size it.
			 */
			if ((mach_msg_descriptor_t*)((vm_offset_t)daddr + 12) > end) {
				mr = MACH_SEND_MSG_TOO_SMALL;
				goto clean_message;
			}

			/* 64-bit user layouts: some descriptors are 16 bytes, not 12 */
			switch (daddr->type.type) {
			case MACH_MSG_OOL_DESCRIPTOR:
			case MACH_MSG_OOL_VOLATILE_DESCRIPTOR:
			case MACH_MSG_OOL_PORTS_DESCRIPTOR:
			case MACH_MSG_GUARDED_PORT_DESCRIPTOR:
				descriptor_size += 16;
				naddr = (typeof(naddr))((vm_offset_t)daddr + 16);
				break;
			default:
				descriptor_size += 12;
				naddr = (typeof(naddr))((vm_offset_t)daddr + 12);
				break;
			}
		} else {
			/* 32-bit user layouts are uniformly 12 bytes */
			descriptor_size += 12;
			naddr = (typeof(naddr))((vm_offset_t)daddr + 12);
		}

		if (naddr > end) {
			mr = MACH_SEND_MSG_TOO_SMALL;
			goto clean_message;
		}

		switch (daddr->type.type) {
		case MACH_MSG_OOL_DESCRIPTOR:
		case MACH_MSG_OOL_VOLATILE_DESCRIPTOR:
			size = (is_task_64bit) ?
			    ((mach_msg_ool_descriptor64_t *)daddr)->size :
			    daddr->out_of_line.size;

			if (daddr->out_of_line.copy != MACH_MSG_PHYSICAL_COPY &&
			    daddr->out_of_line.copy != MACH_MSG_VIRTUAL_COPY) {
				/*
				 * Invalid copy option
				 */
				mr = MACH_SEND_INVALID_TYPE;
				goto clean_message;
			}

			if ((size >= MSG_OOL_SIZE_SMALL) &&
			    (daddr->out_of_line.copy == MACH_MSG_PHYSICAL_COPY) &&
			    !(daddr->out_of_line.deallocate)) {
				/*
				 * Out-of-line memory descriptor, accumulate kernel
				 * memory requirements
				 */
				if (space_needed + round_page(size) <= space_needed) {
					/* Overflow detected */
					mr = MACH_MSG_VM_KERNEL;
					goto clean_message;
				}

				space_needed += round_page(size);
				if (space_needed > ipc_kmsg_max_vm_space) {
					/* Per message kernel memory limit exceeded */
					mr = MACH_MSG_VM_KERNEL;
					goto clean_message;
				}
			}
			break;
		case MACH_MSG_PORT_DESCRIPTOR:
			if (os_add_overflow(total_ool_port_count, 1, &total_ool_port_count)) {
				/* Overflow detected */
				mr = MACH_SEND_TOO_LARGE;
				goto clean_message;
			}
			contains_port_desc = TRUE;
			break;
		case MACH_MSG_OOL_PORTS_DESCRIPTOR:
			ool_port_count = (is_task_64bit) ?
			    ((mach_msg_ool_ports_descriptor64_t *)daddr)->count :
			    daddr->ool_ports.count;

			if (os_add_overflow(total_ool_port_count, ool_port_count, &total_ool_port_count)) {
				/* Overflow detected */
				mr = MACH_SEND_TOO_LARGE;
				goto clean_message;
			}

			if (ool_port_count > (ipc_kmsg_max_vm_space / sizeof(mach_port_t))) {
				/* Per message kernel memory limit exceeded */
				mr = MACH_SEND_TOO_LARGE;
				goto clean_message;
			}
			contains_port_desc = TRUE;
			break;
		case MACH_MSG_GUARDED_PORT_DESCRIPTOR:
			guard_flags = (is_task_64bit) ?
			    ((mach_msg_guarded_port_descriptor64_t *)daddr)->flags :
			    ((mach_msg_guarded_port_descriptor32_t *)daddr)->flags;
			context = (is_task_64bit) ?
			    ((mach_msg_guarded_port_descriptor64_t *)daddr)->context :
			    ((mach_msg_guarded_port_descriptor32_t *)daddr)->context;
			disp = (is_task_64bit) ?
			    ((mach_msg_guarded_port_descriptor64_t *)daddr)->disposition :
			    ((mach_msg_guarded_port_descriptor32_t *)daddr)->disposition;

			/* Only MACH_MSG_TYPE_MOVE_RECEIVE is supported for now */
			if (!guard_flags || ((guard_flags & ~MACH_MSG_GUARD_FLAGS_MASK) != 0) ||
			    ((guard_flags & MACH_MSG_GUARD_FLAGS_UNGUARDED_ON_SEND) && (context != 0)) ||
			    (disp != MACH_MSG_TYPE_MOVE_RECEIVE)) {
				/*
				 * Invalid guard flags, context or disposition
				 */
				mr = MACH_SEND_INVALID_TYPE;
				goto clean_message;
			}
			if (os_add_overflow(total_ool_port_count, 1, &total_ool_port_count)) {
				/* Overflow detected */
				mr = MACH_SEND_TOO_LARGE;
				goto clean_message;
			}
			contains_port_desc = TRUE;
			break;
		}
	}

	/* Sending more than 16383 rights in one message seems crazy */
	if (total_ool_port_count >= (MACH_PORT_UREFS_MAX / 4)) {
		mr = MACH_SEND_TOO_LARGE;
		goto clean_message;
	}

	/*
	 * Check if dest is a no-grant port; Since this bit is set only on
	 * port construction and cannot be unset later, we can peek at the
	 * bit without paying the cost of locking the port.
	 */
	if (contains_port_desc && remote_port->ip_no_grant) {
		mr = MACH_SEND_NO_GRANT_DEST;
		goto clean_message;
	}

	/*
	 * Allocate space in the pageable kernel ipc copy map for all the
	 * ool data that is to be physically copied. Map is marked wait for
	 * space.
	 */
	if (space_needed) {
		if (vm_allocate_kernel(ipc_kernel_copy_map, &paddr, space_needed,
		    VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_IPC) != KERN_SUCCESS) {
			mr = MACH_MSG_VM_KERNEL;
			goto clean_message;
		}
	}

	/* user_addr = just after base as it was copied in */
	user_addr = (mach_msg_descriptor_t *)((vm_offset_t)kmsg->ikm_header + sizeof(mach_msg_base_t));

	/* Shift the mach_msg_base_t down to make room for dsc_count*16bytes of descriptors on 64 bit kernels
	 */
	if (descriptor_size != 16 * dsc_count) {
		vm_offset_t dsc_adjust = 16 * dsc_count - descriptor_size;

		/* NOTE: this moves the header itself; user_addr (computed above)
		 * keeps pointing at the original, unmoved user descriptors */
		memmove((char *)(((vm_offset_t)kmsg->ikm_header) - dsc_adjust), kmsg->ikm_header, sizeof(mach_msg_base_t));
		kmsg->ikm_header = (mach_msg_header_t *)((vm_offset_t)kmsg->ikm_header - dsc_adjust);

		/* Update the message size for the larger in-kernel representation */
		kmsg->ikm_header->msgh_size += (mach_msg_size_t)dsc_adjust;
	}


	/* kern_addr = just after base after it has been (conditionally) moved */
	kern_addr = (mach_msg_descriptor_t *)((vm_offset_t)kmsg->ikm_header + sizeof(mach_msg_base_t));

	/* handle the OOL regions and port descriptors. */
	for (i = 0; i < dsc_count; i++) {
		switch (user_addr->type.type) {
		case MACH_MSG_PORT_DESCRIPTOR:
			user_addr = ipc_kmsg_copyin_port_descriptor((mach_msg_port_descriptor_t *)kern_addr,
			    (mach_msg_legacy_port_descriptor_t *)user_addr, space, dest, kmsg, optionp, &mr);
			kern_addr++;
			complex = TRUE;
			break;
		case MACH_MSG_OOL_VOLATILE_DESCRIPTOR:
		case MACH_MSG_OOL_DESCRIPTOR:
			user_addr = ipc_kmsg_copyin_ool_descriptor((mach_msg_ool_descriptor_t *)kern_addr,
			    user_addr, is_task_64bit, &paddr, &copy, &space_needed, map, optionp, &mr);
			kern_addr++;
			complex = TRUE;
			break;
		case MACH_MSG_OOL_PORTS_DESCRIPTOR:
			user_addr = ipc_kmsg_copyin_ool_ports_descriptor((mach_msg_ool_ports_descriptor_t *)kern_addr,
			    user_addr, is_task_64bit, map, space, dest, kmsg, optionp, &mr);
			kern_addr++;
			complex = TRUE;
			break;
		case MACH_MSG_GUARDED_PORT_DESCRIPTOR:
			user_addr = ipc_kmsg_copyin_guarded_port_descriptor((mach_msg_guarded_port_descriptor_t *)kern_addr,
			    user_addr, is_task_64bit, space, dest, kmsg, optionp, &mr);
			kern_addr++;
			complex = TRUE;
			break;
		default:
			/* Invalid descriptor */
			mr = MACH_SEND_INVALID_TYPE;
			break;
		}

		if (MACH_MSG_SUCCESS != mr) {
			/* clean from start of message descriptors to i */
			ipc_kmsg_clean_partial(kmsg, i,
			    (mach_msg_descriptor_t *)((mach_msg_base_t *)kmsg->ikm_header + 1),
			    paddr, space_needed);
			goto out;
		}
	} /* End of loop */

	if (!complex) {
		/* no descriptor actually carried a right or memory region */
		kmsg->ikm_header->msgh_bits &= ~MACH_MSGH_BITS_COMPLEX;
	}
out:
	return mr;

clean_message:
	/* no descriptors have been copied in yet */
	ipc_kmsg_clean_partial(kmsg, 0, NULL, 0, 0);
	return mr;
}
4009
4010
4011 /*
4012 * Routine: ipc_kmsg_copyin
4013 * Purpose:
4014 * "Copy-in" port rights and out-of-line memory
4015 * in the message.
4016 *
4017 * In all failure cases, the message is left holding
4018 * no rights or memory. However, the message buffer
4019 * is not deallocated. If successful, the message
4020 * contains a valid destination port.
4021 * Conditions:
4022 * Nothing locked.
4023 * Returns:
4024 * MACH_MSG_SUCCESS Successful copyin.
4025 * MACH_SEND_INVALID_HEADER Illegal value in the message header bits.
4026 * MACH_SEND_INVALID_DEST Can't copyin destination port.
4027 * MACH_SEND_INVALID_REPLY Can't copyin reply port.
4028 * MACH_SEND_INVALID_MEMORY Can't grab out-of-line memory.
4029 * MACH_SEND_INVALID_RIGHT Can't copyin port right in body.
4030 * MACH_SEND_INVALID_TYPE Bad type specification.
4031 * MACH_SEND_MSG_TOO_SMALL Body is too small for types/data.
4032 */
4033
mach_msg_return_t
ipc_kmsg_copyin(
	ipc_kmsg_t kmsg,
	ipc_space_t space,
	vm_map_t map,
	mach_msg_priority_t priority,
	mach_msg_option_t *optionp)
{
	mach_msg_return_t mr;
	/* capture the user's destination name before the header is rewritten */
	mach_port_name_t dest_name = CAST_MACH_PORT_TO_NAME(kmsg->ikm_header->msgh_remote_port);

	/* drop any kernel-private bits a malicious sender may have set */
	kmsg->ikm_header->msgh_bits &= MACH_MSGH_BITS_USER;

	mr = ipc_kmsg_copyin_header(kmsg, space, priority, optionp);

	if (mr != MACH_MSG_SUCCESS) {
		return mr;
	}

	/* Get the message filter policy if the task and port support filtering */
	mach_msg_filter_id fid = 0;
	if (ip_enforce_msg_filtering(kmsg->ikm_header->msgh_remote_port) &&
	    task_get_filter_msg_flag(current_task())) {
		/* port label is yet to be supported */
		boolean_t allow_kmsg = mach_msg_fetch_filter_policy(NULL, kmsg->ikm_header->msgh_id, &fid);
		if (!allow_kmsg) {
			mach_port_guard_exception(dest_name, 0, 0, kGUARD_EXC_MSG_FILTERED);
			/* no descriptors have been copied in yet */
			ipc_kmsg_clean_partial(kmsg, 0, NULL, 0, 0);
			return MACH_SEND_MSG_FILTERED;
		}
		kmsg->ikm_filter_policy_id = fid;
	}

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_MSG_SEND) | DBG_FUNC_NONE,
	    VM_KERNEL_ADDRPERM((uintptr_t)kmsg),
	    (uintptr_t)kmsg->ikm_header->msgh_bits,
	    (uintptr_t)kmsg->ikm_header->msgh_id,
	    VM_KERNEL_ADDRPERM((uintptr_t)unsafe_convert_port_to_voucher(kmsg->ikm_voucher)),
	    0);

	DEBUG_KPRINT_SYSCALL_IPC("ipc_kmsg_copyin header:\n%.8x\n%.8x\n%p\n%p\n%p\n%.8x\n",
	    kmsg->ikm_header->msgh_size,
	    kmsg->ikm_header->msgh_bits,
	    kmsg->ikm_header->msgh_remote_port,
	    kmsg->ikm_header->msgh_local_port,
	    kmsg->ikm_voucher,
	    kmsg->ikm_header->msgh_id);

	/* only complex messages carry a descriptor body to copy in */
	if (kmsg->ikm_header->msgh_bits & MACH_MSGH_BITS_COMPLEX) {
		mr = ipc_kmsg_copyin_body( kmsg, space, map, optionp);

		/* unreachable if !DEBUG */
		__unreachable_ok_push
		if (DEBUG_KPRINT_SYSCALL_PREDICATE(DEBUG_KPRINT_SYSCALL_IPC_MASK)) {
			kprintf("body:\n");
			uint32_t i;
			for (i = 0; i * 4 < (kmsg->ikm_header->msgh_size - sizeof(mach_msg_header_t)); i++) {
				kprintf("%.4x\n", ((uint32_t *)(kmsg->ikm_header + 1))[i]);
			}
		}
		__unreachable_ok_pop
	}

	/* Sign the message contents */
	if (mr == MACH_MSG_SUCCESS) {
		ikm_sign(kmsg);
	}

	return mr;
}
4105
4106 /*
4107 * Routine: ipc_kmsg_copyin_from_kernel
4108 * Purpose:
4109 * "Copy-in" port rights and out-of-line memory
4110 * in a message sent from the kernel.
4111 *
4112 * Because the message comes from the kernel,
4113 * the implementation assumes there are no errors
4114 * or peculiarities in the message.
4115 * Conditions:
4116 * Nothing locked.
4117 */
4118
mach_msg_return_t
ipc_kmsg_copyin_from_kernel(
	ipc_kmsg_t kmsg)
{
	mach_msg_bits_t bits = kmsg->ikm_header->msgh_bits;
	mach_msg_type_name_t rname = MACH_MSGH_BITS_REMOTE(bits);
	mach_msg_type_name_t lname = MACH_MSGH_BITS_LOCAL(bits);
	ipc_object_t remote = ip_to_object(kmsg->ikm_header->msgh_remote_port);
	ipc_object_t local = ip_to_object(kmsg->ikm_header->msgh_local_port);
	ipc_port_t dest = kmsg->ikm_header->msgh_remote_port;

	/* translate the destination and reply ports */
	if (!IO_VALID(remote)) {
		return MACH_SEND_INVALID_DEST;
	}

	ipc_object_copyin_from_kernel(remote, rname);
	if (IO_VALID(local)) {
		ipc_object_copyin_from_kernel(local, lname);
	}

	/*
	 * The common case is a complex message with no reply port,
	 * because that is what the memory_object interface uses.
	 */

	if (bits == (MACH_MSGH_BITS_COMPLEX |
	    MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, 0))) {
		/* fast path: rewrite the known bit pattern directly */
		bits = (MACH_MSGH_BITS_COMPLEX |
		    MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND, 0));

		kmsg->ikm_header->msgh_bits = bits;
	} else {
		/* general case: translate both dispositions, keep the other bits */
		bits = (MACH_MSGH_BITS_OTHER(bits) |
		    MACH_MSGH_BITS(ipc_object_copyin_type(rname),
		    ipc_object_copyin_type(lname)));

		kmsg->ikm_header->msgh_bits = bits;
	}

	if (bits & MACH_MSGH_BITS_COMPLEX) {
		/*
		 * Check if the remote port accepts ports in the body.
		 */
		if (dest->ip_no_grant) {
			mach_msg_descriptor_t *saddr;
			mach_msg_body_t *body;
			mach_msg_type_number_t i, count;

			body = (mach_msg_body_t *) (kmsg->ikm_header + 1);
			saddr = (mach_msg_descriptor_t *) (body + 1);
			count = body->msgh_descriptor_count;

			for (i = 0; i < count; i++, saddr++) {
				switch (saddr->type.type) {
				case MACH_MSG_PORT_DESCRIPTOR:
				case MACH_MSG_OOL_PORTS_DESCRIPTOR:
				case MACH_MSG_GUARDED_PORT_DESCRIPTOR:
					/* no descriptors have been copied in yet */
					ipc_kmsg_clean_partial(kmsg, 0, NULL, 0, 0);
					return MACH_SEND_NO_GRANT_DEST;
				}
			}
		}

		mach_msg_descriptor_t *saddr;
		mach_msg_body_t *body;
		mach_msg_type_number_t i, count;

		body = (mach_msg_body_t *) (kmsg->ikm_header + 1);
		saddr = (mach_msg_descriptor_t *) (body + 1);
		count = body->msgh_descriptor_count;

		/*
		 * Descriptors are already in the kernel's internal layout;
		 * only the dispositions need translating and the rights
		 * need to be accounted as copied in.
		 */
		for (i = 0; i < count; i++, saddr++) {
			switch (saddr->type.type) {
			case MACH_MSG_PORT_DESCRIPTOR: {
				mach_msg_type_name_t name;
				ipc_object_t object;
				mach_msg_port_descriptor_t *dsc;

				dsc = &saddr->port;

				/* this is really the type SEND, SEND_ONCE, etc. */
				name = dsc->disposition;
				object = ip_to_object(dsc->name);
				dsc->disposition = ipc_object_copyin_type(name);

				if (!IO_VALID(object)) {
					break;
				}

				ipc_object_copyin_from_kernel(object, name);

				/* CDY avoid circularity when the destination is also */
				/* the kernel. This check should be changed into an */
				/* assert when the new kobject model is in place since*/
				/* ports will not be used in kernel to kernel chats */

				if (ip_object_to_port(remote)->ip_receiver != ipc_space_kernel) {
					if ((dsc->disposition == MACH_MSG_TYPE_PORT_RECEIVE) &&
					    ipc_port_check_circularity(ip_object_to_port(object),
					    ip_object_to_port(remote))) {
						kmsg->ikm_header->msgh_bits |=
						    MACH_MSGH_BITS_CIRCULAR;
					}
				}
				break;
			}
			case MACH_MSG_OOL_VOLATILE_DESCRIPTOR:
			case MACH_MSG_OOL_DESCRIPTOR: {
				/*
				 * The sender should supply ready-made memory, i.e.
				 * a vm_map_copy_t, so we don't need to do anything.
				 */
				break;
			}
			case MACH_MSG_OOL_PORTS_DESCRIPTOR: {
				ipc_object_t *objects;
				unsigned int j;
				mach_msg_type_name_t name;
				mach_msg_ool_ports_descriptor_t *dsc;

				dsc = (mach_msg_ool_ports_descriptor_t *)&saddr->ool_ports;

				/* this is really the type SEND, SEND_ONCE, etc. */
				name = dsc->disposition;
				dsc->disposition = ipc_object_copyin_type(name);

				objects = (ipc_object_t *) dsc->address;

				for (j = 0; j < dsc->count; j++) {
					ipc_object_t object = objects[j];

					if (!IO_VALID(object)) {
						continue;
					}

					ipc_object_copyin_from_kernel(object, name);

					if ((dsc->disposition == MACH_MSG_TYPE_PORT_RECEIVE) &&
					    ipc_port_check_circularity(ip_object_to_port(object),
					    ip_object_to_port(remote))) {
						kmsg->ikm_header->msgh_bits |= MACH_MSGH_BITS_CIRCULAR;
					}
				}
				break;
			}
			case MACH_MSG_GUARDED_PORT_DESCRIPTOR: {
				mach_msg_guarded_port_descriptor_t *dsc = (typeof(dsc)) & saddr->guarded_port;
				mach_msg_type_name_t disp = dsc->disposition;
				ipc_object_t object = ip_to_object(dsc->name);
				dsc->disposition = ipc_object_copyin_type(disp);
				/* kernel-built messages never carry a live guard */
				assert(dsc->flags == 0);

				if (!IO_VALID(object)) {
					break;
				}

				ipc_object_copyin_from_kernel(object, disp);
				/*
				 * avoid circularity when the destination is also
				 * the kernel. This check should be changed into an
				 * assert when the new kobject model is in place since
				 * ports will not be used in kernel to kernel chats
				 */

				if (ip_object_to_port(remote)->ip_receiver != ipc_space_kernel) {
					if ((dsc->disposition == MACH_MSG_TYPE_PORT_RECEIVE) &&
					    ipc_port_check_circularity(ip_object_to_port(object),
					    ip_object_to_port(remote))) {
						kmsg->ikm_header->msgh_bits |= MACH_MSGH_BITS_CIRCULAR;
					}
				}
				break;
			}
			default: {
#if MACH_ASSERT
				panic("ipc_kmsg_copyin_from_kernel: bad descriptor");
#endif /* MACH_ASSERT */
			}
			}
		}
	}

	/* Add the signature to the message */
	ikm_sign(kmsg);

	return MACH_MSG_SUCCESS;
}
4308
#if IKM_SUPPORT_LEGACY
/*
 *	Routine:	ipc_kmsg_copyin_from_kernel_legacy
 *	Purpose:
 *		"Copy-in" port rights and out-of-line memory in a
 *		kernel-originated message whose body still carries the
 *		legacy (32-bit user layout) descriptors.  Each descriptor
 *		is expanded in place to the larger in-kernel descriptor
 *		representation: the header is slid down by 4 bytes per
 *		descriptor to make room, and msgh_size is grown to match.
 *	Conditions:
 *		Nothing locked.
 *	Returns:
 *		MACH_MSG_SUCCESS on success.
 *		MACH_SEND_INVALID_DEST if the destination port is not valid.
 *		MACH_SEND_NO_GRANT_DEST if the destination refuses port
 *		descriptors (ip_no_grant) and the body carries any.
 */
mach_msg_return_t
ipc_kmsg_copyin_from_kernel_legacy(
	ipc_kmsg_t      kmsg)
{
	mach_msg_bits_t bits = kmsg->ikm_header->msgh_bits;
	mach_msg_type_name_t rname = MACH_MSGH_BITS_REMOTE(bits);
	mach_msg_type_name_t lname = MACH_MSGH_BITS_LOCAL(bits);
	ipc_object_t remote = ip_to_object(kmsg->ikm_header->msgh_remote_port);
	ipc_object_t local = ip_to_object(kmsg->ikm_header->msgh_local_port);
	ipc_port_t dest = kmsg->ikm_header->msgh_remote_port;

	/* translate the destination and reply ports */
	if (!IO_VALID(remote)) {
		return MACH_SEND_INVALID_DEST;
	}

	ipc_object_copyin_from_kernel(remote, rname);
	if (IO_VALID(local)) {
		ipc_object_copyin_from_kernel(local, lname);
	}

	/*
	 * The common case is a complex message with no reply port,
	 * because that is what the memory_object interface uses.
	 */

	if (bits == (MACH_MSGH_BITS_COMPLEX |
	    MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, 0))) {
		/* fast path: rewrite COPY_SEND to PORT_SEND directly */
		bits = (MACH_MSGH_BITS_COMPLEX |
		    MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND, 0));

		kmsg->ikm_header->msgh_bits = bits;
	} else {
		/* general path: translate both dispositions through copyin */
		bits = (MACH_MSGH_BITS_OTHER(bits) |
		    MACH_MSGH_BITS(ipc_object_copyin_type(rname),
		    ipc_object_copyin_type(lname)));

		kmsg->ikm_header->msgh_bits = bits;
	}

	if (bits & MACH_MSGH_BITS_COMPLEX) {
		/*
		 * Reject any port-carrying descriptors up front if the
		 * destination port refuses them, before any rights are
		 * copied in, so cleanup is trivial.
		 */
		if (dest->ip_no_grant) {
			mach_msg_descriptor_t *saddr;
			mach_msg_body_t *body;
			mach_msg_type_number_t i, count;

			body = (mach_msg_body_t *) (kmsg->ikm_header + 1);
			saddr = (mach_msg_descriptor_t *) (body + 1);
			count = body->msgh_descriptor_count;

			for (i = 0; i < count; i++, saddr++) {
				switch (saddr->type.type) {
				case MACH_MSG_PORT_DESCRIPTOR:
				case MACH_MSG_OOL_PORTS_DESCRIPTOR:
				case MACH_MSG_GUARDED_PORT_DESCRIPTOR:
					/* no descriptors have been copied in yet */
					ipc_kmsg_clean_partial(kmsg, 0, NULL, 0, 0);
					return MACH_SEND_NO_GRANT_DEST;
				}
			}
		}

		mach_msg_legacy_descriptor_t *saddr;
		mach_msg_descriptor_t *daddr;
		mach_msg_body_t *body;
		mach_msg_type_number_t i, count;

		body = (mach_msg_body_t *) (kmsg->ikm_header + 1);
		saddr = (typeof(saddr))(body + 1);
		count = body->msgh_descriptor_count;

		if (count) {
			/*
			 * Each in-kernel descriptor is 4 bytes larger than its
			 * legacy counterpart.  Slide the header down to open a
			 * gap of 4*count bytes between header and body, into
			 * which the expanded descriptors are written below.
			 */
			vm_offset_t dsc_adjust = 4 * count;
			memmove((char *)(((vm_offset_t)kmsg->ikm_header) - dsc_adjust), kmsg->ikm_header, sizeof(mach_msg_base_t));
			kmsg->ikm_header = (mach_msg_header_t *)((vm_offset_t)kmsg->ikm_header - dsc_adjust);
			/* Update the message size for the larger in-kernel representation */
			kmsg->ikm_header->msgh_size += dsc_adjust;
		}
		daddr = (mach_msg_descriptor_t *)((vm_offset_t)kmsg->ikm_header + sizeof(mach_msg_base_t));

		/*
		 * Expand each legacy source descriptor (saddr) into the
		 * in-kernel destination slot (daddr).  Destination slots
		 * never overtake sources, so in-place expansion is safe.
		 */
		for (i = 0; i < count; i++, saddr++, daddr++) {
			switch (saddr->type.type) {
			case MACH_MSG_PORT_DESCRIPTOR: {
				mach_msg_type_name_t name;
				ipc_object_t object;
				mach_msg_legacy_port_descriptor_t *dsc;
				mach_msg_port_descriptor_t *dest_dsc;

				dsc = (typeof(dsc)) & saddr->port;
				dest_dsc = &daddr->port;

				/* this is really the type SEND, SEND_ONCE, etc. */
				name = dsc->disposition;
				object = ip_to_object(CAST_MACH_NAME_TO_PORT(dsc->name));
				dest_dsc->disposition = ipc_object_copyin_type(name);
				dest_dsc->name = ip_object_to_port(object);
				dest_dsc->type = MACH_MSG_PORT_DESCRIPTOR;

				if (!IO_VALID(object)) {
					break;
				}

				ipc_object_copyin_from_kernel(object, name);

				/* CDY avoid circularity when the destination is also */
				/* the kernel. This check should be changed into an */
				/* assert when the new kobject model is in place since*/
				/* ports will not be used in kernel to kernel chats */

				if (ip_object_to_port(remote)->ip_receiver != ipc_space_kernel) {
					if ((dest_dsc->disposition == MACH_MSG_TYPE_PORT_RECEIVE) &&
					    ipc_port_check_circularity(ip_object_to_port(object),
					    ip_object_to_port(remote))) {
						kmsg->ikm_header->msgh_bits |=
						    MACH_MSGH_BITS_CIRCULAR;
					}
				}
				break;
			}
			case MACH_MSG_OOL_VOLATILE_DESCRIPTOR:
			case MACH_MSG_OOL_DESCRIPTOR: {
				/* The sender should supply ready-made memory, i.e. a vm_map_copy_t
				 * so we don't need to do anything special; just widen the
				 * descriptor fields into the in-kernel layout. */

				mach_msg_ool_descriptor32_t *source_dsc = &saddr->out_of_line32;
				mach_msg_ool_descriptor_t *dest_dsc = (typeof(dest_dsc)) & daddr->out_of_line;

				vm_offset_t address = source_dsc->address;
				vm_size_t size = source_dsc->size;
				boolean_t deallocate = source_dsc->deallocate;
				mach_msg_copy_options_t copy = source_dsc->copy;
				mach_msg_descriptor_type_t type = source_dsc->type;

				dest_dsc->address = (void *)address;
				dest_dsc->size = size;
				dest_dsc->deallocate = deallocate;
				dest_dsc->copy = copy;
				dest_dsc->type = type;
				break;
			}
			case MACH_MSG_OOL_PORTS_DESCRIPTOR: {
				ipc_object_t *objects;
				unsigned int j;
				mach_msg_type_name_t name;
				mach_msg_ool_ports_descriptor_t *dest_dsc;

				mach_msg_ool_ports_descriptor32_t *source_dsc = &saddr->ool_ports32;
				dest_dsc = (typeof(dest_dsc)) & daddr->ool_ports;

				boolean_t deallocate = source_dsc->deallocate;
				mach_msg_copy_options_t copy = source_dsc->copy;
				mach_msg_size_t port_count = source_dsc->count;
				mach_msg_type_name_t disposition = source_dsc->disposition;

				/* this is really the type SEND, SEND_ONCE, etc. */
				name = disposition;
				disposition = ipc_object_copyin_type(name);

				objects = (ipc_object_t *) (uintptr_t)source_dsc->address;

				/* copy in every valid right in the out-of-line array */
				for (j = 0; j < port_count; j++) {
					ipc_object_t object = objects[j];

					if (!IO_VALID(object)) {
						continue;
					}

					ipc_object_copyin_from_kernel(object, name);

					if ((disposition == MACH_MSG_TYPE_PORT_RECEIVE) &&
					    ipc_port_check_circularity(ip_object_to_port(object),
					    ip_object_to_port(remote))) {
						kmsg->ikm_header->msgh_bits |= MACH_MSGH_BITS_CIRCULAR;
					}
				}

				dest_dsc->address = objects;
				dest_dsc->deallocate = deallocate;
				dest_dsc->copy = copy;
				dest_dsc->disposition = disposition;
				dest_dsc->type = MACH_MSG_OOL_PORTS_DESCRIPTOR;
				dest_dsc->count = port_count;
				break;
			}
			case MACH_MSG_GUARDED_PORT_DESCRIPTOR: {
				mach_msg_type_name_t disp;
				ipc_object_t object;
				mach_msg_guarded_port_descriptor32_t *dsc;
				mach_msg_guarded_port_descriptor_t *dest_dsc;

				dsc = (typeof(dsc)) & saddr->guarded_port32;
				dest_dsc = &daddr->guarded_port;

				disp = dsc->disposition;
				object = ip_to_object(CAST_MACH_NAME_TO_PORT(dsc->name));
				/* kernel-originated guarded descriptors carry no guard */
				assert(dsc->flags == 0);
				assert(dsc->context == 0);

				dest_dsc->disposition = ipc_object_copyin_type(disp);
				dest_dsc->name = ip_object_to_port(object);
				dest_dsc->type = MACH_MSG_GUARDED_PORT_DESCRIPTOR;
				dest_dsc->flags = 0;

				if (!IO_VALID(object)) {
					break;
				}

				ipc_object_copyin_from_kernel(object, disp);

				/* CDY avoid circularity when the destination is also */
				/* the kernel. This check should be changed into an */
				/* assert when the new kobject model is in place since*/
				/* ports will not be used in kernel to kernel chats */

				if (ip_object_to_port(remote)->ip_receiver != ipc_space_kernel) {
					if ((dest_dsc->disposition == MACH_MSG_TYPE_PORT_RECEIVE) &&
					    ipc_port_check_circularity(ip_object_to_port(object),
					    ip_object_to_port(remote))) {
						kmsg->ikm_header->msgh_bits |=
						    MACH_MSGH_BITS_CIRCULAR;
					}
				}
				break;
			}
			default: {
#if MACH_ASSERT
				panic("ipc_kmsg_copyin_from_kernel: bad descriptor");
#endif /* MACH_ASSERT */
			}
			}
		}
	}

	/* Add the signature to the (possibly relocated) message */
	ikm_sign(kmsg);

	return MACH_MSG_SUCCESS;
}
#endif /* IKM_SUPPORT_LEGACY */
4548
/*
 *	Routine:	ipc_kmsg_copyout_header
 *	Purpose:
 *		"Copy-out" port rights in the header of a message.
 *		Operates atomically; if it doesn't succeed the
 *		message header and the space are left untouched.
 *		If it does succeed the remote/local port fields
 *		contain port names instead of object pointers,
 *		and the bits field is updated.
 *	Conditions:
 *		Nothing locked.
 *	Returns:
 *		MACH_MSG_SUCCESS	Copied out port rights.
 *		MACH_RCV_INVALID_NOTIFY
 *			Notify is non-null and doesn't name a receive right.
 *			(Either KERN_INVALID_NAME or KERN_INVALID_RIGHT.)
 *		MACH_RCV_HEADER_ERROR|MACH_MSG_IPC_SPACE
 *			The space is dead.
 *		MACH_RCV_HEADER_ERROR|MACH_MSG_IPC_SPACE
 *			No room in space for another name.
 *		MACH_RCV_HEADER_ERROR|MACH_MSG_IPC_KERNEL
 *			Couldn't allocate memory for the reply port.
 *		MACH_RCV_HEADER_ERROR|MACH_MSG_IPC_KERNEL
 *			Couldn't allocate memory for the dead-name request.
 */

mach_msg_return_t
ipc_kmsg_copyout_header(
	ipc_kmsg_t              kmsg,
	ipc_space_t             space,
	mach_msg_option_t       option)
{
	mach_msg_header_t *msg = kmsg->ikm_header;
	mach_msg_bits_t mbits = msg->msgh_bits;
	ipc_port_t dest = msg->msgh_remote_port;

	assert(IP_VALID(dest));

	/*
	 * While we still hold a reference on the received-from port,
	 * process all send-possible notifications we received along with
	 * the message.
	 */
	ipc_port_spnotify(dest);

	{
		mach_msg_type_name_t dest_type = MACH_MSGH_BITS_REMOTE(mbits);
		mach_msg_type_name_t reply_type = MACH_MSGH_BITS_LOCAL(mbits);
		mach_msg_type_name_t voucher_type = MACH_MSGH_BITS_VOUCHER(mbits);
		ipc_port_t reply = msg->msgh_local_port;
		ipc_port_t release_reply_port = IP_NULL;
		mach_port_name_t dest_name, reply_name;

		ipc_port_t voucher = kmsg->ikm_voucher;
		ipc_port_t release_voucher_port = IP_NULL;
		mach_port_name_t voucher_name;

		uint32_t entries_held = 0;
		boolean_t need_write_lock = FALSE;
		kern_return_t kr;

		/*
		 * Reserve any potentially needed entries in the target space.
		 * We'll free any unused before unlocking the space.
		 */
		if (IP_VALID(reply)) {
			entries_held++;
			need_write_lock = TRUE;
		}
		if (IP_VALID(voucher)) {
			assert(voucher_type == MACH_MSG_TYPE_MOVE_SEND);

			if ((option & MACH_RCV_VOUCHER) != 0) {
				entries_held++;
			}
			need_write_lock = TRUE;
		}

		if (need_write_lock) {
			/* write lock: we will be claiming/creating entries */
			is_write_lock(space);

			while (entries_held) {
				if (!is_active(space)) {
					is_write_unlock(space);
					return MACH_RCV_HEADER_ERROR |
					       MACH_MSG_IPC_SPACE;
				}

				kr = ipc_entries_hold(space, entries_held);
				if (KERN_SUCCESS == kr) {
					break;
				}

				/* grow drops and reacquires the space lock on success */
				kr = ipc_entry_grow_table(space, ITS_SIZE_NONE);
				if (KERN_SUCCESS != kr) {
					return MACH_RCV_HEADER_ERROR |
					       MACH_MSG_IPC_SPACE;
				}
				/* space was unlocked and relocked - retry */
			}

			/* Handle reply port. */
			if (IP_VALID(reply)) {
				ipc_entry_t entry;

				/* Is there already an entry we can use? */
				if ((reply_type != MACH_MSG_TYPE_PORT_SEND_ONCE) &&
				    ipc_right_reverse(space, ip_to_object(reply), &reply_name, &entry)) {
					/* reply port is locked and active */
					assert(entry->ie_bits & MACH_PORT_TYPE_SEND_RECEIVE);
				} else {
					ip_lock(reply);
					/* Is the reply port still active and allowed to be copied out? */
					if (!ip_active(reply) || !ip_label_check(space, reply, reply_type)) {
						/* clear the context value */
						reply->ip_reply_context = 0;
						ip_unlock(reply);

						/* defer the right release until all locks are dropped */
						release_reply_port = reply;
						reply = IP_DEAD;
						reply_name = MACH_PORT_DEAD;
						goto done_with_reply;
					}

					/* claim a held entry for the reply port */
					assert(entries_held > 0);
					entries_held--;
					ipc_entry_claim(space, &reply_name, &entry);
					assert(IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_NONE);
					assert(entry->ie_object == IO_NULL);
					entry->ie_object = ip_to_object(reply);
				}

				/* space and reply port are locked and active */
				ip_reference(reply); /* hold onto the reply port */

				/*
				 * If the receiver would like to enforce strict reply
				 * semantics, and the message looks like it expects a reply,
				 * and contains a voucher, then link the context in the
				 * voucher with the reply port so that the next message sent
				 * to the reply port must come from a thread that has a
				 * matching context (voucher).
				 */
				if (enforce_strict_reply && MACH_RCV_WITH_STRICT_REPLY(option) && IP_VALID(voucher)) {
					if (ipc_kmsg_validate_reply_port_locked(reply, option) != KERN_SUCCESS) {
						/* if the receiver isn't happy with the reply port: fail the receive. */
						ip_unlock(reply);
						ipc_entry_dealloc(space, reply_name, entry);
						is_write_unlock(space);
						ip_release(reply);
						return MACH_RCV_INVALID_REPLY;
					}
					ipc_kmsg_link_reply_context_locked(reply, voucher);
				} else {
					/*
					 * if the receive did not choose to participate
					 * in the strict reply/RPC, then don't enforce
					 * anything (as this could lead to booby-trapped
					 * messages that kill the server).
					 */
					reply->ip_reply_context = 0;
				}

				kr = ipc_right_copyout(space, reply_name, entry,
				    reply_type, NULL, NULL, ip_to_object(reply));
				assert(kr == KERN_SUCCESS);
				/* reply port is unlocked */
			} else {
				/* IP_NULL / IP_DEAD map straight to their name forms */
				reply_name = CAST_MACH_PORT_TO_NAME(reply);
			}

done_with_reply:

			/* Handle voucher port. */
			if (voucher_type != MACH_MSGH_BITS_ZERO) {
				assert(voucher_type == MACH_MSG_TYPE_MOVE_SEND);

				if (!IP_VALID(voucher)) {
					if ((option & MACH_RCV_VOUCHER) == 0) {
						voucher_type = MACH_MSGH_BITS_ZERO;
					}
					voucher_name = MACH_PORT_NULL;
					goto done_with_voucher;
				}

				/* clear voucher from its hiding place back in the kmsg */
				kmsg->ikm_voucher = IP_NULL;

				if ((option & MACH_RCV_VOUCHER) != 0) {
					ipc_entry_t entry;

					/* reuse an existing send-right entry if there is one */
					if (ipc_right_reverse(space, ip_to_object(voucher),
					    &voucher_name, &entry)) {
						/* voucher port locked */
						assert(entry->ie_bits & MACH_PORT_TYPE_SEND);
					} else {
						assert(entries_held > 0);
						entries_held--;
						ipc_entry_claim(space, &voucher_name, &entry);
						assert(IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_NONE);
						assert(entry->ie_object == IO_NULL);
						entry->ie_object = ip_to_object(voucher);
						ip_lock(voucher);
					}
					/* space is locked and active */
					require_ip_active(voucher);
					assert(ip_kotype(voucher) == IKOT_VOUCHER);
					kr = ipc_right_copyout(space, voucher_name, entry,
					    MACH_MSG_TYPE_MOVE_SEND, NULL, NULL,
					    ip_to_object(voucher));
					/* voucher port is unlocked */
				} else {
					/* receiver refused the voucher: drop it after unlock */
					voucher_type = MACH_MSGH_BITS_ZERO;
					release_voucher_port = voucher;
					voucher_name = MACH_PORT_NULL;
				}
			} else {
				voucher_name = msg->msgh_voucher_port;
			}

done_with_voucher:

			/* take the dest lock while the space is still locked (see below) */
			ip_lock(dest);
			is_write_unlock(space);
		} else {
			/*
			 * No reply or voucher port! This is an easy case.
			 * We only need to have the space locked
			 * when locking the destination.
			 */

			is_read_lock(space);
			if (!is_active(space)) {
				is_read_unlock(space);
				return MACH_RCV_HEADER_ERROR | MACH_MSG_IPC_SPACE;
			}

			ip_lock(dest);
			is_read_unlock(space);

			reply_name = CAST_MACH_PORT_TO_NAME(reply);

			if (voucher_type != MACH_MSGH_BITS_ZERO) {
				assert(voucher_type == MACH_MSG_TYPE_MOVE_SEND);
				if ((option & MACH_RCV_VOUCHER) == 0) {
					voucher_type = MACH_MSGH_BITS_ZERO;
				}
				voucher_name = MACH_PORT_NULL;
			} else {
				voucher_name = msg->msgh_voucher_port;
			}
		}

		/*
		 * At this point, the space is unlocked and the destination
		 * port is locked. (Lock taken while space was locked.)
		 * reply_name is taken care of; we still need dest_name.
		 * We still hold a ref for reply (if it is valid).
		 *
		 * If the space holds receive rights for the destination,
		 * we return its name for the right. Otherwise the task
		 * managed to destroy or give away the receive right between
		 * receiving the message and this copyout. If the destination
		 * is dead, return MACH_PORT_DEAD, and if the receive right
		 * exists somewhere else (another space, in transit)
		 * return MACH_PORT_NULL.
		 *
		 * Making this copyout operation atomic with the previous
		 * copyout of the reply port is a bit tricky. If there was
		 * no real reply port (it wasn't IP_VALID) then this isn't
		 * an issue. If the reply port was dead at copyout time,
		 * then we are OK, because if dest is dead we serialize
		 * after the death of both ports and if dest is alive
		 * we serialize after reply died but before dest's (later) death.
		 * So assume reply was alive when we copied it out. If dest
		 * is alive, then we are OK because we serialize before
		 * the ports' deaths. So assume dest is dead when we look at it.
		 * If reply dies/died after dest, then we are OK because
		 * we serialize after dest died but before reply dies.
		 * So the hard case is when reply is alive at copyout,
		 * dest is dead at copyout, and reply died before dest died.
		 * In this case pretend that dest is still alive, so
		 * we serialize while both ports are alive.
		 *
		 * Because the space lock is held across the copyout of reply
		 * and locking dest, the receive right for dest can't move
		 * in or out of the space while the copyouts happen, so
		 * that isn't an atomicity problem. In the last hard case
		 * above, this implies that when dest is dead that the
		 * space couldn't have had receive rights for dest at
		 * the time reply was copied-out, so when we pretend
		 * that dest is still alive, we can return MACH_PORT_NULL.
		 *
		 * If dest == reply, then we have to make it look like
		 * either both copyouts happened before the port died,
		 * or both happened after the port died. This special
		 * case works naturally if the timestamp comparison
		 * is done correctly.
		 */

		if (ip_active(dest)) {
			ipc_object_copyout_dest(space, ip_to_object(dest),
			    dest_type, &dest_name);
			/* dest is unlocked */
		} else {
			ipc_port_timestamp_t timestamp;

			timestamp = dest->ip_timestamp;
			ip_unlock(dest);
			ip_release(dest);

			if (IP_VALID(reply)) {
				ip_lock(reply);
				if (ip_active(reply) ||
				    IP_TIMESTAMP_ORDER(timestamp,
				    reply->ip_timestamp)) {
					dest_name = MACH_PORT_DEAD;
				} else {
					dest_name = MACH_PORT_NULL;
				}
				ip_unlock(reply);
			} else {
				dest_name = MACH_PORT_DEAD;
			}
		}

		/* drop the reference taken when reply was copied out */
		if (IP_VALID(reply)) {
			ip_release(reply);
		}

		/* release a reply right we decided not to copy out */
		if (IP_VALID(release_reply_port)) {
			if (reply_type == MACH_MSG_TYPE_PORT_SEND_ONCE) {
				ipc_port_release_sonce(release_reply_port);
			} else {
				ipc_port_release_send(release_reply_port);
			}
		}

		/* tracepoints: voucher accepted vs. refused */
		if ((option & MACH_RCV_VOUCHER) != 0) {
			KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_MSG_RECV) | DBG_FUNC_NONE,
			    VM_KERNEL_ADDRPERM((uintptr_t)kmsg),
			    (uintptr_t)kmsg->ikm_header->msgh_bits,
			    (uintptr_t)kmsg->ikm_header->msgh_id,
			    VM_KERNEL_ADDRPERM((uintptr_t)unsafe_convert_port_to_voucher(voucher)),
			    0);
		} else {
			KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_MSG_RECV_VOUCHER_REFUSED) | DBG_FUNC_NONE,
			    VM_KERNEL_ADDRPERM((uintptr_t)kmsg),
			    (uintptr_t)kmsg->ikm_header->msgh_bits,
			    (uintptr_t)kmsg->ikm_header->msgh_id,
			    VM_KERNEL_ADDRPERM((uintptr_t)unsafe_convert_port_to_voucher(voucher)),
			    0);
		}

		if (IP_VALID(release_voucher_port)) {
			ipc_port_release_send(release_voucher_port);
		}

		/* rewrite the header with names in place of object pointers */
		msg->msgh_bits = MACH_MSGH_BITS_SET(reply_type, dest_type,
		    voucher_type, mbits);
		msg->msgh_local_port = CAST_MACH_NAME_TO_PORT(dest_name);
		msg->msgh_remote_port = CAST_MACH_NAME_TO_PORT(reply_name);
		msg->msgh_voucher_port = voucher_name;
	}

	return MACH_MSG_SUCCESS;
}
4917
4918 /*
4919 * Routine: ipc_kmsg_copyout_object
4920 * Purpose:
4921 * Copy-out a port right. Always returns a name,
4922 * even for unsuccessful return codes. Always
4923 * consumes the supplied object.
4924 * Conditions:
4925 * Nothing locked.
4926 * Returns:
4927 * MACH_MSG_SUCCESS The space acquired the right
4928 * (name is valid) or the object is dead (MACH_PORT_DEAD).
4929 * MACH_MSG_IPC_SPACE No room in space for the right,
4930 * or the space is dead. (Name is MACH_PORT_NULL.)
4931 * MACH_MSG_IPC_KERNEL Kernel resource shortage.
4932 * (Name is MACH_PORT_NULL.)
4933 */
4934
4935 mach_msg_return_t
4936 ipc_kmsg_copyout_object(
4937 ipc_space_t space,
4938 ipc_object_t object,
4939 mach_msg_type_name_t msgt_name,
4940 mach_port_context_t *context,
4941 mach_msg_guard_flags_t *guard_flags,
4942 mach_port_name_t *namep)
4943 {
4944 kern_return_t kr;
4945
4946 if (!IO_VALID(object)) {
4947 *namep = CAST_MACH_PORT_TO_NAME(object);
4948 return MACH_MSG_SUCCESS;
4949 }
4950
4951 kr = ipc_object_copyout(space, object, msgt_name, context, guard_flags, namep);
4952 if (kr != KERN_SUCCESS) {
4953 ipc_object_destroy(object, msgt_name);
4954
4955 if (kr == KERN_INVALID_CAPABILITY) {
4956 *namep = MACH_PORT_DEAD;
4957 } else {
4958 *namep = MACH_PORT_NULL;
4959
4960 if (kr == KERN_RESOURCE_SHORTAGE) {
4961 return MACH_MSG_IPC_KERNEL;
4962 } else {
4963 return MACH_MSG_IPC_SPACE;
4964 }
4965 }
4966 }
4967
4968 return MACH_MSG_SUCCESS;
4969 }
4970
4971 static mach_msg_descriptor_t *
4972 ipc_kmsg_copyout_port_descriptor(mach_msg_descriptor_t *dsc,
4973 mach_msg_descriptor_t *dest_dsc,
4974 ipc_space_t space,
4975 kern_return_t *mr)
4976 {
4977 mach_port_t port;
4978 mach_port_name_t name;
4979 mach_msg_type_name_t disp;
4980
4981 /* Copyout port right carried in the message */
4982 port = dsc->port.name;
4983 disp = dsc->port.disposition;
4984 *mr |= ipc_kmsg_copyout_object(space,
4985 ip_to_object(port), disp, NULL, NULL, &name);
4986
4987 if (current_task() == kernel_task) {
4988 mach_msg_port_descriptor_t *user_dsc = (typeof(user_dsc))dest_dsc;
4989 user_dsc--; // point to the start of this port descriptor
4990 bzero((void *)user_dsc, sizeof(*user_dsc));
4991 user_dsc->name = CAST_MACH_NAME_TO_PORT(name);
4992 user_dsc->disposition = disp;
4993 user_dsc->type = MACH_MSG_PORT_DESCRIPTOR;
4994 dest_dsc = (typeof(dest_dsc))user_dsc;
4995 } else {
4996 mach_msg_legacy_port_descriptor_t *user_dsc = (typeof(user_dsc))dest_dsc;
4997 user_dsc--; // point to the start of this port descriptor
4998 bzero((void *)user_dsc, sizeof(*user_dsc));
4999 user_dsc->name = CAST_MACH_PORT_TO_NAME(name);
5000 user_dsc->disposition = disp;
5001 user_dsc->type = MACH_MSG_PORT_DESCRIPTOR;
5002 dest_dsc = (typeof(dest_dsc))user_dsc;
5003 }
5004
5005 return (mach_msg_descriptor_t *)dest_dsc;
5006 }
5007
5008 mach_msg_descriptor_t *
5009 ipc_kmsg_copyout_ool_descriptor(mach_msg_ool_descriptor_t *dsc, mach_msg_descriptor_t *user_dsc, int is_64bit, vm_map_t map, mach_msg_return_t *mr);
/*
 * Copy out one out-of-line memory descriptor: move the vm_map_copy_t
 * held in the kmsg into the receiver's map and rewrite the descriptor
 * in the layout the receiver expects (in-kernel, 64-bit user, or
 * 32-bit user). user_dsc points one past the slot being written;
 * returns the updated (decremented) position. On VM failure the
 * descriptor is written with a zero address/size and *mr accumulates
 * MACH_MSG_VM_KERNEL or MACH_MSG_VM_SPACE.
 */
mach_msg_descriptor_t *
ipc_kmsg_copyout_ool_descriptor(mach_msg_ool_descriptor_t *dsc, mach_msg_descriptor_t *user_dsc, int is_64bit, vm_map_t map, mach_msg_return_t *mr)
{
	vm_map_copy_t copy;
	vm_map_address_t rcv_addr;
	mach_msg_copy_options_t copy_options;
	vm_map_size_t size;
	mach_msg_descriptor_type_t dsc_type;
	boolean_t misaligned = FALSE;

	//SKIP_PORT_DESCRIPTORS(saddr, sdsc_count);

	copy = (vm_map_copy_t)dsc->address;
	size = (vm_map_size_t)dsc->size;
	copy_options = dsc->copy;
	assert(copy_options != MACH_MSG_KALLOC_COPY_T);
	dsc_type = dsc->type;

	if (copy != VM_MAP_COPY_NULL) {
		kern_return_t kr;

		rcv_addr = 0;
		/* the descriptor's size must agree with the copy object's */
		if (vm_map_copy_validate_size(map, copy, &size) == FALSE) {
			panic("Inconsistent OOL/copyout size on %p: expected %d, got %lld @%p",
			    dsc, dsc->size, (unsigned long long)copy->size, copy);
		}

		/*
		 * An entry-list copy whose offset or size is not page
		 * aligned can't be optimally copied out; it needs the
		 * explicit allocate-and-overwrite path below.
		 */
		if ((copy->type == VM_MAP_COPY_ENTRY_LIST) &&
		    (trunc_page(copy->offset) != copy->offset ||
		    round_page(dsc->size) != dsc->size)) {
			misaligned = TRUE;
		}

		if (misaligned) {
			vm_map_address_t rounded_addr;
			vm_map_size_t rounded_size;
			vm_map_offset_t effective_page_mask, effective_page_size;

			effective_page_mask = VM_MAP_PAGE_MASK(map);
			effective_page_size = effective_page_mask + 1;

			/* cover the copy with whole receiver-map pages */
			rounded_size = vm_map_round_page(copy->offset + size, effective_page_mask) - vm_map_trunc_page(copy->offset, effective_page_mask);

			kr = vm_allocate_kernel(map, (vm_offset_t*)&rounded_addr, rounded_size, VM_FLAGS_ANYWHERE, 0);

			if (kr == KERN_SUCCESS) {
				/*
				 * vm_map_copy_overwrite does a full copy
				 * if size is too small to optimize.
				 * So we tried skipping the offset adjustment
				 * if we fail the 'size' test.
				 *
				 * if (size >= VM_MAP_COPY_OVERWRITE_OPTIMIZATION_THRESHOLD_PAGES * effective_page_size) {
				 *
				 * This resulted in leaked memory especially on the
				 * older watches (16k user - 4k kernel) because we
				 * would do a physical copy into the start of this
				 * rounded range but could leak part of it
				 * on deallocation if the 'size' being deallocated
				 * does not cover the full range. So instead we do
				 * the misalignment adjustment always so that on
				 * deallocation we will remove the full range.
				 */
				if ((rounded_addr & effective_page_mask) !=
				    (copy->offset & effective_page_mask)) {
					/*
					 * Need similar mis-alignment of source and destination...
					 */
					rounded_addr += (copy->offset & effective_page_mask);

					assert((rounded_addr & effective_page_mask) == (copy->offset & effective_page_mask));
				}
				rcv_addr = rounded_addr;

				kr = vm_map_copy_overwrite(map, rcv_addr, copy, size, FALSE);
			}
		} else {
			/* aligned case: let the VM layer move the pages in */
			kr = vm_map_copyout_size(map, &rcv_addr, copy, size);
		}
		if (kr != KERN_SUCCESS) {
			if (kr == KERN_RESOURCE_SHORTAGE) {
				*mr |= MACH_MSG_VM_KERNEL;
			} else {
				*mr |= MACH_MSG_VM_SPACE;
			}
			vm_map_copy_discard(copy);
			rcv_addr = 0;
			size = 0;
		}
	} else {
		/* null copy object: report an empty region */
		rcv_addr = 0;
		size = 0;
	}

	/*
	 * Now update the descriptor as the user would see it.
	 * This may require expanding the descriptor to the user
	 * visible size. There is already space allocated for
	 * this in what naddr points to.
	 */
	if (current_task() == kernel_task) {
		mach_msg_ool_descriptor_t *user_ool_dsc = (typeof(user_ool_dsc))user_dsc;
		user_ool_dsc--;
		bzero((void *)user_ool_dsc, sizeof(*user_ool_dsc));

		user_ool_dsc->address = (void *)(uintptr_t)rcv_addr;
		user_ool_dsc->deallocate = (copy_options == MACH_MSG_VIRTUAL_COPY) ?
		    TRUE : FALSE;
		user_ool_dsc->copy = copy_options;
		user_ool_dsc->type = dsc_type;
		user_ool_dsc->size = (mach_msg_size_t)size;

		user_dsc = (typeof(user_dsc))user_ool_dsc;
	} else if (is_64bit) {
		mach_msg_ool_descriptor64_t *user_ool_dsc = (typeof(user_ool_dsc))user_dsc;
		user_ool_dsc--;
		bzero((void *)user_ool_dsc, sizeof(*user_ool_dsc));

		user_ool_dsc->address = rcv_addr;
		user_ool_dsc->deallocate = (copy_options == MACH_MSG_VIRTUAL_COPY) ?
		    TRUE : FALSE;
		user_ool_dsc->copy = copy_options;
		user_ool_dsc->type = dsc_type;
		user_ool_dsc->size = (mach_msg_size_t)size;

		user_dsc = (typeof(user_dsc))user_ool_dsc;
	} else {
		mach_msg_ool_descriptor32_t *user_ool_dsc = (typeof(user_ool_dsc))user_dsc;
		user_ool_dsc--;
		bzero((void *)user_ool_dsc, sizeof(*user_ool_dsc));

		user_ool_dsc->address = CAST_DOWN_EXPLICIT(uint32_t, rcv_addr);
		user_ool_dsc->size = (mach_msg_size_t)size;
		user_ool_dsc->deallocate = (copy_options == MACH_MSG_VIRTUAL_COPY) ?
		    TRUE : FALSE;
		user_ool_dsc->copy = copy_options;
		user_ool_dsc->type = dsc_type;

		user_dsc = (typeof(user_dsc))user_ool_dsc;
	}
	return user_dsc;
}
5152
/* Forward declaration: the function is non-static, so a visible prototype
 * keeps -Wmissing-prototypes quiet. */
mach_msg_descriptor_t *
ipc_kmsg_copyout_ool_ports_descriptor(mach_msg_ool_ports_descriptor_t *dsc,
    mach_msg_descriptor_t *user_dsc,
    int is_64bit,
    vm_map_t map,
    ipc_space_t space,
    ipc_kmsg_t kmsg,
    mach_msg_return_t *mr);
/*
 *	Routine:	ipc_kmsg_copyout_ool_ports_descriptor
 *	Purpose:
 *		Copy out an out-of-line ports descriptor: translate each
 *		in-transit right in the kernel-held object array into a name
 *		in the receiver's space, copy the resulting name array into
 *		newly allocated memory in the receiver's map, and write the
 *		user-visible form of the descriptor (kernel / 64-bit / 32-bit
 *		layout) immediately below *user_dsc.
 *	Conditions:
 *		Nothing locked.  Error bits (MACH_MSG_VM_KERNEL,
 *		MACH_MSG_VM_SPACE, ...) are OR'ed into *mr; the copyout
 *		proceeds despite errors.
 *	Returns:
 *		The new (decremented) user descriptor cursor.
 */
mach_msg_descriptor_t *
ipc_kmsg_copyout_ool_ports_descriptor(mach_msg_ool_ports_descriptor_t *dsc,
    mach_msg_descriptor_t *user_dsc,
    int is_64bit,
    vm_map_t map,
    ipc_space_t space,
    ipc_kmsg_t kmsg,
    mach_msg_return_t *mr)
{
	mach_vm_offset_t rcv_addr = 0;
	mach_msg_type_name_t disp;
	mach_msg_type_number_t count, i;
	vm_size_t ports_length, names_length;

	mach_msg_copy_options_t copy_options = MACH_MSG_VIRTUAL_COPY;

	//SKIP_PORT_DESCRIPTORS(saddr, sdsc_count);

	count = dsc->count;
	disp = dsc->disposition;
	/* kernel buffer holds object pointers; the user copy holds 32-bit names */
	ports_length = count * sizeof(mach_port_t);
	names_length = count * sizeof(mach_port_name_t);

	if (ports_length != 0 && dsc->address != 0) {
		/*
		 * Check to see if there is an overwrite descriptor
		 * specified in the scatter list for this ool data.
		 * The descriptor has already been verified.
		 */
#if 0
		if (saddr != MACH_MSG_DESCRIPTOR_NULL) {
			if (differs) {
				OTHER_OOL_DESCRIPTOR *scatter_dsc;

				scatter_dsc = (OTHER_OOL_DESCRIPTOR *)saddr;
				rcv_addr = (mach_vm_offset_t) scatter_dsc->address;
				copy_options = scatter_dsc->copy;
			} else {
				mach_msg_ool_descriptor_t *scatter_dsc;

				scatter_dsc = &saddr->out_of_line;
				rcv_addr = CAST_USER_ADDR_T(scatter_dsc->address);
				copy_options = scatter_dsc->copy;
			}
			INCREMENT_SCATTER(saddr, sdsc_count, differs);
		}
#endif

		if (copy_options == MACH_MSG_VIRTUAL_COPY) {
			/*
			 * Dynamically allocate the region
			 */
			vm_tag_t tag;
			if (vm_kernel_map_is_kernel(map)) {
				tag = VM_KERN_MEMORY_IPC;
			} else {
				tag = VM_MEMORY_MACH_MSG;
			}

			kern_return_t kr;
			if ((kr = mach_vm_allocate_kernel(map, &rcv_addr,
			    (mach_vm_size_t)names_length,
			    VM_FLAGS_ANYWHERE, tag)) != KERN_SUCCESS) {
				/* allocation failed: destroy the rights held by
				 * this descriptor so nothing leaks */
				ipc_kmsg_clean_body(kmsg, 1, (mach_msg_descriptor_t *)dsc);
				rcv_addr = 0;

				if (kr == KERN_RESOURCE_SHORTAGE) {
					*mr |= MACH_MSG_VM_KERNEL;
				} else {
					*mr |= MACH_MSG_VM_SPACE;
				}
			}
		}

		/*
		 * Handle the port rights and copy out the names
		 * for those rights out to user-space.
		 */
		if (rcv_addr != 0) {
			/* objects and names deliberately alias the same kernel
			 * buffer: each name is written over the slot whose object
			 * pointer was already consumed (name entries are no wider
			 * than the object pointers, so names[i] never overtakes
			 * objects[i]) */
			ipc_object_t *objects = (ipc_object_t *) dsc->address;
			mach_port_name_t *names = (mach_port_name_t *) dsc->address;

			/* copyout port rights carried in the message */

			for (i = 0; i < count; i++) {
				ipc_object_t object = objects[i];

				*mr |= ipc_kmsg_copyout_object(space, object,
				    disp, NULL, NULL, &names[i]);
			}

			/* copyout to memory allocated above */
			void *data = dsc->address;
			if (copyoutmap(map, data, rcv_addr, names_length) != KERN_SUCCESS) {
				*mr |= MACH_MSG_VM_SPACE;
			}
			/* free the kernel buffer, which was sized for object pointers */
			kfree(data, ports_length);
		}
	} else {
		rcv_addr = 0;
	}

	/*
	 * Now update the descriptor based on the information
	 * calculated above.
	 */
	if (current_task() == kernel_task) {
		/* kernel receiver: keep the in-kernel (pointer-address) layout */
		mach_msg_ool_ports_descriptor_t *user_ool_dsc = (typeof(user_ool_dsc))user_dsc;
		user_ool_dsc--;
		bzero((void *)user_ool_dsc, sizeof(*user_ool_dsc));

		user_ool_dsc->address = (void *)(uintptr_t)rcv_addr;
		user_ool_dsc->deallocate = (copy_options == MACH_MSG_VIRTUAL_COPY) ?
		    TRUE : FALSE;
		user_ool_dsc->copy = copy_options;
		user_ool_dsc->disposition = disp;
		user_ool_dsc->type = MACH_MSG_OOL_PORTS_DESCRIPTOR;
		user_ool_dsc->count = count;

		user_dsc = (typeof(user_dsc))user_ool_dsc;
	} else if (is_64bit) {
		/* 64-bit user receiver: 64-bit address field */
		mach_msg_ool_ports_descriptor64_t *user_ool_dsc = (typeof(user_ool_dsc))user_dsc;
		user_ool_dsc--;
		bzero((void *)user_ool_dsc, sizeof(*user_ool_dsc));

		user_ool_dsc->address = rcv_addr;
		user_ool_dsc->deallocate = (copy_options == MACH_MSG_VIRTUAL_COPY) ?
		    TRUE : FALSE;
		user_ool_dsc->copy = copy_options;
		user_ool_dsc->disposition = disp;
		user_ool_dsc->type = MACH_MSG_OOL_PORTS_DESCRIPTOR;
		user_ool_dsc->count = count;

		user_dsc = (typeof(user_dsc))user_ool_dsc;
	} else {
		/* 32-bit user receiver: address truncated to 32 bits */
		mach_msg_ool_ports_descriptor32_t *user_ool_dsc = (typeof(user_ool_dsc))user_dsc;
		user_ool_dsc--;
		bzero((void *)user_ool_dsc, sizeof(*user_ool_dsc));

		user_ool_dsc->address = CAST_DOWN_EXPLICIT(uint32_t, rcv_addr);
		user_ool_dsc->count = count;
		user_ool_dsc->deallocate = (copy_options == MACH_MSG_VIRTUAL_COPY) ?
		    TRUE : FALSE;
		user_ool_dsc->copy = copy_options;
		user_ool_dsc->disposition = disp;
		user_ool_dsc->type = MACH_MSG_OOL_PORTS_DESCRIPTOR;

		user_dsc = (typeof(user_dsc))user_ool_dsc;
	}
	return user_dsc;
}
5312
/*
 *	Routine:	ipc_kmsg_copyout_guarded_port_descriptor
 *	Purpose:
 *		Copy out a guarded port descriptor.  If the receiver opted in
 *		(MACH_RCV_GUARDED_DESC) or this is a pseudo-receive, the right
 *		is copied out with its guard/context; otherwise the right is
 *		destroyed and a plain port descriptor carrying MACH_PORT_NULL
 *		is written instead.  The user descriptor is packed immediately
 *		below *dest_dsc, in 32- or 64-bit layout as appropriate.
 *	Conditions:
 *		Nothing locked.  Error bits are OR'ed into *mr.
 *	Returns:
 *		The new (decremented) user descriptor cursor.
 */
static mach_msg_descriptor_t *
ipc_kmsg_copyout_guarded_port_descriptor(
	mach_msg_guarded_port_descriptor_t *dsc,
	mach_msg_descriptor_t *dest_dsc,
	int is_64bit,
	__unused ipc_kmsg_t kmsg,
	ipc_space_t space,
	mach_msg_option_t option,
	kern_return_t *mr)
{
	mach_port_t port;
	mach_port_name_t name = MACH_PORT_NULL;
	mach_msg_type_name_t disp;
	mach_msg_guard_flags_t guard_flags;
	mach_port_context_t context;

	/* Copyout port right carried in the message */
	port = dsc->name;
	disp = dsc->disposition;
	guard_flags = dsc->flags;
	context = 0;

	/* Currently kernel_task doesnt support receiving guarded port descriptors */
	struct knote *kn = current_thread()->ith_knote;
	if ((kn != ITH_KNOTE_PSEUDO) && (((option & MACH_RCV_GUARDED_DESC) == 0) ||
	    (current_task() == kernel_task))) {
#if DEVELOPMENT || DEBUG
		if (current_task() != kernel_task) {
			/*
			 * Simulated crash needed for debugging, notifies the receiver to opt into receiving
			 * guarded descriptors.
			 */
			mach_port_guard_exception(current_thread()->ith_receiver_name, 0, 0, kGUARD_EXC_RCV_GUARDED_DESC);
		}
#endif
		KDBG(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_DESTROY_GUARDED_DESC), current_thread()->ith_receiver_name,
		    VM_KERNEL_ADDRPERM(port), disp, guard_flags);
		/* receiver cannot accept the guard: drop the right, downgrade
		 * the descriptor to a plain one naming MACH_PORT_NULL */
		ipc_object_destroy(ip_to_object(port), disp);
		mach_msg_legacy_port_descriptor_t *user_dsc = (typeof(user_dsc))dest_dsc;
		user_dsc--; // point to the start of this port descriptor
		bzero((void *)user_dsc, sizeof(*user_dsc));
		user_dsc->name = name;
		user_dsc->disposition = disp;
		user_dsc->type = MACH_MSG_PORT_DESCRIPTOR;
		dest_dsc = (typeof(dest_dsc))user_dsc;
	} else {
		/* copy out the right together with its guard context */
		*mr |= ipc_kmsg_copyout_object(space,
		    ip_to_object(port), disp, &context, &guard_flags, &name);

		if (!is_64bit) {
			mach_msg_guarded_port_descriptor32_t *user_dsc = (typeof(user_dsc))dest_dsc;
			user_dsc--; // point to the start of this port descriptor
			bzero((void *)user_dsc, sizeof(*user_dsc));
			user_dsc->name = name;
			user_dsc->flags = guard_flags;
			user_dsc->disposition = disp;
			user_dsc->type = MACH_MSG_GUARDED_PORT_DESCRIPTOR;
			/* context narrows to 32 bits for a 32-bit receiver */
			user_dsc->context = CAST_DOWN_EXPLICIT(uint32_t, context);
			dest_dsc = (typeof(dest_dsc))user_dsc;
		} else {
			mach_msg_guarded_port_descriptor64_t *user_dsc = (typeof(user_dsc))dest_dsc;
			user_dsc--; // point to the start of this port descriptor
			bzero((void *)user_dsc, sizeof(*user_dsc));
			user_dsc->name = name;
			user_dsc->flags = guard_flags;
			user_dsc->disposition = disp;
			user_dsc->type = MACH_MSG_GUARDED_PORT_DESCRIPTOR;
			user_dsc->context = context;
			dest_dsc = (typeof(dest_dsc))user_dsc;
		}
	}

	return (mach_msg_descriptor_t *)dest_dsc;
}
5387
5388
/*
 *	Routine:	ipc_kmsg_copyout_body
 *	Purpose:
 *		"Copy-out" port rights and out-of-line memory
 *		in the body of a message.
 *
 *		The error codes are a combination of special bits.
 *		The copyout proceeds despite errors.
 *	Conditions:
 *		Nothing locked.
 *	Returns:
 *		MACH_MSG_SUCCESS	Successful copyout.
 *		MACH_MSG_IPC_SPACE	No room for port right in name space.
 *		MACH_MSG_VM_SPACE	No room for memory in address space.
 *		MACH_MSG_IPC_KERNEL	Resource shortage handling port right.
 *		MACH_MSG_VM_KERNEL	Resource shortage handling memory.
 *		MACH_MSG_INVALID_RT_DESCRIPTOR Descriptor incompatible with RT
 */

mach_msg_return_t
ipc_kmsg_copyout_body(
	ipc_kmsg_t kmsg,
	ipc_space_t space,
	vm_map_t map,
	mach_msg_option_t option,
	mach_msg_body_t *slist)
{
	mach_msg_body_t *body;
	mach_msg_descriptor_t *kern_dsc, *user_dsc;
	mach_msg_descriptor_t *saddr;
	mach_msg_type_number_t dsc_count, sdsc_count;
	int i;
	mach_msg_return_t mr = MACH_MSG_SUCCESS;
	boolean_t is_task_64bit = (map->max_offset > VM_MAX_ADDRESS);

	body = (mach_msg_body_t *) (kmsg->ikm_header + 1);
	dsc_count = body->msgh_descriptor_count;
	kern_dsc = (mach_msg_descriptor_t *) (body + 1);
	/* Point user_dsc just after the end of all the descriptors */
	user_dsc = &kern_dsc[dsc_count];

	/* Do scatter list setup */
	if (slist != MACH_MSG_BODY_NULL) {
		/* scatter lists are no longer supported; callers must pass NULL */
		panic("Scatter lists disabled");
		saddr = (mach_msg_descriptor_t *) (slist + 1);
		sdsc_count = slist->msgh_descriptor_count;
	} else {
		saddr = MACH_MSG_DESCRIPTOR_NULL;
		sdsc_count = 0;
	}

	/*
	 * Now process the descriptors - in reverse order, so each helper can
	 * pack its (possibly smaller) user-layout descriptor downward from
	 * user_dsc without clobbering kernel descriptors not yet processed.
	 */
	for (i = dsc_count - 1; i >= 0; i--) {
		switch (kern_dsc[i].type.type) {
		case MACH_MSG_PORT_DESCRIPTOR:
			user_dsc = ipc_kmsg_copyout_port_descriptor(&kern_dsc[i], user_dsc, space, &mr);
			break;
		case MACH_MSG_OOL_VOLATILE_DESCRIPTOR:
		case MACH_MSG_OOL_DESCRIPTOR:
			user_dsc = ipc_kmsg_copyout_ool_descriptor(
				(mach_msg_ool_descriptor_t *)&kern_dsc[i], user_dsc, is_task_64bit, map, &mr);
			break;
		case MACH_MSG_OOL_PORTS_DESCRIPTOR:
			user_dsc = ipc_kmsg_copyout_ool_ports_descriptor(
				(mach_msg_ool_ports_descriptor_t *)&kern_dsc[i], user_dsc, is_task_64bit, map, space, kmsg, &mr);
			break;
		case MACH_MSG_GUARDED_PORT_DESCRIPTOR:
			user_dsc = ipc_kmsg_copyout_guarded_port_descriptor(
				(mach_msg_guarded_port_descriptor_t *)&kern_dsc[i], user_dsc, is_task_64bit, kmsg, space, option, &mr);
			break;
		default: {
			panic("untyped IPC copyout body: invalid message descriptor");
		}
		}
	}

	/* If the descriptors shrank, slide the header up against them and
	 * shrink the reported message size accordingly. */
	if (user_dsc != kern_dsc) {
		vm_offset_t dsc_adjust = (vm_offset_t)user_dsc - (vm_offset_t)kern_dsc;
		memmove((char *)((vm_offset_t)kmsg->ikm_header + dsc_adjust), kmsg->ikm_header, sizeof(mach_msg_base_t));
		kmsg->ikm_header = (mach_msg_header_t *)((vm_offset_t)kmsg->ikm_header + dsc_adjust);
		/* Update the message size for the smaller user representation */
		kmsg->ikm_header->msgh_size -= (mach_msg_size_t)dsc_adjust;
	}

	return mr;
}
5475
5476 /*
5477 * Routine: ipc_kmsg_copyout_size
5478 * Purpose:
5479 * Compute the size of the message as copied out to the given
5480 * map. If the destination map's pointers are a different size
5481 * than the kernel's, we have to allow for expansion/
5482 * contraction of the descriptors as appropriate.
5483 * Conditions:
5484 * Nothing locked.
5485 * Returns:
5486 * size of the message as it would be received.
5487 */
5488
5489 mach_msg_size_t
5490 ipc_kmsg_copyout_size(
5491 ipc_kmsg_t kmsg,
5492 vm_map_t map)
5493 {
5494 mach_msg_size_t send_size;
5495
5496 send_size = kmsg->ikm_header->msgh_size;
5497
5498 boolean_t is_task_64bit = (map->max_offset > VM_MAX_ADDRESS);
5499
5500 #if defined(__LP64__)
5501 send_size -= LEGACY_HEADER_SIZE_DELTA;
5502 #endif
5503
5504 if (kmsg->ikm_header->msgh_bits & MACH_MSGH_BITS_COMPLEX) {
5505 mach_msg_body_t *body;
5506 mach_msg_descriptor_t *saddr, *eaddr;
5507
5508 body = (mach_msg_body_t *) (kmsg->ikm_header + 1);
5509 saddr = (mach_msg_descriptor_t *) (body + 1);
5510 eaddr = saddr + body->msgh_descriptor_count;
5511
5512 for (; saddr < eaddr; saddr++) {
5513 switch (saddr->type.type) {
5514 case MACH_MSG_OOL_DESCRIPTOR:
5515 case MACH_MSG_OOL_VOLATILE_DESCRIPTOR:
5516 case MACH_MSG_OOL_PORTS_DESCRIPTOR:
5517 case MACH_MSG_GUARDED_PORT_DESCRIPTOR:
5518 if (!is_task_64bit) {
5519 send_size -= DESC_SIZE_ADJUSTMENT;
5520 }
5521 break;
5522 case MACH_MSG_PORT_DESCRIPTOR:
5523 send_size -= DESC_SIZE_ADJUSTMENT;
5524 break;
5525 default:
5526 break;
5527 }
5528 }
5529 }
5530 return send_size;
5531 }
5532
5533 /*
5534 * Routine: ipc_kmsg_copyout
5535 * Purpose:
5536 * "Copy-out" port rights and out-of-line memory
5537 * in the message.
5538 * Conditions:
5539 * Nothing locked.
5540 * Returns:
5541 * MACH_MSG_SUCCESS Copied out all rights and memory.
5542 * MACH_RCV_HEADER_ERROR + special bits
5543 * Rights and memory in the message are intact.
5544 * MACH_RCV_BODY_ERROR + special bits
5545 * The message header was successfully copied out.
5546 * As much of the body was handled as possible.
5547 */
5548
5549 mach_msg_return_t
5550 ipc_kmsg_copyout(
5551 ipc_kmsg_t kmsg,
5552 ipc_space_t space,
5553 vm_map_t map,
5554 mach_msg_body_t *slist,
5555 mach_msg_option_t option)
5556 {
5557 mach_msg_return_t mr;
5558
5559 ikm_validate_sig(kmsg);
5560
5561 mr = ipc_kmsg_copyout_header(kmsg, space, option);
5562 if (mr != MACH_MSG_SUCCESS) {
5563 return mr;
5564 }
5565
5566 if (kmsg->ikm_header->msgh_bits & MACH_MSGH_BITS_COMPLEX) {
5567 mr = ipc_kmsg_copyout_body(kmsg, space, map, option, slist);
5568
5569 if (mr != MACH_MSG_SUCCESS) {
5570 mr |= MACH_RCV_BODY_ERROR;
5571 }
5572 }
5573
5574 return mr;
5575 }
5576
/*
 *	Routine:	ipc_kmsg_copyout_pseudo
 *	Purpose:
 *		Does a pseudo-copyout of the message.
 *		This is like a regular copyout, except
 *		that the ports in the header are handled
 *		as if they are in the body.  They aren't reversed.
 *
 *		The error codes are a combination of special bits.
 *		The copyout proceeds despite errors.
 *	Conditions:
 *		Nothing locked.
 *	Returns:
 *		MACH_MSG_SUCCESS	Successful copyout.
 *		MACH_MSG_IPC_SPACE	No room for port right in name space.
 *		MACH_MSG_VM_SPACE	No room for memory in address space.
 *		MACH_MSG_IPC_KERNEL	Resource shortage handling port right.
 *		MACH_MSG_VM_KERNEL	Resource shortage handling memory.
 */

mach_msg_return_t
ipc_kmsg_copyout_pseudo(
	ipc_kmsg_t kmsg,
	ipc_space_t space,
	vm_map_t map,
	mach_msg_body_t *slist)
{
	mach_msg_bits_t mbits = kmsg->ikm_header->msgh_bits;
	ipc_object_t dest = ip_to_object(kmsg->ikm_header->msgh_remote_port);
	ipc_object_t reply = ip_to_object(kmsg->ikm_header->msgh_local_port);
	ipc_object_t voucher = ip_to_object(kmsg->ikm_voucher);
	mach_msg_type_name_t dest_type = MACH_MSGH_BITS_REMOTE(mbits);
	mach_msg_type_name_t reply_type = MACH_MSGH_BITS_LOCAL(mbits);
	mach_msg_type_name_t voucher_type = MACH_MSGH_BITS_VOUCHER(mbits);
	mach_port_name_t voucher_name = kmsg->ikm_header->msgh_voucher_port;
	mach_port_name_t dest_name, reply_name;
	mach_msg_return_t mr;

	/* Set ith_knote to ITH_KNOTE_PSEUDO, so descriptor copyout (e.g.
	 * guarded port descriptors) can tell this is a pseudo-receive */
	current_thread()->ith_knote = ITH_KNOTE_PSEUDO;

	ikm_validate_sig(kmsg);

	assert(IO_VALID(dest));

#if 0
	/*
	 * If we did this here, it looks like we wouldn't need the undo logic
	 * at the end of ipc_kmsg_send() in the error cases.  Not sure which
	 * would be more elegant to keep.
	 */
	ipc_importance_clean(kmsg);
#else
	/* just assert it is already clean */
	ipc_importance_assert_clean(kmsg);
#endif

	/* header ports copy out like body ports: remote stays remote,
	 * local stays local -- no reversal */
	mr = (ipc_kmsg_copyout_object(space, dest, dest_type, NULL, NULL, &dest_name) |
	    ipc_kmsg_copyout_object(space, reply, reply_type, NULL, NULL, &reply_name));

	kmsg->ikm_header->msgh_bits = mbits & MACH_MSGH_BITS_USER;
	kmsg->ikm_header->msgh_remote_port = CAST_MACH_NAME_TO_PORT(dest_name);
	kmsg->ikm_header->msgh_local_port = CAST_MACH_NAME_TO_PORT(reply_name);

	if (IO_VALID(voucher)) {
		assert(voucher_type == MACH_MSG_TYPE_MOVE_SEND);

		kmsg->ikm_voucher = IP_NULL;
		mr |= ipc_kmsg_copyout_object(space, voucher, voucher_type, NULL, NULL, &voucher_name);
		kmsg->ikm_header->msgh_voucher_port = voucher_name;
	}

	if (mbits & MACH_MSGH_BITS_COMPLEX) {
		/* option 0: no receive-time options apply to a pseudo-receive */
		mr |= ipc_kmsg_copyout_body(kmsg, space, map, 0, slist);
	}

	return mr;
}
5655
/*
 *	Routine:	ipc_kmsg_copyout_dest
 *	Purpose:
 *		Copies out the destination port in the message.
 *		Destroys all other rights and memory in the message.
 *	Conditions:
 *		Nothing locked.
 */

void
ipc_kmsg_copyout_dest(
	ipc_kmsg_t kmsg,
	ipc_space_t space)
{
	mach_msg_bits_t mbits;
	ipc_object_t dest;
	ipc_object_t reply;
	ipc_object_t voucher;
	mach_msg_type_name_t dest_type;
	mach_msg_type_name_t reply_type;
	mach_msg_type_name_t voucher_type;
	mach_port_name_t dest_name, reply_name, voucher_name;

	ikm_validate_sig(kmsg);

	mbits = kmsg->ikm_header->msgh_bits;
	dest = ip_to_object(kmsg->ikm_header->msgh_remote_port);
	reply = ip_to_object(kmsg->ikm_header->msgh_local_port);
	voucher = ip_to_object(kmsg->ikm_voucher);
	voucher_name = kmsg->ikm_header->msgh_voucher_port;
	dest_type = MACH_MSGH_BITS_REMOTE(mbits);
	reply_type = MACH_MSGH_BITS_LOCAL(mbits);
	voucher_type = MACH_MSGH_BITS_VOUCHER(mbits);

	assert(IO_VALID(dest));

	ipc_importance_assert_clean(kmsg);

	/* copy out the destination right; a dead destination becomes
	 * MACH_PORT_DEAD */
	io_lock(dest);
	if (io_active(dest)) {
		ipc_object_copyout_dest(space, dest, dest_type, &dest_name);
		/* dest is unlocked */
	} else {
		io_unlock(dest);
		io_release(dest);
		dest_name = MACH_PORT_DEAD;
	}

	/* the reply right is destroyed, not copied out */
	if (IO_VALID(reply)) {
		ipc_object_destroy(reply, reply_type);
		reply_name = MACH_PORT_NULL;
	} else {
		reply_name = CAST_MACH_PORT_TO_NAME(reply);
	}

	/* the voucher right, if any, is destroyed as well */
	if (IO_VALID(voucher)) {
		assert(voucher_type == MACH_MSG_TYPE_MOVE_SEND);

		kmsg->ikm_voucher = IP_NULL;
		ipc_object_destroy(voucher, voucher_type);
		voucher_name = MACH_PORT_NULL;
	}

	/* rebuild the header from the receiver's point of view:
	 * remote/local ports (and their type bits) are swapped */
	kmsg->ikm_header->msgh_bits = MACH_MSGH_BITS_SET(reply_type, dest_type,
	    voucher_type, mbits);
	kmsg->ikm_header->msgh_local_port = CAST_MACH_NAME_TO_PORT(dest_name);
	kmsg->ikm_header->msgh_remote_port = CAST_MACH_NAME_TO_PORT(reply_name);
	kmsg->ikm_header->msgh_voucher_port = voucher_name;

	/* destroy any rights/memory carried in the body */
	if (mbits & MACH_MSGH_BITS_COMPLEX) {
		mach_msg_body_t *body;

		body = (mach_msg_body_t *) (kmsg->ikm_header + 1);
		ipc_kmsg_clean_body(kmsg, body->msgh_descriptor_count,
		    (mach_msg_descriptor_t *)(body + 1));
	}
}
5733
/*
 *	Routine:	ipc_kmsg_copyout_to_kernel
 *	Purpose:
 *		Copies out the destination and reply ports in the message.
 *		Leaves all other rights and memory in the message alone.
 *	Conditions:
 *		Nothing locked.
 *
 *	Derived from ipc_kmsg_copyout_dest.
 *	Use by mach_msg_rpc_from_kernel (which used to use copyout_dest).
 *	We really do want to save rights and memory.
 */

void
ipc_kmsg_copyout_to_kernel(
	ipc_kmsg_t kmsg,
	ipc_space_t space)
{
	ipc_object_t dest;
	mach_port_t reply;
	mach_msg_type_name_t dest_type;
	mach_msg_type_name_t reply_type;
	mach_port_name_t dest_name;

	ikm_validate_sig(kmsg);

	dest = ip_to_object(kmsg->ikm_header->msgh_remote_port);
	reply = kmsg->ikm_header->msgh_local_port;
	dest_type = MACH_MSGH_BITS_REMOTE(kmsg->ikm_header->msgh_bits);
	reply_type = MACH_MSGH_BITS_LOCAL(kmsg->ikm_header->msgh_bits);

	assert(IO_VALID(dest));

	/* copy out only the destination; a dead one becomes MACH_PORT_DEAD */
	io_lock(dest);
	if (io_active(dest)) {
		ipc_object_copyout_dest(space, dest, dest_type, &dest_name);
		/* dest is unlocked */
	} else {
		io_unlock(dest);
		io_release(dest);
		dest_name = MACH_PORT_DEAD;
	}

	/*
	 * While MIG kernel users don't receive vouchers, the
	 * msgh_voucher_port field is intended to be round-tripped through the
	 * kernel if there is no voucher disposition set. Here we check for a
	 * non-zero voucher disposition, and consume the voucher send right as
	 * there is no possible way to specify MACH_RCV_VOUCHER semantics.
	 */
	mach_msg_type_name_t voucher_type;
	voucher_type = MACH_MSGH_BITS_VOUCHER(kmsg->ikm_header->msgh_bits);
	if (voucher_type != MACH_MSGH_BITS_ZERO) {
		assert(voucher_type == MACH_MSG_TYPE_MOVE_SEND);
		/*
		 * someone managed to send this kernel routine a message with
		 * a voucher in it. Cleanup the reference in
		 * kmsg->ikm_voucher.
		 */
		if (IP_VALID(kmsg->ikm_voucher)) {
			ipc_port_release_send(kmsg->ikm_voucher);
		}
		kmsg->ikm_voucher = IP_NULL;
		kmsg->ikm_header->msgh_voucher_port = 0;
	}

	/* swap remote/local for the receiver's view; the reply port object
	 * is passed through untouched */
	kmsg->ikm_header->msgh_bits =
	    (MACH_MSGH_BITS_OTHER(kmsg->ikm_header->msgh_bits) |
	    MACH_MSGH_BITS(reply_type, dest_type));
	kmsg->ikm_header->msgh_local_port = CAST_MACH_NAME_TO_PORT(dest_name);
	kmsg->ikm_header->msgh_remote_port = reply;
}
5806
5807 #if IKM_SUPPORT_LEGACY
/*
 *	Routine:	ipc_kmsg_copyout_to_kernel_legacy
 *	Purpose:
 *		Legacy variant of ipc_kmsg_copyout_to_kernel: copies out the
 *		destination and reply ports, then rewrites the body's kernel
 *		descriptors in place into their 32-bit "legacy" layout and
 *		slides the header/body up by the saved space (4 bytes per
 *		descriptor).  All other rights and memory are preserved.
 *	Conditions:
 *		Nothing locked.
 */
void
ipc_kmsg_copyout_to_kernel_legacy(
	ipc_kmsg_t kmsg,
	ipc_space_t space)
{
	ipc_object_t dest;
	mach_port_t reply;
	mach_msg_type_name_t dest_type;
	mach_msg_type_name_t reply_type;
	mach_port_name_t dest_name;

	ikm_validate_sig(kmsg);

	dest = ip_to_object(kmsg->ikm_header->msgh_remote_port);
	reply = kmsg->ikm_header->msgh_local_port;
	dest_type = MACH_MSGH_BITS_REMOTE(kmsg->ikm_header->msgh_bits);
	reply_type = MACH_MSGH_BITS_LOCAL(kmsg->ikm_header->msgh_bits);

	assert(IO_VALID(dest));

	/* copy out only the destination; a dead one becomes MACH_PORT_DEAD */
	io_lock(dest);
	if (io_active(dest)) {
		ipc_object_copyout_dest(space, dest, dest_type, &dest_name);
		/* dest is unlocked */
	} else {
		io_unlock(dest);
		io_release(dest);
		dest_name = MACH_PORT_DEAD;
	}

	/* consume any voucher right: kernel receivers cannot take vouchers */
	mach_msg_type_name_t voucher_type;
	voucher_type = MACH_MSGH_BITS_VOUCHER(kmsg->ikm_header->msgh_bits);
	if (voucher_type != MACH_MSGH_BITS_ZERO) {
		assert(voucher_type == MACH_MSG_TYPE_MOVE_SEND);
		assert(IP_VALID(kmsg->ikm_voucher));
		/*
		 * someone managed to send this kernel routine a message with
		 * a voucher in it. Cleanup the reference in
		 * kmsg->ikm_voucher.
		 */
		ipc_port_release_send(kmsg->ikm_voucher);
		kmsg->ikm_voucher = IP_NULL;
		kmsg->ikm_header->msgh_voucher_port = 0;
	}

	/* swap remote/local for the receiver's view */
	kmsg->ikm_header->msgh_bits =
	    (MACH_MSGH_BITS_OTHER(kmsg->ikm_header->msgh_bits) |
	    MACH_MSGH_BITS(reply_type, dest_type));
	kmsg->ikm_header->msgh_local_port = CAST_MACH_NAME_TO_PORT(dest_name);
	kmsg->ikm_header->msgh_remote_port = reply;

	/*
	 * Squeeze the kernel descriptors down to legacy (32-bit) layout,
	 * in place, walking from the last descriptor to the first.
	 */
	mach_msg_descriptor_t *saddr;
	mach_msg_legacy_descriptor_t *daddr;
	mach_msg_type_number_t i, count = ((mach_msg_base_t *)kmsg->ikm_header)->body.msgh_descriptor_count;
	saddr = (mach_msg_descriptor_t *) (((mach_msg_base_t *)kmsg->ikm_header) + 1);
	saddr = &saddr[count - 1];
	/*
	 * NOTE(review): daddr is derived from saddr AFTER saddr was moved to
	 * the last descriptor, so &saddr[count] is not the end of the kernel
	 * descriptor array for count > 1 (compare ipc_kmsg_copyout_body,
	 * which takes &kern_dsc[dsc_count] from the array base).  Verify the
	 * intended destination cursor for multi-descriptor messages.
	 */
	daddr = (mach_msg_legacy_descriptor_t *)&saddr[count];
	daddr--;

	vm_offset_t dsc_adjust = 0;

	for (i = 0; i < count; i++, saddr--, daddr--) {
		switch (saddr->type.type) {
		case MACH_MSG_PORT_DESCRIPTOR: {
			mach_msg_port_descriptor_t *dsc = &saddr->port;
			mach_msg_legacy_port_descriptor_t *dest_dsc = &daddr->port;

			/* kernel receiver: the port pointer itself becomes the "name" */
			mach_port_t name = dsc->name;
			mach_msg_type_name_t disposition = dsc->disposition;

			dest_dsc->name = CAST_MACH_PORT_TO_NAME(name);
			dest_dsc->disposition = disposition;
			dest_dsc->type = MACH_MSG_PORT_DESCRIPTOR;
			break;
		}
		case MACH_MSG_OOL_VOLATILE_DESCRIPTOR:
		case MACH_MSG_OOL_DESCRIPTOR: {
			/* The sender should supply ready-made memory, i.e. a vm_map_copy_t
			 * so we don't need to do anything special. */

			mach_msg_ool_descriptor_t *source_dsc = (typeof(source_dsc)) & saddr->out_of_line;

			mach_msg_ool_descriptor32_t *dest_dsc = &daddr->out_of_line32;

			vm_offset_t address = (vm_offset_t)source_dsc->address;
			vm_size_t size = source_dsc->size;
			boolean_t deallocate = source_dsc->deallocate;
			mach_msg_copy_options_t copy = source_dsc->copy;
			mach_msg_descriptor_type_t type = source_dsc->type;

			dest_dsc->address = address;
			dest_dsc->size = size;
			dest_dsc->deallocate = deallocate;
			dest_dsc->copy = copy;
			dest_dsc->type = type;
			break;
		}
		case MACH_MSG_OOL_PORTS_DESCRIPTOR: {
			mach_msg_ool_ports_descriptor_t *source_dsc = (typeof(source_dsc)) & saddr->ool_ports;

			mach_msg_ool_ports_descriptor32_t *dest_dsc = &daddr->ool_ports32;

			vm_offset_t address = (vm_offset_t)source_dsc->address;
			vm_size_t port_count = source_dsc->count;
			boolean_t deallocate = source_dsc->deallocate;
			mach_msg_copy_options_t copy = source_dsc->copy;
			mach_msg_descriptor_type_t type = source_dsc->type;

			dest_dsc->address = address;
			dest_dsc->count = port_count;
			dest_dsc->deallocate = deallocate;
			dest_dsc->copy = copy;
			dest_dsc->type = type;
			break;
		}
		case MACH_MSG_GUARDED_PORT_DESCRIPTOR: {
			mach_msg_guarded_port_descriptor_t *source_dsc = (typeof(source_dsc)) & saddr->guarded_port;
			mach_msg_guarded_port_descriptor32_t *dest_dsc = &daddr->guarded_port32;

			/* guard state is not delivered to kernel receivers */
			dest_dsc->name = CAST_MACH_PORT_TO_NAME(source_dsc->name);
			dest_dsc->disposition = source_dsc->disposition;
			dest_dsc->flags = 0;
			dest_dsc->type = MACH_MSG_GUARDED_PORT_DESCRIPTOR;
			dest_dsc->context = 0;
			break;
		}
		default: {
#if MACH_ASSERT
			panic("ipc_kmsg_copyout_to_kernel_legacy: bad descriptor");
#endif /* MACH_ASSERT */
		}
		}
	}

	/* slide header/body up over the 4 bytes saved per descriptor */
	if (count) {
		dsc_adjust = 4 * count;
		memmove((char *)((vm_offset_t)kmsg->ikm_header + dsc_adjust), kmsg->ikm_header, sizeof(mach_msg_base_t));
		kmsg->ikm_header = (mach_msg_header_t *)((vm_offset_t)kmsg->ikm_header + dsc_adjust);
		/* Update the message size for the smaller user representation */
		kmsg->ikm_header->msgh_size -= dsc_adjust;
	}
}
5950 #endif /* IKM_SUPPORT_LEGACY */
5951
5952 #ifdef __arm64__
5953 /*
5954 * Just sets those parts of the trailer that aren't set up at allocation time.
5955 */
5956 static void
5957 ipc_kmsg_munge_trailer(mach_msg_max_trailer_t *in, void *_out, boolean_t is64bit)
5958 {
5959 if (is64bit) {
5960 mach_msg_max_trailer64_t *out = (mach_msg_max_trailer64_t*)_out;
5961 out->msgh_seqno = in->msgh_seqno;
5962 out->msgh_context = in->msgh_context;
5963 out->msgh_trailer_size = in->msgh_trailer_size;
5964 out->msgh_ad = in->msgh_ad;
5965 } else {
5966 mach_msg_max_trailer32_t *out = (mach_msg_max_trailer32_t*)_out;
5967 out->msgh_seqno = in->msgh_seqno;
5968 out->msgh_context = (mach_port_context32_t)in->msgh_context;
5969 out->msgh_trailer_size = in->msgh_trailer_size;
5970 out->msgh_ad = in->msgh_ad;
5971 }
5972 }
5973 #endif /* __arm64__ */
5974
5975 mach_msg_trailer_size_t
5976 ipc_kmsg_trailer_size(
5977 mach_msg_option_t option,
5978 __unused thread_t thread)
5979 {
5980 if (!(option & MACH_RCV_TRAILER_MASK)) {
5981 return MACH_MSG_TRAILER_MINIMUM_SIZE;
5982 } else {
5983 return REQUESTED_TRAILER_SIZE(thread_is_64bit_addr(thread), option);
5984 }
5985 }
5986
/*
 * Fill in the receive-time fields of the trailer that follows the message
 * body (sequence number, context, requested size, and - when requested -
 * the audit/filter fields).  On arm64 the trailer is built in a
 * kernel-width scratch copy and then munged into the receiver-width
 * layout in place.
 */
void
ipc_kmsg_add_trailer(ipc_kmsg_t kmsg, ipc_space_t space __unused,
    mach_msg_option_t option, __unused thread_t thread,
    mach_port_seqno_t seqno, boolean_t minimal_trailer,
    mach_vm_offset_t context)
{
	mach_msg_max_trailer_t *trailer;

#ifdef __arm64__
	mach_msg_max_trailer_t tmp_trailer; /* This accommodates U64, and we'll munge */
	/* trailer lives immediately after the (rounded) message body */
	void *real_trailer_out = (void*)(mach_msg_max_trailer_t *)
	    ((vm_offset_t)kmsg->ikm_header +
	    mach_round_msg(kmsg->ikm_header->msgh_size));

	/*
	 * Populate scratch with initial values set up at message allocation time.
	 * After, we reinterpret the space in the message as the right type
	 * of trailer for the address space in question.
	 */
	bcopy(real_trailer_out, &tmp_trailer, MAX_TRAILER_SIZE);
	trailer = &tmp_trailer;
#else /* __arm64__ */
	(void)thread;
	trailer = (mach_msg_max_trailer_t *)
	    ((vm_offset_t)kmsg->ikm_header +
	    mach_round_msg(kmsg->ikm_header->msgh_size));
#endif /* __arm64__ */

	/* nothing requested beyond the static trailer: leave it as-is */
	if (!(option & MACH_RCV_TRAILER_MASK)) {
		return;
	}

	trailer->msgh_seqno = seqno;
	trailer->msgh_context = context;
	trailer->msgh_trailer_size = REQUESTED_TRAILER_SIZE(thread_is_64bit_addr(thread), option);

	if (minimal_trailer) {
		goto done;
	}

	if (GET_RCV_ELEMENTS(option) >= MACH_RCV_TRAILER_AV) {
		/* message-filter policy id recorded at send time */
		trailer->msgh_ad = kmsg->ikm_filter_policy_id;
	}

	/*
	 * The ipc_kmsg_t holds a reference to the label of a label
	 * handle, not the port. We must get a reference to the port
	 * and a send right to copyout to the receiver.
	 */

	if (option & MACH_RCV_TRAILER_ELEMENTS(MACH_RCV_TRAILER_LABELS)) {
		trailer->msgh_labels.sender = 0;
	}

done:
#ifdef __arm64__
	/* write the scratch trailer back in the receiver-width layout */
	ipc_kmsg_munge_trailer(trailer, real_trailer_out, thread_is_64bit_addr(thread));
#endif /* __arm64__ */
	return;
}
6047
6048 mach_msg_header_t *
6049 ipc_kmsg_msg_header(ipc_kmsg_t kmsg)
6050 {
6051 if (NULL == kmsg) {
6052 return NULL;
6053 }
6054 return kmsg->ikm_header;
6055 }