]> git.saurik.com Git - apple/xnu.git/blob - osfmk/ipc/ipc_kmsg.c
e128f7138c00c943441ffdab892878e0d732a522
[apple/xnu.git] / osfmk / ipc / ipc_kmsg.c
1 /*
2 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
58 * support for mandatory and extensible security protections. This notice
59 * is included in support of clause 2.2 (b) of the Apple Public License,
60 * Version 2.0.
61 * Copyright (c) 2005 SPARTA, Inc.
62 */
63 /*
64 */
65 /*
66 * File: ipc/ipc_kmsg.c
67 * Author: Rich Draves
68 * Date: 1989
69 *
70 * Operations on kernel messages.
71 */
72
73
74 #include <mach/mach_types.h>
75 #include <mach/boolean.h>
76 #include <mach/kern_return.h>
77 #include <mach/message.h>
78 #include <mach/port.h>
79 #include <mach/vm_map.h>
80 #include <mach/mach_vm.h>
81 #include <mach/vm_statistics.h>
82
83 #include <kern/kern_types.h>
84 #include <kern/assert.h>
85 #include <kern/debug.h>
86 #include <kern/ipc_kobject.h>
87 #include <kern/kalloc.h>
88 #include <kern/zalloc.h>
89 #include <kern/processor.h>
90 #include <kern/thread.h>
91 #include <kern/sched_prim.h>
92 #include <kern/misc_protos.h>
93 #include <kern/counters.h>
94 #include <kern/cpu_data.h>
95 #include <kern/policy_internal.h>
96 #include <kern/mach_filter.h>
97
98 #include <pthread/priority_private.h>
99
100 #include <machine/limits.h>
101
102 #include <vm/vm_map.h>
103 #include <vm/vm_object.h>
104 #include <vm/vm_kern.h>
105
106 #include <ipc/port.h>
107 #include <ipc/ipc_types.h>
108 #include <ipc/ipc_entry.h>
109 #include <ipc/ipc_kmsg.h>
110 #include <ipc/ipc_notify.h>
111 #include <ipc/ipc_object.h>
112 #include <ipc/ipc_space.h>
113 #include <ipc/ipc_port.h>
114 #include <ipc/ipc_right.h>
115 #include <ipc/ipc_hash.h>
116 #include <ipc/ipc_table.h>
117 #include <ipc/ipc_importance.h>
118 #if MACH_FLIPC
119 #include <kern/mach_node.h>
120 #include <ipc/flipc.h>
121 #endif
122
123 #include <os/overflow.h>
124
125 #include <security/mac_mach_internal.h>
126
127 #include <device/device_server.h>
128
129 #include <string.h>
130
131 #ifdef ppc
132 #include <ppc/Firmware.h>
133 #include <ppc/low_trace.h>
134 #endif
135
136 #if DEBUG
137 #define DEBUG_MSGS_K64 1
138 #endif
139
140 #include <sys/kdebug.h>
141 #include <libkern/OSAtomic.h>
142
143 #include <libkern/crypto/sha2.h>
144
145 #include <ptrauth.h>
146 #if __has_feature(ptrauth_calls)
147 #include <libkern/ptrauth_utils.h>
148 #endif
149
#pragma pack(4)

/*
 * 32-bit user-space ("legacy") layout of a message header: the three port
 * fields are 32-bit port names rather than kernel port pointers.  Used when
 * translating messages copied in from / out to 32-bit tasks.
 */
typedef struct{
	mach_msg_bits_t msgh_bits;
	mach_msg_size_t msgh_size;
	mach_port_name_t msgh_remote_port;
	mach_port_name_t msgh_local_port;
	mach_port_name_t msgh_voucher_port;
	mach_msg_id_t msgh_id;
} mach_msg_legacy_header_t;

/* legacy header plus descriptor count, for complex messages */
typedef struct{
	mach_msg_legacy_header_t header;
	mach_msg_body_t body;
} mach_msg_legacy_base_t;

/* 32-bit user layout of an in-line port descriptor (name, not pointer) */
typedef struct{
	mach_port_name_t name;
	mach_msg_size_t pad1;
	uint32_t pad2 : 16;
	mach_msg_type_name_t disposition : 8;
	mach_msg_descriptor_type_t type : 8;
} mach_msg_legacy_port_descriptor_t;


/* union of all 32-bit user descriptor layouts */
typedef union{
	mach_msg_legacy_port_descriptor_t port;
	mach_msg_ool_descriptor32_t out_of_line32;
	mach_msg_ool_ports_descriptor32_t ool_ports32;
	mach_msg_guarded_port_descriptor32_t guarded_port32;
	mach_msg_type_descriptor_t type;
} mach_msg_legacy_descriptor_t;

#pragma pack()
184
185 #define LEGACY_HEADER_SIZE_DELTA ((mach_msg_size_t)(sizeof(mach_msg_header_t) - sizeof(mach_msg_legacy_header_t)))
186
187 // END LP64 fixes
188
189 #if __has_feature(ptrauth_calls)
/* scratch holds the running ptrauth signature value itself */
typedef uintptr_t ikm_sig_scratch_t;

/*
 * Begin a kmsg signature: seed the scratch with a fixed discriminator so
 * kmsg signatures are domain-separated from other signed blobs.
 */
static void
ikm_init_sig(
	__unused ipc_kmsg_t kmsg,
	ikm_sig_scratch_t *scratchp)
{
	*scratchp = OS_PTRAUTH_DISCRIMINATOR("kmsg.ikm_signature");
}

/*
 * Fold the bytes [data, data+len) into the running signature via
 * ptrauth_utils_sign_blob_generic().
 */
static void
ikm_chunk_sig(
	ipc_kmsg_t kmsg,
	void *data,
	size_t len,
	ikm_sig_scratch_t *scratchp)
{
	int ptrauth_flags;
	void *trailerp;

	/*
	 * if we happen to be doing the trailer chunk,
	 * diversify with the ptrauth-ed trailer pointer -
	 * as that is unchanging for the kmsg
	 */
	trailerp = (void *)
	    ((vm_offset_t)kmsg->ikm_header +
	    mach_round_msg(kmsg->ikm_header->msgh_size));

	ptrauth_flags = (data == trailerp) ? PTRAUTH_ADDR_DIVERSIFY : 0;
	*scratchp = ptrauth_utils_sign_blob_generic(data, len, *scratchp, ptrauth_flags);
}

/* the accumulated scratch value is the final signature */
static uintptr_t
ikm_finalize_sig(
	__unused ipc_kmsg_t kmsg,
	ikm_sig_scratch_t *scratchp)
{
	return *scratchp;
}
230
231 #elif defined(CRYPTO_SHA2) && !defined(__x86_64__)
232
/* scratch is a SHA-256 context; the signature is a truncated digest */
typedef SHA256_CTX ikm_sig_scratch_t;

/*
 * Begin a kmsg signature: start a SHA-256 and mix in the per-boot
 * address-hash salt so digests are not predictable across boots.
 */
static void
ikm_init_sig(
	__unused ipc_kmsg_t kmsg,
	ikm_sig_scratch_t *scratchp)
{
	SHA256_Init(scratchp);
	SHA256_Update(scratchp, &vm_kernel_addrhash_salt_ext, sizeof(uint64_t));
}

/* Fold the bytes [data, data+len) into the running digest. */
static void
ikm_chunk_sig(
	__unused ipc_kmsg_t kmsg,
	void *data,
	size_t len,
	ikm_sig_scratch_t *scratchp)
{
	SHA256_Update(scratchp, data, len);
}

/* Finish the digest and truncate it to a single uintptr_t. */
static uintptr_t
ikm_finalize_sig(
	__unused ipc_kmsg_t kmsg,
	ikm_sig_scratch_t *scratchp)
{
	uintptr_t sha_digest[SHA256_DIGEST_LENGTH / sizeof(uintptr_t)];

	SHA256_Final((uint8_t *)sha_digest, scratchp);

	/*
	 * Only use one uintptr_t sized part of result for space and compat reasons.
	 * Truncation is better than XOR'ing the chunks together in hopes of higher
	 * entropy - because of its lower risk of collisions.
	 */
	return *sha_digest;
}
270
271 #else
/* Stubbed out implementation (for __x86_64__ for now) */

typedef uintptr_t ikm_sig_scratch_t;

/* no-op: signature is always zero on platforms without a signing scheme */
static void
ikm_init_sig(
	__unused ipc_kmsg_t kmsg,
	ikm_sig_scratch_t *scratchp)
{
	*scratchp = 0;
}

/* no-op: message bytes are ignored */
static void
ikm_chunk_sig(
	__unused ipc_kmsg_t kmsg,
	__unused void *data,
	__unused size_t len,
	__unused ikm_sig_scratch_t *scratchp)
{
	return;
}

/* always returns 0; validation therefore always succeeds */
static uintptr_t
ikm_finalize_sig(
	__unused ipc_kmsg_t kmsg,
	ikm_sig_scratch_t *scratchp)
{
	return *scratchp;
}
301
302 #endif
303
/*
 * Fold the message header into the running signature.
 *
 * A stack copy of the header (widened to a mach_msg_base_t with the
 * descriptor count) is signed instead of the live header, with the bits
 * that legitimately change during a message's kernel lifetime masked off:
 * only the user-controlled msgh_bits are kept, and the voucher disposition
 * bits are cleared.  Non-complex messages sign a descriptor count of 0.
 */
static void
ikm_header_sig(
	ipc_kmsg_t kmsg,
	ikm_sig_scratch_t *scratchp)
{
	mach_msg_size_t dsc_count;
	mach_msg_base_t base;
	boolean_t complex;

	/* take a snapshot of the message header/body-count */
	base.header = *kmsg->ikm_header;
	complex = ((base.header.msgh_bits & MACH_MSGH_BITS_COMPLEX) != 0);
	if (complex) {
		dsc_count = ((mach_msg_body_t *)(kmsg->ikm_header + 1))->msgh_descriptor_count;
	} else {
		dsc_count = 0;
	}
	base.body.msgh_descriptor_count = dsc_count;

	/* compute sig of a copy of the header with all varying bits masked off */
	base.header.msgh_bits &= MACH_MSGH_BITS_USER;
	base.header.msgh_bits &= ~MACH_MSGH_BITS_VOUCHER_MASK;
	ikm_chunk_sig(kmsg, &base, sizeof(mach_msg_base_t), scratchp);
}
328
329 static void
330 ikm_trailer_sig(
331 ipc_kmsg_t kmsg,
332 ikm_sig_scratch_t *scratchp)
333 {
334 mach_msg_max_trailer_t *trailerp;
335
336 /* Add sig of the trailer contents */
337 trailerp = (mach_msg_max_trailer_t *)
338 ((vm_offset_t)kmsg->ikm_header +
339 mach_round_msg(kmsg->ikm_header->msgh_size));
340 ikm_chunk_sig(kmsg, trailerp, sizeof(*trailerp), scratchp);
341 }
342
343 /* Compute the signature for the body bits of a message */
344 static void
345 ikm_body_sig(
346 ipc_kmsg_t kmsg,
347 ikm_sig_scratch_t *scratchp)
348 {
349 mach_msg_descriptor_t *kern_dsc;
350 mach_msg_size_t dsc_count;
351 mach_msg_body_t *body;
352 mach_msg_size_t i;
353
354 if ((kmsg->ikm_header->msgh_bits & MACH_MSGH_BITS_COMPLEX) == 0) {
355 return;
356 }
357 body = (mach_msg_body_t *) (kmsg->ikm_header + 1);
358 dsc_count = body->msgh_descriptor_count;
359
360 if (dsc_count == 0) {
361 return;
362 }
363
364 kern_dsc = (mach_msg_descriptor_t *) (body + 1);
365
366 /* Compute the signature for the whole descriptor array */
367 ikm_chunk_sig(kmsg, kern_dsc, sizeof(*kern_dsc) * dsc_count, scratchp);
368
369 /* look for descriptor contents that need a signature */
370 for (i = 0; i < dsc_count; i++) {
371 switch (kern_dsc[i].type.type) {
372 case MACH_MSG_PORT_DESCRIPTOR:
373 case MACH_MSG_GUARDED_PORT_DESCRIPTOR:
374 case MACH_MSG_OOL_VOLATILE_DESCRIPTOR:
375 case MACH_MSG_OOL_DESCRIPTOR:
376 break;
377
378 case MACH_MSG_OOL_PORTS_DESCRIPTOR: {
379 mach_msg_ool_ports_descriptor_t *ports_dsc;
380
381 /* Compute sig for the port/object pointers */
382 ports_dsc = (mach_msg_ool_ports_descriptor_t *)&kern_dsc[i];
383 ikm_chunk_sig(kmsg, ports_dsc->address, ports_dsc->count * sizeof(ipc_object_t), scratchp);
384 break;
385 }
386 default: {
387 panic("ipc_kmsg_body_sig: invalid message descriptor");
388 }
389 }
390 }
391 }
392
/*
 * Compute the kmsg's signature over header, trailer, and body, and store
 * it in the kmsg.  With IKM_PARTIAL_SIG, intermediate signatures for
 * "header only" and "header+trailer" are also stored so that a later
 * mismatch can be attributed to a specific region.
 */
static void
ikm_sign(
	ipc_kmsg_t kmsg)
{
	ikm_sig_scratch_t scratch;
	uintptr_t sig;

	/* reject anything that is not a genuine kmsg-zone element */
	zone_require(ipc_kmsg_zone, kmsg);

	ikm_init_sig(kmsg, &scratch);

	ikm_header_sig(kmsg, &scratch);
#if IKM_PARTIAL_SIG
	/* save off partial signature for just header */
	sig = ikm_finalize_sig(kmsg, &scratch);
	kmsg->ikm_header_sig = sig;
#endif

	ikm_trailer_sig(kmsg, &scratch);
#if IKM_PARTIAL_SIG
	/* save off partial signature for header+trailer */
	sig = ikm_finalize_sig(kmsg, &scratch);
	kmsg->ikm_headtrail_sig = sig;
#endif

	ikm_body_sig(kmsg, &scratch);
	sig = ikm_finalize_sig(kmsg, &scratch);
	kmsg->ikm_signature = sig;
}
422
/* diagnostics: count of signature mismatches, and the msgh_id of the last one */
unsigned int ikm_signature_failures;
unsigned int ikm_signature_failure_id;
#if (DEVELOPMENT || DEBUG)
/* when non-zero, a mismatch returns instead of panicking (debug escape hatch) */
unsigned int ikm_signature_panic_disable;
/* partial-signature mismatch counters (only meaningful with IKM_PARTIAL_SIG) */
unsigned int ikm_signature_header_failures;
unsigned int ikm_signature_trailer_failures;
#endif
430
/*
 * Recompute the kmsg signature and compare against the stored value,
 * panicking on mismatch (evidence of kmsg corruption/tampering).
 * With IKM_PARTIAL_SIG, the header and header+trailer partial signatures
 * are checked first so a mismatch names the corrupt region.  On
 * DEVELOPMENT/DEBUG kernels the panic can be suppressed by setting
 * ikm_signature_panic_disable.
 */
static void
ikm_validate_sig(
	ipc_kmsg_t kmsg)
{
	ikm_sig_scratch_t scratch;
	uintptr_t expected;
	uintptr_t sig;
	char *str;

	/* reject anything that is not a genuine kmsg-zone element */
	zone_require(ipc_kmsg_zone, kmsg);

	ikm_init_sig(kmsg, &scratch);

	ikm_header_sig(kmsg, &scratch);
#if IKM_PARTIAL_SIG
	/* Do partial evaluation of just the header signature */
	sig = ikm_finalize_sig(kmsg, &scratch);
	expected = kmsg->ikm_header_sig;
	if (sig != expected) {
		ikm_signature_header_failures++;
		str = "header";
		goto failure;
	}
#endif

	ikm_trailer_sig(kmsg, &scratch);
#if IKM_PARTIAL_SIG
	/* Do partial evaluation of header+trailer signature */
	sig = ikm_finalize_sig(kmsg, &scratch);
	expected = kmsg->ikm_headtrail_sig;
	if (sig != expected) {
		ikm_signature_trailer_failures++;
		str = "trailer";
		goto failure;
	}
#endif

	ikm_body_sig(kmsg, &scratch);
	sig = ikm_finalize_sig(kmsg, &scratch);

	expected = kmsg->ikm_signature;
	if (sig != expected) {
		ikm_signature_failures++;
		str = "full";

		/* the partial-sig paths above jump here with their own str */
#if IKM_PARTIAL_SIG
failure:
#endif
		{
			mach_msg_id_t id = kmsg->ikm_header->msgh_id;

			ikm_signature_failure_id = id;
#if (DEVELOPMENT || DEBUG)
			if (ikm_signature_panic_disable) {
				return;
			}
#endif
			panic("ikm_validate_sig: %s signature mismatch: kmsg=0x%p, id=%d, sig=0x%zx (expected 0x%zx)",
			    str, kmsg, id, sig, expected);
		}
	}
}
493
494 #if DEBUG_MSGS_K64
495 extern void ipc_pset_print64(
496 ipc_pset_t pset);
497
498 extern void ipc_kmsg_print64(
499 ipc_kmsg_t kmsg,
500 const char *str);
501
502 extern void ipc_msg_print64(
503 mach_msg_header_t *msgh);
504
505 extern ipc_port_t ipc_name_to_data64(
506 task_t task,
507 mach_port_name_t name);
508
509 /*
510 * Forward declarations
511 */
512 void ipc_msg_print_untyped64(
513 mach_msg_body_t *body);
514
515 const char * ipc_type_name64(
516 int type_name,
517 boolean_t received);
518
519 void ipc_print_type_name64(
520 int type_name);
521
522 const char *
523 msgh_bit_decode64(
524 mach_msg_bits_t bit);
525
526 const char *
527 mm_copy_options_string64(
528 mach_msg_copy_options_t option);
529
530 void db_print_msg_uid64(mach_msg_header_t *);
531
532 static void
533 ipc_msg_body_print64(void *body, int size)
534 {
535 uint32_t *word = (uint32_t *) body;
536 uint32_t *end = (uint32_t *)(((uintptr_t) body) + size
537 - sizeof(mach_msg_header_t));
538 int i;
539
540 kprintf(" body(%p-%p):\n %p: ", body, end, word);
541 for (;;) {
542 for (i = 0; i < 8; i++, word++) {
543 if (word >= end) {
544 kprintf("\n");
545 return;
546 }
547 kprintf("%08x ", *word);
548 }
549 kprintf("\n %p: ", word);
550 }
551 }
552
553
554 const char *
555 ipc_type_name64(
556 int type_name,
557 boolean_t received)
558 {
559 switch (type_name) {
560 case MACH_MSG_TYPE_PORT_NAME:
561 return "port_name";
562
563 case MACH_MSG_TYPE_MOVE_RECEIVE:
564 if (received) {
565 return "port_receive";
566 } else {
567 return "move_receive";
568 }
569
570 case MACH_MSG_TYPE_MOVE_SEND:
571 if (received) {
572 return "port_send";
573 } else {
574 return "move_send";
575 }
576
577 case MACH_MSG_TYPE_MOVE_SEND_ONCE:
578 if (received) {
579 return "port_send_once";
580 } else {
581 return "move_send_once";
582 }
583
584 case MACH_MSG_TYPE_COPY_SEND:
585 return "copy_send";
586
587 case MACH_MSG_TYPE_MAKE_SEND:
588 return "make_send";
589
590 case MACH_MSG_TYPE_MAKE_SEND_ONCE:
591 return "make_send_once";
592
593 default:
594 return (char *) 0;
595 }
596 }
597
598 void
599 ipc_print_type_name64(
600 int type_name)
601 {
602 const char *name = ipc_type_name64(type_name, TRUE);
603 if (name) {
604 kprintf("%s", name);
605 } else {
606 kprintf("type%d", type_name);
607 }
608 }
609
610 /*
611 * ipc_kmsg_print64 [ debug ]
612 */
613 void
614 ipc_kmsg_print64(
615 ipc_kmsg_t kmsg,
616 const char *str)
617 {
618 kprintf("%s kmsg=%p:\n", str, kmsg);
619 kprintf(" next=%p, prev=%p, size=%d",
620 kmsg->ikm_next,
621 kmsg->ikm_prev,
622 kmsg->ikm_size);
623 kprintf("\n");
624 ipc_msg_print64(kmsg->ikm_header);
625 }
626
627 const char *
628 msgh_bit_decode64(
629 mach_msg_bits_t bit)
630 {
631 switch (bit) {
632 case MACH_MSGH_BITS_COMPLEX: return "complex";
633 case MACH_MSGH_BITS_CIRCULAR: return "circular";
634 default: return (char *) 0;
635 }
636 }
637
638 /*
639 * ipc_msg_print64 [ debug ]
640 */
641 void
642 ipc_msg_print64(
643 mach_msg_header_t *msgh)
644 {
645 mach_msg_bits_t mbits;
646 unsigned int bit, i;
647 const char *bit_name;
648 int needs_comma;
649
650 mbits = msgh->msgh_bits;
651 kprintf(" msgh_bits=0x%x: l=0x%x,r=0x%x\n",
652 mbits,
653 MACH_MSGH_BITS_LOCAL(msgh->msgh_bits),
654 MACH_MSGH_BITS_REMOTE(msgh->msgh_bits));
655
656 mbits = MACH_MSGH_BITS_OTHER(mbits) & MACH_MSGH_BITS_USED;
657 kprintf(" decoded bits: ");
658 needs_comma = 0;
659 for (i = 0, bit = 1; i < sizeof(mbits) * 8; ++i, bit <<= 1) {
660 if ((mbits & bit) == 0) {
661 continue;
662 }
663 bit_name = msgh_bit_decode64((mach_msg_bits_t)bit);
664 if (bit_name) {
665 kprintf("%s%s", needs_comma ? "," : "", bit_name);
666 } else {
667 kprintf("%sunknown(0x%x),", needs_comma ? "," : "", bit);
668 }
669 ++needs_comma;
670 }
671 if (msgh->msgh_bits & ~MACH_MSGH_BITS_USED) {
672 kprintf("%sunused=0x%x,", needs_comma ? "," : "",
673 msgh->msgh_bits & ~MACH_MSGH_BITS_USED);
674 }
675 kprintf("\n");
676
677 needs_comma = 1;
678 if (msgh->msgh_remote_port) {
679 kprintf(" remote=%p(", msgh->msgh_remote_port);
680 ipc_print_type_name64(MACH_MSGH_BITS_REMOTE(msgh->msgh_bits));
681 kprintf(")");
682 } else {
683 kprintf(" remote=null");
684 }
685
686 if (msgh->msgh_local_port) {
687 kprintf("%slocal=%p(", needs_comma ? "," : "",
688 msgh->msgh_local_port);
689 ipc_print_type_name64(MACH_MSGH_BITS_LOCAL(msgh->msgh_bits));
690 kprintf(")\n");
691 } else {
692 kprintf("local=null\n");
693 }
694
695 kprintf(" msgh_id=%d, size=%d\n",
696 msgh->msgh_id,
697 msgh->msgh_size);
698
699 if (mbits & MACH_MSGH_BITS_COMPLEX) {
700 ipc_msg_print_untyped64((mach_msg_body_t *) (msgh + 1));
701 }
702
703 ipc_msg_body_print64((void *)(msgh + 1), msgh->msgh_size);
704 }
705
706
707 const char *
708 mm_copy_options_string64(
709 mach_msg_copy_options_t option)
710 {
711 const char *name;
712
713 switch (option) {
714 case MACH_MSG_PHYSICAL_COPY:
715 name = "PHYSICAL";
716 break;
717 case MACH_MSG_VIRTUAL_COPY:
718 name = "VIRTUAL";
719 break;
720 case MACH_MSG_OVERWRITE:
721 name = "OVERWRITE(DEPRECATED)";
722 break;
723 case MACH_MSG_ALLOCATE:
724 name = "ALLOCATE";
725 break;
726 case MACH_MSG_KALLOC_COPY_T:
727 name = "KALLOC_COPY_T";
728 break;
729 default:
730 name = "unknown";
731 break;
732 }
733 return name;
734 }
735
/*
 * Pretty-print the descriptor array of a complex message: one line per
 * descriptor showing its type-specific fields (port name/disposition,
 * OOL address/size/copy-strategy, etc.).
 */
void
ipc_msg_print_untyped64(
	mach_msg_body_t *body)
{
	mach_msg_descriptor_t *saddr, *send;
	mach_msg_descriptor_type_t type;

	kprintf(" %d descriptors: \n", body->msgh_descriptor_count);

	saddr = (mach_msg_descriptor_t *) (body + 1);
	send = saddr + body->msgh_descriptor_count;

	for (; saddr < send; saddr++) {
		type = saddr->type.type;

		switch (type) {
		case MACH_MSG_PORT_DESCRIPTOR: {
			mach_msg_port_descriptor_t *dsc;

			dsc = &saddr->port;
			kprintf(" PORT name = %p disp = ", dsc->name);
			ipc_print_type_name64(dsc->disposition);
			kprintf("\n");
			break;
		}
		case MACH_MSG_OOL_VOLATILE_DESCRIPTOR:
		case MACH_MSG_OOL_DESCRIPTOR: {
			mach_msg_ool_descriptor_t *dsc;

			dsc = (mach_msg_ool_descriptor_t *) &saddr->out_of_line;
			kprintf(" OOL%s addr = %p size = 0x%x copy = %s %s\n",
			    type == MACH_MSG_OOL_DESCRIPTOR ? "" : " VOLATILE",
			    dsc->address, dsc->size,
			    mm_copy_options_string64(dsc->copy),
			    dsc->deallocate ? "DEALLOC" : "");
			break;
		}
		case MACH_MSG_OOL_PORTS_DESCRIPTOR: {
			mach_msg_ool_ports_descriptor_t *dsc;

			dsc = (mach_msg_ool_ports_descriptor_t *) &saddr->ool_ports;

			kprintf(" OOL_PORTS addr = %p count = 0x%x ",
			    dsc->address, dsc->count);
			kprintf("disp = ");
			ipc_print_type_name64(dsc->disposition);
			kprintf(" copy = %s %s\n",
			    mm_copy_options_string64(dsc->copy),
			    dsc->deallocate ? "DEALLOC" : "");
			break;
		}
		case MACH_MSG_GUARDED_PORT_DESCRIPTOR: {
			mach_msg_guarded_port_descriptor_t *dsc;

			dsc = (mach_msg_guarded_port_descriptor_t *)&saddr->guarded_port;
			kprintf(" GUARDED_PORT name = %p flags = 0x%x disp = ", dsc->name, dsc->flags);
			ipc_print_type_name64(dsc->disposition);
			kprintf("\n");
			break;
		}
		default: {
			kprintf(" UNKNOWN DESCRIPTOR 0x%x\n", type);
			break;
		}
		}
	}
}
803
/*
 * Dump a kmsg (or a raw message body) to the console when IPC syscall
 * kprintf debugging is enabled; compiled to nothing on !DEBUG_MSGS_K64.
 */
#define DEBUG_IPC_KMSG_PRINT(kmsg, string)       \
__unreachable_ok_push   \
	if (DEBUG_KPRINT_SYSCALL_PREDICATE(DEBUG_KPRINT_SYSCALL_IPC_MASK)) {    \
	        ipc_kmsg_print64(kmsg, string); \
	} \
__unreachable_ok_pop

#define DEBUG_IPC_MSG_BODY_PRINT(body, size)     \
__unreachable_ok_push   \
	if (DEBUG_KPRINT_SYSCALL_PREDICATE(DEBUG_KPRINT_SYSCALL_IPC_MASK)) {    \
	        ipc_msg_body_print64(body,size);\
	} \
__unreachable_ok_pop
#else /* !DEBUG_MSGS_K64 */
#define DEBUG_IPC_KMSG_PRINT(kmsg, string)
#define DEBUG_IPC_MSG_BODY_PRINT(body, size)
#endif /* !DEBUG_MSGS_K64 */
821
extern vm_map_t ipc_kernel_copy_map;
extern vm_size_t ipc_kmsg_max_space;
extern const vm_size_t ipc_kmsg_max_vm_space;
extern const vm_size_t ipc_kmsg_max_body_space;
extern vm_size_t msg_ool_size_small;

/* OOL data below this size is physically copied rather than vm-mapped */
#define MSG_OOL_SIZE_SMALL msg_ool_size_small

/*
 * "OTHER" descriptors are the user layouts for the pointer size opposite
 * the kernel's; MAP_SIZE_DIFFERS() detects a task whose address-space
 * size differs from the kernel's native one.
 */
#if defined(__LP64__)
#define MAP_SIZE_DIFFERS(map) (map->max_offset < MACH_VM_MAX_ADDRESS)
#define OTHER_OOL_DESCRIPTOR mach_msg_ool_descriptor32_t
#define OTHER_OOL_PORTS_DESCRIPTOR mach_msg_ool_ports_descriptor32_t
#else
#define MAP_SIZE_DIFFERS(map) (map->max_offset > VM_MAX_ADDRESS)
#define OTHER_OOL_DESCRIPTOR mach_msg_ool_descriptor64_t
#define OTHER_OOL_PORTS_DESCRIPTOR mach_msg_ool_ports_descriptor64_t
#endif

/* size delta between the 64-bit and 32-bit user OOL descriptor layouts */
#define DESC_SIZE_ADJUSTMENT ((mach_msg_size_t)(sizeof(mach_msg_ool_descriptor64_t) - \
	sizeof(mach_msg_ool_descriptor32_t)))
842
/* scatter list macros */

/*
 * Advance scatter-list cursor (s) past any leading port descriptors,
 * decrementing the remaining count (c); (s) is set to
 * MACH_MSG_DESCRIPTOR_NULL once the list is exhausted.
 *
 * Fix: parenthesize the macro parameter 'c' in the comparison, matching
 * every other use of the parameters (macro-hygiene; an expression
 * argument would otherwise parse incorrectly).
 */
#define SKIP_PORT_DESCRIPTORS(s, c) \
MACRO_BEGIN \
	if ((s) != MACH_MSG_DESCRIPTOR_NULL) { \
	        while ((c) > 0) { \
	                if ((s)->type.type != MACH_MSG_PORT_DESCRIPTOR) \
	                        break; \
	                (s)++; (c)--; \
	        } \
	        if ((c) == 0) \
	                (s) = MACH_MSG_DESCRIPTOR_NULL; \
	} \
MACRO_END

/*
 * Step scatter-list cursor (s) to the next descriptor and decrement (c);
 * (d) selects the "other word size" (OTHER_OOL_DESCRIPTOR) stride.
 */
#define INCREMENT_SCATTER(s, c, d) \
MACRO_BEGIN \
	if ((s) != MACH_MSG_DESCRIPTOR_NULL) { \
	        s = (d) ? (mach_msg_descriptor_t *) \
	            ((OTHER_OOL_DESCRIPTOR *)(s) + 1) : \
	            (s + 1); \
	        (c)--; \
	} \
MACRO_END
867
868 #define KMSG_TRACE_FLAG_TRACED 0x000001
869 #define KMSG_TRACE_FLAG_COMPLEX 0x000002
870 #define KMSG_TRACE_FLAG_OOLMEM 0x000004
871 #define KMSG_TRACE_FLAG_VCPY 0x000008
872 #define KMSG_TRACE_FLAG_PCPY 0x000010
873 #define KMSG_TRACE_FLAG_SND64 0x000020
874 #define KMSG_TRACE_FLAG_RAISEIMP 0x000040
875 #define KMSG_TRACE_FLAG_APP_SRC 0x000080
876 #define KMSG_TRACE_FLAG_APP_DST 0x000100
877 #define KMSG_TRACE_FLAG_DAEMON_SRC 0x000200
878 #define KMSG_TRACE_FLAG_DAEMON_DST 0x000400
879 #define KMSG_TRACE_FLAG_DST_NDFLTQ 0x000800
880 #define KMSG_TRACE_FLAG_SRC_NDFLTQ 0x001000
881 #define KMSG_TRACE_FLAG_DST_SONCE 0x002000
882 #define KMSG_TRACE_FLAG_SRC_SONCE 0x004000
883 #define KMSG_TRACE_FLAG_CHECKIN 0x008000
884 #define KMSG_TRACE_FLAG_ONEWAY 0x010000
885 #define KMSG_TRACE_FLAG_IOKIT 0x020000
886 #define KMSG_TRACE_FLAG_SNDRCV 0x040000
887 #define KMSG_TRACE_FLAG_DSTQFULL 0x080000
888 #define KMSG_TRACE_FLAG_VOUCHER 0x100000
889 #define KMSG_TRACE_FLAG_TIMER 0x200000
890 #define KMSG_TRACE_FLAG_SEMA 0x400000
891 #define KMSG_TRACE_FLAG_DTMPOWNER 0x800000
892 #define KMSG_TRACE_FLAG_GUARDED_DESC 0x1000000
893
894 #define KMSG_TRACE_FLAGS_MASK 0x1ffffff
895 #define KMSG_TRACE_FLAGS_SHIFT 8
896
897 #define KMSG_TRACE_PORTS_MASK 0xff
898 #define KMSG_TRACE_PORTS_SHIFT 0
899
900 #if (KDEBUG_LEVEL >= KDEBUG_LEVEL_STANDARD)
901 #include <stdint.h>
902
/*
 * ipc_kmsg_trace_send	[ tracing ]
 *
 * Emit a MACH_IPC_KMSG_INFO kdebug tracepoint summarizing a message send:
 * sender/destination pids, approximate payload size, port count, and a
 * flags word (KMSG_TRACE_FLAG_*) describing the message and endpoints.
 * Returns immediately unless the tracepoint is actually enabled.
 */
void
ipc_kmsg_trace_send(ipc_kmsg_t kmsg,
    mach_msg_option_t option)
{
	task_t send_task = TASK_NULL;
	ipc_port_t dst_port, src_port;
	boolean_t is_task_64bit;
	mach_msg_header_t *msg;
	mach_msg_trailer_t *trailer;

	int kotype = 0;
	uint32_t msg_size = 0;
	uint64_t msg_flags = KMSG_TRACE_FLAG_TRACED;
	uint32_t num_ports = 0;
	uint32_t send_pid, dst_pid;

	/*
	 * check to see not only if ktracing is enabled, but if we will
	 * _actually_ emit the KMSG_INFO tracepoint. This saves us a
	 * significant amount of processing (and a port lock hold) in
	 * the non-tracing case.
	 */
	if (__probable((kdebug_enable & KDEBUG_TRACE) == 0)) {
		return;
	}
	if (!kdebug_debugid_enabled(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_KMSG_INFO))) {
		return;
	}

	msg = kmsg->ikm_header;

	dst_port = msg->msgh_remote_port;
	if (!IPC_PORT_VALID(dst_port)) {
		return;
	}

	/*
	 * Message properties / options
	 */
	if ((option & (MACH_SEND_MSG | MACH_RCV_MSG)) == (MACH_SEND_MSG | MACH_RCV_MSG)) {
		msg_flags |= KMSG_TRACE_FLAG_SNDRCV;
	}

	if (msg->msgh_id >= is_iokit_subsystem.start &&
	    msg->msgh_id < is_iokit_subsystem.end + 100) {
		msg_flags |= KMSG_TRACE_FLAG_IOKIT;
	}
	/* magic XPC checkin message id (XPC_MESSAGE_ID_CHECKIN) from libxpc */
	else if (msg->msgh_id == 0x77303074u /* w00t */) {
		msg_flags |= KMSG_TRACE_FLAG_CHECKIN;
	}

	if (msg->msgh_bits & MACH_MSGH_BITS_RAISEIMP) {
		msg_flags |= KMSG_TRACE_FLAG_RAISEIMP;
	}

	if (unsafe_convert_port_to_voucher(kmsg->ikm_voucher)) {
		msg_flags |= KMSG_TRACE_FLAG_VOUCHER;
	}

	/*
	 * Sending task / port
	 */
	send_task = current_task();
	send_pid = task_pid(send_task);

	if (send_pid != 0) {
		if (task_is_daemon(send_task)) {
			msg_flags |= KMSG_TRACE_FLAG_DAEMON_SRC;
		} else if (task_is_app(send_task)) {
			msg_flags |= KMSG_TRACE_FLAG_APP_SRC;
		}
	}

	is_task_64bit = (send_task->map->max_offset > VM_MAX_ADDRESS);
	if (is_task_64bit) {
		msg_flags |= KMSG_TRACE_FLAG_SND64;
	}

	src_port = msg->msgh_local_port;
	if (src_port) {
		if (src_port->ip_messages.imq_qlimit != MACH_PORT_QLIMIT_DEFAULT) {
			msg_flags |= KMSG_TRACE_FLAG_SRC_NDFLTQ;
		}
		switch (MACH_MSGH_BITS_LOCAL(msg->msgh_bits)) {
		case MACH_MSG_TYPE_MOVE_SEND_ONCE:
			msg_flags |= KMSG_TRACE_FLAG_SRC_SONCE;
			break;
		default:
			break;
		}
	} else {
		/* no reply port: fire-and-forget message */
		msg_flags |= KMSG_TRACE_FLAG_ONEWAY;
	}


	/*
	 * Destination task / port
	 * (0xfffffff0..3 sentinel "pids" encode why no real pid was found)
	 */
	ip_lock(dst_port);
	if (!ip_active(dst_port)) {
		/* dst port is being torn down */
		dst_pid = (uint32_t)0xfffffff0;
	} else if (dst_port->ip_tempowner) {
		msg_flags |= KMSG_TRACE_FLAG_DTMPOWNER;
		if (IIT_NULL != dst_port->ip_imp_task) {
			dst_pid = task_pid(dst_port->ip_imp_task->iit_task);
		} else {
			dst_pid = (uint32_t)0xfffffff1;
		}
	} else if (dst_port->ip_receiver_name == MACH_PORT_NULL) {
		/* dst_port is otherwise in-transit */
		dst_pid = (uint32_t)0xfffffff2;
	} else {
		if (dst_port->ip_receiver == ipc_space_kernel) {
			dst_pid = 0;
		} else {
			ipc_space_t dst_space;
			dst_space = dst_port->ip_receiver;
			if (dst_space && is_active(dst_space)) {
				dst_pid = task_pid(dst_space->is_task);
				if (task_is_daemon(dst_space->is_task)) {
					msg_flags |= KMSG_TRACE_FLAG_DAEMON_DST;
				} else if (task_is_app(dst_space->is_task)) {
					msg_flags |= KMSG_TRACE_FLAG_APP_DST;
				}
			} else {
				/* receiving task is being torn down */
				dst_pid = (uint32_t)0xfffffff3;
			}
		}
	}

	if (dst_port->ip_messages.imq_qlimit != MACH_PORT_QLIMIT_DEFAULT) {
		msg_flags |= KMSG_TRACE_FLAG_DST_NDFLTQ;
	}
	if (imq_full(&dst_port->ip_messages)) {
		msg_flags |= KMSG_TRACE_FLAG_DSTQFULL;
	}

	kotype = ip_kotype(dst_port);

	ip_unlock(dst_port);

	/* classify well-known kernel-object destinations */
	switch (kotype) {
	case IKOT_SEMAPHORE:
		msg_flags |= KMSG_TRACE_FLAG_SEMA;
		break;
	case IKOT_TIMER:
	case IKOT_CLOCK:
		msg_flags |= KMSG_TRACE_FLAG_TIMER;
		break;
	case IKOT_MASTER_DEVICE:
	case IKOT_IOKIT_CONNECT:
	case IKOT_IOKIT_OBJECT:
	case IKOT_IOKIT_IDENT:
	case IKOT_UEXT_OBJECT:
		msg_flags |= KMSG_TRACE_FLAG_IOKIT;
		break;
	default:
		break;
	}

	switch (MACH_MSGH_BITS_REMOTE(msg->msgh_bits)) {
	case MACH_MSG_TYPE_PORT_SEND_ONCE:
		msg_flags |= KMSG_TRACE_FLAG_DST_SONCE;
		break;
	default:
		break;
	}


	/*
	 * Message size / content
	 */
	msg_size = msg->msgh_size - sizeof(mach_msg_header_t);

	if (msg->msgh_bits & MACH_MSGH_BITS_COMPLEX) {
		mach_msg_body_t *msg_body;
		mach_msg_descriptor_t *kern_dsc;
		int dsc_count;

		msg_flags |= KMSG_TRACE_FLAG_COMPLEX;

		msg_body = (mach_msg_body_t *)(kmsg->ikm_header + 1);
		dsc_count = (int)msg_body->msgh_descriptor_count;
		kern_dsc = (mach_msg_descriptor_t *)(msg_body + 1);

		/* this is gross: see ipc_kmsg_copyin_body()... */
		if (!is_task_64bit) {
			msg_size -= (dsc_count * 12);
		}

		/* account descriptor payloads toward the reported msg_size */
		for (int i = 0; i < dsc_count; i++) {
			switch (kern_dsc[i].type.type) {
			case MACH_MSG_PORT_DESCRIPTOR:
				num_ports++;
				if (is_task_64bit) {
					msg_size -= 12;
				}
				break;
			case MACH_MSG_OOL_VOLATILE_DESCRIPTOR:
			case MACH_MSG_OOL_DESCRIPTOR: {
				mach_msg_ool_descriptor_t *dsc;
				dsc = (mach_msg_ool_descriptor_t *)&kern_dsc[i];
				msg_flags |= KMSG_TRACE_FLAG_OOLMEM;
				msg_size += dsc->size;
				if ((dsc->size >= MSG_OOL_SIZE_SMALL) &&
				    (dsc->copy == MACH_MSG_PHYSICAL_COPY) &&
				    !dsc->deallocate) {
					msg_flags |= KMSG_TRACE_FLAG_PCPY;
				} else if (dsc->size <= MSG_OOL_SIZE_SMALL) {
					msg_flags |= KMSG_TRACE_FLAG_PCPY;
				} else {
					msg_flags |= KMSG_TRACE_FLAG_VCPY;
				}
				if (is_task_64bit) {
					msg_size -= 16;
				}
			} break;
			case MACH_MSG_OOL_PORTS_DESCRIPTOR: {
				mach_msg_ool_ports_descriptor_t *dsc;
				dsc = (mach_msg_ool_ports_descriptor_t *)&kern_dsc[i];
				num_ports += dsc->count;
				if (is_task_64bit) {
					msg_size -= 16;
				}
			} break;
			case MACH_MSG_GUARDED_PORT_DESCRIPTOR:
				num_ports++;
				msg_flags |= KMSG_TRACE_FLAG_GUARDED_DESC;
				if (is_task_64bit) {
					msg_size -= 16;
				}
				break;
			default:
				break;
			}
		}
	}

	/*
	 * Trailer contents
	 */
	trailer = (mach_msg_trailer_t *)((vm_offset_t)msg +
	    (vm_offset_t)mach_round_msg(msg->msgh_size));
	if (trailer->msgh_trailer_size <= sizeof(mach_msg_security_trailer_t)) {
		extern const security_token_t KERNEL_SECURITY_TOKEN;
		mach_msg_security_trailer_t *strailer;
		strailer = (mach_msg_security_trailer_t *)trailer;
		/*
		 * verify the sender PID: replies from the kernel often look
		 * like self-talk because the sending port is not reset.
		 */
		if (memcmp(&strailer->msgh_sender,
		    &KERNEL_SECURITY_TOKEN,
		    sizeof(KERNEL_SECURITY_TOKEN)) == 0) {
			send_pid = 0;
			msg_flags &= ~(KMSG_TRACE_FLAG_APP_SRC | KMSG_TRACE_FLAG_DAEMON_SRC);
		}
	}

	KDBG(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_KMSG_INFO) | DBG_FUNC_END,
	    (uintptr_t)send_pid,
	    (uintptr_t)dst_pid,
	    (uintptr_t)msg_size,
	    (uintptr_t)(
		    ((msg_flags & KMSG_TRACE_FLAGS_MASK) << KMSG_TRACE_FLAGS_SHIFT) |
		    ((num_ports & KMSG_TRACE_PORTS_MASK) << KMSG_TRACE_PORTS_SHIFT)
		    )
	    );
}
1175 #endif
1176
/* zone for cached ipc_kmsg_t structures */
ZONE_DECLARE(ipc_kmsg_zone, "ipc kmsgs", IKM_SAVED_KMSG_SIZE, ZC_CACHING);
/* boot-arg "ipc_strict_reply": opt-in enforcement of reply-port context rules */
static TUNABLE(bool, enforce_strict_reply, "ipc_strict_reply", false);

/*
 * Forward declarations
 */

/* Release all rights, references, and memory held by a kmsg. */
void ipc_kmsg_clean(
	ipc_kmsg_t      kmsg);

/* Release rights/memory carried by the first `number` body descriptors. */
void ipc_kmsg_clean_body(
	ipc_kmsg_t      kmsg,
	mach_msg_type_number_t  number,
	mach_msg_descriptor_t   *desc);

/* Clean up a message whose copyin failed partway through its body. */
void ipc_kmsg_clean_partial(
	ipc_kmsg_t      kmsg,
	mach_msg_type_number_t  number,
	mach_msg_descriptor_t   *desc,
	vm_offset_t     paddr,
	vm_size_t       length);

/* Copy in the complex body (descriptors) of a user message. */
mach_msg_return_t ipc_kmsg_copyin_body(
	ipc_kmsg_t      kmsg,
	ipc_space_t     space,
	vm_map_t        map,
	mach_msg_option_t *optionp);


static void
ipc_kmsg_link_reply_context_locked(
	ipc_port_t reply_port,
	ipc_port_t voucher_port);

static kern_return_t
ipc_kmsg_validate_reply_port_locked(
	ipc_port_t reply_port,
	mach_msg_option_t options);

static mach_msg_return_t
ipc_kmsg_validate_reply_context_locked(
	mach_msg_option_t option,
	ipc_port_t dest_port,
	ipc_voucher_t voucher,
	mach_port_name_t voucher_name);

/* we can't include the BSD <sys/persona.h> header here... */
#ifndef PERSONA_ID_NONE
#define PERSONA_ID_NONE ((uint32_t)-1)
#endif

/*
 * We keep a per-processor cache of kernel message buffers.
 * The cache saves the overhead/locking of using kalloc/kfree.
 * The per-processor cache seems to miss less than a per-thread cache,
 * and it also uses less memory.  Access to the cache doesn't
 * require locking.
 */
1236
1237 /*
1238 * Routine: ikm_set_header
1239 * Purpose:
1240 * Set the header (and data) pointers for a message. If the
1241 * message is small, the data pointer is NULL and all the
1242 * data resides within the fixed
1243 * the cache, that is best. Otherwise, allocate a new one.
1244 * Conditions:
1245 * Nothing locked.
1246 */
1247 static void
1248 ikm_set_header(
1249 ipc_kmsg_t kmsg,
1250 void *data,
1251 mach_msg_size_t mtsize)
1252 {
1253 if (data) {
1254 kmsg->ikm_data = data;
1255 kmsg->ikm_header = (mach_msg_header_t *)(data + kmsg->ikm_size - mtsize);
1256 } else {
1257 assert(kmsg->ikm_size == IKM_SAVED_MSG_SIZE);
1258 kmsg->ikm_header = (mach_msg_header_t *)
1259 ((vm_offset_t)(kmsg + 1) + kmsg->ikm_size - mtsize);
1260 }
1261 }
1262
1263 /*
1264 * Routine: ipc_kmsg_alloc
1265 * Purpose:
1266 * Allocate a kernel message structure. If we can get one from
1267 * the cache, that is best. Otherwise, allocate a new one.
1268 * Conditions:
1269 * Nothing locked.
1270 */
1271 ipc_kmsg_t
1272 ipc_kmsg_alloc(
1273 mach_msg_size_t msg_and_trailer_size)
1274 {
1275 mach_msg_size_t max_expanded_size;
1276 ipc_kmsg_t kmsg;
1277 void *data;
1278
1279 /*
1280 * LP64support -
1281 * Pad the allocation in case we need to expand the
1282 * message descriptors for user spaces with pointers larger than
1283 * the kernel's own, or vice versa. We don't know how many descriptors
1284 * there are yet, so just assume the whole body could be
1285 * descriptors (if there could be any at all).
1286 *
1287 * The expansion space is left in front of the header,
1288 * because it is easier to pull the header and descriptors
1289 * forward as we process them than it is to push all the
1290 * data backwards.
1291 */
1292 mach_msg_size_t size = msg_and_trailer_size - MAX_TRAILER_SIZE;
1293
1294 /* compare against implementation upper limit for the body */
1295 if (size > ipc_kmsg_max_body_space) {
1296 return IKM_NULL;
1297 }
1298
1299 if (size > sizeof(mach_msg_base_t)) {
1300 mach_msg_size_t max_desc = (mach_msg_size_t)(((size - sizeof(mach_msg_base_t)) /
1301 sizeof(mach_msg_ool_descriptor32_t)) *
1302 DESC_SIZE_ADJUSTMENT);
1303
1304 /* make sure expansion won't cause wrap */
1305 if (msg_and_trailer_size > MACH_MSG_SIZE_MAX - max_desc) {
1306 return IKM_NULL;
1307 }
1308
1309 max_expanded_size = msg_and_trailer_size + max_desc;
1310 } else {
1311 max_expanded_size = msg_and_trailer_size;
1312 }
1313
1314 kmsg = (ipc_kmsg_t)zalloc(ipc_kmsg_zone);
1315
1316 if (max_expanded_size < IKM_SAVED_MSG_SIZE) {
1317 max_expanded_size = IKM_SAVED_MSG_SIZE; /* round up for ikm_cache */
1318 data = NULL;
1319 } else if (max_expanded_size > IKM_SAVED_MSG_SIZE) {
1320 data = kheap_alloc(KHEAP_DATA_BUFFERS, max_expanded_size, Z_WAITOK);
1321 }
1322
1323 if (kmsg != IKM_NULL) {
1324 ikm_init(kmsg, max_expanded_size);
1325 ikm_set_header(kmsg, data, msg_and_trailer_size);
1326 }
1327
1328 return kmsg;
1329 }
1330
/*
 *	Routine:	ipc_kmsg_free
 *	Purpose:
 *		Free a kernel message buffer.  If the kmsg is preallocated
 *		to a port, just "put it back (marked unused)."  We have to
 *		do this with the port locked.  The port may have its hold
 *		on our message released.  In that case, we have to just
 *		revert the message to a traditional one and free it normally.
 *	Conditions:
 *		Nothing locked.
 */

void
ipc_kmsg_free(
	ipc_kmsg_t      kmsg)
{
	mach_msg_size_t size = kmsg->ikm_size;
	ipc_port_t port;

	/* any voucher must have been released/moved before freeing */
	assert(!IP_VALID(kmsg->ikm_voucher));

	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_KMSG_FREE) | DBG_FUNC_NONE,
	    VM_KERNEL_ADDRPERM((uintptr_t)kmsg),
	    0, 0, 0, 0);

	/*
	 * Check to see if the message is bound to the port.  If so,
	 * mark it not in use.  If the port isn't already dead, then
	 * leave the message associated with it.  Otherwise, free it.
	 */
	if (size == IKM_SAVED_MSG_SIZE) {
		/* sanity: the header must lie inside the inline buffer */
		if ((void *)kmsg->ikm_header < (void *)(kmsg + 1) ||
		    (void *)kmsg->ikm_header >= (void *)(kmsg + 1) + IKM_SAVED_MSG_SIZE) {
			panic("ipc_kmsg_free");
		}
		port = ikm_prealloc_inuse_port(kmsg);
		if (port != IP_NULL) {
			ip_lock(port);
			ikm_prealloc_clear_inuse(kmsg, port);
			if (ip_active(port) && (port->ip_premsg == kmsg)) {
				/* port still owns the buffer: hand it back, don't free */
				assert(IP_PREALLOC(port));
				ip_unlock(port);
				ip_release(port);
				return;
			}
			ip_unlock(port);
			ip_release(port); /* May be last reference */
		}
	} else {
		/* large message: header must lie inside the external buffer */
		void *data = kmsg->ikm_data;
		if ((void *)kmsg->ikm_header < data ||
		    (void *)kmsg->ikm_header >= data + size) {
			panic("ipc_kmsg_free");
		}
		kheap_free(KHEAP_DATA_BUFFERS, data, size);
	}
	zfree(ipc_kmsg_zone, kmsg);
}
1389
1390
1391 /*
1392 * Routine: ipc_kmsg_enqueue
1393 * Purpose:
1394 * Enqueue a kmsg.
1395 */
1396
1397 void
1398 ipc_kmsg_enqueue(
1399 ipc_kmsg_queue_t queue,
1400 ipc_kmsg_t kmsg)
1401 {
1402 ipc_kmsg_t first = queue->ikmq_base;
1403 ipc_kmsg_t last;
1404
1405 if (first == IKM_NULL) {
1406 queue->ikmq_base = kmsg;
1407 kmsg->ikm_next = kmsg;
1408 kmsg->ikm_prev = kmsg;
1409 } else {
1410 last = first->ikm_prev;
1411 kmsg->ikm_next = first;
1412 kmsg->ikm_prev = last;
1413 first->ikm_prev = kmsg;
1414 last->ikm_next = kmsg;
1415 }
1416 }
1417
1418 /*
1419 * Routine: ipc_kmsg_enqueue_qos
1420 * Purpose:
1421 * Enqueue a kmsg, propagating qos
1422 * overrides towards the head of the queue.
1423 *
1424 * Returns:
1425 * whether the head of the queue had
1426 * it's override-qos adjusted because
1427 * of this insertion.
1428 */
1429
1430 boolean_t
1431 ipc_kmsg_enqueue_qos(
1432 ipc_kmsg_queue_t queue,
1433 ipc_kmsg_t kmsg)
1434 {
1435 ipc_kmsg_t first = queue->ikmq_base;
1436 ipc_kmsg_t prev;
1437 mach_msg_qos_t qos_ovr;
1438
1439 if (first == IKM_NULL) {
1440 /* insert a first message */
1441 queue->ikmq_base = kmsg;
1442 kmsg->ikm_next = kmsg;
1443 kmsg->ikm_prev = kmsg;
1444 return TRUE;
1445 }
1446
1447 /* insert at the tail */
1448 prev = first->ikm_prev;
1449 kmsg->ikm_next = first;
1450 kmsg->ikm_prev = prev;
1451 first->ikm_prev = kmsg;
1452 prev->ikm_next = kmsg;
1453
1454 /* apply QoS overrides towards the head */
1455 qos_ovr = kmsg->ikm_qos_override;
1456 while (prev != kmsg &&
1457 qos_ovr > prev->ikm_qos_override) {
1458 prev->ikm_qos_override = qos_ovr;
1459 prev = prev->ikm_prev;
1460 }
1461
1462 /* did we adjust everything? */
1463 return prev == kmsg;
1464 }
1465
1466 /*
1467 * Routine: ipc_kmsg_override_qos
1468 * Purpose:
1469 * Update the override for a given kmsg already
1470 * enqueued, propagating qos override adjustments
1471 * towards the head of the queue.
1472 *
1473 * Returns:
1474 * whether the head of the queue had
1475 * it's override-qos adjusted because
1476 * of this insertion.
1477 */
1478
1479 boolean_t
1480 ipc_kmsg_override_qos(
1481 ipc_kmsg_queue_t queue,
1482 ipc_kmsg_t kmsg,
1483 mach_msg_qos_t qos_ovr)
1484 {
1485 ipc_kmsg_t first = queue->ikmq_base;
1486 ipc_kmsg_t cur = kmsg;
1487
1488 /* apply QoS overrides towards the head */
1489 while (qos_ovr > cur->ikm_qos_override) {
1490 cur->ikm_qos_override = qos_ovr;
1491 if (cur == first) {
1492 return TRUE;
1493 }
1494 cur = cur->ikm_prev;
1495 }
1496 return FALSE;
1497 }
1498
1499 /*
1500 * Routine: ipc_kmsg_dequeue
1501 * Purpose:
1502 * Dequeue and return a kmsg.
1503 */
1504
1505 ipc_kmsg_t
1506 ipc_kmsg_dequeue(
1507 ipc_kmsg_queue_t queue)
1508 {
1509 ipc_kmsg_t first;
1510
1511 first = ipc_kmsg_queue_first(queue);
1512
1513 if (first != IKM_NULL) {
1514 ipc_kmsg_rmqueue(queue, first);
1515 }
1516
1517 return first;
1518 }
1519
/*
 *	Routine:	ipc_kmsg_rmqueue
 *	Purpose:
 *		Pull a kmsg out of a queue (circular doubly-linked list).
 *	Conditions:
 *		kmsg must actually be on the queue.
 */

void
ipc_kmsg_rmqueue(
	ipc_kmsg_queue_t        queue,
	ipc_kmsg_t              kmsg)
{
	ipc_kmsg_t next, prev;

	assert(queue->ikmq_base != IKM_NULL);

	next = kmsg->ikm_next;
	prev = kmsg->ikm_prev;

	if (next == kmsg) {
		/* kmsg is the only element: the queue becomes empty */
		assert(prev == kmsg);
		assert(queue->ikmq_base == kmsg);

		queue->ikmq_base = IKM_NULL;
	} else {
		/* catch linkage corruption before unlinking */
		if (__improbable(next->ikm_prev != kmsg || prev->ikm_next != kmsg)) {
			panic("ipc_kmsg_rmqueue: inconsistent prev/next pointers. "
			    "(prev->next: %p, next->prev: %p, kmsg: %p)",
			    prev->ikm_next, next->ikm_prev, kmsg);
		}

		if (queue->ikmq_base == kmsg) {
			queue->ikmq_base = next;
		}

		next->ikm_prev = prev;
		prev->ikm_next = next;
	}
	/* XXX	Temporary debug logic
	 * The assignment inside assert() only happens on DEBUG builds:
	 * it poisons the links so a stale reuse is caught early. */
	assert((kmsg->ikm_next = IKM_BOGUS) == IKM_BOGUS);
	assert((kmsg->ikm_prev = IKM_BOGUS) == IKM_BOGUS);
}
1561
1562 /*
1563 * Routine: ipc_kmsg_queue_next
1564 * Purpose:
1565 * Return the kmsg following the given kmsg.
1566 * (Or IKM_NULL if it is the last one in the queue.)
1567 */
1568
1569 ipc_kmsg_t
1570 ipc_kmsg_queue_next(
1571 ipc_kmsg_queue_t queue,
1572 ipc_kmsg_t kmsg)
1573 {
1574 ipc_kmsg_t next;
1575
1576 assert(queue->ikmq_base != IKM_NULL);
1577
1578 next = kmsg->ikm_next;
1579 if (queue->ikmq_base == next) {
1580 next = IKM_NULL;
1581 }
1582
1583 return next;
1584 }
1585
1586 /*
1587 * Routine: ipc_kmsg_destroy
1588 * Purpose:
1589 * Destroys a kernel message. Releases all rights,
1590 * references, and memory held by the message.
1591 * Frees the message.
1592 * Conditions:
1593 * No locks held.
1594 */
1595
1596 void
1597 ipc_kmsg_destroy(
1598 ipc_kmsg_t kmsg)
1599 {
1600 /*
1601 * Destroying a message can cause more messages to be destroyed.
1602 * Curtail recursion by putting messages on the deferred
1603 * destruction queue. If this was the first message on the
1604 * queue, this instance must process the full queue.
1605 */
1606 if (ipc_kmsg_delayed_destroy(kmsg)) {
1607 ipc_kmsg_reap_delayed();
1608 }
1609 }
1610
1611 /*
1612 * Routine: ipc_kmsg_delayed_destroy
1613 * Purpose:
1614 * Enqueues a kernel message for deferred destruction.
1615 * Returns:
1616 * Boolean indicator that the caller is responsible to reap
1617 * deferred messages.
1618 */
1619
1620 boolean_t
1621 ipc_kmsg_delayed_destroy(
1622 ipc_kmsg_t kmsg)
1623 {
1624 ipc_kmsg_queue_t queue = &(current_thread()->ith_messages);
1625 boolean_t first = ipc_kmsg_queue_empty(queue);
1626
1627 ipc_kmsg_enqueue(queue, kmsg);
1628 return first;
1629 }
1630
/*
 *	Routine:	ipc_kmsg_reap_delayed
 *	Purpose:
 *		Destroys messages from the per-thread
 *		deferred reaping queue.
 *	Conditions:
 *		No locks held.
 */

void
ipc_kmsg_reap_delayed(void)
{
	ipc_kmsg_queue_t queue = &(current_thread()->ith_messages);
	ipc_kmsg_t kmsg;

	/*
	 * must leave kmsg in queue while cleaning it to assure
	 * no nested calls recurse into here.
	 * (ipc_kmsg_clean may destroy rights, which may in turn call
	 * ipc_kmsg_delayed_destroy; a non-empty queue makes those
	 * calls enqueue-only instead of reaping.)
	 */
	while ((kmsg = ipc_kmsg_queue_first(queue)) != IKM_NULL) {
		ipc_kmsg_clean(kmsg);
		ipc_kmsg_rmqueue(queue, kmsg);
		ipc_kmsg_free(kmsg);
	}
}
1656
/*
 *	Routine:	ipc_kmsg_clean_body
 *	Purpose:
 *		Cleans the body of a kernel message.
 *		Releases all rights, references, and memory
 *		carried by the first `number` descriptors at `saddr`.
 *
 *	Conditions:
 *		No locks held.
 */
/* counter of descriptors with a type we don't understand (debug aid) */
static unsigned int _ipc_kmsg_clean_invalid_desc = 0;
void
ipc_kmsg_clean_body(
	__unused ipc_kmsg_t     kmsg,
	mach_msg_type_number_t  number,
	mach_msg_descriptor_t   *saddr)
{
	mach_msg_type_number_t  i;

	if (number == 0) {
		return;
	}

	for (i = 0; i < number; i++, saddr++) {
		switch (saddr->type.type) {
		case MACH_MSG_PORT_DESCRIPTOR: {
			mach_msg_port_descriptor_t *dsc;

			dsc = &saddr->port;

			/*
			 * Destroy port rights carried in the message
			 */
			if (!IP_VALID(dsc->name)) {
				continue;
			}
			ipc_object_destroy(ip_to_object(dsc->name), dsc->disposition);
			break;
		}
		case MACH_MSG_OOL_VOLATILE_DESCRIPTOR:
		case MACH_MSG_OOL_DESCRIPTOR: {
			mach_msg_ool_descriptor_t *dsc;

			dsc = (mach_msg_ool_descriptor_t *)&saddr->out_of_line;

			/*
			 * Destroy memory carried in the message
			 * (the address holds a vm_map_copy_t when size != 0)
			 */
			if (dsc->size == 0) {
				assert(dsc->address == (void *) 0);
			} else {
				vm_map_copy_discard((vm_map_copy_t) dsc->address);
			}
			break;
		}
		case MACH_MSG_OOL_PORTS_DESCRIPTOR: {
			ipc_object_t            *objects;
			mach_msg_type_number_t  j;
			mach_msg_ool_ports_descriptor_t *dsc;

			dsc = (mach_msg_ool_ports_descriptor_t  *)&saddr->ool_ports;
			objects = (ipc_object_t *) dsc->address;

			if (dsc->count == 0) {
				break;
			}

			assert(objects != (ipc_object_t *) 0);

			/* destroy port rights carried in the message */

			for (j = 0; j < dsc->count; j++) {
				ipc_object_t object = objects[j];

				if (!IO_VALID(object)) {
					continue;
				}

				ipc_object_destroy(object, dsc->disposition);
			}

			/* destroy memory carried in the message */

			assert(dsc->count != 0);

			kfree(dsc->address,
			    (vm_size_t) dsc->count * sizeof(mach_port_t));
			break;
		}
		case MACH_MSG_GUARDED_PORT_DESCRIPTOR: {
			mach_msg_guarded_port_descriptor_t *dsc = (typeof(dsc)) & saddr->guarded_port;

			/*
			 * Destroy port rights carried in the message
			 */
			if (!IP_VALID(dsc->name)) {
				continue;
			}
			ipc_object_destroy(ip_to_object(dsc->name), dsc->disposition);
			break;
		}
		default: {
			_ipc_kmsg_clean_invalid_desc++;         /* don't understand this type of descriptor */
		}
		}
	}
}
1763
/*
 *	Routine:	ipc_kmsg_clean_partial
 *	Purpose:
 *		Cleans a partially-acquired kernel message.
 *		number is the index of the type descriptor
 *		in the body of the message that contained the error.
 *		If dolast, the memory and port rights in this last
 *		type spec are also cleaned.  In that case, number
 *		specifies the number of port rights to clean.
 *	Conditions:
 *		Nothing locked.
 */

void
ipc_kmsg_clean_partial(
	ipc_kmsg_t              kmsg,
	mach_msg_type_number_t  number,
	mach_msg_descriptor_t   *desc,
	vm_offset_t             paddr,
	vm_size_t               length)
{
	ipc_object_t object;
	mach_msg_bits_t mbits = kmsg->ikm_header->msgh_bits;

	/* deal with importance chain while we still have dest and voucher references */
	ipc_importance_clean(kmsg);

	/* the destination right is always valid at this point */
	object = ip_to_object(kmsg->ikm_header->msgh_remote_port);
	assert(IO_VALID(object));
	ipc_object_destroy_dest(object, MACH_MSGH_BITS_REMOTE(mbits));

	/* reply right, if any */
	object = ip_to_object(kmsg->ikm_header->msgh_local_port);
	if (IO_VALID(object)) {
		ipc_object_destroy(object, MACH_MSGH_BITS_LOCAL(mbits));
	}

	/* voucher right, if any; clear the field so later cleanup skips it */
	object = ip_to_object(kmsg->ikm_voucher);
	if (IO_VALID(object)) {
		assert(MACH_MSGH_BITS_VOUCHER(mbits) == MACH_MSG_TYPE_MOVE_SEND);
		ipc_object_destroy(object, MACH_MSG_TYPE_PORT_SEND);
		kmsg->ikm_voucher = IP_NULL;
	}

	/* release any partially copied-in physical buffer */
	if (paddr) {
		(void) vm_deallocate(ipc_kernel_copy_map, paddr, length);
	}

	/* then release whatever descriptors were fully copied in */
	ipc_kmsg_clean_body(kmsg, number, desc);
}
1813
/*
 *	Routine:	ipc_kmsg_clean
 *	Purpose:
 *		Cleans a kernel message.  Releases all rights,
 *		references, and memory held by the message.
 *	Conditions:
 *		No locks held.
 */

void
ipc_kmsg_clean(
	ipc_kmsg_t      kmsg)
{
	ipc_object_t object;
	mach_msg_bits_t mbits;

	/* deal with importance chain while we still have dest and voucher references */
	ipc_importance_clean(kmsg);

	mbits = kmsg->ikm_header->msgh_bits;

	/* destination right, if still present */
	object = ip_to_object(kmsg->ikm_header->msgh_remote_port);
	if (IO_VALID(object)) {
		ipc_object_destroy_dest(object, MACH_MSGH_BITS_REMOTE(mbits));
	}

	/* reply right, if any */
	object = ip_to_object(kmsg->ikm_header->msgh_local_port);
	if (IO_VALID(object)) {
		ipc_object_destroy(object, MACH_MSGH_BITS_LOCAL(mbits));
	}

	/* voucher right, if any; clear the field to mark it released */
	object = ip_to_object(kmsg->ikm_voucher);
	if (IO_VALID(object)) {
		assert(MACH_MSGH_BITS_VOUCHER(mbits) == MACH_MSG_TYPE_MOVE_SEND);
		ipc_object_destroy(object, MACH_MSG_TYPE_PORT_SEND);
		kmsg->ikm_voucher = IP_NULL;
	}

	/* complex messages also carry rights/memory in body descriptors */
	if (mbits & MACH_MSGH_BITS_COMPLEX) {
		mach_msg_body_t *body;

		body = (mach_msg_body_t *) (kmsg->ikm_header + 1);
		ipc_kmsg_clean_body(kmsg, body->msgh_descriptor_count,
		    (mach_msg_descriptor_t *)(body + 1));
	}
}
1859
/*
 *	Routine:	ipc_kmsg_set_prealloc
 *	Purpose:
 *		Assign a kmsg as a preallocated message buffer to a port.
 *	Conditions:
 *		port locked.
 */

void
ipc_kmsg_set_prealloc(
	ipc_kmsg_t              kmsg,
	ipc_port_t              port)
{
	assert(kmsg->ikm_prealloc == IP_NULL);

	/*
	 * NOTE(review): this re-assigns IP_NULL right after asserting it.
	 * The in-use port is recorded later via ikm_prealloc_set_inuse();
	 * the union'd field is kept NULL here — confirm intent if changing.
	 */
	kmsg->ikm_prealloc = IP_NULL;

	/* the turnstile slot shares storage; must be empty before reuse */
	assert(port_send_turnstile(port) == TURNSTILE_NULL);
	kmsg->ikm_turnstile = TURNSTILE_NULL;
	IP_SET_PREALLOC(port, kmsg);
}
1881
1882 /*
1883 * Routine: ipc_kmsg_clear_prealloc
1884 * Purpose:
1885 * Release the Assignment of a preallocated message buffer from a port.
1886 * Conditions:
1887 * port locked.
1888 */
1889 void
1890 ipc_kmsg_clear_prealloc(
1891 ipc_kmsg_t kmsg,
1892 ipc_port_t port)
1893 {
1894 /* take the mqueue lock since the turnstile is protected under it */
1895 imq_lock(&port->ip_messages);
1896
1897 IP_CLEAR_PREALLOC(port, kmsg);
1898 set_port_send_turnstile(port, kmsg->ikm_turnstile);
1899 imq_unlock(&port->ip_messages);
1900 }
1901
1902 /*
1903 * Routine: ipc_kmsg_prealloc
1904 * Purpose:
1905 * Wraper to ipc_kmsg_alloc() to account for
1906 * header expansion requirements.
1907 */
1908 ipc_kmsg_t
1909 ipc_kmsg_prealloc(mach_msg_size_t size)
1910 {
1911 #if defined(__LP64__)
1912 if (size > IKM_SAVED_MSG_SIZE - LEGACY_HEADER_SIZE_DELTA) {
1913 panic("ipc_kmsg_prealloc");
1914 }
1915
1916 size += LEGACY_HEADER_SIZE_DELTA;
1917 #endif
1918 return ipc_kmsg_alloc(size);
1919 }
1920

/*
 *	Routine:	ipc_kmsg_get
 *	Purpose:
 *		Allocates a kernel message buffer.
 *		Copies a user message to the message buffer.
 *	Conditions:
 *		Nothing locked.
 *	Returns:
 *		MACH_MSG_SUCCESS	Acquired a message buffer.
 *		MACH_SEND_MSG_TOO_SMALL	Message smaller than a header.
 *		MACH_SEND_MSG_TOO_SMALL	Message size not long-word multiple.
 *		MACH_SEND_TOO_LARGE	Message too large to ever be sent.
 *		MACH_SEND_NO_BUFFER	Couldn't allocate a message buffer.
 *		MACH_SEND_INVALID_DATA	Couldn't copy message data.
 */

mach_msg_return_t
ipc_kmsg_get(
	mach_vm_address_t       msg_addr,
	mach_msg_size_t size,
	ipc_kmsg_t              *kmsgp)
{
	mach_msg_size_t                 msg_and_trailer_size;
	ipc_kmsg_t                      kmsg;
	mach_msg_max_trailer_t          *trailer;
	mach_msg_legacy_base_t          legacy_base;
	mach_msg_size_t                 len_copied;
	legacy_base.body.msgh_descriptor_count = 0;

	/* reject messages smaller than a header or not 4-byte aligned */
	if ((size < sizeof(mach_msg_legacy_header_t)) || (size & 3)) {
		return MACH_SEND_MSG_TOO_SMALL;
	}

	if (size > ipc_kmsg_max_body_space) {
		return MACH_SEND_TOO_LARGE;
	}

	/* copy in the header, plus the descriptor count if there is a body */
	if (size == sizeof(mach_msg_legacy_header_t)) {
		len_copied = sizeof(mach_msg_legacy_header_t);
	} else {
		len_copied = sizeof(mach_msg_legacy_base_t);
	}

	if (copyinmsg(msg_addr, (char *)&legacy_base, len_copied)) {
		return MACH_SEND_INVALID_DATA;
	}

	/*
	 * If the message claims to be complex, it must at least
	 * have the length of a "base" message (header + dsc_count).
	 */
	if (len_copied < sizeof(mach_msg_legacy_base_t) &&
	    (legacy_base.header.msgh_bits & MACH_MSGH_BITS_COMPLEX)) {
		return MACH_SEND_MSG_TOO_SMALL;
	}

	msg_addr += sizeof(legacy_base.header);
#if defined(__LP64__)
	/* the kernel header is wider than the 32-bit user legacy header */
	size += LEGACY_HEADER_SIZE_DELTA;
#endif
	/* unreachable if !DEBUG */
	__unreachable_ok_push
	if (DEBUG_KPRINT_SYSCALL_PREDICATE(DEBUG_KPRINT_SYSCALL_IPC_MASK)) {
		unsigned int j;
		for (j = 0; j < sizeof(legacy_base.header); j++) {
			kprintf("%02x\n", ((unsigned char*)&legacy_base.header)[j]);
		}
	}
	__unreachable_ok_pop

	msg_and_trailer_size = size + MAX_TRAILER_SIZE;
	kmsg = ipc_kmsg_alloc(msg_and_trailer_size);
	if (kmsg == IKM_NULL) {
		return MACH_SEND_NO_BUFFER;
	}

	/* rebuild the (possibly wider) kernel header from the legacy one */
	kmsg->ikm_header->msgh_size                     = size;
	kmsg->ikm_header->msgh_bits                     = legacy_base.header.msgh_bits;
	kmsg->ikm_header->msgh_remote_port              = CAST_MACH_NAME_TO_PORT(legacy_base.header.msgh_remote_port);
	kmsg->ikm_header->msgh_local_port               = CAST_MACH_NAME_TO_PORT(legacy_base.header.msgh_local_port);
	kmsg->ikm_header->msgh_voucher_port             = legacy_base.header.msgh_voucher_port;
	kmsg->ikm_header->msgh_id                       = legacy_base.header.msgh_id;

	DEBUG_KPRINT_SYSCALL_IPC("ipc_kmsg_get header:\n"
	    "  size:		0x%.8x\n"
	    "  bits:		0x%.8x\n"
	    "  remote_port:	%p\n"
	    "  local_port:	%p\n"
	    "  voucher_port:	0x%.8x\n"
	    "  id:		%.8d\n",
	    kmsg->ikm_header->msgh_size,
	    kmsg->ikm_header->msgh_bits,
	    kmsg->ikm_header->msgh_remote_port,
	    kmsg->ikm_header->msgh_local_port,
	    kmsg->ikm_header->msgh_voucher_port,
	    kmsg->ikm_header->msgh_id);

	/* copy in the body just past the (already rebuilt) header */
	if (copyinmsg(msg_addr, (char *)(kmsg->ikm_header + 1), size - (mach_msg_size_t)sizeof(mach_msg_header_t))) {
		ipc_kmsg_free(kmsg);
		return MACH_SEND_INVALID_DATA;
	}

	/* unreachable if !DEBUG */
	__unreachable_ok_push
	if (DEBUG_KPRINT_SYSCALL_PREDICATE(DEBUG_KPRINT_SYSCALL_IPC_MASK)) {
		kprintf("body: size: %lu\n", (size - sizeof(mach_msg_header_t)));
		uint32_t i;
		for (i = 0; i * 4 < (size - sizeof(mach_msg_header_t)); i++) {
			kprintf("%.4x\n", ((uint32_t *)(kmsg->ikm_header + 1))[i]);
		}
	}
	__unreachable_ok_pop
	DEBUG_IPC_KMSG_PRINT(kmsg, "ipc_kmsg_get()");

	/*
	 * I reserve for the trailer the largest space (MAX_TRAILER_SIZE)
	 * However, the internal size field of the trailer (msgh_trailer_size)
	 * is initialized to the minimum (sizeof(mach_msg_trailer_t)), to optimize
	 * the cases where no implicit data is requested.
	 */
	trailer = (mach_msg_max_trailer_t *) ((vm_offset_t)kmsg->ikm_header + size);
	bzero(trailer, sizeof(*trailer));
	trailer->msgh_sender = current_thread()->task->sec_token;
	trailer->msgh_audit = current_thread()->task->audit_token;
	trailer->msgh_trailer_type = MACH_MSG_TRAILER_FORMAT_0;
	trailer->msgh_trailer_size = MACH_MSG_TRAILER_MINIMUM_SIZE;

#ifdef ppc
	if (trcWork.traceMask) {
		dbgTrace(0x1100, (unsigned int)kmsg->ikm_header->msgh_id,
		    (unsigned int)kmsg->ikm_header->msgh_remote_port,
		    (unsigned int)kmsg->ikm_header->msgh_local_port, 0);
	}
#endif

	trailer->msgh_labels.sender = 0;
	*kmsgp = kmsg;
	return MACH_MSG_SUCCESS;
}
2061
/*
 *	Routine:	ipc_kmsg_get_from_kernel
 *	Purpose:
 *		First checks for a preallocated message
 *		reserved for kernel clients.  If not found -
 *		allocates a new kernel message buffer.
 *		Copies a kernel message to the message buffer.
 *		Only resource errors are allowed.
 *	Conditions:
 *		Nothing locked.
 *		Ports in header are ipc_port_t.
 *	Returns:
 *		MACH_MSG_SUCCESS	Acquired a message buffer.
 *		MACH_SEND_NO_BUFFER	Couldn't allocate a message buffer.
 */

mach_msg_return_t
ipc_kmsg_get_from_kernel(
	mach_msg_header_t       *msg,
	mach_msg_size_t size,
	ipc_kmsg_t              *kmsgp)
{
	ipc_kmsg_t      kmsg;
	mach_msg_size_t msg_and_trailer_size;
	mach_msg_max_trailer_t *trailer;
	ipc_port_t      dest_port;

	assert(size >= sizeof(mach_msg_header_t));
	assert((size & 3) == 0);

	dest_port = msg->msgh_remote_port;

	msg_and_trailer_size = size + MAX_TRAILER_SIZE;

	/*
	 * See if the port has a pre-allocated kmsg for kernel
	 * clients.  These are set up for those kernel clients
	 * which cannot afford to wait.
	 */
	if (IP_VALID(dest_port) && IP_PREALLOC(dest_port)) {
		mach_msg_size_t max_desc = 0;

		ip_lock(dest_port);
		if (!ip_active(dest_port)) {
			ip_unlock(dest_port);
			return MACH_SEND_NO_BUFFER;
		}
		assert(IP_PREALLOC(dest_port));
		kmsg = dest_port->ip_premsg;
		/* only one outstanding message per preallocated buffer */
		if (ikm_prealloc_inuse(kmsg)) {
			ip_unlock(dest_port);
			return MACH_SEND_NO_BUFFER;
		}
#if !defined(__LP64__)
		/* on ILP32, complex messages may need room for descriptor expansion */
		if (msg->msgh_bits & MACH_MSGH_BITS_COMPLEX) {
			assert(size > sizeof(mach_msg_base_t));
			max_desc = ((mach_msg_base_t *)msg)->body.msgh_descriptor_count *
			    DESC_SIZE_ADJUSTMENT;
		}
#endif
		if (msg_and_trailer_size > kmsg->ikm_size - max_desc) {
			ip_unlock(dest_port);
			return MACH_SEND_TOO_LARGE;
		}
		ikm_prealloc_set_inuse(kmsg, dest_port);
		ikm_set_header(kmsg, NULL, msg_and_trailer_size);
		ip_unlock(dest_port);
	} else {
		kmsg = ipc_kmsg_alloc(msg_and_trailer_size);
		if (kmsg == IKM_NULL) {
			return MACH_SEND_NO_BUFFER;
		}
	}

	(void) memcpy((void *) kmsg->ikm_header, (const void *) msg, size);

	ikm_qos_init(kmsg);

	kmsg->ikm_header->msgh_size = size;

	/*
	 * I reserve for the trailer the largest space (MAX_TRAILER_SIZE)
	 * However, the internal size field of the trailer (msgh_trailer_size)
	 * is initialized to the minimum (sizeof(mach_msg_trailer_t)), to
	 * optimize the cases where no implicit data is requested.
	 */
	trailer = (mach_msg_max_trailer_t *)
	    ((vm_offset_t)kmsg->ikm_header + size);
	bzero(trailer, sizeof(*trailer));
	/* kernel-originated messages carry the kernel's tokens */
	trailer->msgh_sender = KERNEL_SECURITY_TOKEN;
	trailer->msgh_audit = KERNEL_AUDIT_TOKEN;
	trailer->msgh_trailer_type = MACH_MSG_TRAILER_FORMAT_0;
	trailer->msgh_trailer_size = MACH_MSG_TRAILER_MINIMUM_SIZE;

	trailer->msgh_labels.sender = 0;

	*kmsgp = kmsg;
	return MACH_MSG_SUCCESS;
}
2161
2162 /*
2163 * Routine: ipc_kmsg_send
2164 * Purpose:
2165 * Send a message. The message holds a reference
2166 * for the destination port in the msgh_remote_port field.
2167 *
2168 * If unsuccessful, the caller still has possession of
2169 * the message and must do something with it. If successful,
2170 * the message is queued, given to a receiver, destroyed,
2171 * or handled directly by the kernel via mach_msg.
2172 * Conditions:
2173 * Nothing locked.
2174 * Returns:
2175 * MACH_MSG_SUCCESS The message was accepted.
2176 * MACH_SEND_TIMED_OUT Caller still has message.
2177 * MACH_SEND_INTERRUPTED Caller still has message.
2178 * MACH_SEND_INVALID_DEST Caller still has message.
2179 */
2180 mach_msg_return_t
2181 ipc_kmsg_send(
2182 ipc_kmsg_t kmsg,
2183 mach_msg_option_t option,
2184 mach_msg_timeout_t send_timeout)
2185 {
2186 ipc_port_t port;
2187 thread_t th = current_thread();
2188 mach_msg_return_t error = MACH_MSG_SUCCESS;
2189 boolean_t kernel_reply = FALSE;
2190
2191 /* Check if honor qlimit flag is set on thread. */
2192 if ((th->options & TH_OPT_HONOR_QLIMIT) == TH_OPT_HONOR_QLIMIT) {
2193 /* Remove the MACH_SEND_ALWAYS flag to honor queue limit. */
2194 option &= (~MACH_SEND_ALWAYS);
2195 /* Add the timeout flag since the message queue might be full. */
2196 option |= MACH_SEND_TIMEOUT;
2197 th->options &= (~TH_OPT_HONOR_QLIMIT);
2198 }
2199
2200 #if IMPORTANCE_INHERITANCE
2201 bool did_importance = false;
2202 #if IMPORTANCE_TRACE
2203 mach_msg_id_t imp_msgh_id = -1;
2204 int sender_pid = -1;
2205 #endif /* IMPORTANCE_TRACE */
2206 #endif /* IMPORTANCE_INHERITANCE */
2207
2208 /* don't allow the creation of a circular loop */
2209 if (kmsg->ikm_header->msgh_bits & MACH_MSGH_BITS_CIRCULAR) {
2210 ipc_kmsg_destroy(kmsg);
2211 KDBG(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_KMSG_INFO) | DBG_FUNC_END, MACH_MSGH_BITS_CIRCULAR);
2212 return MACH_MSG_SUCCESS;
2213 }
2214
2215 ipc_voucher_send_preprocessing(kmsg);
2216
2217 port = kmsg->ikm_header->msgh_remote_port;
2218 assert(IP_VALID(port));
2219 ip_lock(port);
2220
2221 /*
2222 * If the destination has been guarded with a reply context, and the
2223 * sender is consuming a send-once right, then assume this is a reply
2224 * to an RPC and we need to validate that this sender is currently in
2225 * the correct context.
2226 */
2227 if (enforce_strict_reply && port->ip_reply_context != 0 &&
2228 ((option & MACH_SEND_KERNEL) == 0) &&
2229 MACH_MSGH_BITS_REMOTE(kmsg->ikm_header->msgh_bits) == MACH_MSG_TYPE_PORT_SEND_ONCE) {
2230 error = ipc_kmsg_validate_reply_context_locked(option, port, th->ith_voucher, th->ith_voucher_name);
2231 if (error != MACH_MSG_SUCCESS) {
2232 ip_unlock(port);
2233 return error;
2234 }
2235 }
2236
2237 #if IMPORTANCE_INHERITANCE
2238 retry:
2239 #endif /* IMPORTANCE_INHERITANCE */
2240 /*
2241 * Can't deliver to a dead port.
2242 * However, we can pretend it got sent
2243 * and was then immediately destroyed.
2244 */
2245 if (!ip_active(port)) {
2246 ip_unlock(port);
2247 #if MACH_FLIPC
2248 if (MACH_NODE_VALID(kmsg->ikm_node) && FPORT_VALID(port->ip_messages.imq_fport)) {
2249 flipc_msg_ack(kmsg->ikm_node, &port->ip_messages, FALSE);
2250 }
2251 #endif
2252 if (did_importance) {
2253 /*
2254 * We're going to pretend we delivered this message
2255 * successfully, and just eat the kmsg. However, the
2256 * kmsg is actually visible via the importance_task!
2257 * We need to cleanup this linkage before we destroy
2258 * the message, and more importantly before we set the
2259 * msgh_remote_port to NULL. See: 34302571
2260 */
2261 ipc_importance_clean(kmsg);
2262 }
2263 ip_release(port); /* JMM - Future: release right, not just ref */
2264 kmsg->ikm_header->msgh_remote_port = MACH_PORT_NULL;
2265 ipc_kmsg_destroy(kmsg);
2266 KDBG(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_KMSG_INFO) | DBG_FUNC_END, MACH_SEND_INVALID_DEST);
2267 return MACH_MSG_SUCCESS;
2268 }
2269
2270 if (port->ip_receiver == ipc_space_kernel) {
2271 /*
2272 * We can check ip_receiver == ipc_space_kernel
2273 * before checking that the port is active because
2274 * ipc_port_dealloc_kernel clears ip_receiver
2275 * before destroying a kernel port.
2276 */
2277 require_ip_active(port);
2278 port->ip_messages.imq_seqno++;
2279 ip_unlock(port);
2280
2281 current_task()->messages_sent++;
2282
2283 /*
2284 * Call the server routine, and get the reply message to send.
2285 */
2286 kmsg = ipc_kobject_server(kmsg, option);
2287 if (kmsg == IKM_NULL) {
2288 return MACH_MSG_SUCCESS;
2289 }
2290
2291 /* sign the reply message */
2292 ikm_sign(kmsg);
2293
2294 /* restart the KMSG_INFO tracing for the reply message */
2295 KDBG(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_KMSG_INFO) | DBG_FUNC_START);
2296 port = kmsg->ikm_header->msgh_remote_port;
2297 assert(IP_VALID(port));
2298 ip_lock(port);
2299 /* fall thru with reply - same options */
2300 kernel_reply = TRUE;
2301 if (!ip_active(port)) {
2302 error = MACH_SEND_INVALID_DEST;
2303 }
2304 }
2305
2306 #if IMPORTANCE_INHERITANCE
2307 /*
2308 * Need to see if this message needs importance donation and/or
2309 * propagation. That routine can drop the port lock temporarily.
2310 * If it does we'll have to revalidate the destination.
2311 */
2312 if (!did_importance) {
2313 did_importance = true;
2314 if (ipc_importance_send(kmsg, option)) {
2315 goto retry;
2316 }
2317 }
2318 #endif /* IMPORTANCE_INHERITANCE */
2319
2320 if (error != MACH_MSG_SUCCESS) {
2321 ip_unlock(port);
2322 } else {
2323 /*
2324 * We have a valid message and a valid reference on the port.
2325 * we can unlock the port and call mqueue_send() on its message
2326 * queue. Lock message queue while port is locked.
2327 */
2328 imq_lock(&port->ip_messages);
2329
2330 ipc_special_reply_port_msg_sent(port);
2331
2332 ip_unlock(port);
2333
2334 error = ipc_mqueue_send(&port->ip_messages, kmsg, option,
2335 send_timeout);
2336 }
2337
2338 #if IMPORTANCE_INHERITANCE
2339 if (did_importance) {
2340 __unused int importance_cleared = 0;
2341 switch (error) {
2342 case MACH_SEND_TIMED_OUT:
2343 case MACH_SEND_NO_BUFFER:
2344 case MACH_SEND_INTERRUPTED:
2345 case MACH_SEND_INVALID_DEST:
2346 /*
2347 * We still have the kmsg and its
2348 * reference on the port. But we
2349 * have to back out the importance
2350 * boost.
2351 *
2352 * The port could have changed hands,
2353 * be inflight to another destination,
2354 * etc... But in those cases our
2355 * back-out will find the new owner
2356 * (and all the operations that
2357 * transferred the right should have
2358 * applied their own boost adjustments
2359 * to the old owner(s)).
2360 */
2361 importance_cleared = 1;
2362 ipc_importance_clean(kmsg);
2363 break;
2364
2365 case MACH_MSG_SUCCESS:
2366 default:
2367 break;
2368 }
2369 #if IMPORTANCE_TRACE
2370 KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_MSG, IMP_MSG_SEND)) | DBG_FUNC_END,
2371 task_pid(current_task()), sender_pid, imp_msgh_id, importance_cleared, 0);
2372 #endif /* IMPORTANCE_TRACE */
2373 }
2374 #endif /* IMPORTANCE_INHERITANCE */
2375
2376 /*
2377 * If the port has been destroyed while we wait, treat the message
2378 * as a successful delivery (like we do for an inactive port).
2379 */
2380 if (error == MACH_SEND_INVALID_DEST) {
2381 #if MACH_FLIPC
2382 if (MACH_NODE_VALID(kmsg->ikm_node) && FPORT_VALID(port->ip_messages.imq_fport)) {
2383 flipc_msg_ack(kmsg->ikm_node, &port->ip_messages, FALSE);
2384 }
2385 #endif
2386 ip_release(port); /* JMM - Future: release right, not just ref */
2387 kmsg->ikm_header->msgh_remote_port = MACH_PORT_NULL;
2388 ipc_kmsg_destroy(kmsg);
2389 KDBG(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_KMSG_INFO) | DBG_FUNC_END, MACH_SEND_INVALID_DEST);
2390 return MACH_MSG_SUCCESS;
2391 }
2392
2393 if (error != MACH_MSG_SUCCESS && kernel_reply) {
2394 /*
2395 * Kernel reply messages that fail can't be allowed to
2396 * pseudo-receive on error conditions. We need to just treat
2397 * the message as a successful delivery.
2398 */
2399 #if MACH_FLIPC
2400 if (MACH_NODE_VALID(kmsg->ikm_node) && FPORT_VALID(port->ip_messages.imq_fport)) {
2401 flipc_msg_ack(kmsg->ikm_node, &port->ip_messages, FALSE);
2402 }
2403 #endif
2404 ip_release(port); /* JMM - Future: release right, not just ref */
2405 kmsg->ikm_header->msgh_remote_port = MACH_PORT_NULL;
2406 ipc_kmsg_destroy(kmsg);
2407 KDBG(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_KMSG_INFO) | DBG_FUNC_END, error);
2408 return MACH_MSG_SUCCESS;
2409 }
2410 return error;
2411 }
2412
2413 /*
2414 * Routine: ipc_kmsg_put
2415 * Purpose:
2416 * Copies a message buffer to a user message.
2417 * Copies only the specified number of bytes.
2418 * Frees the message buffer.
2419 * Conditions:
2420 * Nothing locked. The message buffer must have clean
2421 * header fields.
2422 * Returns:
2423 * MACH_MSG_SUCCESS Copied data out of message buffer.
2424 * MACH_RCV_INVALID_DATA Couldn't copy to user message.
2425 */
2426
mach_msg_return_t
ipc_kmsg_put(
	ipc_kmsg_t kmsg,
	mach_msg_option_t option,
	mach_vm_address_t rcv_addr,
	mach_msg_size_t rcv_size,
	mach_msg_size_t trailer_size,
	mach_msg_size_t *sizep)
{
	/* total bytes to copy out: message proper plus the appended trailer */
	mach_msg_size_t size = kmsg->ikm_header->msgh_size + trailer_size;
	mach_msg_return_t mr;

	DEBUG_IPC_KMSG_PRINT(kmsg, "ipc_kmsg_put()");


	DEBUG_KPRINT_SYSCALL_IPC("ipc_kmsg_put header:\n"
	    " size: 0x%.8x\n"
	    " bits: 0x%.8x\n"
	    " remote_port: %p\n"
	    " local_port: %p\n"
	    " voucher_port: 0x%.8x\n"
	    " id: %.8d\n",
	    kmsg->ikm_header->msgh_size,
	    kmsg->ikm_header->msgh_bits,
	    kmsg->ikm_header->msgh_remote_port,
	    kmsg->ikm_header->msgh_local_port,
	    kmsg->ikm_header->msgh_voucher_port,
	    kmsg->ikm_header->msgh_id);

#if defined(__LP64__)
	if (current_task() != kernel_task) { /* don't if receiver expects fully-cooked in-kernel msg; */
		/*
		 * Userspace expects the 32-bit "legacy" header layout, which
		 * is LEGACY_HEADER_SIZE_DELTA bytes smaller than the kernel's
		 * header. Rewrite the header in place, shifted forward by the
		 * delta, so the copyout below starts at the legacy header.
		 */
		mach_msg_legacy_header_t *legacy_header =
		    (mach_msg_legacy_header_t *)((vm_offset_t)(kmsg->ikm_header) + LEGACY_HEADER_SIZE_DELTA);

		/* snapshot all fields before overwriting (regions overlap) */
		mach_msg_bits_t bits = kmsg->ikm_header->msgh_bits;
		mach_msg_size_t msg_size = kmsg->ikm_header->msgh_size;
		mach_port_name_t remote_port = CAST_MACH_PORT_TO_NAME(kmsg->ikm_header->msgh_remote_port);
		mach_port_name_t local_port = CAST_MACH_PORT_TO_NAME(kmsg->ikm_header->msgh_local_port);
		mach_port_name_t voucher_port = kmsg->ikm_header->msgh_voucher_port;
		mach_msg_id_t id = kmsg->ikm_header->msgh_id;

		/* write fields back-to-front so nothing is clobbered early */
		legacy_header->msgh_id = id;
		legacy_header->msgh_local_port = local_port;
		legacy_header->msgh_remote_port = remote_port;
		legacy_header->msgh_voucher_port = voucher_port;
		legacy_header->msgh_size = msg_size - LEGACY_HEADER_SIZE_DELTA;
		legacy_header->msgh_bits = bits;

		size -= LEGACY_HEADER_SIZE_DELTA;
		kmsg->ikm_header = (mach_msg_header_t *)legacy_header;
	}
#endif

	/* unreachable if !DEBUG */
	__unreachable_ok_push
	if (DEBUG_KPRINT_SYSCALL_PREDICATE(DEBUG_KPRINT_SYSCALL_IPC_MASK)) {
		kprintf("ipc_kmsg_put header+body: %d\n", (size));
		uint32_t i;
		for (i = 0; i * 4 < size; i++) {
			kprintf("%.4x\n", ((uint32_t *)kmsg->ikm_header)[i]);
		}
		kprintf("type: %d\n", ((mach_msg_type_descriptor_t *)(((mach_msg_base_t *)kmsg->ikm_header) + 1))->type);
	}
	__unreachable_ok_pop

	/* Re-Compute target address if using stack-style delivery */
	if (option & MACH_RCV_STACK) {
		rcv_addr += rcv_size - size;
	}

	/* copy the (possibly legacy-shifted) message out to userspace */
	if (copyoutmsg((const char *) kmsg->ikm_header, rcv_addr, size)) {
		mr = MACH_RCV_INVALID_DATA;
		size = 0;
	} else {
		mr = MACH_MSG_SUCCESS;
	}

	/* trace the copyout; scrub rcv_addr if it looks like a kernel address */
	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_KMSG_LINK) | DBG_FUNC_NONE,
	    (rcv_addr >= VM_MIN_KERNEL_AND_KEXT_ADDRESS ||
	    rcv_addr + size >= VM_MIN_KERNEL_AND_KEXT_ADDRESS) ? (uintptr_t)0 : (uintptr_t)rcv_addr,
	    VM_KERNEL_ADDRPERM((uintptr_t)kmsg),
	    1 /* this is on the receive/copyout path */,
	    0,
	    0);
	/* the kmsg is consumed regardless of copyout success */
	ipc_kmsg_free(kmsg);

	if (sizep) {
		/* report bytes actually copied out (0 on copyout failure) */
		*sizep = size;
	}
	return mr;
}
2518
2519 /*
2520 * Routine: ipc_kmsg_put_to_kernel
2521 * Purpose:
2522 * Copies a message buffer to a kernel message.
2523 * Frees the message buffer.
2524 * No errors allowed.
2525 * Conditions:
2526 * Nothing locked.
2527 */
2528
2529 void
2530 ipc_kmsg_put_to_kernel(
2531 mach_msg_header_t *msg,
2532 ipc_kmsg_t kmsg,
2533 mach_msg_size_t size)
2534 {
2535 (void) memcpy((void *) msg, (const void *) kmsg->ikm_header, size);
2536
2537 ipc_kmsg_free(kmsg);
2538 }
2539
2540 static pthread_priority_compact_t
2541 ipc_get_current_thread_priority(void)
2542 {
2543 thread_t thread = current_thread();
2544 thread_qos_t qos;
2545 int relpri;
2546
2547 qos = thread_get_requested_qos(thread, &relpri);
2548 if (!qos) {
2549 qos = thread_user_promotion_qos_for_pri(thread->base_pri);
2550 relpri = 0;
2551 }
2552 return _pthread_priority_make_from_thread_qos(qos, relpri, 0);
2553 }
2554
/*
 * Compute and record the QoS values (ikm_ppriority / ikm_qos_override)
 * for a copied-in kmsg, and link a sync special reply port to the
 * destination when requested. Returns KERN_SUCCESS.
 */
static kern_return_t
ipc_kmsg_set_qos(
	ipc_kmsg_t kmsg,
	mach_msg_option_t options,
	mach_msg_priority_t priority)
{
	kern_return_t kr;
	/* header ports were already swapped to object pointers by copyin */
	ipc_port_t special_reply_port = kmsg->ikm_header->msgh_local_port;
	ipc_port_t dest_port = kmsg->ikm_header->msgh_remote_port;

	if ((options & MACH_SEND_OVERRIDE) &&
	    !mach_msg_priority_is_pthread_priority(priority)) {
		/* priority carries an encoded qos/relpri/override triple */
		mach_msg_qos_t qos = mach_msg_priority_qos(priority);
		int relpri = mach_msg_priority_relpri(priority);
		mach_msg_qos_t ovr = mach_msg_priority_overide_qos(priority);

		kmsg->ikm_ppriority = _pthread_priority_make_from_thread_qos(qos, relpri, 0);
		kmsg->ikm_qos_override = MAX(qos, ovr);
	} else {
		/* prefer the voucher's priority; fall back per send options */
		kr = ipc_get_pthpriority_from_kmsg_voucher(kmsg, &kmsg->ikm_ppriority);
		if (kr != KERN_SUCCESS) {
			if (options & MACH_SEND_PROPAGATE_QOS) {
				kmsg->ikm_ppriority = ipc_get_current_thread_priority();
			} else {
				kmsg->ikm_ppriority = MACH_MSG_PRIORITY_UNSPECIFIED;
			}
		}

		if (options & MACH_SEND_OVERRIDE) {
			/* here priority is a pthread priority: take the larger QoS */
			mach_msg_qos_t qos = _pthread_priority_thread_qos(kmsg->ikm_ppriority);
			mach_msg_qos_t ovr = _pthread_priority_thread_qos(priority);
			kmsg->ikm_qos_override = MAX(qos, ovr);
		} else {
			kmsg->ikm_qos_override = _pthread_priority_thread_qos(kmsg->ikm_ppriority);
		}
	}

	/* any voucher lookup failure above was absorbed by the fallbacks */
	kr = KERN_SUCCESS;

	if (IP_VALID(special_reply_port) &&
	    MACH_MSGH_BITS_LOCAL(kmsg->ikm_header->msgh_bits) == MACH_MSG_TYPE_PORT_SEND_ONCE) {
		if ((options & MACH_SEND_SYNC_OVERRIDE)) {
			boolean_t sync_bootstrap_checkin = !!(options & MACH_SEND_SYNC_BOOTSTRAP_CHECKIN);
			/*
			 * Link the destination port to special reply port and make sure that
			 * dest port has a send turnstile, else allocate one.
			 */
			ipc_port_link_special_reply_port(special_reply_port, dest_port, sync_bootstrap_checkin);
		}
	}
	return kr;
}
2607
2608 static inline void
2609 ipc_kmsg_allow_immovable_send(
2610 ipc_kmsg_t kmsg,
2611 ipc_entry_t dest_entry)
2612 {
2613 ipc_object_t object = dest_entry->ie_object;
2614 /*
2615 * If the dest port is a kobject, allow copyin of immovable send
2616 * rights in the message body to succeed
2617 */
2618 if (IO_VALID(object) && io_is_kobject(object)) {
2619 kmsg->ikm_flags |= IPC_KMSG_FLAGS_ALLOW_IMMOVABLE_SEND;
2620 }
2621 }
2622
2623 /*
2624 * Routine: ipc_kmsg_link_reply_context_locked
2625 * Purpose:
2626 * Link any required context from the sending voucher
2627 * to the reply port. The ipc_kmsg_copyin function will
2628 * enforce that the sender calls mach_msg in this context.
2629 * Conditions:
2630 * reply port is locked
2631 */
2632 static void
2633 ipc_kmsg_link_reply_context_locked(
2634 ipc_port_t reply_port,
2635 ipc_port_t voucher_port)
2636 {
2637 kern_return_t __assert_only kr;
2638 uint32_t persona_id = 0;
2639 ipc_voucher_t voucher;
2640
2641 ip_lock_held(reply_port);
2642
2643 if (!ip_active(reply_port)) {
2644 return;
2645 }
2646
2647 voucher = convert_port_to_voucher(voucher_port);
2648
2649 kr = bank_get_bank_ledger_thread_group_and_persona(voucher, NULL, NULL, &persona_id);
2650 assert(kr == KERN_SUCCESS);
2651 ipc_voucher_release(voucher);
2652
2653 if (persona_id == 0 || persona_id == PERSONA_ID_NONE) {
2654 /* there was no persona context to record */
2655 return;
2656 }
2657
2658 /*
2659 * Set the persona_id as the context on the reply port.
2660 * This will force the thread that replies to have adopted a voucher
2661 * with a matching persona.
2662 */
2663 reply_port->ip_reply_context = persona_id;
2664
2665 return;
2666 }
2667
/*
 * Validate that a port used as a strict reply port satisfies the
 * strict-reply invariants for a send. Returns KERN_SUCCESS or
 * KERN_INVALID_CAPABILITY. Caller holds the reply port lock.
 */
static kern_return_t
ipc_kmsg_validate_reply_port_locked(ipc_port_t reply_port, mach_msg_option_t options)
{
	ip_lock_held(reply_port);

	if (!ip_active(reply_port)) {
		/*
		 * Ideally, we would enforce that the reply receive right is
		 * active, but asynchronous XPC cancellation destroys the
		 * receive right, so we just have to return success here.
		 */
		return KERN_SUCCESS;
	}

	if (options & MACH_SEND_MSG) {
		/*
		 * If the reply port is active, then it should not be
		 * in-transit, and the receive right should be in the caller's
		 * IPC space.
		 */
		if (!reply_port->ip_receiver_name || reply_port->ip_receiver != current_task()->itk_space) {
			return KERN_INVALID_CAPABILITY;
		}

		/*
		 * A port used as a reply port in an RPC should have exactly 1
		 * extant send-once right which we either just made or are
		 * moving as part of the IPC.
		 */
		if (reply_port->ip_sorights != 1) {
			return KERN_INVALID_CAPABILITY;
		}
		/*
		 * XPC uses an extra send-right to keep the name of the reply
		 * right around through cancellation. That makes it harder to
		 * enforce a particular semantic here, so for now, we say that
		 * you can have a maximum of 1 send right (in addition to your
		 * send once right). In the future, it would be great to lock
		 * this down even further.
		 */
		if (reply_port->ip_srights > 1) {
			return KERN_INVALID_CAPABILITY;
		}

		/*
		 * The sender can also specify that the receive right should
		 * be immovable. Note that this check only applies to
		 * send-only operations. Combined send/receive or rcv-only
		 * operations can specify an immovable receive right by
		 * opt-ing into guarded descriptors (MACH_RCV_GUARDED_DESC)
		 * and using the MACH_MSG_STRICT_REPLY options flag.
		 */
		if (MACH_SEND_REPLY_IS_IMMOVABLE(options)) {
			if (!reply_port->ip_immovable_receive) {
				return KERN_INVALID_CAPABILITY;
			}
		}
	}

	/*
	 * don't enforce this yet: need a better way of indicating the
	 * receiver wants this...
	 */
#if 0
	if (MACH_RCV_WITH_IMMOVABLE_REPLY(options)) {
		if (!reply_port->ip_immovable_receive) {
			return KERN_INVALID_CAPABILITY;
		}
	}
#endif /* 0 */

	return KERN_SUCCESS;
}
2741
2742 /*
2743 * Routine: ipc_kmsg_validate_reply_context_locked
2744 * Purpose:
2745 * Validate that the current thread is running in the context
2746 * required by the destination port.
2747 * Conditions:
2748 * dest_port is locked
2749 * Returns:
2750 * MACH_MSG_SUCCESS on success.
2751 * On error, an EXC_GUARD exception is also raised.
2752 * This function *always* resets the port reply context.
2753 */
static mach_msg_return_t
ipc_kmsg_validate_reply_context_locked(
	mach_msg_option_t option,
	ipc_port_t dest_port,
	ipc_voucher_t voucher,
	mach_port_name_t voucher_name)
{
	/* consume the reply context unconditionally (see routine comment) */
	uint32_t dest_ctx = dest_port->ip_reply_context;
	dest_port->ip_reply_context = 0;

	if (!ip_active(dest_port)) {
		/* dead destination: nothing to enforce */
		return MACH_MSG_SUCCESS;
	}

	if (voucher == IPC_VOUCHER_NULL || !MACH_PORT_VALID(voucher_name)) {
		/* a strict-reply send must carry a valid voucher */
		if ((option & MACH_SEND_KERNEL) == 0) {
			/* raise EXC_GUARD for user-originated sends only */
			mach_port_guard_exception(voucher_name, 0,
			    (MPG_FLAGS_STRICT_REPLY_INVALID_VOUCHER | dest_ctx),
			    kGUARD_EXC_STRICT_REPLY);
		}
		return MACH_SEND_INVALID_CONTEXT;
	}

	kern_return_t __assert_only kr;
	uint32_t persona_id = 0;
	kr = bank_get_bank_ledger_thread_group_and_persona(voucher, NULL, NULL, &persona_id);
	assert(kr == KERN_SUCCESS);

	if (dest_ctx != persona_id) {
		/* sender's voucher persona does not match the required context */
		if ((option & MACH_SEND_KERNEL) == 0) {
			/* encode actual persona (high bits) and expected context (low bits) */
			mach_port_guard_exception(voucher_name, 0,
			    (MPG_FLAGS_STRICT_REPLY_MISMATCHED_PERSONA | ((((uint64_t)persona_id << 32) & MPG_FLAGS_STRICT_REPLY_MASK) | dest_ctx)),
			    kGUARD_EXC_STRICT_REPLY);
		}
		return MACH_SEND_INVALID_CONTEXT;
	}

	return MACH_MSG_SUCCESS;
}
2793
2794 /*
2795 * Routine: ipc_kmsg_copyin_header
2796 * Purpose:
2797 * "Copy-in" port rights in the header of a message.
2798 * Operates atomically; if it doesn't succeed the
2799 * message header and the space are left untouched.
2800 * If it does succeed the remote/local port fields
2801 * contain object pointers instead of port names,
2802 * and the bits field is updated. The destination port
2803 * will be a valid port pointer.
2804 *
2805 * Conditions:
2806 * Nothing locked.
2807 * Returns:
2808 * MACH_MSG_SUCCESS Successful copyin.
2809 * MACH_SEND_INVALID_HEADER
2810 * Illegal value in the message header bits.
2811 * MACH_SEND_INVALID_DEST The space is dead.
2812 * MACH_SEND_INVALID_DEST Can't copyin destination port.
2813 * (Either KERN_INVALID_NAME or KERN_INVALID_RIGHT.)
2814 * MACH_SEND_INVALID_REPLY Can't copyin reply port.
2815 * (Either KERN_INVALID_NAME or KERN_INVALID_RIGHT.)
2816 */
2817
2818 mach_msg_return_t
2819 ipc_kmsg_copyin_header(
2820 ipc_kmsg_t kmsg,
2821 ipc_space_t space,
2822 mach_msg_priority_t priority,
2823 mach_msg_option_t *optionp)
2824 {
2825 mach_msg_header_t *msg = kmsg->ikm_header;
2826 mach_msg_bits_t mbits = msg->msgh_bits & MACH_MSGH_BITS_USER;
2827 mach_port_name_t dest_name = CAST_MACH_PORT_TO_NAME(msg->msgh_remote_port);
2828 mach_port_name_t reply_name = CAST_MACH_PORT_TO_NAME(msg->msgh_local_port);
2829 mach_port_name_t voucher_name = MACH_PORT_NULL;
2830 kern_return_t kr;
2831
2832 mach_msg_type_name_t dest_type = MACH_MSGH_BITS_REMOTE(mbits);
2833 mach_msg_type_name_t reply_type = MACH_MSGH_BITS_LOCAL(mbits);
2834 mach_msg_type_name_t voucher_type = MACH_MSGH_BITS_VOUCHER(mbits);
2835 ipc_object_t dest_port = IO_NULL;
2836 ipc_object_t reply_port = IO_NULL;
2837 ipc_port_t dest_soright = IP_NULL;
2838 ipc_port_t reply_soright = IP_NULL;
2839 ipc_port_t voucher_soright = IP_NULL;
2840 ipc_port_t release_port = IP_NULL;
2841 ipc_port_t voucher_port = IP_NULL;
2842 ipc_port_t voucher_release_port = IP_NULL;
2843 ipc_entry_t dest_entry = IE_NULL;
2844 ipc_entry_t reply_entry = IE_NULL;
2845 ipc_entry_t voucher_entry = IE_NULL;
2846
2847 int assertcnt = 0;
2848 #if IMPORTANCE_INHERITANCE
2849 boolean_t needboost = FALSE;
2850 #endif /* IMPORTANCE_INHERITANCE */
2851
2852 if ((mbits != msg->msgh_bits) ||
2853 (!MACH_MSG_TYPE_PORT_ANY_SEND(dest_type)) ||
2854 ((reply_type == 0) ?
2855 (reply_name != MACH_PORT_NULL) :
2856 !MACH_MSG_TYPE_PORT_ANY_SEND(reply_type))) {
2857 return MACH_SEND_INVALID_HEADER;
2858 }
2859
2860 if (!MACH_PORT_VALID(dest_name)) {
2861 return MACH_SEND_INVALID_DEST;
2862 }
2863
2864 is_write_lock(space);
2865 if (!is_active(space)) {
2866 is_write_unlock(space);
2867 return MACH_SEND_INVALID_DEST;
2868 }
2869 /* space locked and active */
2870
2871 /*
2872 * If there is a voucher specified, make sure the disposition is
2873 * valid and the entry actually refers to a voucher port. Don't
2874 * actually copy in until we validate destination and reply.
2875 */
2876 if (voucher_type != MACH_MSGH_BITS_ZERO) {
2877 voucher_name = msg->msgh_voucher_port;
2878
2879 if (voucher_name == MACH_PORT_DEAD ||
2880 (voucher_type != MACH_MSG_TYPE_MOVE_SEND &&
2881 voucher_type != MACH_MSG_TYPE_COPY_SEND)) {
2882 is_write_unlock(space);
2883 if ((*optionp & MACH_SEND_KERNEL) == 0) {
2884 mach_port_guard_exception(voucher_name, 0, 0, kGUARD_EXC_SEND_INVALID_VOUCHER);
2885 }
2886 return MACH_SEND_INVALID_VOUCHER;
2887 }
2888
2889 if (voucher_name != MACH_PORT_NULL) {
2890 voucher_entry = ipc_entry_lookup(space, voucher_name);
2891 if (voucher_entry == IE_NULL ||
2892 (voucher_entry->ie_bits & MACH_PORT_TYPE_SEND) == 0 ||
2893 io_kotype(voucher_entry->ie_object) != IKOT_VOUCHER) {
2894 is_write_unlock(space);
2895 if ((*optionp & MACH_SEND_KERNEL) == 0) {
2896 mach_port_guard_exception(voucher_name, 0, 0, kGUARD_EXC_SEND_INVALID_VOUCHER);
2897 }
2898 return MACH_SEND_INVALID_VOUCHER;
2899 }
2900 } else {
2901 voucher_type = MACH_MSG_TYPE_MOVE_SEND;
2902 }
2903 }
2904
2905 if (enforce_strict_reply && MACH_SEND_WITH_STRICT_REPLY(*optionp) &&
2906 (!MACH_PORT_VALID(reply_name) ||
2907 ((reply_type != MACH_MSG_TYPE_MAKE_SEND_ONCE) && (reply_type != MACH_MSG_TYPE_MOVE_SEND_ONCE))
2908 )) {
2909 /*
2910 * The caller cannot enforce a reply context with an invalid
2911 * reply port name, or a non-send_once reply disposition.
2912 */
2913 is_write_unlock(space);
2914 if ((*optionp & MACH_SEND_KERNEL) == 0) {
2915 mach_port_guard_exception(reply_name, 0,
2916 (MPG_FLAGS_STRICT_REPLY_INVALID_REPLY_DISP | reply_type),
2917 kGUARD_EXC_STRICT_REPLY);
2918 }
2919 return MACH_SEND_INVALID_REPLY;
2920 }
2921
2922 /*
2923 * Handle combinations of validating destination and reply; along
2924 * with copying in destination, reply, and voucher in an atomic way.
2925 */
2926
2927 if (dest_name == voucher_name) {
2928 /*
2929 * If the destination name is the same as the voucher name,
2930 * the voucher_entry must already be known. Either that or
2931 * the destination name is MACH_PORT_NULL (i.e. invalid).
2932 */
2933 dest_entry = voucher_entry;
2934 if (dest_entry == IE_NULL) {
2935 goto invalid_dest;
2936 }
2937 /* Check if dest port allows immovable send rights to be sent in the kmsg body */
2938 ipc_kmsg_allow_immovable_send(kmsg, dest_entry);
2939
2940 /*
2941 * Make sure a future copyin of the reply port will succeed.
2942 * Once we start copying in the dest/voucher pair, we can't
2943 * back out.
2944 */
2945 if (MACH_PORT_VALID(reply_name)) {
2946 assert(reply_type != 0); /* because reply_name not null */
2947
2948 /* It is just WRONG if dest, voucher, and reply are all the same. */
2949 if (voucher_name == reply_name) {
2950 goto invalid_reply;
2951 }
2952 reply_entry = ipc_entry_lookup(space, reply_name);
2953 if (reply_entry == IE_NULL) {
2954 goto invalid_reply;
2955 }
2956 assert(dest_entry != reply_entry); /* names are not equal */
2957 if (!ipc_right_copyin_check_reply(space, reply_name, reply_entry, reply_type)) {
2958 goto invalid_reply;
2959 }
2960 }
2961
2962 /*
2963 * Do the joint copyin of the dest disposition and
2964 * voucher disposition from the one entry/port. We
2965 * already validated that the voucher copyin would
2966 * succeed (above). So, any failure in combining
2967 * the copyins can be blamed on the destination.
2968 */
2969 kr = ipc_right_copyin_two(space, dest_name, dest_entry,
2970 dest_type, voucher_type, &dest_port, &dest_soright,
2971 &release_port);
2972 if (kr != KERN_SUCCESS) {
2973 assert(kr != KERN_INVALID_CAPABILITY);
2974 goto invalid_dest;
2975 }
2976 voucher_port = ip_object_to_port(dest_port);
2977
2978 /*
2979 * could not have been one of these dispositions,
2980 * validated the port was a true kernel voucher port above,
2981 * AND was successfully able to copyin both dest and voucher.
2982 */
2983 assert(dest_type != MACH_MSG_TYPE_MAKE_SEND);
2984 assert(dest_type != MACH_MSG_TYPE_MAKE_SEND_ONCE);
2985 assert(dest_type != MACH_MSG_TYPE_MOVE_SEND_ONCE);
2986
2987 /*
2988 * Perform the delayed reply right copyin (guaranteed success).
2989 */
2990 if (reply_entry != IE_NULL) {
2991 kr = ipc_right_copyin(space, reply_name, reply_entry,
2992 reply_type, IPC_RIGHT_COPYIN_FLAGS_DEADOK,
2993 &reply_port, &reply_soright,
2994 &release_port, &assertcnt, 0, NULL);
2995 assert(assertcnt == 0);
2996 assert(kr == KERN_SUCCESS);
2997 }
2998 } else {
2999 if (dest_name == reply_name) {
3000 /*
3001 * Destination and reply ports are the same!
3002 * This is very similar to the case where the
3003 * destination and voucher ports were the same
3004 * (except the reply port disposition is not
3005 * previously validated).
3006 */
3007 dest_entry = ipc_entry_lookup(space, dest_name);
3008 if (dest_entry == IE_NULL) {
3009 goto invalid_dest;
3010 }
3011 ipc_kmsg_allow_immovable_send(kmsg, dest_entry);
3012
3013 reply_entry = dest_entry;
3014 assert(reply_type != 0); /* because name not null */
3015
3016 /*
3017 * Pre-validate that the reply right can be copied in by itself
3018 */
3019 if (!ipc_right_copyin_check_reply(space, reply_name, reply_entry, reply_type)) {
3020 goto invalid_reply;
3021 }
3022
3023 /*
3024 * Do the joint copyin of the dest disposition and
3025 * reply disposition from the one entry/port.
3026 */
3027 kr = ipc_right_copyin_two(space, dest_name, dest_entry,
3028 dest_type, reply_type, &dest_port, &dest_soright,
3029 &release_port);
3030 if (kr == KERN_INVALID_CAPABILITY) {
3031 goto invalid_reply;
3032 } else if (kr != KERN_SUCCESS) {
3033 goto invalid_dest;
3034 }
3035 reply_port = dest_port;
3036 } else {
3037 /*
3038 * Handle destination and reply independently, as
3039 * they are independent entries (even if the entries
3040 * refer to the same port).
3041 *
3042 * This can be the tough case to make atomic.
3043 *
3044 * The difficult problem is serializing with port death.
3045 * The bad case is when dest_port dies after its copyin,
3046 * reply_port dies before its copyin, and dest_port dies before
3047 * reply_port. Then the copyins operated as if dest_port was
3048 * alive and reply_port was dead, which shouldn't have happened
3049 * because they died in the other order.
3050 *
3051 * Note that it is easy for a user task to tell if
3052 * a copyin happened before or after a port died.
3053 * If a port dies before copyin, a dead-name notification
3054 * is generated and the dead name's urefs are incremented,
3055 * and if the copyin happens first, a port-deleted
3056 * notification is generated.
3057 *
3058 * Even so, avoiding that potentially detectable race is too
3059 * expensive - and no known code cares about it. So, we just
3060 * do the expedient thing and copy them in one after the other.
3061 */
3062
3063 dest_entry = ipc_entry_lookup(space, dest_name);
3064 if (dest_entry == IE_NULL) {
3065 goto invalid_dest;
3066 }
3067 assert(dest_entry != voucher_entry);
3068 ipc_kmsg_allow_immovable_send(kmsg, dest_entry);
3069
3070 /*
3071 * Make sure reply port entry is valid before dest copyin.
3072 */
3073 if (MACH_PORT_VALID(reply_name)) {
3074 if (reply_name == voucher_name) {
3075 goto invalid_reply;
3076 }
3077 reply_entry = ipc_entry_lookup(space, reply_name);
3078 if (reply_entry == IE_NULL) {
3079 goto invalid_reply;
3080 }
3081 assert(dest_entry != reply_entry); /* names are not equal */
3082 assert(reply_type != 0); /* because reply_name not null */
3083
3084 if (!ipc_right_copyin_check_reply(space, reply_name, reply_entry, reply_type)) {
3085 goto invalid_reply;
3086 }
3087 }
3088
3089 /*
3090 * copyin the destination.
3091 */
3092 kr = ipc_right_copyin(space, dest_name, dest_entry,
3093 dest_type, (IPC_RIGHT_COPYIN_FLAGS_ALLOW_IMMOVABLE_SEND |
3094 IPC_RIGHT_COPYIN_FLAGS_ALLOW_DEAD_SEND_ONCE),
3095 &dest_port, &dest_soright,
3096 &release_port, &assertcnt, 0, NULL);
3097 assert(assertcnt == 0);
3098 if (kr != KERN_SUCCESS) {
3099 goto invalid_dest;
3100 }
3101 assert(IO_VALID(dest_port));
3102 assert(!IP_VALID(release_port));
3103
3104 /*
3105 * Copyin the pre-validated reply right.
3106 * It's OK if the reply right has gone dead in the meantime.
3107 */
3108 if (MACH_PORT_VALID(reply_name)) {
3109 kr = ipc_right_copyin(space, reply_name, reply_entry,
3110 reply_type, IPC_RIGHT_COPYIN_FLAGS_DEADOK,
3111 &reply_port, &reply_soright,
3112 &release_port, &assertcnt, 0, NULL);
3113 assert(assertcnt == 0);
3114 assert(kr == KERN_SUCCESS);
3115 } else {
3116 /* convert invalid name to equivalent ipc_object type */
3117 reply_port = ip_to_object(CAST_MACH_NAME_TO_PORT(reply_name));
3118 }
3119 }
3120
3121 /*
3122 * Finally can copyin the voucher right now that dest and reply
3123 * are fully copied in (guaranteed success).
3124 */
3125 if (IE_NULL != voucher_entry) {
3126 kr = ipc_right_copyin(space, voucher_name, voucher_entry,
3127 voucher_type, IPC_RIGHT_COPYIN_FLAGS_NONE,
3128 (ipc_object_t *)&voucher_port,
3129 &voucher_soright,
3130 &voucher_release_port,
3131 &assertcnt, 0, NULL);
3132 assert(assertcnt == 0);
3133 assert(KERN_SUCCESS == kr);
3134 assert(IP_VALID(voucher_port));
3135 require_ip_active(voucher_port);
3136 }
3137 }
3138
3139 /*
3140 * The entries might need to be deallocated.
3141 *
3142 * Each entry should be deallocated only once,
3143 * even if it was specified in more than one slot in the header.
3144 * Note that dest can be the same entry as reply or voucher,
3145 * but reply and voucher must be distinct entries.
3146 */
3147 assert(IE_NULL != dest_entry);
3148 if (IE_NULL != reply_entry) {
3149 assert(reply_entry != voucher_entry);
3150 }
3151
3152 if (IE_BITS_TYPE(dest_entry->ie_bits) == MACH_PORT_TYPE_NONE) {
3153 ipc_entry_dealloc(space, dest_name, dest_entry);
3154
3155 if (dest_entry == reply_entry) {
3156 reply_entry = IE_NULL;
3157 }
3158
3159 if (dest_entry == voucher_entry) {
3160 voucher_entry = IE_NULL;
3161 }
3162
3163 dest_entry = IE_NULL;
3164 }
3165 if (IE_NULL != reply_entry &&
3166 IE_BITS_TYPE(reply_entry->ie_bits) == MACH_PORT_TYPE_NONE) {
3167 ipc_entry_dealloc(space, reply_name, reply_entry);
3168 reply_entry = IE_NULL;
3169 }
3170 if (IE_NULL != voucher_entry &&
3171 IE_BITS_TYPE(voucher_entry->ie_bits) == MACH_PORT_TYPE_NONE) {
3172 ipc_entry_dealloc(space, voucher_name, voucher_entry);
3173 voucher_entry = IE_NULL;
3174 }
3175
3176 dest_type = ipc_object_copyin_type(dest_type);
3177 reply_type = ipc_object_copyin_type(reply_type);
3178
3179 /*
3180 * JMM - Without rdar://problem/6275821, this is the last place we can
3181 * re-arm the send-possible notifications. It may trigger unexpectedly
3182 * early (send may NOT have failed), but better than missing. We assure
3183 * we won't miss by forcing MACH_SEND_ALWAYS if we got past arming.
3184 */
3185 if (((*optionp & MACH_SEND_NOTIFY) != 0) &&
3186 dest_type != MACH_MSG_TYPE_PORT_SEND_ONCE &&
3187 dest_entry != IE_NULL && dest_entry->ie_request != IE_REQ_NONE) {
3188 ipc_port_t dport = ip_object_to_port(dest_port);
3189
3190 assert(dport != IP_NULL);
3191 ip_lock(dport);
3192 if (ip_active(dport) && dport->ip_receiver != ipc_space_kernel) {
3193 if (ip_full(dport)) {
3194 #if IMPORTANCE_INHERITANCE
3195 needboost = ipc_port_request_sparm(dport, dest_name,
3196 dest_entry->ie_request,
3197 *optionp,
3198 priority);
3199 if (needboost == FALSE) {
3200 ip_unlock(dport);
3201 }
3202 #else
3203 ipc_port_request_sparm(dport, dest_name,
3204 dest_entry->ie_request,
3205 *optionp,
3206 priority);
3207 ip_unlock(dport);
3208 #endif /* IMPORTANCE_INHERITANCE */
3209 } else {
3210 *optionp |= MACH_SEND_ALWAYS;
3211 ip_unlock(dport);
3212 }
3213 } else {
3214 ip_unlock(dport);
3215 }
3216 }
3217
3218 is_write_unlock(space);
3219
3220 #if IMPORTANCE_INHERITANCE
3221 /*
3222 * If our request is the first boosting send-possible
3223 * notification this cycle, push the boost down the
3224 * destination port.
3225 */
3226 if (needboost == TRUE) {
3227 ipc_port_t dport = ip_object_to_port(dest_port);
3228
3229 /* dport still locked from above */
3230 if (ipc_port_importance_delta(dport, IPID_OPTION_SENDPOSSIBLE, 1) == FALSE) {
3231 ip_unlock(dport);
3232 }
3233 }
3234 #endif /* IMPORTANCE_INHERITANCE */
3235
3236 if (dest_soright != IP_NULL) {
3237 ipc_notify_port_deleted(dest_soright, dest_name);
3238 }
3239 if (reply_soright != IP_NULL) {
3240 ipc_notify_port_deleted(reply_soright, reply_name);
3241 }
3242 if (voucher_soright != IP_NULL) {
3243 ipc_notify_port_deleted(voucher_soright, voucher_name);
3244 }
3245
3246 /*
3247 * No room to store voucher port in in-kernel msg header,
3248 * so we store it back in the kmsg itself. Extract the
3249 * qos, and apply any override before we enqueue the kmsg.
3250 */
3251 if (IP_VALID(voucher_port)) {
3252 kmsg->ikm_voucher = voucher_port;
3253 voucher_type = MACH_MSG_TYPE_MOVE_SEND;
3254 }
3255
3256 msg->msgh_bits = MACH_MSGH_BITS_SET(dest_type, reply_type, voucher_type, mbits);
3257 msg->msgh_remote_port = ip_object_to_port(dest_port);
3258 msg->msgh_local_port = ip_object_to_port(reply_port);
3259
3260 /* capture the qos value(s) for the kmsg */
3261 ipc_kmsg_set_qos(kmsg, *optionp, priority);
3262
3263 if (release_port != IP_NULL) {
3264 ip_release(release_port);
3265 }
3266
3267 if (voucher_release_port != IP_NULL) {
3268 ip_release(voucher_release_port);
3269 }
3270
3271 if (enforce_strict_reply && MACH_SEND_WITH_STRICT_REPLY(*optionp) && IP_VALID(msg->msgh_local_port)) {
3272 /*
3273 * We've already validated that the reply disposition is a
3274 * [make/move] send-once. Ideally, we should enforce that the
3275 * reply port is also not dead, but XPC asynchronous
3276 * cancellation can make the reply port dead before we
3277 * actually make it to the mach_msg send.
3278 *
3279 * Here, we ensure that if we have a non-dead reply port, then
3280 * the reply port's receive right should not be in-transit,
3281 * and should live in the caller's IPC space.
3282 */
3283 ipc_port_t rport = msg->msgh_local_port;
3284 ip_lock(rport);
3285 kr = ipc_kmsg_validate_reply_port_locked(rport, *optionp);
3286 ip_unlock(rport);
3287 if (kr != KERN_SUCCESS) {
3288 /*
3289 * no descriptors have been copied in yet, but the
3290 * full header has been copied in: clean it up
3291 */
3292 ipc_kmsg_clean_partial(kmsg, 0, NULL, 0, 0);
3293 if ((*optionp & MACH_SEND_KERNEL) == 0) {
3294 mach_port_guard_exception(reply_name, 0,
3295 (MPG_FLAGS_STRICT_REPLY_INVALID_REPLY_PORT | kr),
3296 kGUARD_EXC_STRICT_REPLY);
3297 }
3298 return MACH_SEND_INVALID_REPLY;
3299 }
3300 }
3301
3302 return MACH_MSG_SUCCESS;
3303
3304 invalid_reply:
3305 is_write_unlock(space);
3306
3307 if (release_port != IP_NULL) {
3308 ip_release(release_port);
3309 }
3310
3311 assert(voucher_port == IP_NULL);
3312 assert(voucher_soright == IP_NULL);
3313
3314 if ((*optionp & MACH_SEND_KERNEL) == 0) {
3315 mach_port_guard_exception(reply_name, 0, 0, kGUARD_EXC_SEND_INVALID_REPLY);
3316 }
3317 return MACH_SEND_INVALID_REPLY;
3318
3319 invalid_dest:
3320 is_write_unlock(space);
3321
3322 if (release_port != IP_NULL) {
3323 ip_release(release_port);
3324 }
3325
3326 if (reply_soright != IP_NULL) {
3327 ipc_notify_port_deleted(reply_soright, reply_name);
3328 }
3329
3330 assert(voucher_port == IP_NULL);
3331 assert(voucher_soright == IP_NULL);
3332
3333 return MACH_SEND_INVALID_DEST;
3334 }
3335
3336 static mach_msg_descriptor_t *
3337 ipc_kmsg_copyin_port_descriptor(
3338 mach_msg_port_descriptor_t *dsc,
3339 mach_msg_legacy_port_descriptor_t *user_dsc_in,
3340 ipc_space_t space,
3341 ipc_object_t dest,
3342 ipc_kmsg_t kmsg,
3343 mach_msg_option_t *optionp,
3344 mach_msg_return_t *mr)
3345 {
3346 mach_msg_legacy_port_descriptor_t user_dsc = *user_dsc_in;
3347 mach_msg_type_name_t user_disp;
3348 mach_msg_type_name_t result_disp;
3349 mach_port_name_t name;
3350 ipc_object_t object;
3351
3352 user_disp = user_dsc.disposition;
3353 result_disp = ipc_object_copyin_type(user_disp);
3354
3355 name = (mach_port_name_t)user_dsc.name;
3356 if (MACH_PORT_VALID(name)) {
3357 kern_return_t kr = ipc_object_copyin(space, name, user_disp, &object, 0, NULL, kmsg->ikm_flags);
3358 if (kr != KERN_SUCCESS) {
3359 if (((*optionp & MACH_SEND_KERNEL) == 0) && (kr == KERN_INVALID_RIGHT)) {
3360 mach_port_guard_exception(name, 0, 0, kGUARD_EXC_SEND_INVALID_RIGHT);
3361 }
3362 *mr = MACH_SEND_INVALID_RIGHT;
3363 return NULL;
3364 }
3365
3366 if ((result_disp == MACH_MSG_TYPE_PORT_RECEIVE) &&
3367 ipc_port_check_circularity(ip_object_to_port(object),
3368 ip_object_to_port(dest))) {
3369 kmsg->ikm_header->msgh_bits |= MACH_MSGH_BITS_CIRCULAR;
3370 }
3371 dsc->name = ip_object_to_port(object);
3372 } else {
3373 dsc->name = CAST_MACH_NAME_TO_PORT(name);
3374 }
3375 dsc->disposition = result_disp;
3376 dsc->type = MACH_MSG_PORT_DESCRIPTOR;
3377
3378 dsc->pad_end = 0; // debug, unnecessary
3379
3380 return (mach_msg_descriptor_t *)(user_dsc_in + 1);
3381 }
3382
/*
 * Copy in one out-of-line memory descriptor (32- or 64-bit user layout)
 * into its in-kernel form, capturing the user data as a vm_map_copy_t.
 * Returns a pointer to the next user descriptor, or NULL on failure with
 * *mr set (MACH_SEND_INVALID_MEMORY / MACH_MSG_VM_KERNEL).
 *
 * *paddr / *space_needed track the pre-allocated pageable kernel buffer
 * reserved by ipc_kmsg_copyin_body for physical copies; this routine
 * consumes round_page(length) of it per physical copy.
 */
static mach_msg_descriptor_t *
ipc_kmsg_copyin_ool_descriptor(
	mach_msg_ool_descriptor_t *dsc,
	mach_msg_descriptor_t *user_dsc,
	int is_64bit,
	vm_offset_t *paddr,
	vm_map_copy_t *copy,
	vm_size_t *space_needed,
	vm_map_t map,
	__unused mach_msg_option_t *optionp,
	mach_msg_return_t *mr)
{
	vm_size_t length;
	boolean_t dealloc;
	mach_msg_copy_options_t copy_options;
	mach_vm_offset_t addr;
	mach_msg_descriptor_type_t dsc_type;

	/* unpack the user descriptor; layout differs by sender's pointer size */
	if (is_64bit) {
		mach_msg_ool_descriptor64_t *user_ool_dsc = (typeof(user_ool_dsc))user_dsc;

		addr = (mach_vm_offset_t) user_ool_dsc->address;
		length = user_ool_dsc->size;
		dealloc = user_ool_dsc->deallocate;
		copy_options = user_ool_dsc->copy;
		dsc_type = user_ool_dsc->type;

		user_dsc = (typeof(user_dsc))(user_ool_dsc + 1);
	} else {
		mach_msg_ool_descriptor32_t *user_ool_dsc = (typeof(user_ool_dsc))user_dsc;

		addr = CAST_USER_ADDR_T(user_ool_dsc->address);
		dealloc = user_ool_dsc->deallocate;
		copy_options = user_ool_dsc->copy;
		dsc_type = user_ool_dsc->type;
		length = user_ool_dsc->size;

		user_dsc = (typeof(user_dsc))(user_ool_dsc + 1);
	}

	/* fill in the kernel descriptor; address is set below */
	dsc->size = (mach_msg_size_t)length;
	dsc->deallocate = dealloc;
	dsc->copy = copy_options;
	dsc->type = dsc_type;

	if (length == 0) {
		dsc->address = NULL;
	} else if ((length >= MSG_OOL_SIZE_SMALL) &&
	    (copy_options == MACH_MSG_PHYSICAL_COPY) && !dealloc) {
		/*
		 * If the request is a physical copy and the source
		 * is not being deallocated, then allocate space
		 * in the kernel's pageable ipc copy map and copy
		 * the data in. The semantics guarantee that the
		 * data will have been physically copied before
		 * the send operation terminates. Thus if the data
		 * is not being deallocated, we must be prepared
		 * to page if the region is sufficiently large.
		 */
		if (copyin(addr, (char *)*paddr, length)) {
			*mr = MACH_SEND_INVALID_MEMORY;
			return NULL;
		}

		/*
		 * The kernel ipc copy map is marked no_zero_fill.
		 * If the transfer is not a page multiple, we need
		 * to zero fill the balance.
		 */
		if (!page_aligned(length)) {
			(void) memset((void *) (*paddr + length), 0,
			    round_page(length) - length);
		}
		/* steal the staged pages into a vm_map_copy_t (src_destroy) */
		if (vm_map_copyin(ipc_kernel_copy_map, (vm_map_address_t)*paddr,
		    (vm_map_size_t)length, TRUE, copy) != KERN_SUCCESS) {
			*mr = MACH_MSG_VM_KERNEL;
			return NULL;
		}
		dsc->address = (void *)*copy;
		*paddr += round_page(length);
		*space_needed -= round_page(length);
	} else {
		/*
		 * Make a vm_map_copy_t of the data. If the
		 * data is small, this will do an optimized physical
		 * copy. Otherwise, it will do a virtual copy.
		 *
		 * NOTE: A virtual copy is OK if the original is being
		 * deallocated, even if a physical copy was requested.
		 */
		kern_return_t kr = vm_map_copyin(map, addr,
		    (vm_map_size_t)length, dealloc, copy);
		if (kr != KERN_SUCCESS) {
			*mr = (kr == KERN_RESOURCE_SHORTAGE) ?
			    MACH_MSG_VM_KERNEL :
			    MACH_SEND_INVALID_MEMORY;
			return NULL;
		}
		dsc->address = (void *)*copy;
	}

	return user_dsc;
}
3486
/*
 * Copy in one out-of-line ports descriptor: read the array of port names
 * from user memory, translate every valid name into an in-kernel object
 * reference, and attach the resulting array to the kernel descriptor.
 * Returns the next user descriptor, or NULL on failure with *mr set; on
 * failure any rights already copied in are destroyed and the buffer freed.
 */
static mach_msg_descriptor_t *
ipc_kmsg_copyin_ool_ports_descriptor(
	mach_msg_ool_ports_descriptor_t *dsc,
	mach_msg_descriptor_t *user_dsc,
	int is_64bit,
	vm_map_t map,
	ipc_space_t space,
	ipc_object_t dest,
	ipc_kmsg_t kmsg,
	mach_msg_option_t *optionp,
	mach_msg_return_t *mr)
{
	void *data;
	ipc_object_t *objects;
	unsigned int i;
	mach_vm_offset_t addr;
	mach_msg_type_name_t user_disp;
	mach_msg_type_name_t result_disp;
	mach_msg_type_number_t count;
	mach_msg_copy_options_t copy_option;
	boolean_t deallocate;
	mach_msg_descriptor_type_t type;
	vm_size_t ports_length, names_length;

	/* unpack the user descriptor; layout differs by sender's pointer size */
	if (is_64bit) {
		mach_msg_ool_ports_descriptor64_t *user_ool_dsc = (typeof(user_ool_dsc))user_dsc;

		addr = (mach_vm_offset_t)user_ool_dsc->address;
		count = user_ool_dsc->count;
		deallocate = user_ool_dsc->deallocate;
		copy_option = user_ool_dsc->copy;
		user_disp = user_ool_dsc->disposition;
		type = user_ool_dsc->type;

		user_dsc = (typeof(user_dsc))(user_ool_dsc + 1);
	} else {
		mach_msg_ool_ports_descriptor32_t *user_ool_dsc = (typeof(user_ool_dsc))user_dsc;

		addr = CAST_USER_ADDR_T(user_ool_dsc->address);
		count = user_ool_dsc->count;
		deallocate = user_ool_dsc->deallocate;
		copy_option = user_ool_dsc->copy;
		user_disp = user_ool_dsc->disposition;
		type = user_ool_dsc->type;

		user_dsc = (typeof(user_dsc))(user_ool_dsc + 1);
	}

	dsc->deallocate = deallocate;
	dsc->copy = copy_option;
	dsc->type = type;
	dsc->count = count;
	dsc->address = NULL;  /* for now */

	result_disp = ipc_object_copyin_type(user_disp);
	dsc->disposition = result_disp;

	/* We always do a 'physical copy', but you have to specify something valid */
	if (copy_option != MACH_MSG_PHYSICAL_COPY &&
	    copy_option != MACH_MSG_VIRTUAL_COPY) {
		*mr = MACH_SEND_INVALID_TYPE;
		return NULL;
	}

	/* calculate length of data in bytes, rounding up */

	/* ports_length: kernel-side array of port pointers */
	if (os_mul_overflow(count, sizeof(mach_port_t), &ports_length)) {
		*mr = MACH_SEND_TOO_LARGE;
		return NULL;
	}

	/* names_length: user-side array of 32-bit port names */
	if (os_mul_overflow(count, sizeof(mach_port_name_t), &names_length)) {
		*mr = MACH_SEND_TOO_LARGE;
		return NULL;
	}

	/* empty array: dsc->address stays NULL, count is 0 — nothing to do */
	if (ports_length == 0) {
		return user_dsc;
	}

	data = kalloc(ports_length);

	if (data == NULL) {
		*mr = MACH_SEND_NO_BUFFER;
		return NULL;
	}

	/*
	 * On LP64 a name (4 bytes) is smaller than a port pointer (8 bytes),
	 * so the names are staged in the *tail* of the buffer; the loop below
	 * then converts names to objects in place, front to back, without the
	 * writes overtaking the unread names.
	 */
#ifdef __LP64__
	mach_port_name_t *names = &((mach_port_name_t *)data)[count];
#else
	mach_port_name_t *names = ((mach_port_name_t *)data);
#endif

	if (copyinmap(map, addr, names, names_length) != KERN_SUCCESS) {
		kfree(data, ports_length);
		*mr = MACH_SEND_INVALID_MEMORY;
		return NULL;
	}

	/* caller asked for the user's copy of the array to be released */
	if (deallocate) {
		(void) mach_vm_deallocate(map, addr, (mach_vm_size_t)names_length);
	}

	objects = (ipc_object_t *) data;
	dsc->address = data;

	for (i = 0; i < count; i++) {
		mach_port_name_t name = names[i];
		ipc_object_t object;

		if (!MACH_PORT_VALID(name)) {
			/* NULL/DEAD names pass through untranslated */
			objects[i] = ip_to_object(CAST_MACH_NAME_TO_PORT(name));
			continue;
		}

		kern_return_t kr = ipc_object_copyin(space, name, user_disp, &object, 0, NULL, kmsg->ikm_flags);

		if (kr != KERN_SUCCESS) {
			unsigned int j;

			/* unwind: destroy the rights already copied in */
			for (j = 0; j < i; j++) {
				object = objects[j];
				if (IPC_OBJECT_VALID(object)) {
					ipc_object_destroy(object, result_disp);
				}
			}
			kfree(data, ports_length);
			dsc->address = NULL;
			if (((*optionp & MACH_SEND_KERNEL) == 0) && (kr == KERN_INVALID_RIGHT)) {
				mach_port_guard_exception(name, 0, 0, kGUARD_EXC_SEND_INVALID_RIGHT);
			}
			*mr = MACH_SEND_INVALID_RIGHT;
			return NULL;
		}

		/* a receive right moving toward a port it encloses marks the msg */
		if ((dsc->disposition == MACH_MSG_TYPE_PORT_RECEIVE) &&
		    ipc_port_check_circularity(ip_object_to_port(object),
		    ip_object_to_port(dest))) {
			kmsg->ikm_header->msgh_bits |= MACH_MSGH_BITS_CIRCULAR;
		}

		objects[i] = object;
	}

	return user_dsc;
}
3633
3634 static mach_msg_descriptor_t *
3635 ipc_kmsg_copyin_guarded_port_descriptor(
3636 mach_msg_guarded_port_descriptor_t *dsc,
3637 mach_msg_descriptor_t *user_addr,
3638 int is_64bit,
3639 ipc_space_t space,
3640 ipc_object_t dest,
3641 ipc_kmsg_t kmsg,
3642 mach_msg_option_t *optionp,
3643 mach_msg_return_t *mr)
3644 {
3645 mach_msg_descriptor_t *user_dsc;
3646 mach_msg_type_name_t disp;
3647 mach_msg_type_name_t result_disp;
3648 mach_port_name_t name;
3649 mach_msg_guard_flags_t guard_flags;
3650 ipc_object_t object;
3651 mach_port_context_t context;
3652
3653 if (!is_64bit) {
3654 mach_msg_guarded_port_descriptor32_t *user_gp_dsc = (typeof(user_gp_dsc))user_addr;
3655 name = user_gp_dsc->name;
3656 guard_flags = user_gp_dsc->flags;
3657 disp = user_gp_dsc->disposition;
3658 context = user_gp_dsc->context;
3659 user_dsc = (mach_msg_descriptor_t *)(user_gp_dsc + 1);
3660 } else {
3661 mach_msg_guarded_port_descriptor64_t *user_gp_dsc = (typeof(user_gp_dsc))user_addr;
3662 name = user_gp_dsc->name;
3663 guard_flags = user_gp_dsc->flags;
3664 disp = user_gp_dsc->disposition;
3665 context = user_gp_dsc->context;
3666 user_dsc = (mach_msg_descriptor_t *)(user_gp_dsc + 1);
3667 }
3668
3669 guard_flags &= MACH_MSG_GUARD_FLAGS_MASK;
3670 result_disp = ipc_object_copyin_type(disp);
3671
3672 if (MACH_PORT_VALID(name)) {
3673 kern_return_t kr = ipc_object_copyin(space, name, disp, &object, context, &guard_flags, kmsg->ikm_flags);
3674 if (kr != KERN_SUCCESS) {
3675 if (((*optionp & MACH_SEND_KERNEL) == 0) && (kr == KERN_INVALID_RIGHT)) {
3676 mach_port_guard_exception(name, 0, 0, kGUARD_EXC_SEND_INVALID_RIGHT);
3677 }
3678 *mr = MACH_SEND_INVALID_RIGHT;
3679 return NULL;
3680 }
3681
3682 if ((result_disp == MACH_MSG_TYPE_PORT_RECEIVE) &&
3683 ipc_port_check_circularity(ip_object_to_port(object),
3684 ip_object_to_port(dest))) {
3685 kmsg->ikm_header->msgh_bits |= MACH_MSGH_BITS_CIRCULAR;
3686 }
3687 dsc->name = ip_object_to_port(object);
3688 } else {
3689 dsc->name = CAST_MACH_NAME_TO_PORT(name);
3690 }
3691 dsc->flags = guard_flags;
3692 dsc->disposition = result_disp;
3693 dsc->type = MACH_MSG_GUARDED_PORT_DESCRIPTOR;
3694
3695 #if __LP64__
3696 dsc->pad_end = 0; // debug, unnecessary
3697 #endif
3698
3699 return user_dsc;
3700 }
3701
3702
3703 /*
3704 * Routine: ipc_kmsg_copyin_body
3705 * Purpose:
3706 * "Copy-in" port rights and out-of-line memory
3707 * in the message body.
3708 *
3709 * In all failure cases, the message is left holding
3710 * no rights or memory. However, the message buffer
3711 * is not deallocated. If successful, the message
3712 * contains a valid destination port.
3713 * Conditions:
3714 * Nothing locked.
3715 * Returns:
3716 * MACH_MSG_SUCCESS Successful copyin.
3717 * MACH_SEND_INVALID_MEMORY Can't grab out-of-line memory.
3718 * MACH_SEND_INVALID_RIGHT Can't copyin port right in body.
3719 * MACH_SEND_INVALID_TYPE Bad type specification.
3720 * MACH_SEND_MSG_TOO_SMALL Body is too small for types/data.
3721 * MACH_SEND_INVALID_RT_OOL_SIZE OOL Buffer too large for RT
3722 * MACH_MSG_INVALID_RT_DESCRIPTOR Dealloc and RT are incompatible
3723 * MACH_SEND_NO_GRANT_DEST Dest port doesn't accept ports in body
3724 */
3725
mach_msg_return_t
ipc_kmsg_copyin_body(
	ipc_kmsg_t kmsg,
	ipc_space_t space,
	vm_map_t map,
	mach_msg_option_t *optionp)
{
	ipc_object_t dest;
	mach_msg_body_t *body;
	mach_msg_descriptor_t *daddr, *naddr, *end;
	mach_msg_descriptor_t *user_addr, *kern_addr;
	mach_msg_type_number_t dsc_count;
	/* sender bitness inferred from its map; selects user descriptor layout */
	boolean_t is_task_64bit = (map->max_offset > VM_MAX_ADDRESS);
	boolean_t complex = FALSE;
	boolean_t contains_port_desc = FALSE;
	vm_size_t space_needed = 0;      /* bytes of kernel copy map to reserve */
	vm_offset_t paddr = 0;           /* staging buffer for physical copies */
	vm_map_copy_t copy = VM_MAP_COPY_NULL;
	mach_msg_type_number_t i;
	mach_msg_return_t mr = MACH_MSG_SUCCESS;
	ipc_port_t remote_port = kmsg->ikm_header->msgh_remote_port;

	vm_size_t descriptor_size = 0;   /* total user-layout descriptor bytes */

	mach_msg_type_number_t total_ool_port_count = 0;
	mach_msg_guard_flags_t guard_flags = 0;
	mach_port_context_t context;
	mach_msg_type_name_t disp;

	/*
	 * Determine if the target is a kernel port.
	 */
	dest = ip_to_object(remote_port);
	body = (mach_msg_body_t *) (kmsg->ikm_header + 1);
	naddr = (mach_msg_descriptor_t *) (body + 1);
	end = (mach_msg_descriptor_t *) ((vm_offset_t)kmsg->ikm_header + kmsg->ikm_header->msgh_size);

	dsc_count = body->msgh_descriptor_count;
	if (dsc_count == 0) {
		return MACH_MSG_SUCCESS;
	}

	/*
	 * Make an initial pass to determine kernel VM space requirements for
	 * physical copies and possible contraction of the descriptors from
	 * processes with pointers larger than the kernel's.
	 */
	daddr = NULL;
	for (i = 0; i < dsc_count; i++) {
		mach_msg_size_t size;
		mach_msg_type_number_t ool_port_count = 0;

		daddr = naddr;

		/* make sure the descriptor fits in the message */
		if (is_task_64bit) {
			/* need at least the 12-byte common prefix to read type safely */
			if ((mach_msg_descriptor_t*)((vm_offset_t)daddr + 12) > end) {
				mr = MACH_SEND_MSG_TOO_SMALL;
				goto clean_message;
			}

			/* 64-bit senders: some descriptor kinds are 16 bytes, others 12 */
			switch (daddr->type.type) {
			case MACH_MSG_OOL_DESCRIPTOR:
			case MACH_MSG_OOL_VOLATILE_DESCRIPTOR:
			case MACH_MSG_OOL_PORTS_DESCRIPTOR:
			case MACH_MSG_GUARDED_PORT_DESCRIPTOR:
				descriptor_size += 16;
				naddr = (typeof(naddr))((vm_offset_t)daddr + 16);
				break;
			default:
				descriptor_size += 12;
				naddr = (typeof(naddr))((vm_offset_t)daddr + 12);
				break;
			}
		} else {
			/* 32-bit senders: all descriptors are 12 bytes */
			descriptor_size += 12;
			naddr = (typeof(naddr))((vm_offset_t)daddr + 12);
		}

		if (naddr > end) {
			mr = MACH_SEND_MSG_TOO_SMALL;
			goto clean_message;
		}

		switch (daddr->type.type) {
		case MACH_MSG_OOL_DESCRIPTOR:
		case MACH_MSG_OOL_VOLATILE_DESCRIPTOR:
			size = (is_task_64bit) ?
			    ((mach_msg_ool_descriptor64_t *)daddr)->size :
			    daddr->out_of_line.size;

			if (daddr->out_of_line.copy != MACH_MSG_PHYSICAL_COPY &&
			    daddr->out_of_line.copy != MACH_MSG_VIRTUAL_COPY) {
				/*
				 * Invalid copy option
				 */
				mr = MACH_SEND_INVALID_TYPE;
				goto clean_message;
			}

			if ((size >= MSG_OOL_SIZE_SMALL) &&
			    (daddr->out_of_line.copy == MACH_MSG_PHYSICAL_COPY) &&
			    !(daddr->out_of_line.deallocate)) {
				/*
				 * Out-of-line memory descriptor, accumulate kernel
				 * memory requirements
				 */
				if (space_needed + round_page(size) <= space_needed) {
					/* Overflow detected */
					mr = MACH_MSG_VM_KERNEL;
					goto clean_message;
				}

				space_needed += round_page(size);
				if (space_needed > ipc_kmsg_max_vm_space) {
					/* Per message kernel memory limit exceeded */
					mr = MACH_MSG_VM_KERNEL;
					goto clean_message;
				}
			}
			break;
		case MACH_MSG_PORT_DESCRIPTOR:
			if (os_add_overflow(total_ool_port_count, 1, &total_ool_port_count)) {
				/* Overflow detected */
				mr = MACH_SEND_TOO_LARGE;
				goto clean_message;
			}
			contains_port_desc = TRUE;
			break;
		case MACH_MSG_OOL_PORTS_DESCRIPTOR:
			ool_port_count = (is_task_64bit) ?
			    ((mach_msg_ool_ports_descriptor64_t *)daddr)->count :
			    daddr->ool_ports.count;

			if (os_add_overflow(total_ool_port_count, ool_port_count, &total_ool_port_count)) {
				/* Overflow detected */
				mr = MACH_SEND_TOO_LARGE;
				goto clean_message;
			}

			if (ool_port_count > (ipc_kmsg_max_vm_space / sizeof(mach_port_t))) {
				/* Per message kernel memory limit exceeded */
				mr = MACH_SEND_TOO_LARGE;
				goto clean_message;
			}
			contains_port_desc = TRUE;
			break;
		case MACH_MSG_GUARDED_PORT_DESCRIPTOR:
			guard_flags = (is_task_64bit) ?
			    ((mach_msg_guarded_port_descriptor64_t *)daddr)->flags :
			    ((mach_msg_guarded_port_descriptor32_t *)daddr)->flags;
			context = (is_task_64bit) ?
			    ((mach_msg_guarded_port_descriptor64_t *)daddr)->context :
			    ((mach_msg_guarded_port_descriptor32_t *)daddr)->context;
			disp = (is_task_64bit) ?
			    ((mach_msg_guarded_port_descriptor64_t *)daddr)->disposition :
			    ((mach_msg_guarded_port_descriptor32_t *)daddr)->disposition;

			/* Only MACH_MSG_TYPE_MOVE_RECEIVE is supported for now */
			if (!guard_flags || ((guard_flags & ~MACH_MSG_GUARD_FLAGS_MASK) != 0) ||
			    ((guard_flags & MACH_MSG_GUARD_FLAGS_UNGUARDED_ON_SEND) && (context != 0)) ||
			    (disp != MACH_MSG_TYPE_MOVE_RECEIVE)) {
				/*
				 * Invalid guard flags, context or disposition
				 */
				mr = MACH_SEND_INVALID_TYPE;
				goto clean_message;
			}
			if (os_add_overflow(total_ool_port_count, 1, &total_ool_port_count)) {
				/* Overflow detected */
				mr = MACH_SEND_TOO_LARGE;
				goto clean_message;
			}
			contains_port_desc = TRUE;
			break;
		}
	}

	/* Sending more than 16383 rights in one message seems crazy */
	if (total_ool_port_count >= (MACH_PORT_UREFS_MAX / 4)) {
		mr = MACH_SEND_TOO_LARGE;
		goto clean_message;
	}

	/*
	 * Check if dest is a no-grant port; Since this bit is set only on
	 * port construction and cannot be unset later, we can peek at the
	 * bit without paying the cost of locking the port.
	 */
	if (contains_port_desc && remote_port->ip_no_grant) {
		mr = MACH_SEND_NO_GRANT_DEST;
		goto clean_message;
	}

	/*
	 * Allocate space in the pageable kernel ipc copy map for all the
	 * ool data that is to be physically copied. Map is marked wait for
	 * space.
	 */
	if (space_needed) {
		if (vm_allocate_kernel(ipc_kernel_copy_map, &paddr, space_needed,
		    VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_IPC) != KERN_SUCCESS) {
			mr = MACH_MSG_VM_KERNEL;
			goto clean_message;
		}
	}

	/* user_addr = just after base as it was copied in */
	user_addr = (mach_msg_descriptor_t *)((vm_offset_t)kmsg->ikm_header + sizeof(mach_msg_base_t));

	/* Shift the mach_msg_base_t down to make room for dsc_count*16bytes of descriptors on 64 bit kernels
	 */
	if (descriptor_size != 16 * dsc_count) {
		vm_offset_t dsc_adjust = 16 * dsc_count - descriptor_size;

		/*
		 * Kernel descriptors are all 16 bytes, wider than the user
		 * layout; move the header/body down so the expansion does
		 * not overwrite user descriptors not yet copied in.
		 */
		memmove((char *)(((vm_offset_t)kmsg->ikm_header) - dsc_adjust), kmsg->ikm_header, sizeof(mach_msg_base_t));
		kmsg->ikm_header = (mach_msg_header_t *)((vm_offset_t)kmsg->ikm_header - dsc_adjust);

		/* Update the message size for the larger in-kernel representation */
		kmsg->ikm_header->msgh_size += (mach_msg_size_t)dsc_adjust;
	}


	/* kern_addr = just after base after it has been (conditionally) moved */
	kern_addr = (mach_msg_descriptor_t *)((vm_offset_t)kmsg->ikm_header + sizeof(mach_msg_base_t));

	/* handle the OOL regions and port descriptors. */
	for (i = 0; i < dsc_count; i++) {
		/*
		 * Each helper copies in one descriptor, advancing user_addr by
		 * the user-layout size; kern_addr advances by one 16-byte
		 * kernel descriptor. Helpers return NULL and set mr on error.
		 */
		switch (user_addr->type.type) {
		case MACH_MSG_PORT_DESCRIPTOR:
			user_addr = ipc_kmsg_copyin_port_descriptor((mach_msg_port_descriptor_t *)kern_addr,
			    (mach_msg_legacy_port_descriptor_t *)user_addr, space, dest, kmsg, optionp, &mr);
			kern_addr++;
			complex = TRUE;
			break;
		case MACH_MSG_OOL_VOLATILE_DESCRIPTOR:
		case MACH_MSG_OOL_DESCRIPTOR:
			user_addr = ipc_kmsg_copyin_ool_descriptor((mach_msg_ool_descriptor_t *)kern_addr,
			    user_addr, is_task_64bit, &paddr, &copy, &space_needed, map, optionp, &mr);
			kern_addr++;
			complex = TRUE;
			break;
		case MACH_MSG_OOL_PORTS_DESCRIPTOR:
			user_addr = ipc_kmsg_copyin_ool_ports_descriptor((mach_msg_ool_ports_descriptor_t *)kern_addr,
			    user_addr, is_task_64bit, map, space, dest, kmsg, optionp, &mr);
			kern_addr++;
			complex = TRUE;
			break;
		case MACH_MSG_GUARDED_PORT_DESCRIPTOR:
			user_addr = ipc_kmsg_copyin_guarded_port_descriptor((mach_msg_guarded_port_descriptor_t *)kern_addr,
			    user_addr, is_task_64bit, space, dest, kmsg, optionp, &mr);
			kern_addr++;
			complex = TRUE;
			break;
		default:
			/* Invalid descriptor */
			mr = MACH_SEND_INVALID_TYPE;
			break;
		}

		if (MACH_MSG_SUCCESS != mr) {
			/* clean from start of message descriptors to i */
			ipc_kmsg_clean_partial(kmsg, i,
			    (mach_msg_descriptor_t *)((mach_msg_base_t *)kmsg->ikm_header + 1),
			    paddr, space_needed);
			goto out;
		}
	} /* End of loop */

	if (!complex) {
		kmsg->ikm_header->msgh_bits &= ~MACH_MSGH_BITS_COMPLEX;
	}
out:
	return mr;

clean_message:
	/* no descriptors have been copied in yet */
	ipc_kmsg_clean_partial(kmsg, 0, NULL, 0, 0);
	return mr;
}
4006
4007
4008 /*
4009 * Routine: ipc_kmsg_copyin
4010 * Purpose:
4011 * "Copy-in" port rights and out-of-line memory
4012 * in the message.
4013 *
4014 * In all failure cases, the message is left holding
4015 * no rights or memory. However, the message buffer
4016 * is not deallocated. If successful, the message
4017 * contains a valid destination port.
4018 * Conditions:
4019 * Nothing locked.
4020 * Returns:
4021 * MACH_MSG_SUCCESS Successful copyin.
4022 * MACH_SEND_INVALID_HEADER Illegal value in the message header bits.
4023 * MACH_SEND_INVALID_DEST Can't copyin destination port.
4024 * MACH_SEND_INVALID_REPLY Can't copyin reply port.
4025 * MACH_SEND_INVALID_MEMORY Can't grab out-of-line memory.
4026 * MACH_SEND_INVALID_RIGHT Can't copyin port right in body.
4027 * MACH_SEND_INVALID_TYPE Bad type specification.
4028 * MACH_SEND_MSG_TOO_SMALL Body is too small for types/data.
4029 */
4030
mach_msg_return_t
ipc_kmsg_copyin(
	ipc_kmsg_t kmsg,
	ipc_space_t space,
	vm_map_t map,
	mach_msg_priority_t priority,
	mach_msg_option_t *optionp)
{
	mach_msg_return_t mr;
	/* captured before copyin_header rewrites msgh_remote_port */
	mach_port_name_t dest_name = CAST_MACH_PORT_TO_NAME(kmsg->ikm_header->msgh_remote_port);

	/* strip any header bits the sender is not allowed to set */
	kmsg->ikm_header->msgh_bits &= MACH_MSGH_BITS_USER;

	mr = ipc_kmsg_copyin_header(kmsg, space, priority, optionp);

	if (mr != MACH_MSG_SUCCESS) {
		return mr;
	}

	/* Get the message filter policy if the task and port support filtering */
	mach_msg_filter_id fid = 0;
	if (ip_enforce_msg_filtering(kmsg->ikm_header->msgh_remote_port) &&
	    task_get_filter_msg_flag(current_task())) {
		/* port label is yet to be supported */
		boolean_t allow_kmsg = mach_msg_fetch_filter_policy(NULL, kmsg->ikm_header->msgh_id, &fid);
		if (!allow_kmsg) {
			mach_port_guard_exception(dest_name, 0, 0, kGUARD_EXC_MSG_FILTERED);
			/* no descriptors have been copied in yet */
			ipc_kmsg_clean_partial(kmsg, 0, NULL, 0, 0);
			return MACH_SEND_MSG_FILTERED;
		}
		kmsg->ikm_filter_policy_id = fid;
	}

	/* ktrace the send with addresses obfuscated via ADDRPERM */
	KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_MSG_SEND) | DBG_FUNC_NONE,
	    VM_KERNEL_ADDRPERM((uintptr_t)kmsg),
	    (uintptr_t)kmsg->ikm_header->msgh_bits,
	    (uintptr_t)kmsg->ikm_header->msgh_id,
	    VM_KERNEL_ADDRPERM((uintptr_t)unsafe_convert_port_to_voucher(kmsg->ikm_voucher)),
	    0);

	DEBUG_KPRINT_SYSCALL_IPC("ipc_kmsg_copyin header:\n%.8x\n%.8x\n%p\n%p\n%p\n%.8x\n",
	    kmsg->ikm_header->msgh_size,
	    kmsg->ikm_header->msgh_bits,
	    kmsg->ikm_header->msgh_remote_port,
	    kmsg->ikm_header->msgh_local_port,
	    kmsg->ikm_voucher,
	    kmsg->ikm_header->msgh_id);

	/* body copyin only runs for complex messages (descriptors present) */
	if (kmsg->ikm_header->msgh_bits & MACH_MSGH_BITS_COMPLEX) {
		mr = ipc_kmsg_copyin_body( kmsg, space, map, optionp);

		/* unreachable if !DEBUG */
		__unreachable_ok_push
		if (DEBUG_KPRINT_SYSCALL_PREDICATE(DEBUG_KPRINT_SYSCALL_IPC_MASK)) {
			kprintf("body:\n");
			uint32_t i;
			for (i = 0; i * 4 < (kmsg->ikm_header->msgh_size - sizeof(mach_msg_header_t)); i++) {
				kprintf("%.4x\n", ((uint32_t *)(kmsg->ikm_header + 1))[i]);
			}
		}
		__unreachable_ok_pop
	}

	/* Sign the message contents */
	if (mr == MACH_MSG_SUCCESS) {
		ikm_sign(kmsg);
	}

	return mr;
}
4102
4103 /*
4104 * Routine: ipc_kmsg_copyin_from_kernel
4105 * Purpose:
4106 * "Copy-in" port rights and out-of-line memory
4107 * in a message sent from the kernel.
4108 *
4109 * Because the message comes from the kernel,
4110 * the implementation assumes there are no errors
4111 * or peculiarities in the message.
4112 * Conditions:
4113 * Nothing locked.
4114 */
4115
mach_msg_return_t
ipc_kmsg_copyin_from_kernel(
	ipc_kmsg_t kmsg)
{
	mach_msg_bits_t bits = kmsg->ikm_header->msgh_bits;
	mach_msg_type_name_t rname = MACH_MSGH_BITS_REMOTE(bits);
	mach_msg_type_name_t lname = MACH_MSGH_BITS_LOCAL(bits);
	ipc_object_t remote = ip_to_object(kmsg->ikm_header->msgh_remote_port);
	ipc_object_t local = ip_to_object(kmsg->ikm_header->msgh_local_port);
	ipc_port_t dest = kmsg->ikm_header->msgh_remote_port;

	/* translate the destination and reply ports */
	if (!IO_VALID(remote)) {
		return MACH_SEND_INVALID_DEST;
	}

	ipc_object_copyin_from_kernel(remote, rname);
	if (IO_VALID(local)) {
		ipc_object_copyin_from_kernel(local, lname);
	}

	/*
	 * The common case is a complex message with no reply port,
	 * because that is what the memory_object interface uses.
	 */

	if (bits == (MACH_MSGH_BITS_COMPLEX |
	    MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, 0))) {
		/* fast path: rewrite the copied-send into a moved-send */
		bits = (MACH_MSGH_BITS_COMPLEX |
		    MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND, 0));

		kmsg->ikm_header->msgh_bits = bits;
	} else {
		/* general case: translate both dispositions individually */
		bits = (MACH_MSGH_BITS_OTHER(bits) |
		    MACH_MSGH_BITS(ipc_object_copyin_type(rname),
		    ipc_object_copyin_type(lname)));

		kmsg->ikm_header->msgh_bits = bits;
	}

	if (bits & MACH_MSGH_BITS_COMPLEX) {
		/*
		 * Check if the remote port accepts ports in the body.
		 */
		if (dest->ip_no_grant) {
			mach_msg_descriptor_t *saddr;
			mach_msg_body_t *body;
			mach_msg_type_number_t i, count;

			body = (mach_msg_body_t *) (kmsg->ikm_header + 1);
			saddr = (mach_msg_descriptor_t *) (body + 1);
			count = body->msgh_descriptor_count;

			/* pre-scan: reject before copying any right in */
			for (i = 0; i < count; i++, saddr++) {
				switch (saddr->type.type) {
				case MACH_MSG_PORT_DESCRIPTOR:
				case MACH_MSG_OOL_PORTS_DESCRIPTOR:
				case MACH_MSG_GUARDED_PORT_DESCRIPTOR:
					/* no descriptors have been copied in yet */
					ipc_kmsg_clean_partial(kmsg, 0, NULL, 0, 0);
					return MACH_SEND_NO_GRANT_DEST;
				}
			}
		}

		mach_msg_descriptor_t *saddr;
		mach_msg_body_t *body;
		mach_msg_type_number_t i, count;

		body = (mach_msg_body_t *) (kmsg->ikm_header + 1);
		saddr = (mach_msg_descriptor_t *) (body + 1);
		count = body->msgh_descriptor_count;

		/* copy in every descriptor; kernel senders are trusted, so no
		 * per-descriptor validation beyond disposition translation */
		for (i = 0; i < count; i++, saddr++) {
			switch (saddr->type.type) {
			case MACH_MSG_PORT_DESCRIPTOR: {
				mach_msg_type_name_t name;
				ipc_object_t object;
				mach_msg_port_descriptor_t *dsc;

				dsc = &saddr->port;

				/* this is really the type SEND, SEND_ONCE, etc. */
				name = dsc->disposition;
				object = ip_to_object(dsc->name);
				dsc->disposition = ipc_object_copyin_type(name);

				if (!IO_VALID(object)) {
					break;
				}

				ipc_object_copyin_from_kernel(object, name);

				/* CDY avoid circularity when the destination is also */
				/* the kernel. This check should be changed into an */
				/* assert when the new kobject model is in place since*/
				/* ports will not be used in kernel to kernel chats */

				if (ip_object_to_port(remote)->ip_receiver != ipc_space_kernel) {
					if ((dsc->disposition == MACH_MSG_TYPE_PORT_RECEIVE) &&
					    ipc_port_check_circularity(ip_object_to_port(object),
					    ip_object_to_port(remote))) {
						kmsg->ikm_header->msgh_bits |=
						    MACH_MSGH_BITS_CIRCULAR;
					}
				}
				break;
			}
			case MACH_MSG_OOL_VOLATILE_DESCRIPTOR:
			case MACH_MSG_OOL_DESCRIPTOR: {
				/*
				 * The sender should supply ready-made memory, i.e.
				 * a vm_map_copy_t, so we don't need to do anything.
				 */
				break;
			}
			case MACH_MSG_OOL_PORTS_DESCRIPTOR: {
				ipc_object_t *objects;
				unsigned int j;
				mach_msg_type_name_t name;
				mach_msg_ool_ports_descriptor_t *dsc;

				dsc = (mach_msg_ool_ports_descriptor_t *)&saddr->ool_ports;

				/* this is really the type SEND, SEND_ONCE, etc. */
				name = dsc->disposition;
				dsc->disposition = ipc_object_copyin_type(name);

				/* kernel senders supply in-kernel object pointers */
				objects = (ipc_object_t *) dsc->address;

				for (j = 0; j < dsc->count; j++) {
					ipc_object_t object = objects[j];

					if (!IO_VALID(object)) {
						continue;
					}

					ipc_object_copyin_from_kernel(object, name);

					if ((dsc->disposition == MACH_MSG_TYPE_PORT_RECEIVE) &&
					    ipc_port_check_circularity(ip_object_to_port(object),
					    ip_object_to_port(remote))) {
						kmsg->ikm_header->msgh_bits |= MACH_MSGH_BITS_CIRCULAR;
					}
				}
				break;
			}
			case MACH_MSG_GUARDED_PORT_DESCRIPTOR: {
				mach_msg_guarded_port_descriptor_t *dsc = (typeof(dsc)) & saddr->guarded_port;
				mach_msg_type_name_t disp = dsc->disposition;
				ipc_object_t object = ip_to_object(dsc->name);
				dsc->disposition = ipc_object_copyin_type(disp);
				/* kernel senders never guard the right in transit */
				assert(dsc->flags == 0);

				if (!IO_VALID(object)) {
					break;
				}

				ipc_object_copyin_from_kernel(object, disp);
				/*
				 * avoid circularity when the destination is also
				 * the kernel. This check should be changed into an
				 * assert when the new kobject model is in place since
				 * ports will not be used in kernel to kernel chats
				 */

				if (ip_object_to_port(remote)->ip_receiver != ipc_space_kernel) {
					if ((dsc->disposition == MACH_MSG_TYPE_PORT_RECEIVE) &&
					    ipc_port_check_circularity(ip_object_to_port(object),
					    ip_object_to_port(remote))) {
						kmsg->ikm_header->msgh_bits |= MACH_MSGH_BITS_CIRCULAR;
					}
				}
				break;
			}
			default: {
#if MACH_ASSERT
				panic("ipc_kmsg_copyin_from_kernel: bad descriptor");
#endif /* MACH_ASSERT */
			}
			}
		}
	}

	/* Add the signature to the message */
	ikm_sign(kmsg);

	return MACH_MSG_SUCCESS;
}
4305
4306 #if IKM_SUPPORT_LEGACY
/*
 *	Routine:	ipc_kmsg_copyin_from_kernel_legacy
 *	Purpose:
 *		"Copy-in" a message whose header and body were built in
 *		kernel space using the legacy (compact, 32-bit field)
 *		descriptor layout.  Translates the header port rights,
 *		expands each legacy descriptor in place to the larger
 *		in-kernel descriptor representation, and copies in any
 *		port rights named by the descriptors.
 *	Conditions:
 *		Nothing locked.
 *	Returns:
 *		MACH_MSG_SUCCESS on success; MACH_SEND_INVALID_DEST if the
 *		destination port is invalid; MACH_SEND_NO_GRANT_DEST if the
 *		destination refuses port-carrying messages.
 */
mach_msg_return_t
ipc_kmsg_copyin_from_kernel_legacy(
	ipc_kmsg_t kmsg)
{
	mach_msg_bits_t bits = kmsg->ikm_header->msgh_bits;
	mach_msg_type_name_t rname = MACH_MSGH_BITS_REMOTE(bits);
	mach_msg_type_name_t lname = MACH_MSGH_BITS_LOCAL(bits);
	ipc_object_t remote = ip_to_object(kmsg->ikm_header->msgh_remote_port);
	ipc_object_t local = ip_to_object(kmsg->ikm_header->msgh_local_port);
	ipc_port_t dest = kmsg->ikm_header->msgh_remote_port;

	/* translate the destination and reply ports */
	if (!IO_VALID(remote)) {
		return MACH_SEND_INVALID_DEST;
	}

	ipc_object_copyin_from_kernel(remote, rname);
	if (IO_VALID(local)) {
		ipc_object_copyin_from_kernel(local, lname);
	}

	/*
	 * The common case is a complex message with no reply port,
	 * because that is what the memory_object interface uses.
	 */

	if (bits == (MACH_MSGH_BITS_COMPLEX |
	    MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, 0))) {
		/* fast path: rewrite COPY_SEND to PORT_SEND directly */
		bits = (MACH_MSGH_BITS_COMPLEX |
		    MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND, 0));

		kmsg->ikm_header->msgh_bits = bits;
	} else {
		/* general case: translate both dispositions to copied-in form */
		bits = (MACH_MSGH_BITS_OTHER(bits) |
		    MACH_MSGH_BITS(ipc_object_copyin_type(rname),
		    ipc_object_copyin_type(lname)));

		kmsg->ikm_header->msgh_bits = bits;
	}

	if (bits & MACH_MSGH_BITS_COMPLEX) {
		/*
		 * If the destination refuses port grants, scan the body
		 * first and fail before any descriptor is copied in.
		 */
		if (dest->ip_no_grant) {
			mach_msg_descriptor_t *saddr;
			mach_msg_body_t *body;
			mach_msg_type_number_t i, count;

			body = (mach_msg_body_t *) (kmsg->ikm_header + 1);
			saddr = (mach_msg_descriptor_t *) (body + 1);
			count = body->msgh_descriptor_count;

			for (i = 0; i < count; i++, saddr++) {
				switch (saddr->type.type) {
				case MACH_MSG_PORT_DESCRIPTOR:
				case MACH_MSG_OOL_PORTS_DESCRIPTOR:
				case MACH_MSG_GUARDED_PORT_DESCRIPTOR:
					/* no descriptors have been copied in yet */
					ipc_kmsg_clean_partial(kmsg, 0, NULL, 0, 0);
					return MACH_SEND_NO_GRANT_DEST;
				}
			}
		}

		mach_msg_legacy_descriptor_t *saddr;
		mach_msg_descriptor_t *daddr;
		mach_msg_body_t *body;
		mach_msg_type_number_t i, count;

		body = (mach_msg_body_t *) (kmsg->ikm_header + 1);
		saddr = (typeof(saddr))(body + 1);
		count = body->msgh_descriptor_count;

		if (count) {
			/*
			 * Each in-kernel descriptor is 4 bytes larger than its
			 * legacy counterpart.  Slide the header+body down by
			 * 4 * count so the expanded descriptors fit in place;
			 * saddr still walks the legacy (source) descriptors
			 * while daddr writes the expanded (destination) ones.
			 */
			vm_offset_t dsc_adjust = 4 * count;
			memmove((char *)(((vm_offset_t)kmsg->ikm_header) - dsc_adjust), kmsg->ikm_header, sizeof(mach_msg_base_t));
			kmsg->ikm_header = (mach_msg_header_t *)((vm_offset_t)kmsg->ikm_header - dsc_adjust);
			/* Update the message size for the larger in-kernel representation */
			kmsg->ikm_header->msgh_size += dsc_adjust;
		}
		daddr = (mach_msg_descriptor_t *)((vm_offset_t)kmsg->ikm_header + sizeof(mach_msg_base_t));

		for (i = 0; i < count; i++, saddr++, daddr++) {
			switch (saddr->type.type) {
			case MACH_MSG_PORT_DESCRIPTOR: {
				mach_msg_type_name_t name;
				ipc_object_t object;
				mach_msg_legacy_port_descriptor_t *dsc;
				mach_msg_port_descriptor_t *dest_dsc;

				dsc = (typeof(dsc)) & saddr->port;
				dest_dsc = &daddr->port;

				/* this is really the type SEND, SEND_ONCE, etc. */
				name = dsc->disposition;
				object = ip_to_object(CAST_MACH_NAME_TO_PORT(dsc->name));
				dest_dsc->disposition = ipc_object_copyin_type(name);
				dest_dsc->name = ip_object_to_port(object);
				dest_dsc->type = MACH_MSG_PORT_DESCRIPTOR;

				if (!IO_VALID(object)) {
					break;
				}

				ipc_object_copyin_from_kernel(object, name);

				/* CDY avoid circularity when the destination is also */
				/* the kernel.  This check should be changed into an */
				/* assert when the new kobject model is in place since*/
				/* ports will not be used in kernel to kernel chats */

				if (ip_object_to_port(remote)->ip_receiver != ipc_space_kernel) {
					if ((dest_dsc->disposition == MACH_MSG_TYPE_PORT_RECEIVE) &&
					    ipc_port_check_circularity(ip_object_to_port(object),
					    ip_object_to_port(remote))) {
						kmsg->ikm_header->msgh_bits |=
						    MACH_MSGH_BITS_CIRCULAR;
					}
				}
				break;
			}
			case MACH_MSG_OOL_VOLATILE_DESCRIPTOR:
			case MACH_MSG_OOL_DESCRIPTOR: {
				/* The sender should supply ready-made memory, i.e. a vm_map_copy_t
				 * so we don't need to do anything special. */

				/* widen the 32-bit legacy OOL descriptor field-by-field */
				mach_msg_ool_descriptor32_t *source_dsc = &saddr->out_of_line32;
				mach_msg_ool_descriptor_t *dest_dsc = (typeof(dest_dsc)) & daddr->out_of_line;

				vm_offset_t address = source_dsc->address;
				vm_size_t size = source_dsc->size;
				boolean_t deallocate = source_dsc->deallocate;
				mach_msg_copy_options_t copy = source_dsc->copy;
				mach_msg_descriptor_type_t type = source_dsc->type;

				dest_dsc->address = (void *)address;
				dest_dsc->size = size;
				dest_dsc->deallocate = deallocate;
				dest_dsc->copy = copy;
				dest_dsc->type = type;
				break;
			}
			case MACH_MSG_OOL_PORTS_DESCRIPTOR: {
				ipc_object_t *objects;
				unsigned int j;
				mach_msg_type_name_t name;
				mach_msg_ool_ports_descriptor_t *dest_dsc;

				mach_msg_ool_ports_descriptor32_t *source_dsc = &saddr->ool_ports32;
				dest_dsc = (typeof(dest_dsc)) & daddr->ool_ports;

				boolean_t deallocate = source_dsc->deallocate;
				mach_msg_copy_options_t copy = source_dsc->copy;
				mach_msg_size_t port_count = source_dsc->count;
				mach_msg_type_name_t disposition = source_dsc->disposition;

				/* this is really the type SEND, SEND_ONCE, etc. */
				name = disposition;
				disposition = ipc_object_copyin_type(name);

				objects = (ipc_object_t *) (uintptr_t)source_dsc->address;

				/* copy in each right carried in the port array */
				for (j = 0; j < port_count; j++) {
					ipc_object_t object = objects[j];

					if (!IO_VALID(object)) {
						continue;
					}

					ipc_object_copyin_from_kernel(object, name);

					if ((disposition == MACH_MSG_TYPE_PORT_RECEIVE) &&
					    ipc_port_check_circularity(ip_object_to_port(object),
					    ip_object_to_port(remote))) {
						kmsg->ikm_header->msgh_bits |= MACH_MSGH_BITS_CIRCULAR;
					}
				}

				dest_dsc->address = objects;
				dest_dsc->deallocate = deallocate;
				dest_dsc->copy = copy;
				dest_dsc->disposition = disposition;
				dest_dsc->type = MACH_MSG_OOL_PORTS_DESCRIPTOR;
				dest_dsc->count = port_count;
				break;
			}
			case MACH_MSG_GUARDED_PORT_DESCRIPTOR: {
				mach_msg_type_name_t disp;
				ipc_object_t object;
				mach_msg_guarded_port_descriptor32_t *dsc;
				mach_msg_guarded_port_descriptor_t *dest_dsc;

				dsc = (typeof(dsc)) & saddr->guarded_port32;
				dest_dsc = &daddr->guarded_port;

				disp = dsc->disposition;
				object = ip_to_object(CAST_MACH_NAME_TO_PORT(dsc->name));
				/* kernel-built guarded descriptors carry no guard state */
				assert(dsc->flags == 0);
				assert(dsc->context == 0);

				dest_dsc->disposition = ipc_object_copyin_type(disp);
				dest_dsc->name = ip_object_to_port(object);
				dest_dsc->type = MACH_MSG_GUARDED_PORT_DESCRIPTOR;
				dest_dsc->flags = 0;

				if (!IO_VALID(object)) {
					break;
				}

				ipc_object_copyin_from_kernel(object, disp);

				/* CDY avoid circularity when the destination is also */
				/* the kernel.  This check should be changed into an */
				/* assert when the new kobject model is in place since*/
				/* ports will not be used in kernel to kernel chats */

				if (ip_object_to_port(remote)->ip_receiver != ipc_space_kernel) {
					if ((dest_dsc->disposition == MACH_MSG_TYPE_PORT_RECEIVE) &&
					    ipc_port_check_circularity(ip_object_to_port(object),
					    ip_object_to_port(remote))) {
						kmsg->ikm_header->msgh_bits |=
						    MACH_MSGH_BITS_CIRCULAR;
					}
				}
				break;
			}
			default: {
#if MACH_ASSERT
				panic("ipc_kmsg_copyin_from_kernel: bad descriptor");
#endif /* MACH_ASSERT */
			}
			}
		}
	}

	/* Add the signature to the message */
	ikm_sign(kmsg);

	return MACH_MSG_SUCCESS;
}
4544 #endif /* IKM_SUPPORT_LEGACY */
4545
4546 /*
4547 * Routine: ipc_kmsg_copyout_header
4548 * Purpose:
4549 * "Copy-out" port rights in the header of a message.
4550 * Operates atomically; if it doesn't succeed the
4551 * message header and the space are left untouched.
4552 * If it does succeed the remote/local port fields
4553 * contain port names instead of object pointers,
4554 * and the bits field is updated.
4555 * Conditions:
4556 * Nothing locked.
4557 * Returns:
4558 * MACH_MSG_SUCCESS Copied out port rights.
4559 * MACH_RCV_INVALID_NOTIFY
4560 * Notify is non-null and doesn't name a receive right.
4561 * (Either KERN_INVALID_NAME or KERN_INVALID_RIGHT.)
4562 * MACH_RCV_HEADER_ERROR|MACH_MSG_IPC_SPACE
4563 * The space is dead.
4564 * MACH_RCV_HEADER_ERROR|MACH_MSG_IPC_SPACE
4565 * No room in space for another name.
4566 * MACH_RCV_HEADER_ERROR|MACH_MSG_IPC_KERNEL
4567 * Couldn't allocate memory for the reply port.
4568 * MACH_RCV_HEADER_ERROR|MACH_MSG_IPC_KERNEL
4569 * Couldn't allocate memory for the dead-name request.
4570 */
4571
mach_msg_return_t
ipc_kmsg_copyout_header(
	ipc_kmsg_t kmsg,
	ipc_space_t space,
	mach_msg_option_t option)
{
	mach_msg_header_t *msg = kmsg->ikm_header;
	mach_msg_bits_t mbits = msg->msgh_bits;
	ipc_port_t dest = msg->msgh_remote_port;

	assert(IP_VALID(dest));

	/*
	 * While we still hold a reference on the received-from port,
	 * process all send-possible notifications we received along with
	 * the message.
	 */
	ipc_port_spnotify(dest);

	{
		mach_msg_type_name_t dest_type = MACH_MSGH_BITS_REMOTE(mbits);
		mach_msg_type_name_t reply_type = MACH_MSGH_BITS_LOCAL(mbits);
		mach_msg_type_name_t voucher_type = MACH_MSGH_BITS_VOUCHER(mbits);
		ipc_port_t reply = msg->msgh_local_port;
		ipc_port_t release_reply_port = IP_NULL;
		mach_port_name_t dest_name, reply_name;

		ipc_port_t voucher = kmsg->ikm_voucher;
		ipc_port_t release_voucher_port = IP_NULL;
		mach_port_name_t voucher_name;

		uint32_t entries_held = 0;
		boolean_t need_write_lock = FALSE;
		kern_return_t kr;

		/*
		 * Reserve any potentially needed entries in the target space.
		 * We'll free any unused before unlocking the space.
		 * The space write lock is only required when a reply and/or
		 * voucher port actually has to be copied out.
		 */
		if (IP_VALID(reply)) {
			entries_held++;
			need_write_lock = TRUE;
		}
		if (IP_VALID(voucher)) {
			assert(voucher_type == MACH_MSG_TYPE_MOVE_SEND);

			if ((option & MACH_RCV_VOUCHER) != 0) {
				entries_held++;
			}
			need_write_lock = TRUE;
		}

		if (need_write_lock) {
			is_write_lock(space);

			/* reserve entries, growing the table as needed */
			while (entries_held) {
				if (!is_active(space)) {
					is_write_unlock(space);
					return MACH_RCV_HEADER_ERROR |
					       MACH_MSG_IPC_SPACE;
				}

				kr = ipc_entries_hold(space, entries_held);
				if (KERN_SUCCESS == kr) {
					break;
				}

				/* NOTE(review): grow unlocks on failure; no unlock here */
				kr = ipc_entry_grow_table(space, ITS_SIZE_NONE);
				if (KERN_SUCCESS != kr) {
					return MACH_RCV_HEADER_ERROR |
					       MACH_MSG_IPC_SPACE;
				}
				/* space was unlocked and relocked - retry */
			}

			/* Handle reply port. */
			if (IP_VALID(reply)) {
				ipc_entry_t entry;

				/* Is there already an entry we can use? */
				if ((reply_type != MACH_MSG_TYPE_PORT_SEND_ONCE) &&
				    ipc_right_reverse(space, ip_to_object(reply), &reply_name, &entry)) {
					/* reply port is locked and active */
					assert(entry->ie_bits & MACH_PORT_TYPE_SEND_RECEIVE);
				} else {
					ip_lock(reply);
					/* Is the reply port still active and allowed to be copied out? */
					if (!ip_active(reply) || !ip_label_check(space, reply, reply_type)) {
						/* clear the context value */
						reply->ip_reply_context = 0;
						ip_unlock(reply);

						/* dispose of the right later, outside the locks */
						release_reply_port = reply;
						reply = IP_DEAD;
						reply_name = MACH_PORT_DEAD;
						goto done_with_reply;
					}

					/* claim a held entry for the reply port */
					assert(entries_held > 0);
					entries_held--;
					ipc_entry_claim(space, &reply_name, &entry);
					assert(IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_NONE);
					assert(entry->ie_object == IO_NULL);
					entry->ie_object = ip_to_object(reply);
				}

				/* space and reply port are locked and active */
				ip_reference(reply);    /* hold onto the reply port */

				/*
				 * If the receiver would like to enforce strict reply
				 * semantics, and the message looks like it expects a reply,
				 * and contains a voucher, then link the context in the
				 * voucher with the reply port so that the next message sent
				 * to the reply port must come from a thread that has a
				 * matching context (voucher).
				 */
				if (enforce_strict_reply && MACH_RCV_WITH_STRICT_REPLY(option) && IP_VALID(voucher)) {
					if (ipc_kmsg_validate_reply_port_locked(reply, option) != KERN_SUCCESS) {
						/* if the receiver isn't happy with the reply port: fail the receive. */
						ip_unlock(reply);
						ipc_entry_dealloc(space, reply_name, entry);
						is_write_unlock(space);
						ip_release(reply);
						return MACH_RCV_INVALID_REPLY;
					}
					ipc_kmsg_link_reply_context_locked(reply, voucher);
				} else {
					/*
					 * if the receive did not choose to participate
					 * in the strict reply/RPC, then don't enforce
					 * anything (as this could lead to booby-trapped
					 * messages that kill the server).
					 */
					reply->ip_reply_context = 0;
				}

				kr = ipc_right_copyout(space, reply_name, entry,
				    reply_type, NULL, NULL, ip_to_object(reply));
				assert(kr == KERN_SUCCESS);
				/* reply port is unlocked */
			} else {
				/* no reply port: IP_NULL / IP_DEAD become their name form */
				reply_name = CAST_MACH_PORT_TO_NAME(reply);
			}

done_with_reply:

			/* Handle voucher port. */
			if (voucher_type != MACH_MSGH_BITS_ZERO) {
				assert(voucher_type == MACH_MSG_TYPE_MOVE_SEND);

				if (!IP_VALID(voucher)) {
					if ((option & MACH_RCV_VOUCHER) == 0) {
						voucher_type = MACH_MSGH_BITS_ZERO;
					}
					voucher_name = MACH_PORT_NULL;
					goto done_with_voucher;
				}

				/* clear voucher from its hiding place back in the kmsg */
				kmsg->ikm_voucher = IP_NULL;

				if ((option & MACH_RCV_VOUCHER) != 0) {
					ipc_entry_t entry;

					/* reuse an existing send-right entry when possible */
					if (ipc_right_reverse(space, ip_to_object(voucher),
					    &voucher_name, &entry)) {
						/* voucher port locked */
						assert(entry->ie_bits & MACH_PORT_TYPE_SEND);
					} else {
						assert(entries_held > 0);
						entries_held--;
						ipc_entry_claim(space, &voucher_name, &entry);
						assert(IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_NONE);
						assert(entry->ie_object == IO_NULL);
						entry->ie_object = ip_to_object(voucher);
						ip_lock(voucher);
					}
					/* space is locked and active */
					require_ip_active(voucher);
					assert(ip_kotype(voucher) == IKOT_VOUCHER);
					kr = ipc_right_copyout(space, voucher_name, entry,
					    MACH_MSG_TYPE_MOVE_SEND, NULL, NULL,
					    ip_to_object(voucher));
					/* voucher port is unlocked */
				} else {
					/* receiver declined the voucher: drop the right later */
					voucher_type = MACH_MSGH_BITS_ZERO;
					release_voucher_port = voucher;
					voucher_name = MACH_PORT_NULL;
				}
			} else {
				voucher_name = msg->msgh_voucher_port;
			}

done_with_voucher:

			/* lock dest before releasing the space lock (atomicity, below) */
			ip_lock(dest);
			is_write_unlock(space);
		} else {
			/*
			 * No reply or voucher port!  This is an easy case.
			 * We only need to have the space locked
			 * when locking the destination.
			 */

			is_read_lock(space);
			if (!is_active(space)) {
				is_read_unlock(space);
				return MACH_RCV_HEADER_ERROR | MACH_MSG_IPC_SPACE;
			}

			ip_lock(dest);
			is_read_unlock(space);

			reply_name = CAST_MACH_PORT_TO_NAME(reply);

			if (voucher_type != MACH_MSGH_BITS_ZERO) {
				assert(voucher_type == MACH_MSG_TYPE_MOVE_SEND);
				if ((option & MACH_RCV_VOUCHER) == 0) {
					voucher_type = MACH_MSGH_BITS_ZERO;
				}
				voucher_name = MACH_PORT_NULL;
			} else {
				voucher_name = msg->msgh_voucher_port;
			}
		}

		/*
		 * At this point, the space is unlocked and the destination
		 * port is locked.  (Lock taken while space was locked.)
		 * reply_name is taken care of; we still need dest_name.
		 * We still hold a ref for reply (if it is valid).
		 *
		 * If the space holds receive rights for the destination,
		 * we return its name for the right.  Otherwise the task
		 * managed to destroy or give away the receive right between
		 * receiving the message and this copyout.  If the destination
		 * is dead, return MACH_PORT_DEAD, and if the receive right
		 * exists somewhere else (another space, in transit)
		 * return MACH_PORT_NULL.
		 *
		 * Making this copyout operation atomic with the previous
		 * copyout of the reply port is a bit tricky.  If there was
		 * no real reply port (it wasn't IP_VALID) then this isn't
		 * an issue.  If the reply port was dead at copyout time,
		 * then we are OK, because if dest is dead we serialize
		 * after the death of both ports and if dest is alive
		 * we serialize after reply died but before dest's (later) death.
		 * So assume reply was alive when we copied it out.  If dest
		 * is alive, then we are OK because we serialize before
		 * the ports' deaths.  So assume dest is dead when we look at it.
		 * If reply dies/died after dest, then we are OK because
		 * we serialize after dest died but before reply dies.
		 * So the hard case is when reply is alive at copyout,
		 * dest is dead at copyout, and reply died before dest died.
		 * In this case pretend that dest is still alive, so
		 * we serialize while both ports are alive.
		 *
		 * Because the space lock is held across the copyout of reply
		 * and locking dest, the receive right for dest can't move
		 * in or out of the space while the copyouts happen, so
		 * that isn't an atomicity problem.  In the last hard case
		 * above, this implies that when dest is dead that the
		 * space couldn't have had receive rights for dest at
		 * the time reply was copied-out, so when we pretend
		 * that dest is still alive, we can return MACH_PORT_NULL.
		 *
		 * If dest == reply, then we have to make it look like
		 * either both copyouts happened before the port died,
		 * or both happened after the port died.  This special
		 * case works naturally if the timestamp comparison
		 * is done correctly.
		 */

		if (ip_active(dest)) {
			ipc_object_copyout_dest(space, ip_to_object(dest),
			    dest_type, &dest_name);
			/* dest is unlocked */
		} else {
			/* use death timestamps to decide DEAD vs NULL (see above) */
			ipc_port_timestamp_t timestamp;

			timestamp = dest->ip_timestamp;
			ip_unlock(dest);
			ip_release(dest);

			if (IP_VALID(reply)) {
				ip_lock(reply);
				if (ip_active(reply) ||
				    IP_TIMESTAMP_ORDER(timestamp,
				    reply->ip_timestamp)) {
					dest_name = MACH_PORT_DEAD;
				} else {
					dest_name = MACH_PORT_NULL;
				}
				ip_unlock(reply);
			} else {
				dest_name = MACH_PORT_DEAD;
			}
		}

		if (IP_VALID(reply)) {
			ip_release(reply);
		}

		/* dispose of a reply right that could not be copied out */
		if (IP_VALID(release_reply_port)) {
			if (reply_type == MACH_MSG_TYPE_PORT_SEND_ONCE) {
				ipc_port_release_sonce(release_reply_port);
			} else {
				ipc_port_release_send(release_reply_port);
			}
		}

		if ((option & MACH_RCV_VOUCHER) != 0) {
			KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_MSG_RECV) | DBG_FUNC_NONE,
			    VM_KERNEL_ADDRPERM((uintptr_t)kmsg),
			    (uintptr_t)kmsg->ikm_header->msgh_bits,
			    (uintptr_t)kmsg->ikm_header->msgh_id,
			    VM_KERNEL_ADDRPERM((uintptr_t)unsafe_convert_port_to_voucher(voucher)),
			    0);
		} else {
			KERNEL_DEBUG_CONSTANT(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_MSG_RECV_VOUCHER_REFUSED) | DBG_FUNC_NONE,
			    VM_KERNEL_ADDRPERM((uintptr_t)kmsg),
			    (uintptr_t)kmsg->ikm_header->msgh_bits,
			    (uintptr_t)kmsg->ikm_header->msgh_id,
			    VM_KERNEL_ADDRPERM((uintptr_t)unsafe_convert_port_to_voucher(voucher)),
			    0);
		}

		/* drop a voucher send right the receiver refused */
		if (IP_VALID(release_voucher_port)) {
			ipc_port_release_send(release_voucher_port);
		}

		/* rewrite the header: object pointers become port names */
		msg->msgh_bits = MACH_MSGH_BITS_SET(reply_type, dest_type,
		    voucher_type, mbits);
		msg->msgh_local_port = CAST_MACH_NAME_TO_PORT(dest_name);
		msg->msgh_remote_port = CAST_MACH_NAME_TO_PORT(reply_name);
		msg->msgh_voucher_port = voucher_name;
	}

	return MACH_MSG_SUCCESS;
}
4914
4915 /*
4916 * Routine: ipc_kmsg_copyout_object
4917 * Purpose:
4918 * Copy-out a port right. Always returns a name,
4919 * even for unsuccessful return codes. Always
4920 * consumes the supplied object.
4921 * Conditions:
4922 * Nothing locked.
4923 * Returns:
4924 * MACH_MSG_SUCCESS The space acquired the right
4925 * (name is valid) or the object is dead (MACH_PORT_DEAD).
4926 * MACH_MSG_IPC_SPACE No room in space for the right,
4927 * or the space is dead. (Name is MACH_PORT_NULL.)
4928 * MACH_MSG_IPC_KERNEL Kernel resource shortage.
4929 * (Name is MACH_PORT_NULL.)
4930 */
4931
4932 mach_msg_return_t
4933 ipc_kmsg_copyout_object(
4934 ipc_space_t space,
4935 ipc_object_t object,
4936 mach_msg_type_name_t msgt_name,
4937 mach_port_context_t *context,
4938 mach_msg_guard_flags_t *guard_flags,
4939 mach_port_name_t *namep)
4940 {
4941 kern_return_t kr;
4942
4943 if (!IO_VALID(object)) {
4944 *namep = CAST_MACH_PORT_TO_NAME(object);
4945 return MACH_MSG_SUCCESS;
4946 }
4947
4948 kr = ipc_object_copyout(space, object, msgt_name, context, guard_flags, namep);
4949 if (kr != KERN_SUCCESS) {
4950 ipc_object_destroy(object, msgt_name);
4951
4952 if (kr == KERN_INVALID_CAPABILITY) {
4953 *namep = MACH_PORT_DEAD;
4954 } else {
4955 *namep = MACH_PORT_NULL;
4956
4957 if (kr == KERN_RESOURCE_SHORTAGE) {
4958 return MACH_MSG_IPC_KERNEL;
4959 } else {
4960 return MACH_MSG_IPC_SPACE;
4961 }
4962 }
4963 }
4964
4965 return MACH_MSG_SUCCESS;
4966 }
4967
4968 static mach_msg_descriptor_t *
4969 ipc_kmsg_copyout_port_descriptor(mach_msg_descriptor_t *dsc,
4970 mach_msg_descriptor_t *dest_dsc,
4971 ipc_space_t space,
4972 kern_return_t *mr)
4973 {
4974 mach_port_t port;
4975 mach_port_name_t name;
4976 mach_msg_type_name_t disp;
4977
4978 /* Copyout port right carried in the message */
4979 port = dsc->port.name;
4980 disp = dsc->port.disposition;
4981 *mr |= ipc_kmsg_copyout_object(space,
4982 ip_to_object(port), disp, NULL, NULL, &name);
4983
4984 if (current_task() == kernel_task) {
4985 mach_msg_port_descriptor_t *user_dsc = (typeof(user_dsc))dest_dsc;
4986 user_dsc--; // point to the start of this port descriptor
4987 bzero((void *)user_dsc, sizeof(*user_dsc));
4988 user_dsc->name = CAST_MACH_NAME_TO_PORT(name);
4989 user_dsc->disposition = disp;
4990 user_dsc->type = MACH_MSG_PORT_DESCRIPTOR;
4991 dest_dsc = (typeof(dest_dsc))user_dsc;
4992 } else {
4993 mach_msg_legacy_port_descriptor_t *user_dsc = (typeof(user_dsc))dest_dsc;
4994 user_dsc--; // point to the start of this port descriptor
4995 bzero((void *)user_dsc, sizeof(*user_dsc));
4996 user_dsc->name = CAST_MACH_PORT_TO_NAME(name);
4997 user_dsc->disposition = disp;
4998 user_dsc->type = MACH_MSG_PORT_DESCRIPTOR;
4999 dest_dsc = (typeof(dest_dsc))user_dsc;
5000 }
5001
5002 return (mach_msg_descriptor_t *)dest_dsc;
5003 }
5004
mach_msg_descriptor_t *
ipc_kmsg_copyout_ool_descriptor(mach_msg_ool_descriptor_t *dsc, mach_msg_descriptor_t *user_dsc, int is_64bit, vm_map_t map, mach_msg_return_t *mr);
/*
 *	Routine:	ipc_kmsg_copyout_ool_descriptor
 *	Purpose:
 *		Copy out the out-of-line memory carried by one OOL
 *		descriptor (a vm_map_copy_t prepared at send time) into
 *		`map`, then rewrite the descriptor just below `user_dsc`
 *		in the layout the receiver expects: full layout for the
 *		kernel task, 64-bit or 32-bit user layout otherwise.
 *		On VM failure the copy object is discarded, the error is
 *		accumulated into *mr, and address/size are reported as 0.
 *		Returns the new (decremented) descriptor cursor.
 */
mach_msg_descriptor_t *
ipc_kmsg_copyout_ool_descriptor(mach_msg_ool_descriptor_t *dsc, mach_msg_descriptor_t *user_dsc, int is_64bit, vm_map_t map, mach_msg_return_t *mr)
{
	vm_map_copy_t copy;
	vm_map_address_t rcv_addr;
	mach_msg_copy_options_t copy_options;
	vm_map_size_t size;
	mach_msg_descriptor_type_t dsc_type;
	boolean_t misaligned = FALSE;

	//SKIP_PORT_DESCRIPTORS(saddr, sdsc_count);

	copy = (vm_map_copy_t)dsc->address;
	size = (vm_map_size_t)dsc->size;
	copy_options = dsc->copy;
	assert(copy_options != MACH_MSG_KALLOC_COPY_T);
	dsc_type = dsc->type;

	if (copy != VM_MAP_COPY_NULL) {
		kern_return_t kr;

		rcv_addr = 0;
		/* descriptor size must agree with the copy object's size */
		if (vm_map_copy_validate_size(map, copy, &size) == FALSE) {
			panic("Inconsistent OOL/copyout size on %p: expected %d, got %lld @%p",
			    dsc, dsc->size, (unsigned long long)copy->size, copy);
		}

		/* page-unaligned entry-list copies need the slow path below */
		if ((copy->type == VM_MAP_COPY_ENTRY_LIST) &&
		    (trunc_page(copy->offset) != copy->offset ||
		    round_page(dsc->size) != dsc->size)) {
			misaligned = TRUE;
		}

		if (misaligned) {
			vm_map_address_t rounded_addr;
			vm_map_size_t rounded_size;
			vm_map_offset_t effective_page_mask, effective_page_size;

			effective_page_mask = VM_MAP_PAGE_MASK(map);
			effective_page_size = effective_page_mask + 1;

			/* allocate whole pages covering the copy, then overwrite */
			rounded_size = vm_map_round_page(copy->offset + size, effective_page_mask) - vm_map_trunc_page(copy->offset, effective_page_mask);

			kr = vm_allocate_kernel(map, (vm_offset_t*)&rounded_addr, rounded_size, VM_FLAGS_ANYWHERE, 0);

			if (kr == KERN_SUCCESS) {
				/*
				 * vm_map_copy_overwrite does a full copy
				 * if size is too small to optimize.
				 * So we tried skipping the offset adjustment
				 * if we fail the 'size' test.
				 *
				 * if (size >= VM_MAP_COPY_OVERWRITE_OPTIMIZATION_THRESHOLD_PAGES * effective_page_size) {
				 *
				 * This resulted in leaked memory especially on the
				 * older watches (16k user - 4k kernel) because we
				 * would do a physical copy into the start of this
				 * rounded range but could leak part of it
				 * on deallocation if the 'size' being deallocated
				 * does not cover the full range. So instead we do
				 * the misalignment adjustment always so that on
				 * deallocation we will remove the full range.
				 */
				if ((rounded_addr & effective_page_mask) !=
				    (copy->offset & effective_page_mask)) {
					/*
					 * Need similar mis-alignment of source and destination...
					 */
					rounded_addr += (copy->offset & effective_page_mask);

					assert((rounded_addr & effective_page_mask) == (copy->offset & effective_page_mask));
				}
				rcv_addr = rounded_addr;

				kr = vm_map_copy_overwrite(map, rcv_addr, copy, size, FALSE);
			}
		} else {
			/* aligned fast path: map the copy object directly */
			kr = vm_map_copyout_size(map, &rcv_addr, copy, size);
		}
		if (kr != KERN_SUCCESS) {
			if (kr == KERN_RESOURCE_SHORTAGE) {
				*mr |= MACH_MSG_VM_KERNEL;
			} else {
				*mr |= MACH_MSG_VM_SPACE;
			}
			vm_map_copy_discard(copy);
			rcv_addr = 0;
			size = 0;
		}
	} else {
		/* no memory carried: report a zero address and size */
		rcv_addr = 0;
		size = 0;
	}

	/*
	 * Now update the descriptor as the user would see it.
	 * This may require expanding the descriptor to the user
	 * visible size.  There is already space allocated for
	 * this in what naddr points to.
	 */
	if (current_task() == kernel_task) {
		mach_msg_ool_descriptor_t *user_ool_dsc = (typeof(user_ool_dsc))user_dsc;
		user_ool_dsc--;
		bzero((void *)user_ool_dsc, sizeof(*user_ool_dsc));

		user_ool_dsc->address = (void *)(uintptr_t)rcv_addr;
		user_ool_dsc->deallocate = (copy_options == MACH_MSG_VIRTUAL_COPY) ?
		    TRUE : FALSE;
		user_ool_dsc->copy = copy_options;
		user_ool_dsc->type = dsc_type;
		user_ool_dsc->size = (mach_msg_size_t)size;

		user_dsc = (typeof(user_dsc))user_ool_dsc;
	} else if (is_64bit) {
		mach_msg_ool_descriptor64_t *user_ool_dsc = (typeof(user_ool_dsc))user_dsc;
		user_ool_dsc--;
		bzero((void *)user_ool_dsc, sizeof(*user_ool_dsc));

		user_ool_dsc->address = rcv_addr;
		user_ool_dsc->deallocate = (copy_options == MACH_MSG_VIRTUAL_COPY) ?
		    TRUE : FALSE;
		user_ool_dsc->copy = copy_options;
		user_ool_dsc->type = dsc_type;
		user_ool_dsc->size = (mach_msg_size_t)size;

		user_dsc = (typeof(user_dsc))user_ool_dsc;
	} else {
		mach_msg_ool_descriptor32_t *user_ool_dsc = (typeof(user_ool_dsc))user_dsc;
		user_ool_dsc--;
		bzero((void *)user_ool_dsc, sizeof(*user_ool_dsc));

		user_ool_dsc->address = CAST_DOWN_EXPLICIT(uint32_t, rcv_addr);
		user_ool_dsc->size = (mach_msg_size_t)size;
		user_ool_dsc->deallocate = (copy_options == MACH_MSG_VIRTUAL_COPY) ?
		    TRUE : FALSE;
		user_ool_dsc->copy = copy_options;
		user_ool_dsc->type = dsc_type;

		user_dsc = (typeof(user_dsc))user_ool_dsc;
	}
	return user_dsc;
}
5149
mach_msg_descriptor_t *
ipc_kmsg_copyout_ool_ports_descriptor(mach_msg_ool_ports_descriptor_t *dsc,
    mach_msg_descriptor_t *user_dsc,
    int is_64bit,
    vm_map_t map,
    ipc_space_t space,
    ipc_kmsg_t kmsg,
    mach_msg_return_t *mr);
/*
 *	Routine:	ipc_kmsg_copyout_ool_ports_descriptor
 *	Purpose:
 *		Copy out one out-of-line ports descriptor during receive:
 *		copy out each port right carried in the message to a name in
 *		the receiver's space, publish the resulting name array into
 *		memory allocated in the receiver's map, and rewrite the
 *		descriptor in the receiver's layout (kernel pointer, 64-bit
 *		user, or 32-bit user).  user_dsc points just PAST the slot
 *		to be filled; the filled (decremented) slot is returned.
 *	Conditions:
 *		Nothing locked.  Error bits are OR-ed into *mr and the
 *		copyout proceeds despite errors.
 */
mach_msg_descriptor_t *
ipc_kmsg_copyout_ool_ports_descriptor(mach_msg_ool_ports_descriptor_t *dsc,
    mach_msg_descriptor_t *user_dsc,
    int is_64bit,
    vm_map_t map,
    ipc_space_t space,
    ipc_kmsg_t kmsg,
    mach_msg_return_t *mr)
{
	mach_vm_offset_t rcv_addr = 0;
	mach_msg_type_name_t disp;
	mach_msg_type_number_t count, i;
	vm_size_t ports_length, names_length;

	mach_msg_copy_options_t copy_options = MACH_MSG_VIRTUAL_COPY;

	//SKIP_PORT_DESCRIPTORS(saddr, sdsc_count);

	count = dsc->count;
	disp = dsc->disposition;
	/* kernel buffer holds object pointers; the user receives names */
	ports_length = count * sizeof(mach_port_t);
	names_length = count * sizeof(mach_port_name_t);

	if (ports_length != 0 && dsc->address != 0) {
		/*
		 * Check to see if there is an overwrite descriptor
		 * specified in the scatter list for this ool data.
		 * The descriptor has already been verified.
		 */
#if 0
		if (saddr != MACH_MSG_DESCRIPTOR_NULL) {
			if (differs) {
				OTHER_OOL_DESCRIPTOR *scatter_dsc;

				scatter_dsc = (OTHER_OOL_DESCRIPTOR *)saddr;
				rcv_addr = (mach_vm_offset_t) scatter_dsc->address;
				copy_options = scatter_dsc->copy;
			} else {
				mach_msg_ool_descriptor_t *scatter_dsc;

				scatter_dsc = &saddr->out_of_line;
				rcv_addr = CAST_USER_ADDR_T(scatter_dsc->address);
				copy_options = scatter_dsc->copy;
			}
			INCREMENT_SCATTER(saddr, sdsc_count, differs);
		}
#endif

		if (copy_options == MACH_MSG_VIRTUAL_COPY) {
			/*
			 * Dynamically allocate the region
			 */
			vm_tag_t tag;
			if (vm_kernel_map_is_kernel(map)) {
				tag = VM_KERN_MEMORY_IPC;
			} else {
				tag = VM_MEMORY_MACH_MSG;
			}

			kern_return_t kr;
			if ((kr = mach_vm_allocate_kernel(map, &rcv_addr,
			    (mach_vm_size_t)names_length,
			    VM_FLAGS_ANYWHERE, tag)) != KERN_SUCCESS) {
				/*
				 * Allocation failed: destroy the rights (and
				 * free the kernel copy) held by this descriptor.
				 */
				ipc_kmsg_clean_body(kmsg, 1, (mach_msg_descriptor_t *)dsc);
				rcv_addr = 0;

				if (kr == KERN_RESOURCE_SHORTAGE) {
					*mr |= MACH_MSG_VM_KERNEL;
				} else {
					*mr |= MACH_MSG_VM_SPACE;
				}
			}
		}

		/*
		 * Handle the port rights and copy out the names
		 * for those rights out to user-space.
		 */
		if (rcv_addr != 0) {
			/*
			 * objects and names alias the same kernel buffer:
			 * each name is written in place over the object
			 * pointer as the right is copied out.
			 */
			ipc_object_t *objects = (ipc_object_t *) dsc->address;
			mach_port_name_t *names = (mach_port_name_t *) dsc->address;

			/* copyout port rights carried in the message */

			for (i = 0; i < count; i++) {
				ipc_object_t object = objects[i];

				*mr |= ipc_kmsg_copyout_object(space, object,
				    disp, NULL, NULL, &names[i]);
			}

			/* copyout to memory allocated above */
			void *data = dsc->address;
			if (copyoutmap(map, data, rcv_addr, names_length) != KERN_SUCCESS) {
				*mr |= MACH_MSG_VM_SPACE;
			}
			/* the buffer was sized for object pointers, not names */
			kfree(data, ports_length);
		}
	} else {
		rcv_addr = 0;
	}

	/*
	 * Now update the descriptor based on the information
	 * calculated above.
	 */
	if (current_task() == kernel_task) {
		/* kernel receiver: native-pointer descriptor layout */
		mach_msg_ool_ports_descriptor_t *user_ool_dsc = (typeof(user_ool_dsc))user_dsc;
		user_ool_dsc--;
		bzero((void *)user_ool_dsc, sizeof(*user_ool_dsc));

		user_ool_dsc->address = (void *)(uintptr_t)rcv_addr;
		user_ool_dsc->deallocate = (copy_options == MACH_MSG_VIRTUAL_COPY) ?
		    TRUE : FALSE;
		user_ool_dsc->copy = copy_options;
		user_ool_dsc->disposition = disp;
		user_ool_dsc->type = MACH_MSG_OOL_PORTS_DESCRIPTOR;
		user_ool_dsc->count = count;

		user_dsc = (typeof(user_dsc))user_ool_dsc;
	} else if (is_64bit) {
		/* 64-bit user receiver */
		mach_msg_ool_ports_descriptor64_t *user_ool_dsc = (typeof(user_ool_dsc))user_dsc;
		user_ool_dsc--;
		bzero((void *)user_ool_dsc, sizeof(*user_ool_dsc));

		user_ool_dsc->address = rcv_addr;
		user_ool_dsc->deallocate = (copy_options == MACH_MSG_VIRTUAL_COPY) ?
		    TRUE : FALSE;
		user_ool_dsc->copy = copy_options;
		user_ool_dsc->disposition = disp;
		user_ool_dsc->type = MACH_MSG_OOL_PORTS_DESCRIPTOR;
		user_ool_dsc->count = count;

		user_dsc = (typeof(user_dsc))user_ool_dsc;
	} else {
		/* 32-bit user receiver: address is truncated to 32 bits */
		mach_msg_ool_ports_descriptor32_t *user_ool_dsc = (typeof(user_ool_dsc))user_dsc;
		user_ool_dsc--;
		bzero((void *)user_ool_dsc, sizeof(*user_ool_dsc));

		user_ool_dsc->address = CAST_DOWN_EXPLICIT(uint32_t, rcv_addr);
		user_ool_dsc->count = count;
		user_ool_dsc->deallocate = (copy_options == MACH_MSG_VIRTUAL_COPY) ?
		    TRUE : FALSE;
		user_ool_dsc->copy = copy_options;
		user_ool_dsc->disposition = disp;
		user_ool_dsc->type = MACH_MSG_OOL_PORTS_DESCRIPTOR;

		user_dsc = (typeof(user_dsc))user_ool_dsc;
	}
	return user_dsc;
}
5309
/*
 *	Routine:	ipc_kmsg_copyout_guarded_port_descriptor
 *	Purpose:
 *		Copy out one guarded port descriptor.  If the receiver did
 *		not opt in with MACH_RCV_GUARDED_DESC (or is kernel_task),
 *		the carried right is destroyed and a plain legacy port
 *		descriptor containing MACH_PORT_NULL is emitted instead.
 *		Otherwise the right is copied out with its guard flags and
 *		context, in the 32- or 64-bit user layout.  dest_dsc points
 *		just PAST the slot to be filled; the filled slot is returned.
 *	Conditions:
 *		Nothing locked.  Error bits are OR-ed into *mr.
 */
static mach_msg_descriptor_t *
ipc_kmsg_copyout_guarded_port_descriptor(
	mach_msg_guarded_port_descriptor_t *dsc,
	mach_msg_descriptor_t *dest_dsc,
	int is_64bit,
	__unused ipc_kmsg_t kmsg,
	ipc_space_t space,
	mach_msg_option_t option,
	kern_return_t *mr)
{
	mach_port_t port;
	mach_port_name_t name = MACH_PORT_NULL;
	mach_msg_type_name_t disp;
	mach_msg_guard_flags_t guard_flags;
	mach_port_context_t context;

	/* Copyout port right carried in the message */
	port = dsc->name;
	disp = dsc->disposition;
	guard_flags = dsc->flags;
	context = 0;

	/* Currently kernel_task doesnt support receiving guarded port descriptors */
	struct knote *kn = current_thread()->ith_knote;
	if ((kn != ITH_KNOTE_PSEUDO) && (((option & MACH_RCV_GUARDED_DESC) == 0) ||
	    (current_task() == kernel_task))) {
#if DEVELOPMENT || DEBUG
		if (current_task() != kernel_task) {
			/*
			 * Simulated crash needed for debugging, notifies the receiver to opt into receiving
			 * guarded descriptors.
			 */
			mach_port_guard_exception(current_thread()->ith_receiver_name, 0, 0, kGUARD_EXC_RCV_GUARDED_DESC);
		}
#endif
		KDBG(MACHDBG_CODE(DBG_MACH_IPC, MACH_IPC_DESTROY_GUARDED_DESC), current_thread()->ith_receiver_name,
		    VM_KERNEL_ADDRPERM(port), disp, guard_flags);
		ipc_object_destroy(ip_to_object(port), disp);
		/* emit a legacy port descriptor holding MACH_PORT_NULL */
		mach_msg_legacy_port_descriptor_t *user_dsc = (typeof(user_dsc))dest_dsc;
		user_dsc--; // point to the start of this port descriptor
		bzero((void *)user_dsc, sizeof(*user_dsc));
		user_dsc->name = name;
		user_dsc->disposition = disp;
		user_dsc->type = MACH_MSG_PORT_DESCRIPTOR;
		dest_dsc = (typeof(dest_dsc))user_dsc;
	} else {
		/* copyout fills in name, and may update context/guard_flags */
		*mr |= ipc_kmsg_copyout_object(space,
		    ip_to_object(port), disp, &context, &guard_flags, &name);

		if (!is_64bit) {
			mach_msg_guarded_port_descriptor32_t *user_dsc = (typeof(user_dsc))dest_dsc;
			user_dsc--; // point to the start of this port descriptor
			bzero((void *)user_dsc, sizeof(*user_dsc));
			user_dsc->name = name;
			user_dsc->flags = guard_flags;
			user_dsc->disposition = disp;
			user_dsc->type = MACH_MSG_GUARDED_PORT_DESCRIPTOR;
			/* guard context is truncated for 32-bit receivers */
			user_dsc->context = CAST_DOWN_EXPLICIT(uint32_t, context);
			dest_dsc = (typeof(dest_dsc))user_dsc;
		} else {
			mach_msg_guarded_port_descriptor64_t *user_dsc = (typeof(user_dsc))dest_dsc;
			user_dsc--; // point to the start of this port descriptor
			bzero((void *)user_dsc, sizeof(*user_dsc));
			user_dsc->name = name;
			user_dsc->flags = guard_flags;
			user_dsc->disposition = disp;
			user_dsc->type = MACH_MSG_GUARDED_PORT_DESCRIPTOR;
			user_dsc->context = context;
			dest_dsc = (typeof(dest_dsc))user_dsc;
		}
	}

	return (mach_msg_descriptor_t *)dest_dsc;
}
5384
5385
5386 /*
5387 * Routine: ipc_kmsg_copyout_body
5388 * Purpose:
5389 * "Copy-out" port rights and out-of-line memory
5390 * in the body of a message.
5391 *
5392 * The error codes are a combination of special bits.
5393 * The copyout proceeds despite errors.
5394 * Conditions:
5395 * Nothing locked.
5396 * Returns:
5397 * MACH_MSG_SUCCESS Successful copyout.
5398 * MACH_MSG_IPC_SPACE No room for port right in name space.
5399 * MACH_MSG_VM_SPACE No room for memory in address space.
5400 * MACH_MSG_IPC_KERNEL Resource shortage handling port right.
5401 * MACH_MSG_VM_KERNEL Resource shortage handling memory.
5402 * MACH_MSG_INVALID_RT_DESCRIPTOR Descriptor incompatible with RT
5403 */
5404
mach_msg_return_t
ipc_kmsg_copyout_body(
	ipc_kmsg_t              kmsg,
	ipc_space_t             space,
	vm_map_t                map,
	mach_msg_option_t       option,
	mach_msg_body_t         *slist)
{
	mach_msg_body_t *body;
	mach_msg_descriptor_t *kern_dsc, *user_dsc;
	mach_msg_descriptor_t *saddr;
	mach_msg_type_number_t dsc_count, sdsc_count;
	int i;
	mach_msg_return_t mr = MACH_MSG_SUCCESS;
	/* receiver's pointer width decides which user descriptor layout to emit */
	boolean_t is_task_64bit = (map->max_offset > VM_MAX_ADDRESS);

	body = (mach_msg_body_t *) (kmsg->ikm_header + 1);
	dsc_count = body->msgh_descriptor_count;
	kern_dsc = (mach_msg_descriptor_t *) (body + 1);
	/* Point user_dsc just after the end of all the descriptors */
	user_dsc = &kern_dsc[dsc_count];

	/* Do scatter list setup */
	if (slist != MACH_MSG_BODY_NULL) {
		/* scatter lists are no longer supported; callers must pass NULL */
		panic("Scatter lists disabled");
		saddr = (mach_msg_descriptor_t *) (slist + 1);
		sdsc_count = slist->msgh_descriptor_count;
	} else {
		saddr = MACH_MSG_DESCRIPTOR_NULL;
		sdsc_count = 0;
	}

	/*
	 * Now process the descriptors - in reverse order.
	 *
	 * User descriptors are never larger than kernel ones, so the
	 * compaction is done in place: walking backwards, each helper
	 * decrements user_dsc and writes the (possibly smaller) user
	 * representation into the tail of the same buffer without
	 * clobbering kernel descriptors not yet processed.
	 */
	for (i = dsc_count - 1; i >= 0; i--) {
		switch (kern_dsc[i].type.type) {
		case MACH_MSG_PORT_DESCRIPTOR:
			user_dsc = ipc_kmsg_copyout_port_descriptor(&kern_dsc[i], user_dsc, space, &mr);
			break;
		case MACH_MSG_OOL_VOLATILE_DESCRIPTOR:
		case MACH_MSG_OOL_DESCRIPTOR:
			user_dsc = ipc_kmsg_copyout_ool_descriptor(
				(mach_msg_ool_descriptor_t *)&kern_dsc[i], user_dsc, is_task_64bit, map, &mr);
			break;
		case MACH_MSG_OOL_PORTS_DESCRIPTOR:
			user_dsc = ipc_kmsg_copyout_ool_ports_descriptor(
				(mach_msg_ool_ports_descriptor_t *)&kern_dsc[i], user_dsc, is_task_64bit, map, space, kmsg, &mr);
			break;
		case MACH_MSG_GUARDED_PORT_DESCRIPTOR:
			user_dsc = ipc_kmsg_copyout_guarded_port_descriptor(
				(mach_msg_guarded_port_descriptor_t *)&kern_dsc[i], user_dsc, is_task_64bit, kmsg, space, option, &mr);
			break;
		default: {
			panic("untyped IPC copyout body: invalid message descriptor");
		}
		}
	}

	/*
	 * If the descriptors shrank, slide the header up against them
	 * and shrink the reported message size accordingly.
	 */
	if (user_dsc != kern_dsc) {
		vm_offset_t dsc_adjust = (vm_offset_t)user_dsc - (vm_offset_t)kern_dsc;
		memmove((char *)((vm_offset_t)kmsg->ikm_header + dsc_adjust), kmsg->ikm_header, sizeof(mach_msg_base_t));
		kmsg->ikm_header = (mach_msg_header_t *)((vm_offset_t)kmsg->ikm_header + dsc_adjust);
		/* Update the message size for the smaller user representation */
		kmsg->ikm_header->msgh_size -= (mach_msg_size_t)dsc_adjust;
	}

	return mr;
}
5472
5473 /*
5474 * Routine: ipc_kmsg_copyout_size
5475 * Purpose:
5476 * Compute the size of the message as copied out to the given
5477 * map. If the destination map's pointers are a different size
5478 * than the kernel's, we have to allow for expansion/
5479 * contraction of the descriptors as appropriate.
5480 * Conditions:
5481 * Nothing locked.
5482 * Returns:
5483 * size of the message as it would be received.
5484 */
5485
5486 mach_msg_size_t
5487 ipc_kmsg_copyout_size(
5488 ipc_kmsg_t kmsg,
5489 vm_map_t map)
5490 {
5491 mach_msg_size_t send_size;
5492
5493 send_size = kmsg->ikm_header->msgh_size;
5494
5495 boolean_t is_task_64bit = (map->max_offset > VM_MAX_ADDRESS);
5496
5497 #if defined(__LP64__)
5498 send_size -= LEGACY_HEADER_SIZE_DELTA;
5499 #endif
5500
5501 if (kmsg->ikm_header->msgh_bits & MACH_MSGH_BITS_COMPLEX) {
5502 mach_msg_body_t *body;
5503 mach_msg_descriptor_t *saddr, *eaddr;
5504
5505 body = (mach_msg_body_t *) (kmsg->ikm_header + 1);
5506 saddr = (mach_msg_descriptor_t *) (body + 1);
5507 eaddr = saddr + body->msgh_descriptor_count;
5508
5509 for (; saddr < eaddr; saddr++) {
5510 switch (saddr->type.type) {
5511 case MACH_MSG_OOL_DESCRIPTOR:
5512 case MACH_MSG_OOL_VOLATILE_DESCRIPTOR:
5513 case MACH_MSG_OOL_PORTS_DESCRIPTOR:
5514 case MACH_MSG_GUARDED_PORT_DESCRIPTOR:
5515 if (!is_task_64bit) {
5516 send_size -= DESC_SIZE_ADJUSTMENT;
5517 }
5518 break;
5519 case MACH_MSG_PORT_DESCRIPTOR:
5520 send_size -= DESC_SIZE_ADJUSTMENT;
5521 break;
5522 default:
5523 break;
5524 }
5525 }
5526 }
5527 return send_size;
5528 }
5529
5530 /*
5531 * Routine: ipc_kmsg_copyout
5532 * Purpose:
5533 * "Copy-out" port rights and out-of-line memory
5534 * in the message.
5535 * Conditions:
5536 * Nothing locked.
5537 * Returns:
5538 * MACH_MSG_SUCCESS Copied out all rights and memory.
5539 * MACH_RCV_HEADER_ERROR + special bits
5540 * Rights and memory in the message are intact.
5541 * MACH_RCV_BODY_ERROR + special bits
5542 * The message header was successfully copied out.
5543 * As much of the body was handled as possible.
5544 */
5545
5546 mach_msg_return_t
5547 ipc_kmsg_copyout(
5548 ipc_kmsg_t kmsg,
5549 ipc_space_t space,
5550 vm_map_t map,
5551 mach_msg_body_t *slist,
5552 mach_msg_option_t option)
5553 {
5554 mach_msg_return_t mr;
5555
5556 ikm_validate_sig(kmsg);
5557
5558 mr = ipc_kmsg_copyout_header(kmsg, space, option);
5559 if (mr != MACH_MSG_SUCCESS) {
5560 return mr;
5561 }
5562
5563 if (kmsg->ikm_header->msgh_bits & MACH_MSGH_BITS_COMPLEX) {
5564 mr = ipc_kmsg_copyout_body(kmsg, space, map, option, slist);
5565
5566 if (mr != MACH_MSG_SUCCESS) {
5567 mr |= MACH_RCV_BODY_ERROR;
5568 }
5569 }
5570
5571 return mr;
5572 }
5573
5574 /*
5575 * Routine: ipc_kmsg_copyout_pseudo
5576 * Purpose:
5577 * Does a pseudo-copyout of the message.
5578 * This is like a regular copyout, except
5579 * that the ports in the header are handled
5580 * as if they are in the body. They aren't reversed.
5581 *
5582 * The error codes are a combination of special bits.
5583 * The copyout proceeds despite errors.
5584 * Conditions:
5585 * Nothing locked.
5586 * Returns:
5587 * MACH_MSG_SUCCESS Successful copyout.
5588 * MACH_MSG_IPC_SPACE No room for port right in name space.
5589 * MACH_MSG_VM_SPACE No room for memory in address space.
5590 * MACH_MSG_IPC_KERNEL Resource shortage handling port right.
5591 * MACH_MSG_VM_KERNEL Resource shortage handling memory.
5592 */
5593
mach_msg_return_t
ipc_kmsg_copyout_pseudo(
	ipc_kmsg_t              kmsg,
	ipc_space_t             space,
	vm_map_t                map,
	mach_msg_body_t         *slist)
{
	mach_msg_bits_t mbits = kmsg->ikm_header->msgh_bits;
	ipc_object_t dest = ip_to_object(kmsg->ikm_header->msgh_remote_port);
	ipc_object_t reply = ip_to_object(kmsg->ikm_header->msgh_local_port);
	ipc_object_t voucher = ip_to_object(kmsg->ikm_voucher);
	mach_msg_type_name_t dest_type = MACH_MSGH_BITS_REMOTE(mbits);
	mach_msg_type_name_t reply_type = MACH_MSGH_BITS_LOCAL(mbits);
	mach_msg_type_name_t voucher_type = MACH_MSGH_BITS_VOUCHER(mbits);
	mach_port_name_t voucher_name = kmsg->ikm_header->msgh_voucher_port;
	mach_port_name_t dest_name, reply_name;
	mach_msg_return_t mr;

	/* Set ith_knote to ITH_KNOTE_PSEUDO */
	current_thread()->ith_knote = ITH_KNOTE_PSEUDO;

	ikm_validate_sig(kmsg);

	assert(IO_VALID(dest));

#if 0
	/*
	 * If we did this here, it looks like we wouldn't need the undo logic
	 * at the end of ipc_kmsg_send() in the error cases. Not sure which
	 * would be more elegant to keep.
	 */
	ipc_importance_clean(kmsg);
#else
	/* just assert it is already clean */
	ipc_importance_assert_clean(kmsg);
#endif

	/* copy out both header ports as if they were body descriptors */
	mr = (ipc_kmsg_copyout_object(space, dest, dest_type, NULL, NULL, &dest_name) |
	    ipc_kmsg_copyout_object(space, reply, reply_type, NULL, NULL, &reply_name));

	kmsg->ikm_header->msgh_bits = mbits & MACH_MSGH_BITS_USER;
	/* unlike a real copyout, dest and reply are NOT reversed in the header */
	kmsg->ikm_header->msgh_remote_port = CAST_MACH_NAME_TO_PORT(dest_name);
	kmsg->ikm_header->msgh_local_port = CAST_MACH_NAME_TO_PORT(reply_name);

	if (IO_VALID(voucher)) {
		assert(voucher_type == MACH_MSG_TYPE_MOVE_SEND);

		/* clear ikm_voucher before copyout so the kmsg drops ownership */
		kmsg->ikm_voucher = IP_NULL;
		mr |= ipc_kmsg_copyout_object(space, voucher, voucher_type, NULL, NULL, &voucher_name);
		kmsg->ikm_header->msgh_voucher_port = voucher_name;
	}

	if (mbits & MACH_MSGH_BITS_COMPLEX) {
		mr |= ipc_kmsg_copyout_body(kmsg, space, map, 0, slist);
	}

	return mr;
}
5652
5653 /*
5654 * Routine: ipc_kmsg_copyout_dest
5655 * Purpose:
5656 * Copies out the destination port in the message.
5657 * Destroys all other rights and memory in the message.
5658 * Conditions:
5659 * Nothing locked.
5660 */
5661
void
ipc_kmsg_copyout_dest(
	ipc_kmsg_t      kmsg,
	ipc_space_t     space)
{
	mach_msg_bits_t mbits;
	ipc_object_t dest;
	ipc_object_t reply;
	ipc_object_t voucher;
	mach_msg_type_name_t dest_type;
	mach_msg_type_name_t reply_type;
	mach_msg_type_name_t voucher_type;
	mach_port_name_t dest_name, reply_name, voucher_name;

	ikm_validate_sig(kmsg);

	mbits = kmsg->ikm_header->msgh_bits;
	dest = ip_to_object(kmsg->ikm_header->msgh_remote_port);
	reply = ip_to_object(kmsg->ikm_header->msgh_local_port);
	voucher = ip_to_object(kmsg->ikm_voucher);
	voucher_name = kmsg->ikm_header->msgh_voucher_port;
	dest_type = MACH_MSGH_BITS_REMOTE(mbits);
	reply_type = MACH_MSGH_BITS_LOCAL(mbits);
	voucher_type = MACH_MSGH_BITS_VOUCHER(mbits);

	assert(IO_VALID(dest));

	ipc_importance_assert_clean(kmsg);

	/* copy out only the destination; a dead destination becomes MACH_PORT_DEAD */
	io_lock(dest);
	if (io_active(dest)) {
		ipc_object_copyout_dest(space, dest, dest_type, &dest_name);
		/* dest is unlocked */
	} else {
		io_unlock(dest);
		io_release(dest);
		dest_name = MACH_PORT_DEAD;
	}

	/* the reply right is destroyed, not copied out */
	if (IO_VALID(reply)) {
		ipc_object_destroy(reply, reply_type);
		reply_name = MACH_PORT_NULL;
	} else {
		reply_name = CAST_MACH_PORT_TO_NAME(reply);
	}

	/* any voucher right is likewise destroyed */
	if (IO_VALID(voucher)) {
		assert(voucher_type == MACH_MSG_TYPE_MOVE_SEND);

		kmsg->ikm_voucher = IP_NULL;
		ipc_object_destroy(voucher, voucher_type);
		voucher_name = MACH_PORT_NULL;
	}

	/* rewrite the header with dest/reply reversed, as a receiver sees them */
	kmsg->ikm_header->msgh_bits = MACH_MSGH_BITS_SET(reply_type, dest_type,
	    voucher_type, mbits);
	kmsg->ikm_header->msgh_local_port = CAST_MACH_NAME_TO_PORT(dest_name);
	kmsg->ikm_header->msgh_remote_port = CAST_MACH_NAME_TO_PORT(reply_name);
	kmsg->ikm_header->msgh_voucher_port = voucher_name;

	/* destroy all rights and memory carried in the body */
	if (mbits & MACH_MSGH_BITS_COMPLEX) {
		mach_msg_body_t *body;

		body = (mach_msg_body_t *) (kmsg->ikm_header + 1);
		ipc_kmsg_clean_body(kmsg, body->msgh_descriptor_count,
		    (mach_msg_descriptor_t *)(body + 1));
	}
}
5730
5731 /*
5732 * Routine: ipc_kmsg_copyout_to_kernel
5733 * Purpose:
5734 * Copies out the destination and reply ports in the message.
5735 * Leaves all other rights and memory in the message alone.
5736 * Conditions:
5737 * Nothing locked.
5738 *
5739 * Derived from ipc_kmsg_copyout_dest.
5740 * Use by mach_msg_rpc_from_kernel (which used to use copyout_dest).
5741 * We really do want to save rights and memory.
5742 */
5743
void
ipc_kmsg_copyout_to_kernel(
	ipc_kmsg_t      kmsg,
	ipc_space_t     space)
{
	ipc_object_t dest;
	mach_port_t reply;
	mach_msg_type_name_t dest_type;
	mach_msg_type_name_t reply_type;
	mach_port_name_t dest_name;

	ikm_validate_sig(kmsg);

	dest = ip_to_object(kmsg->ikm_header->msgh_remote_port);
	reply = kmsg->ikm_header->msgh_local_port;
	dest_type = MACH_MSGH_BITS_REMOTE(kmsg->ikm_header->msgh_bits);
	reply_type = MACH_MSGH_BITS_LOCAL(kmsg->ikm_header->msgh_bits);

	assert(IO_VALID(dest));

	/* copy out the destination; a dead destination becomes MACH_PORT_DEAD */
	io_lock(dest);
	if (io_active(dest)) {
		ipc_object_copyout_dest(space, dest, dest_type, &dest_name);
		/* dest is unlocked */
	} else {
		io_unlock(dest);
		io_release(dest);
		dest_name = MACH_PORT_DEAD;
	}

	/*
	 * While MIG kernel users don't receive vouchers, the
	 * msgh_voucher_port field is intended to be round-tripped through the
	 * kernel if there is no voucher disposition set. Here we check for a
	 * non-zero voucher disposition, and consume the voucher send right as
	 * there is no possible way to specify MACH_RCV_VOUCHER semantics.
	 */
	mach_msg_type_name_t voucher_type;
	voucher_type = MACH_MSGH_BITS_VOUCHER(kmsg->ikm_header->msgh_bits);
	if (voucher_type != MACH_MSGH_BITS_ZERO) {
		assert(voucher_type == MACH_MSG_TYPE_MOVE_SEND);
		/*
		 * someone managed to send this kernel routine a message with
		 * a voucher in it. Cleanup the reference in
		 * kmsg->ikm_voucher.
		 */
		if (IP_VALID(kmsg->ikm_voucher)) {
			ipc_port_release_send(kmsg->ikm_voucher);
		}
		kmsg->ikm_voucher = IP_NULL;
		kmsg->ikm_header->msgh_voucher_port = 0;
	}

	/* rewrite the header with dest/reply reversed, as the receiver sees them */
	kmsg->ikm_header->msgh_bits =
	    (MACH_MSGH_BITS_OTHER(kmsg->ikm_header->msgh_bits) |
	    MACH_MSGH_BITS(reply_type, dest_type));
	kmsg->ikm_header->msgh_local_port = CAST_MACH_NAME_TO_PORT(dest_name);
	kmsg->ikm_header->msgh_remote_port = reply;
}
5803
5804 #if IKM_SUPPORT_LEGACY
/*
 *	Routine:	ipc_kmsg_copyout_to_kernel_legacy
 *	Purpose:
 *		Legacy (32-bit layout) counterpart of
 *		ipc_kmsg_copyout_to_kernel: copies out the destination and
 *		reply ports, strips any voucher, then compacts every kernel
 *		descriptor into its smaller 32-bit representation in place
 *		and slides the header up against the compacted body.
 *	Conditions:
 *		Nothing locked.
 */
void
ipc_kmsg_copyout_to_kernel_legacy(
	ipc_kmsg_t      kmsg,
	ipc_space_t     space)
{
	ipc_object_t dest;
	mach_port_t reply;
	mach_msg_type_name_t dest_type;
	mach_msg_type_name_t reply_type;
	mach_port_name_t dest_name;

	ikm_validate_sig(kmsg);

	dest = ip_to_object(kmsg->ikm_header->msgh_remote_port);
	reply = kmsg->ikm_header->msgh_local_port;
	dest_type = MACH_MSGH_BITS_REMOTE(kmsg->ikm_header->msgh_bits);
	reply_type = MACH_MSGH_BITS_LOCAL(kmsg->ikm_header->msgh_bits);

	assert(IO_VALID(dest));

	/* copy out the destination; a dead destination becomes MACH_PORT_DEAD */
	io_lock(dest);
	if (io_active(dest)) {
		ipc_object_copyout_dest(space, dest, dest_type, &dest_name);
		/* dest is unlocked */
	} else {
		io_unlock(dest);
		io_release(dest);
		dest_name = MACH_PORT_DEAD;
	}

	/* kernel receivers cannot accept vouchers; consume any send right */
	mach_msg_type_name_t voucher_type;
	voucher_type = MACH_MSGH_BITS_VOUCHER(kmsg->ikm_header->msgh_bits);
	if (voucher_type != MACH_MSGH_BITS_ZERO) {
		assert(voucher_type == MACH_MSG_TYPE_MOVE_SEND);
		assert(IP_VALID(kmsg->ikm_voucher));
		/*
		 * someone managed to send this kernel routine a message with
		 * a voucher in it. Cleanup the reference in
		 * kmsg->ikm_voucher.
		 */
		ipc_port_release_send(kmsg->ikm_voucher);
		kmsg->ikm_voucher = IP_NULL;
		kmsg->ikm_header->msgh_voucher_port = 0;
	}

	/* rewrite the header with dest/reply reversed, as the receiver sees them */
	kmsg->ikm_header->msgh_bits =
	    (MACH_MSGH_BITS_OTHER(kmsg->ikm_header->msgh_bits) |
	    MACH_MSGH_BITS(reply_type, dest_type));
	kmsg->ikm_header->msgh_local_port = CAST_MACH_NAME_TO_PORT(dest_name);
	kmsg->ikm_header->msgh_remote_port = reply;

	/*
	 * Compact the descriptors in place.  saddr starts at the LAST
	 * kernel descriptor and daddr at the last legacy slot (packed at
	 * the end of the descriptor area); both walk backwards so each
	 * smaller legacy descriptor is written without clobbering kernel
	 * descriptors not yet converted.
	 */
	mach_msg_descriptor_t *saddr;
	mach_msg_legacy_descriptor_t *daddr;
	mach_msg_type_number_t i, count = ((mach_msg_base_t *)kmsg->ikm_header)->body.msgh_descriptor_count;
	saddr = (mach_msg_descriptor_t *) (((mach_msg_base_t *)kmsg->ikm_header) + 1);
	saddr = &saddr[count - 1];
	daddr = (mach_msg_legacy_descriptor_t *)&saddr[count];
	daddr--;

	vm_offset_t dsc_adjust = 0;

	for (i = 0; i < count; i++, saddr--, daddr--) {
		switch (saddr->type.type) {
		case MACH_MSG_PORT_DESCRIPTOR: {
			mach_msg_port_descriptor_t *dsc = &saddr->port;
			mach_msg_legacy_port_descriptor_t *dest_dsc = &daddr->port;

			mach_port_t name = dsc->name;
			mach_msg_type_name_t disposition = dsc->disposition;

			dest_dsc->name = CAST_MACH_PORT_TO_NAME(name);
			dest_dsc->disposition = disposition;
			dest_dsc->type = MACH_MSG_PORT_DESCRIPTOR;
			break;
		}
		case MACH_MSG_OOL_VOLATILE_DESCRIPTOR:
		case MACH_MSG_OOL_DESCRIPTOR: {
			/* The sender should supply ready-made memory, i.e. a vm_map_copy_t
			 * so we don't need to do anything special. */

			mach_msg_ool_descriptor_t *source_dsc = (typeof(source_dsc)) & saddr->out_of_line;

			mach_msg_ool_descriptor32_t *dest_dsc = &daddr->out_of_line32;

			vm_offset_t address = (vm_offset_t)source_dsc->address;
			vm_size_t size = source_dsc->size;
			boolean_t deallocate = source_dsc->deallocate;
			mach_msg_copy_options_t copy = source_dsc->copy;
			mach_msg_descriptor_type_t type = source_dsc->type;

			dest_dsc->address = address;
			dest_dsc->size = size;
			dest_dsc->deallocate = deallocate;
			dest_dsc->copy = copy;
			dest_dsc->type = type;
			break;
		}
		case MACH_MSG_OOL_PORTS_DESCRIPTOR: {
			mach_msg_ool_ports_descriptor_t *source_dsc = (typeof(source_dsc)) & saddr->ool_ports;

			mach_msg_ool_ports_descriptor32_t *dest_dsc = &daddr->ool_ports32;

			vm_offset_t address = (vm_offset_t)source_dsc->address;
			vm_size_t port_count = source_dsc->count;
			boolean_t deallocate = source_dsc->deallocate;
			mach_msg_copy_options_t copy = source_dsc->copy;
			mach_msg_descriptor_type_t type = source_dsc->type;

			dest_dsc->address = address;
			dest_dsc->count = port_count;
			dest_dsc->deallocate = deallocate;
			dest_dsc->copy = copy;
			dest_dsc->type = type;
			break;
		}
		case MACH_MSG_GUARDED_PORT_DESCRIPTOR: {
			/* guard state is dropped for kernel/legacy receivers */
			mach_msg_guarded_port_descriptor_t *source_dsc = (typeof(source_dsc)) & saddr->guarded_port;
			mach_msg_guarded_port_descriptor32_t *dest_dsc = &daddr->guarded_port32;

			dest_dsc->name = CAST_MACH_PORT_TO_NAME(source_dsc->name);
			dest_dsc->disposition = source_dsc->disposition;
			dest_dsc->flags = 0;
			dest_dsc->type = MACH_MSG_GUARDED_PORT_DESCRIPTOR;
			dest_dsc->context = 0;
			break;
		}
		default: {
#if MACH_ASSERT
			panic("ipc_kmsg_copyout_to_kernel_legacy: bad descriptor");
#endif /* MACH_ASSERT */
		}
		}
	}

	if (count) {
		/* each descriptor shrank by 4 bytes; slide the header up */
		dsc_adjust = 4 * count;
		memmove((char *)((vm_offset_t)kmsg->ikm_header + dsc_adjust), kmsg->ikm_header, sizeof(mach_msg_base_t));
		kmsg->ikm_header = (mach_msg_header_t *)((vm_offset_t)kmsg->ikm_header + dsc_adjust);
		/* Update the message size for the smaller user representation */
		kmsg->ikm_header->msgh_size -= dsc_adjust;
	}
}
5947 #endif /* IKM_SUPPORT_LEGACY */
5948
5949 #ifdef __arm64__
5950 /*
5951 * Just sets those parts of the trailer that aren't set up at allocation time.
5952 */
5953 static void
5954 ipc_kmsg_munge_trailer(mach_msg_max_trailer_t *in, void *_out, boolean_t is64bit)
5955 {
5956 if (is64bit) {
5957 mach_msg_max_trailer64_t *out = (mach_msg_max_trailer64_t*)_out;
5958 out->msgh_seqno = in->msgh_seqno;
5959 out->msgh_context = in->msgh_context;
5960 out->msgh_trailer_size = in->msgh_trailer_size;
5961 out->msgh_ad = in->msgh_ad;
5962 } else {
5963 mach_msg_max_trailer32_t *out = (mach_msg_max_trailer32_t*)_out;
5964 out->msgh_seqno = in->msgh_seqno;
5965 out->msgh_context = (mach_port_context32_t)in->msgh_context;
5966 out->msgh_trailer_size = in->msgh_trailer_size;
5967 out->msgh_ad = in->msgh_ad;
5968 }
5969 }
5970 #endif /* __arm64__ */
5971
5972 mach_msg_trailer_size_t
5973 ipc_kmsg_trailer_size(
5974 mach_msg_option_t option,
5975 __unused thread_t thread)
5976 {
5977 if (!(option & MACH_RCV_TRAILER_MASK)) {
5978 return MACH_MSG_TRAILER_MINIMUM_SIZE;
5979 } else {
5980 return REQUESTED_TRAILER_SIZE(thread_is_64bit_addr(thread), option);
5981 }
5982 }
5983
/*
 *	Routine:	ipc_kmsg_add_trailer
 *	Purpose:
 *		Fills in the trailer fields that are not set up at message
 *		allocation time: sequence number, context, requested trailer
 *		size, and (unless minimal_trailer) the filter policy id and
 *		sender labels, as selected by the MACH_RCV_TRAILER_* bits in
 *		option.  On arm64 the trailer is staged in a local scratch
 *		copy and munged back into the receiver-sized layout.
 */
void
ipc_kmsg_add_trailer(ipc_kmsg_t kmsg, ipc_space_t space __unused,
    mach_msg_option_t option, __unused thread_t thread,
    mach_port_seqno_t seqno, boolean_t minimal_trailer,
    mach_vm_offset_t context)
{
	mach_msg_max_trailer_t *trailer;

#ifdef __arm64__
	mach_msg_max_trailer_t tmp_trailer; /* This accommodates U64, and we'll munge */
	/* the real trailer sits just past the (rounded) message body */
	void *real_trailer_out = (void*)(mach_msg_max_trailer_t *)
	    ((vm_offset_t)kmsg->ikm_header +
	    mach_round_msg(kmsg->ikm_header->msgh_size));

	/*
	 * Populate scratch with initial values set up at message allocation time.
	 * After, we reinterpret the space in the message as the right type
	 * of trailer for the address space in question.
	 */
	bcopy(real_trailer_out, &tmp_trailer, MAX_TRAILER_SIZE);
	trailer = &tmp_trailer;
#else /* __arm64__ */
	(void)thread;
	trailer = (mach_msg_max_trailer_t *)
	    ((vm_offset_t)kmsg->ikm_header +
	    mach_round_msg(kmsg->ikm_header->msgh_size));
#endif /* __arm64__ */

	if (!(option & MACH_RCV_TRAILER_MASK)) {
		/* nothing requested; the preset fields already suffice */
		return;
	}

	trailer->msgh_seqno = seqno;
	trailer->msgh_context = context;
	trailer->msgh_trailer_size = REQUESTED_TRAILER_SIZE(thread_is_64bit_addr(thread), option);

	if (minimal_trailer) {
		goto done;
	}

	if (GET_RCV_ELEMENTS(option) >= MACH_RCV_TRAILER_AV) {
		trailer->msgh_ad = kmsg->ikm_filter_policy_id;
	}

	/*
	 * The ipc_kmsg_t holds a reference to the label of a label
	 * handle, not the port. We must get a reference to the port
	 * and a send right to copyout to the receiver.
	 */

	if (option & MACH_RCV_TRAILER_ELEMENTS(MACH_RCV_TRAILER_LABELS)) {
		trailer->msgh_labels.sender = 0;
	}

done:
#ifdef __arm64__
	/* write the scratch copy back in the receiver-sized layout */
	ipc_kmsg_munge_trailer(trailer, real_trailer_out, thread_is_64bit_addr(thread));
#endif /* __arm64__ */
	return;
}
6044
6045 mach_msg_header_t *
6046 ipc_kmsg_msg_header(ipc_kmsg_t kmsg)
6047 {
6048 if (NULL == kmsg) {
6049 return NULL;
6050 }
6051 return kmsg->ikm_header;
6052 }