/*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
*
- * @APPLE_LICENSE_HEADER_START@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License"). You may not use this file except in compliance with the
- * License. Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
*
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
- * License for the specific language governing rights and limitations
- * under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
*
- * @APPLE_LICENSE_HEADER_END@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
/*
* @OSF_COPYRIGHT@
* any improvements or extensions that they make and grant Carnegie Mellon
* the rights to redistribute these changes.
*/
+/*
+ * NOTICE: This file was modified by McAfee Research in 2004 to introduce
+ * support for mandatory and extensible security protections. This notice
+ * is included in support of clause 2.2 (b) of the Apple Public License,
+ * Version 2.0.
+ * Copyright (c) 2005 SPARTA, Inc.
+ */
/*
*/
/*
* Operations on kernel messages.
*/
-#include <cpus.h>
#include <norma_vm.h>
+#include <mach/mach_types.h>
#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/message.h>
#include <mach/port.h>
+#include <mach/vm_map.h>
+#include <mach/mach_vm.h>
#include <mach/vm_statistics.h>
+
+#include <kern/kern_types.h>
#include <kern/assert.h>
+#include <kern/debug.h>
+#include <kern/ipc_kobject.h>
#include <kern/kalloc.h>
+#include <kern/zalloc.h>
+#include <kern/processor.h>
#include <kern/thread.h>
#include <kern/sched_prim.h>
#include <kern/spl.h>
#include <kern/misc_protos.h>
#include <kern/counters.h>
+#include <kern/cpu_data.h>
+
+#include <machine/machlimits.h>
+
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
+
#include <ipc/port.h>
+#include <ipc/ipc_types.h>
#include <ipc/ipc_entry.h>
#include <ipc/ipc_kmsg.h>
#include <ipc/ipc_notify.h>
#include <ipc/ipc_hash.h>
#include <ipc/ipc_table.h>
+#include <security/mac_mach_internal.h>
+
#include <string.h>
+#ifdef ppc
+#include <ppc/Firmware.h>
+#include <ppc/low_trace.h>
+#endif
+
+#if DEBUG
+#define DEBUG_MSGS_K64 1
+#endif
+
+#pragma pack(4)
+
+typedef struct
+{
+ mach_msg_bits_t msgh_bits;
+ mach_msg_size_t msgh_size;
+ uint32_t msgh_remote_port;
+ uint32_t msgh_local_port;
+ mach_msg_size_t msgh_reserved;
+ mach_msg_id_t msgh_id;
+} mach_msg_legacy_header_t;
+
+typedef struct
+{
+ mach_msg_legacy_header_t header;
+ mach_msg_body_t body;
+} mach_msg_legacy_base_t;
+
+typedef struct
+{
+ mach_port_name_t name;
+ mach_msg_size_t pad1;
+ uint32_t pad2 : 16;
+ mach_msg_type_name_t disposition : 8;
+ mach_msg_descriptor_type_t type : 8;
+} mach_msg_legacy_port_descriptor_t;
+
+
+typedef union
+{
+ mach_msg_legacy_port_descriptor_t port;
+ mach_msg_ool_descriptor32_t out_of_line32;
+ mach_msg_ool_ports_descriptor32_t ool_ports32;
+ mach_msg_type_descriptor_t type;
+} mach_msg_legacy_descriptor_t;
+
+#pragma pack()
+
+#define LEGACY_HEADER_SIZE_DELTA ((mach_msg_size_t)(sizeof(mach_msg_header_t) - sizeof(mach_msg_legacy_header_t)))
+// END LP64 fixes
+
+
+#if DEBUG_MSGS_K64
+extern void ipc_pset_print64(
+ ipc_pset_t pset);
+
+extern void ipc_kmsg_print64(
+ ipc_kmsg_t kmsg,
+ const char *str);
+
+extern void ipc_msg_print64(
+ mach_msg_header_t *msgh);
+
+extern ipc_port_t ipc_name_to_data64(
+ task_t task,
+ mach_port_name_t name);
+
+/*
+ * Forward declarations
+ */
+void ipc_msg_print_untyped64(
+ mach_msg_body_t *body);
+
+const char * ipc_type_name64(
+ int type_name,
+ boolean_t received);
+
+void ipc_print_type_name64(
+ int type_name);
+
+const char *
+msgh_bit_decode64(
+ mach_msg_bits_t bit);
+
+const char *
+mm_copy_options_string64(
+ mach_msg_copy_options_t option);
+
+void db_print_msg_uid64(mach_msg_header_t *);
+
+static void
+ipc_msg_body_print64(void *body, int size)
+{
+ uint32_t *word = (uint32_t *) body;
+ uint32_t *end = (uint32_t *)(((uintptr_t) body) + size
+ - sizeof(mach_msg_header_t));
+ int i;
+
+ kprintf(" body(%p-%p):\n %p: ", body, end, word);
+ for (;;) {
+ for (i = 0; i < 8; i++, word++) {
+ if (word >= end) {
+ kprintf("\n");
+ return;
+ }
+ kprintf("%08x ", *word);
+ }
+ kprintf("\n %p: ", word);
+ }
+}
+
+
+const char *
+ipc_type_name64(
+ int type_name,
+ boolean_t received)
+{
+ switch (type_name) {
+ case MACH_MSG_TYPE_PORT_NAME:
+ return "port_name";
+
+ case MACH_MSG_TYPE_MOVE_RECEIVE:
+ if (received) {
+ return "port_receive";
+ } else {
+ return "move_receive";
+ }
+
+ case MACH_MSG_TYPE_MOVE_SEND:
+ if (received) {
+ return "port_send";
+ } else {
+ return "move_send";
+ }
+
+ case MACH_MSG_TYPE_MOVE_SEND_ONCE:
+ if (received) {
+ return "port_send_once";
+ } else {
+ return "move_send_once";
+ }
+
+ case MACH_MSG_TYPE_COPY_SEND:
+ return "copy_send";
+
+ case MACH_MSG_TYPE_MAKE_SEND:
+ return "make_send";
+
+ case MACH_MSG_TYPE_MAKE_SEND_ONCE:
+ return "make_send_once";
+
+ default:
+ return (char *) 0;
+ }
+}
+
+void
+ipc_print_type_name64(
+ int type_name)
+{
+ const char *name = ipc_type_name64(type_name, TRUE);
+ if (name) {
+ kprintf("%s", name);
+ } else {
+ kprintf("type%d", type_name);
+ }
+}
+
+/*
+ * ipc_kmsg_print64 [ debug ]
+ */
+void
+ipc_kmsg_print64(
+ ipc_kmsg_t kmsg,
+ const char *str)
+{
+ kprintf("%s kmsg=%p:\n", str, kmsg);
+ kprintf(" next=%p, prev=%p, size=%d",
+ kmsg->ikm_next,
+ kmsg->ikm_prev,
+ kmsg->ikm_size);
+ kprintf("\n");
+ ipc_msg_print64(kmsg->ikm_header);
+}
+
+const char *
+msgh_bit_decode64(
+ mach_msg_bits_t bit)
+{
+ switch (bit) {
+ case MACH_MSGH_BITS_COMPLEX: return "complex";
+ case MACH_MSGH_BITS_CIRCULAR: return "circular";
+ default: return (char *) 0;
+ }
+}
+
+/*
+ * ipc_msg_print64 [ debug ]
+ */
+void
+ipc_msg_print64(
+ mach_msg_header_t *msgh)
+{
+ mach_msg_bits_t mbits;
+ unsigned int bit, i;
+ const char *bit_name;
+ int needs_comma;
+
+ mbits = msgh->msgh_bits;
+ kprintf(" msgh_bits=0x%x: l=0x%x,r=0x%x\n",
+ mbits,
+ MACH_MSGH_BITS_LOCAL(msgh->msgh_bits),
+ MACH_MSGH_BITS_REMOTE(msgh->msgh_bits));
+
+ mbits = MACH_MSGH_BITS_OTHER(mbits) & MACH_MSGH_BITS_USED;
+ kprintf(" decoded bits: ");
+ needs_comma = 0;
+ for (i = 0, bit = 1; i < sizeof(mbits) * 8; ++i, bit <<= 1) {
+ if ((mbits & bit) == 0)
+ continue;
+ bit_name = msgh_bit_decode64((mach_msg_bits_t)bit);
+ if (bit_name)
+ kprintf("%s%s", needs_comma ? "," : "", bit_name);
+ else
+ kprintf("%sunknown(0x%x),", needs_comma ? "," : "", bit);
+ ++needs_comma;
+ }
+ if (msgh->msgh_bits & ~MACH_MSGH_BITS_USED) {
+ kprintf("%sunused=0x%x,", needs_comma ? "," : "",
+ msgh->msgh_bits & ~MACH_MSGH_BITS_USED);
+ }
+ kprintf("\n");
+
+ needs_comma = 1;
+ if (msgh->msgh_remote_port) {
+ kprintf(" remote=%p(", msgh->msgh_remote_port);
+ ipc_print_type_name64(MACH_MSGH_BITS_REMOTE(msgh->msgh_bits));
+ kprintf(")");
+ } else {
+ kprintf(" remote=null");
+ }
+
+ if (msgh->msgh_local_port) {
+ kprintf("%slocal=%p(", needs_comma ? "," : "",
+ msgh->msgh_local_port);
+ ipc_print_type_name64(MACH_MSGH_BITS_LOCAL(msgh->msgh_bits));
+ kprintf(")\n");
+ } else {
+ kprintf("local=null\n");
+ }
+
+ kprintf(" msgh_id=%d, size=%d\n",
+ msgh->msgh_id,
+ msgh->msgh_size);
+
+ if (mbits & MACH_MSGH_BITS_COMPLEX) {
+ ipc_msg_print_untyped64((mach_msg_body_t *) (msgh + 1));
+ }
+
+ ipc_msg_body_print64((void *)(msgh + 1), msgh->msgh_size);
+}
+
+
+const char *
+mm_copy_options_string64(
+ mach_msg_copy_options_t option)
+{
+ const char *name;
+
+ switch (option) {
+ case MACH_MSG_PHYSICAL_COPY:
+ name = "PHYSICAL";
+ break;
+ case MACH_MSG_VIRTUAL_COPY:
+ name = "VIRTUAL";
+ break;
+ case MACH_MSG_OVERWRITE:
+ name = "OVERWRITE";
+ break;
+ case MACH_MSG_ALLOCATE:
+ name = "ALLOCATE";
+ break;
+ case MACH_MSG_KALLOC_COPY_T:
+ name = "KALLOC_COPY_T";
+ break;
+ default:
+ name = "unknown";
+ break;
+ }
+ return name;
+}
+
+void
+ipc_msg_print_untyped64(
+ mach_msg_body_t *body)
+{
+ mach_msg_descriptor_t *saddr, *send;
+ mach_msg_descriptor_type_t type;
+
+ kprintf(" %d descriptors: \n", body->msgh_descriptor_count);
+
+ saddr = (mach_msg_descriptor_t *) (body + 1);
+ send = saddr + body->msgh_descriptor_count;
+
+ for ( ; saddr < send; saddr++ ) {
+
+ type = saddr->type.type;
+
+ switch (type) {
+
+ case MACH_MSG_PORT_DESCRIPTOR: {
+ mach_msg_port_descriptor_t *dsc;
+
+ dsc = &saddr->port;
+ kprintf(" PORT name = %p disp = ", dsc->name);
+ ipc_print_type_name64(dsc->disposition);
+ kprintf("\n");
+ break;
+ }
+ case MACH_MSG_OOL_VOLATILE_DESCRIPTOR:
+ case MACH_MSG_OOL_DESCRIPTOR: {
+ mach_msg_ool_descriptor_t *dsc;
+
+ dsc = (mach_msg_ool_descriptor_t *) &saddr->out_of_line;
+ kprintf(" OOL%s addr = %p size = 0x%x copy = %s %s\n",
+ type == MACH_MSG_OOL_DESCRIPTOR ? "" : " VOLATILE",
+ dsc->address, dsc->size,
+ mm_copy_options_string64(dsc->copy),
+ dsc->deallocate ? "DEALLOC" : "");
+ break;
+ }
+ case MACH_MSG_OOL_PORTS_DESCRIPTOR : {
+ mach_msg_ool_ports_descriptor_t *dsc;
+
+ dsc = (mach_msg_ool_ports_descriptor_t *) &saddr->ool_ports;
+
+ kprintf(" OOL_PORTS addr = %p count = 0x%x ",
+ dsc->address, dsc->count);
+ kprintf("disp = ");
+ ipc_print_type_name64(dsc->disposition);
+ kprintf(" copy = %s %s\n",
+ mm_copy_options_string64(dsc->copy),
+ dsc->deallocate ? "DEALLOC" : "");
+ break;
+ }
+
+ default: {
+ kprintf(" UNKNOWN DESCRIPTOR 0x%x\n", type);
+ break;
+ }
+ }
+ }
+}
+
+#define DEBUG_IPC_KMSG_PRINT(kmsg,string) \
+ if (DEBUG_KPRINT_SYSCALL_PREDICATE(DEBUG_KPRINT_SYSCALL_IPC_MASK)) { \
+ ipc_kmsg_print64(kmsg, string); \
+ }
+#define DEBUG_IPC_MSG_BODY_PRINT(body,size) \
+ if (DEBUG_KPRINT_SYSCALL_PREDICATE(DEBUG_KPRINT_SYSCALL_IPC_MASK)) { \
+ ipc_msg_body_print64(body,size);\
+ }
+#else /* !DEBUG_MSGS_K64 */
+#define DEBUG_IPC_KMSG_PRINT(kmsg,string)
+#define DEBUG_IPC_MSG_BODY_PRINT(body,size)
+#endif /* !DEBUG_MSGS_K64 */
+
extern vm_map_t ipc_kernel_copy_map;
extern vm_size_t ipc_kmsg_max_vm_space;
extern vm_size_t msg_ool_size_small;
#define MSG_OOL_SIZE_SMALL msg_ool_size_small
+#if defined(__LP64__)
+#define MAP_SIZE_DIFFERS(map) (map->max_offset < MACH_VM_MAX_ADDRESS)
+#define OTHER_OOL_DESCRIPTOR mach_msg_ool_descriptor32_t
+#define OTHER_OOL_PORTS_DESCRIPTOR mach_msg_ool_ports_descriptor32_t
+#else
+#define MAP_SIZE_DIFFERS(map) (map->max_offset > VM_MAX_ADDRESS)
+#define OTHER_OOL_DESCRIPTOR mach_msg_ool_descriptor64_t
+#define OTHER_OOL_PORTS_DESCRIPTOR mach_msg_ool_ports_descriptor64_t
+#endif
+
+#define DESC_SIZE_ADJUSTMENT ((mach_msg_size_t)(sizeof(mach_msg_ool_descriptor64_t) - \
+ sizeof(mach_msg_ool_descriptor32_t)))
+
+/* scatter list macros */
+
+#define SKIP_PORT_DESCRIPTORS(s, c) \
+MACRO_BEGIN \
+ if ((s) != MACH_MSG_DESCRIPTOR_NULL) { \
+ while ((c) > 0) { \
+ if ((s)->type.type != MACH_MSG_PORT_DESCRIPTOR) \
+ break; \
+ (s)++; (c)--; \
+ } \
+ if (c == 0) \
+ (s) = MACH_MSG_DESCRIPTOR_NULL; \
+ } \
+MACRO_END
+
+#define INCREMENT_SCATTER(s, c, d) \
+MACRO_BEGIN \
+ if ((s) != MACH_MSG_DESCRIPTOR_NULL) { \
+ s = (d) ? (mach_msg_descriptor_t *) \
+ ((OTHER_OOL_DESCRIPTOR *)(s) + 1) : \
+ (s + 1); \
+ (c)--; \
+ } \
+MACRO_END
+
+/* zone for cached ipc_kmsg_t structures */
+zone_t ipc_kmsg_zone;
/*
 * Forward declarations
 */
void ipc_kmsg_clean_body(
ipc_kmsg_t kmsg,
- mach_msg_type_number_t number);
+ mach_msg_type_number_t number,
+ mach_msg_descriptor_t *desc);
void ipc_kmsg_clean_partial(
ipc_kmsg_t kmsg,
mach_msg_type_number_t number,
+ mach_msg_descriptor_t *desc,
vm_offset_t paddr,
vm_size_t length);
-mach_msg_return_t ipc_kmsg_copyout_body(
- ipc_kmsg_t kmsg,
- ipc_space_t space,
- vm_map_t map,
- mach_msg_body_t *slist);
-
mach_msg_return_t ipc_kmsg_copyin_body(
ipc_kmsg_t kmsg,
ipc_space_t space,
vm_map_t map);
-void ikm_cache_init(void);
/*
* We keep a per-processor cache of kernel message buffers.
 * The cache saves the overhead/locking of using kalloc/kfree,
 * and it also uses less memory. Access to the cache doesn't
* require locking.
*/
-#define IKM_STASH 16 /* # of cache entries per cpu */
-ipc_kmsg_t ipc_kmsg_cache[ NCPUS ][ IKM_STASH ];
-unsigned int ipc_kmsg_cache_avail[NCPUS];
-
-/*
- * Routine: ipc_kmsg_init
- * Purpose:
- * Initialize the kmsg system. For each CPU, we need to
- * pre-stuff the kmsg cache.
- */
-void
-ipc_kmsg_init()
-{
- unsigned int cpu, i;
-
- for (cpu = 0; cpu < NCPUS; ++cpu) {
- for (i = 0; i < IKM_STASH; ++i) {
- ipc_kmsg_t kmsg;
-
- kmsg = (ipc_kmsg_t)
- kalloc(ikm_plus_overhead(IKM_SAVED_MSG_SIZE));
- if (kmsg == IKM_NULL)
- panic("ipc_kmsg_init");
- ikm_init(kmsg, IKM_SAVED_MSG_SIZE);
- ipc_kmsg_cache[cpu][i] = kmsg;
- }
- ipc_kmsg_cache_avail[cpu] = IKM_STASH;
- }
-}
/*
* Routine: ipc_kmsg_alloc
ipc_kmsg_alloc(
mach_msg_size_t msg_and_trailer_size)
{
+ mach_msg_size_t max_expanded_size;
ipc_kmsg_t kmsg;
- if ((msg_and_trailer_size <= IKM_SAVED_MSG_SIZE)) {
- unsigned int cpu, i;
+ /*
+	 * LP64 support -
+ * Pad the allocation in case we need to expand the
+ * message descrptors for user spaces with pointers larger than
+ * the kernel's own, or vice versa. We don't know how many descriptors
+ * there are yet, so just assume the whole body could be
+ * descriptors (if there could be any at all).
+ *
+ * The expansion space is left in front of the header,
+ * because it is easier to pull the header and descriptors
+ * forward as we process them than it is to push all the
+ * data backwards.
+ */
+ mach_msg_size_t size = msg_and_trailer_size - MAX_TRAILER_SIZE;
+ if (size > sizeof(mach_msg_base_t)) {
+ mach_msg_size_t max_desc = (mach_msg_size_t)(((size - sizeof(mach_msg_base_t)) /
+ sizeof(mach_msg_ool_descriptor32_t)) *
+ DESC_SIZE_ADJUSTMENT);
+ if (msg_and_trailer_size > MACH_MSG_SIZE_MAX - max_desc)
+ return IKM_NULL;
+
+ max_expanded_size = msg_and_trailer_size + max_desc;
+ } else
+ max_expanded_size = msg_and_trailer_size;
+
+ if (max_expanded_size > ikm_less_overhead(MACH_MSG_SIZE_MAX))
+ return IKM_NULL;
+ else if (max_expanded_size < IKM_SAVED_MSG_SIZE)
+ max_expanded_size = IKM_SAVED_MSG_SIZE; /* round up for ikm_cache */
+
+ if (max_expanded_size == IKM_SAVED_MSG_SIZE) {
+ struct ikm_cache *cache;
+ unsigned int i;
disable_preemption();
- cpu = cpu_number();
- if ((i = ipc_kmsg_cache_avail[cpu]) > 0) {
+ cache = &PROCESSOR_DATA(current_processor(), ikm_cache);
+ if ((i = cache->avail) > 0) {
assert(i <= IKM_STASH);
- kmsg = ipc_kmsg_cache[cpu][--i];
- ipc_kmsg_cache_avail[cpu] = i;
- ikm_check_init(kmsg, IKM_SAVED_MSG_SIZE);
+ kmsg = cache->entries[--i];
+ cache->avail = i;
enable_preemption();
+ ikm_check_init(kmsg, max_expanded_size);
+ ikm_set_header(kmsg, msg_and_trailer_size);
return (kmsg);
}
enable_preemption();
+ kmsg = (ipc_kmsg_t)zalloc(ipc_kmsg_zone);
+ } else {
+ kmsg = (ipc_kmsg_t)kalloc(ikm_plus_overhead(max_expanded_size));
}
- /* round up for ikm_cache */
- if (msg_and_trailer_size < IKM_SAVED_MSG_SIZE)
- msg_and_trailer_size = IKM_SAVED_MSG_SIZE;
-
- kmsg = (ipc_kmsg_t)kalloc(ikm_plus_overhead(msg_and_trailer_size));
if (kmsg != IKM_NULL) {
- ikm_init(kmsg, msg_and_trailer_size);
+ ikm_init(kmsg, max_expanded_size);
+ ikm_set_header(kmsg, msg_and_trailer_size);
}
+
return(kmsg);
}
mach_msg_size_t size = kmsg->ikm_size;
ipc_port_t port;
+#if CONFIG_MACF_MACH
+ if (kmsg->ikm_sender != NULL) {
+ task_deallocate(kmsg->ikm_sender);
+ kmsg->ikm_sender = NULL;
+ }
+#endif
+
/*
* Check to see if the message is bound to the port. If so,
* mark it not in use. If the port isn't already dead, then
- * leave the message associated with it. Otherwise, free it
- * (not to the cache).
+ * leave the message associated with it. Otherwise, free it.
*/
port = ikm_prealloc_inuse_port(kmsg);
if (port != IP_NULL) {
return;
}
ip_check_unlock(port); /* May be last reference */
- goto free_it;
}
/*
* Peek and see if it has to go back in the cache.
*/
- if (kmsg->ikm_size == IKM_SAVED_MSG_SIZE &&
- ipc_kmsg_cache_avail[cpu_number()] < IKM_STASH) {
- unsigned int cpu, i;
+ if (kmsg->ikm_size == IKM_SAVED_MSG_SIZE) {
+ struct ikm_cache *cache;
+ unsigned int i;
disable_preemption();
- cpu = cpu_number();
-
- i = ipc_kmsg_cache_avail[cpu];
- if (i < IKM_STASH) {
- assert(i >= 0);
- ipc_kmsg_cache[cpu][i] = kmsg;
- ipc_kmsg_cache_avail[cpu] = i + 1;
+ cache = &PROCESSOR_DATA(current_processor(), ikm_cache);
+ if ((i = cache->avail) < IKM_STASH) {
+ cache->entries[i] = kmsg;
+ cache->avail = i + 1;
enable_preemption();
return;
}
enable_preemption();
+ zfree(ipc_kmsg_zone, kmsg);
+ return;
}
-
- free_it:
- kfree((vm_offset_t) kmsg, ikm_plus_overhead(size));
+ kfree(kmsg, ikm_plus_overhead(size));
}
prev->ikm_next = next;
}
/* XXX Temporary debug logic */
- assert(kmsg->ikm_next = IKM_BOGUS);
- assert(kmsg->ikm_prev = IKM_BOGUS);
+ assert((kmsg->ikm_next = IKM_BOGUS) == IKM_BOGUS);
+ assert((kmsg->ikm_prev = IKM_BOGUS) == IKM_BOGUS);
}
/*
* Conditions:
* No locks held.
*/
-
+void
ipc_kmsg_destroy_dest(
ipc_kmsg_t kmsg)
{
ipc_port_t port;
- port = kmsg->ikm_header.msgh_remote_port;
+ port = kmsg->ikm_header->msgh_remote_port;
ipc_port_release(port);
- kmsg->ikm_header.msgh_remote_port = MACH_PORT_NULL;
+ kmsg->ikm_header->msgh_remote_port = MACH_PORT_NULL;
ipc_kmsg_destroy(kmsg);
}
void
ipc_kmsg_clean_body(
- ipc_kmsg_t kmsg,
- mach_msg_type_number_t number)
+ __unused ipc_kmsg_t kmsg,
+ mach_msg_type_number_t number,
+ mach_msg_descriptor_t *saddr)
{
- mach_msg_descriptor_t *saddr, *eaddr;
+ mach_msg_type_number_t i;
if ( number == 0 )
return;
- saddr = (mach_msg_descriptor_t *)
- ((mach_msg_base_t *) &kmsg->ikm_header + 1);
- eaddr = saddr + number;
-
- for ( ; saddr < eaddr; saddr++ ) {
+ for (i = 0 ; i < number; i++, saddr++ ) {
switch (saddr->type.type) {
case MACH_MSG_OOL_DESCRIPTOR : {
mach_msg_ool_descriptor_t *dsc;
- dsc = &saddr->out_of_line;
+ dsc = (mach_msg_ool_descriptor_t *)&saddr->out_of_line;
/*
* Destroy memory carried in the message
mach_msg_type_number_t j;
mach_msg_ool_ports_descriptor_t *dsc;
- dsc = &saddr->ool_ports;
+ dsc = (mach_msg_ool_ports_descriptor_t *)&saddr->ool_ports;
objects = (ipc_object_t *) dsc->address;
if (dsc->count == 0) {
assert(dsc->count != 0);
- kfree((vm_offset_t) dsc->address,
- (vm_size_t) dsc->count * sizeof(mach_port_name_t));
+ kfree(dsc->address,
+ (vm_size_t) dsc->count * sizeof(mach_port_t));
break;
}
default : {
ipc_kmsg_clean_partial(
ipc_kmsg_t kmsg,
mach_msg_type_number_t number,
+ mach_msg_descriptor_t *desc,
vm_offset_t paddr,
vm_size_t length)
{
ipc_object_t object;
- mach_msg_bits_t mbits = kmsg->ikm_header.msgh_bits;
+ mach_msg_bits_t mbits = kmsg->ikm_header->msgh_bits;
- object = (ipc_object_t) kmsg->ikm_header.msgh_remote_port;
+ object = (ipc_object_t) kmsg->ikm_header->msgh_remote_port;
assert(IO_VALID(object));
ipc_object_destroy(object, MACH_MSGH_BITS_REMOTE(mbits));
- object = (ipc_object_t) kmsg->ikm_header.msgh_local_port;
+ object = (ipc_object_t) kmsg->ikm_header->msgh_local_port;
if (IO_VALID(object))
ipc_object_destroy(object, MACH_MSGH_BITS_LOCAL(mbits));
(void) vm_deallocate(ipc_kernel_copy_map, paddr, length);
}
- ipc_kmsg_clean_body(kmsg, number);
+ ipc_kmsg_clean_body(kmsg, number, desc);
}
/*
ipc_object_t object;
mach_msg_bits_t mbits;
- mbits = kmsg->ikm_header.msgh_bits;
- object = (ipc_object_t) kmsg->ikm_header.msgh_remote_port;
+ mbits = kmsg->ikm_header->msgh_bits;
+ object = (ipc_object_t) kmsg->ikm_header->msgh_remote_port;
if (IO_VALID(object))
ipc_object_destroy(object, MACH_MSGH_BITS_REMOTE(mbits));
- object = (ipc_object_t) kmsg->ikm_header.msgh_local_port;
+ object = (ipc_object_t) kmsg->ikm_header->msgh_local_port;
if (IO_VALID(object))
ipc_object_destroy(object, MACH_MSGH_BITS_LOCAL(mbits));
if (mbits & MACH_MSGH_BITS_COMPLEX) {
mach_msg_body_t *body;
- body = (mach_msg_body_t *) (&kmsg->ikm_header + 1);
- ipc_kmsg_clean_body(kmsg, body->msgh_descriptor_count);
+ body = (mach_msg_body_t *) (kmsg->ikm_header + 1);
+ ipc_kmsg_clean_body(kmsg, body->msgh_descriptor_count,
+ (mach_msg_descriptor_t *)(body + 1));
+ }
+
+#if CONFIG_MACF_MACH
+ if (kmsg->ikm_sender != NULL) {
+ task_deallocate(kmsg->ikm_sender);
+ kmsg->ikm_sender = NULL;
}
+#endif
}
/*
IP_CLEAR_PREALLOC(port, kmsg);
}
+/*
+ * Routine: ipc_kmsg_prealloc
+ * Purpose:
+ *	Wrapper to ipc_kmsg_alloc() to account for
+ * header expansion requirements.
+ */
+ipc_kmsg_t
+ipc_kmsg_prealloc(mach_msg_size_t size)
+{
+#if defined(__LP64__)
+ if (size > MACH_MSG_SIZE_MAX - LEGACY_HEADER_SIZE_DELTA)
+ return IKM_NULL;
+
+ size += LEGACY_HEADER_SIZE_DELTA;
+#endif
+ return ipc_kmsg_alloc(size);
+}
+
+
/*
* Routine: ipc_kmsg_get
* Purpose:
mach_msg_return_t
ipc_kmsg_get(
- mach_msg_header_t *msg,
- mach_msg_size_t size,
+ mach_vm_address_t msg_addr,
+ mach_msg_size_t size,
ipc_kmsg_t *kmsgp)
{
mach_msg_size_t msg_and_trailer_size;
ipc_kmsg_t kmsg;
- mach_msg_format_0_trailer_t *trailer;
- mach_port_name_t dest_name;
- ipc_entry_t dest_entry;
- ipc_port_t dest_port;
+ mach_msg_max_trailer_t *trailer;
+ mach_msg_legacy_base_t legacy_base;
+ mach_msg_size_t len_copied;
+ legacy_base.body.msgh_descriptor_count = 0;
- if ((size < sizeof(mach_msg_header_t)) || (size & 3))
+ if ((size < sizeof(mach_msg_legacy_header_t)) || (size & 3))
return MACH_SEND_MSG_TOO_SMALL;
- msg_and_trailer_size = size + MAX_TRAILER_SIZE;
+ if (size > MACH_MSG_SIZE_MAX - MAX_TRAILER_SIZE)
+ return MACH_SEND_TOO_LARGE;
- kmsg = ipc_kmsg_alloc(msg_and_trailer_size);
+ if(size == sizeof(mach_msg_legacy_header_t))
+ len_copied = sizeof(mach_msg_legacy_header_t);
+ else
+ len_copied = sizeof(mach_msg_legacy_base_t);
+
+ if (copyinmsg(msg_addr, (char *)&legacy_base, len_copied))
+ return MACH_SEND_INVALID_DATA;
+
+ msg_addr += sizeof(legacy_base.header);
+#if defined(__LP64__)
+ size += LEGACY_HEADER_SIZE_DELTA;
+#endif
+ if (DEBUG_KPRINT_SYSCALL_PREDICATE(DEBUG_KPRINT_SYSCALL_IPC_MASK)) {
+ unsigned int j;
+ for (j=0; j<sizeof(legacy_base.header); j++) {
+ kprintf("%02x\n", ((unsigned char*)&legacy_base.header)[j]);
+ }
+ }
+ msg_and_trailer_size = size + MAX_TRAILER_SIZE;
+ kmsg = ipc_kmsg_alloc(msg_and_trailer_size);
if (kmsg == IKM_NULL)
return MACH_SEND_NO_BUFFER;
- if (copyinmsg((char *) msg, (char *) &kmsg->ikm_header, size)) {
+ kmsg->ikm_header->msgh_size = size;
+ kmsg->ikm_header->msgh_bits = legacy_base.header.msgh_bits;
+ kmsg->ikm_header->msgh_remote_port = CAST_MACH_NAME_TO_PORT(legacy_base.header.msgh_remote_port);
+ kmsg->ikm_header->msgh_local_port = CAST_MACH_NAME_TO_PORT(legacy_base.header.msgh_local_port);
+ kmsg->ikm_header->msgh_reserved = legacy_base.header.msgh_reserved;
+ kmsg->ikm_header->msgh_id = legacy_base.header.msgh_id;
+
+ DEBUG_KPRINT_SYSCALL_IPC("ipc_kmsg_get header:\n"
+ " size: 0x%.8x\n"
+ " bits: 0x%.8x\n"
+ " remote_port: %p\n"
+ " local_port: %p\n"
+ " reserved: 0x%.8x\n"
+ " id: %.8d\n",
+ kmsg->ikm_header->msgh_size,
+ kmsg->ikm_header->msgh_bits,
+ kmsg->ikm_header->msgh_remote_port,
+ kmsg->ikm_header->msgh_local_port,
+ kmsg->ikm_header->msgh_reserved,
+ kmsg->ikm_header->msgh_id);
+
+ if (copyinmsg(msg_addr, (char *)(kmsg->ikm_header + 1), size - (mach_msg_size_t)sizeof(mach_msg_header_t))) {
ipc_kmsg_free(kmsg);
return MACH_SEND_INVALID_DATA;
}
- kmsg->ikm_header.msgh_size = size;
+ if (DEBUG_KPRINT_SYSCALL_PREDICATE(DEBUG_KPRINT_SYSCALL_IPC_MASK))
+ {
+ kprintf("body: size: %lu\n", (size - sizeof(mach_msg_header_t)));
+ uint32_t i;
+ for(i=0;i*4 < (size - sizeof(mach_msg_header_t));i++)
+ {
+ kprintf("%.4x\n",((uint32_t *)(kmsg->ikm_header + 1))[i]);
+ }
+ }
+ DEBUG_IPC_KMSG_PRINT(kmsg, "ipc_kmsg_get()");
/*
	 * I reserve for the trailer the largest space (MAX_TRAILER_SIZE).
	 * However, the internal size field of the trailer (msgh_trailer_size)
	 * is initialized to the minimum (sizeof(mach_msg_trailer_t)), to optimize
	 * the cases where no implicit data is requested.
*/
- trailer = (mach_msg_format_0_trailer_t *) ((vm_offset_t)&kmsg->ikm_header + size);
- trailer->msgh_sender = current_thread()->top_act->task->sec_token;
+ trailer = (mach_msg_max_trailer_t *) ((vm_offset_t)kmsg->ikm_header + size);
+ trailer->msgh_sender = current_thread()->task->sec_token;
+ trailer->msgh_audit = current_thread()->task->audit_token;
trailer->msgh_trailer_type = MACH_MSG_TRAILER_FORMAT_0;
trailer->msgh_trailer_size = MACH_MSG_TRAILER_MINIMUM_SIZE;
+#ifdef ppc
+ if(trcWork.traceMask) dbgTrace(0x1100, (unsigned int)kmsg->ikm_header->msgh_id,
+ (unsigned int)kmsg->ikm_header->msgh_remote_port,
+ (unsigned int)kmsg->ikm_header->msgh_local_port, 0);
+#endif
+
+#if CONFIG_MACF_MACH
+ /* XXX - why do we zero sender labels here instead of in mach_msg()? */
+ task_t cur = current_task();
+ if (cur) {
+ task_reference(cur);
+ kmsg->ikm_sender = cur;
+ } else
+ trailer->msgh_labels.sender = 0;
+#else
+ trailer->msgh_labels.sender = 0;
+#endif
+
*kmsgp = kmsg;
return MACH_MSG_SUCCESS;
}
/*
* Routine: ipc_kmsg_get_from_kernel
* Purpose:
- * Allocates a kernel message buffer.
+ * First checks for a preallocated message
+ * reserved for kernel clients. If not found -
+ * allocates a new kernel message buffer.
* Copies a kernel message to the message buffer.
* Only resource errors are allowed.
* Conditions:
mach_msg_return_t
ipc_kmsg_get_from_kernel(
mach_msg_header_t *msg,
- mach_msg_size_t size,
+ mach_msg_size_t size,
ipc_kmsg_t *kmsgp)
{
ipc_kmsg_t kmsg;
mach_msg_size_t msg_and_trailer_size;
- mach_msg_format_0_trailer_t *trailer;
+ mach_msg_max_trailer_t *trailer;
ipc_port_t dest_port;
assert(size >= sizeof(mach_msg_header_t));
- assert((size & 3) == 0);
+// assert((size & 3) == 0);
assert(IP_VALID((ipc_port_t) msg->msgh_remote_port));
dest_port = (ipc_port_t)msg->msgh_remote_port;
* which cannot afford to wait.
*/
if (IP_PREALLOC(dest_port)) {
+ mach_msg_size_t max_desc = 0;
+
ip_lock(dest_port);
if (!ip_active(dest_port)) {
ip_unlock(dest_port);
}
assert(IP_PREALLOC(dest_port));
kmsg = dest_port->ip_premsg;
- if (msg_and_trailer_size > kmsg->ikm_size) {
- ip_unlock(dest_port);
- return MACH_SEND_TOO_LARGE;
- }
if (ikm_prealloc_inuse(kmsg)) {
ip_unlock(dest_port);
return MACH_SEND_NO_BUFFER;
}
+#if !defined(__LP64__)
+ if (msg->msgh_bits & MACH_MSGH_BITS_COMPLEX) {
+ assert(size > sizeof(mach_msg_base_t));
+ max_desc = ((mach_msg_base_t *)msg)->body.msgh_descriptor_count *
+ DESC_SIZE_ADJUSTMENT;
+ }
+#endif
+ if (msg_and_trailer_size > kmsg->ikm_size - max_desc) {
+ ip_unlock(dest_port);
+ return MACH_SEND_TOO_LARGE;
+ }
ikm_prealloc_set_inuse(kmsg, dest_port);
+ ikm_set_header(kmsg, msg_and_trailer_size);
ip_unlock(dest_port);
- } else {
+ }
+ else
+ {
kmsg = ipc_kmsg_alloc(msg_and_trailer_size);
if (kmsg == IKM_NULL)
return MACH_SEND_NO_BUFFER;
}
- (void) memcpy((void *) &kmsg->ikm_header, (const void *) msg, size);
+ (void) memcpy((void *) kmsg->ikm_header, (const void *) msg, size);
- kmsg->ikm_header.msgh_size = size;
+ kmsg->ikm_header->msgh_size = size;
/*
	 * I reserve for the trailer the largest space (MAX_TRAILER_SIZE).
	 * However, the internal size field of the trailer (msgh_trailer_size)
	 * is initialized to the minimum (sizeof(mach_msg_trailer_t)), to
	 * optimize the cases where no implicit data is requested.
*/
- trailer = (mach_msg_format_0_trailer_t *)
- ((vm_offset_t)&kmsg->ikm_header + size);
+ trailer = (mach_msg_max_trailer_t *)
+ ((vm_offset_t)kmsg->ikm_header + size);
trailer->msgh_sender = KERNEL_SECURITY_TOKEN;
+ trailer->msgh_audit = KERNEL_AUDIT_TOKEN;
trailer->msgh_trailer_type = MACH_MSG_TRAILER_FORMAT_0;
trailer->msgh_trailer_size = MACH_MSG_TRAILER_MINIMUM_SIZE;
+ trailer->msgh_labels.sender = 0;
+
+#if CONFIG_MACF_MACH
+ kmsg->ikm_sender = NULL;
+#endif
*kmsgp = kmsg;
return MACH_MSG_SUCCESS;
}
ipc_kmsg_send(
ipc_kmsg_t kmsg,
mach_msg_option_t option,
- mach_msg_timeout_t timeout)
+ mach_msg_timeout_t send_timeout)
{
- kern_return_t save_wait_result;
-
ipc_port_t port;
- port = (ipc_port_t) kmsg->ikm_header.msgh_remote_port;
+ mach_msg_return_t error = MACH_MSG_SUCCESS;
+ spl_t s;
+
+ port = (ipc_port_t) kmsg->ikm_header->msgh_remote_port;
assert(IP_VALID(port));
+ if ((option & ~(MACH_SEND_TIMEOUT|MACH_SEND_ALWAYS)) != 0)
+ printf("ipc_kmsg_send: bad option 0x%x\n", option);
+
ip_lock(port);
if (port->ip_receiver == ipc_space_kernel) {
if (kmsg == IKM_NULL)
return MACH_MSG_SUCCESS;
- port = (ipc_port_t) kmsg->ikm_header.msgh_remote_port;
+ port = (ipc_port_t) kmsg->ikm_header->msgh_remote_port;
assert(IP_VALID(port));
ip_lock(port);
/* fall thru with reply - same options */
ip_release(port);
ip_check_unlock(port);
- kmsg->ikm_header.msgh_remote_port = MACH_PORT_NULL;
+ kmsg->ikm_header->msgh_remote_port = MACH_PORT_NULL;
ipc_kmsg_destroy(kmsg);
return MACH_MSG_SUCCESS;
}
- if (kmsg->ikm_header.msgh_bits & MACH_MSGH_BITS_CIRCULAR) {
+ if (kmsg->ikm_header->msgh_bits & MACH_MSGH_BITS_CIRCULAR) {
ip_unlock(port);
/* don't allow the creation of a circular loop */
/*
* We have a valid message and a valid reference on the port.
- * we can unlock the port and call mqueue_send() on it's message
- * queue.
+ * we can unlock the port and call mqueue_send() on its message
+ * queue. Lock message queue while port is locked.
*/
+ s = splsched();
+ imq_lock(&port->ip_messages);
ip_unlock(port);
- return (ipc_mqueue_send(&port->ip_messages, kmsg, option, timeout));
+ error = ipc_mqueue_send(&port->ip_messages, kmsg, option,
+ send_timeout, s);
+
+ /*
+ * If the port has been destroyed while we wait, treat the message
+ * as a successful delivery (like we do for an inactive port).
+ */
+ if (error == MACH_SEND_INVALID_DEST) {
+ kmsg->ikm_header->msgh_remote_port = MACH_PORT_NULL;
+ ipc_kmsg_destroy(kmsg);
+ return MACH_MSG_SUCCESS;
+ }
+ return error;
}
/*
mach_msg_return_t
ipc_kmsg_put(
- mach_msg_header_t *msg,
+ mach_vm_address_t msg_addr,
ipc_kmsg_t kmsg,
mach_msg_size_t size)
{
mach_msg_return_t mr;
- if (copyoutmsg((const char *) &kmsg->ikm_header, (char *) msg, size))
+ DEBUG_IPC_KMSG_PRINT(kmsg, "ipc_kmsg_put()");
+
+
+ DEBUG_KPRINT_SYSCALL_IPC("ipc_kmsg_put header:\n"
+ " size: 0x%.8x\n"
+ " bits: 0x%.8x\n"
+ " remote_port: %p\n"
+ " local_port: %p\n"
+ " reserved: 0x%.8x\n"
+ " id: %.8d\n",
+ kmsg->ikm_header->msgh_size,
+ kmsg->ikm_header->msgh_bits,
+ kmsg->ikm_header->msgh_remote_port,
+ kmsg->ikm_header->msgh_local_port,
+ kmsg->ikm_header->msgh_reserved,
+ kmsg->ikm_header->msgh_id);
+
+#if defined(__LP64__)
+ if (current_task() != kernel_task) { /* don't if receiver expects fully-cooked in-kernel msg; ux_exception */
+ mach_msg_legacy_header_t *legacy_header =
+ (mach_msg_legacy_header_t *)((vm_offset_t)(kmsg->ikm_header) + LEGACY_HEADER_SIZE_DELTA);
+
+ mach_msg_bits_t bits = kmsg->ikm_header->msgh_bits;
+ mach_msg_size_t msg_size = kmsg->ikm_header->msgh_size;
+ mach_port_name_t remote_port = CAST_MACH_PORT_TO_NAME(kmsg->ikm_header->msgh_remote_port);
+ mach_port_name_t local_port = CAST_MACH_PORT_TO_NAME(kmsg->ikm_header->msgh_local_port);
+ mach_msg_size_t reserved = kmsg->ikm_header->msgh_reserved;
+ mach_msg_id_t id = kmsg->ikm_header->msgh_id;
+
+ legacy_header->msgh_id = id;
+ legacy_header->msgh_reserved = reserved;
+ legacy_header->msgh_local_port = local_port;
+ legacy_header->msgh_remote_port = remote_port;
+ legacy_header->msgh_size = msg_size - LEGACY_HEADER_SIZE_DELTA;
+ legacy_header->msgh_bits = bits;
+
+ size -= LEGACY_HEADER_SIZE_DELTA;
+ kmsg->ikm_header = (mach_msg_header_t *)legacy_header;
+ }
+#endif
+
+ if (DEBUG_KPRINT_SYSCALL_PREDICATE(DEBUG_KPRINT_SYSCALL_IPC_MASK)) {
+ kprintf("ipc_kmsg_put header+body: %d\n", (size));
+ uint32_t i;
+ for(i=0;i*4 < size;i++)
+ {
+ kprintf("%.4x\n",((uint32_t *)kmsg->ikm_header)[i]);
+ }
+ kprintf("type: %d\n", ((mach_msg_type_descriptor_t *)(((mach_msg_base_t *)kmsg->ikm_header)+1))->type);
+ }
+ if (copyoutmsg((const char *) kmsg->ikm_header, msg_addr, size))
mr = MACH_RCV_INVALID_DATA;
else
mr = MACH_MSG_SUCCESS;
ipc_kmsg_t kmsg,
mach_msg_size_t size)
{
- (void) memcpy((void *) msg, (const void *) &kmsg->ikm_header, size);
+ (void) memcpy((void *) msg, (const void *) kmsg->ikm_header, size);
ipc_kmsg_free(kmsg);
}
mach_port_name_t notify)
{
mach_msg_bits_t mbits = msg->msgh_bits & MACH_MSGH_BITS_USER;
- mach_port_name_t dest_name = (mach_port_name_t)msg->msgh_remote_port;
- mach_port_name_t reply_name = (mach_port_name_t)msg->msgh_local_port;
+ mach_port_name_t dest_name = CAST_MACH_PORT_TO_NAME(msg->msgh_remote_port);
+ mach_port_name_t reply_name = CAST_MACH_PORT_TO_NAME(msg->msgh_local_port);
kern_return_t kr;
mach_msg_type_name_t dest_type = MACH_MSGH_BITS_REMOTE(mbits);
ipc_object_t dest_port, reply_port;
ipc_port_t dest_soright, reply_soright;
ipc_port_t notify_port;
+ ipc_entry_t entry;
if ((mbits != msg->msgh_bits) ||
(!MACH_MSG_TYPE_PORT_ANY_SEND(dest_type)) ||
if (!MACH_PORT_VALID(dest_name))
goto invalid_dest;
- if (notify != MACH_PORT_NULL) {
- ipc_entry_t entry;
+#if CONFIG_MACF_MACH
+ /*
+ * We do the port send check here instead of in ipc_kmsg_send()
+ * because copying the header involves copying the port rights too
+ * and we need to do the send check before anything is actually copied.
+ */
+ entry = ipc_entry_lookup(space, dest_name);
+ if (entry != IE_NULL) {
+ int error = 0;
+ ipc_port_t port = (ipc_port_t) entry->ie_object;
+ if (port == IP_NULL)
+ goto invalid_dest;
+ ip_lock(port);
+ if (ip_active(port)) {
+ task_t self = current_task();
+ tasklabel_lock(self);
+ error = mac_port_check_send(&self->maclabel,
+ &port->ip_label);
+ tasklabel_unlock(self);
+ }
+ ip_unlock(port);
+ if (error != 0)
+ goto invalid_dest;
+ }
+#endif
+ if (notify != MACH_PORT_NULL) {
if ((entry = ipc_entry_lookup(space, notify)) == IE_NULL) {
is_write_unlock(space);
return MACH_SEND_INVALID_NOTIFY;
}
notify_port = (ipc_port_t) entry->ie_object;
- }
+ } else
+ notify_port = IP_NULL;
if (dest_name == reply_name) {
- ipc_entry_t entry;
mach_port_name_t name = dest_name;
/*
}
}
} else if (!MACH_PORT_VALID(reply_name)) {
- ipc_entry_t entry;
-
/*
* No reply port! This is an easy case
* to make atomic. Just copyin the destination.
if (IE_BITS_TYPE(entry->ie_bits) == MACH_PORT_TYPE_NONE)
ipc_entry_dealloc(space, dest_name, entry);
- reply_port = (ipc_object_t) reply_name;
+ reply_port = (ipc_object_t)CAST_MACH_NAME_TO_PORT(reply_name);
reply_soright = IP_NULL;
} else {
ipc_entry_t dest_entry, reply_entry;
- ipc_port_t saved_reply;
/*
* This is the tough case to make atomic.
return MACH_SEND_INVALID_DEST;
}
+mach_msg_descriptor_t *ipc_kmsg_copyin_port_descriptor(
+ volatile mach_msg_port_descriptor_t *dsc,
+ mach_msg_legacy_port_descriptor_t *user_dsc,
+ ipc_space_t space,
+ ipc_object_t dest,
+ ipc_kmsg_t kmsg,
+ mach_msg_return_t *mr);
+
+void ipc_print_type_name(
+ int type_name);
+mach_msg_descriptor_t *
+ipc_kmsg_copyin_port_descriptor(
+ volatile mach_msg_port_descriptor_t *dsc,
+ mach_msg_legacy_port_descriptor_t *user_dsc_in,
+ ipc_space_t space,
+ ipc_object_t dest,
+ ipc_kmsg_t kmsg,
+ mach_msg_return_t *mr)
+{
+ volatile mach_msg_legacy_port_descriptor_t *user_dsc = user_dsc_in;
+ mach_msg_type_name_t user_disp;
+ mach_msg_type_name_t result_disp;
+ mach_port_name_t name;
+ ipc_object_t object;
+
+ user_disp = user_dsc->disposition;
+ result_disp = ipc_object_copyin_type(user_disp);
+
+ name = (mach_port_name_t)user_dsc->name;
+ if (MACH_PORT_VALID(name)) {
+
+ kern_return_t kr = ipc_object_copyin(space, name, user_disp, &object);
+ if (kr != KERN_SUCCESS) {
+ *mr = MACH_SEND_INVALID_RIGHT;
+ return NULL;
+ }
+
+ if ((result_disp == MACH_MSG_TYPE_PORT_RECEIVE) &&
+ ipc_port_check_circularity((ipc_port_t) object,
+ (ipc_port_t) dest)) {
+ kmsg->ikm_header->msgh_bits |= MACH_MSGH_BITS_CIRCULAR;
+ }
+ dsc->name = (ipc_port_t) object;
+ } else {
+ dsc->name = CAST_MACH_NAME_TO_PORT(name);
+ }
+ dsc->disposition = result_disp;
+ dsc->type = MACH_MSG_PORT_DESCRIPTOR;
+
+ dsc->pad_end = 0; // debug, unnecessary
+
+ return (mach_msg_descriptor_t *)(user_dsc_in+1);
+}
+
+mach_msg_descriptor_t * ipc_kmsg_copyin_ool_descriptor(
+ mach_msg_ool_descriptor_t *dsc,
+ mach_msg_descriptor_t *user_dsc,
+ int is_64bit,
+ vm_offset_t *paddr,
+ vm_map_copy_t *copy,
+ vm_size_t *space_needed,
+ vm_map_t map,
+ mach_msg_return_t *mr);
+mach_msg_descriptor_t *
+ipc_kmsg_copyin_ool_descriptor(
+ mach_msg_ool_descriptor_t *dsc,
+ mach_msg_descriptor_t *user_dsc,
+ int is_64bit,
+ vm_offset_t *paddr,
+ vm_map_copy_t *copy,
+ vm_size_t *space_needed,
+ vm_map_t map,
+ mach_msg_return_t *mr)
+{
+ vm_size_t length;
+ boolean_t dealloc;
+ mach_msg_copy_options_t copy_options;
+ mach_vm_offset_t addr;
+ mach_msg_descriptor_type_t dsc_type;
+
+ if (is_64bit) {
+ mach_msg_ool_descriptor64_t *user_ool_dsc = (typeof(user_ool_dsc))user_dsc;
+
+ addr = (mach_vm_offset_t) user_ool_dsc->address;
+ length = user_ool_dsc->size;
+ dealloc = user_ool_dsc->deallocate;
+ copy_options = user_ool_dsc->copy;
+ dsc_type = user_ool_dsc->type;
+
+ user_dsc = (typeof(user_dsc))(user_ool_dsc+1);
+ } else {
+ mach_msg_ool_descriptor32_t *user_ool_dsc = (typeof(user_ool_dsc))user_dsc;
+
+ addr = CAST_USER_ADDR_T(user_ool_dsc->address);
+ dealloc = user_ool_dsc->deallocate;
+ copy_options = user_ool_dsc->copy;
+ dsc_type = user_ool_dsc->type;
+ length = user_ool_dsc->size;
+
+ user_dsc = (typeof(user_dsc))(user_ool_dsc+1);
+ }
+
+ dsc->size = (mach_msg_size_t)length;
+ dsc->deallocate = dealloc;
+ dsc->copy = copy_options;
+ dsc->type = dsc_type;
+
+ if (length == 0) {
+ dsc->address = NULL;
+ } else if ((length >= MSG_OOL_SIZE_SMALL) &&
+ (copy_options == MACH_MSG_PHYSICAL_COPY) && !dealloc) {
+
+ /*
+ * If the request is a physical copy and the source
+ * is not being deallocated, then allocate space
+ * in the kernel's pageable ipc copy map and copy
+ * the data in. The semantics guarantee that the
+ * data will have been physically copied before
+ * the send operation terminates. Thus if the data
+ * is not being deallocated, we must be prepared
+ * to page if the region is sufficiently large.
+ */
+ if (copyin(addr, (char *)*paddr, length)) {
+ *mr = MACH_SEND_INVALID_MEMORY;
+ return NULL;
+ }
+
+ /*
+ * The kernel ipc copy map is marked no_zero_fill.
+ * If the transfer is not a page multiple, we need
+ * to zero fill the balance.
+ */
+ if (!page_aligned(length)) {
+ (void) memset((void *) (*paddr + length), 0,
+ round_page(length) - length);
+ }
+ if (vm_map_copyin(ipc_kernel_copy_map, (vm_map_address_t)*paddr,
+ (vm_map_size_t)length, TRUE, copy) != KERN_SUCCESS) {
+ *mr = MACH_MSG_VM_KERNEL;
+ return NULL;
+ }
+ dsc->address = (void *)*copy;
+ *paddr += round_page(length);
+ *space_needed -= round_page(length);
+ } else {
+
+ /*
+	 * Make a vm_map_copy_t of the data.  If the
+ * data is small, this will do an optimized physical
+ * copy. Otherwise, it will do a virtual copy.
+ *
+ * NOTE: A virtual copy is OK if the original is being
+	 * deallocated, even if a physical copy was requested.
+ */
+ kern_return_t kr = vm_map_copyin(map, addr,
+ (vm_map_size_t)length, dealloc, copy);
+ if (kr != KERN_SUCCESS) {
+ *mr = (kr == KERN_RESOURCE_SHORTAGE) ?
+ MACH_MSG_VM_KERNEL :
+ MACH_SEND_INVALID_MEMORY;
+ return NULL;
+ }
+ dsc->address = (void *)*copy;
+ }
+ return user_dsc;
+}
+
+mach_msg_descriptor_t * ipc_kmsg_copyin_ool_ports_descriptor(
+ mach_msg_ool_ports_descriptor_t *dsc,
+ mach_msg_descriptor_t *user_dsc,
+ int is_64bit,
+ vm_map_t map,
+ ipc_space_t space,
+ ipc_object_t dest,
+ ipc_kmsg_t kmsg,
+ mach_msg_return_t *mr);
+mach_msg_descriptor_t *
+ipc_kmsg_copyin_ool_ports_descriptor(
+ mach_msg_ool_ports_descriptor_t *dsc,
+ mach_msg_descriptor_t *user_dsc,
+ int is_64bit,
+ vm_map_t map,
+ ipc_space_t space,
+ ipc_object_t dest,
+ ipc_kmsg_t kmsg,
+ mach_msg_return_t *mr)
+{
+ void *data;
+ ipc_object_t *objects;
+ unsigned int i;
+ mach_vm_offset_t addr;
+ mach_msg_type_name_t user_disp;
+ mach_msg_type_name_t result_disp;
+ mach_msg_type_number_t count;
+ mach_msg_copy_options_t copy_option;
+ boolean_t deallocate;
+ mach_msg_descriptor_type_t type;
+ vm_size_t ports_length, names_length;
+
+ if (is_64bit) {
+ mach_msg_ool_ports_descriptor64_t *user_ool_dsc = (typeof(user_ool_dsc))user_dsc;
+
+ addr = (mach_vm_offset_t)user_ool_dsc->address;
+ count = user_ool_dsc->count;
+ deallocate = user_ool_dsc->deallocate;
+ copy_option = user_ool_dsc->copy;
+ user_disp = user_ool_dsc->disposition;
+ type = user_ool_dsc->type;
+
+ user_dsc = (typeof(user_dsc))(user_ool_dsc+1);
+ } else {
+ mach_msg_ool_ports_descriptor32_t *user_ool_dsc = (typeof(user_ool_dsc))user_dsc;
+
+ addr = CAST_USER_ADDR_T(user_ool_dsc->address);
+ count = user_ool_dsc->count;
+ deallocate = user_ool_dsc->deallocate;
+ copy_option = user_ool_dsc->copy;
+ user_disp = user_ool_dsc->disposition;
+ type = user_ool_dsc->type;
+
+ user_dsc = (typeof(user_dsc))(user_ool_dsc+1);
+ }
+
+ dsc->deallocate = deallocate;
+ dsc->copy = copy_option;
+ dsc->type = type;
+ dsc->count = count;
+ dsc->address = NULL; /* for now */
+
+ result_disp = ipc_object_copyin_type(user_disp);
+ dsc->disposition = result_disp;
+
+ if (count > (INT_MAX / sizeof(mach_port_t))) {
+ *mr = MACH_SEND_TOO_LARGE;
+ return NULL;
+ }
+
+ /* calculate length of data in bytes, rounding up */
+ ports_length = count * sizeof(mach_port_t);
+ names_length = count * sizeof(mach_port_name_t);
+
+ if (ports_length == 0) {
+ return user_dsc;
+ }
+
+ data = kalloc(ports_length);
+
+ if (data == NULL) {
+ *mr = MACH_SEND_NO_BUFFER;
+ return NULL;
+ }
+
+#ifdef __LP64__
+ mach_port_name_t *names = &((mach_port_name_t *)data)[count];
+#else
+ mach_port_name_t *names = ((mach_port_name_t *)data);
+#endif
+
+ if (copyinmap(map, addr, names, names_length) != KERN_SUCCESS) {
+ kfree(data, ports_length);
+ *mr = MACH_SEND_INVALID_MEMORY;
+ return NULL;
+ }
+
+ if (deallocate) {
+ (void) mach_vm_deallocate(map, addr, (mach_vm_size_t)ports_length);
+ }
+
+ objects = (ipc_object_t *) data;
+ dsc->address = data;
+
+ for ( i = 0; i < count; i++) {
+ mach_port_name_t name = names[i];
+ ipc_object_t object;
+
+ if (!MACH_PORT_VALID(name)) {
+ objects[i] = (ipc_object_t)CAST_MACH_NAME_TO_PORT(name);
+ continue;
+ }
+
+ kern_return_t kr = ipc_object_copyin(space, name, user_disp, &object);
+
+ if (kr != KERN_SUCCESS) {
+ unsigned int j;
+
+ for(j = 0; j < i; j++) {
+ object = objects[j];
+ if (IPC_OBJECT_VALID(object))
+ ipc_object_destroy(object, result_disp);
+ }
+ kfree(data, ports_length);
+ dsc->address = NULL;
+ *mr = MACH_SEND_INVALID_RIGHT;
+ return NULL;
+ }
+
+ if ((dsc->disposition == MACH_MSG_TYPE_PORT_RECEIVE) &&
+ ipc_port_check_circularity(
+ (ipc_port_t) object,
+ (ipc_port_t) dest))
+ kmsg->ikm_header->msgh_bits |= MACH_MSGH_BITS_CIRCULAR;
+
+ objects[i] = object;
+ }
+
+ return user_dsc;
+}
+
/*
* Routine: ipc_kmsg_copyin_body
* Purpose:
{
ipc_object_t dest;
mach_msg_body_t *body;
- mach_msg_descriptor_t *saddr, *eaddr;
- boolean_t complex;
- mach_msg_return_t mr;
- int i;
- kern_return_t kr;
+ mach_msg_descriptor_t *daddr, *naddr;
+ mach_msg_descriptor_t *user_addr, *kern_addr;
+ mach_msg_type_number_t dsc_count;
+ boolean_t is_task_64bit = (map->max_offset > VM_MAX_ADDRESS);
+ boolean_t complex = FALSE;
vm_size_t space_needed = 0;
vm_offset_t paddr = 0;
- mach_msg_descriptor_t *sstart;
vm_map_copy_t copy = VM_MAP_COPY_NULL;
-
+ mach_msg_type_number_t i;
+ mach_msg_return_t mr = MACH_MSG_SUCCESS;
+
+ vm_size_t descriptor_size = 0;
+
/*
* Determine if the target is a kernel port.
*/
- dest = (ipc_object_t) kmsg->ikm_header.msgh_remote_port;
- complex = FALSE;
+ dest = (ipc_object_t) kmsg->ikm_header->msgh_remote_port;
+ body = (mach_msg_body_t *) (kmsg->ikm_header + 1);
+ naddr = (mach_msg_descriptor_t *) (body + 1);
+
+ dsc_count = body->msgh_descriptor_count;
+ if (dsc_count == 0)
+ return MACH_MSG_SUCCESS;
- body = (mach_msg_body_t *) (&kmsg->ikm_header + 1);
- saddr = (mach_msg_descriptor_t *) (body + 1);
- eaddr = saddr + body->msgh_descriptor_count;
-
- /* make sure the message does not ask for more msg descriptors
- * than the message can hold.
- */
-
- if (eaddr <= saddr ||
- eaddr > (mach_msg_descriptor_t *) (&kmsg->ikm_header +
- kmsg->ikm_header.msgh_size)) {
- ipc_kmsg_clean_partial(kmsg,0,0,0);
- return MACH_SEND_MSG_TOO_SMALL;
- }
-
/*
* Make an initial pass to determine kernal VM space requirements for
- * physical copies.
+ * physical copies and possible contraction of the descriptors from
+ * processes with pointers larger than the kernel's.
*/
- for (sstart = saddr; sstart < eaddr; sstart++) {
+ daddr = NULL;
+ for (i = 0; i < dsc_count; i++) {
+ daddr = naddr;
+
+ /* make sure the descriptor fits in the message */
+ if (is_task_64bit) {
+ switch (daddr->type.type) {
+ case MACH_MSG_OOL_DESCRIPTOR:
+ case MACH_MSG_OOL_VOLATILE_DESCRIPTOR:
+ case MACH_MSG_OOL_PORTS_DESCRIPTOR:
+ descriptor_size += 16;
+ naddr = (typeof(naddr))((vm_offset_t)daddr + 16);
+ break;
+ default:
+ descriptor_size += 12;
+ naddr = (typeof(naddr))((vm_offset_t)daddr + 12);
+ break;
+ }
+ } else {
+ descriptor_size += 12;
+ naddr = (typeof(naddr))((vm_offset_t)daddr + 12);
+ }
- if (sstart->type.type == MACH_MSG_OOL_DESCRIPTOR ||
- sstart->type.type == MACH_MSG_OOL_VOLATILE_DESCRIPTOR) {
+ if (naddr > (mach_msg_descriptor_t *)
+ ((vm_offset_t)kmsg->ikm_header + kmsg->ikm_header->msgh_size)) {
+ ipc_kmsg_clean_partial(kmsg, 0, NULL, 0, 0);
+ mr = MACH_SEND_MSG_TOO_SMALL;
+ goto out;
+ }
- if (sstart->out_of_line.copy != MACH_MSG_PHYSICAL_COPY &&
- sstart->out_of_line.copy != MACH_MSG_VIRTUAL_COPY) {
+ switch (daddr->type.type) {
+ mach_msg_size_t size;
+
+ case MACH_MSG_OOL_DESCRIPTOR:
+ case MACH_MSG_OOL_VOLATILE_DESCRIPTOR:
+ size = (is_task_64bit) ?
+ ((mach_msg_ool_descriptor64_t *)daddr)->size :
+ daddr->out_of_line.size;
+
+ if (daddr->out_of_line.copy != MACH_MSG_PHYSICAL_COPY &&
+ daddr->out_of_line.copy != MACH_MSG_VIRTUAL_COPY) {
+ /*
+ * Invalid copy option
+ */
+ ipc_kmsg_clean_partial(kmsg, 0, NULL, 0, 0);
+ mr = MACH_SEND_INVALID_TYPE;
+ goto out;
+ }
+
+ if ((size >= MSG_OOL_SIZE_SMALL) &&
+ (daddr->out_of_line.copy == MACH_MSG_PHYSICAL_COPY) &&
+ !(daddr->out_of_line.deallocate)) {
+
+ /*
+ * Out-of-line memory descriptor, accumulate kernel
+ * memory requirements
+ */
+ space_needed += round_page(size);
+ if (space_needed > ipc_kmsg_max_vm_space) {
+
/*
- * Invalid copy option
+ * Per message kernel memory limit exceeded
*/
- ipc_kmsg_clean_partial(kmsg,0,0,0);
- return MACH_SEND_INVALID_TYPE;
+ ipc_kmsg_clean_partial(kmsg, 0, NULL, 0, 0);
+ mr = MACH_MSG_VM_KERNEL;
+ goto out;
}
-
- if ((sstart->out_of_line.size >= MSG_OOL_SIZE_SMALL) &&
- (sstart->out_of_line.copy == MACH_MSG_PHYSICAL_COPY) &&
- !(sstart->out_of_line.deallocate)) {
-
- /*
- * Out-of-line memory descriptor, accumulate kernel
- * memory requirements
- */
- space_needed += round_page(sstart->out_of_line.size);
- if (space_needed > ipc_kmsg_max_vm_space) {
-
- /*
- * Per message kernel memory limit exceeded
- */
- ipc_kmsg_clean_partial(kmsg,0,0,0);
- return MACH_MSG_VM_KERNEL;
- }
- }
+ }
}
}
* space.
*/
if (space_needed) {
- if (vm_allocate(ipc_kernel_copy_map, &paddr, space_needed, TRUE) !=
- KERN_SUCCESS) {
- ipc_kmsg_clean_partial(kmsg,0,0,0);
- return MACH_MSG_VM_KERNEL;
- }
+ if (vm_allocate(ipc_kernel_copy_map, &paddr, space_needed,
+ VM_FLAGS_ANYWHERE) != KERN_SUCCESS) {
+ ipc_kmsg_clean_partial(kmsg, 0, NULL, 0, 0);
+ mr = MACH_MSG_VM_KERNEL;
+ goto out;
+ }
}
- /*
- * handle the OOL regions and port descriptors.
- * the check for complex messages was done earlier.
- */
+ /* user_addr = just after base as it was copied in */
+ user_addr = (mach_msg_descriptor_t *)((vm_offset_t)kmsg->ikm_header + sizeof(mach_msg_base_t));
+    /* Shift the mach_msg_base_t down to make room for dsc_count*16 bytes of descriptors */
+ if(descriptor_size != 16*dsc_count) {
+ vm_offset_t dsc_adjust = 16*dsc_count - descriptor_size;
+ memmove((char *)(((vm_offset_t)kmsg->ikm_header) - dsc_adjust), kmsg->ikm_header, sizeof(mach_msg_base_t));
+ kmsg->ikm_header = (mach_msg_header_t *)((vm_offset_t)kmsg->ikm_header - dsc_adjust);
+ /* Update the message size for the larger in-kernel representation */
+ kmsg->ikm_header->msgh_size += (mach_msg_size_t)dsc_adjust;
+ }
+
+
+ /* kern_addr = just after base after it has been (conditionally) moved */
+ kern_addr = (mach_msg_descriptor_t *)((vm_offset_t)kmsg->ikm_header + sizeof(mach_msg_base_t));
+
+ /* handle the OOL regions and port descriptors. */
+ for(i=0;i<dsc_count;i++) {
+ switch (user_addr->type.type) {
+ case MACH_MSG_PORT_DESCRIPTOR:
+ user_addr = ipc_kmsg_copyin_port_descriptor((mach_msg_port_descriptor_t *)kern_addr,
+ (mach_msg_legacy_port_descriptor_t *)user_addr, space, dest, kmsg, &mr);
+ kern_addr++;
+ complex = TRUE;
+ break;
+ case MACH_MSG_OOL_VOLATILE_DESCRIPTOR:
+ case MACH_MSG_OOL_DESCRIPTOR:
+ user_addr = ipc_kmsg_copyin_ool_descriptor((mach_msg_ool_descriptor_t *)kern_addr,
+ user_addr, is_task_64bit, &paddr, ©, &space_needed, map, &mr);
+ kern_addr++;
+ complex = TRUE;
+ break;
+ case MACH_MSG_OOL_PORTS_DESCRIPTOR:
+ user_addr = ipc_kmsg_copyin_ool_ports_descriptor((mach_msg_ool_ports_descriptor_t *)kern_addr,
+ user_addr, is_task_64bit, map, space, dest, kmsg, &mr);
+ kern_addr++;
+ complex = TRUE;
+ break;
+ default:
+ /* Invalid descriptor */
+ mr = MACH_SEND_INVALID_TYPE;
+ break;
+ }
+
+ if (MACH_MSG_SUCCESS != mr) {
+ /* clean from start of message descriptors to i */
+ ipc_kmsg_clean_partial(kmsg, i,
+ (mach_msg_descriptor_t *)((mach_msg_base_t *)kmsg->ikm_header + 1),
+ paddr, space_needed);
+ goto out;
+ }
+ } /* End of loop */
- for (i = 0, sstart = saddr; sstart < eaddr; sstart++) {
-
- switch (sstart->type.type) {
-
- case MACH_MSG_PORT_DESCRIPTOR: {
- mach_msg_type_name_t name;
- ipc_object_t object;
- mach_msg_port_descriptor_t *dsc;
-
- dsc = &sstart->port;
-
- /* this is really the type SEND, SEND_ONCE, etc. */
- name = dsc->disposition;
- dsc->disposition = ipc_object_copyin_type(name);
-
- if (!MACH_PORT_VALID((mach_port_name_t)dsc->name)) {
- complex = TRUE;
- break;
- }
- kr = ipc_object_copyin(space, (mach_port_name_t)dsc->name, name, &object);
- if (kr != KERN_SUCCESS) {
- ipc_kmsg_clean_partial(kmsg, i, paddr, space_needed);
- return MACH_SEND_INVALID_RIGHT;
- }
- if ((dsc->disposition == MACH_MSG_TYPE_PORT_RECEIVE) &&
- ipc_port_check_circularity((ipc_port_t) object,
- (ipc_port_t) dest)) {
- kmsg->ikm_header.msgh_bits |= MACH_MSGH_BITS_CIRCULAR;
- }
- dsc->name = (ipc_port_t) object;
- complex = TRUE;
- break;
- }
- case MACH_MSG_OOL_VOLATILE_DESCRIPTOR:
- case MACH_MSG_OOL_DESCRIPTOR: {
- vm_size_t length;
- boolean_t dealloc;
- vm_offset_t addr;
- vm_offset_t kaddr;
- mach_msg_ool_descriptor_t *dsc;
-
- dsc = &sstart->out_of_line;
- dealloc = dsc->deallocate;
- addr = (vm_offset_t) dsc->address;
-
- length = dsc->size;
-
- if (length == 0) {
- dsc->address = 0;
- } else if ((length >= MSG_OOL_SIZE_SMALL) &&
- (dsc->copy == MACH_MSG_PHYSICAL_COPY) && !dealloc) {
-
- /*
- * If the request is a physical copy and the source
- * is not being deallocated, then allocate space
- * in the kernel's pageable ipc copy map and copy
- * the data in. The semantics guarantee that the
- * data will have been physically copied before
- * the send operation terminates. Thus if the data
- * is not being deallocated, we must be prepared
- * to page if the region is sufficiently large.
- */
- if (copyin((const char *) addr, (char *) paddr,
- length)) {
- ipc_kmsg_clean_partial(kmsg, i, paddr,
- space_needed);
- return MACH_SEND_INVALID_MEMORY;
- }
+ if (!complex) {
+ kmsg->ikm_header->msgh_bits &= ~MACH_MSGH_BITS_COMPLEX;
+ }
+ out:
+ return mr;
+}
- /*
- * The kernel ipc copy map is marked no_zero_fill.
- * If the transfer is not a page multiple, we need
- * to zero fill the balance.
- */
- if (!page_aligned(length)) {
- (void) memset((void *) (paddr + length), 0,
- round_page(length) - length);
- }
- if (vm_map_copyin(ipc_kernel_copy_map, paddr, length,
- TRUE, ©) != KERN_SUCCESS) {
- ipc_kmsg_clean_partial(kmsg, i, paddr,
- space_needed);
- return MACH_MSG_VM_KERNEL;
- }
- dsc->address = (void *) copy;
- paddr += round_page(length);
- space_needed -= round_page(length);
- } else {
- /*
- * Make a vm_map_copy_t of the of the data. If the
- * data is small, this will do an optimized physical
- * copy. Otherwise, it will do a virtual copy.
- *
- * NOTE: A virtual copy is OK if the original is being
- * deallocted, even if a physical copy was requested.
- */
- kr = vm_map_copyin(map, addr, length, dealloc, ©);
- if (kr != KERN_SUCCESS) {
- ipc_kmsg_clean_partial(kmsg,i,paddr,space_needed);
- return (kr == KERN_RESOURCE_SHORTAGE) ?
- MACH_MSG_VM_KERNEL :
- MACH_SEND_INVALID_MEMORY;
- }
- dsc->address = (void *) copy;
- }
- complex = TRUE;
- break;
- }
- case MACH_MSG_OOL_PORTS_DESCRIPTOR: {
- vm_size_t length;
- vm_offset_t data;
- vm_offset_t addr;
- ipc_object_t *objects;
- int j;
- mach_msg_type_name_t name;
- mach_msg_ool_ports_descriptor_t *dsc;
-
- dsc = &sstart->ool_ports;
- addr = (vm_offset_t) dsc->address;
+/*
+ * Routine: ipc_kmsg_copyin
+ * Purpose:
+ * "Copy-in" port rights and out-of-line memory
+ * in the message.
+ *
+ * In all failure cases, the message is left holding
+ * no rights or memory. However, the message buffer
+ * is not deallocated. If successful, the message
+ * contains a valid destination port.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * MACH_MSG_SUCCESS Successful copyin.
+ * MACH_SEND_INVALID_HEADER
+ * Illegal value in the message header bits.
+ * MACH_SEND_INVALID_NOTIFY Bad notify port.
+ * MACH_SEND_INVALID_DEST Can't copyin destination port.
+ * MACH_SEND_INVALID_REPLY Can't copyin reply port.
+ * MACH_SEND_INVALID_MEMORY Can't grab out-of-line memory.
+ * MACH_SEND_INVALID_RIGHT Can't copyin port right in body.
+ * MACH_SEND_INVALID_TYPE Bad type specification.
+ * MACH_SEND_MSG_TOO_SMALL Body is too small for types/data.
+ */
- /* calculate length of data in bytes, rounding up */
- length = dsc->count * sizeof(mach_port_name_t);
-
- if (length == 0) {
- complex = TRUE;
- dsc->address = (void *) 0;
- break;
+mach_msg_return_t
+ipc_kmsg_copyin(
+ ipc_kmsg_t kmsg,
+ ipc_space_t space,
+ vm_map_t map,
+ mach_port_name_t notify)
+{
+ mach_msg_return_t mr;
+
+ mr = ipc_kmsg_copyin_header(kmsg->ikm_header, space, notify);
+ if (mr != MACH_MSG_SUCCESS)
+ return mr;
+
+ DEBUG_KPRINT_SYSCALL_IPC("ipc_kmsg_copyin header:\n%.8x\n%.8x\n%p\n%p\n%.8x\n%.8x\n",
+ kmsg->ikm_header->msgh_size,
+ kmsg->ikm_header->msgh_bits,
+ kmsg->ikm_header->msgh_remote_port,
+ kmsg->ikm_header->msgh_local_port,
+ kmsg->ikm_header->msgh_reserved,
+ kmsg->ikm_header->msgh_id);
+
+ if ((kmsg->ikm_header->msgh_bits & MACH_MSGH_BITS_COMPLEX) == 0)
+ return MACH_MSG_SUCCESS;
+
+ mr = ipc_kmsg_copyin_body( kmsg, space, map);
+
+ if (DEBUG_KPRINT_SYSCALL_PREDICATE(DEBUG_KPRINT_SYSCALL_IPC_MASK))
+ {
+ kprintf("body:\n");
+ uint32_t i;
+ for(i=0;i*4 < (kmsg->ikm_header->msgh_size - sizeof(mach_msg_header_t));i++)
+ {
+ kprintf("%.4x\n",((uint32_t *)(kmsg->ikm_header + 1))[i]);
}
+ }
+ return mr;
+}
- data = kalloc(length);
+/*
+ * Routine: ipc_kmsg_copyin_from_kernel
+ * Purpose:
+ * "Copy-in" port rights and out-of-line memory
+ * in a message sent from the kernel.
+ *
+ * Because the message comes from the kernel,
+ * the implementation assumes there are no errors
+ * or peculiarities in the message.
+ *
+ * Returns TRUE if queueing the message
+ * would result in a circularity.
+ * Conditions:
+ * Nothing locked.
+ */
- if (data == 0) {
- ipc_kmsg_clean_partial(kmsg, i, paddr, space_needed);
- return MACH_SEND_NO_BUFFER;
- }
-
- if (copyinmap(map, addr, data, length)) {
- kfree(data, length);
- ipc_kmsg_clean_partial(kmsg, i, paddr, space_needed);
- return MACH_SEND_INVALID_MEMORY;
- }
+void
+ipc_kmsg_copyin_from_kernel(
+ ipc_kmsg_t kmsg)
+{
+ mach_msg_bits_t bits = kmsg->ikm_header->msgh_bits;
+ mach_msg_type_name_t rname = MACH_MSGH_BITS_REMOTE(bits);
+ mach_msg_type_name_t lname = MACH_MSGH_BITS_LOCAL(bits);
+ ipc_object_t remote = (ipc_object_t) kmsg->ikm_header->msgh_remote_port;
+ ipc_object_t local = (ipc_object_t) kmsg->ikm_header->msgh_local_port;
- if (dsc->deallocate) {
- (void) vm_deallocate(map, addr, length);
- }
-
- dsc->address = (void *) data;
+ /* translate the destination and reply ports */
+
+ ipc_object_copyin_from_kernel(remote, rname);
+ if (IO_VALID(local))
+ ipc_object_copyin_from_kernel(local, lname);
+
+ /*
+ * The common case is a complex message with no reply port,
+ * because that is what the memory_object interface uses.
+ */
+
+ if (bits == (MACH_MSGH_BITS_COMPLEX |
+ MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, 0))) {
+ bits = (MACH_MSGH_BITS_COMPLEX |
+ MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND, 0));
+
+ kmsg->ikm_header->msgh_bits = bits;
+ } else {
+ bits = (MACH_MSGH_BITS_OTHER(bits) |
+ MACH_MSGH_BITS(ipc_object_copyin_type(rname),
+ ipc_object_copyin_type(lname)));
+
+ kmsg->ikm_header->msgh_bits = bits;
+ if ((bits & MACH_MSGH_BITS_COMPLEX) == 0)
+ return;
+ }
+ {
+ mach_msg_descriptor_t *saddr;
+ mach_msg_body_t *body;
+ mach_msg_type_number_t i, count;
+
+ body = (mach_msg_body_t *) (kmsg->ikm_header + 1);
+ saddr = (mach_msg_descriptor_t *) (body + 1);
+ count = body->msgh_descriptor_count;
+
+ for (i = 0; i < count; i++, saddr++) {
+
+ switch (saddr->type.type) {
+
+ case MACH_MSG_PORT_DESCRIPTOR: {
+ mach_msg_type_name_t name;
+ ipc_object_t object;
+ mach_msg_port_descriptor_t *dsc;
- /* this is really the type SEND, SEND_ONCE, etc. */
- name = dsc->disposition;
- dsc->disposition = ipc_object_copyin_type(name);
+ dsc = &saddr->port;
- objects = (ipc_object_t *) data;
+ /* this is really the type SEND, SEND_ONCE, etc. */
+ name = dsc->disposition;
+ object = (ipc_object_t) dsc->name;
+ dsc->disposition = ipc_object_copyin_type(name);
- for ( j = 0; j < dsc->count; j++) {
- mach_port_name_t port = (mach_port_name_t) objects[j];
- ipc_object_t object;
-
- if (!MACH_PORT_VALID(port))
- continue;
-
- kr = ipc_object_copyin(space, port, name, &object);
+ if (!IO_VALID(object)) {
+ break;
+ }
- if (kr != KERN_SUCCESS) {
- int k;
+ ipc_object_copyin_from_kernel(object, name);
+
+ /* CDY avoid circularity when the destination is also */
+ /* the kernel. This check should be changed into an */
+ /* assert when the new kobject model is in place since*/
+ /* ports will not be used in kernel to kernel chats */
+
+ if (((ipc_port_t)remote)->ip_receiver != ipc_space_kernel) {
+ if ((dsc->disposition == MACH_MSG_TYPE_PORT_RECEIVE) &&
+ ipc_port_check_circularity((ipc_port_t) object,
+ (ipc_port_t) remote)) {
+ kmsg->ikm_header->msgh_bits |=
+ MACH_MSGH_BITS_CIRCULAR;
+ }
+ }
+ break;
+ }
+ case MACH_MSG_OOL_VOLATILE_DESCRIPTOR:
+ case MACH_MSG_OOL_DESCRIPTOR: {
+ /*
+ * The sender should supply ready-made memory, i.e.
+ * a vm_map_copy_t, so we don't need to do anything.
+ */
+ break;
+ }
+ case MACH_MSG_OOL_PORTS_DESCRIPTOR: {
+ ipc_object_t *objects;
+ unsigned int j;
+ mach_msg_type_name_t name;
+ mach_msg_ool_ports_descriptor_t *dsc;
+
+ dsc = (mach_msg_ool_ports_descriptor_t *)&saddr->ool_ports;
- for(k = 0; k < j; k++) {
- object = objects[k];
- if (!MACH_PORT_VALID(port))
- continue;
- ipc_object_destroy(object, dsc->disposition);
- }
- kfree(data, length);
- ipc_kmsg_clean_partial(kmsg, i, paddr, space_needed);
- return MACH_SEND_INVALID_RIGHT;
+ /* this is really the type SEND, SEND_ONCE, etc. */
+ name = dsc->disposition;
+ dsc->disposition = ipc_object_copyin_type(name);
+
+ objects = (ipc_object_t *) dsc->address;
+
+ for ( j = 0; j < dsc->count; j++) {
+ ipc_object_t object = objects[j];
+
+ if (!IO_VALID(object))
+ continue;
+
+ ipc_object_copyin_from_kernel(object, name);
+
+ if ((dsc->disposition == MACH_MSG_TYPE_PORT_RECEIVE) &&
+ ipc_port_check_circularity(
+ (ipc_port_t) object,
+ (ipc_port_t) remote))
+ kmsg->ikm_header->msgh_bits |= MACH_MSGH_BITS_CIRCULAR;
}
-
- if ((dsc->disposition == MACH_MSG_TYPE_PORT_RECEIVE) &&
- ipc_port_check_circularity(
- (ipc_port_t) object,
- (ipc_port_t) dest))
- kmsg->ikm_header.msgh_bits |= MACH_MSGH_BITS_CIRCULAR;
-
- objects[j] = object;
+ break;
+ }
+ default: {
+#if MACH_ASSERT
+ panic("ipc_kmsg_copyin_from_kernel: bad descriptor");
+#endif /* MACH_ASSERT */
}
-
- complex = TRUE;
- break;
- }
- default: {
- /*
- * Invalid descriptor
- */
- ipc_kmsg_clean_partial(kmsg, i, paddr, space_needed);
- return MACH_SEND_INVALID_TYPE;
}
}
- i++ ;
}
-
- if (!complex)
- kmsg->ikm_header.msgh_bits &= ~MACH_MSGH_BITS_COMPLEX;
- return MACH_MSG_SUCCESS;
-}
-
-
-/*
- * Routine: ipc_kmsg_copyin
- * Purpose:
- * "Copy-in" port rights and out-of-line memory
- * in the message.
- *
- * In all failure cases, the message is left holding
- * no rights or memory. However, the message buffer
- * is not deallocated. If successful, the message
- * contains a valid destination port.
- * Conditions:
- * Nothing locked.
- * Returns:
- * MACH_MSG_SUCCESS Successful copyin.
- * MACH_SEND_INVALID_HEADER
- * Illegal value in the message header bits.
- * MACH_SEND_INVALID_NOTIFY Bad notify port.
- * MACH_SEND_INVALID_DEST Can't copyin destination port.
- * MACH_SEND_INVALID_REPLY Can't copyin reply port.
- * MACH_SEND_INVALID_MEMORY Can't grab out-of-line memory.
- * MACH_SEND_INVALID_RIGHT Can't copyin port right in body.
- * MACH_SEND_INVALID_TYPE Bad type specification.
- * MACH_SEND_MSG_TOO_SMALL Body is too small for types/data.
- */
-
-mach_msg_return_t
-ipc_kmsg_copyin(
- ipc_kmsg_t kmsg,
- ipc_space_t space,
- vm_map_t map,
- mach_port_name_t notify)
-{
- mach_msg_return_t mr;
-
- mr = ipc_kmsg_copyin_header(&kmsg->ikm_header, space, notify);
- if (mr != MACH_MSG_SUCCESS)
- return mr;
-
- if ((kmsg->ikm_header.msgh_bits & MACH_MSGH_BITS_COMPLEX) == 0)
- return MACH_MSG_SUCCESS;
-
- return( ipc_kmsg_copyin_body( kmsg, space, map) );
}
-/*
- * Routine: ipc_kmsg_copyin_from_kernel
- * Purpose:
- * "Copy-in" port rights and out-of-line memory
- * in a message sent from the kernel.
- *
- * Because the message comes from the kernel,
- * the implementation assumes there are no errors
- * or peculiarities in the message.
- *
- * Returns TRUE if queueing the message
- * would result in a circularity.
- * Conditions:
- * Nothing locked.
- */
-
+#if IKM_SUPPORT_LEGACY
void
-ipc_kmsg_copyin_from_kernel(
+ipc_kmsg_copyin_from_kernel_legacy(
ipc_kmsg_t kmsg)
{
- mach_msg_bits_t bits = kmsg->ikm_header.msgh_bits;
+ mach_msg_bits_t bits = kmsg->ikm_header->msgh_bits;
mach_msg_type_name_t rname = MACH_MSGH_BITS_REMOTE(bits);
mach_msg_type_name_t lname = MACH_MSGH_BITS_LOCAL(bits);
- ipc_object_t remote = (ipc_object_t) kmsg->ikm_header.msgh_remote_port;
- ipc_object_t local = (ipc_object_t) kmsg->ikm_header.msgh_local_port;
+ ipc_object_t remote = (ipc_object_t) kmsg->ikm_header->msgh_remote_port;
+ ipc_object_t local = (ipc_object_t) kmsg->ikm_header->msgh_local_port;
/* translate the destination and reply ports */
bits = (MACH_MSGH_BITS_COMPLEX |
MACH_MSGH_BITS(MACH_MSG_TYPE_PORT_SEND, 0));
- kmsg->ikm_header.msgh_bits = bits;
+ kmsg->ikm_header->msgh_bits = bits;
} else {
bits = (MACH_MSGH_BITS_OTHER(bits) |
MACH_MSGH_BITS(ipc_object_copyin_type(rname),
ipc_object_copyin_type(lname)));
- kmsg->ikm_header.msgh_bits = bits;
+ kmsg->ikm_header->msgh_bits = bits;
if ((bits & MACH_MSGH_BITS_COMPLEX) == 0)
return;
}
{
- mach_msg_descriptor_t *saddr, *eaddr;
+ mach_msg_legacy_descriptor_t *saddr;
+ mach_msg_descriptor_t *daddr;
mach_msg_body_t *body;
+ mach_msg_type_number_t i, count;
+
+ body = (mach_msg_body_t *) (kmsg->ikm_header + 1);
+ saddr = (typeof(saddr)) (body + 1);
+ count = body->msgh_descriptor_count;
+
+ if(count) {
+ vm_offset_t dsc_adjust = 4*count;
+ memmove((char *)(((vm_offset_t)kmsg->ikm_header) - dsc_adjust), kmsg->ikm_header, sizeof(mach_msg_base_t));
+ kmsg->ikm_header = (mach_msg_header_t *)((vm_offset_t)kmsg->ikm_header - dsc_adjust);
+ /* Update the message size for the larger in-kernel representation */
+ kmsg->ikm_header->msgh_size += dsc_adjust;
+ }
+ daddr = (mach_msg_descriptor_t *)((vm_offset_t)kmsg->ikm_header + sizeof(mach_msg_base_t));
- body = (mach_msg_body_t *) (&kmsg->ikm_header + 1);
- saddr = (mach_msg_descriptor_t *) (body + 1);
- eaddr = (mach_msg_descriptor_t *) saddr + body->msgh_descriptor_count;
-
- for ( ; saddr < eaddr; saddr++) {
-
+ for (i = 0; i < count; i++, saddr++, daddr++) {
switch (saddr->type.type) {
case MACH_MSG_PORT_DESCRIPTOR: {
mach_msg_type_name_t name;
ipc_object_t object;
- mach_msg_port_descriptor_t *dsc;
+ mach_msg_legacy_port_descriptor_t *dsc;
+ mach_msg_port_descriptor_t *dest_dsc;
- dsc = &saddr->port;
+ dsc = (typeof(dsc))&saddr->port;
+ dest_dsc = &daddr->port;
/* this is really the type SEND, SEND_ONCE, etc. */
name = dsc->disposition;
- object = (ipc_object_t) dsc->name;
- dsc->disposition = ipc_object_copyin_type(name);
+ object = (ipc_object_t) CAST_MACH_NAME_TO_PORT(dsc->name);
+ dest_dsc->disposition = ipc_object_copyin_type(name);
+ dest_dsc->name = (mach_port_t)object;
+ dest_dsc->type = MACH_MSG_PORT_DESCRIPTOR;
if (!IO_VALID(object)) {
break;
/* ports will not be used in kernel to kernel chats */
if (((ipc_port_t)remote)->ip_receiver != ipc_space_kernel) {
- if ((dsc->disposition == MACH_MSG_TYPE_PORT_RECEIVE) &&
+ if ((dest_dsc->disposition == MACH_MSG_TYPE_PORT_RECEIVE) &&
ipc_port_check_circularity((ipc_port_t) object,
(ipc_port_t) remote)) {
- kmsg->ikm_header.msgh_bits |=
+ kmsg->ikm_header->msgh_bits |=
MACH_MSGH_BITS_CIRCULAR;
}
}
break;
}
case MACH_MSG_OOL_VOLATILE_DESCRIPTOR:
- case MACH_MSG_OOL_DESCRIPTOR: {
- /*
- * The sender should supply ready-made memory, i.e.
- * a vm_map_copy_t, so we don't need to do anything.
- */
+ case MACH_MSG_OOL_DESCRIPTOR: {
+ /* The sender should supply ready-made memory, i.e. a vm_map_copy_t
+ * so we don't need to do anything special. */
+
+ mach_msg_ool_descriptor32_t *source_dsc = &saddr->out_of_line32;
+ mach_msg_ool_descriptor_t *dest_dsc = (typeof(dest_dsc))&daddr->out_of_line;
+
+ vm_offset_t address = source_dsc->address;
+ vm_size_t size = source_dsc->size;
+ boolean_t deallocate = source_dsc->deallocate;
+ mach_msg_copy_options_t copy = source_dsc->copy;
+ mach_msg_descriptor_type_t type = source_dsc->type;
+
+ dest_dsc->address = (void *)address;
+ dest_dsc->size = size;
+ dest_dsc->deallocate = deallocate;
+ dest_dsc->copy = copy;
+ dest_dsc->type = type;
break;
}
- case MACH_MSG_OOL_PORTS_DESCRIPTOR: {
+ case MACH_MSG_OOL_PORTS_DESCRIPTOR: {
ipc_object_t *objects;
- int j;
+ unsigned int j;
mach_msg_type_name_t name;
- mach_msg_ool_ports_descriptor_t *dsc;
+ mach_msg_ool_ports_descriptor_t *dest_dsc;
- dsc = &saddr->ool_ports;
+ mach_msg_ool_ports_descriptor32_t *source_dsc = &saddr->ool_ports32;
+ dest_dsc = (typeof(dest_dsc))&daddr->ool_ports;
+
+ boolean_t deallocate = source_dsc->deallocate;
+ mach_msg_copy_options_t copy = source_dsc->copy;
+ mach_msg_size_t port_count = source_dsc->count;
+ mach_msg_type_name_t disposition = source_dsc->disposition;
/* this is really the type SEND, SEND_ONCE, etc. */
- name = dsc->disposition;
- dsc->disposition = ipc_object_copyin_type(name);
+ name = disposition;
+ disposition = ipc_object_copyin_type(name);
- objects = (ipc_object_t *) dsc->address;
+ objects = (ipc_object_t *) (uintptr_t)source_dsc->address;
- for ( j = 0; j < dsc->count; j++) {
+ for ( j = 0; j < port_count; j++) {
ipc_object_t object = objects[j];
if (!IO_VALID(object))
ipc_object_copyin_from_kernel(object, name);
- if ((dsc->disposition == MACH_MSG_TYPE_PORT_RECEIVE) &&
+ if ((disposition == MACH_MSG_TYPE_PORT_RECEIVE) &&
ipc_port_check_circularity(
(ipc_port_t) object,
(ipc_port_t) remote))
- kmsg->ikm_header.msgh_bits |= MACH_MSGH_BITS_CIRCULAR;
+ kmsg->ikm_header->msgh_bits |= MACH_MSGH_BITS_CIRCULAR;
}
+
+ dest_dsc->address = objects;
+ dest_dsc->deallocate = deallocate;
+ dest_dsc->copy = copy;
+ dest_dsc->disposition = disposition;
+ dest_dsc->type = MACH_MSG_OOL_PORTS_DESCRIPTOR;
+ dest_dsc->count = port_count;
break;
}
default: {
}
}
}
+#endif /* IKM_SUPPORT_LEGACY */
/*
* Routine: ipc_kmsg_copyout_header
notify_port = ipc_port_lookup_notify(space,
notify);
if (notify_port == IP_NULL) {
+ printf("ipc_kmsg_copyout_header: no notify port\n");
is_write_unlock(space);
return MACH_RCV_INVALID_NOTIFY;
}
goto copyout_dest;
}
- reply_name = (mach_port_name_t)reply;
+ reply_name = CAST_MACH_PORT_TO_NAME(reply);
kr = ipc_entry_get(space, &reply_name, &entry);
if (kr != KERN_SUCCESS) {
ip_unlock(reply);
if (kr != KERN_SUCCESS) {
/* space is unlocked */
- if (kr == KERN_RESOURCE_SHORTAGE)
+ if (kr == KERN_RESOURCE_SHORTAGE) {
+ printf("ipc_kmsg_copyout_header: can't grow kernel ipc space\n");
return (MACH_RCV_HEADER_ERROR|
MACH_MSG_IPC_KERNEL);
- else
+ } else {
+ printf("ipc_kmsg_copyout_header: can't grow user ipc space\n");
return (MACH_RCV_HEADER_ERROR|
MACH_MSG_IPC_SPACE);
+ }
}
/* space is locked again; start over */
kr = ipc_port_dngrow(reply, ITS_SIZE_NONE);
/* port is unlocked */
- if (kr != KERN_SUCCESS)
+ if (kr != KERN_SUCCESS) {
+ printf("ipc_kmsg_copyout_header: can't grow kernel ipc space2\n");
return (MACH_RCV_HEADER_ERROR|
MACH_MSG_IPC_KERNEL);
+ }
is_write_lock(space);
continue;
/* must check notify even though it won't be used */
if ((entry = ipc_entry_lookup(space, notify)) == IE_NULL) {
+ printf("ipc_kmsg_copyout_header: ipc_entry_lookup failed\n");
is_read_unlock(space);
return MACH_RCV_INVALID_NOTIFY;
}
if ((entry->ie_bits & MACH_PORT_TYPE_RECEIVE) == 0) {
+ printf("ipc_kmsg_copyout_header: MACH_PORT_TYPE_RECEIVE not set!\n");
is_read_unlock(space);
return MACH_RCV_INVALID_NOTIFY;
}
ip_lock(dest);
is_read_unlock(space);
- reply_name = (mach_port_name_t) reply;
+ reply_name = CAST_MACH_PORT_TO_NAME(reply);
}
/*
msg->msgh_bits = (MACH_MSGH_BITS_OTHER(mbits) |
MACH_MSGH_BITS(reply_type, dest_type));
- msg->msgh_local_port = (ipc_port_t)dest_name;
- msg->msgh_remote_port = (ipc_port_t)reply_name;
+ msg->msgh_local_port = CAST_MACH_NAME_TO_PORT(dest_name);
+ msg->msgh_remote_port = CAST_MACH_NAME_TO_PORT(reply_name);
}
return MACH_MSG_SUCCESS;
kern_return_t kr;
if (!IO_VALID(object)) {
- *namep = (mach_port_name_t) object;
+ *namep = CAST_MACH_PORT_TO_NAME(object);
return MACH_MSG_SUCCESS;
}
return MACH_MSG_SUCCESS;
}
+mach_msg_descriptor_t *
+ipc_kmsg_copyout_port_descriptor(mach_msg_descriptor_t *dsc,
+ mach_msg_descriptor_t *user_dsc,
+ ipc_space_t space,
+ kern_return_t *mr);
+mach_msg_descriptor_t *
+ipc_kmsg_copyout_port_descriptor(mach_msg_descriptor_t *dsc,
+ mach_msg_descriptor_t *dest_dsc,
+ ipc_space_t space,
+ kern_return_t *mr)
+{
+ mach_port_t port;
+ mach_port_name_t name;
+ mach_msg_type_name_t disp;
+
+
+ /* Copyout port right carried in the message */
+ port = dsc->port.name;
+ disp = dsc->port.disposition;
+ *mr |= ipc_kmsg_copyout_object(space,
+ (ipc_object_t)port,
+ disp,
+ &name);
+
+ if(current_task() == kernel_task)
+ {
+ mach_msg_port_descriptor_t *user_dsc = (typeof(user_dsc))dest_dsc;
+ user_dsc--; // point to the start of this port descriptor
+ user_dsc->name = CAST_MACH_NAME_TO_PORT(name);
+ user_dsc->disposition = disp;
+ user_dsc->type = MACH_MSG_PORT_DESCRIPTOR;
+ dest_dsc = (typeof(dest_dsc))user_dsc;
+ } else {
+ mach_msg_legacy_port_descriptor_t *user_dsc = (typeof(user_dsc))dest_dsc;
+ user_dsc--; // point to the start of this port descriptor
+ user_dsc->name = CAST_MACH_PORT_TO_NAME(name);
+ user_dsc->disposition = disp;
+ user_dsc->type = MACH_MSG_PORT_DESCRIPTOR;
+ dest_dsc = (typeof(dest_dsc))user_dsc;
+ }
+
+ return (mach_msg_descriptor_t *)dest_dsc;
+}
+
+mach_msg_descriptor_t *
+ipc_kmsg_copyout_ool_descriptor(mach_msg_ool_descriptor_t *dsc, mach_msg_descriptor_t *user_dsc, int is_64bit, vm_map_t map, mach_msg_return_t *mr);
+mach_msg_descriptor_t *
+ipc_kmsg_copyout_ool_descriptor(mach_msg_ool_descriptor_t *dsc, mach_msg_descriptor_t *user_dsc, int is_64bit, vm_map_t map, mach_msg_return_t *mr)
+{
+ vm_map_copy_t copy;
+ mach_vm_offset_t rcv_addr;
+ mach_msg_copy_options_t copy_options;
+ mach_msg_size_t size;
+ mach_msg_descriptor_type_t dsc_type;
+
+ //SKIP_PORT_DESCRIPTORS(saddr, sdsc_count);
+
+ copy = (vm_map_copy_t) dsc->address;
+ size = dsc->size;
+ copy_options = dsc->copy;
+ assert(copy_options != MACH_MSG_KALLOC_COPY_T);
+ dsc_type = dsc->type;
+ rcv_addr = 0;
+
+ if (copy != VM_MAP_COPY_NULL) {
+ /*
+ * Check to see if there is an overwrite descriptor
+ * specified in the scatter list for this ool data.
+ * The descriptor has already been verified.
+ */
+#if 0
+ if (saddr != MACH_MSG_DESCRIPTOR_NULL) {
+ if (differs) {
+ OTHER_OOL_DESCRIPTOR *scatter_dsc;
+
+ scatter_dsc = (OTHER_OOL_DESCRIPTOR *)saddr;
+ if (scatter_dsc->copy == MACH_MSG_OVERWRITE) {
+ rcv_addr = (mach_vm_offset_t) scatter_dsc->address;
+ copy_options = MACH_MSG_OVERWRITE;
+ } else {
+ copy_options = MACH_MSG_VIRTUAL_COPY;
+ }
+ } else {
+ mach_msg_ool_descriptor_t *scatter_dsc;
+
+ scatter_dsc = &saddr->out_of_line;
+ if (scatter_dsc->copy == MACH_MSG_OVERWRITE) {
+ rcv_addr = CAST_USER_ADDR_T(scatter_dsc->address);
+ copy_options = MACH_MSG_OVERWRITE;
+ } else {
+ copy_options = MACH_MSG_VIRTUAL_COPY;
+ }
+ }
+ INCREMENT_SCATTER(saddr, sdsc_count, differs);
+ }
+#endif
+
+
+ /*
+ * Whether the data was virtually or physically
+ * copied we have a vm_map_copy_t for it.
+ * If there's an overwrite region specified
+ * overwrite it, otherwise do a virtual copy out.
+ */
+ kern_return_t kr;
+ if (copy_options == MACH_MSG_OVERWRITE && rcv_addr != 0) {
+ kr = vm_map_copy_overwrite(map, rcv_addr,
+ copy, TRUE);
+ } else {
+ kr = vm_map_copyout(map, &rcv_addr, copy);
+ }
+ if (kr != KERN_SUCCESS) {
+ if (kr == KERN_RESOURCE_SHORTAGE)
+ *mr |= MACH_MSG_VM_KERNEL;
+ else
+ *mr |= MACH_MSG_VM_SPACE;
+ vm_map_copy_discard(copy);
+ rcv_addr = 0;
+ size = 0;
+ }
+ } else {
+ rcv_addr = 0;
+ size = 0;
+ }
+
+ /*
+ * Now update the descriptor as the user would see it.
+ * This may require expanding the descriptor to the user
+ * visible size. There is already space allocated for
+ * this in what naddr points to.
+ */
+ if(current_task() == kernel_task)
+ {
+ mach_msg_ool_descriptor_t *user_ool_dsc = (typeof(user_ool_dsc))user_dsc;
+ user_ool_dsc--;
+
+ user_ool_dsc->address = (void *)(uintptr_t)rcv_addr;
+ user_ool_dsc->deallocate = (copy_options == MACH_MSG_VIRTUAL_COPY) ?
+ TRUE : FALSE;
+ user_ool_dsc->copy = copy_options;
+ user_ool_dsc->type = dsc_type;
+ user_ool_dsc->size = size;
+
+ user_dsc = (typeof(user_dsc))user_ool_dsc;
+ } else if (is_64bit) {
+ mach_msg_ool_descriptor64_t *user_ool_dsc = (typeof(user_ool_dsc))user_dsc;
+ user_ool_dsc--;
+
+ user_ool_dsc->address = rcv_addr;
+ user_ool_dsc->deallocate = (copy_options == MACH_MSG_VIRTUAL_COPY) ?
+ TRUE : FALSE;
+ user_ool_dsc->copy = copy_options;
+ user_ool_dsc->type = dsc_type;
+ user_ool_dsc->size = size;
+
+ user_dsc = (typeof(user_dsc))user_ool_dsc;
+ } else {
+ mach_msg_ool_descriptor32_t *user_ool_dsc = (typeof(user_ool_dsc))user_dsc;
+ user_ool_dsc--;
+
+ user_ool_dsc->address = CAST_DOWN_EXPLICIT(uint32_t, rcv_addr);
+ user_ool_dsc->size = size;
+ user_ool_dsc->deallocate = (copy_options == MACH_MSG_VIRTUAL_COPY) ?
+ TRUE : FALSE;
+ user_ool_dsc->copy = copy_options;
+ user_ool_dsc->type = dsc_type;
+
+ user_dsc = (typeof(user_dsc))user_ool_dsc;
+ }
+ return user_dsc;
+}
+
+mach_msg_descriptor_t *
+ipc_kmsg_copyout_ool_ports_descriptor(mach_msg_ool_ports_descriptor_t *dsc,
+ mach_msg_descriptor_t *user_dsc,
+ int is_64bit,
+ vm_map_t map,
+ ipc_space_t space,
+ ipc_kmsg_t kmsg,
+ mach_msg_return_t *mr);
+mach_msg_descriptor_t *
+ipc_kmsg_copyout_ool_ports_descriptor(mach_msg_ool_ports_descriptor_t *dsc,
+ mach_msg_descriptor_t *user_dsc,
+ int is_64bit,
+ vm_map_t map,
+ ipc_space_t space,
+ ipc_kmsg_t kmsg,
+ mach_msg_return_t *mr)
+{
+ mach_vm_offset_t rcv_addr;
+ mach_msg_type_name_t disp;
+ mach_msg_type_number_t count, i;
+ vm_size_t ports_length, names_length;
+
+ mach_msg_copy_options_t copy_options = MACH_MSG_VIRTUAL_COPY;
+
+ //SKIP_PORT_DESCRIPTORS(saddr, sdsc_count);
+
+ count = dsc->count;
+ disp = dsc->disposition;
+ ports_length = count * sizeof(mach_port_t);
+ names_length = count * sizeof(mach_port_name_t);
+
+ if (ports_length != 0 && dsc->address != 0) {
+
+ /*
+ * Check to see if there is an overwrite descriptor
+ * specified in the scatter list for this ool data.
+ * The descriptor has already been verified.
+ */
+#if 0
+ if (saddr != MACH_MSG_DESCRIPTOR_NULL) {
+ if (differs) {
+ OTHER_OOL_DESCRIPTOR *scatter_dsc;
+
+ scatter_dsc = (OTHER_OOL_DESCRIPTOR *)saddr;
+ rcv_addr = (mach_vm_offset_t) scatter_dsc->address;
+ copy_options = scatter_dsc->copy;
+ } else {
+ mach_msg_ool_descriptor_t *scatter_dsc;
+
+ scatter_dsc = &saddr->out_of_line;
+ rcv_addr = CAST_USER_ADDR_T(scatter_dsc->address);
+ copy_options = scatter_dsc->copy;
+ }
+ INCREMENT_SCATTER(saddr, sdsc_count, differs);
+ }
+#endif
+
+ if (copy_options == MACH_MSG_VIRTUAL_COPY) {
+ /*
+ * Dynamically allocate the region
+ */
+ int anywhere = VM_MAKE_TAG(VM_MEMORY_MACH_MSG)|
+ VM_FLAGS_ANYWHERE;
+
+ kern_return_t kr;
+ if ((kr = mach_vm_allocate(map, &rcv_addr,
+ (mach_vm_size_t)names_length,
+ anywhere)) != KERN_SUCCESS) {
+ ipc_kmsg_clean_body(kmsg, 1, (mach_msg_descriptor_t *)dsc);
+ rcv_addr = 0;
+
+ if (kr == KERN_RESOURCE_SHORTAGE){
+ *mr |= MACH_MSG_VM_KERNEL;
+ } else {
+ *mr |= MACH_MSG_VM_SPACE;
+ }
+ }
+ }
+
+ /*
+ * Handle the port rights and copy out the names
+ * for those rights out to user-space.
+ */
+ if (rcv_addr != 0) {
+ mach_port_t *objects = (mach_port_t *) dsc->address;
+ mach_port_name_t *names = (mach_port_name_t *) dsc->address;
+
+ /* copyout port rights carried in the message */
+
+ for ( i = 0; i < count ; i++) {
+ ipc_object_t object = (ipc_object_t)objects[i];
+
+ *mr |= ipc_kmsg_copyout_object(space, object,
+ disp, &names[i]);
+ }
+
+ /* copyout to memory allocated above */
+ void *data = dsc->address;
+ if (copyoutmap(map, data, rcv_addr, names_length) != KERN_SUCCESS)
+ *mr |= MACH_MSG_VM_SPACE;
+ kfree(data, ports_length);
+ }
+ } else {
+ rcv_addr = 0;
+ }
+
+ /*
+ * Now update the descriptor based on the information
+ * calculated above.
+ */
+ if(current_task() == kernel_task) {
+ mach_msg_ool_ports_descriptor_t *user_ool_dsc = (typeof(user_ool_dsc))user_dsc;
+ user_ool_dsc--;
+
+ user_ool_dsc->address = (void *)(uintptr_t)rcv_addr;
+ user_ool_dsc->deallocate = (copy_options == MACH_MSG_VIRTUAL_COPY) ?
+ TRUE : FALSE;
+ user_ool_dsc->copy = copy_options;
+ user_ool_dsc->disposition = disp;
+ user_ool_dsc->type = MACH_MSG_OOL_PORTS_DESCRIPTOR;
+ user_ool_dsc->count = count;
+
+ user_dsc = (typeof(user_dsc))user_ool_dsc;
+ } else if (is_64bit) {
+ mach_msg_ool_ports_descriptor64_t *user_ool_dsc = (typeof(user_ool_dsc))user_dsc;
+ user_ool_dsc--;
+
+ user_ool_dsc->address = rcv_addr;
+ user_ool_dsc->deallocate = (copy_options == MACH_MSG_VIRTUAL_COPY) ?
+ TRUE : FALSE;
+ user_ool_dsc->copy = copy_options;
+ user_ool_dsc->disposition = disp;
+ user_ool_dsc->type = MACH_MSG_OOL_PORTS_DESCRIPTOR;
+ user_ool_dsc->count = count;
+
+ user_dsc = (typeof(user_dsc))user_ool_dsc;
+ } else {
+ mach_msg_ool_ports_descriptor32_t *user_ool_dsc = (typeof(user_ool_dsc))user_dsc;
+ user_ool_dsc--;
+
+ user_ool_dsc->address = CAST_DOWN_EXPLICIT(uint32_t, rcv_addr);
+ user_ool_dsc->count = count;
+ user_ool_dsc->deallocate = (copy_options == MACH_MSG_VIRTUAL_COPY) ?
+ TRUE : FALSE;
+ user_ool_dsc->copy = copy_options;
+ user_ool_dsc->disposition = disp;
+ user_ool_dsc->type = MACH_MSG_OOL_PORTS_DESCRIPTOR;
+
+ user_dsc = (typeof(user_dsc))user_ool_dsc;
+ }
+ return user_dsc;
+}
+
/*
* Routine: ipc_kmsg_copyout_body
* Purpose:
mach_msg_body_t *slist)
{
mach_msg_body_t *body;
- mach_msg_descriptor_t *saddr, *eaddr;
+ mach_msg_descriptor_t *kern_dsc, *user_dsc;
+ mach_msg_descriptor_t *saddr;
+ mach_msg_type_number_t dsc_count, sdsc_count;
+ int i;
mach_msg_return_t mr = MACH_MSG_SUCCESS;
- kern_return_t kr;
- vm_offset_t data;
- mach_msg_descriptor_t *sstart, *send;
+ boolean_t is_task_64bit = (map->max_offset > VM_MAX_ADDRESS);
- body = (mach_msg_body_t *) (&kmsg->ikm_header + 1);
- saddr = (mach_msg_descriptor_t *) (body + 1);
- eaddr = saddr + body->msgh_descriptor_count;
+ body = (mach_msg_body_t *) (kmsg->ikm_header + 1);
+ dsc_count = body->msgh_descriptor_count;
+ kern_dsc = (mach_msg_descriptor_t *) (body + 1);
+ /* Point user_dsc just after the end of all the descriptors */
+ user_dsc = &kern_dsc[dsc_count];
- /*
- * Do scatter list setup
- */
+ /* Do scatter list setup */
if (slist != MACH_MSG_BODY_NULL) {
- sstart = (mach_msg_descriptor_t *) (slist + 1);
- send = sstart + slist->msgh_descriptor_count;
+ panic("Scatter lists disabled");
+ saddr = (mach_msg_descriptor_t *) (slist + 1);
+ sdsc_count = slist->msgh_descriptor_count;
}
else {
- sstart = MACH_MSG_DESCRIPTOR_NULL;
+ saddr = MACH_MSG_DESCRIPTOR_NULL;
+ sdsc_count = 0;
}
- for ( ; saddr < eaddr; saddr++ ) {
-
- switch (saddr->type.type) {
-
- case MACH_MSG_PORT_DESCRIPTOR: {
- mach_msg_port_descriptor_t *dsc;
-
- /*
- * Copyout port right carried in the message
- */
- dsc = &saddr->port;
- mr |= ipc_kmsg_copyout_object(space,
- (ipc_object_t) dsc->name,
- dsc->disposition,
- (mach_port_name_t *) &dsc->name);
-
- break;
- }
- case MACH_MSG_OOL_VOLATILE_DESCRIPTOR:
- case MACH_MSG_OOL_DESCRIPTOR : {
- vm_offset_t rcv_addr;
- vm_offset_t snd_addr;
- mach_msg_ool_descriptor_t *dsc;
- mach_msg_copy_options_t copy_option;
-
- SKIP_PORT_DESCRIPTORS(sstart, send);
-
- dsc = &saddr->out_of_line;
-
- assert(dsc->copy != MACH_MSG_KALLOC_COPY_T);
-
- copy_option = dsc->copy;
+ /* Now process the descriptors */
+ for (i = dsc_count-1; i >= 0; i--) {
+ switch (kern_dsc[i].type.type) {
+
+ case MACH_MSG_PORT_DESCRIPTOR:
+ user_dsc = ipc_kmsg_copyout_port_descriptor(&kern_dsc[i], user_dsc, space, &mr);
+ break;
+ case MACH_MSG_OOL_VOLATILE_DESCRIPTOR:
+ case MACH_MSG_OOL_DESCRIPTOR :
+ user_dsc = ipc_kmsg_copyout_ool_descriptor(
+ (mach_msg_ool_descriptor_t *)&kern_dsc[i], user_dsc, is_task_64bit, map, &mr);
+ break;
+ case MACH_MSG_OOL_PORTS_DESCRIPTOR :
+ user_dsc = ipc_kmsg_copyout_ool_ports_descriptor(
+ (mach_msg_ool_ports_descriptor_t *)&kern_dsc[i], user_dsc, is_task_64bit, map, space, kmsg, &mr);
+ break;
+ default : {
+ panic("untyped IPC copyout body: invalid message descriptor");
+ }
+ }
+ }
- if ((snd_addr = (vm_offset_t) dsc->address) != 0) {
- if (sstart != MACH_MSG_DESCRIPTOR_NULL &&
- sstart->out_of_line.copy == MACH_MSG_OVERWRITE) {
+ if(user_dsc != kern_dsc) {
+ vm_offset_t dsc_adjust = (vm_offset_t)user_dsc - (vm_offset_t)kern_dsc;
+ memmove((char *)((vm_offset_t)kmsg->ikm_header + dsc_adjust), kmsg->ikm_header, sizeof(mach_msg_base_t));
+ kmsg->ikm_header = (mach_msg_header_t *)((vm_offset_t)kmsg->ikm_header + dsc_adjust);
+ /* Update the message size for the smaller user representation */
+ kmsg->ikm_header->msgh_size -= (mach_msg_size_t)dsc_adjust;
+ }
- /*
- * There is an overwrite descriptor specified in the
- * scatter list for this ool data. The descriptor
- * has already been verified
- */
- rcv_addr = (vm_offset_t) sstart->out_of_line.address;
- dsc->copy = MACH_MSG_OVERWRITE;
- } else {
- dsc->copy = MACH_MSG_ALLOCATE;
- }
+ return mr;
+}
- /*
- * Whether the data was virtually or physically
- * copied we have a vm_map_copy_t for it.
- * If there's an overwrite region specified
- * overwrite it, otherwise do a virtual copy out.
- */
- if (dsc->copy == MACH_MSG_OVERWRITE) {
- kr = vm_map_copy_overwrite(map, rcv_addr,
- (vm_map_copy_t) dsc->address, TRUE);
- } else {
- kr = vm_map_copyout(map, &rcv_addr,
- (vm_map_copy_t) dsc->address);
- }
- if (kr != KERN_SUCCESS) {
- if (kr == KERN_RESOURCE_SHORTAGE)
- mr |= MACH_MSG_VM_KERNEL;
- else
- mr |= MACH_MSG_VM_SPACE;
- vm_map_copy_discard((vm_map_copy_t) dsc->address);
- dsc->address = 0;
- INCREMENT_SCATTER(sstart);
- break;
- }
- dsc->address = (void *) rcv_addr;
- }
- INCREMENT_SCATTER(sstart);
- break;
- }
- case MACH_MSG_OOL_PORTS_DESCRIPTOR : {
- vm_offset_t addr;
- mach_port_name_t *objects;
- mach_msg_type_number_t j;
- vm_size_t length;
- mach_msg_ool_ports_descriptor_t *dsc;
+/*
+ * Routine: ipc_kmsg_copyout_size
+ * Purpose:
+ * Compute the size of the message as copied out to the given
+ * map. If the destination map's pointers are a different size
+ * than the kernel's, we have to allow for expansion/
+ * contraction of the descriptors as appropriate.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * size of the message as it would be received.
+ */
- SKIP_PORT_DESCRIPTORS(sstart, send);
+mach_msg_size_t
+ipc_kmsg_copyout_size(
+ ipc_kmsg_t kmsg,
+ vm_map_t map)
+{
+ mach_msg_size_t send_size;
- dsc = &saddr->ool_ports;
+ send_size = kmsg->ikm_header->msgh_size;
- length = dsc->count * sizeof(mach_port_name_t);
+ boolean_t is_task_64bit = (map->max_offset > VM_MAX_ADDRESS);
- if (length != 0) {
- if (sstart != MACH_MSG_DESCRIPTOR_NULL &&
- sstart->ool_ports.copy == MACH_MSG_OVERWRITE) {
+#if defined(__LP64__)
+ send_size -= LEGACY_HEADER_SIZE_DELTA;
+#endif
- /*
- * There is an overwrite descriptor specified in the
- * scatter list for this ool data. The descriptor
- * has already been verified
- */
- addr = (vm_offset_t) sstart->out_of_line.address;
- dsc->copy = MACH_MSG_OVERWRITE;
- }
- else {
+ if (kmsg->ikm_header->msgh_bits & MACH_MSGH_BITS_COMPLEX) {
- /*
- * Dynamically allocate the region
- */
- int anywhere = VM_MAKE_TAG(VM_MEMORY_MACH_MSG)|
- VM_FLAGS_ANYWHERE;
-
- dsc->copy = MACH_MSG_ALLOCATE;
- if ((kr = vm_allocate(map, &addr, length,
- anywhere)) != KERN_SUCCESS) {
- ipc_kmsg_clean_body(kmsg,
- body->msgh_descriptor_count);
- dsc->address = 0;
-
- if (kr == KERN_RESOURCE_SHORTAGE){
- mr |= MACH_MSG_VM_KERNEL;
- } else {
- mr |= MACH_MSG_VM_SPACE;
- }
- INCREMENT_SCATTER(sstart);
- break;
- }
- }
- } else {
- INCREMENT_SCATTER(sstart);
- break;
- }
+ mach_msg_body_t *body;
+ mach_msg_descriptor_t *saddr, *eaddr;
-
- objects = (mach_port_name_t *) dsc->address ;
-
- /* copyout port rights carried in the message */
-
- for ( j = 0; j < dsc->count ; j++) {
- ipc_object_t object =
- (ipc_object_t) objects[j];
-
- mr |= ipc_kmsg_copyout_object(space, object,
- dsc->disposition, &objects[j]);
- }
+ body = (mach_msg_body_t *) (kmsg->ikm_header + 1);
+ saddr = (mach_msg_descriptor_t *) (body + 1);
+ eaddr = saddr + body->msgh_descriptor_count;
- /* copyout to memory allocated above */
-
- data = (vm_offset_t) dsc->address;
- (void) copyoutmap(map, data, addr, length);
- kfree(data, length);
-
- dsc->address = (void *) addr;
- INCREMENT_SCATTER(sstart);
- break;
- }
- default : {
- panic("untyped IPC copyout body: invalid message descriptor");
- }
- }
+ for ( ; saddr < eaddr; saddr++ ) {
+ switch (saddr->type.type) {
+ case MACH_MSG_OOL_DESCRIPTOR:
+ case MACH_MSG_OOL_VOLATILE_DESCRIPTOR:
+ case MACH_MSG_OOL_PORTS_DESCRIPTOR:
+ if(!is_task_64bit)
+ send_size -= DESC_SIZE_ADJUSTMENT;
+ break;
+ case MACH_MSG_PORT_DESCRIPTOR:
+ send_size -= DESC_SIZE_ADJUSTMENT;
+ break;
+ default:
+ break;
+ }
+ }
}
- return mr;
+ return send_size;
}
/*
{
mach_msg_return_t mr;
- mr = ipc_kmsg_copyout_header(&kmsg->ikm_header, space, notify);
- if (mr != MACH_MSG_SUCCESS)
+ mr = ipc_kmsg_copyout_header(kmsg->ikm_header, space, notify);
+ if (mr != MACH_MSG_SUCCESS) {
return mr;
+ }
- if (kmsg->ikm_header.msgh_bits & MACH_MSGH_BITS_COMPLEX) {
+ if (kmsg->ikm_header->msgh_bits & MACH_MSGH_BITS_COMPLEX) {
mr = ipc_kmsg_copyout_body(kmsg, space, map, slist);
if (mr != MACH_MSG_SUCCESS)
vm_map_t map,
mach_msg_body_t *slist)
{
- mach_msg_bits_t mbits = kmsg->ikm_header.msgh_bits;
- ipc_object_t dest = (ipc_object_t) kmsg->ikm_header.msgh_remote_port;
- ipc_object_t reply = (ipc_object_t) kmsg->ikm_header.msgh_local_port;
+ mach_msg_bits_t mbits = kmsg->ikm_header->msgh_bits;
+ ipc_object_t dest = (ipc_object_t) kmsg->ikm_header->msgh_remote_port;
+ ipc_object_t reply = (ipc_object_t) kmsg->ikm_header->msgh_local_port;
mach_msg_type_name_t dest_type = MACH_MSGH_BITS_REMOTE(mbits);
mach_msg_type_name_t reply_type = MACH_MSGH_BITS_LOCAL(mbits);
mach_port_name_t dest_name, reply_name;
mr = (ipc_kmsg_copyout_object(space, dest, dest_type, &dest_name) |
ipc_kmsg_copyout_object(space, reply, reply_type, &reply_name));
- kmsg->ikm_header.msgh_bits = mbits &~ MACH_MSGH_BITS_CIRCULAR;
- kmsg->ikm_header.msgh_remote_port = (ipc_port_t)dest_name;
- kmsg->ikm_header.msgh_local_port = (ipc_port_t)reply_name;
+ kmsg->ikm_header->msgh_bits = mbits &~ MACH_MSGH_BITS_CIRCULAR;
+ kmsg->ikm_header->msgh_remote_port = CAST_MACH_NAME_TO_PORT(dest_name);
+ kmsg->ikm_header->msgh_local_port = CAST_MACH_NAME_TO_PORT(reply_name);
if (mbits & MACH_MSGH_BITS_COMPLEX) {
mr |= ipc_kmsg_copyout_body(kmsg, space, map, slist);
mach_msg_type_name_t reply_type;
mach_port_name_t dest_name, reply_name;
- mbits = kmsg->ikm_header.msgh_bits;
- dest = (ipc_object_t) kmsg->ikm_header.msgh_remote_port;
- reply = (ipc_object_t) kmsg->ikm_header.msgh_local_port;
+ mbits = kmsg->ikm_header->msgh_bits;
+ dest = (ipc_object_t) kmsg->ikm_header->msgh_remote_port;
+ reply = (ipc_object_t) kmsg->ikm_header->msgh_local_port;
dest_type = MACH_MSGH_BITS_REMOTE(mbits);
reply_type = MACH_MSGH_BITS_LOCAL(mbits);
ipc_object_destroy(reply, reply_type);
reply_name = MACH_PORT_NULL;
} else
- reply_name = (mach_port_name_t) reply;
+ reply_name = CAST_MACH_PORT_TO_NAME(reply);
- kmsg->ikm_header.msgh_bits = (MACH_MSGH_BITS_OTHER(mbits) |
+ kmsg->ikm_header->msgh_bits = (MACH_MSGH_BITS_OTHER(mbits) |
MACH_MSGH_BITS(reply_type, dest_type));
- kmsg->ikm_header.msgh_local_port = (ipc_port_t)dest_name;
- kmsg->ikm_header.msgh_remote_port = (ipc_port_t)reply_name;
+ kmsg->ikm_header->msgh_local_port = CAST_MACH_NAME_TO_PORT(dest_name);
+ kmsg->ikm_header->msgh_remote_port = CAST_MACH_NAME_TO_PORT(reply_name);
if (mbits & MACH_MSGH_BITS_COMPLEX) {
mach_msg_body_t *body;
- body = (mach_msg_body_t *) (&kmsg->ikm_header + 1);
- ipc_kmsg_clean_body(kmsg, body->msgh_descriptor_count);
+ body = (mach_msg_body_t *) (kmsg->ikm_header + 1);
+ ipc_kmsg_clean_body(kmsg, body->msgh_descriptor_count,
+ (mach_msg_descriptor_t *)(body + 1));
}
}
+
/*
* Routine: ipc_kmsg_copyin_scatter
* Purpose:
*/
mach_msg_body_t *
-ipc_kmsg_copyin_scatter(
- mach_msg_header_t *msg,
- mach_msg_size_t slist_size,
- ipc_kmsg_t kmsg)
+ipc_kmsg_get_scatter(
+ mach_vm_address_t msg_addr,
+ mach_msg_size_t slist_size,
+ ipc_kmsg_t kmsg)
{
mach_msg_body_t *slist;
mach_msg_body_t *body;
mach_msg_descriptor_t *gstart, *gend;
mach_msg_descriptor_t *sstart, *send;
+#if defined(__LP64__)
+ panic("ipc_kmsg_get_scatter called!");
+#endif
if (slist_size < sizeof(mach_msg_base_t))
return MACH_MSG_BODY_NULL;
- slist_size -= sizeof(mach_msg_header_t);
+ slist_size -= (mach_msg_size_t)sizeof(mach_msg_header_t);
slist = (mach_msg_body_t *)kalloc(slist_size);
if (slist == MACH_MSG_BODY_NULL)
return slist;
- if (copyin((char *) (msg + 1), (char *)slist, slist_size)) {
- kfree((vm_offset_t)slist, slist_size);
+ if (copyin(msg_addr + sizeof(mach_msg_header_t), (char *)slist, slist_size)) {
+ kfree(slist, slist_size);
return MACH_MSG_BODY_NULL;
}
if ((slist->msgh_descriptor_count* sizeof(mach_msg_descriptor_t)
+ sizeof(mach_msg_size_t)) > slist_size) {
- kfree((vm_offset_t)slist, slist_size);
+ kfree(slist, slist_size);
return MACH_MSG_BODY_NULL;
}
- body = (mach_msg_body_t *) (&kmsg->ikm_header + 1);
+ body = (mach_msg_body_t *) (kmsg->ikm_header + 1);
gstart = (mach_msg_descriptor_t *) (body + 1);
gend = gstart + body->msgh_descriptor_count;
* automatic size mismatch.
*/
if (slist->msgh_descriptor_count == 0) {
- kfree((vm_offset_t)slist, slist_size);
+ kfree(slist, slist_size);
return MACH_MSG_BODY_NULL;
}
g_type == MACH_MSG_OOL_VOLATILE_DESCRIPTOR) {
if (sstart->type.type != MACH_MSG_OOL_DESCRIPTOR &&
sstart->type.type != MACH_MSG_OOL_VOLATILE_DESCRIPTOR) {
- kfree((vm_offset_t)slist, slist_size);
+ kfree(slist, slist_size);
return MACH_MSG_BODY_NULL;
}
if (sstart->out_of_line.copy == MACH_MSG_OVERWRITE &&
gstart->out_of_line.size > sstart->out_of_line.size) {
- kfree((vm_offset_t)slist, slist_size);
+ kfree(slist, slist_size);
return MACH_MSG_BODY_NULL;
}
}
else {
if (sstart->type.type != MACH_MSG_OOL_PORTS_DESCRIPTOR) {
- kfree((vm_offset_t)slist, slist_size);
+ kfree(slist, slist_size);
return MACH_MSG_BODY_NULL;
}
if (sstart->ool_ports.copy == MACH_MSG_OVERWRITE &&
gstart->ool_ports.count > sstart->ool_ports.count) {
- kfree((vm_offset_t)slist, slist_size);
+ kfree(slist, slist_size);
return MACH_MSG_BODY_NULL;
}
}
mach_msg_body_t *slist,
mach_msg_size_t slist_size)
{
- slist_size -= sizeof(mach_msg_header_t);
- kfree((vm_offset_t)slist, slist_size);
+#if defined(__LP64__)
+ panic("%s called; halting!", __func__);
+#endif
+
+ slist_size -= (mach_msg_size_t)sizeof(mach_msg_header_t);
+ kfree(slist, slist_size);
}
mach_msg_type_name_t reply_type;
mach_port_name_t dest_name, reply_name;
- dest = (ipc_object_t) kmsg->ikm_header.msgh_remote_port;
- reply = (ipc_object_t) kmsg->ikm_header.msgh_local_port;
- dest_type = MACH_MSGH_BITS_REMOTE(kmsg->ikm_header.msgh_bits);
- reply_type = MACH_MSGH_BITS_LOCAL(kmsg->ikm_header.msgh_bits);
+ dest = (ipc_object_t) kmsg->ikm_header->msgh_remote_port;
+ reply = (ipc_object_t) kmsg->ikm_header->msgh_local_port;
+ dest_type = MACH_MSGH_BITS_REMOTE(kmsg->ikm_header->msgh_bits);
+ reply_type = MACH_MSGH_BITS_LOCAL(kmsg->ikm_header->msgh_bits);
+
+ assert(IO_VALID(dest));
+
+ io_lock(dest);
+ if (io_active(dest)) {
+ ipc_object_copyout_dest(space, dest, dest_type, &dest_name);
+ /* dest is unlocked */
+ } else {
+ io_release(dest);
+ io_check_unlock(dest);
+ dest_name = MACH_PORT_DEAD;
+ }
+
+ reply_name = CAST_MACH_PORT_TO_NAME(reply);
+
+ kmsg->ikm_header->msgh_bits =
+ (MACH_MSGH_BITS_OTHER(kmsg->ikm_header->msgh_bits) |
+ MACH_MSGH_BITS(reply_type, dest_type));
+ kmsg->ikm_header->msgh_local_port = CAST_MACH_NAME_TO_PORT(dest_name);
+ kmsg->ikm_header->msgh_remote_port = CAST_MACH_NAME_TO_PORT(reply_name);
+}
+
+#if IKM_SUPPORT_LEGACY
+void
+ipc_kmsg_copyout_to_kernel_legacy(
+ ipc_kmsg_t kmsg,
+ ipc_space_t space)
+{
+ ipc_object_t dest;
+ ipc_object_t reply;
+ mach_msg_type_name_t dest_type;
+ mach_msg_type_name_t reply_type;
+ mach_port_name_t dest_name, reply_name;
+
+ dest = (ipc_object_t) kmsg->ikm_header->msgh_remote_port;
+ reply = (ipc_object_t) kmsg->ikm_header->msgh_local_port;
+ dest_type = MACH_MSGH_BITS_REMOTE(kmsg->ikm_header->msgh_bits);
+ reply_type = MACH_MSGH_BITS_LOCAL(kmsg->ikm_header->msgh_bits);
assert(IO_VALID(dest));
dest_name = MACH_PORT_DEAD;
}
- reply_name = (mach_port_name_t) reply;
+ reply_name = CAST_MACH_PORT_TO_NAME(reply);
- kmsg->ikm_header.msgh_bits =
- (MACH_MSGH_BITS_OTHER(kmsg->ikm_header.msgh_bits) |
+ kmsg->ikm_header->msgh_bits =
+ (MACH_MSGH_BITS_OTHER(kmsg->ikm_header->msgh_bits) |
MACH_MSGH_BITS(reply_type, dest_type));
- kmsg->ikm_header.msgh_local_port = (ipc_port_t)dest_name;
- kmsg->ikm_header.msgh_remote_port = (ipc_port_t)reply_name;
+ kmsg->ikm_header->msgh_local_port = CAST_MACH_NAME_TO_PORT(dest_name);
+ kmsg->ikm_header->msgh_remote_port = CAST_MACH_NAME_TO_PORT(reply_name);
+
+ mach_msg_descriptor_t *saddr;
+ mach_msg_legacy_descriptor_t *daddr;
+ mach_msg_type_number_t i, count = ((mach_msg_base_t *)kmsg->ikm_header)->body.msgh_descriptor_count;
+ saddr = (mach_msg_descriptor_t *) (((mach_msg_base_t *)kmsg->ikm_header) + 1);
+ saddr = &saddr[count-1];
+ daddr = (mach_msg_legacy_descriptor_t *)&saddr[count];
+ daddr--;
+
+ vm_offset_t dsc_adjust = 0;
+
+ for (i = 0; i < count; i++, saddr--, daddr--) {
+ switch (saddr->type.type) {
+ case MACH_MSG_PORT_DESCRIPTOR: {
+ mach_msg_port_descriptor_t *dsc = &saddr->port;
+ mach_msg_legacy_port_descriptor_t *dest_dsc = &daddr->port;
+
+ mach_port_t name = dsc->name;
+ mach_msg_type_name_t disposition = dsc->disposition;
+
+ dest_dsc->name = CAST_MACH_PORT_TO_NAME(name);
+ dest_dsc->disposition = disposition;
+ dest_dsc->type = MACH_MSG_PORT_DESCRIPTOR;
+ break;
+ }
+ case MACH_MSG_OOL_VOLATILE_DESCRIPTOR:
+ case MACH_MSG_OOL_DESCRIPTOR: {
+ /* The sender should supply ready-made memory, i.e. a vm_map_copy_t
+ * so we don't need to do anything special. */
+
+ mach_msg_ool_descriptor_t *source_dsc = (typeof(source_dsc))&saddr->out_of_line;
+
+ mach_msg_ool_descriptor32_t *dest_dsc = &daddr->out_of_line32;
+
+ vm_offset_t address = (vm_offset_t)source_dsc->address;
+ vm_size_t size = source_dsc->size;
+ boolean_t deallocate = source_dsc->deallocate;
+ mach_msg_copy_options_t copy = source_dsc->copy;
+ mach_msg_descriptor_type_t type = source_dsc->type;
+
+ dest_dsc->address = address;
+ dest_dsc->size = size;
+ dest_dsc->deallocate = deallocate;
+ dest_dsc->copy = copy;
+ dest_dsc->type = type;
+ break;
+ }
+ case MACH_MSG_OOL_PORTS_DESCRIPTOR: {
+ mach_msg_ool_ports_descriptor_t *source_dsc = (typeof(source_dsc))&saddr->ool_ports;
+
+ mach_msg_ool_ports_descriptor32_t *dest_dsc = &daddr->ool_ports32;
+
+ vm_offset_t address = (vm_offset_t)source_dsc->address;
+ vm_size_t port_count = source_dsc->count;
+ boolean_t deallocate = source_dsc->deallocate;
+ mach_msg_copy_options_t copy = source_dsc->copy;
+ mach_msg_descriptor_type_t type = source_dsc->type;
+
+ dest_dsc->address = address;
+ dest_dsc->count = port_count;
+ dest_dsc->deallocate = deallocate;
+ dest_dsc->copy = copy;
+ dest_dsc->type = type;
+ break;
+ }
+ default: {
+#if MACH_ASSERT
+ panic("ipc_kmsg_copyin_from_kernel: bad descriptor");
+#endif /* MACH_ASSERT */
+ }
+ }
+ }
+
+ if(count) {
+ dsc_adjust = 4*count;
+ memmove((char *)((vm_offset_t)kmsg->ikm_header + dsc_adjust), kmsg->ikm_header, sizeof(mach_msg_base_t));
+ kmsg->ikm_header = (mach_msg_header_t *)((vm_offset_t)kmsg->ikm_header + dsc_adjust);
+ /* Update the message size for the smaller user representation */
+ kmsg->ikm_header->msgh_size -= dsc_adjust;
+ }
}
+#endif /* IKM_SUPPORT_LEGACY */
+
#include <mach_kdb.h>
#if MACH_KDB
void ipc_msg_print_untyped(
mach_msg_body_t *body);
-char * ipc_type_name(
+const char * ipc_type_name(
int type_name,
boolean_t received);
-void ipc_print_type_name(
- int type_name);
-
-char *
+const char *
msgh_bit_decode(
mach_msg_bits_t bit);
-char *
+const char *
mm_copy_options_string(
mach_msg_copy_options_t option);
void db_print_msg_uid(mach_msg_header_t *);
-char *
+const char *
ipc_type_name(
int type_name,
boolean_t received)
ipc_print_type_name(
int type_name)
{
- char *name = ipc_type_name(type_name, TRUE);
+ const char *name = ipc_type_name(type_name, TRUE);
if (name) {
printf("%s", name);
} else {
kmsg->ikm_prev,
kmsg->ikm_size);
printf("\n");
- ipc_msg_print(&kmsg->ikm_header);
+ ipc_msg_print(kmsg->ikm_header);
}
-char *
+const char *
msgh_bit_decode(
mach_msg_bits_t bit)
{
{
mach_msg_bits_t mbits;
unsigned int bit, i;
- char *bit_name;
+ const char *bit_name;
int needs_comma;
mbits = msgh->msgh_bits;
}
if (msgh->msgh_local_port) {
- printf("%slocal=0x%x(", needs_comma ? "," : "",
+ printf("%slocal=%p(", needs_comma ? "," : "",
msgh->msgh_local_port);
ipc_print_type_name(MACH_MSGH_BITS_LOCAL(msgh->msgh_bits));
printf(")\n");
}
-char *
+const char *
mm_copy_options_string(
mach_msg_copy_options_t option)
{
- char *name;
+ const char *name;
switch (option) {
case MACH_MSG_PHYSICAL_COPY: