#include <kern/cpu_data.h>
#include <kern/policy_internal.h>
+#include <pthread/priority_private.h>
+
#include <machine/machlimits.h>
#include <vm/vm_map.h>
#define KMSG_TRACE_PORTS_SHIFT 0
#if (KDEBUG_LEVEL >= KDEBUG_LEVEL_STANDARD)
+#include <stdint.h>
extern boolean_t kdebug_debugid_enabled(uint32_t debugid);
void ipc_kmsg_trace_send(ipc_kmsg_t kmsg,
case IKOT_MASTER_DEVICE:
case IKOT_IOKIT_CONNECT:
case IKOT_IOKIT_OBJECT:
- case IKOT_IOKIT_SPARE:
+ case IKOT_IOKIT_IDENT:
msg_flags |= KMSG_TRACE_FLAG_IOKIT;
break;
default:
* Trailer contents
*/
trailer = (mach_msg_trailer_t *)((vm_offset_t)msg +
- (vm_offset_t)msg->msgh_size);
+ round_msg((vm_offset_t)msg->msgh_size));
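/*
 * The trailer is appended at the next natural boundary after the
 * message body (see ipc_kmsg_add_trailer below, which places it at
 * round_msg(msgh_size)), so the unrounded msgh_size pointed into the
 * body's padding for messages whose size is not already aligned.
 */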
if (trailer->msgh_trailer_size <= sizeof(mach_msg_security_trailer_t)) {
extern security_token_t KERNEL_SECURITY_TOKEN;
mach_msg_security_trailer_t *strailer;
vm_size_t length);
mach_msg_return_t ipc_kmsg_copyin_body(
- ipc_kmsg_t kmsg,
- ipc_space_t space,
- vm_map_t map);
+ ipc_kmsg_t kmsg,
+ ipc_space_t space,
+ vm_map_t map,
+ mach_msg_option_t *optionp);
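/*
 * The new optionp argument threads the caller's send options down to
 * the descriptor copyin routines; as seen below, they use it to
 * suppress mach port guard exceptions for kernel-originated sends
 * (MACH_SEND_KERNEL).
 */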
/*
 * Kernel message buffers of the common (IKM_SAVED_MSG_SIZE) size come
 * from ipc_kmsg_zone; the hand-rolled per-processor ikm_cache fast
 * path is removed below, presumably because the zone allocator now
 * caches these buffers itself.
max_expanded_size = IKM_SAVED_MSG_SIZE; /* round up to the zone's fixed kmsg size */
if (max_expanded_size == IKM_SAVED_MSG_SIZE) {
- struct ikm_cache *cache;
- unsigned int i;
-
- disable_preemption();
- cache = &PROCESSOR_DATA(current_processor(), ikm_cache);
- if ((i = cache->avail) > 0) {
- assert(i <= IKM_STASH);
- kmsg = cache->entries[--i];
- cache->avail = i;
- enable_preemption();
- ikm_check_init(kmsg, max_expanded_size);
- ikm_set_header(kmsg, msg_and_trailer_size);
- return (kmsg);
- }
- enable_preemption();
kmsg = (ipc_kmsg_t)zalloc(ipc_kmsg_zone);
} else {
kmsg = (ipc_kmsg_t)kalloc(ikm_plus_overhead(max_expanded_size));
ip_release(port); /* May be last reference */
}
- /*
- * Peek and see if it has to go back in the cache.
- */
if (kmsg->ikm_size == IKM_SAVED_MSG_SIZE) {
- struct ikm_cache *cache;
- unsigned int i;
-
- disable_preemption();
- cache = &PROCESSOR_DATA(current_processor(), ikm_cache);
- if ((i = cache->avail) < IKM_STASH) {
- cache->entries[i] = kmsg;
- cache->avail = i + 1;
- enable_preemption();
- return;
- }
- enable_preemption();
zfree(ipc_kmsg_zone, kmsg);
return;
}
cur->ikm_qos_override = override;
if (cur == first)
return TRUE;
- cur = cur->ikm_next;
+ cur = cur->ikm_prev;
}
return FALSE;
}
queue->ikmq_base = IKM_NULL;
} else {
+ if (__improbable(next->ikm_prev != kmsg || prev->ikm_next != kmsg)) {
+ panic("ipc_kmsg_rmqueue: inconsistent prev/next pointers. "
+ "(prev->next: %p, next->prev: %p, kmsg: %p)",
+ prev->ikm_next, next->ikm_prev, kmsg);
+ }
+
if (queue->ikmq_base == kmsg)
queue->ikmq_base = next;
assert(kmsg->ikm_prealloc == IP_NULL);
kmsg->ikm_prealloc = IP_NULL;
+
+ assert(port_send_turnstile(port) == TURNSTILE_NULL);
+ kmsg->ikm_turnstile = TURNSTILE_NULL;
IP_SET_PREALLOC(port, kmsg);
}
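/*
 * ikm_prealloc and ikm_turnstile presumably overlay in the kmsg (they
 * are never live at the same time), which is why the turnstile field
 * is explicitly NULLed above before the kmsg is stashed on the port.
 */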
ipc_kmsg_t kmsg,
ipc_port_t port)
{
- assert(kmsg->ikm_prealloc == port);
-
- kmsg->ikm_prealloc = IP_NULL;
+ /* take the mqueue lock since the turnstile is protected under it */
+ imq_lock(&port->ip_messages);
+
IP_CLEAR_PREALLOC(port, kmsg);
+ set_port_send_turnstile(port, kmsg->ikm_turnstile);
+ imq_unlock(&port->ip_messages);
}
/*
if (copyinmsg(msg_addr, (char *)&legacy_base, len_copied))
return MACH_SEND_INVALID_DATA;
+ /*
+ * If the message claims to be complex, it must at least
+ * have the length of a "base" message (header + dsc_count).
+ */
+ if (len_copied < sizeof(mach_msg_legacy_base_t) &&
+ (legacy_base.header.msgh_bits & MACH_MSGH_BITS_COMPLEX))
+ return MACH_SEND_MSG_TOO_SMALL;
+
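/*
 * For reference, a sketch of the "base" layout being checked (the
 * authoritative definition lives near the top of ipc_kmsg.c):
 *
 *	typedef struct {
 *		mach_msg_legacy_header_t header;
 *		mach_msg_body_t          body;	// msgh_descriptor_count
 *	} mach_msg_legacy_base_t;
 *
 * Without this check, a short complex send would let the copyin path
 * read a descriptor count that was never copied from user space.
 */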
msg_addr += sizeof(legacy_base.header);
#if defined(__LP64__)
size += LEGACY_HEADER_SIZE_DELTA;
(void) memcpy((void *) kmsg->ikm_header, (const void *) msg, size);
+ ikm_qos_init(kmsg);
+
kmsg->ikm_header->msgh_size = size;
/*
* MACH_SEND_INTERRUPTED Caller still has message.
* MACH_SEND_INVALID_DEST Caller still has message.
*/
-
-
mach_msg_return_t
ipc_kmsg_send(
ipc_kmsg_t kmsg,
}
#if IMPORTANCE_INHERITANCE
- boolean_t did_importance = FALSE;
-#if IMPORTANCE_DEBUG
+ bool did_importance = false;
+#if IMPORTANCE_TRACE
mach_msg_id_t imp_msgh_id = -1;
int sender_pid = -1;
-#endif /* IMPORTANCE_DEBUG */
+#endif /* IMPORTANCE_TRACE */
#endif /* IMPORTANCE_INHERITANCE */
/* don't allow the creation of a circular loop */
if (MACH_NODE_VALID(kmsg->ikm_node) && FPORT_VALID(port->ip_messages.imq_fport))
flipc_msg_ack(kmsg->ikm_node, &port->ip_messages, FALSE);
#endif
+ if (did_importance) {
+ /*
+ * We're going to pretend we delivered this message
+ * successfully, and just eat the kmsg. However, the
+ * kmsg is actually visible via the importance_task!
+ * We need to cleanup this linkage before we destroy
+ * the message, and more importantly before we set the
+ * msgh_remote_port to NULL. See: 34302571
+ */
+ ipc_importance_clean(kmsg);
+ }
ip_release(port); /* JMM - Future: release right, not just ref */
kmsg->ikm_header->msgh_remote_port = MACH_PORT_NULL;
ipc_kmsg_destroy(kmsg);
* propagation. That routine can drop the port lock temporarily.
* If it does we'll have to revalidate the destination.
*/
- if (did_importance == FALSE) {
- did_importance = TRUE;
+ if (!did_importance) {
+ did_importance = true;
if (ipc_importance_send(kmsg, option))
goto retry;
}
* queue. Lock message queue while port is locked.
*/
imq_lock(&port->ip_messages);
+
+ set_ip_srp_msg_sent(port);
+
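/*
 * Special-reply-port ("srp") bookkeeping: while the mqueue lock is
 * still held, note that a message has been sent to this port.
 */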
ip_unlock(port);
error = ipc_mqueue_send(&port->ip_messages, kmsg, option,
}
#if IMPORTANCE_INHERITANCE
- if (did_importance == TRUE) {
+ if (did_importance) {
__unused int importance_cleared = 0;
switch (error) {
case MACH_SEND_TIMED_OUT:
default:
break;
}
-#if IMPORTANCE_DEBUG
+#if IMPORTANCE_TRACE
KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, (IMPORTANCE_CODE(IMP_MSG, IMP_MSG_SEND)) | DBG_FUNC_END,
task_pid(current_task()), sender_pid, imp_msgh_id, importance_cleared, 0);
-#endif /* IMPORTANCE_DEBUG */
+#endif /* IMPORTANCE_TRACE */
}
#endif /* IMPORTANCE_INHERITANCE */
kmsg->ikm_header->msgh_id);
#if defined(__LP64__)
- if (current_task() != kernel_task) { /* don't if receiver expects fully-cooked in-kernel msg; ux_exception */
+ if (current_task() != kernel_task) { /* don't if receiver expects fully-cooked in-kernel msg */
mach_msg_legacy_header_t *legacy_header =
(mach_msg_legacy_header_t *)((vm_offset_t)(kmsg->ikm_header) + LEGACY_HEADER_SIZE_DELTA);
ipc_kmsg_free(kmsg);
}
-unsigned long pthread_priority_canonicalize(unsigned long priority, boolean_t propagation);
+static mach_msg_priority_t
+ipc_get_current_thread_priority(void)
+{
+ thread_t thread = current_thread();
+ thread_qos_t qos;
+ int relpri;
+
+ qos = thread_get_requested_qos(thread, &relpri);
+ if (!qos) {
+ qos = thread_user_promotion_qos_for_pri(thread->base_pri);
+ relpri = 0;
+ }
+ return (mach_msg_priority_t)_pthread_priority_make_from_thread_qos(qos, relpri, 0);
+}
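/*
 * Used below when MACH_SEND_PROPAGATE_QOS is set and the message has
 * no voucher-derived priority: the sender's requested QoS (or, failing
 * that, a QoS inferred from its base priority) is encoded as a pthread
 * priority and stamped into the kmsg.
 */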
-static void
+static kern_return_t
ipc_kmsg_set_qos(
ipc_kmsg_t kmsg,
mach_msg_option_t options,
mach_msg_priority_t override)
{
kern_return_t kr;
+ ipc_port_t special_reply_port = kmsg->ikm_header->msgh_local_port;
+ ipc_port_t dest_port = kmsg->ikm_header->msgh_remote_port;
kr = ipc_get_pthpriority_from_kmsg_voucher(kmsg, &kmsg->ikm_qos);
if (kr != KERN_SUCCESS) {
- kmsg->ikm_qos = MACH_MSG_PRIORITY_UNSPECIFIED;
+ if (options & MACH_SEND_PROPAGATE_QOS) {
+ kmsg->ikm_qos = ipc_get_current_thread_priority();
+ } else {
+ kmsg->ikm_qos = MACH_MSG_PRIORITY_UNSPECIFIED;
+ }
}
kmsg->ikm_qos_override = kmsg->ikm_qos;
if (options & MACH_SEND_OVERRIDE) {
- unsigned long canonical;
- mach_msg_priority_t canon;
+ pthread_priority_t pp = _pthread_priority_normalize_for_ipc(override);
+ if (pp > kmsg->ikm_qos)
+ kmsg->ikm_qos_override = (mach_msg_priority_t)pp;
+ }
- canonical = pthread_priority_canonicalize(override, TRUE);
- canon = (mach_msg_priority_t)canonical;
- if (canon > kmsg->ikm_qos)
- kmsg->ikm_qos_override = canon;
+ kr = KERN_SUCCESS;
+ if ((options & MACH_SEND_SYNC_OVERRIDE)) {
+ if (IP_VALID(special_reply_port) &&
+ MACH_MSGH_BITS_LOCAL(kmsg->ikm_header->msgh_bits) == MACH_MSG_TYPE_PORT_SEND_ONCE) {
+ /*
+ * Link the destination port to special reply port and make sure that
+ * dest port has a send turnstile, else allocate one.
+ */
+ ipc_port_link_special_reply_port(special_reply_port, dest_port);
+ }
}
+ return kr;
}
/*
(voucher_type != MACH_MSG_TYPE_MOVE_SEND &&
voucher_type != MACH_MSG_TYPE_COPY_SEND)) {
is_write_unlock(space);
+ if ((*optionp & MACH_SEND_KERNEL) == 0) {
+ mach_port_guard_exception(voucher_name, 0, 0, kGUARD_EXC_SEND_INVALID_VOUCHER);
+ }
return MACH_SEND_INVALID_VOUCHER;
}
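/*
 * Kernel-internal senders pass MACH_SEND_KERNEL; there is no offending
 * user task to target with an EXC_GUARD in that case, so the guard
 * exception is raised only for user-originated sends.
 */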
(voucher_entry->ie_bits & MACH_PORT_TYPE_SEND) == 0 ||
io_kotype(voucher_entry->ie_object) != IKOT_VOUCHER) {
is_write_unlock(space);
+ if ((*optionp & MACH_SEND_KERNEL) == 0) {
+ mach_port_guard_exception(voucher_name, 0, 0, kGUARD_EXC_SEND_INVALID_VOUCHER);
+ }
return MACH_SEND_INVALID_VOUCHER;
}
} else {
voucher_type = MACH_MSG_TYPE_MOVE_SEND;
}
- /* capture the qos value(s) for the kmsg */
- ipc_kmsg_set_qos(kmsg, *optionp, override);
-
msg->msgh_bits = MACH_MSGH_BITS_SET(dest_type, reply_type, voucher_type, mbits);
msg->msgh_remote_port = (ipc_port_t)dest_port;
msg->msgh_local_port = (ipc_port_t)reply_port;
+ /* capture the qos value(s) for the kmsg */
+ ipc_kmsg_set_qos(kmsg, *optionp, override);
+
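/*
 * Note the reordering: ipc_kmsg_set_qos() now runs after msgh_bits and
 * the header ports are finalized, because with MACH_SEND_SYNC_OVERRIDE
 * it inspects the local port's disposition and links the special reply
 * port to the destination, so it must see the fully assembled header.
 */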
if (release_port != IP_NULL)
ip_release(release_port);
assert(voucher_port == IP_NULL);
assert(voucher_soright == IP_NULL);
+ if ((*optionp & MACH_SEND_KERNEL) == 0) {
+ mach_port_guard_exception(reply_name, 0, 0, kGUARD_EXC_SEND_INVALID_REPLY);
+ }
return MACH_SEND_INVALID_REPLY;
invalid_dest:
}
mach_msg_descriptor_t *ipc_kmsg_copyin_port_descriptor(
- volatile mach_msg_port_descriptor_t *dsc,
- mach_msg_legacy_port_descriptor_t *user_dsc,
- ipc_space_t space,
- ipc_object_t dest,
- ipc_kmsg_t kmsg,
- mach_msg_return_t *mr);
+ volatile mach_msg_port_descriptor_t *dsc,
+ mach_msg_legacy_port_descriptor_t *user_dsc,
+ ipc_space_t space,
+ ipc_object_t dest,
+ ipc_kmsg_t kmsg,
+ mach_msg_option_t *optionp,
+ mach_msg_return_t *mr);
void ipc_print_type_name(
int type_name);
+
mach_msg_descriptor_t *
ipc_kmsg_copyin_port_descriptor(
- volatile mach_msg_port_descriptor_t *dsc,
- mach_msg_legacy_port_descriptor_t *user_dsc_in,
- ipc_space_t space,
- ipc_object_t dest,
- ipc_kmsg_t kmsg,
- mach_msg_return_t *mr)
+ volatile mach_msg_port_descriptor_t *dsc,
+ mach_msg_legacy_port_descriptor_t *user_dsc_in,
+ ipc_space_t space,
+ ipc_object_t dest,
+ ipc_kmsg_t kmsg,
+ mach_msg_option_t *optionp,
+ mach_msg_return_t *mr)
{
volatile mach_msg_legacy_port_descriptor_t *user_dsc = user_dsc_in;
mach_msg_type_name_t user_disp;
kern_return_t kr = ipc_object_copyin(space, name, user_disp, &object);
if (kr != KERN_SUCCESS) {
+ if ((*optionp & MACH_SEND_KERNEL) == 0) {
+ mach_port_guard_exception(name, 0, 0, kGUARD_EXC_SEND_INVALID_RIGHT);
+ }
*mr = MACH_SEND_INVALID_RIGHT;
return NULL;
}
}
mach_msg_descriptor_t * ipc_kmsg_copyin_ool_descriptor(
- mach_msg_ool_descriptor_t *dsc,
- mach_msg_descriptor_t *user_dsc,
- int is_64bit,
- vm_offset_t *paddr,
- vm_map_copy_t *copy,
- vm_size_t *space_needed,
- vm_map_t map,
- mach_msg_return_t *mr);
+ mach_msg_ool_descriptor_t *dsc,
+ mach_msg_descriptor_t *user_dsc,
+ int is_64bit,
+ vm_offset_t *paddr,
+ vm_map_copy_t *copy,
+ vm_size_t *space_needed,
+ vm_map_t map,
+ mach_msg_option_t *optionp,
+ mach_msg_return_t *mr);
+
mach_msg_descriptor_t *
ipc_kmsg_copyin_ool_descriptor(
- mach_msg_ool_descriptor_t *dsc,
- mach_msg_descriptor_t *user_dsc,
- int is_64bit,
- vm_offset_t *paddr,
- vm_map_copy_t *copy,
- vm_size_t *space_needed,
- vm_map_t map,
- mach_msg_return_t *mr)
+ mach_msg_ool_descriptor_t *dsc,
+ mach_msg_descriptor_t *user_dsc,
+ int is_64bit,
+ vm_offset_t *paddr,
+ vm_map_copy_t *copy,
+ vm_size_t *space_needed,
+ vm_map_t map,
+ __unused mach_msg_option_t *optionp,
+ mach_msg_return_t *mr)
{
vm_size_t length;
boolean_t dealloc;
}
mach_msg_descriptor_t * ipc_kmsg_copyin_ool_ports_descriptor(
- mach_msg_ool_ports_descriptor_t *dsc,
- mach_msg_descriptor_t *user_dsc,
- int is_64bit,
- vm_map_t map,
- ipc_space_t space,
- ipc_object_t dest,
- ipc_kmsg_t kmsg,
- mach_msg_return_t *mr);
+ mach_msg_ool_ports_descriptor_t *dsc,
+ mach_msg_descriptor_t *user_dsc,
+ int is_64bit,
+ vm_map_t map,
+ ipc_space_t space,
+ ipc_object_t dest,
+ ipc_kmsg_t kmsg,
+ mach_msg_option_t *optionp,
+ mach_msg_return_t *mr);
+
mach_msg_descriptor_t *
ipc_kmsg_copyin_ool_ports_descriptor(
- mach_msg_ool_ports_descriptor_t *dsc,
- mach_msg_descriptor_t *user_dsc,
- int is_64bit,
- vm_map_t map,
- ipc_space_t space,
- ipc_object_t dest,
- ipc_kmsg_t kmsg,
- mach_msg_return_t *mr)
+ mach_msg_ool_ports_descriptor_t *dsc,
+ mach_msg_descriptor_t *user_dsc,
+ int is_64bit,
+ vm_map_t map,
+ ipc_space_t space,
+ ipc_object_t dest,
+ ipc_kmsg_t kmsg,
+ mach_msg_option_t *optionp,
+ mach_msg_return_t *mr)
{
- void *data;
- ipc_object_t *objects;
- unsigned int i;
- mach_vm_offset_t addr;
- mach_msg_type_name_t user_disp;
- mach_msg_type_name_t result_disp;
- mach_msg_type_number_t count;
- mach_msg_copy_options_t copy_option;
- boolean_t deallocate;
- mach_msg_descriptor_type_t type;
- vm_size_t ports_length, names_length;
+ void *data;
+ ipc_object_t *objects;
+ unsigned int i;
+ mach_vm_offset_t addr;
+ mach_msg_type_name_t user_disp;
+ mach_msg_type_name_t result_disp;
+ mach_msg_type_number_t count;
+ mach_msg_copy_options_t copy_option;
+ boolean_t deallocate;
+ mach_msg_descriptor_type_t type;
+ vm_size_t ports_length, names_length;
if (is_64bit) {
mach_msg_ool_ports_descriptor64_t *user_ool_dsc = (typeof(user_ool_dsc))user_dsc;
}
kfree(data, ports_length);
dsc->address = NULL;
+ if ((*optionp & MACH_SEND_KERNEL) == 0) {
+ mach_port_guard_exception(name, 0, 0, kGUARD_EXC_SEND_INVALID_RIGHT);
+ }
*mr = MACH_SEND_INVALID_RIGHT;
return NULL;
}
ipc_kmsg_copyin_body(
ipc_kmsg_t kmsg,
ipc_space_t space,
- vm_map_t map)
+ vm_map_t map,
+ mach_msg_option_t *optionp)
{
ipc_object_t dest;
mach_msg_body_t *body;
* space.
*/
if (space_needed) {
- if (vm_allocate(ipc_kernel_copy_map, &paddr, space_needed,
- VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_KERN_MEMORY_IPC)) != KERN_SUCCESS) {
+ if (vm_allocate_kernel(ipc_kernel_copy_map, &paddr, space_needed,
+ VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_IPC) != KERN_SUCCESS) {
mr = MACH_MSG_VM_KERNEL;
goto clean_message;
}
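/*
 * vm_allocate_kernel() takes the VM tag as a separate parameter rather
 * than packed into the flags with VM_MAKE_TAG(); the
 * mach_vm_allocate_kernel() call further down changes the same way.
 */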
switch (user_addr->type.type) {
case MACH_MSG_PORT_DESCRIPTOR:
user_addr = ipc_kmsg_copyin_port_descriptor((mach_msg_port_descriptor_t *)kern_addr,
- (mach_msg_legacy_port_descriptor_t *)user_addr, space, dest, kmsg, &mr);
+ (mach_msg_legacy_port_descriptor_t *)user_addr, space, dest, kmsg, optionp, &mr);
kern_addr++;
complex = TRUE;
break;
case MACH_MSG_OOL_VOLATILE_DESCRIPTOR:
case MACH_MSG_OOL_DESCRIPTOR:
user_addr = ipc_kmsg_copyin_ool_descriptor((mach_msg_ool_descriptor_t *)kern_addr,
- user_addr, is_task_64bit, &paddr, &copy, &space_needed, map, &mr);
+ user_addr, is_task_64bit, &paddr, &copy, &space_needed, map, optionp, &mr);
kern_addr++;
complex = TRUE;
break;
case MACH_MSG_OOL_PORTS_DESCRIPTOR:
user_addr = ipc_kmsg_copyin_ool_ports_descriptor((mach_msg_ool_ports_descriptor_t *)kern_addr,
- user_addr, is_task_64bit, map, space, dest, kmsg, &mr);
+ user_addr, is_task_64bit, map, space, dest, kmsg, optionp, &mr);
kern_addr++;
complex = TRUE;
break;
if ((kmsg->ikm_header->msgh_bits & MACH_MSGH_BITS_COMPLEX) == 0)
return MACH_MSG_SUCCESS;
- mr = ipc_kmsg_copyin_body( kmsg, space, map);
+ mr = ipc_kmsg_copyin_body( kmsg, space, map, optionp);
/* unreachable if !DEBUG */
__unreachable_ok_push
/*
* Dynamically allocate the region
*/
- int anywhere = VM_FLAGS_ANYWHERE;
- if (vm_kernel_map_is_kernel(map)) anywhere |= VM_MAKE_TAG(VM_KERN_MEMORY_IPC);
- else anywhere |= VM_MAKE_TAG(VM_MEMORY_MACH_MSG);
+ vm_tag_t tag;
+ if (vm_kernel_map_is_kernel(map)) tag = VM_KERN_MEMORY_IPC;
+ else tag = VM_MEMORY_MACH_MSG;
kern_return_t kr;
- if ((kr = mach_vm_allocate(map, &rcv_addr,
+ if ((kr = mach_vm_allocate_kernel(map, &rcv_addr,
(mach_vm_size_t)names_length,
- anywhere)) != KERN_SUCCESS) {
+ VM_FLAGS_ANYWHERE, tag)) != KERN_SUCCESS) {
ipc_kmsg_clean_body(kmsg, 1, (mach_msg_descriptor_t *)dsc);
rcv_addr = 0;
mach_port_name_t dest_name, reply_name;
mach_msg_return_t mr;
+ /* Mark this as a pseudo-receive: ith_knote gets the ITH_KNOTE_PSEUDO sentinel, not a real knote */
+ current_thread()->ith_knote = ITH_KNOTE_PSEUDO;
+
assert(IO_VALID(dest));
#if 0
ipc_space_t space)
{
ipc_object_t dest;
- ipc_object_t reply;
+ mach_port_t reply;
mach_msg_type_name_t dest_type;
mach_msg_type_name_t reply_type;
- mach_port_name_t dest_name, reply_name;
+ mach_port_name_t dest_name;
dest = (ipc_object_t) kmsg->ikm_header->msgh_remote_port;
- reply = (ipc_object_t) kmsg->ikm_header->msgh_local_port;
+ reply = kmsg->ikm_header->msgh_local_port;
dest_type = MACH_MSGH_BITS_REMOTE(kmsg->ikm_header->msgh_bits);
reply_type = MACH_MSGH_BITS_LOCAL(kmsg->ikm_header->msgh_bits);
dest_name = MACH_PORT_DEAD;
}
- reply_name = CAST_MACH_PORT_TO_NAME(reply);
-
kmsg->ikm_header->msgh_bits =
(MACH_MSGH_BITS_OTHER(kmsg->ikm_header->msgh_bits) |
MACH_MSGH_BITS(reply_type, dest_type));
kmsg->ikm_header->msgh_local_port = CAST_MACH_NAME_TO_PORT(dest_name);
- kmsg->ikm_header->msgh_remote_port = CAST_MACH_NAME_TO_PORT(reply_name);
+ kmsg->ikm_header->msgh_remote_port = reply;
}
#if IKM_SUPPORT_LEGACY
}
#endif /* IKM_SUPPORT_LEGACY */
+#ifdef __arm64__
+/*
+ * Just sets those parts of the trailer that aren't set up at allocation time.
+ */
+static void
+ipc_kmsg_munge_trailer(mach_msg_max_trailer_t *in, void *_out, boolean_t is64bit)
+{
+ if (is64bit) {
+ mach_msg_max_trailer64_t *out = (mach_msg_max_trailer64_t*)_out;
+ out->msgh_seqno = in->msgh_seqno;
+ out->msgh_context = in->msgh_context;
+ out->msgh_trailer_size = in->msgh_trailer_size;
+ out->msgh_ad = in->msgh_ad;
+ } else {
+ mach_msg_max_trailer32_t *out = (mach_msg_max_trailer32_t*)_out;
+ out->msgh_seqno = in->msgh_seqno;
+ out->msgh_context = (mach_port_context32_t)in->msgh_context;
+ out->msgh_trailer_size = in->msgh_trailer_size;
+ out->msgh_ad = in->msgh_ad;
+ }
+}
+#endif /* __arm64__ */
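/*
 * The 32- and 64-bit user trailer layouts differ (msgh_context is a
 * 32-bit field in the former), so on arm64 the kernel-side max trailer
 * is assembled in scratch space and ipc_kmsg_munge_trailer() copies
 * the fields out in whichever layout the receiver's address space
 * requires.
 */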
mach_msg_trailer_size_t
ipc_kmsg_add_trailer(ipc_kmsg_t kmsg, ipc_space_t space __unused,
{
mach_msg_max_trailer_t *trailer;
+#ifdef __arm64__
+ mach_msg_max_trailer_t tmp_trailer; /* large enough for the 64-bit layout; munged into the user's layout below */
+ void *real_trailer_out = (void*)(mach_msg_max_trailer_t *)
+ ((vm_offset_t)kmsg->ikm_header +
+ round_msg(kmsg->ikm_header->msgh_size));
+
+ /*
+ * Populate scratch with initial values set up at message allocation time.
+ * After, we reinterpret the space in the message as the right type
+ * of trailer for the address space in question.
+ */
+ bcopy(real_trailer_out, &tmp_trailer, MAX_TRAILER_SIZE);
+ trailer = &tmp_trailer;
+#else /* __arm64__ */
(void)thread;
trailer = (mach_msg_max_trailer_t *)
((vm_offset_t)kmsg->ikm_header +
round_msg(kmsg->ikm_header->msgh_size));
+#endif /* __arm64__ */
if (!(option & MACH_RCV_TRAILER_MASK)) {
return trailer->msgh_trailer_size;
trailer->msgh_seqno = seqno;
trailer->msgh_context = context;
- trailer->msgh_trailer_size = REQUESTED_TRAILER_SIZE(thread_is_64bit(thread), option);
+ trailer->msgh_trailer_size = REQUESTED_TRAILER_SIZE(thread_is_64bit_addr(thread), option);
if (minimal_trailer) {
goto done;
}
done:
+#ifdef __arm64__
+ ipc_kmsg_munge_trailer(trailer, real_trailer_out, thread_is_64bit_addr(thread));
+#endif /* __arm64__ */
return trailer->msgh_trailer_size;
}
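/*
 * A user-space sketch (not part of this change) of how a receiver
 * drives ipc_kmsg_add_trailer(): the MACH_RCV_TRAILER_* macros are the
 * public ones from <mach/message.h>, and requesting
 * MACH_RCV_TRAILER_AUDIT yields the sender's audit token in the
 * received trailer.  Builds as a standalone program, not in-kernel.
 */
#if 0
#include <stdio.h>
#include <mach/mach.h>

int
main(void)
{
	mach_port_t port;
	mach_msg_header_t send_msg;
	struct {
		mach_msg_header_t header;
		mach_msg_max_trailer_t trailer;
	} rcv_msg;

	/* A receive right, plus a send right so we can message ourselves. */
	if (mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &port) != KERN_SUCCESS ||
	    mach_port_insert_right(mach_task_self(), port, port, MACH_MSG_TYPE_MAKE_SEND) != KERN_SUCCESS)
		return 1;

	send_msg.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_COPY_SEND, 0);
	send_msg.msgh_size = sizeof(send_msg);
	send_msg.msgh_remote_port = port;
	send_msg.msgh_local_port = MACH_PORT_NULL;
	send_msg.msgh_voucher_port = MACH_PORT_NULL;
	send_msg.msgh_id = 0;
	if (mach_msg(&send_msg, MACH_SEND_MSG, sizeof(send_msg), 0,
	    MACH_PORT_NULL, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL) != KERN_SUCCESS)
		return 1;

	/* Receive it back, asking for a FORMAT_0 trailer up to the audit token. */
	if (mach_msg(&rcv_msg.header,
	    MACH_RCV_MSG |
	    MACH_RCV_TRAILER_TYPE(MACH_MSG_TRAILER_FORMAT_0) |
	    MACH_RCV_TRAILER_ELEMENTS(MACH_RCV_TRAILER_AUDIT),
	    0, sizeof(rcv_msg), port, MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL) != KERN_SUCCESS)
		return 1;

	/* audit_token_t's val[5] is the sender's pid. */
	printf("trailer size %u, sender pid %d\n",
	    rcv_msg.trailer.msgh_trailer_size,
	    (int)rcv_msg.trailer.msgh_audit.val[5]);
	return 0;
}
#endif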