/*
- * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
- *
+ *
* This file contains Original Code and/or Modifications of Original Code
* as defined in and that are subject to the Apple Public Source License
* Version 2.0 (the 'License'). You may not use this file except in
* unlawful or unlicensed copies of an Apple operating system, or to
* circumvent, violate, or enable the circumvention or violation of, any
* terms of an Apple operating system software license agreement.
- *
+ *
* Please obtain a copy of the License at
* http://www.opensource.apple.com/apsl/ and read it before using this file.
- *
+ *
* The Original Code and all software distributed under the License are
* distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
* Please see the License for the specific language governing rights and
* limitations under the License.
- *
+ *
* @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
/*
* @OSF_COPYRIGHT@
*/
-/*
+/*
* Mach Operating System
* Copyright (c) 1991,1990,1989 Carnegie Mellon University
* All Rights Reserved.
- *
+ *
* Permission to use, copy, modify and distribute this software and its
* documentation is hereby granted, provided that both the copyright
* notice and this permission notice appear in all copies of the
* software, derivative works or modified versions, and any portions
* thereof, and that both notices appear in supporting documentation.
- *
+ *
* CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
* CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
* ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
- *
+ *
* Carnegie Mellon requests users of this software to return to
- *
+ *
* Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
* School of Computer Science
* Carnegie Mellon University
* Pittsburgh PA 15213-3890
- *
+ *
* any improvements or extensions that they make and grant Carnegie Mellon
* the rights to redistribute these changes.
*/
/*
* File: ipc/mach_port.c
* Author: Rich Draves
- * Date: 1989
+ * Date: 1989
*
* Exported kernel calls. See mach/mach_port.defs.
*/
#include <mach_debug.h>
-#include <mach_rt.h>
#include <mach/port.h>
#include <mach/kern_return.h>
#include <mach/vm_prot.h>
#include <mach/vm_map.h>
#include <kern/task.h>
-#include <kern/counters.h>
#include <kern/thread.h>
-#include <kern/kalloc.h>
+#include <kern/exc_guard.h>
#include <mach/mach_port_server.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
+#include <ipc/port.h>
#include <ipc/ipc_entry.h>
#include <ipc/ipc_space.h>
#include <ipc/ipc_object.h>
#include <ipc/ipc_kmsg.h>
#include <kern/misc_protos.h>
#include <security/mac_mach_internal.h>
+#include <kern/work_interval.h>
+#include <kern/policy_internal.h>
#if IMPORTANCE_INHERITANCE
#include <ipc/ipc_importance.h>
#endif
-
-/*
- * Forward declarations
- */
-void mach_port_names_helper(
- ipc_port_timestamp_t timestamp,
- ipc_entry_t entry,
- mach_port_name_t name,
- mach_port_name_t *names,
- mach_port_type_t *types,
- ipc_entry_num_t *actualp);
-
-void mach_port_gst_helper(
- ipc_pset_t pset,
- ipc_entry_num_t maxnames,
- mach_port_name_t *names,
- ipc_entry_num_t *actualp);
-
-
-kern_return_t
-mach_port_guard_exception(
- mach_port_name_t name,
- uint64_t inguard,
- uint64_t portguard,
- unsigned reason);
-
-/* Needs port locked */
-void mach_port_get_status_helper(
- ipc_port_t port,
- mach_port_status_t *status);
+kern_return_t mach_port_get_attributes(ipc_space_t space, mach_port_name_t name,
+ int flavor, mach_port_info_t info, mach_msg_type_number_t *count);
+kern_return_t mach_port_get_context(ipc_space_t space, mach_port_name_t name,
+ mach_vm_address_t *context);
+kern_return_t mach_port_get_set_status(ipc_space_t space, mach_port_name_t name,
+ mach_port_name_t **members, mach_msg_type_number_t *membersCnt);
/* Zeroed template of qos flags */
-static mach_port_qos_t qos_template;
+static mach_port_qos_t qos_template;
/*
* Routine: mach_port_names_helper
* Conditions:
* Space containing entry is [at least] read-locked.
*/
-
-void
+static void
mach_port_names_helper(
- ipc_port_timestamp_t timestamp,
- ipc_entry_t entry,
- mach_port_name_t name,
- mach_port_name_t *names,
- mach_port_type_t *types,
- ipc_entry_num_t *actualp)
+ ipc_port_timestamp_t timestamp,
+ ipc_entry_t entry,
+ mach_port_name_t name,
+ mach_port_name_t *names,
+ mach_port_type_t *types,
+ ipc_entry_num_t *actualp)
{
ipc_entry_bits_t bits;
ipc_port_request_index_t request;
bits = entry->ie_bits;
request = entry->ie_request;
- __IGNORE_WCASTALIGN(port = (ipc_port_t) entry->ie_object);
+ port = ip_object_to_port(entry->ie_object);
if (bits & MACH_PORT_TYPE_RECEIVE) {
assert(IP_VALID(port));
if (request != IE_REQ_NONE) {
ip_lock(port);
- assert(ip_active(port));
+ require_ip_active(port);
type |= ipc_port_request_type(port, name, request);
ip_unlock(port);
}
-
} else if (bits & MACH_PORT_TYPE_SEND_RIGHTS) {
mach_port_type_t reqtype;
ip_lock(port);
reqtype = (request != IE_REQ_NONE) ?
- ipc_port_request_type(port, name, request) : 0;
-
+ ipc_port_request_type(port, name, request) : 0;
+
/*
* If the port is alive, or was alive when the mach_port_names
* started, then return that fact. Otherwise, pretend we found
bits &= ~(IE_BITS_TYPE_MASK);
bits |= MACH_PORT_TYPE_DEAD_NAME;
/* account for additional reference for dead-name notification */
- if (reqtype != 0)
+ if (reqtype != 0) {
bits++;
+ }
}
ip_unlock(port);
}
actual = *actualp;
names[actual] = name;
types[actual] = type;
- *actualp = actual+1;
+ *actualp = actual + 1;
}
/*
kern_return_t
mach_port_names(
- ipc_space_t space,
- mach_port_name_t **namesp,
- mach_msg_type_number_t *namesCnt,
- mach_port_type_t **typesp,
- mach_msg_type_number_t *typesCnt)
+ ipc_space_t space,
+ mach_port_name_t **namesp,
+ mach_msg_type_number_t *namesCnt,
+ mach_port_type_t **typesp,
+ mach_msg_type_number_t *typesCnt)
{
ipc_entry_t table;
ipc_entry_num_t tsize;
mach_port_index_t index;
- ipc_entry_num_t actual; /* this many names */
- ipc_port_timestamp_t timestamp; /* logical time of this operation */
+ ipc_entry_num_t actual; /* this many names */
+ ipc_port_timestamp_t timestamp; /* logical time of this operation */
mach_port_name_t *names;
mach_port_type_t *types;
kern_return_t kr;
- vm_size_t size; /* size of allocated memory */
- vm_offset_t addr1; /* allocated memory, for names */
- vm_offset_t addr2; /* allocated memory, for types */
- vm_map_copy_t memory1; /* copied-in memory, for names */
- vm_map_copy_t memory2; /* copied-in memory, for types */
+ vm_size_t size; /* size of allocated memory */
+ vm_offset_t addr1; /* allocated memory, for names */
+ vm_offset_t addr2; /* allocated memory, for types */
+ vm_map_copy_t memory1; /* copied-in memory, for names */
+ vm_map_copy_t memory2; /* copied-in memory, for types */
/* safe simplifying assumption */
- assert_static(sizeof(mach_port_name_t) == sizeof(mach_port_type_t));
+ static_assert(sizeof(mach_port_name_t) == sizeof(mach_port_type_t));
- if (space == IS_NULL)
+ if (space == IS_NULL) {
return KERN_INVALID_TASK;
+ }
size = 0;
(bound * sizeof(mach_port_name_t)),
VM_MAP_PAGE_MASK(ipc_kernel_map));
- if (size_needed <= size)
+ if (size_needed <= size) {
break;
+ }
is_read_unlock(space);
}
size = size_needed;
- kr = vm_allocate(ipc_kernel_map, &addr1, size, VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_KERN_MEMORY_IPC));
- if (kr != KERN_SUCCESS)
+ kr = vm_allocate_kernel(ipc_kernel_map, &addr1, size, VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_IPC);
+ if (kr != KERN_SUCCESS) {
return KERN_RESOURCE_SHORTAGE;
+ }
- kr = vm_allocate(ipc_kernel_map, &addr2, size, VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_KERN_MEMORY_IPC));
+ kr = vm_allocate_kernel(ipc_kernel_map, &addr2, size, VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_IPC);
if (kr != KERN_SUCCESS) {
kmem_free(ipc_kernel_map, addr1, size);
return KERN_RESOURCE_SHORTAGE;
/* can't fault while we hold locks */
- kr = vm_map_wire(
+ kr = vm_map_wire_kernel(
ipc_kernel_map,
vm_map_trunc_page(addr1,
- VM_MAP_PAGE_MASK(ipc_kernel_map)),
+ VM_MAP_PAGE_MASK(ipc_kernel_map)),
vm_map_round_page(addr1 + size,
- VM_MAP_PAGE_MASK(ipc_kernel_map)),
- VM_PROT_READ|VM_PROT_WRITE|VM_PROT_MEMORY_TAG_MAKE(VM_KERN_MEMORY_IPC),
+ VM_MAP_PAGE_MASK(ipc_kernel_map)),
+ VM_PROT_READ | VM_PROT_WRITE, VM_KERN_MEMORY_IPC,
FALSE);
if (kr != KERN_SUCCESS) {
kmem_free(ipc_kernel_map, addr1, size);
return KERN_RESOURCE_SHORTAGE;
}
- kr = vm_map_wire(
+ kr = vm_map_wire_kernel(
ipc_kernel_map,
vm_map_trunc_page(addr2,
- VM_MAP_PAGE_MASK(ipc_kernel_map)),
+ VM_MAP_PAGE_MASK(ipc_kernel_map)),
vm_map_round_page(addr2 + size,
- VM_MAP_PAGE_MASK(ipc_kernel_map)),
- VM_PROT_READ|VM_PROT_WRITE|VM_PROT_MEMORY_TAG_MAKE(VM_KERN_MEMORY_IPC),
+ VM_MAP_PAGE_MASK(ipc_kernel_map)),
+ VM_PROT_READ | VM_PROT_WRITE,
+ VM_KERN_MEMORY_IPC,
FALSE);
if (kr != KERN_SUCCESS) {
kmem_free(ipc_kernel_map, addr1, size);
kmem_free(ipc_kernel_map, addr2, size);
return KERN_RESOURCE_SHORTAGE;
}
-
}
/* space is read-locked and active */
name = MACH_PORT_MAKE(index, IE_BITS_GEN(bits));
mach_port_names_helper(timestamp, entry, name, names,
- types, &actual);
+ types, &actual);
}
}
size_used = actual * sizeof(mach_port_name_t);
vm_size_used =
- vm_map_round_page(size_used,
- VM_MAP_PAGE_MASK(ipc_kernel_map));
+ vm_map_round_page(size_used,
+ VM_MAP_PAGE_MASK(ipc_kernel_map));
/*
* Make used memory pageable and get it into
kr = vm_map_unwire(
ipc_kernel_map,
vm_map_trunc_page(addr1,
- VM_MAP_PAGE_MASK(ipc_kernel_map)),
+ VM_MAP_PAGE_MASK(ipc_kernel_map)),
vm_map_round_page(addr1 + vm_size_used,
- VM_MAP_PAGE_MASK(ipc_kernel_map)),
+ VM_MAP_PAGE_MASK(ipc_kernel_map)),
FALSE);
assert(kr == KERN_SUCCESS);
kr = vm_map_unwire(
ipc_kernel_map,
vm_map_trunc_page(addr2,
- VM_MAP_PAGE_MASK(ipc_kernel_map)),
+ VM_MAP_PAGE_MASK(ipc_kernel_map)),
vm_map_round_page(addr2 + vm_size_used,
- VM_MAP_PAGE_MASK(ipc_kernel_map)),
+ VM_MAP_PAGE_MASK(ipc_kernel_map)),
FALSE);
assert(kr == KERN_SUCCESS);
kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr1,
- (vm_map_size_t)size_used, TRUE, &memory1);
+ (vm_map_size_t)size_used, TRUE, &memory1);
assert(kr == KERN_SUCCESS);
kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr2,
- (vm_map_size_t)size_used, TRUE, &memory2);
+ (vm_map_size_t)size_used, TRUE, &memory2);
assert(kr == KERN_SUCCESS);
if (vm_size_used != size) {
kmem_free(ipc_kernel_map,
- addr1 + vm_size_used, size - vm_size_used);
+ addr1 + vm_size_used, size - vm_size_used);
kmem_free(ipc_kernel_map,
- addr2 + vm_size_used, size - vm_size_used);
+ addr2 + vm_size_used, size - vm_size_used);
}
}
kern_return_t
mach_port_type(
- ipc_space_t space,
- mach_port_name_t name,
- mach_port_type_t *typep)
+ ipc_space_t space,
+ mach_port_name_t name,
+ mach_port_type_t *typep)
{
mach_port_urefs_t urefs;
ipc_entry_t entry;
kern_return_t kr;
- if (space == IS_NULL)
+ if (space == IS_NULL) {
return KERN_INVALID_TASK;
+ }
- if (name == MACH_PORT_NULL)
+ if (name == MACH_PORT_NULL) {
return KERN_INVALID_NAME;
+ }
if (name == MACH_PORT_DEAD) {
*typep = MACH_PORT_TYPE_DEAD_NAME;
}
kr = ipc_right_lookup_write(space, name, &entry);
- if (kr != KERN_SUCCESS)
+ if (kr != KERN_SUCCESS) {
return kr;
+ }
/* space is write-locked and active */
kr = ipc_right_info(space, name, entry, typep, &urefs);
/* space is unlocked */
#if 1
- /* JMM - workaround rdar://problem/9121297 (CF being too picky on these bits). */
- *typep &= ~(MACH_PORT_TYPE_SPREQUEST | MACH_PORT_TYPE_SPREQUEST_DELAYED);
+ /* JMM - workaround rdar://problem/9121297 (CF being too picky on these bits). */
+ *typep &= ~(MACH_PORT_TYPE_SPREQUEST | MACH_PORT_TYPE_SPREQUEST_DELAYED);
#endif
return kr;
kern_return_t
mach_port_rename(
- __unused ipc_space_t space,
- __unused mach_port_name_t oname,
- __unused mach_port_name_t nname)
+ __unused ipc_space_t space,
+ __unused mach_port_name_t oname,
+ __unused mach_port_name_t nname)
{
return KERN_NOT_SUPPORTED;
}
kern_return_t
mach_port_allocate_name(
- ipc_space_t space,
- mach_port_right_t right,
- mach_port_name_t name)
+ ipc_space_t space,
+ mach_port_right_t right,
+ mach_port_name_t name)
{
- kern_return_t kr;
- mach_port_qos_t qos = qos_template;
+ kern_return_t kr;
+ mach_port_qos_t qos = qos_template;
qos.name = TRUE;
- if (!MACH_PORT_VALID(name))
+ if (!MACH_PORT_VALID(name)) {
return KERN_INVALID_VALUE;
+ }
- kr = mach_port_allocate_full (space, right, MACH_PORT_NULL,
- &qos, &name);
- return (kr);
+ kr = mach_port_allocate_full(space, right, MACH_PORT_NULL,
+ &qos, &name);
+ return kr;
}
/*
kern_return_t
mach_port_allocate(
- ipc_space_t space,
- mach_port_right_t right,
- mach_port_name_t *namep)
+ ipc_space_t space,
+ mach_port_right_t right,
+ mach_port_name_t *namep)
{
- kern_return_t kr;
- mach_port_qos_t qos = qos_template;
+ kern_return_t kr;
+ mach_port_qos_t qos = qos_template;
- kr = mach_port_allocate_full (space, right, MACH_PORT_NULL,
- &qos, namep);
- return (kr);
+ kr = mach_port_allocate_full(space, right, MACH_PORT_NULL,
+ &qos, namep);
+ return kr;
}
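/*
 * Illustrative user-space sketch (not part of this file): the common call
 * that reaches this routine, letting the kernel pick the name.
 *
 *	mach_port_name_t name = MACH_PORT_NULL;
 *	kern_return_t kr = mach_port_allocate(mach_task_self(),
 *	    MACH_PORT_RIGHT_RECEIVE, &name);
 */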
/*
* Routine: mach_port_allocate_qos [kernel call]
* Purpose:
- * Allocates a right, with qos options, in a space. Like
- * mach_port_allocate_name, except that the implementation
- * picks a name for the right. The name may be any legal name
+ * Allocates a right, with qos options, in a space. Like
+ * mach_port_allocate_name, except that the implementation
+ * picks a name for the right. The name may be any legal name
* in the space that doesn't currently denote a right.
* Conditions:
* Nothing locked.
kern_return_t
mach_port_allocate_qos(
- ipc_space_t space,
- mach_port_right_t right,
- mach_port_qos_t *qosp,
- mach_port_name_t *namep)
+ ipc_space_t space,
+ mach_port_right_t right,
+ mach_port_qos_t *qosp,
+ mach_port_name_t *namep)
{
- kern_return_t kr;
+ kern_return_t kr;
- if (qosp->name)
+ if (qosp->name) {
return KERN_INVALID_ARGUMENT;
- kr = mach_port_allocate_full (space, right, MACH_PORT_NULL,
- qosp, namep);
- return (kr);
+ }
+ kr = mach_port_allocate_full(space, right, MACH_PORT_NULL,
+ qosp, namep);
+ return kr;
}
/*
* Routine: mach_port_allocate_full [kernel call]
* Purpose:
- * Allocates a right in a space. Supports all of the
- * special cases, such as specifying a subsystem,
- * a specific name, a real-time port, etc.
- * The name may be any legal name in the space that doesn't
+ * Allocates a right in a space. Supports the
+ * special case of specifying a name. The name may
+ * be any legal name in the space that doesn't
* currently denote a right.
+ *
+ * While we no longer support users requesting a
+ * preallocated message for the port, we still
+ * check for errors in such requests and then
+ * just clear the request.
* Conditions:
* Nothing locked.
* Returns:
kern_return_t
mach_port_allocate_full(
- ipc_space_t space,
- mach_port_right_t right,
- mach_port_t proto,
- mach_port_qos_t *qosp,
- mach_port_name_t *namep)
+ ipc_space_t space,
+ mach_port_right_t right,
+ mach_port_t proto,
+ mach_port_qos_t *qosp,
+ mach_port_name_t *namep)
{
- ipc_kmsg_t kmsg = IKM_NULL;
- kern_return_t kr;
+ kern_return_t kr;
- if (space == IS_NULL)
- return (KERN_INVALID_TASK);
+ if (space == IS_NULL) {
+ return KERN_INVALID_TASK;
+ }
- if (proto != MACH_PORT_NULL)
- return (KERN_INVALID_VALUE);
+ if (proto != MACH_PORT_NULL) {
+ return KERN_INVALID_VALUE;
+ }
if (qosp->name) {
- if (!MACH_PORT_VALID (*namep))
- return (KERN_INVALID_VALUE);
+ if (!MACH_PORT_VALID(*namep)) {
+ return KERN_INVALID_VALUE;
+ }
}
+ /*
+ * Don't actually honor prealloc requests from user-space
+ * (for security reasons, and because it isn't guaranteed anyway).
+ * Keep old errors for legacy reasons.
+ */
if (qosp->prealloc) {
if (qosp->len > MACH_MSG_SIZE_MAX - MAX_TRAILER_SIZE) {
return KERN_RESOURCE_SHORTAGE;
- } else {
- mach_msg_size_t size = qosp->len + MAX_TRAILER_SIZE;
-
- if (right != MACH_PORT_RIGHT_RECEIVE)
- return (KERN_INVALID_VALUE);
-
- kmsg = (ipc_kmsg_t)ipc_kmsg_prealloc(size);
- if (kmsg == IKM_NULL)
- return (KERN_RESOURCE_SHORTAGE);
}
+ if (right != MACH_PORT_RIGHT_RECEIVE) {
+ return KERN_INVALID_VALUE;
+ }
+ qosp->prealloc = 0;
}
+ kr = mach_port_allocate_internal(space, right, qosp, namep);
+ return kr;
+}
+
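+/*
+ * Illustrative user-space sketch (not part of this file): requesting a
+ * receive right at a caller-chosen name via qos.name, as described in the
+ * purpose comment above. The name value is an arbitrary example and must
+ * not already denote a right in the caller's space.
+ *
+ *	mach_port_qos_t qos = { .name = TRUE };
+ *	mach_port_name_t name = 0x1103;
+ *	kern_return_t kr = mach_port_allocate_full(mach_task_self(),
+ *	    MACH_PORT_RIGHT_RECEIVE, MACH_PORT_NULL, &qos, &name);
+ */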
+
+/*
+ * Routine: mach_port_allocate_internal [kernel private]
+ * Purpose:
+ * Allocates a right in a space. Supports all of the
+ * special cases, a specific name, a real-time port, etc.
+ * The name may be any legal name in the space that doesn't
+ * currently denote a right.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS The right is allocated.
+ * KERN_INVALID_TASK The space is null.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_INVALID_VALUE "right" isn't a legal kind of right.
+ * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
+ * KERN_NO_SPACE No room in space for another right.
+ */
+kern_return_t
+mach_port_allocate_internal(
+ ipc_space_t space,
+ mach_port_right_t right,
+ mach_port_qos_t *qosp,
+ mach_port_name_t *namep)
+{
+ kern_return_t kr;
+
+ assert(space != IS_NULL);
+
switch (right) {
- case MACH_PORT_RIGHT_RECEIVE:
- {
- ipc_port_t port;
-
- if (qosp->name)
- kr = ipc_port_alloc_name(space, *namep, &port);
- else
- kr = ipc_port_alloc(space, namep, &port);
+ case MACH_PORT_RIGHT_RECEIVE:
+ {
+ ipc_kmsg_t kmsg = IKM_NULL;
+ ipc_port_t port;
+
+ /*
+ * For in-kernel uses, only allow small (from the kmsg zone)
+ * preallocated messages for the port.
+ */
+ if (qosp->prealloc) {
+ mach_msg_size_t size = qosp->len;
+
+ if (size > IKM_SAVED_MSG_SIZE - MAX_TRAILER_SIZE) {
+ panic("mach_port_allocate_internal: too large a prealloc kmsg");
+ }
+ kmsg = (ipc_kmsg_t)ipc_kmsg_prealloc(size + MAX_TRAILER_SIZE);
+ if (kmsg == IKM_NULL) {
+ return KERN_RESOURCE_SHORTAGE;
+ }
+ }
+
+ if (qosp->name) {
+ kr = ipc_port_alloc_name(space, IPC_PORT_INIT_MESSAGE_QUEUE,
+ *namep, &port);
+ } else {
+ kr = ipc_port_alloc(space, IPC_PORT_INIT_MESSAGE_QUEUE,
+ namep, &port);
+ }
if (kr == KERN_SUCCESS) {
- if (kmsg != IKM_NULL)
+ if (kmsg != IKM_NULL) {
ipc_kmsg_set_prealloc(kmsg, port);
-
+ }
ip_unlock(port);
-
- } else if (kmsg != IKM_NULL)
+ } else if (kmsg != IKM_NULL) {
ipc_kmsg_free(kmsg);
+ }
break;
- }
+ }
- case MACH_PORT_RIGHT_PORT_SET:
- {
- ipc_pset_t pset;
+ case MACH_PORT_RIGHT_PORT_SET:
+ {
+ ipc_pset_t pset;
- if (qosp->name)
+ if (qosp->name) {
kr = ipc_pset_alloc_name(space, *namep, &pset);
- else
+ } else {
kr = ipc_pset_alloc(space, namep, &pset);
- if (kr == KERN_SUCCESS)
+ }
+ if (kr == KERN_SUCCESS) {
ips_unlock(pset);
+ }
break;
- }
+ }
- case MACH_PORT_RIGHT_DEAD_NAME:
+ case MACH_PORT_RIGHT_DEAD_NAME:
kr = ipc_object_alloc_dead(space, namep);
break;
- default:
+ default:
kr = KERN_INVALID_VALUE;
break;
}
- return (kr);
+ return kr;
}
/*
kern_return_t
mach_port_destroy(
- ipc_space_t space,
- mach_port_name_t name)
+ ipc_space_t space,
+ mach_port_name_t name)
{
ipc_entry_t entry;
kern_return_t kr;
- if (space == IS_NULL)
+ if (space == IS_NULL) {
return KERN_INVALID_TASK;
+ }
- if (!MACH_PORT_VALID(name))
+ if (!MACH_PORT_VALID(name)) {
return KERN_SUCCESS;
+ }
kr = ipc_right_lookup_write(space, name, &entry);
- if (kr != KERN_SUCCESS)
+ if (kr != KERN_SUCCESS) {
+ mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_NAME);
return kr;
+ }
/* space is write-locked and active */
kr = ipc_right_destroy(space, name, entry, TRUE, 0); /* unlocks space */
* Routine: mach_port_deallocate [kernel call]
* Purpose:
* Deallocates a user reference from a send right,
- * send-once right, or a dead-name right. May
- * deallocate the right, if this is the last uref,
+ * send-once right, dead-name right, or a port_set right.
+ * May deallocate the right, if this is the last uref,
* and destroy the name, if it doesn't denote
* other rights.
* Conditions:
kern_return_t
mach_port_deallocate(
- ipc_space_t space,
- mach_port_name_t name)
+ ipc_space_t space,
+ mach_port_name_t name)
{
ipc_entry_t entry;
kern_return_t kr;
- if (space == IS_NULL)
+ if (space == IS_NULL) {
return KERN_INVALID_TASK;
+ }
- if (!MACH_PORT_VALID(name))
+ if (!MACH_PORT_VALID(name)) {
return KERN_SUCCESS;
+ }
kr = ipc_right_lookup_write(space, name, &entry);
- if (kr != KERN_SUCCESS)
+ if (kr != KERN_SUCCESS) {
+ mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_NAME);
return kr;
+ }
/* space is write-locked */
kr = ipc_right_dealloc(space, name, entry); /* unlocks space */
kern_return_t
mach_port_get_refs(
- ipc_space_t space,
- mach_port_name_t name,
- mach_port_right_t right,
- mach_port_urefs_t *urefsp)
+ ipc_space_t space,
+ mach_port_name_t name,
+ mach_port_right_t right,
+ mach_port_urefs_t *urefsp)
{
mach_port_type_t type;
mach_port_urefs_t urefs;
ipc_entry_t entry;
kern_return_t kr;
- if (space == IS_NULL)
+ if (space == IS_NULL) {
return KERN_INVALID_TASK;
+ }
- if (right >= MACH_PORT_RIGHT_NUMBER)
+ if (right >= MACH_PORT_RIGHT_NUMBER) {
return KERN_INVALID_VALUE;
+ }
if (!MACH_PORT_VALID(name)) {
- if (right == MACH_PORT_RIGHT_SEND ||
+ if (right == MACH_PORT_RIGHT_SEND ||
right == MACH_PORT_RIGHT_SEND_ONCE) {
*urefsp = 1;
return KERN_SUCCESS;
}
kr = ipc_right_lookup_write(space, name, &entry);
- if (kr != KERN_SUCCESS)
+ if (kr != KERN_SUCCESS) {
return kr;
+ }
/* space is write-locked and active */
kr = ipc_right_info(space, name, entry, &type, &urefs);
/* space is unlocked */
- if (kr != KERN_SUCCESS)
- return kr;
+ if (kr != KERN_SUCCESS) {
+ return kr;
+ }
- if (type & MACH_PORT_TYPE(right))
+ if (type & MACH_PORT_TYPE(right)) {
switch (right) {
- case MACH_PORT_RIGHT_SEND_ONCE:
+ case MACH_PORT_RIGHT_SEND_ONCE:
assert(urefs == 1);
- /* fall-through */
+ OS_FALLTHROUGH;
- case MACH_PORT_RIGHT_PORT_SET:
- case MACH_PORT_RIGHT_RECEIVE:
+ case MACH_PORT_RIGHT_PORT_SET:
+ case MACH_PORT_RIGHT_RECEIVE:
*urefsp = 1;
break;
- case MACH_PORT_RIGHT_DEAD_NAME:
- case MACH_PORT_RIGHT_SEND:
+ case MACH_PORT_RIGHT_DEAD_NAME:
+ case MACH_PORT_RIGHT_SEND:
assert(urefs > 0);
*urefsp = urefs;
break;
- default:
+ default:
panic("mach_port_get_refs: strange rights");
}
- else
+ } else {
*urefsp = 0;
+ }
return kr;
}
kern_return_t
mach_port_mod_refs(
- ipc_space_t space,
- mach_port_name_t name,
- mach_port_right_t right,
- mach_port_delta_t delta)
+ ipc_space_t space,
+ mach_port_name_t name,
+ mach_port_right_t right,
+ mach_port_delta_t delta)
{
ipc_entry_t entry;
kern_return_t kr;
- if (space == IS_NULL)
+ if (space == IS_NULL) {
return KERN_INVALID_TASK;
+ }
- if (right >= MACH_PORT_RIGHT_NUMBER)
+ if (right >= MACH_PORT_RIGHT_NUMBER) {
return KERN_INVALID_VALUE;
+ }
if (!MACH_PORT_VALID(name)) {
if (right == MACH_PORT_RIGHT_SEND ||
- right == MACH_PORT_RIGHT_SEND_ONCE)
+ right == MACH_PORT_RIGHT_SEND_ONCE) {
return KERN_SUCCESS;
+ }
return KERN_INVALID_NAME;
}
kr = ipc_right_lookup_write(space, name, &entry);
- if (kr != KERN_SUCCESS)
+ if (kr != KERN_SUCCESS) {
+ mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_NAME);
return kr;
+ }
+
/* space is write-locked and active */
- kr = ipc_right_delta(space, name, entry, right, delta); /* unlocks */
+ kr = ipc_right_delta(space, name, entry, right, delta); /* unlocks */
return kr;
}
kern_return_t
mach_port_peek(
- ipc_space_t space,
- mach_port_name_t name,
- mach_msg_trailer_type_t trailer_type,
- mach_port_seqno_t *seqnop,
- mach_msg_size_t *msg_sizep,
- mach_msg_id_t *msg_idp,
- mach_msg_trailer_info_t trailer_infop,
- mach_msg_type_number_t *trailer_sizep)
+ ipc_space_t space,
+ mach_port_name_t name,
+ mach_msg_trailer_type_t trailer_type,
+ mach_port_seqno_t *seqnop,
+ mach_msg_size_t *msg_sizep,
+ mach_msg_id_t *msg_idp,
+ mach_msg_trailer_info_t trailer_infop,
+ mach_msg_type_number_t *trailer_sizep)
{
ipc_port_t port;
kern_return_t kr;
boolean_t found;
mach_msg_max_trailer_t max_trailer;
- if (space == IS_NULL)
+ if (space == IS_NULL) {
return KERN_INVALID_TASK;
+ }
- if (!MACH_PORT_VALID(name))
+ if (!MACH_PORT_VALID(name)) {
return KERN_INVALID_RIGHT;
+ }
/*
* We don't allow anything greater than the audit trailer - to avoid
* leaking the context pointer and to avoid variable-sized context issues.
*/
if (GET_RCV_ELEMENTS(trailer_type) > MACH_RCV_TRAILER_AUDIT ||
- REQUESTED_TRAILER_SIZE(TRUE, trailer_type) > *trailer_sizep)
+ REQUESTED_TRAILER_SIZE(TRUE, trailer_type) > *trailer_sizep) {
+ mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_VALUE);
return KERN_INVALID_VALUE;
+ }
*trailer_sizep = REQUESTED_TRAILER_SIZE(TRUE, trailer_type);
kr = ipc_port_translate_receive(space, name, &port);
- if (kr != KERN_SUCCESS)
+ if (kr != KERN_SUCCESS) {
+ mach_port_guard_exception(name, 0, 0,
+ ((KERN_INVALID_NAME == kr) ?
+ kGUARD_EXC_INVALID_NAME :
+ kGUARD_EXC_INVALID_RIGHT));
return kr;
+ }
/* Port locked and active */
found = ipc_mqueue_peek(&port->ip_messages, seqnop,
- msg_sizep, msg_idp, &max_trailer);
+ msg_sizep, msg_idp, &max_trailer, NULL);
ip_unlock(port);
- if (found != TRUE)
+ if (found != TRUE) {
return KERN_FAILURE;
+ }
max_trailer.msgh_seqno = *seqnop;
memcpy(trailer_infop, &max_trailer, *trailer_sizep);
kern_return_t
mach_port_set_mscount(
- ipc_space_t space,
- mach_port_name_t name,
- mach_port_mscount_t mscount)
+ ipc_space_t space,
+ mach_port_name_t name,
+ mach_port_mscount_t mscount)
{
ipc_port_t port;
kern_return_t kr;
- if (space == IS_NULL)
+ if (space == IS_NULL) {
return KERN_INVALID_TASK;
+ }
- if (!MACH_PORT_VALID(name))
+ if (!MACH_PORT_VALID(name)) {
return KERN_INVALID_RIGHT;
+ }
kr = ipc_port_translate_receive(space, name, &port);
- if (kr != KERN_SUCCESS)
+ if (kr != KERN_SUCCESS) {
return kr;
+ }
/* port is locked and active */
- ipc_port_set_mscount(port, mscount);
-
+ port->ip_mscount = mscount;
ip_unlock(port);
return KERN_SUCCESS;
}
kern_return_t
mach_port_set_seqno(
- ipc_space_t space,
- mach_port_name_t name,
- mach_port_seqno_t seqno)
+ ipc_space_t space,
+ mach_port_name_t name,
+ mach_port_seqno_t seqno)
{
ipc_port_t port;
kern_return_t kr;
- if (space == IS_NULL)
+ if (space == IS_NULL) {
return KERN_INVALID_TASK;
+ }
- if (!MACH_PORT_VALID(name))
+ if (!MACH_PORT_VALID(name)) {
return KERN_INVALID_RIGHT;
+ }
kr = ipc_port_translate_receive(space, name, &port);
- if (kr != KERN_SUCCESS)
+ if (kr != KERN_SUCCESS) {
return kr;
+ }
/* port is locked and active */
ipc_mqueue_set_seqno(&port->ip_messages, seqno);
kern_return_t
mach_port_get_context(
- ipc_space_t space,
- mach_port_name_t name,
- mach_vm_address_t *context)
+ ipc_space_t space,
+ mach_port_name_t name,
+ mach_vm_address_t *context)
{
ipc_port_t port;
kern_return_t kr;
- if (space == IS_NULL)
+ if (space == IS_NULL) {
return KERN_INVALID_TASK;
+ }
- if (!MACH_PORT_VALID(name))
+ if (!MACH_PORT_VALID(name)) {
return KERN_INVALID_RIGHT;
+ }
kr = ipc_port_translate_receive(space, name, &port);
- if (kr != KERN_SUCCESS)
+ if (kr != KERN_SUCCESS) {
return kr;
+ }
/* Port locked and active */
/* For strictly guarded ports, return empty context (which acts as guard) */
- if (port->ip_strict_guard)
+ if (port->ip_strict_guard) {
*context = 0;
- else
+ } else {
*context = port->ip_context;
+ }
ip_unlock(port);
return KERN_SUCCESS;
}
+kern_return_t
+mach_port_get_context_from_user(
+ mach_port_t port,
+ mach_port_name_t name,
+ mach_vm_address_t *context)
+{
+ kern_return_t kr;
+
+ ipc_space_t space = convert_port_to_space_check_type(port, NULL, TASK_FLAVOR_READ, FALSE);
+
+ if (space == IPC_SPACE_NULL) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ kr = mach_port_get_context(space, name, context);
+
+ ipc_space_release(space);
+ return kr;
+}
/*
* Routine: mach_port_set_context [kernel call]
kern_return_t
mach_port_set_context(
- ipc_space_t space,
- mach_port_name_t name,
- mach_vm_address_t context)
+ ipc_space_t space,
+ mach_port_name_t name,
+ mach_vm_address_t context)
{
ipc_port_t port;
kern_return_t kr;
- if (space == IS_NULL)
+ if (space == IS_NULL) {
return KERN_INVALID_TASK;
+ }
- if (!MACH_PORT_VALID(name))
+ if (!MACH_PORT_VALID(name)) {
return KERN_INVALID_RIGHT;
+ }
kr = ipc_port_translate_receive(space, name, &port);
- if (kr != KERN_SUCCESS)
+ if (kr != KERN_SUCCESS) {
return kr;
+ }
/* port is locked and active */
- if(port->ip_strict_guard) {
+ if (port->ip_strict_guard) {
uint64_t portguard = port->ip_context;
ip_unlock(port);
/* For strictly guarded ports, disallow overwriting context; Raise Exception */
kern_return_t
mach_port_get_set_status(
- ipc_space_t space,
- mach_port_name_t name,
- mach_port_name_t **members,
- mach_msg_type_number_t *membersCnt)
+ ipc_space_t space,
+ mach_port_name_t name,
+ mach_port_name_t **members,
+ mach_msg_type_number_t *membersCnt)
{
- ipc_entry_num_t actual; /* this many members */
- ipc_entry_num_t maxnames; /* space for this many members */
+ ipc_entry_num_t actual; /* this many members */
+ ipc_entry_num_t maxnames; /* space for this many members */
kern_return_t kr;
- vm_size_t size; /* size of allocated memory */
- vm_offset_t addr; /* allocated memory */
- vm_map_copy_t memory; /* copied-in memory */
+ vm_size_t size; /* size of allocated memory */
+ vm_offset_t addr; /* allocated memory */
+ vm_map_copy_t memory; /* copied-in memory */
- if (space == IS_NULL)
+ if (space == IS_NULL) {
return KERN_INVALID_TASK;
+ }
- if (!MACH_PORT_VALID(name))
+ if (!MACH_PORT_VALID(name)) {
return KERN_INVALID_RIGHT;
+ }
- size = VM_MAP_PAGE_SIZE(ipc_kernel_map); /* initial guess */
+ size = VM_MAP_PAGE_SIZE(ipc_kernel_map); /* initial guess */
for (;;) {
mach_port_name_t *names;
ipc_object_t psobj;
ipc_pset_t pset;
- kr = vm_allocate(ipc_kernel_map, &addr, size, VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_KERN_MEMORY_IPC));
- if (kr != KERN_SUCCESS)
+ kr = vm_allocate_kernel(ipc_kernel_map, &addr, size, VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_IPC);
+ if (kr != KERN_SUCCESS) {
return KERN_RESOURCE_SHORTAGE;
+ }
/* can't fault while we hold locks */
- kr = vm_map_wire(ipc_kernel_map, addr, addr + size,
- VM_PROT_READ|VM_PROT_WRITE|VM_PROT_MEMORY_TAG_MAKE(VM_KERN_MEMORY_IPC), FALSE);
+ kr = vm_map_wire_kernel(ipc_kernel_map, addr, addr + size,
+ VM_PROT_READ | VM_PROT_WRITE, VM_KERN_MEMORY_IPC, FALSE);
assert(kr == KERN_SUCCESS);
kr = ipc_object_translate(space, name, MACH_PORT_RIGHT_PORT_SET, &psobj);
}
/* just use a portset reference from here on out */
- __IGNORE_WCASTALIGN(pset = (ipc_pset_t) psobj);
+ pset = ips_object_to_pset(psobj);
ips_reference(pset);
- ips_unlock(pset);
+ ips_unlock(pset);
names = (mach_port_name_t *) addr;
maxnames = (ipc_entry_num_t)(size / sizeof(mach_port_name_t));
/* release the portset reference */
ips_release(pset);
- if (actual <= maxnames)
+ if (actual <= maxnames) {
break;
+ }
/* didn't have enough memory; allocate more */
kmem_free(ipc_kernel_map, addr, size);
size = vm_map_round_page(
(actual * sizeof(mach_port_name_t)),
- VM_MAP_PAGE_MASK(ipc_kernel_map)) +
- VM_MAP_PAGE_SIZE(ipc_kernel_map);
+ VM_MAP_PAGE_MASK(ipc_kernel_map)) +
+ VM_MAP_PAGE_SIZE(ipc_kernel_map);
}
if (actual == 0) {
kr = vm_map_unwire(
ipc_kernel_map,
vm_map_trunc_page(addr,
- VM_MAP_PAGE_MASK(ipc_kernel_map)),
+ VM_MAP_PAGE_MASK(ipc_kernel_map)),
vm_map_round_page(addr + vm_size_used,
- VM_MAP_PAGE_MASK(ipc_kernel_map)),
+ VM_MAP_PAGE_MASK(ipc_kernel_map)),
FALSE);
assert(kr == KERN_SUCCESS);
kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr,
- (vm_map_size_t)size_used, TRUE, &memory);
+ (vm_map_size_t)size_used, TRUE, &memory);
assert(kr == KERN_SUCCESS);
- if (vm_size_used != size)
+ if (vm_size_used != size) {
kmem_free(ipc_kernel_map,
- addr + vm_size_used, size - vm_size_used);
+ addr + vm_size_used, size - vm_size_used);
+ }
}
*members = (mach_port_name_t *) memory;
return KERN_SUCCESS;
}
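/*
 * Illustrative user-space sketch (not part of this file), assuming
 * `pset_name` denotes a port set owned by the caller: the member list is
 * returned as out-of-line memory, so after consuming members[0..count-1]
 * the caller must release the buffer.
 *
 *	mach_port_name_array_t members = NULL;
 *	mach_msg_type_number_t count = 0;
 *	kern_return_t kr = mach_port_get_set_status(mach_task_self(),
 *	    pset_name, &members, &count);
 *	if (kr == KERN_SUCCESS && count > 0) {
 *		vm_deallocate(mach_task_self(), (vm_address_t)members,
 *		    count * sizeof(mach_port_name_t));
 *	}
 */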
+kern_return_t
+mach_port_get_set_status_from_user(
+ mach_port_t port,
+ mach_port_name_t name,
+ mach_port_name_t **members,
+ mach_msg_type_number_t *membersCnt)
+{
+ kern_return_t kr;
+
+ ipc_space_t space = convert_port_to_space_check_type(port, NULL, TASK_FLAVOR_READ, FALSE);
+
+ if (space == IPC_SPACE_NULL) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ kr = mach_port_get_set_status(space, name, members, membersCnt);
+
+ ipc_space_release(space);
+ return kr;
+}
+
/*
* Routine: mach_port_move_member [kernel call]
* Purpose:
kern_return_t
mach_port_move_member(
- ipc_space_t space,
- mach_port_name_t member,
- mach_port_name_t after)
+ ipc_space_t space,
+ mach_port_name_t member,
+ mach_port_name_t after)
{
- ipc_entry_t entry;
+ ipc_object_t port_obj, ps_obj;
ipc_port_t port;
- ipc_pset_t nset;
+ ipc_pset_t nset = IPS_NULL;
kern_return_t kr;
uint64_t wq_link_id = 0;
uint64_t wq_reserved_prepost = 0;
- if (space == IS_NULL)
+ if (space == IS_NULL) {
return KERN_INVALID_TASK;
+ }
- if (!MACH_PORT_VALID(member))
+ if (!MACH_PORT_VALID(member)) {
return KERN_INVALID_RIGHT;
+ }
if (after == MACH_PORT_DEAD) {
return KERN_INVALID_RIGHT;
*/
wq_link_id = waitq_link_reserve(NULL);
wq_reserved_prepost = waitq_prepost_reserve(NULL, 10,
- WAITQ_DONT_LOCK,
- NULL);
+ WAITQ_DONT_LOCK);
+ kr = ipc_pset_lazy_allocate(space, after);
+ if (kr != KERN_SUCCESS) {
+ goto done;
+ }
}
- kr = ipc_right_lookup_read(space, member, &entry);
- if (kr != KERN_SUCCESS)
- goto done;
- /* space is read-locked and active */
-
- if ((entry->ie_bits & MACH_PORT_TYPE_RECEIVE) == 0) {
- is_read_unlock(space);
- kr = KERN_INVALID_RIGHT;
+ if (after != MACH_PORT_NULL) {
+ kr = ipc_object_translate_two(space,
+ member, MACH_PORT_RIGHT_RECEIVE, &port_obj,
+ after, MACH_PORT_RIGHT_PORT_SET, &ps_obj);
+ } else {
+ kr = ipc_object_translate(space,
+ member, MACH_PORT_RIGHT_RECEIVE, &port_obj);
+ }
+ if (kr != KERN_SUCCESS) {
goto done;
}
- __IGNORE_WCASTALIGN(port = (ipc_port_t) entry->ie_object);
- assert(port != IP_NULL);
-
- if (after == MACH_PORT_NULL)
- nset = IPS_NULL;
- else {
- entry = ipc_entry_lookup(space, after);
- if (entry == IE_NULL) {
- is_read_unlock(space);
- kr = KERN_INVALID_NAME;
- goto done;
- }
-
- if ((entry->ie_bits & MACH_PORT_TYPE_PORT_SET) == 0) {
- is_read_unlock(space);
- kr = KERN_INVALID_RIGHT;
- goto done;
- }
-
- __IGNORE_WCASTALIGN(nset = (ipc_pset_t) entry->ie_object);
- assert(nset != IPS_NULL);
+ port = ip_object_to_port(port_obj);
+ if (after != MACH_PORT_NULL) {
+ nset = ips_object_to_pset(ps_obj);
}
- ip_lock(port);
+ /* port and nset are locked */
+
ipc_pset_remove_from_all(port);
- if (nset != IPS_NULL) {
- ips_lock(nset);
+ if (after != MACH_PORT_NULL) {
kr = ipc_pset_add(nset, port, &wq_link_id, &wq_reserved_prepost);
ips_unlock(nset);
}
- ip_unlock(port);
- is_read_unlock(space);
- done:
+ ip_unlock(port);
+done:
/*
* on success the ipc_pset_add() will consume the wq_link_id
* value (resetting it to 0), so this function is always safe to call.
* KERN_INVALID_CAPABILITY The notify port is dead.
* MACH_NOTIFY_PORT_DESTROYED:
* KERN_INVALID_VALUE Sync isn't zero.
+ * KERN_FAILURE Re-registering for this notification; only one request may be outstanding.
* MACH_NOTIFY_DEAD_NAME:
* KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
* KERN_INVALID_ARGUMENT Name denotes dead name, but
kern_return_t
mach_port_request_notification(
- ipc_space_t space,
- mach_port_name_t name,
- mach_msg_id_t id,
- mach_port_mscount_t sync,
- ipc_port_t notify,
- ipc_port_t *previousp)
+ ipc_space_t space,
+ mach_port_name_t name,
+ mach_msg_id_t id,
+ mach_port_mscount_t sync,
+ ipc_port_t notify,
+ ipc_port_t *previousp)
{
kern_return_t kr;
- if (space == IS_NULL)
+ if (space == IS_NULL) {
return KERN_INVALID_TASK;
+ }
- if (notify == IP_DEAD)
+ if (notify == IP_DEAD) {
return KERN_INVALID_CAPABILITY;
+ }
-#if NOTYET
+#if NOTYET
/*
* Requesting notifications on RPC ports is an error.
*/
{
ipc_port_t port;
- ipc_entry_t entry;
+ ipc_entry_t entry;
- kr = ipc_right_lookup_write(space, name, &entry);
- if (kr != KERN_SUCCESS)
+ kr = ipc_right_lookup_write(space, name, &entry);
+ if (kr != KERN_SUCCESS) {
return kr;
+ }
- port = (ipc_port_t) entry->ie_object;
+ port = ip_object_to_port(entry->ie_object);
if (port->ip_subsystem != NULL) {
is_write_unlock(space);
- panic("mach_port_request_notification: on RPC port!!");
+ panic("mach_port_request_notification: on RPC port!!");
return KERN_INVALID_CAPABILITY;
}
is_write_unlock(space);
}
-#endif /* NOTYET */
+#endif /* NOTYET */
switch (id) {
- case MACH_NOTIFY_PORT_DESTROYED: {
- ipc_port_t port, previous;
+ case MACH_NOTIFY_PORT_DESTROYED: {
+ ipc_port_t port;
- if (sync != 0)
+ if (sync != 0) {
return KERN_INVALID_VALUE;
+ }
- if (!MACH_PORT_VALID(name))
+ if (!MACH_PORT_VALID(name)) {
return KERN_INVALID_RIGHT;
+ }
kr = ipc_port_translate_receive(space, name, &port);
- if (kr != KERN_SUCCESS)
+ if (kr != KERN_SUCCESS) {
return kr;
+ }
/* port is locked and active */
- ipc_port_pdrequest(port, notify, &previous);
- /* port is unlocked */
+ /*
+ * you cannot register for port death notifications on a kobject,
+ * kolabel or special reply port
+ */
+ if (ip_is_kobject(port) || ip_is_kolabeled(port) ||
+ port->ip_specialreply) {
+ ip_unlock(port);
+ return KERN_INVALID_RIGHT;
+ }
+
+ /* Allow only one registration of this notification */
+ if (port->ip_pdrequest != IP_NULL) {
+ ip_unlock(port);
+ mach_port_guard_exception(name, 0, 0, kGUARD_EXC_KERN_FAILURE);
+ return KERN_FAILURE;
+ }
- *previousp = previous;
+ ipc_port_pdrequest(port, notify, previousp);
+ /* port is unlocked */
+ assert(*previousp == IP_NULL);
break;
- }
+ }
- case MACH_NOTIFY_NO_SENDERS: {
+ case MACH_NOTIFY_NO_SENDERS: {
ipc_port_t port;
- if (!MACH_PORT_VALID(name))
+ if (!MACH_PORT_VALID(name)) {
return KERN_INVALID_RIGHT;
+ }
kr = ipc_port_translate_receive(space, name, &port);
- if (kr != KERN_SUCCESS)
+ if (kr != KERN_SUCCESS) {
return kr;
+ }
/* port is locked and active */
ipc_port_nsrequest(port, sync, notify, previousp);
/* port is unlocked */
break;
- }
+ }
- case MACH_NOTIFY_SEND_POSSIBLE:
+ case MACH_NOTIFY_SEND_POSSIBLE:
- if (!MACH_PORT_VALID(name)) {
- return KERN_INVALID_ARGUMENT;
+ if (!MACH_PORT_VALID(name)) {
+ return KERN_INVALID_ARGUMENT;
}
kr = ipc_right_request_alloc(space, name, sync != 0,
- TRUE, notify, previousp);
- if (kr != KERN_SUCCESS)
+ TRUE, notify, previousp);
+ if (kr != KERN_SUCCESS) {
return kr;
+ }
break;
- case MACH_NOTIFY_DEAD_NAME:
+ case MACH_NOTIFY_DEAD_NAME:
- if (!MACH_PORT_VALID(name)) {
+ if (!MACH_PORT_VALID(name)) {
/*
* Already dead.
* Should do immediate delivery check -
* will do that in the near future.
*/
- return KERN_INVALID_ARGUMENT;
+ return KERN_INVALID_ARGUMENT;
}
kr = ipc_right_request_alloc(space, name, sync != 0,
- FALSE, notify, previousp);
- if (kr != KERN_SUCCESS)
+ FALSE, notify, previousp);
+ if (kr != KERN_SUCCESS) {
return kr;
+ }
break;
- default:
+ default:
return KERN_INVALID_VALUE;
}
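/*
 * Illustrative user-space sketch (not part of this file), assuming
 * `target` names a right the caller holds and `notify_port` is a receive
 * right owned by the caller. The MIG interface also takes a disposition
 * for the notify port, which is consumed by copyin before this in-kernel
 * routine sees an ipc_port_t:
 *
 *	mach_port_t previous = MACH_PORT_NULL;
 *	kern_return_t kr = mach_port_request_notification(mach_task_self(),
 *	    target, MACH_NOTIFY_DEAD_NAME, 0, notify_port,
 *	    MACH_MSG_TYPE_MAKE_SEND_ONCE, &previous);
 */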
kern_return_t
mach_port_insert_right(
- ipc_space_t space,
- mach_port_name_t name,
- ipc_port_t poly,
- mach_msg_type_name_t polyPoly)
+ ipc_space_t space,
+ mach_port_name_t name,
+ ipc_port_t poly,
+ mach_msg_type_name_t polyPoly)
{
- if (space == IS_NULL)
+ if (space == IS_NULL) {
return KERN_INVALID_TASK;
+ }
if (!MACH_PORT_VALID(name) ||
- !MACH_MSG_TYPE_PORT_ANY_RIGHT(polyPoly))
+ !MACH_MSG_TYPE_PORT_ANY_RIGHT(polyPoly)) {
return KERN_INVALID_VALUE;
+ }
- if (!IO_VALID((ipc_object_t) poly))
+ if (!IP_VALID(poly)) {
return KERN_INVALID_CAPABILITY;
+ }
- return ipc_object_copyout_name(space, (ipc_object_t) poly,
- polyPoly, FALSE, name);
+ return ipc_object_copyout_name(space, ip_to_object(poly),
+ polyPoly, name);
}
/*
kern_return_t
mach_port_extract_right(
- ipc_space_t space,
- mach_port_name_t name,
- mach_msg_type_name_t msgt_name,
- ipc_port_t *poly,
- mach_msg_type_name_t *polyPoly)
+ ipc_space_t space,
+ mach_port_name_t name,
+ mach_msg_type_name_t msgt_name,
+ ipc_port_t *poly,
+ mach_msg_type_name_t *polyPoly)
{
kern_return_t kr;
- if (space == IS_NULL)
+ if (space == IS_NULL) {
return KERN_INVALID_TASK;
+ }
- if (!MACH_MSG_TYPE_PORT_ANY(msgt_name))
+ if (!MACH_MSG_TYPE_PORT_ANY(msgt_name)) {
return KERN_INVALID_VALUE;
+ }
if (!MACH_PORT_VALID(name)) {
/*
return KERN_INVALID_RIGHT;
}
- kr = ipc_object_copyin(space, name, msgt_name, (ipc_object_t *) poly);
+ kr = ipc_object_copyin(space, name, msgt_name, (ipc_object_t *) poly, 0, NULL,
+ (space == current_space() && msgt_name == MACH_MSG_TYPE_COPY_SEND) ?
+ IPC_OBJECT_COPYIN_FLAGS_ALLOW_IMMOVABLE_SEND : IPC_OBJECT_COPYIN_FLAGS_SOFT_FAIL_IMMOVABLE_SEND);
- if (kr == KERN_SUCCESS)
+ if (kr == KERN_SUCCESS) {
*polyPoly = ipc_object_copyin_type(msgt_name);
+ }
return kr;
}
* Returns:
* None.
*/
-void mach_port_get_status_helper(
- ipc_port_t port,
- mach_port_status_t *statusp)
+static void
+mach_port_get_status_helper(
+ ipc_port_t port,
+ mach_port_status_t *statusp)
{
- spl_t s;
-
- s = splsched();
imq_lock(&port->ip_messages);
/* don't leak set IDs, just indicate that the port is in one or not */
statusp->mps_pset = !!(port->ip_in_pset);
statusp->mps_qlimit = port->ip_messages.imq_qlimit;
statusp->mps_msgcount = port->ip_messages.imq_msgcount;
imq_unlock(&port->ip_messages);
- splx(s);
-
+
statusp->mps_mscount = port->ip_mscount;
statusp->mps_sorights = port->ip_sorights;
statusp->mps_srights = port->ip_srights > 0;
if (port->ip_strict_guard) {
statusp->mps_flags |= MACH_PORT_STATUS_FLAG_STRICT_GUARD;
}
+ if (port->ip_immovable_receive) {
+ statusp->mps_flags |= MACH_PORT_STATUS_FLAG_GUARD_IMMOVABLE_RECEIVE;
+ }
+ }
+ if (port->ip_no_grant) {
+ statusp->mps_flags |= MACH_PORT_STATUS_FLAG_NO_GRANT;
}
return;
}
-
-
kern_return_t
mach_port_get_attributes(
- ipc_space_t space,
- mach_port_name_t name,
- int flavor,
- mach_port_info_t info,
- mach_msg_type_number_t *count)
+ ipc_space_t space,
+ mach_port_name_t name,
+ int flavor,
+ mach_port_info_t info,
+ mach_msg_type_number_t *count)
{
ipc_port_t port;
kern_return_t kr;
- if (space == IS_NULL)
+ if (space == IS_NULL) {
return KERN_INVALID_TASK;
+ }
- switch (flavor) {
- case MACH_PORT_LIMITS_INFO: {
- mach_port_limits_t *lp = (mach_port_limits_t *)info;
+ switch (flavor) {
+ case MACH_PORT_LIMITS_INFO: {
+ mach_port_limits_t *lp = (mach_port_limits_t *)info;
- if (*count < MACH_PORT_LIMITS_INFO_COUNT)
- return KERN_FAILURE;
+ if (*count < MACH_PORT_LIMITS_INFO_COUNT) {
+ return KERN_FAILURE;
+ }
- if (!MACH_PORT_VALID(name)) {
+ if (!MACH_PORT_VALID(name)) {
*count = 0;
break;
}
-
- kr = ipc_port_translate_receive(space, name, &port);
- if (kr != KERN_SUCCESS)
- return kr;
- /* port is locked and active */
-
- lp->mpl_qlimit = port->ip_messages.imq_qlimit;
- *count = MACH_PORT_LIMITS_INFO_COUNT;
- ip_unlock(port);
- break;
- }
- case MACH_PORT_RECEIVE_STATUS: {
+ kr = ipc_port_translate_receive(space, name, &port);
+ if (kr != KERN_SUCCESS) {
+ return kr;
+ }
+ /* port is locked and active */
+
+ lp->mpl_qlimit = port->ip_messages.imq_qlimit;
+ *count = MACH_PORT_LIMITS_INFO_COUNT;
+ ip_unlock(port);
+ break;
+ }
+
+ case MACH_PORT_RECEIVE_STATUS: {
mach_port_status_t *statusp = (mach_port_status_t *)info;
-
- if (*count < MACH_PORT_RECEIVE_STATUS_COUNT)
+
+ if (*count < MACH_PORT_RECEIVE_STATUS_COUNT) {
return KERN_FAILURE;
+ }
- if (!MACH_PORT_VALID(name))
+ if (!MACH_PORT_VALID(name)) {
return KERN_INVALID_RIGHT;
+ }
kr = ipc_port_translate_receive(space, name, &port);
- if (kr != KERN_SUCCESS)
+ if (kr != KERN_SUCCESS) {
return kr;
+ }
/* port is locked and active */
mach_port_get_status_helper(port, statusp);
*count = MACH_PORT_RECEIVE_STATUS_COUNT;
ip_unlock(port);
break;
}
-
+
case MACH_PORT_DNREQUESTS_SIZE: {
- ipc_port_request_t table;
-
- if (*count < MACH_PORT_DNREQUESTS_SIZE_COUNT)
- return KERN_FAILURE;
+ ipc_port_request_t table;
+
+ if (*count < MACH_PORT_DNREQUESTS_SIZE_COUNT) {
+ return KERN_FAILURE;
+ }
if (!MACH_PORT_VALID(name)) {
*(int *)info = 0;
break;
}
- kr = ipc_port_translate_receive(space, name, &port);
- if (kr != KERN_SUCCESS)
- return kr;
- /* port is locked and active */
-
+ kr = ipc_port_translate_receive(space, name, &port);
+ if (kr != KERN_SUCCESS) {
+ return kr;
+ }
+ /* port is locked and active */
+
table = port->ip_requests;
- if (table == IPR_NULL)
+ if (table == IPR_NULL) {
*(int *)info = 0;
- else
+ } else {
*(int *)info = table->ipr_size->its_size;
- *count = MACH_PORT_DNREQUESTS_SIZE_COUNT;
- ip_unlock(port);
+ }
+ *count = MACH_PORT_DNREQUESTS_SIZE_COUNT;
+ ip_unlock(port);
break;
}
case MACH_PORT_INFO_EXT: {
mach_port_info_ext_t *mp_info = (mach_port_info_ext_t *)info;
- if (*count < MACH_PORT_INFO_EXT_COUNT)
+ if (*count < MACH_PORT_INFO_EXT_COUNT) {
return KERN_FAILURE;
-
- if (!MACH_PORT_VALID(name))
+ }
+
+ if (!MACH_PORT_VALID(name)) {
return KERN_INVALID_RIGHT;
-
+ }
+
kr = ipc_port_translate_receive(space, name, &port);
- if (kr != KERN_SUCCESS)
+ if (kr != KERN_SUCCESS) {
return kr;
+ }
/* port is locked and active */
mach_port_get_status_helper(port, &mp_info->mpie_status);
mp_info->mpie_boost_cnt = port->ip_impcount;
break;
}
- default:
+ default:
return KERN_INVALID_ARGUMENT;
- /*NOTREACHED*/
- }
+ /*NOTREACHED*/
+ }
return KERN_SUCCESS;
}
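/*
 * Illustrative user-space sketch (not part of this file), assuming `port`
 * is a receive right held by the caller: fetching the receive-status flavor.
 *
 *	mach_port_status_t status;
 *	mach_msg_type_number_t count = MACH_PORT_RECEIVE_STATUS_COUNT;
 *	kern_return_t kr = mach_port_get_attributes(mach_task_self(), port,
 *	    MACH_PORT_RECEIVE_STATUS, (mach_port_info_t)&status, &count);
 */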
+kern_return_t
+mach_port_get_attributes_from_user(
+ mach_port_t port,
+ mach_port_name_t name,
+ int flavor,
+ mach_port_info_t info,
+ mach_msg_type_number_t *count)
+{
+ kern_return_t kr;
+
+ ipc_space_t space = convert_port_to_space_check_type(port, NULL, TASK_FLAVOR_READ, FALSE);
+
+ if (space == IPC_SPACE_NULL) {
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ kr = mach_port_get_attributes(space, name, flavor, info, count);
+
+ ipc_space_release(space);
+ return kr;
+}
+
kern_return_t
mach_port_set_attributes(
- ipc_space_t space,
- mach_port_name_t name,
- int flavor,
- mach_port_info_t info,
- mach_msg_type_number_t count)
+ ipc_space_t space,
+ mach_port_name_t name,
+ int flavor,
+ mach_port_info_t info,
+ mach_msg_type_number_t count)
{
ipc_port_t port;
kern_return_t kr;
-
- if (space == IS_NULL)
+
+ if (space == IS_NULL) {
return KERN_INVALID_TASK;
+ }
+
+ switch (flavor) {
+ case MACH_PORT_LIMITS_INFO: {
+ mach_port_limits_t *mplp = (mach_port_limits_t *)info;
+
+ if (count < MACH_PORT_LIMITS_INFO_COUNT) {
+ return KERN_FAILURE;
+ }
- switch (flavor) {
-
- case MACH_PORT_LIMITS_INFO: {
- mach_port_limits_t *mplp = (mach_port_limits_t *)info;
-
- if (count < MACH_PORT_LIMITS_INFO_COUNT)
- return KERN_FAILURE;
-
- if (mplp->mpl_qlimit > MACH_PORT_QLIMIT_MAX)
- return KERN_INVALID_VALUE;
-
- if (!MACH_PORT_VALID(name))
+ if (mplp->mpl_qlimit > MACH_PORT_QLIMIT_MAX) {
+ return KERN_INVALID_VALUE;
+ }
+
+ if (!MACH_PORT_VALID(name)) {
return KERN_INVALID_RIGHT;
+ }
- kr = ipc_port_translate_receive(space, name, &port);
- if (kr != KERN_SUCCESS)
- return kr;
- /* port is locked and active */
+ kr = ipc_port_translate_receive(space, name, &port);
+ if (kr != KERN_SUCCESS) {
+ return kr;
+ }
+ /* port is locked and active */
- ipc_mqueue_set_qlimit(&port->ip_messages, mplp->mpl_qlimit);
- ip_unlock(port);
- break;
- }
+ ipc_mqueue_set_qlimit(&port->ip_messages, mplp->mpl_qlimit);
+ ip_unlock(port);
+ break;
+ }
case MACH_PORT_DNREQUESTS_SIZE: {
- if (count < MACH_PORT_DNREQUESTS_SIZE_COUNT)
- return KERN_FAILURE;
+ if (count < MACH_PORT_DNREQUESTS_SIZE_COUNT) {
+ return KERN_FAILURE;
+ }
- if (!MACH_PORT_VALID(name))
+ if (!MACH_PORT_VALID(name)) {
return KERN_INVALID_RIGHT;
-
- kr = ipc_port_translate_receive(space, name, &port);
- if (kr != KERN_SUCCESS)
- return kr;
- /* port is locked and active */
-
+ }
+
+ kr = ipc_port_translate_receive(space, name, &port);
+ if (kr != KERN_SUCCESS) {
+ return kr;
+ }
+ /* port is locked and active */
+
kr = ipc_port_request_grow(port, *(int *)info);
- if (kr != KERN_SUCCESS)
+ if (kr != KERN_SUCCESS) {
return kr;
+ }
break;
}
case MACH_PORT_TEMPOWNER:
- if (!MACH_PORT_VALID(name))
+ if (!MACH_PORT_VALID(name)) {
return KERN_INVALID_RIGHT;
+ }
ipc_importance_task_t release_imp_task = IIT_NULL;
natural_t assertcnt = 0;
kr = ipc_port_translate_receive(space, name, &port);
- if (kr != KERN_SUCCESS)
+ if (kr != KERN_SUCCESS) {
return kr;
+ }
/* port is locked and active */
- /*
+ /*
* don't allow temp-owner importance donation if user
- * associated it with a kobject already (timer, host_notify target).
+ * associated it with a kobject already (timer, host_notify target),
+ * or if it is a special reply port.
*/
- if (is_ipc_kobject(ip_kotype(port))) {
+ if (ip_is_kobject(port) || port->ip_specialreply) {
ip_unlock(port);
return KERN_INVALID_ARGUMENT;
}
/* drop assertions from previous destination task */
if (release_imp_task != IIT_NULL) {
assert(ipc_importance_task_is_any_receiver_type(release_imp_task));
- if (assertcnt > 0)
+ if (assertcnt > 0) {
ipc_importance_task_drop_internal_assertion(release_imp_task, assertcnt);
+ }
ipc_importance_task_release(release_imp_task);
} else if (assertcnt > 0) {
release_imp_task = current_task()->task_imp_base;
}
}
#else
- if (release_imp_task != IIT_NULL)
+ if (release_imp_task != IIT_NULL) {
ipc_importance_task_release(release_imp_task);
+ }
#endif /* IMPORTANCE_INHERITANCE */
break;
#if IMPORTANCE_INHERITANCE
case MACH_PORT_DENAP_RECEIVER:
case MACH_PORT_IMPORTANCE_RECEIVER:
- if (!MACH_PORT_VALID(name))
+ if (!MACH_PORT_VALID(name)) {
return KERN_INVALID_RIGHT;
+ }
kr = ipc_port_translate_receive(space, name, &port);
- if (kr != KERN_SUCCESS)
+ if (kr != KERN_SUCCESS) {
return kr;
+ }
- /*
+ /*
* don't allow importance donation if user associated
- * it with a kobject already (timer, host_notify target).
+ * it with a kobject already (timer, host_notify target),
+ * or if it is a special reply port.
*/
- if (is_ipc_kobject(ip_kotype(port))) {
+ if (ip_is_kobject(port) || port->ip_specialreply) {
ip_unlock(port);
return KERN_INVALID_ARGUMENT;
}
break;
#endif /* IMPORTANCE_INHERITANCE */
- default:
+ default:
return KERN_INVALID_ARGUMENT;
- /*NOTREACHED*/
- }
+ /*NOTREACHED*/
+ }
return KERN_SUCCESS;
}
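/*
 * Illustrative user-space sketch (not part of this file), assuming `port`
 * is a receive right held by the caller: raising the message queue limit.
 *
 *	mach_port_limits_t limits = { .mpl_qlimit = MACH_PORT_QLIMIT_MAX };
 *	kern_return_t kr = mach_port_set_attributes(mach_task_self(), port,
 *	    MACH_PORT_LIMITS_INFO, (mach_port_info_t)&limits,
 *	    MACH_PORT_LIMITS_INFO_COUNT);
 */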
kern_return_t
mach_port_insert_member(
- ipc_space_t space,
- mach_port_name_t name,
- mach_port_name_t psname)
+ ipc_space_t space,
+ mach_port_name_t name,
+ mach_port_name_t psname)
{
ipc_object_t obj;
ipc_object_t psobj;
uint64_t wq_link_id;
uint64_t wq_reserved_prepost;
- if (space == IS_NULL)
+ if (space == IS_NULL) {
return KERN_INVALID_TASK;
+ }
- if (!MACH_PORT_VALID(name) || !MACH_PORT_VALID(psname))
+ if (!MACH_PORT_VALID(name) || !MACH_PORT_VALID(psname)) {
return KERN_INVALID_RIGHT;
+ }
wq_link_id = waitq_link_reserve(NULL);
wq_reserved_prepost = waitq_prepost_reserve(NULL, 10,
- WAITQ_DONT_LOCK, NULL);
+ WAITQ_DONT_LOCK);
+ kr = ipc_pset_lazy_allocate(space, psname);
+ if (kr != KERN_SUCCESS) {
+ goto done;
+ }
- kr = ipc_object_translate_two(space,
- name, MACH_PORT_RIGHT_RECEIVE, &obj,
- psname, MACH_PORT_RIGHT_PORT_SET, &psobj);
- if (kr != KERN_SUCCESS)
+
+ kr = ipc_object_translate_two(space,
+ name, MACH_PORT_RIGHT_RECEIVE, &obj,
+ psname, MACH_PORT_RIGHT_PORT_SET, &psobj);
+ if (kr != KERN_SUCCESS) {
goto done;
+ }
/* obj and psobj are locked (and were locked in that order) */
assert(psobj != IO_NULL);
assert(obj != IO_NULL);
- __IGNORE_WCASTALIGN(kr = ipc_pset_add((ipc_pset_t)psobj, (ipc_port_t)obj,
- &wq_link_id, &wq_reserved_prepost));
+ kr = ipc_pset_add(ips_object_to_pset(psobj), ip_object_to_port(obj),
+ &wq_link_id, &wq_reserved_prepost);
io_unlock(psobj);
io_unlock(obj);
- done:
+done:
/* on success, wq_link_id is reset to 0, so this is always safe */
waitq_link_release(wq_link_id);
waitq_prepost_release_reserve(wq_reserved_prepost);
kern_return_t
mach_port_extract_member(
- ipc_space_t space,
- mach_port_name_t name,
- mach_port_name_t psname)
+ ipc_space_t space,
+ mach_port_name_t name,
+ mach_port_name_t psname)
{
ipc_object_t psobj;
ipc_object_t obj;
kern_return_t kr;
- if (space == IS_NULL)
+ if (space == IS_NULL) {
return KERN_INVALID_TASK;
+ }
- if (!MACH_PORT_VALID(name) || !MACH_PORT_VALID(psname))
+ if (!MACH_PORT_VALID(name) || !MACH_PORT_VALID(psname)) {
return KERN_INVALID_RIGHT;
+ }
- kr = ipc_object_translate_two(space,
- name, MACH_PORT_RIGHT_RECEIVE, &obj,
- psname, MACH_PORT_RIGHT_PORT_SET, &psobj);
- if (kr != KERN_SUCCESS)
+ kr = ipc_object_translate_two(space,
+ name, MACH_PORT_RIGHT_RECEIVE, &obj,
+ psname, MACH_PORT_RIGHT_PORT_SET, &psobj);
+ if (kr != KERN_SUCCESS) {
return kr;
+ }
/* obj and psobj are both locked (and were locked in that order) */
assert(psobj != IO_NULL);
assert(obj != IO_NULL);
- __IGNORE_WCASTALIGN(kr = ipc_pset_remove((ipc_pset_t)psobj, (ipc_port_t)obj));
+ kr = ipc_pset_remove(ips_object_to_pset(psobj), ip_object_to_port(obj));
io_unlock(psobj);
io_unlock(obj);
*/
kern_return_t
task_set_port_space(
- ipc_space_t space,
- int table_entries)
+ ipc_space_t space,
+ int table_entries)
{
kern_return_t kr;
-
- if (space == IS_NULL)
+
+ if (space == IS_NULL) {
return KERN_INVALID_TASK;
+ }
is_write_lock(space);
}
kr = ipc_entry_grow_table(space, table_entries);
- if (kr == KERN_SUCCESS)
+ if (kr == KERN_SUCCESS) {
is_write_unlock(space);
+ }
return kr;
}
*/
static kern_return_t
mach_port_guard_locked(
- ipc_port_t port,
- uint64_t guard,
- boolean_t strict)
+ ipc_port_t port,
+ uint64_t guard,
+ uint64_t flags)
{
- if (port->ip_context)
+ if (port->ip_context) {
return KERN_INVALID_ARGUMENT;
+ }
+ int strict = (flags & MPG_STRICT) ? 1 : 0;
+ int immovable_receive = (flags & MPG_IMMOVABLE_RECEIVE) ? 1 : 0;
+
+ imq_lock(&port->ip_messages);
port->ip_context = guard;
port->ip_guarded = 1;
- port->ip_strict_guard = (strict)?1:0;
+ port->ip_strict_guard = strict;
+ /* ip_immovable_receive bit is sticky and can't be un-guarded */
+ if (!port->ip_immovable_receive) {
+ port->ip_immovable_receive = immovable_receive;
+ }
+ imq_unlock(&port->ip_messages);
+
return KERN_SUCCESS;
}
*/
static kern_return_t
mach_port_unguard_locked(
- ipc_port_t port,
- mach_port_name_t name,
- uint64_t guard)
+ ipc_port_t port,
+ mach_port_name_t name,
+ uint64_t guard)
{
/* Port locked and active */
if (!port->ip_guarded) {
return KERN_INVALID_ARGUMENT;
}
+ imq_lock(&port->ip_messages);
port->ip_context = 0;
port->ip_guarded = port->ip_strict_guard = 0;
+ /* Don't clear the ip_immovable_receive bit */
+ imq_unlock(&port->ip_messages);
+
return KERN_SUCCESS;
}
* Returns:
* None. Marks the calling thread with AST_GUARD.
*/
-kern_return_t
+void
mach_port_guard_exception(
- mach_port_name_t name,
- uint64_t inguard,
- uint64_t portguard,
- unsigned reason)
+ mach_port_name_t name,
+ __unused uint64_t inguard,
+ uint64_t portguard,
+ unsigned reason)
{
+ mach_exception_code_t code = 0;
+ EXC_GUARD_ENCODE_TYPE(code, GUARD_TYPE_MACH_PORT);
+ EXC_GUARD_ENCODE_FLAVOR(code, reason);
+ EXC_GUARD_ENCODE_TARGET(code, name);
+ mach_exception_subcode_t subcode = (uint64_t)portguard;
thread_t t = current_thread();
- uint64_t code, subcode;
-
- /* Log exception info to syslog */
- printf( "Mach Port Guard Exception - "
- "Thread: 0x%x, "
- "Port Name: 0x%x, "
- "Expected Guard: 0x%x, "
- "Received Guard: 0x%x\n",
- (unsigned)VM_KERNEL_UNSLIDE_OR_PERM(t),
- (unsigned)name,
- (unsigned)portguard,
- (unsigned)inguard);
-
- /*
- * EXC_GUARD namespace for mach ports
- *
- *
- * Mach Port guards use the exception codes like
- *
- * code:
- * +----------------------------------------------------------------+
- * |[63:61] GUARD_TYPE_MACH_PORT | [60:32] flavor | [31:0] port name|
- * +----------------------------------------------------------------+
- *
- * subcode:
- * +----------------------------------------------------------------+
- * | [63:0] guard value |
- * +----------------------------------------------------------------+
- */
+ boolean_t fatal = FALSE;
+ if (t->task->task_exc_guard & TASK_EXC_GUARD_MP_FATAL) {
+ fatal = TRUE;
+ } else if (reason <= MAX_FATAL_kGUARD_EXC_CODE) {
+ fatal = TRUE;
+ }
+ thread_guard_violation(t, code, subcode, fatal);
+}
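+
+/*
+ * For reference, the EXC_GUARD_ENCODE_* macros above pack the exception
+ * code exactly as the earlier in-line comment described:
+ *
+ *	code[63:61]	GUARD_TYPE_MACH_PORT
+ *	code[60:32]	flavor (the guard "reason")
+ *	code[31:0]	port name
+ *	subcode[63:0]	guard value
+ */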
- code = (((uint64_t)GUARD_TYPE_MACH_PORT) << 61) |
- (((uint64_t)reason) << 32) |
- ((uint64_t)name);
- subcode = (uint64_t)(portguard);
-
- t->guard_exc_info.code = code;
- t->guard_exc_info.subcode = subcode;
-
- /* Mark thread with AST_GUARD */
- thread_guard_violation(t, GUARD_TYPE_MACH_PORT);
- return KERN_FAILURE;
+/*
+ * Temporary wrapper for immovable mach port guard exception.
+ *
+ * Condition: !(ip_is_control(port) && !immovable_control_port_enabled)
+ */
+void
+mach_port_guard_exception_immovable(
+ mach_port_name_t name,
+ mach_port_t port,
+ uint64_t portguard)
+{
+ if (ip_is_control(port) && immovable_control_port_enabled) {
+ mach_port_guard_exception(name, 0, portguard,
+ ipc_control_port_options & IPC_CONTROL_PORT_OPTIONS_IMMOVABLE_HARD ?
+ kGUARD_EXC_IMMOVABLE : kGUARD_EXC_IMMOVABLE_NON_FATAL);
+ } else if (!ip_is_control(port)) {
+ /* always fatal exception for non-control port violation */
+ mach_port_guard_exception(name, 0, portguard, kGUARD_EXC_IMMOVABLE);
+ } else {
+ /* ip_is_control(port) && !immovable_control_port_enabled */
+ panic("mach_port_guard_exception_immovable: condition does not hold.");
+ }
}
*/
void
-mach_port_guard_ast(thread_t t)
+mach_port_guard_ast(thread_t t,
+ mach_exception_data_type_t code, mach_exception_data_type_t subcode)
{
- /* Raise an EXC_GUARD exception */
- task_exception_notify(EXC_GUARD, t->guard_exc_info.code, t->guard_exc_info.subcode);
+ unsigned int reason = EXC_GUARD_DECODE_GUARD_FLAVOR(code);
+ task_t task = t->task;
+ unsigned int behavior = task->task_exc_guard;
+ assert(task == current_task());
+ assert(task != kernel_task);
- /* Terminate task which caused the exception */
- task_bsdtask_kill(current_task());
- return;
+ switch (reason) {
+ /*
+ * Fatal Mach port guards - always delivered synchronously
+ */
+ case kGUARD_EXC_DESTROY:
+ case kGUARD_EXC_MOD_REFS:
+ case kGUARD_EXC_SET_CONTEXT:
+ case kGUARD_EXC_UNGUARDED:
+ case kGUARD_EXC_INCORRECT_GUARD:
+ case kGUARD_EXC_IMMOVABLE:
+ case kGUARD_EXC_STRICT_REPLY:
+ case kGUARD_EXC_MSG_FILTERED:
+ task_exception_notify(EXC_GUARD, code, subcode);
+ task_bsdtask_kill(task);
+ break;
+
+ default:
+ /*
+ * Mach port guards controlled by task settings.
+ */
+
+ /* Is delivery enabled */
+ if ((behavior & TASK_EXC_GUARD_MP_DELIVER) == 0) {
+ return;
+ }
+
+ /* If only once, make sure we're that once */
+ while (behavior & TASK_EXC_GUARD_MP_ONCE) {
+ uint32_t new_behavior = behavior & ~TASK_EXC_GUARD_MP_DELIVER;
+
+ if (OSCompareAndSwap(behavior, new_behavior, &task->task_exc_guard)) {
+ break;
+ }
+ behavior = task->task_exc_guard;
+ if ((behavior & TASK_EXC_GUARD_MP_DELIVER) == 0) {
+ return;
+ }
+ }
+
+ /* Raise exception via corpse fork or synchronously */
+ if ((task->task_exc_guard & TASK_EXC_GUARD_MP_CORPSE) &&
+ (task->task_exc_guard & TASK_EXC_GUARD_MP_FATAL) == 0) {
+ task_violated_guard(code, subcode, NULL);
+ } else {
+ task_exception_notify(EXC_GUARD, code, subcode);
+ }
+
+ /* Terminate the task if desired */
+ if (task->task_exc_guard & TASK_EXC_GUARD_MP_FATAL) {
+ task_bsdtask_kill(task);
+ }
+ break;
+ }
}
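
The switch above splits guard flavors into always-fatal ones and ones controlled by the task's task_exc_guard behavior bits (DELIVER, ONCE, CORPSE, FATAL). As a hedged user-space sketch, assuming the task_set_exc_guard_behavior() MIG routine and the TASK_EXC_GUARD_MP_* constants are exported to user space on the target SDK, a process could opt in to one-shot, corpse-based delivery of the optional flavors like this:

#include <mach/mach.h>
#include <mach/task_policy.h>   /* assumed location of TASK_EXC_GUARD_MP_*; verify for your SDK */

static kern_return_t
opt_in_to_mp_guard_delivery(void)
{
	task_exc_guard_behavior_t behavior =
	    TASK_EXC_GUARD_MP_DELIVER |   /* deliver optional mach-port guard violations */
	    TASK_EXC_GUARD_MP_ONCE |      /* but only the first one                      */
	    TASK_EXC_GUARD_MP_CORPSE;     /* via a corpse fork rather than synchronously */

	return task_set_exc_guard_behavior(mach_task_self(), behavior);
}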
/*
kern_return_t
mach_port_construct(
- ipc_space_t space,
- mach_port_options_t *options,
- uint64_t context,
- mach_port_name_t *name)
+ ipc_space_t space,
+ mach_port_options_t *options,
+ uint64_t context,
+ mach_port_name_t *name)
{
- kern_return_t kr;
- ipc_port_t port;
+ kern_return_t kr;
+ ipc_port_t port;
+ ipc_port_init_flags_t init_flags = IPC_PORT_INIT_MESSAGE_QUEUE;
+
+ if (space == IS_NULL) {
+ return KERN_INVALID_TASK;
+ }
+
+ if (options->flags & MPO_INSERT_SEND_RIGHT) {
+ init_flags |= IPC_PORT_INIT_MAKE_SEND_RIGHT;
+ }
+
+ if (options->flags & MPO_FILTER_MSG) {
+ init_flags |= IPC_PORT_INIT_FILTER_MESSAGE;
+ }
+
+ if (options->flags & MPO_TG_BLOCK_TRACKING) {
+ /* Check the task role to allow only TASK_GRAPHICS_SERVER to set this option */
+ if (proc_get_effective_task_policy(current_task(),
+ TASK_POLICY_ROLE) != TASK_GRAPHICS_SERVER) {
+ return KERN_DENIED;
+ }
- if (space == IS_NULL)
- return (KERN_INVALID_TASK);
+ /*
+ * Check the work interval port passed in to make sure it is the render server type.
+ * Since the creation of the render server work interval is privileged, this check
+ * acts as a guard to make sure only the render server is setting the thread group
+ * blocking behavior on the port.
+ */
+ mach_port_name_t wi_port_name = options->work_interval_port;
+ if (work_interval_port_type_render_server(wi_port_name) == false) {
+ return KERN_INVALID_ARGUMENT;
+ }
+ init_flags |= IPC_PORT_INIT_TG_BLOCK_TRACKING;
+ }
/* Allocate a new port in the IPC space */
- kr = ipc_port_alloc(space, name, &port);
- if (kr != KERN_SUCCESS)
+ kr = ipc_port_alloc(space, init_flags, name, &port);
+ if (kr != KERN_SUCCESS) {
return kr;
-
+ }
+
/* Port locked and active */
if (options->flags & MPO_CONTEXT_AS_GUARD) {
- kr = mach_port_guard_locked(port, (uint64_t) context, (options->flags & MPO_STRICT));
+ uint64_t flags = 0;
+ if (options->flags & MPO_STRICT) {
+ flags |= MPG_STRICT;
+ }
+ if (options->flags & MPO_IMMOVABLE_RECEIVE) {
+ flags |= MPG_IMMOVABLE_RECEIVE;
+ }
+ kr = mach_port_guard_locked(port, (uint64_t) context, flags);
/* A newly allocated and locked port should always be guarded successfully */
assert(kr == KERN_SUCCESS);
} else {
port->ip_context = context;
}
-
+
/* Unlock port */
ip_unlock(port);
if (options->flags & MPO_QLIMIT) {
kr = mach_port_set_attributes(space, *name, MACH_PORT_LIMITS_INFO,
- (mach_port_info_t)&options->mpl, sizeof(options->mpl)/sizeof(int));
- if (kr != KERN_SUCCESS)
- goto cleanup;
+ (mach_port_info_t)&options->mpl, sizeof(options->mpl) / sizeof(int));
+ if (kr != KERN_SUCCESS) {
+ goto cleanup;
+ }
}
if (options->flags & MPO_TEMPOWNER) {
kr = mach_port_set_attributes(space, *name, MACH_PORT_TEMPOWNER, NULL, 0);
- if (kr != KERN_SUCCESS)
+ if (kr != KERN_SUCCESS) {
goto cleanup;
+ }
}
if (options->flags & MPO_IMPORTANCE_RECEIVER) {
kr = mach_port_set_attributes(space, *name, MACH_PORT_IMPORTANCE_RECEIVER, NULL, 0);
- if (kr != KERN_SUCCESS)
+ if (kr != KERN_SUCCESS) {
goto cleanup;
+ }
}
if (options->flags & MPO_DENAP_RECEIVER) {
kr = mach_port_set_attributes(space, *name, MACH_PORT_DENAP_RECEIVER, NULL, 0);
- if (kr != KERN_SUCCESS)
- goto cleanup;
- }
-
- if (options->flags & MPO_INSERT_SEND_RIGHT) {
- kr = ipc_object_copyin(space, *name, MACH_MSG_TYPE_MAKE_SEND, (ipc_object_t *)&port);
- if (kr != KERN_SUCCESS)
- goto cleanup;
-
- kr = mach_port_insert_right(space, *name, port, MACH_MSG_TYPE_PORT_SEND);
- if (kr != KERN_SUCCESS)
+ if (kr != KERN_SUCCESS) {
goto cleanup;
+ }
}
return KERN_SUCCESS;
cleanup:
-	/* Attempt to destroy port. If its already destroyed by some other thread, we're done */
-	(void) mach_port_destruct(space, *name, 0, context);
+	/* Attempt to destroy the port. If it's already destroyed by some other thread, we're done */
+	(void) mach_port_destruct(space, *name,
+	    (options->flags & MPO_INSERT_SEND_RIGHT) ? -1 : 0, context);
return kr;
}
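
As a usage illustration of the option handling above, a minimal user-space sketch (the helper name is hypothetical; the MPO_* flags and mach_port_construct() are the public interface this routine backs): construct a strictly guarded receive right with a send right pre-inserted under the same name.

#include <mach/mach.h>
#include <mach/mach_port.h>

/* Sketch: build a strictly guarded port with a pre-inserted send right. */
static mach_port_name_t
make_guarded_port(mach_port_context_t guard)
{
	mach_port_options_t opts = {
		.flags = MPO_CONTEXT_AS_GUARD | MPO_STRICT | MPO_INSERT_SEND_RIGHT,
	};
	mach_port_name_t name = MACH_PORT_NULL;

	kern_return_t kr = mach_port_construct(mach_task_self(), &opts, guard, &name);
	return (kr == KERN_SUCCESS) ? name : MACH_PORT_NULL;
}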
kern_return_t
mach_port_destruct(
- ipc_space_t space,
- mach_port_name_t name,
- mach_port_delta_t srdelta,
- uint64_t guard)
+ ipc_space_t space,
+ mach_port_name_t name,
+ mach_port_delta_t srdelta,
+ uint64_t guard)
{
- kern_return_t kr;
- ipc_entry_t entry;
+ kern_return_t kr;
+ ipc_entry_t entry;
- if (space == IS_NULL)
+ if (space == IS_NULL) {
return KERN_INVALID_TASK;
+ }
- if (!MACH_PORT_VALID(name))
+ if (!MACH_PORT_VALID(name)) {
return KERN_INVALID_NAME;
+ }
/* Remove reference for receive right */
kr = ipc_right_lookup_write(space, name, &entry);
- if (kr != KERN_SUCCESS)
+ if (kr != KERN_SUCCESS) {
+ mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_NAME);
return kr;
+ }
/* space is write-locked and active */
- kr = ipc_right_destruct(space, name, entry, srdelta, guard); /* unlocks */
+ kr = ipc_right_destruct(space, name, entry, srdelta, guard); /* unlocks */
return kr;
}
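
The cleanup path in mach_port_construct() above already shows the calling convention: when a send right was pre-inserted, the send-right delta must be -1. A user-space sketch of the matching teardown (helper name hypothetical):

#include <mach/mach.h>

/* Sketch: destroy a port made with MPO_CONTEXT_AS_GUARD | MPO_INSERT_SEND_RIGHT. */
static kern_return_t
destroy_guarded_port(mach_port_name_t name, mach_port_context_t guard)
{
	/* srdelta of -1 drops the pre-inserted send right; guard must match the port's context */
	return mach_port_destruct(mach_task_self(), name, -1, guard);
}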
*/
kern_return_t
mach_port_guard(
- ipc_space_t space,
- mach_port_name_t name,
- uint64_t guard,
- boolean_t strict)
+ ipc_space_t space,
+ mach_port_name_t name,
+ uint64_t guard,
+ boolean_t strict)
{
- kern_return_t kr;
- ipc_port_t port;
+ kern_return_t kr;
+ ipc_port_t port;
+ uint64_t flags = 0;
- if (space == IS_NULL)
+ if (space == IS_NULL) {
return KERN_INVALID_TASK;
+ }
- if (!MACH_PORT_VALID(name))
+ if (!MACH_PORT_VALID(name)) {
return KERN_INVALID_NAME;
+ }
/* Guard can be applied only to receive rights */
kr = ipc_port_translate_receive(space, name, &port);
- if (kr != KERN_SUCCESS)
+ if (kr != KERN_SUCCESS) {
+ mach_port_guard_exception(name, 0, 0,
+ ((KERN_INVALID_NAME == kr) ?
+ kGUARD_EXC_INVALID_NAME :
+ kGUARD_EXC_INVALID_RIGHT));
return kr;
+ }
/* Port locked and active */
- kr = mach_port_guard_locked(port, guard, strict);
+ if (strict) {
+ flags = MPG_STRICT;
+ }
+
+ kr = mach_port_guard_locked(port, guard, flags);
ip_unlock(port);
- return kr;
+ if (KERN_INVALID_ARGUMENT == kr) {
+ mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_ARGUMENT);
+ }
+ return kr;
}
/*
*/
kern_return_t
mach_port_unguard(
- ipc_space_t space,
- mach_port_name_t name,
- uint64_t guard)
+ ipc_space_t space,
+ mach_port_name_t name,
+ uint64_t guard)
{
-
- kern_return_t kr;
- ipc_port_t port;
+ kern_return_t kr;
+ ipc_port_t port;
- if (space == IS_NULL)
+ if (space == IS_NULL) {
return KERN_INVALID_TASK;
+ }
- if (!MACH_PORT_VALID(name))
+ if (!MACH_PORT_VALID(name)) {
return KERN_INVALID_NAME;
+ }
kr = ipc_port_translate_receive(space, name, &port);
- if (kr != KERN_SUCCESS)
+ if (kr != KERN_SUCCESS) {
+ mach_port_guard_exception(name, 0, 0,
+ ((KERN_INVALID_NAME == kr) ?
+ kGUARD_EXC_INVALID_NAME :
+ kGUARD_EXC_INVALID_RIGHT));
return kr;
+ }
/* Port locked and active */
kr = mach_port_unguard_locked(port, name, guard);
ip_unlock(port);
+
return kr;
}
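
Guard and unguard are symmetric: the unguard value must match the context installed when the port was guarded. A user-space sketch of the pairing (helper name hypothetical; `name` is assumed to denote a receive right in the caller's space):

#include <mach/mach.h>

static kern_return_t
guard_then_unguard(mach_port_name_t name, uint64_t guard)
{
	/* Non-strict guard: the context can still be changed later if needed */
	kern_return_t kr = mach_port_guard(mach_task_self(), name, guard, FALSE);
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	/* ... use the port ... */

	return mach_port_unguard(mach_task_self(), name, guard);
}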
+/*
+ * Routine: mach_port_guard_with_flags [kernel call]
+ * Purpose:
+ * Guard a mach port with specified guard value and guard flags.
+ * The context field of the port is used as the guard.
+ * Conditions:
+ * Should hold receive right for that port
+ * Returns:
+ *		KERN_SUCCESS		The port was successfully guarded.
+ * KERN_INVALID_TASK The space is null.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_INVALID_NAME The name doesn't denote a right.
+ * KERN_INVALID_RIGHT The right isn't correct.
+ * KERN_INVALID_ARGUMENT Port already contains a context/guard.
+ * KERN_INVALID_CAPABILITY Cannot set MPG_IMMOVABLE_RECEIVE flag for a port with
+ * a movable port-destroyed notification port
+ */
+kern_return_t
+mach_port_guard_with_flags(
+ ipc_space_t space,
+ mach_port_name_t name,
+ uint64_t guard,
+ uint64_t flags)
+{
+ kern_return_t kr;
+ ipc_port_t port;
+
+ if (space == IS_NULL) {
+ return KERN_INVALID_TASK;
+ }
+
+ if (!MACH_PORT_VALID(name)) {
+ return KERN_INVALID_NAME;
+ }
+
+ kr = ipc_port_translate_receive(space, name, &port);
+ if (kr != KERN_SUCCESS) {
+ mach_port_guard_exception(name, 0, 0,
+ ((KERN_INVALID_NAME == kr) ?
+ kGUARD_EXC_INVALID_NAME :
+ kGUARD_EXC_INVALID_RIGHT));
+ return kr;
+ }
+
+ /* Port locked and active */
+ kr = mach_port_guard_locked(port, guard, flags);
+ ip_unlock(port);
+
+ if (KERN_INVALID_ARGUMENT == kr) {
+ mach_port_guard_exception(name, 0, 0, kGUARD_EXC_INVALID_ARGUMENT);
+ }
+
+ return kr;
+}
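
A user-space sketch of the call documented above, assuming the mach_port_guard_with_flags() MIG routine and the MPG_* flag definitions are available on the target SDK: apply a strict guard and pin the receive right in place.

#include <mach/mach.h>
#include <mach/port.h>   /* assumed location of MPG_STRICT / MPG_IMMOVABLE_RECEIVE */

static kern_return_t
guard_strict_immovable(mach_port_name_t name, uint64_t guard)
{
	return mach_port_guard_with_flags(mach_task_self(), name, guard,
	    MPG_STRICT | MPG_IMMOVABLE_RECEIVE);
}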
+
+/*
+ * Routine: mach_port_swap_guard [kernel call]
+ * Purpose:
+ * Swap guard value.
+ * Conditions:
+ * Port should already be guarded.
+ * Returns:
+ *		KERN_SUCCESS		The guard value was successfully swapped.
+ * KERN_INVALID_TASK The space is null.
+ * KERN_INVALID_TASK The space is dead.
+ * KERN_INVALID_NAME The name doesn't denote a right.
+ * KERN_INVALID_RIGHT The right isn't correct.
+ *		KERN_INVALID_ARGUMENT	Port is not guarded, is strictly guarded,
+ *					or the old_guard doesn't match the context
+ */
+kern_return_t
+mach_port_swap_guard(
+ ipc_space_t space,
+ mach_port_name_t name,
+ uint64_t old_guard,
+ uint64_t new_guard)
+{
+ kern_return_t kr;
+ ipc_port_t port;
+
+ if (space == IS_NULL) {
+ return KERN_INVALID_TASK;
+ }
+
+ if (!MACH_PORT_VALID(name)) {
+ return KERN_INVALID_NAME;
+ }
+
+ kr = ipc_port_translate_receive(space, name, &port);
+ if (kr != KERN_SUCCESS) {
+ mach_port_guard_exception(name, 0, 0,
+ ((KERN_INVALID_NAME == kr) ?
+ kGUARD_EXC_INVALID_NAME :
+ kGUARD_EXC_INVALID_RIGHT));
+ return kr;
+ }
+
+ /* Port locked and active */
+ if (!port->ip_guarded) {
+ ip_unlock(port);
+ mach_port_guard_exception(name, old_guard, 0, kGUARD_EXC_UNGUARDED);
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ if (port->ip_strict_guard) {
+ uint64_t portguard = port->ip_context;
+ ip_unlock(port);
+		/* For strictly guarded ports, disallow overwriting the context; raise an exception */
+ mach_port_guard_exception(name, old_guard, portguard, kGUARD_EXC_SET_CONTEXT);
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ if (port->ip_context != old_guard) {
+ uint64_t portguard = port->ip_context;
+ ip_unlock(port);
+ mach_port_guard_exception(name, old_guard, portguard, kGUARD_EXC_INCORRECT_GUARD);
+ return KERN_INVALID_ARGUMENT;
+ }
+
+ imq_lock(&port->ip_messages);
+ port->ip_context = new_guard;
+ imq_unlock(&port->ip_messages);
+
+ ip_unlock(port);
+
+ return KERN_SUCCESS;
+}
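
Finally, a user-space sketch of the swap documented above, assuming the mach_port_swap_guard() MIG routine is exported to user space: rotate a non-strict guard value, where old_guard must equal the port's current context.

#include <mach/mach.h>

static kern_return_t
rotate_guard(mach_port_name_t name, uint64_t old_guard, uint64_t new_guard)
{
	/* Fails (and raises a guard exception) if the port is unguarded,
	 * strictly guarded, or old_guard does not match the current context. */
	return mach_port_swap_guard(mach_task_self(), name, old_guard, new_guard);
}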