*/
#include <mach_debug.h>
-#include <mach_rt.h>
#include <mach/port.h>
#include <mach/kern_return.h>
#include <kern/counters.h>
#include <kern/thread.h>
#include <kern/kalloc.h>
+#include <kern/exc_guard.h>
#include <mach/mach_port_server.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
}
size = size_needed;
- kr = vm_allocate(ipc_kernel_map, &addr1, size, VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_KERN_MEMORY_IPC));
+ kr = vm_allocate_kernel(ipc_kernel_map, &addr1, size, VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_IPC);
if (kr != KERN_SUCCESS)
return KERN_RESOURCE_SHORTAGE;
- kr = vm_allocate(ipc_kernel_map, &addr2, size, VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_KERN_MEMORY_IPC));
+ kr = vm_allocate_kernel(ipc_kernel_map, &addr2, size, VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_IPC);
if (kr != KERN_SUCCESS) {
kmem_free(ipc_kernel_map, addr1, size);
return KERN_RESOURCE_SHORTAGE;
/* can't fault while we hold locks */
- kr = vm_map_wire(
+ kr = vm_map_wire_kernel(
ipc_kernel_map,
vm_map_trunc_page(addr1,
VM_MAP_PAGE_MASK(ipc_kernel_map)),
vm_map_round_page(addr1 + size,
VM_MAP_PAGE_MASK(ipc_kernel_map)),
- VM_PROT_READ|VM_PROT_WRITE|VM_PROT_MEMORY_TAG_MAKE(VM_KERN_MEMORY_IPC),
+ VM_PROT_READ|VM_PROT_WRITE, VM_KERN_MEMORY_IPC,
FALSE);
if (kr != KERN_SUCCESS) {
kmem_free(ipc_kernel_map, addr1, size);
return KERN_RESOURCE_SHORTAGE;
}
- kr = vm_map_wire(
+ kr = vm_map_wire_kernel(
ipc_kernel_map,
vm_map_trunc_page(addr2,
VM_MAP_PAGE_MASK(ipc_kernel_map)),
vm_map_round_page(addr2 + size,
VM_MAP_PAGE_MASK(ipc_kernel_map)),
- VM_PROT_READ|VM_PROT_WRITE|VM_PROT_MEMORY_TAG_MAKE(VM_KERN_MEMORY_IPC),
+ VM_PROT_READ|VM_PROT_WRITE,
+ VM_KERN_MEMORY_IPC,
FALSE);
if (kr != KERN_SUCCESS) {
kmem_free(ipc_kernel_map, addr1, size);
* Routine: mach_port_deallocate [kernel call]
* Purpose:
* Deallocates a user reference from a send right,
- * send-once right, or a dead-name right. May
- * deallocate the right, if this is the last uref,
+ * send-once right, dead-name right, or a port_set right.
+ * May deallocate the right, if this is the last uref,
* and destroy the name, if it doesn't denote
* other rights.
* Conditions:
ipc_object_t psobj;
ipc_pset_t pset;
- kr = vm_allocate(ipc_kernel_map, &addr, size, VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_KERN_MEMORY_IPC));
+ kr = vm_allocate_kernel(ipc_kernel_map, &addr, size, VM_FLAGS_ANYWHERE, VM_KERN_MEMORY_IPC);
if (kr != KERN_SUCCESS)
return KERN_RESOURCE_SHORTAGE;
/* can't fault while we hold locks */
- kr = vm_map_wire(ipc_kernel_map, addr, addr + size,
- VM_PROT_READ|VM_PROT_WRITE|VM_PROT_MEMORY_TAG_MAKE(VM_KERN_MEMORY_IPC), FALSE);
+ kr = vm_map_wire_kernel(ipc_kernel_map, addr, addr + size,
+ VM_PROT_READ|VM_PROT_WRITE, VM_KERN_MEMORY_IPC, FALSE);
assert(kr == KERN_SUCCESS);
kr = ipc_object_translate(space, name, MACH_PORT_RIGHT_PORT_SET, &psobj);
return kr;
/* port is locked and active */
+ /* you cannot register for port death notifications on a kobject */
+ if (ip_kotype(port) != IKOT_NONE) {
+ ip_unlock(port);
+ return KERN_INVALID_RIGHT;
+ }
+
ipc_port_pdrequest(port, notify, &previous);
/* port is unlocked */
ipc_port_t port,
mach_port_status_t *statusp)
{
- spl_t s;
-
- s = splsched();
imq_lock(&port->ip_messages);
/* don't leak set IDs, just indicate that the port is in one or not */
statusp->mps_pset = !!(port->ip_in_pset);
statusp->mps_qlimit = port->ip_messages.imq_qlimit;
statusp->mps_msgcount = port->ip_messages.imq_msgcount;
imq_unlock(&port->ip_messages);
- splx(s);
-
+
statusp->mps_mscount = port->ip_mscount;
statusp->mps_sorights = port->ip_sorights;
statusp->mps_srights = port->ip_srights > 0;
/*
* don't allow temp-owner importance donation if user
- * associated it with a kobject already (timer, host_notify target).
+ * associated it with a kobject already (timer, host_notify target),
+ * or if it is a special reply port.
*/
- if (is_ipc_kobject(ip_kotype(port))) {
+ if (is_ipc_kobject(ip_kotype(port)) || port->ip_specialreply) {
ip_unlock(port);
return KERN_INVALID_ARGUMENT;
}
/*
* don't allow importance donation if user associated
- * it with a kobject already (timer, host_notify target).
+ * it with a kobject already (timer, host_notify target),
+ * or if it is a special reply port.
*/
- if (is_ipc_kobject(ip_kotype(port))) {
+ if (is_ipc_kobject(ip_kotype(port)) || port->ip_specialreply) {
ip_unlock(port);
return KERN_INVALID_ARGUMENT;
}
uint64_t portguard,
unsigned reason)
{
+ mach_exception_code_t code = 0;
+ EXC_GUARD_ENCODE_TYPE(code, GUARD_TYPE_MACH_PORT);
+ EXC_GUARD_ENCODE_FLAVOR(code, reason);
+ EXC_GUARD_ENCODE_TARGET(code, name);
+ mach_exception_subcode_t subcode = (uint64_t)portguard;
thread_t t = current_thread();
- uint64_t code, subcode;
-
- /*
- * EXC_GUARD namespace for mach ports
- *
- *
- * Mach Port guards use the exception codes like
- *
- * code:
- * +----------------------------------------------------------------+
- * |[63:61] GUARD_TYPE_MACH_PORT | [60:32] flavor | [31:0] port name|
- * +----------------------------------------------------------------+
- *
- * subcode:
- * +----------------------------------------------------------------+
- * | [63:0] guard value |
- * +----------------------------------------------------------------+
- */
-
- code = (((uint64_t)GUARD_TYPE_MACH_PORT) << 61) |
- (((uint64_t)reason) << 32) |
- ((uint64_t)name);
- subcode = (uint64_t)(portguard);
-
- t->guard_exc_info.code = code;
- t->guard_exc_info.subcode = subcode;
-
- /* Mark thread with AST_GUARD */
- thread_guard_violation(t, GUARD_TYPE_MACH_PORT);
+ thread_guard_violation(t, code, subcode);
return KERN_FAILURE;
}
*/
void
-mach_port_guard_ast(thread_t t)
+mach_port_guard_ast(thread_t __unused t,
+ mach_exception_data_type_t code, mach_exception_data_type_t subcode)
{
+ assert(t->task != kernel_task);
+
/* Raise an EXC_GUARD exception */
- task_exception_notify(EXC_GUARD, t->guard_exc_info.code, t->guard_exc_info.subcode);
+ task_exception_notify(EXC_GUARD, code, subcode);
/* Terminate task which caused the exception */
task_bsdtask_kill(current_task());
- return;
}
/*