#include <ipc/ipc_hash.h>
#include <ipc/ipc_table.h>
#include <ipc/ipc_right.h>
+
+#include <security/mac_mach_internal.h>
#endif
/*
}
#endif /* MACH_IPC_DEBUG */
-/*
- * Routine: host_ipc_hash_info
- * Purpose:
- * Return information about the global reverse hash table.
- * Conditions:
- * Nothing locked. Obeys CountInOut protocol.
- * Returns:
- * KERN_SUCCESS Returned information.
- * KERN_INVALID_HOST The host is null.
- * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
- */
-
-#if !MACH_IPC_DEBUG
-kern_return_t
-host_ipc_hash_info(
- __unused host_t host,
- __unused hash_info_bucket_array_t *infop,
- __unused mach_msg_type_number_t *countp)
-{
- return KERN_FAILURE;
-}
-#else
-kern_return_t
-host_ipc_hash_info(
- host_t host,
- hash_info_bucket_array_t *infop,
- mach_msg_type_number_t *countp)
-{
- vm_map_copy_t copy;
- vm_offset_t addr;
- vm_size_t size;
- hash_info_bucket_t *info;
- natural_t count;
- kern_return_t kr;
-
- if (host == HOST_NULL)
- return KERN_INVALID_HOST;
-
- /* start with in-line data */
-
- count = ipc_hash_size();
- size = round_page(count * sizeof(hash_info_bucket_t));
- kr = kmem_alloc_pageable(ipc_kernel_map, &addr, size);
- if (kr != KERN_SUCCESS)
- return KERN_RESOURCE_SHORTAGE;
-
- info = (hash_info_bucket_t *) addr;
- count = ipc_hash_info(info, count);
-
- if (size > count * sizeof(hash_info_bucket_t))
- bzero((char *)&info[count], size - count * sizeof(hash_info_bucket_t));
-
- kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)addr,
- (vm_map_size_t)size, TRUE, &copy);
- assert(kr == KERN_SUCCESS);
-
- *infop = (hash_info_bucket_t *) copy;
- *countp = count;
- return KERN_SUCCESS;
-}
-#endif /* MACH_IPC_DEBUG */
/*
* Routine: mach_port_space_info
ipc_info_space_t *infop,
ipc_info_name_array_t *tablep,
mach_msg_type_number_t *tableCntp,
- ipc_info_tree_name_array_t *treep,
- mach_msg_type_number_t *treeCntp)
+ __unused ipc_info_tree_name_array_t *treep,
+ __unused mach_msg_type_number_t *treeCntp)
{
ipc_info_name_t *table_info;
vm_offset_t table_addr;
vm_size_t table_size, table_size_needed;
- ipc_info_tree_name_t *tree_info;
- vm_offset_t tree_addr;
- vm_size_t tree_size, tree_size_needed;
- ipc_tree_entry_t tentry;
ipc_entry_t table;
ipc_entry_num_t tsize;
mach_port_index_t index;
if (space == IS_NULL)
return KERN_INVALID_TASK;
+#if !(DEVELOPMENT | DEBUG)
+ const boolean_t dbg_ok = (mac_task_check_expose_task(kernel_task) == 0);
+#else
+ const boolean_t dbg_ok = TRUE;
+#endif
+
/* start with in-line memory */
table_size = 0;
- tree_size = 0;
for (;;) {
is_read_lock(space);
- if (!space->is_active) {
+ if (!is_active(space)) {
is_read_unlock(space);
if (table_size != 0)
kmem_free(ipc_kernel_map,
table_addr, table_size);
- if (tree_size != 0)
- kmem_free(ipc_kernel_map,
- tree_addr, tree_size);
return KERN_INVALID_TASK;
}
- table_size_needed = round_page(space->is_table_size
- * sizeof(ipc_info_name_t));
- tree_size_needed = round_page(space->is_tree_total
- * sizeof(ipc_info_tree_name_t));
+ table_size_needed =
+ vm_map_round_page((space->is_table_size
+ * sizeof(ipc_info_name_t)),
+ VM_MAP_PAGE_MASK(ipc_kernel_map));
- if ((table_size_needed == table_size) &&
- (tree_size_needed == tree_size))
+ if (table_size_needed == table_size)
break;
is_read_unlock(space);
if (table_size != table_size_needed) {
if (table_size != 0)
kmem_free(ipc_kernel_map, table_addr, table_size);
- kr = kmem_alloc(ipc_kernel_map, &table_addr, table_size_needed);
+ kr = kmem_alloc(ipc_kernel_map, &table_addr, table_size_needed, VM_KERN_MEMORY_IPC);
if (kr != KERN_SUCCESS) {
- if (tree_size != 0)
- kmem_free(ipc_kernel_map, tree_addr, tree_size);
return KERN_RESOURCE_SHORTAGE;
}
table_size = table_size_needed;
}
- if (tree_size != tree_size_needed) {
- if (tree_size != 0)
- kmem_free(ipc_kernel_map, tree_addr, tree_size);
- kr = kmem_alloc(ipc_kernel_map, &tree_addr, tree_size_needed);
- if (kr != KERN_SUCCESS) {
- if (table_size != 0)
- kmem_free(ipc_kernel_map, table_addr, table_size);
- return KERN_RESOURCE_SHORTAGE;
- }
- tree_size = tree_size_needed;
- }
+
}
/* space is read-locked and active; we have enough wired memory */
infop->iis_genno_mask = MACH_PORT_NGEN(MACH_PORT_DEAD);
infop->iis_table_size = space->is_table_size;
infop->iis_table_next = space->is_table_next->its_size;
- infop->iis_tree_size = space->is_tree_total;
- infop->iis_tree_small = space->is_tree_small;
- infop->iis_tree_hash = space->is_tree_hash;
/* walk the table for this space */
table = space->is_table;
bits = entry->ie_bits;
iin->iin_name = MACH_PORT_MAKE(index, IE_BITS_GEN(bits));
- iin->iin_collision = (bits & IE_BITS_COLLISION) ? TRUE : FALSE;
+ iin->iin_collision = 0;
iin->iin_type = IE_BITS_TYPE(bits);
- if (entry->ie_request)
- iin->iin_type |= MACH_PORT_TYPE_DNREQUEST;
- iin->iin_urefs = IE_BITS_UREFS(bits);
- iin->iin_object = (natural_t)(uintptr_t)entry->ie_object;
- iin->iin_next = entry->ie_next;
- iin->iin_hash = entry->ie_index;
- }
-
- /* walk the splay tree for this space */
- tree_info = (ipc_info_tree_name_array_t)tree_addr;
- for (tentry = ipc_splay_traverse_start(&space->is_tree), index = 0;
- tentry != ITE_NULL;
- tentry = ipc_splay_traverse_next(&space->is_tree, FALSE)) {
- ipc_info_tree_name_t *iitn = &tree_info[index++];
- ipc_info_name_t *iin = &iitn->iitn_name;
- ipc_entry_t entry = &tentry->ite_entry;
- ipc_entry_bits_t bits = entry->ie_bits;
-
- assert(IE_BITS_TYPE(bits) != MACH_PORT_TYPE_NONE);
+ if ((entry->ie_bits & MACH_PORT_TYPE_PORT_RIGHTS) != MACH_PORT_TYPE_NONE &&
+ entry->ie_request != IE_REQ_NONE) {
+ __IGNORE_WCASTALIGN(ipc_port_t port = (ipc_port_t) entry->ie_object);
+
+ assert(IP_VALID(port));
+ ip_lock(port);
+ iin->iin_type |= ipc_port_request_type(port, iin->iin_name, entry->ie_request);
+ ip_unlock(port);
+ }
- iin->iin_name = tentry->ite_name;
- iin->iin_collision = (bits & IE_BITS_COLLISION) ? TRUE : FALSE;
- iin->iin_type = IE_BITS_TYPE(bits);
- if (entry->ie_request)
- iin->iin_type |= MACH_PORT_TYPE_DNREQUEST;
iin->iin_urefs = IE_BITS_UREFS(bits);
- iin->iin_object = (natural_t)(uintptr_t)entry->ie_object;
+ iin->iin_object = (dbg_ok) ? (natural_t)VM_KERNEL_ADDRPERM((uintptr_t)entry->ie_object) : 0;
iin->iin_next = entry->ie_next;
iin->iin_hash = entry->ie_index;
-
- if (tentry->ite_lchild == ITE_NULL)
- iitn->iitn_lchild = MACH_PORT_NULL;
- else
- iitn->iitn_lchild = tentry->ite_lchild->ite_name;
-
- if (tentry->ite_rchild == ITE_NULL)
- iitn->iitn_rchild = MACH_PORT_NULL;
- else
- iitn->iitn_rchild = tentry->ite_rchild->ite_name;
-
}
- ipc_splay_traverse_finish(&space->is_tree);
+
is_read_unlock(space);
/* prepare the table out-of-line data for return */
if (table_size > 0) {
- if (table_size > infop->iis_table_size * sizeof(ipc_info_name_t))
- bzero((char *)&table_info[infop->iis_table_size],
- table_size - infop->iis_table_size * sizeof(ipc_info_name_t));
+ vm_size_t used_table_size;
- kr = vm_map_unwire(ipc_kernel_map, vm_map_trunc_page(table_addr),
- vm_map_round_page(table_addr + table_size), FALSE);
+ used_table_size = infop->iis_table_size * sizeof(ipc_info_name_t);
+ if (table_size > used_table_size)
+ bzero((char *)&table_info[infop->iis_table_size],
+ table_size - used_table_size);
+
+ kr = vm_map_unwire(
+ ipc_kernel_map,
+ vm_map_trunc_page(table_addr,
+ VM_MAP_PAGE_MASK(ipc_kernel_map)),
+ vm_map_round_page(table_addr + table_size,
+ VM_MAP_PAGE_MASK(ipc_kernel_map)),
+ FALSE);
assert(kr == KERN_SUCCESS);
kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)table_addr,
- (vm_map_size_t)table_size, TRUE, &copy);
+ (vm_map_size_t)used_table_size, TRUE, &copy);
assert(kr == KERN_SUCCESS);
*tablep = (ipc_info_name_t *)copy;
*tableCntp = infop->iis_table_size;
*tableCntp = 0;
}
- /* prepare the tree out-of-line data for return */
- if (tree_size > 0) {
- if (tree_size > infop->iis_tree_size * sizeof(ipc_info_tree_name_t))
- bzero((char *)&tree_info[infop->iis_tree_size],
- tree_size - infop->iis_tree_size * sizeof(ipc_info_tree_name_t));
+ /* splay tree is obsolete, no work to do... */
+ *treep = (ipc_info_tree_name_t *)0;
+ *treeCntp = 0;
+ return KERN_SUCCESS;
+}
+#endif /* MACH_IPC_DEBUG */
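/*
 * Illustrative sketch, not part of this change: mach_port_space_info() is
 * assumed to be called from user space through its MIG interface, and the
 * caller must vm_deallocate() the out-of-line table it receives.  With this
 * change the tree array always comes back empty, and iin_object is either 0
 * or a VM_KERNEL_ADDRPERM-obscured value rather than a raw kernel pointer.
 *
 *	ipc_info_space_t info;
 *	ipc_info_name_array_t table;
 *	ipc_info_tree_name_array_t tree;
 *	mach_msg_type_number_t tableCnt, treeCnt;
 *	kern_return_t kr;
 *
 *	kr = mach_port_space_info(mach_task_self(), &info,
 *	    &table, &tableCnt, &tree, &treeCnt);
 *	if (kr == KERN_SUCCESS) {
 *		// treeCnt is now always 0; only the table entries are meaningful
 *		for (mach_msg_type_number_t i = 0; i < tableCnt; i++)
 *			printf("name 0x%x type 0x%x\n",
 *			    table[i].iin_name, table[i].iin_type);
 *		vm_deallocate(mach_task_self(), (vm_address_t)table,
 *		    tableCnt * sizeof(table[0]));
 *	}
 */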
- kr = vm_map_unwire(ipc_kernel_map, vm_map_trunc_page(tree_addr),
- vm_map_round_page(tree_addr + tree_size), FALSE);
- assert(kr == KERN_SUCCESS);
- kr = vm_map_copyin(ipc_kernel_map, (vm_map_address_t)tree_addr,
- (vm_map_size_t)tree_size, TRUE, &copy);
- assert(kr == KERN_SUCCESS);
- *treep = (ipc_info_tree_name_t *)copy;
- *treeCntp = infop->iis_tree_size;
- } else {
- *treep = (ipc_info_tree_name_t *)0;
- *treeCntp = 0;
+/*
+ * Routine: mach_port_space_basic_info
+ * Purpose:
+ * Returns basic information about an IPC space.
+ * Conditions:
+ * Nothing locked.
+ * Returns:
+ * KERN_SUCCESS Returned information.
+ * KERN_FAILURE The call is not supported.
+ * KERN_INVALID_TASK The space is dead.
+ */
+
+#if !MACH_IPC_DEBUG
+kern_return_t
+mach_port_space_basic_info(
+ __unused ipc_space_t space,
+ __unused ipc_info_space_basic_t *infop)
+{
+ return KERN_FAILURE;
+}
+#else
+kern_return_t
+mach_port_space_basic_info(
+ ipc_space_t space,
+ ipc_info_space_basic_t *infop)
+{
+ if (space == IS_NULL)
+ return KERN_INVALID_TASK;
+
+
+ is_read_lock(space);
+ if (!is_active(space)) {
+ is_read_unlock(space);
+ return KERN_INVALID_TASK;
}
+
+ /* get the basic space info */
+ infop->iisb_genno_mask = MACH_PORT_NGEN(MACH_PORT_DEAD);
+ infop->iisb_table_size = space->is_table_size;
+ infop->iisb_table_next = space->is_table_next->its_size;
+ infop->iisb_table_inuse = space->is_table_size - space->is_table_free - 1;
+ infop->iisb_reserved[0] = 0;
+ infop->iisb_reserved[1] = 0;
+
+ is_read_unlock(space);
+
return KERN_SUCCESS;
}
#endif /* MACH_IPC_DEBUG */
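/*
 * Illustrative sketch, not part of this change: assuming the new routine is
 * exported to user space via MIG alongside mach_port_space_info(), a caller
 * gets a single fixed-size structure back and has no out-of-line memory to
 * release:
 *
 *	ipc_info_space_basic_t basic;
 *	kern_return_t kr = mach_port_space_basic_info(mach_task_self(), &basic);
 *
 *	if (kr == KERN_SUCCESS)
 *		printf("table size %u, in use %u\n",
 *		    basic.iisb_table_size, basic.iisb_table_inuse);
 *
 * On kernels built without MACH_IPC_DEBUG the stub above simply returns
 * KERN_FAILURE.
 */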
return kr;
/* port is locked and active */
- if (port->ip_dnrequests == IPR_NULL) {
+ if (port->ip_requests == IPR_NULL) {
total = 0;
used = 0;
} else {
- ipc_port_request_t dnrequests = port->ip_dnrequests;
+ ipc_port_request_t requests = port->ip_requests;
ipc_port_request_index_t index;
- total = dnrequests->ipr_size->its_size;
+ total = requests->ipr_size->its_size;
for (index = 1, used = 0;
index < total; index++) {
- ipc_port_request_t ipr = &dnrequests[index];
+ ipc_port_request_t ipr = &requests[index];
if (ipr->ipr_name != MACH_PORT_NULL)
used++;
ipc_entry_t entry;
ipc_port_t port;
kern_return_t kr;
+ mach_vm_address_t kaddr;
if (space == IS_NULL)
return KERN_INVALID_TASK;
return KERN_INVALID_RIGHT;
}
- port = (ipc_port_t) entry->ie_object;
+ __IGNORE_WCASTALIGN(port = (ipc_port_t) entry->ie_object);
assert(port != IP_NULL);
ip_lock(port);
}
*typep = (unsigned int) ip_kotype(port);
- *addrp = (mach_vm_address_t)port->ip_kobject;
+ kaddr = (mach_vm_address_t)port->ip_kobject;
ip_unlock(port);
- return KERN_SUCCESS;
+#if (DEVELOPMENT || DEBUG)
+ if (0 != kaddr && is_ipc_kobject(*typep))
+ *addrp = VM_KERNEL_UNSLIDE_OR_PERM(kaddr);
+ else
+#endif
+ *addrp = 0;
+
+ return KERN_SUCCESS;
}
#endif /* MACH_IPC_DEBUG */
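/*
 * Illustrative sketch, not part of this change: the routine above appears to
 * back the mach_port_kobject() MIG call.  After this change a RELEASE kernel
 * always reports a kobject address of 0, while DEVELOPMENT/DEBUG kernels
 * return an unslid/permuted value, so the kobject type is the only output a
 * caller can rely on everywhere:
 *
 *	// given some right 'name' in the caller's IPC space
 *	natural_t type;
 *	mach_vm_address_t addr;
 *	kern_return_t kr = mach_port_kobject(mach_task_self(), name, &type, &addr);
 *
 *	if (kr == KERN_SUCCESS)
 *		printf("0x%x -> kotype %u addr 0x%llx\n", name, type, addr);
 */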
/*