* any improvements or extensions that they make and grant Carnegie Mellon
* the rights to redistribute these changes.
*/
+/*
+ * NOTICE: This file was modified by McAfee Research in 2004 to introduce
+ * support for mandatory and extensible security protections. This notice
+ * is included in support of clause 2.2 (b) of the Apple Public License,
+ * Version 2.0.
+ */
/*
*/
/*
* Functions to manipulate IPC capability spaces.
*/
-#include <mach_kdb.h>
-
#include <mach/boolean.h>
#include <mach/kern_return.h>
#include <mach/port.h>
#include <kern/zalloc.h>
#include <ipc/port.h>
#include <ipc/ipc_entry.h>
-#include <ipc/ipc_splay.h>
#include <ipc/ipc_object.h>
#include <ipc/ipc_hash.h>
#include <ipc/ipc_table.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>
#include <ipc/ipc_right.h>
+#include <prng/random.h>
#include <string.h>
+/* Remove this in the future so port names are less predictable. */
+#define CONFIG_SEMI_RANDOM_ENTRIES
+#ifdef CONFIG_SEMI_RANDOM_ENTRIES
+#define NUM_SEQ_ENTRIES 8
+#endif
+
zone_t ipc_space_zone;
ipc_space_t ipc_space_kernel;
ipc_space_t ipc_space_reply;
-#if MACH_KDB
-ipc_space_t default_pager_space;
-#endif /* MACH_KDB */
/*
* Routine: ipc_space_reference
* Routine: ipc_space_release
* Purpose:
- * Function versions of the IPC space macros.
- * The "is_" cover macros can be defined to use the
- * macros or the functions, as desired.
+ * Function versions of the IPC space inline reference.
*/
void
ipc_space_reference(
	ipc_space_t	space)
{
-	ipc_space_reference_macro(space);
+	/* Out-of-line wrapper around the is_reference() inline. */
+	is_reference(space);
}
void
ipc_space_release(
	ipc_space_t	space)
{
-	ipc_space_release_macro(space);
+	/* Out-of-line wrapper around the is_release() inline. */
+	is_release(space);
+}
+
+/*	Routine:	ipc_space_get_rollpoint
+ *	Purpose:
+ *		Generate a new gencount rollover point from a space's entropy pool
+ */
+ipc_entry_bits_t
+ipc_space_get_rollpoint(
+	ipc_space_t	space)
+{
+	/*
+	 * Draw IE_BITS_ROLL_BITS pseudo-random bits from the space's
+	 * per-space bool generator / entropy pool.
+	 * NOTE(review): assumes random_bool_gen_bits() returns a value that
+	 * fits in ipc_entry_bits_t — confirm against prng/random.h.
+	 */
+	return random_bool_gen_bits(
+		&space->bool_gen,
+		&space->is_entropy[0],
+		IS_ENTROPY_CNT,
+		IE_BITS_ROLL_BITS);
}
+/*
+ *	Routine:	ipc_space_rand_freelist
+ *	Purpose:
+ *		Pseudo-randomly permute the order of entries in an IPC space
+ *		by chaining the free list through the entries in a randomized
+ *		order, so that newly allocated port names are hard to predict.
+ *	Arguments:
+ *		space:	the ipc space to initialize.
+ *		table:	the corresponding ipc table to initialize.
+ *		bottom:	the start of the range to initialize (inclusive).
+ *		top:	the end of the range to initialize (noninclusive).
+ */
+void
+ipc_space_rand_freelist(
+	ipc_space_t		space,
+	ipc_entry_t		table,
+	mach_port_index_t	bottom,
+	mach_port_index_t	top)
+{
+#ifdef CONFIG_SEMI_RANDOM_ENTRIES
+	/*
+	 * Only make sequential entries at the start of the table, and not when
+	 * we're growing the space.
+	 */
+	int at_start = (bottom == 0);
+	ipc_entry_num_t total = 0;
+#endif
+
+	/* First entry in the free list is always free, and is the start of the free list. */
+	mach_port_index_t curr = bottom;
+	/* Narrow [bottom, top) to the remaining candidates; top becomes inclusive. */
+	bottom++;
+	top--;
+
+	/*
+	 * Initialize the free list in the table.
+	 * Add the entries in pseudo-random order and randomly set the generation
+	 * number, in order to frustrate attacks involving port name reuse.
+	 */
+	while (bottom <= top) {
+		ipc_entry_t entry = &table[curr];
+		int which;
+#ifdef CONFIG_SEMI_RANDOM_ENTRIES
+		/*
+		 * XXX: This is a horrible hack to make sure that randomizing the port
+		 * doesn't break programs that might have (sad) hard-coded values for
+		 * certain port names.
+		 */
+		if (at_start && total++ < NUM_SEQ_ENTRIES)
+			which = 0;
+		else
+#endif
+		/* One random bit decides which end of the range supplies the next entry. */
+		which = random_bool_gen_bits(
+			&space->bool_gen,
+			&space->is_entropy[0],
+			IS_ENTROPY_CNT,
+			1);
+
+		/* Take from the top end on 1, from the bottom end on 0. */
+		mach_port_index_t next;
+		if (which) {
+			next = top;
+			top--;
+		} else {
+			next = bottom;
+			bottom++;
+		}
+
+		/*
+		 * The entry's gencount will roll over on its first allocation, at which
+		 * point a random rollover will be set for the entry.
+		 */
+		entry->ie_bits   = IE_BITS_GEN_MASK;
+		entry->ie_next   = next;
+		entry->ie_object = IO_NULL;
+		entry->ie_index  = 0;
+		curr = next;
+	}
+	/* Last remaining entry terminates the free list (ie_next == 0). */
+	table[curr].ie_next   = 0;
+	table[curr].ie_object = IO_NULL;
+	table[curr].ie_index  = 0;
+	table[curr].ie_bits   = IE_BITS_GEN_MASK;
+}
+
+
/*
* Routine: ipc_space_create
* Purpose:
ipc_space_t space;
ipc_entry_t table;
ipc_entry_num_t new_size;
- mach_port_index_t index;
space = is_alloc();
if (space == IS_NULL)
new_size = initial->its_size;
memset((void *) table, 0, new_size * sizeof(struct ipc_entry));
- /*
- * Initialize the free list in the table.
- * Add the entries in reverse order, and
- * set the generation number to -1, so that
- * initial allocations produce "natural" names.
- */
- for (index = 0; index < new_size; index++) {
- ipc_entry_t entry = &table[index];
-
- entry->ie_bits = IE_BITS_GEN_MASK;
- entry->ie_next = index+1;
- }
- table[new_size-1].ie_next = 0;
+ /* Set to 0 so entropy pool refills */
+ memset((void *) space->is_entropy, 0, sizeof(space->is_entropy));
- is_ref_lock_init(space);
- space->is_references = 2;
+ random_bool_init(&space->bool_gen);
+ ipc_space_rand_freelist(space, table, 0, new_size);
is_lock_init(space);
- space->is_active = TRUE;
- space->is_growing = FALSE;
- space->is_table = table;
+ space->is_bits = 2; /* 2 refs, active, not growing */
space->is_table_size = new_size;
+ space->is_table_free = new_size - 1;
+ space->is_table = table;
space->is_table_next = initial+1;
-
- ipc_splay_tree_init(&space->is_tree);
- space->is_tree_total = 0;
- space->is_tree_small = 0;
- space->is_tree_hash = 0;
+ space->is_task = NULL;
+ space->is_low_mod = new_size;
+ space->is_high_mod = 0;
+ space->is_node_id = HOST_LOCAL_NODE; /* HOST_LOCAL_NODE, except proxy spaces */
*spacep = space;
return KERN_SUCCESS;
if (space == IS_NULL)
return KERN_RESOURCE_SHORTAGE;
- is_ref_lock_init(space);
- space->is_references = 1;
-
is_lock_init(space);
- space->is_active = FALSE;
+
+ space->is_bits = IS_INACTIVE | 1; /* 1 ref, not active, not growing */
+ space->is_table = IE_NULL;
+ space->is_task = TASK_NULL;
+ space->is_table_next = 0;
+ space->is_low_mod = 0;
+ space->is_high_mod = 0;
+ space->is_node_id = HOST_LOCAL_NODE; /* HOST_LOCAL_NODE, except proxy spaces */
*spacep = space;
return KERN_SUCCESS;
ipc_space_clean(
ipc_space_t space)
{
- ipc_tree_entry_t tentry;
ipc_entry_t table;
ipc_entry_num_t size;
mach_port_index_t index;
* we must wait until they finish and figure
* out the space died.
*/
+ retry:
is_write_lock(space);
- while (space->is_growing)
+ while (is_growing(space))
is_write_sleep(space);
+ if (!is_active(space)) {
+ is_write_unlock(space);
+ return;
+ }
+
/*
* Now we can futz with it since we have the write lock.
*/
-#if MACH_KDB
- if (space == default_pager_space)
- default_pager_space = IS_NULL;
-#endif /* MACH_KDB */
table = space->is_table;
size = space->is_table_size;
if (type != MACH_PORT_TYPE_NONE) {
mach_port_name_t name = MACH_PORT_MAKE(index,
IE_BITS_GEN(entry->ie_bits));
- ipc_right_destroy(space, name, entry);
+ ipc_right_destroy(space, name, entry, FALSE, 0); /* unlocks space */
+ goto retry;
}
}
- /*
+ /*
* JMM - Now the table is cleaned out. We don't bother shrinking the
* size of the table at this point, but we probably should if it is
- * really large. Lets just clean up the splay tree.
+ * really large.
*/
- start_splay:
- for (tentry = ipc_splay_traverse_start(&space->is_tree);
- tentry != ITE_NULL;
- tentry = ipc_splay_traverse_next(&space->is_tree, TRUE)) {
- mach_port_type_t type;
- mach_port_name_t name = tentry->ite_name;
-
- type = IE_BITS_TYPE(tentry->ite_bits);
- /*
- * If it is a real right, then destroy it. This will have the
- * side effect of removing it from the splay, so start over.
- */
- if(type != MACH_PORT_TYPE_NONE) {
- ipc_splay_traverse_finish(&space->is_tree);
- ipc_right_destroy(space, name, &tentry->ite_entry);
- goto start_splay;
- }
- }
- ipc_splay_traverse_finish(&space->is_tree);
+
is_write_unlock(space);
}
/*
- * Routine: ipc_space_destroy
+ * Routine: ipc_space_terminate
* Purpose:
* Marks the space as dead and cleans up the entries.
* Does nothing if the space is already dead.
*/
void
-ipc_space_destroy(
+ipc_space_terminate(
ipc_space_t space)
{
- boolean_t active;
- ipc_tree_entry_t tentry;
ipc_entry_t table;
ipc_entry_num_t size;
mach_port_index_t index;
assert(space != IS_NULL);
is_write_lock(space);
- active = space->is_active;
- space->is_active = FALSE;
- is_write_unlock(space);
-
- if (!active)
+ if (!is_active(space)) {
+ is_write_unlock(space);
return;
-
+ }
+ is_mark_inactive(space);
/*
* If somebody is trying to grow the table,
* we must wait until they finish and figure
* out the space died.
*/
- is_read_lock(space);
- while (space->is_growing)
- is_read_sleep(space);
+ while (is_growing(space))
+ is_write_sleep(space);
+
+ is_write_unlock(space);
+
- is_read_unlock(space);
/*
* Now we can futz with it unlocked.
*/
-#if MACH_KDB
- if (space == default_pager_space)
- default_pager_space = IS_NULL;
-#endif /* MACH_KDB */
table = space->is_table;
size = space->is_table_size;
name = MACH_PORT_MAKE(index,
IE_BITS_GEN(entry->ie_bits));
- ipc_right_clean(space, name, entry);
+ ipc_right_terminate(space, name, entry);
}
}
it_entries_free(space->is_table_next-1, table);
space->is_table_size = 0;
-
- for (tentry = ipc_splay_traverse_start(&space->is_tree);
- tentry != ITE_NULL;
- tentry = ipc_splay_traverse_next(&space->is_tree, TRUE)) {
- mach_port_type_t type;
- mach_port_name_t name = tentry->ite_name;
-
- type = IE_BITS_TYPE(tentry->ite_bits);
- assert(type != MACH_PORT_TYPE_NONE);
-
- ipc_right_clean(space, name, &tentry->ite_entry);
-
- if(type == MACH_PORT_TYPE_SEND)
- ipc_hash_global_delete(space, tentry->ite_object,
- name, tentry);
- }
- ipc_splay_traverse_finish(&space->is_tree);
+ space->is_table_free = 0;
/*
* Because the space is now dead,