* @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
+#include <kern/backtrace.h>
#include <vm/vm_map_store_rb.h>
RB_GENERATE(rb_head, vm_map_store, entry, rb_node_compare);
RB_REMOVE( rb_head, rbh, store );
}
-void vm_map_store_copy_insert_rb( vm_map_t map, __unused vm_map_entry_t after_where, vm_map_copy_t copy)
-{
- struct vm_map_header *mapHdr = &(map->hdr);
- struct rb_head *rbh = &(mapHdr->rb_head_store);
- struct vm_map_store *store;
- vm_map_entry_t entry = vm_map_copy_first_entry(copy);
- int inserted=0, nentries = copy->cpy_hdr.nentries;
-
- while (entry != vm_map_copy_to_entry(copy) && nentries > 0) {
- vm_map_entry_t prev = entry;
- store = &(entry->store);
- if( RB_INSERT( rb_head, rbh, store ) != NULL){
- panic("VMSCIR1: INSERT FAILED: %d: %p, %p, %p, 0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%lx",inserted, prev, entry, vm_map_copy_to_entry(copy),
- (uintptr_t)prev->vme_start, (uintptr_t)prev->vme_end, (uintptr_t)entry->vme_start, (uintptr_t)entry->vme_end,
- (uintptr_t)(VME_FOR_STORE(rbh->rbh_root))->vme_start, (uintptr_t)(VME_FOR_STORE(rbh->rbh_root))->vme_end);
- } else {
-#if MAP_ENTRY_INSERTION_DEBUG
- fastbacktrace(&entry->vme_insertion_bt[0],
- (sizeof (entry->vme_insertion_bt) / sizeof (uintptr_t)));
-#endif
- entry = entry->vme_next;
- inserted++;
- nentries--;
- }
- }
-}
-
void
vm_map_store_copy_reset_rb( vm_map_copy_t copy, vm_map_entry_t entry, int nentries )
{
}
}
-void update_first_free_rb( __unused vm_map_t map, __unused vm_map_entry_t entry)
+extern zone_t vm_map_holes_zone; /* zone for vm map holes (vm_map_links) structures */
+
+/*
+ * vm_map_combine_hole:
+ * Merge "hole_entry" with the hole that immediately follows it in the
+ * map's circular hole list: grow hole_entry's end to cover the next
+ * hole, unlink that hole, and free it back to vm_map_holes_zone.
+ * The map parameter is unused in the definition (kept for call-site
+ * symmetry with the other hole operations).
+ */
+void
+vm_map_combine_hole(vm_map_t map, vm_map_entry_t hole_entry);
+void
+vm_map_combine_hole(__unused vm_map_t map, vm_map_entry_t hole_entry)
+{
+
+ vm_map_entry_t middle_hole_entry, last_hole_entry;
+
+ /* Extend this hole to also cover the next hole's address range. */
+ hole_entry->vme_end = hole_entry->vme_next->vme_end;
+
+ /* middle_hole_entry is the hole being absorbed. */
+ middle_hole_entry = hole_entry->vme_next;
+ last_hole_entry = middle_hole_entry->vme_next;
+
+ assert(last_hole_entry->vme_prev == middle_hole_entry);
+ /*
+ * The hole after the absorbed one must not itself be contiguous with
+ * it, or it too should already have been combined.
+ */
+ assert(middle_hole_entry->vme_end != last_hole_entry->vme_start);
+
+ /* Splice middle_hole_entry out of the circular list. */
+ last_hole_entry->vme_prev = hole_entry;
+ hole_entry->vme_next = last_hole_entry;
+
+ /* Clear links before returning the element to its zone. */
+ middle_hole_entry->vme_prev = NULL;
+ middle_hole_entry->vme_next = NULL;
+
+ zfree(vm_map_holes_zone, middle_hole_entry);
+
+ assert(hole_entry->vme_start < hole_entry->vme_end);
+ assert(last_hole_entry->vme_start < last_hole_entry->vme_end);
+}
+
+
+/*
+ * vm_map_delete_hole:
+ * Unlink "hole_entry" from the map's circular hole list, fix up the
+ * list head and hole hint as needed, and free it to vm_map_holes_zone.
+ */
+void
+vm_map_delete_hole(vm_map_t map, vm_map_entry_t hole_entry);
+void
+vm_map_delete_hole(vm_map_t map, vm_map_entry_t hole_entry)
+{
+ if (hole_entry == CAST_TO_VM_MAP_ENTRY(map->holes_list)) {
+
+ if (hole_entry->vme_next == CAST_TO_VM_MAP_ENTRY(map->holes_list)) {
+
+ /* Deleting the only hole: the map has no holes left. */
+ map->holes_list = NULL;
+ SAVE_HINT_HOLE_WRITE(map, NULL);
+ } else {
+
+ /* Deleting the head: advance the head to the next hole. */
+ vm_map_entry_t l_next, l_prev;
+
+ l_next = (vm_map_entry_t) map->holes_list->next;
+ l_prev = (vm_map_entry_t) map->holes_list->prev;
+ map->holes_list = (struct vm_map_links*) l_next;
+
+ l_next->vme_prev = l_prev;
+ l_prev->vme_next = l_next;
+
+ SAVE_HINT_HOLE_WRITE(map, (struct vm_map_links*) l_next);
+ }
+ } else {
+
+ /* Interior hole: hint at its predecessor, then unlink it. */
+ SAVE_HINT_HOLE_WRITE(map, (struct vm_map_links*) hole_entry->vme_prev);
+
+ hole_entry->vme_prev->vme_next = hole_entry->vme_next;
+ hole_entry->vme_next->vme_prev = hole_entry->vme_prev;
+ }
+
+ /* Clear links before returning the element to its zone. */
+ hole_entry->vme_next = NULL;
+ hole_entry->vme_prev = NULL;
+ zfree(vm_map_holes_zone, hole_entry);
+}
+
+
+/*
+ * For Debugging.
+ */
+
+#if DEBUG
+/*
+ * check_map_sanity:
+ * Debug-only consistency check: walk the map's entry list alongside its
+ * hole list and panic if a gap between consecutive map entries does not
+ * line up with the corresponding hole's [start, end) bounds.
+ * "old_hole_entry" is only used to provide extra context in the panic
+ * messages (the pre-update state of the hole that was just modified).
+ */
+static void
+check_map_sanity(vm_map_t map, vm_map_entry_t old_hole_entry)
+{
+ vm_map_entry_t hole_entry, next_hole_entry;
+ vm_map_entry_t map_entry, next_map_entry;
+
+ /* Nothing to check if the map has no holes. */
+ if (map->holes_list == NULL) {
+
+ return;
+ }
+
+ hole_entry = (vm_map_entry_t) map->holes_list;
+ next_hole_entry = hole_entry->vme_next;
+
+ map_entry = vm_map_first_entry(map);
+ next_map_entry = map_entry->vme_next;
+
+ /* Advance past any holes that start below the first map entry. */
+ while(map_entry->vme_start > hole_entry->vme_start) {
+ hole_entry = next_hole_entry;
+ next_hole_entry = hole_entry->vme_next;
+
+ if (hole_entry == (vm_map_entry_t)map->holes_list)
+ break;
+ }
+
+ while (map_entry != vm_map_to_entry(map)) {
+
+ if (map_entry->vme_start >= map->max_offset)
+ break;
+
+ /* A gap between this entry and the next must match a hole. */
+ if (map_entry->vme_end != map_entry->vme_next->vme_start) {
+
+ if (map_entry->vme_next == vm_map_to_entry(map))
+ break;
+
+ /* The hole must begin exactly where this entry ends... */
+ if (hole_entry->vme_start != map_entry->vme_end) {
+ panic("hole_entry not aligned %p(0x%llx), %p (0x%llx), %p", hole_entry, (unsigned long long)hole_entry->vme_start, map_entry->vme_next, (unsigned long long)map_entry->vme_end, old_hole_entry);
+ assert(hole_entry->vme_start == map_entry->vme_end);
+ }
+
+ /* ...and end exactly where the next entry begins. */
+ if (hole_entry->vme_end != map_entry->vme_next->vme_start) {
+ panic("hole_entry not next aligned %p(0x%llx), %p (0x%llx), %p", hole_entry, (unsigned long long)hole_entry->vme_end, map_entry->vme_next, (unsigned long long)map_entry->vme_next->vme_start, old_hole_entry);
+ assert(hole_entry->vme_end == map_entry->vme_next->vme_start);
+ }
+
+ hole_entry = next_hole_entry;
+ next_hole_entry = hole_entry->vme_next;
+
+ if (hole_entry == (vm_map_entry_t)map->holes_list)
+ break;
+ }
+
+ map_entry = map_entry->vme_next;
+ }
+}
+
+/*
+ * copy_hole_info:
+ * Debug-only helper: snapshot a hole's links and bounds into
+ * "old_hole_entry" so that sanity checks and panics after an update can
+ * report the hole's pre-update state.
+ */
+static void
+copy_hole_info(vm_map_entry_t hole_entry, vm_map_entry_t old_hole_entry)
+{
+ old_hole_entry->vme_prev = hole_entry->vme_prev;
+ old_hole_entry->vme_next = hole_entry->vme_next;
+ old_hole_entry->vme_start = hole_entry->vme_start;
+ old_hole_entry->vme_end = hole_entry->vme_end;
+}
+#endif /* DEBUG */
+
+/*
+ * update_holes_on_entry_deletion:
+ * A map entry has just been unlinked from "map"; record the address
+ * range it occupied as free space again, either by extending a
+ * neighbouring hole, merging the two holes around it, or creating a
+ * brand new hole, then update the map's hole hint.
+ */
+void
+update_holes_on_entry_deletion(vm_map_t map, vm_map_entry_t old_entry);
+void
+update_holes_on_entry_deletion(vm_map_t map, vm_map_entry_t old_entry)
+{
+ /*
+ * Dealing with the deletion of an older entry.
+ */
+
+ vm_map_entry_t hole_entry, next_hole_entry;
+#if DEBUG
+ struct vm_map_entry old_hole_entry;
+#endif /* DEBUG */
+ boolean_t create_new_hole = TRUE;
+
+ /* Prefer the hole hint as a starting point over the list head. */
+ hole_entry = CAST_TO_VM_MAP_ENTRY(map->hole_hint);
+
+ if (hole_entry) {
+
+ if (hole_entry->vme_end == old_entry->vme_start) {
+ /*
+ * The hint hole ends exactly where the deleted entry
+ * begins, i.e. it directly precedes the entry in
+ * address space.
+ * Hit.
+ */
+
+ } else if (hole_entry->vme_start == old_entry->vme_end) {
+
+ if (hole_entry != CAST_TO_VM_MAP_ENTRY(map->holes_list)) {
+
+ /*
+ * The hint hole starts exactly where the deleted
+ * entry ends (it directly follows the entry), but
+ * make sure we don't erroneously step backwards
+ * past the head of the hole list.
+ *
+ * Hit.
+ */
+
+ hole_entry = hole_entry->vme_prev;
+ }
+
+ } else if (hole_entry->vme_start > old_entry->vme_end) {
+
+ /*
+ * Useless hint. Start from the top.
+ */
+
+ hole_entry = CAST_TO_VM_MAP_ENTRY(map->holes_list);
+ }
+
+ /*
+ * Sanity: unless we restarted at the list head, the chosen
+ * starting hole must lie entirely below the deleted entry.
+ */
+ if (hole_entry != CAST_TO_VM_MAP_ENTRY(map->holes_list)) {
+ if (hole_entry->vme_start > old_entry->vme_start) {
+ panic("Hole hint failed: Hole entry start: 0x%llx, entry start: 0x%llx, map hole start: 0x%llx, map hint start: 0x%llx\n",
+ (unsigned long long)hole_entry->vme_start,
+ (unsigned long long)old_entry->vme_start,
+ (unsigned long long)map->holes_list->start,
+ (unsigned long long)map->hole_hint->start);
+ }
+ if (hole_entry->vme_end > old_entry->vme_start) {
+ panic("Hole hint failed: Hole entry end: 0x%llx, entry start: 0x%llx, map hole start: 0x%llx, map hint start: 0x%llx\n",
+ (unsigned long long)hole_entry->vme_end,
+ (unsigned long long)old_entry->vme_start,
+ (unsigned long long)map->holes_list->start,
+ (unsigned long long)map->hole_hint->start);
+ }
+ }
+
+ while (1) {
+
+ next_hole_entry = hole_entry->vme_next;
+
+ /*
+ * Hole is right above the entry:
+ * it ends where the deleted entry starts.
+ */
+ if (hole_entry->vme_end == old_entry->vme_start) {
+
+#if DEBUG
+ copy_hole_info(hole_entry, &old_hole_entry);
+#endif /* DEBUG */
+
+ /*
+ * Is there another hole right below the entry?
+ * Can we combine holes?
+ */
+
+ if (old_entry->vme_end == hole_entry->vme_next->vme_start) {
+
+ /* The entry separated two holes; merge them. */
+ vm_map_combine_hole(map, hole_entry);
+ } else {
+
+ /* Grow the hole to absorb the entry's range. */
+ hole_entry->vme_end = old_entry->vme_end;
+ }
+ create_new_hole = FALSE;
+#if DEBUG
+ check_map_sanity(map, &old_hole_entry);
+#endif /* DEBUG */
+ break;
+ }
+
+ /*
+ * Hole is right below the entry:
+ * it starts where the deleted entry ends.
+ */
+ if (hole_entry->vme_start == old_entry->vme_end) {
+
+#if DEBUG
+ copy_hole_info(hole_entry, &old_hole_entry);
+#endif /* DEBUG */
+
+ /* Grow the hole downwards over the entry's range. */
+ hole_entry->vme_start = old_entry->vme_start;
+ create_new_hole = FALSE;
+
+#if DEBUG
+ check_map_sanity(map, &old_hole_entry);
+#endif /* DEBUG */
+ break;
+ }
+
+ /*
+ * Hole is beyond our entry. Let's go back to the last hole
+ * before our entry so we have the right place to link up the
+ * new hole that will be needed.
+ */
+ if (hole_entry->vme_start > old_entry->vme_end) {
+
+#if DEBUG
+ copy_hole_info(hole_entry, &old_hole_entry);
+#endif /* DEBUG */
+
+ if (hole_entry != CAST_TO_VM_MAP_ENTRY(map->holes_list)) {
+ assert(hole_entry->vme_start != old_entry->vme_start);
+ hole_entry = hole_entry->vme_prev;
+ }
+ break;
+ }
+
+ hole_entry = next_hole_entry;
+
+ /*
+ * Wrapped around: the entry lies beyond the last hole, so
+ * the new hole will be linked after the current tail.
+ */
+ if (hole_entry == CAST_TO_VM_MAP_ENTRY(map->holes_list)) {
+ hole_entry = hole_entry->vme_prev;
+ break;
+ }
+ }
+ }
+
+ if (create_new_hole) {
+ struct vm_map_links *new_hole_entry = NULL;
+ vm_map_entry_t l_next, l_prev;
+
+ new_hole_entry = zalloc(vm_map_holes_zone);
+
+ /*
+ * First hole in the map?
+ * OR
+ * A hole that is located above the current first hole in the map?
+ */
+ if (map->holes_list == NULL || (hole_entry == CAST_TO_VM_MAP_ENTRY(map->holes_list) && hole_entry->vme_start > old_entry->vme_start)) {
+
+ if (map->holes_list == NULL) {
+
+ /* Singleton circular list pointing at itself. */
+ map->holes_list = new_hole_entry;
+ new_hole_entry->prev = new_hole_entry->next = CAST_TO_VM_MAP_ENTRY(map->holes_list);
+ } else {
+
+ /* Insert before the current head and become the head. */
+ l_next = CAST_TO_VM_MAP_ENTRY(map->holes_list);
+ l_prev = map->holes_list->prev;
+ map->holes_list = new_hole_entry;
+ new_hole_entry->next = l_next;
+ new_hole_entry->prev = l_prev;
+
+ l_prev->vme_next = l_next->vme_prev = CAST_TO_VM_MAP_ENTRY(new_hole_entry);
+ }
+ } else {
+
+ /* Insert immediately after hole_entry. */
+ l_next = hole_entry->vme_next;
+ l_prev = hole_entry->vme_next->vme_prev;
+
+ new_hole_entry->prev = hole_entry;
+ new_hole_entry->next = l_next;
+
+ hole_entry->vme_next = CAST_TO_VM_MAP_ENTRY(new_hole_entry);
+ l_next->vme_prev = CAST_TO_VM_MAP_ENTRY(new_hole_entry);
+ }
+
+ /* The new hole covers exactly the deleted entry's range. */
+ new_hole_entry->start = old_entry->vme_start;
+ new_hole_entry->end = old_entry->vme_end;
+
+ hole_entry = CAST_TO_VM_MAP_ENTRY(new_hole_entry);
+
+ assert(new_hole_entry->start < new_hole_entry->end);
+ }
+
+#if DEBUG
+ /*
+ * NOTE(review): old_hole_entry is only initialized via
+ * copy_hole_info() inside the "if (hole_entry)" branch above, so on
+ * the no-hint create_new_hole path it reaches this call
+ * uninitialized. It is only used for panic-message context in
+ * check_map_sanity() -- confirm this is acceptable.
+ */
+ check_map_sanity(map, &old_hole_entry);
+#endif /* DEBUG */
+
+ SAVE_HINT_HOLE_WRITE(map, (struct vm_map_links*) hole_entry);
+ return;
+}
+
+
+/*
+ * update_holes_on_entry_creation:
+ * A map entry has just been added to "map"; carve its address range out
+ * of the hole list by deleting, splitting, or shrinking the hole it
+ * overlaps (cases A/B/C below).  Panics if no hole overlaps the new
+ * entry at all.
+ */
+void
+update_holes_on_entry_creation(vm_map_t map, vm_map_entry_t new_entry);
+void
+update_holes_on_entry_creation(vm_map_t map, vm_map_entry_t new_entry)
{
- return ;
+
+ vm_map_entry_t hole_entry, next_hole_entry;
+#if DEBUG
+ struct vm_map_entry old_hole_entry;
+ vm_map_entry_t tmp_entry;
+ boolean_t check_map_with_hole_sanity = TRUE;
+#endif /* DEBUG */
+
+ /*
+ * Case A: The entry is aligned exactly with the start and end of the hole.
+ * This will delete the hole.
+ *
+ * Case B: The entry is completely within a hole but NOT aligned with the start/end of the hole.
+ * This will split a hole.
+ *
+ * Case C: The entry overlaps with the hole. The entry could be extending upwards (C1) or downwards (C2).
+ * This will reduce the size of the hole or delete the hole completely if it is smaller than the entry.
+ */
+
+ hole_entry = CAST_TO_VM_MAP_ENTRY(map->holes_list);
+ assert(hole_entry);
+ next_hole_entry = hole_entry->vme_next;
+
+ while (1) {
+
+#if DEBUG
+ /*
+ * If the entry doesn't exist in the RB tree, we are likely dealing with copy maps where
+ * the entries belonging to the copy map are linked into the list of entries silently and
+ * then added to the RB-tree later on.
+ * So sanity checks are useless in that case.
+ */
+ check_map_with_hole_sanity = vm_map_lookup_entry(map, new_entry->vme_start, &tmp_entry);
+#endif /* DEBUG */
+
+ if (hole_entry->vme_start == new_entry->vme_start &&
+ hole_entry->vme_end == new_entry->vme_end) {
+
+ /* Case A: exact fit -- the hole disappears entirely. */
+#if DEBUG
+ copy_hole_info(hole_entry, &old_hole_entry);
+#endif /* DEBUG */
+
+ /*
+ * This check makes sense only for regular maps, not copy maps.
+ * With a regular map, the VM entry is first linked and then
+ * the hole is deleted. So the check below, which makes sure that
+ * the map's bounds are being respected, is valid.
+ * But for copy maps, the hole is deleted before the VM entry is
+ * linked (vm_map_store_copy_insert) and so this check is invalid.
+ *
+ if (hole_entry == (vm_map_entry_t) map->holes_list) {
+
+ if (hole_entry->vme_next == (vm_map_entry_t) map->holes_list) {
+
+ next_hole_entry = vm_map_last_entry(map);
+ assert(next_hole_entry->vme_end >= map->max_offset);
+ }
+ }
+ */
+
+ vm_map_delete_hole(map, hole_entry);
+
+#if DEBUG
+ if (check_map_with_hole_sanity)
+ check_map_sanity(map, &old_hole_entry);
+#endif /* DEBUG */
+ return;
+
+ } else if (hole_entry->vme_start < new_entry->vme_start &&
+ hole_entry->vme_end > new_entry->vme_end) {
+
+ /* Case B: entry strictly inside the hole -- split it. */
+ struct vm_map_links *new_hole_entry = NULL;
+
+ new_hole_entry = zalloc(vm_map_holes_zone);
+
+#if DEBUG
+ copy_hole_info(hole_entry, &old_hole_entry);
+#endif /* DEBUG */
+
+ /* Link the new (upper) fragment right after the hole. */
+ new_hole_entry->prev = hole_entry;
+ new_hole_entry->next = hole_entry->vme_next;
+ hole_entry->vme_next->vme_prev = CAST_TO_VM_MAP_ENTRY(new_hole_entry);
+ hole_entry->vme_next = CAST_TO_VM_MAP_ENTRY(new_hole_entry);
+
+ /* New fragment takes the range above the entry; the old
+ * hole keeps the range below it. */
+ new_hole_entry->start = new_entry->vme_end;
+ new_hole_entry->end = hole_entry->vme_end;
+ hole_entry->vme_end = new_entry->vme_start;
+
+ assert(hole_entry->vme_start < hole_entry->vme_end);
+ assert(new_hole_entry->start < new_hole_entry->end);
+
+#if DEBUG
+ if (check_map_with_hole_sanity)
+ check_map_sanity(map, &old_hole_entry);
+#endif /* DEBUG */
+
+ SAVE_HINT_HOLE_WRITE(map, (struct vm_map_links*) hole_entry);
+ return;
+
+ } else if ((new_entry->vme_start <= hole_entry->vme_start) && (hole_entry->vme_start < new_entry->vme_end)) {
+
+ /*
+ * Case C1: Entry moving upwards and a part/full hole lies within the bounds of the entry.
+ */
+
+#if DEBUG
+ copy_hole_info(hole_entry, &old_hole_entry);
+#endif /* DEBUG */
+
+ if (hole_entry->vme_end <= new_entry->vme_end) {
+
+ /* Hole fully consumed by the entry. */
+ vm_map_delete_hole(map, hole_entry);
+ } else {
+ /* Trim the hole's bottom up to the entry's end. */
+ hole_entry->vme_start = new_entry->vme_end;
+ SAVE_HINT_HOLE_WRITE(map, (struct vm_map_links*) hole_entry);
+ }
+
+#if DEBUG
+ if (check_map_with_hole_sanity)
+ check_map_sanity(map, &old_hole_entry);
+#endif /* DEBUG */
+
+ return;
+
+ } else if ((new_entry->vme_start < hole_entry->vme_end) && (hole_entry->vme_end <= new_entry->vme_end)) {
+
+ /*
+ * Case C2: Entry moving downwards and a part/full hole lies within the bounds of the entry.
+ */
+
+#if DEBUG
+ copy_hole_info(hole_entry, &old_hole_entry);
+#endif /* DEBUG */
+
+ if (hole_entry->vme_start >= new_entry->vme_start) {
+ /* Hole fully consumed by the entry. */
+ vm_map_delete_hole(map, hole_entry);
+ } else {
+ /* Trim the hole's top down to the entry's start. */
+ hole_entry->vme_end = new_entry->vme_start;
+ SAVE_HINT_HOLE_WRITE(map, (struct vm_map_links*) hole_entry);
+ }
+
+#if DEBUG
+ if (check_map_with_hole_sanity)
+ check_map_sanity(map, &old_hole_entry);
+#endif /* DEBUG */
+
+ return;
+ }
+
+ hole_entry = next_hole_entry;
+ next_hole_entry = hole_entry->vme_next;
+
+ /* Wrapped around the circular list without finding a match. */
+ if (hole_entry == CAST_TO_VM_MAP_ENTRY(map->holes_list))
+ break;
+ }
+
+ /* No hole overlapped the new entry: should never happen. */
+ panic("Illegal action: h1: %p, s:0x%llx, e:0x%llx...h2:%p, s:0x%llx, e:0x%llx...h3:0x%p, s:0x%llx, e:0x%llx\n",
+ hole_entry->vme_prev,
+ (unsigned long long)hole_entry->vme_prev->vme_start,
+ (unsigned long long)hole_entry->vme_prev->vme_end,
+ hole_entry,
+ (unsigned long long)hole_entry->vme_start,
+ (unsigned long long)hole_entry->vme_end,
+ hole_entry->vme_next,
+ (unsigned long long)hole_entry->vme_next->vme_start,
+ (unsigned long long)hole_entry->vme_next->vme_end);
+
}
+/*
+ * update_first_free_rb:
+ * Store-layer hook invoked after an entry has been added to or removed
+ * from "map".  When the map tracks free space via its hole list
+ * (holelistenabled), dispatch to the matching hole-list updater;
+ * "new_entry_creation" selects creation vs. deletion handling.
+ * No-op when hole tracking is disabled, when entry is NULL (clipping),
+ * or when the entry lies beyond the valid offset range.
+ */
+void
+update_first_free_rb(vm_map_t map, vm_map_entry_t entry, boolean_t new_entry_creation)
+{
+
+ if (map->holelistenabled) {
+
+ /*
+ * Holes can be used to track ranges all the way up to MACH_VM_MAX_ADDRESS or more (e.g. kernel map).
+ */
+ vm_map_offset_t max_valid_offset = (map->max_offset > MACH_VM_MAX_ADDRESS) ? map->max_offset : MACH_VM_MAX_ADDRESS;
+
+ /*
+ * Clipping an entry will not result in the creation/deletion/modification of
+ * a hole. Those calls pass NULL for their target entry.
+ */
+ if (entry == NULL) {
+ return;
+ }
+
+ /*
+ * Commpage is pinned beyond the map's max offset. That shouldn't affect the
+ * holes within the bounds of the map.
+ */
+ if (vm_map_trunc_page(entry->vme_start, VM_MAP_PAGE_MASK(map)) >= max_valid_offset) {
+ return;
+ }
+
+ /*
+ *
+ * Note:
+ *
+ * - A new entry has already been added to the map
+ * OR
+ * - An older entry has already been deleted from the map
+ *
+ * We are updating the hole list after the fact (except in one special case involving copy maps).
+ *
+ */
+
+ if (new_entry_creation) {
+
+ update_holes_on_entry_creation(map, entry);
+ } else {
+
+ update_holes_on_entry_deletion(map, entry);
+ }
+ }
+}