/*
 * Copyright (c) 2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <kern/backtrace.h>
#include <vm/vm_map_store_rb.h>
RB_GENERATE(rb_head, vm_map_store, entry, rb_node_compare);
#define VME_FOR_STORE(store) \
	(vm_map_entry_t)(((unsigned long)store) - ((unsigned long)sizeof(struct vm_map_links)))
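/*
 * Note: VME_FOR_STORE recovers the owning vm_map_entry from a pointer to its
 * embedded vm_map_store. It assumes the layout in which the entry begins with
 * its vm_map_links, immediately followed by the store, so backing up by
 * sizeof(struct vm_map_links) lands on the start of the entry.
 */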
void
vm_map_store_init_rb(struct vm_map_header *hdr)
{
	RB_INIT(&(hdr->rb_head_store));
}
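/*
 * Comparator used by the RB_GENERATE() macro above: nodes are ordered by the
 * vme_start of their owning entries. Because map entries should never
 * overlap, a start address that falls inside the parent's [vme_start, vme_end)
 * range is treated as equal.
 */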
int
rb_node_compare(struct vm_map_store *node, struct vm_map_store *parent)
{
	vm_map_entry_t vme_c;
	vm_map_entry_t vme_p;

	vme_c = VME_FOR_STORE(node);
	vme_p = VME_FOR_STORE(parent);
	if (vme_c->vme_start < vme_p->vme_start) {
		return -1;
	}
	if (vme_c->vme_start >= vme_p->vme_end) {
		return 1;
	}
	return 0;
}
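/*
 * This appears to be a debugging aid for a lookup that went wrong: it re-runs
 * the RB_FIND for the entry and panics either way, reporting either the
 * missing entry or the node that was found together with its left and right
 * children.
 */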
void
vm_map_store_walk_rb(vm_map_t map, vm_map_entry_t *wrong_vme, vm_map_entry_t *vm_entry)
{
	struct vm_map_header hdr = map->hdr;
	struct vm_map_store *rb_entry = RB_ROOT(&(hdr.rb_head_store));
	vm_map_entry_t cur = *vm_entry;

	rb_entry = RB_FIND(rb_head, &(hdr.rb_head_store), &(cur->store));
	if (rb_entry == NULL) {
		panic("NO SUCH ENTRY %p. Gave back %p", *vm_entry, *wrong_vme);
	} else {
		panic("Cur: %p, L: %p, R: %p", VME_FOR_STORE(rb_entry),
		    VME_FOR_STORE(RB_LEFT(rb_entry, entry)),
		    VME_FOR_STORE(RB_RIGHT(rb_entry, entry)));
	}
}
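/*
 * Binary search of the red-black tree for the entry containing 'address'.
 * On a hit, *vm_entry is set to that entry and TRUE is returned. On a miss,
 * *vm_entry is set to the closest preceding entry (or to the map header when
 * the address lies below every entry) and FALSE is returned.
 */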
boolean_t
vm_map_store_lookup_entry_rb(vm_map_t map, vm_map_offset_t address, vm_map_entry_t *vm_entry)
{
	struct vm_map_header hdr = map->hdr;
	struct vm_map_store *rb_entry = RB_ROOT(&(hdr.rb_head_store));
	vm_map_entry_t cur = vm_map_to_entry(map);
	vm_map_entry_t prev = VM_MAP_ENTRY_NULL;

	while (rb_entry != (struct vm_map_store *)NULL) {
		cur = VME_FOR_STORE(rb_entry);
		if (cur == VM_MAP_ENTRY_NULL) {
			panic("no entry");
		}
		if (address >= cur->vme_start) {
			if (address < cur->vme_end) {
				*vm_entry = cur;
				return TRUE;
			}
			rb_entry = RB_RIGHT(rb_entry, entry);
			prev = cur;
		} else {
			rb_entry = RB_LEFT(rb_entry, entry);
		}
	}
	if (prev == VM_MAP_ENTRY_NULL) {
		prev = vm_map_to_entry(map);
	}
	*vm_entry = prev;
	return FALSE;
}
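/*
 * Insert an entry's embedded store node into the header's red-black tree.
 * 'after_where' is unused here; it presumably only matters to the linked-list
 * store. A duplicate insertion panics with both entries' ranges.
 */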
void
vm_map_store_entry_link_rb(struct vm_map_header *mapHdr, __unused vm_map_entry_t after_where, vm_map_entry_t entry)
{
	struct rb_head *rbh = &(mapHdr->rb_head_store);
	struct vm_map_store *store = &(entry->store);
	struct vm_map_store *tmp_store;

	if ((tmp_store = RB_INSERT(rb_head, rbh, store)) != NULL) {
		panic("VMSEL: INSERT FAILED: 0x%lx, 0x%lx, 0x%lx, 0x%lx", (uintptr_t)entry->vme_start, (uintptr_t)entry->vme_end,
		    (uintptr_t)(VME_FOR_STORE(tmp_store))->vme_start, (uintptr_t)(VME_FOR_STORE(tmp_store))->vme_end);
	}
}
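/*
 * Remove an entry's store node from the header's red-black tree, panicking if
 * the entry is not present in the tree.
 */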
void
vm_map_store_entry_unlink_rb(struct vm_map_header *mapHdr, vm_map_entry_t entry)
{
	struct rb_head *rbh = &(mapHdr->rb_head_store);
	struct vm_map_store *rb_entry;
	struct vm_map_store *store = &(entry->store);

	rb_entry = RB_FIND(rb_head, rbh, store);
	if (rb_entry == NULL) {
		panic("NO ENTRY TO DELETE");
	}
	RB_REMOVE(rb_head, rbh, store);
}
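/*
 * Detach up to 'nentries' entries of a vm_map_copy from its red-black tree,
 * starting at 'entry' and walking the copy's entry list. The entries
 * themselves stay on the linked list; only the tree is reset.
 */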
void
vm_map_store_copy_reset_rb(vm_map_copy_t copy, vm_map_entry_t entry, int nentries)
{
	struct vm_map_header *mapHdr = &(copy->cpy_hdr);
	struct rb_head *rbh = &(mapHdr->rb_head_store);
	struct vm_map_store *store;

	while (entry != vm_map_copy_to_entry(copy) && nentries > 0) {
		store = &(entry->store);
		RB_REMOVE(rb_head, rbh, store);
		entry = entry->vme_next;
		nentries--;
	}
}
extern zone_t vm_map_holes_zone; /* zone for vm map holes (vm_map_links) structures */
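/*
 * Hole management.
 *
 * When map->holelistenabled is set, the map appears to track its unallocated
 * ranges ("holes") in a circular, doubly linked list of vm_map_links hanging
 * off map->holes_list, with map->hole_hint caching a recently touched hole.
 * The helpers below keep that list in sync as entries are created and
 * deleted. vm_map_combine_hole() merges a hole with the one that follows it,
 * freeing the absorbed (middle) links structure.
 */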
void
vm_map_combine_hole(vm_map_t map, vm_map_entry_t hole_entry);
void
vm_map_combine_hole(__unused vm_map_t map, vm_map_entry_t hole_entry)
{
	vm_map_entry_t middle_hole_entry, last_hole_entry;

	hole_entry->vme_end = hole_entry->vme_next->vme_end;

	middle_hole_entry = hole_entry->vme_next;
	last_hole_entry = middle_hole_entry->vme_next;

	assert(last_hole_entry->vme_prev == middle_hole_entry);
	assert(middle_hole_entry->vme_end != last_hole_entry->vme_start);

	last_hole_entry->vme_prev = hole_entry;
	hole_entry->vme_next = last_hole_entry;

	middle_hole_entry->vme_prev = NULL;
	middle_hole_entry->vme_next = NULL;

	zfree(vm_map_holes_zone, middle_hole_entry);

	assert(hole_entry->vme_start < hole_entry->vme_end);
	assert(last_hole_entry->vme_start < last_hole_entry->vme_end);
}
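/*
 * Unlink 'hole_entry' from the map's circular hole list, fix up the list head
 * and the hole hint as needed, and free it back to vm_map_holes_zone.
 */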
void
vm_map_delete_hole(vm_map_t map, vm_map_entry_t hole_entry);
void
vm_map_delete_hole(vm_map_t map, vm_map_entry_t hole_entry)
{
	if (hole_entry == CAST_TO_VM_MAP_ENTRY(map->holes_list)) {
		if (hole_entry->vme_next == CAST_TO_VM_MAP_ENTRY(map->holes_list)) {
			map->holes_list = NULL;
			SAVE_HINT_HOLE_WRITE(map, NULL);
		} else {
			vm_map_entry_t l_next, l_prev;

			l_next = (vm_map_entry_t) map->holes_list->next;
			l_prev = (vm_map_entry_t) map->holes_list->prev;
			map->holes_list = (struct vm_map_links *) l_next;

			l_next->vme_prev = l_prev;
			l_prev->vme_next = l_next;

			SAVE_HINT_HOLE_WRITE(map, (struct vm_map_links *) l_next);
		}
	} else {
		SAVE_HINT_HOLE_WRITE(map, (struct vm_map_links *) hole_entry->vme_prev);

		hole_entry->vme_prev->vme_next = hole_entry->vme_next;
		hole_entry->vme_next->vme_prev = hole_entry->vme_prev;
	}

	hole_entry->vme_next = NULL;
	hole_entry->vme_prev = NULL;
	zfree(vm_map_holes_zone, hole_entry);
}
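/*
 * Sanity check: walk the entry list alongside the hole list and panic if a
 * gap between consecutive entries does not line up with the corresponding
 * hole's start/end. 'old_hole_entry' is carried along only so the
 * pre-modification hole state can be reported in the panic messages.
 */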
static void
check_map_sanity(vm_map_t map, vm_map_entry_t old_hole_entry)
{
	vm_map_entry_t hole_entry, next_hole_entry;
	vm_map_entry_t map_entry, next_map_entry;

	if (map->holes_list == NULL) {
		return;
	}

	hole_entry = (vm_map_entry_t) map->holes_list;
	next_hole_entry = hole_entry->vme_next;

	map_entry = vm_map_first_entry(map);
	next_map_entry = map_entry->vme_next;

	while (map_entry->vme_start > hole_entry->vme_start) {
		hole_entry = next_hole_entry;
		next_hole_entry = hole_entry->vme_next;

		if (hole_entry == (vm_map_entry_t)map->holes_list) {
			break;
		}
	}

	while (map_entry != vm_map_to_entry(map)) {
		if (map_entry->vme_start >= map->max_offset) {
			break;
		}

		if (map_entry->vme_end != map_entry->vme_next->vme_start) {
			if (map_entry->vme_next == vm_map_to_entry(map)) {
				break;
			}

			if (hole_entry->vme_start != map_entry->vme_end) {
				panic("hole_entry not aligned %p(0x%llx), %p (0x%llx), %p", hole_entry, (unsigned long long)hole_entry->vme_start, map_entry->vme_next, (unsigned long long)map_entry->vme_end, old_hole_entry);
				assert(hole_entry->vme_start == map_entry->vme_end);
			}

			if (hole_entry->vme_end != map_entry->vme_next->vme_start) {
				panic("hole_entry not next aligned %p(0x%llx), %p (0x%llx), %p", hole_entry, (unsigned long long)hole_entry->vme_end, map_entry->vme_next, (unsigned long long)map_entry->vme_next->vme_start, old_hole_entry);
				assert(hole_entry->vme_end == map_entry->vme_next->vme_start);
			}

			hole_entry = next_hole_entry;
			next_hole_entry = hole_entry->vme_next;

			if (hole_entry == (vm_map_entry_t)map->holes_list) {
				break;
			}
		}

		map_entry = map_entry->vme_next;
	}
}
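/*
 * Snapshot the bounds and links of a hole into a caller-provided entry so the
 * pre-modification state can later be handed to check_map_sanity().
 */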
static void
copy_hole_info(vm_map_entry_t hole_entry, vm_map_entry_t old_hole_entry)
{
	old_hole_entry->vme_prev = hole_entry->vme_prev;
	old_hole_entry->vme_next = hole_entry->vme_next;
	old_hole_entry->vme_start = hole_entry->vme_start;
	old_hole_entry->vme_end = hole_entry->vme_end;
}
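/*
 * An entry has been deleted from the map: the range it covered becomes a
 * hole. Try to extend or merge an adjacent hole, using the hole hint as a
 * starting point; otherwise allocate a fresh vm_map_links record and splice
 * it into the hole list at the right position. The hole hint is updated
 * before returning.
 */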
static void
update_holes_on_entry_deletion(vm_map_t map, vm_map_entry_t old_entry);
static void
update_holes_on_entry_deletion(vm_map_t map, vm_map_entry_t old_entry)
{
	/*
	 * Dealing with the deletion of an older entry.
	 */

	vm_map_entry_t hole_entry, next_hole_entry;
	struct vm_map_entry old_hole_entry;
	boolean_t create_new_hole = TRUE;

	hole_entry = CAST_TO_VM_MAP_ENTRY(map->hole_hint);

	if (hole_entry) {
		if (hole_entry->vme_end == old_entry->vme_start) {
			/*
			 * Found a hole right above our entry.
			 */
		} else if (hole_entry->vme_start == old_entry->vme_end) {
			if (hole_entry != CAST_TO_VM_MAP_ENTRY(map->holes_list)) {
				/*
				 * Found a hole right below our entry, but
				 * make sure we don't erroneously extend backwards.
				 */
				hole_entry = hole_entry->vme_prev;
			}
		} else if (hole_entry->vme_start > old_entry->vme_end) {
			/*
			 * Useless hint. Start from the top.
			 */
			hole_entry = CAST_TO_VM_MAP_ENTRY(map->holes_list);
		}

		if (hole_entry != CAST_TO_VM_MAP_ENTRY(map->holes_list)) {
			if (hole_entry->vme_start > old_entry->vme_start) {
				panic("Hole hint failed: Hole entry start: 0x%llx, entry start: 0x%llx, map hole start: 0x%llx, map hint start: 0x%llx\n",
				    (unsigned long long)hole_entry->vme_start,
				    (unsigned long long)old_entry->vme_start,
				    (unsigned long long)map->holes_list->start,
				    (unsigned long long)map->hole_hint->start);
			}
			if (hole_entry->vme_end > old_entry->vme_start) {
				panic("Hole hint failed: Hole entry end: 0x%llx, entry start: 0x%llx, map hole start: 0x%llx, map hint start: 0x%llx\n",
				    (unsigned long long)hole_entry->vme_end,
				    (unsigned long long)old_entry->vme_start,
				    (unsigned long long)map->holes_list->start,
				    (unsigned long long)map->hole_hint->start);
			}
		}

		while (1) {
			next_hole_entry = hole_entry->vme_next;

			/*
			 * Hole is right above the entry.
			 */
			if (hole_entry->vme_end == old_entry->vme_start) {
				copy_hole_info(hole_entry, &old_hole_entry);

				/*
				 * Is there another hole right below the entry?
				 * Can we combine holes?
				 */
				if (old_entry->vme_end == hole_entry->vme_next->vme_start) {
					vm_map_combine_hole(map, hole_entry);
				} else {
					hole_entry->vme_end = old_entry->vme_end;
				}
				create_new_hole = FALSE;

				check_map_sanity(map, &old_hole_entry);
				break;
			}

			/*
			 * Hole is right below the entry.
			 */
			if (hole_entry->vme_start == old_entry->vme_end) {
				copy_hole_info(hole_entry, &old_hole_entry);

				hole_entry->vme_start = old_entry->vme_start;
				create_new_hole = FALSE;

				check_map_sanity(map, &old_hole_entry);
				break;
			}

			/*
			 * Hole is beyond our entry. Let's go back to the last hole
			 * before our entry so we have the right place to link up the
			 * new hole that will be needed.
			 */
			if (hole_entry->vme_start > old_entry->vme_end) {
				copy_hole_info(hole_entry, &old_hole_entry);

				if (hole_entry != CAST_TO_VM_MAP_ENTRY(map->holes_list)) {
					assert(hole_entry->vme_start != old_entry->vme_start);
					hole_entry = hole_entry->vme_prev;
				}
				break;
			}

			hole_entry = next_hole_entry;

			if (hole_entry == CAST_TO_VM_MAP_ENTRY(map->holes_list)) {
				hole_entry = hole_entry->vme_prev;
				break;
			}
		}
	}

	if (create_new_hole) {
		struct vm_map_links *new_hole_entry = NULL;
		vm_map_entry_t l_next, l_prev;

		new_hole_entry = zalloc(vm_map_holes_zone);

		/*
		 * First hole in the map?
		 * OR
		 * A hole that is located above the current first hole in the map?
		 */
		if (map->holes_list == NULL || (hole_entry == CAST_TO_VM_MAP_ENTRY(map->holes_list) && hole_entry->vme_start > old_entry->vme_start)) {
			if (map->holes_list == NULL) {
				map->holes_list = new_hole_entry;
				new_hole_entry->prev = new_hole_entry->next = CAST_TO_VM_MAP_ENTRY(map->holes_list);
			} else {
				l_next = CAST_TO_VM_MAP_ENTRY(map->holes_list);
				l_prev = map->holes_list->prev;
				map->holes_list = new_hole_entry;
				new_hole_entry->next = l_next;
				new_hole_entry->prev = l_prev;

				l_prev->vme_next = l_next->vme_prev = CAST_TO_VM_MAP_ENTRY(new_hole_entry);
			}
		} else {
			l_next = hole_entry->vme_next;
			l_prev = hole_entry->vme_next->vme_prev;

			new_hole_entry->prev = hole_entry;
			new_hole_entry->next = l_next;

			hole_entry->vme_next = CAST_TO_VM_MAP_ENTRY(new_hole_entry);
			l_next->vme_prev = CAST_TO_VM_MAP_ENTRY(new_hole_entry);
		}

		new_hole_entry->start = old_entry->vme_start;
		new_hole_entry->end = old_entry->vme_end;

		hole_entry = CAST_TO_VM_MAP_ENTRY(new_hole_entry);

		assert(new_hole_entry->start < new_hole_entry->end);

		check_map_sanity(map, &old_hole_entry);
	}

	SAVE_HINT_HOLE_WRITE(map, (struct vm_map_links *) hole_entry);
}
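/*
 * A new entry has been added to the map: the hole(s) it overlaps must shrink,
 * split, or disappear. The Case A/B/C comments inside describe the specific
 * geometries handled; wrapping all the way around the hole list without a
 * match is treated as a fatal inconsistency.
 */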
static void
update_holes_on_entry_creation(vm_map_t map, vm_map_entry_t new_entry);
static void
update_holes_on_entry_creation(vm_map_t map, vm_map_entry_t new_entry)
{
	vm_map_entry_t hole_entry, next_hole_entry;
	struct vm_map_entry old_hole_entry;
	vm_map_entry_t tmp_entry;
	boolean_t check_map_with_hole_sanity = TRUE;

	/*
	 * Case A: The entry is aligned exactly with the start and end of the hole.
	 *         This will delete the hole.
	 *
	 * Case B: The entry is completely within a hole but NOT aligned with the start/end of the hole.
	 *         This will split a hole.
	 *
	 * Case C: The entry overlaps with the hole. The entry could be extending upwards (C1) or downwards (C2).
	 *         This will reduce the size of the hole or delete the hole completely if it is smaller than the entry.
	 */

	hole_entry = CAST_TO_VM_MAP_ENTRY(map->holes_list);
	next_hole_entry = hole_entry->vme_next;

	while (1) {
		/*
		 * If the entry doesn't exist in the RB tree, we are likely dealing with copy maps where
		 * the entries belonging to the copy map are linked into the list of entries silently and
		 * then added to the RB-tree later on.
		 * So sanity checks are useless in that case.
		 */
		check_map_with_hole_sanity = vm_map_lookup_entry(map, new_entry->vme_start, &tmp_entry);

		if (hole_entry->vme_start == new_entry->vme_start &&
		    hole_entry->vme_end == new_entry->vme_end) {
			/* Case A */
			copy_hole_info(hole_entry, &old_hole_entry);

			/*
			 * This check makes sense only for regular maps, not copy maps.
			 * With a regular map, the VM entry is first linked and then
			 * the hole is deleted. So the check below, which makes sure that
			 * the map's bounds are being respected, is valid.
			 * But for copy maps, the hole is deleted before the VM entry is
			 * linked (vm_map_store_copy_insert) and so this check is invalid.
			 *
			 * if (hole_entry == (vm_map_entry_t) map->holes_list) {
			 *
			 *	if (hole_entry->vme_next == (vm_map_entry_t) map->holes_list) {
			 *
			 *		next_hole_entry = vm_map_last_entry(map);
			 *		assert(next_hole_entry->vme_end >= map->max_offset);
			 *	}
			 * }
			 */

			vm_map_delete_hole(map, hole_entry);

			if (check_map_with_hole_sanity) {
				check_map_sanity(map, &old_hole_entry);
			}
			return;
		} else if (hole_entry->vme_start < new_entry->vme_start &&
		    hole_entry->vme_end > new_entry->vme_end) {
			/* Case B */
			struct vm_map_links *new_hole_entry = NULL;

			new_hole_entry = zalloc(vm_map_holes_zone);

			copy_hole_info(hole_entry, &old_hole_entry);

			new_hole_entry->prev = hole_entry;
			new_hole_entry->next = hole_entry->vme_next;
			hole_entry->vme_next->vme_prev = CAST_TO_VM_MAP_ENTRY(new_hole_entry);
			hole_entry->vme_next = CAST_TO_VM_MAP_ENTRY(new_hole_entry);

			new_hole_entry->start = new_entry->vme_end;
			new_hole_entry->end = hole_entry->vme_end;
			hole_entry->vme_end = new_entry->vme_start;

			assert(hole_entry->vme_start < hole_entry->vme_end);
			assert(new_hole_entry->start < new_hole_entry->end);

			if (check_map_with_hole_sanity) {
				check_map_sanity(map, &old_hole_entry);
			}

			SAVE_HINT_HOLE_WRITE(map, (struct vm_map_links *) hole_entry);
			return;
		} else if ((new_entry->vme_start <= hole_entry->vme_start) && (hole_entry->vme_start < new_entry->vme_end)) {
			/*
			 * Case C1: Entry moving upwards and a part/full hole lies within the bounds of the entry.
			 */
			copy_hole_info(hole_entry, &old_hole_entry);

			if (hole_entry->vme_end <= new_entry->vme_end) {
				vm_map_delete_hole(map, hole_entry);
			} else {
				hole_entry->vme_start = new_entry->vme_end;
				SAVE_HINT_HOLE_WRITE(map, (struct vm_map_links *) hole_entry);
			}

			if (check_map_with_hole_sanity) {
				check_map_sanity(map, &old_hole_entry);
			}
			break;
		} else if ((new_entry->vme_start < hole_entry->vme_end) && (hole_entry->vme_end <= new_entry->vme_end)) {
			/*
			 * Case C2: Entry moving downwards and a part/full hole lies within the bounds of the entry.
			 */
			copy_hole_info(hole_entry, &old_hole_entry);

			if (hole_entry->vme_start >= new_entry->vme_start) {
				vm_map_delete_hole(map, hole_entry);
			} else {
				hole_entry->vme_end = new_entry->vme_start;
				SAVE_HINT_HOLE_WRITE(map, (struct vm_map_links *) hole_entry);
			}

			if (check_map_with_hole_sanity) {
				check_map_sanity(map, &old_hole_entry);
			}
			break;
		}

		hole_entry = next_hole_entry;
		next_hole_entry = hole_entry->vme_next;

		if (hole_entry == CAST_TO_VM_MAP_ENTRY(map->holes_list)) {
			panic("Illegal action: h1: %p, s:0x%llx, e:0x%llx...h2:%p, s:0x%llx, e:0x%llx...h3:0x%p, s:0x%llx, e:0x%llx\n",
			    hole_entry->vme_prev,
			    (unsigned long long)hole_entry->vme_prev->vme_start,
			    (unsigned long long)hole_entry->vme_prev->vme_end,
			    hole_entry,
			    (unsigned long long)hole_entry->vme_start,
			    (unsigned long long)hole_entry->vme_end,
			    hole_entry->vme_next,
			    (unsigned long long)hole_entry->vme_next->vme_start,
			    (unsigned long long)hole_entry->vme_next->vme_end);
		}
	}
}
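/*
 * Called after an entry has been linked into or unlinked from a map with the
 * hole list enabled. Calls that cannot affect the hole list are filtered out
 * (clip operations pass a NULL entry, and ranges pinned beyond the map's
 * usable offsets are ignored); everything else is dispatched to the creation
 * or deletion handler above.
 */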
void
update_first_free_rb(vm_map_t map, vm_map_entry_t entry, boolean_t new_entry_creation)
{
	if (map->holelistenabled) {
		/*
		 * Holes can be used to track ranges all the way up to MACH_VM_MAX_ADDRESS or more (e.g. kernel map).
		 */
		vm_map_offset_t max_valid_offset = (map->max_offset > MACH_VM_MAX_ADDRESS) ? map->max_offset : MACH_VM_MAX_ADDRESS;

		/*
		 * Clipping an entry will not result in the creation/deletion/modification of
		 * a hole. Those calls pass NULL for their target entry.
		 */
		if (entry == NULL) {
			return;
		}

		/*
		 * Commpage is pinned beyond the map's max offset. That shouldn't affect the
		 * holes within the bounds of the map.
		 */
		if (vm_map_trunc_page(entry->vme_start, VM_MAP_PAGE_MASK(map)) >= max_valid_offset) {
			return;
		}

		/*
		 * - A new entry has already been added to the map
		 * OR
		 * - An older entry has already been deleted from the map
		 *
		 * We are updating the hole list after the fact (except in one special case involving copy maps).
		 */
		if (new_entry_creation) {
			update_holes_on_entry_creation(map, entry);
		} else {
			update_holes_on_entry_deletion(map, entry);
		}
	}
}