/*
 * Copyright (c) 2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
29 #include <vm/vm_map_store_rb.h>
31 RB_GENERATE(rb_head
, vm_map_store
, entry
, rb_node_compare
);
33 #define VME_FOR_STORE( store) \
34 (vm_map_entry_t)(((unsigned long)store) - ((unsigned long)sizeof(struct vm_map_links)))
37 vm_map_store_init_rb( struct vm_map_header
* hdr
)
39 RB_INIT(&(hdr
->rb_head_store
));
42 int rb_node_compare(struct vm_map_store
*node
, struct vm_map_store
*parent
)
47 vme_c
= VME_FOR_STORE(node
);
48 vme_p
= VME_FOR_STORE(parent
);
49 if (vme_c
->vme_start
< vme_p
->vme_start
)
51 if (vme_c
->vme_start
>= vme_p
->vme_end
)
56 void vm_map_store_walk_rb( vm_map_t map
, vm_map_entry_t
*wrong_vme
, vm_map_entry_t
*vm_entry
)
58 struct vm_map_header hdr
= map
->hdr
;
59 struct vm_map_store
*rb_entry
= RB_ROOT(&(hdr
.rb_head_store
));
60 vm_map_entry_t cur
= *vm_entry
;
62 rb_entry
= RB_FIND( rb_head
, &(hdr
.rb_head_store
), &(cur
->store
));
64 panic("NO SUCH ENTRY %p. Gave back %p", *vm_entry
, *wrong_vme
);
66 panic("Cur: %p, L: %p, R: %p", VME_FOR_STORE(rb_entry
), VME_FOR_STORE(RB_LEFT(rb_entry
,entry
)), VME_FOR_STORE(RB_RIGHT(rb_entry
,entry
)));
70 boolean_t
vm_map_store_lookup_entry_rb( vm_map_t map
, vm_map_offset_t address
, vm_map_entry_t
*vm_entry
)
72 struct vm_map_header hdr
= map
->hdr
;
73 struct vm_map_store
*rb_entry
= RB_ROOT(&(hdr
.rb_head_store
));
74 vm_map_entry_t cur
= vm_map_to_entry(map
);
75 vm_map_entry_t prev
= VM_MAP_ENTRY_NULL
;
77 while (rb_entry
!= (struct vm_map_store
*)NULL
) {
78 cur
= VME_FOR_STORE(rb_entry
);
79 if(cur
== VM_MAP_ENTRY_NULL
)
81 if (address
>= cur
->vme_start
) {
82 if (address
< cur
->vme_end
) {
86 rb_entry
= RB_RIGHT(rb_entry
, entry
);
89 rb_entry
= RB_LEFT(rb_entry
, entry
);
92 if( prev
== VM_MAP_ENTRY_NULL
){
93 prev
= vm_map_to_entry(map
);
99 void vm_map_store_entry_link_rb( struct vm_map_header
*mapHdr
, __unused vm_map_entry_t after_where
, vm_map_entry_t entry
)
101 struct rb_head
*rbh
= &(mapHdr
->rb_head_store
);
102 struct vm_map_store
*store
= &(entry
->store
);
103 struct vm_map_store
*tmp_store
;
104 if((tmp_store
= RB_INSERT( rb_head
, rbh
, store
)) != NULL
) {
105 panic("VMSEL: INSERT FAILED: 0x%lx, 0x%lx, 0x%lx, 0x%lx", (uintptr_t)entry
->vme_start
, (uintptr_t)entry
->vme_end
,
106 (uintptr_t)(VME_FOR_STORE(tmp_store
))->vme_start
, (uintptr_t)(VME_FOR_STORE(tmp_store
))->vme_end
);
110 void vm_map_store_entry_unlink_rb( struct vm_map_header
*mapHdr
, vm_map_entry_t entry
)
112 struct rb_head
*rbh
= &(mapHdr
->rb_head_store
);
113 struct vm_map_store
*rb_entry
;
114 struct vm_map_store
*store
= &(entry
->store
);
116 rb_entry
= RB_FIND( rb_head
, rbh
, store
);
118 panic("NO ENTRY TO DELETE");
119 RB_REMOVE( rb_head
, rbh
, store
);
122 void vm_map_store_copy_insert_rb( vm_map_t map
, __unused vm_map_entry_t after_where
, vm_map_copy_t copy
)
124 struct vm_map_header
*mapHdr
= &(map
->hdr
);
125 struct rb_head
*rbh
= &(mapHdr
->rb_head_store
);
126 struct vm_map_store
*store
;
127 vm_map_entry_t entry
= vm_map_copy_first_entry(copy
);
128 int inserted
=0, nentries
= copy
->cpy_hdr
.nentries
;
130 while (entry
!= vm_map_copy_to_entry(copy
) && nentries
> 0) {
131 vm_map_entry_t prev
= entry
;
132 store
= &(entry
->store
);
133 if( RB_INSERT( rb_head
, rbh
, store
) != NULL
){
134 panic("VMSCIR1: INSERT FAILED: %d: %p, %p, %p, 0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%lx",inserted
, prev
, entry
, vm_map_copy_to_entry(copy
),
135 (uintptr_t)prev
->vme_start
, (uintptr_t)prev
->vme_end
, (uintptr_t)entry
->vme_start
, (uintptr_t)entry
->vme_end
,
136 (uintptr_t)(VME_FOR_STORE(rbh
->rbh_root
))->vme_start
, (uintptr_t)(VME_FOR_STORE(rbh
->rbh_root
))->vme_end
);
138 #if MAP_ENTRY_INSERTION_DEBUG
139 fastbacktrace(&entry
->vme_insertion_bt
[0],
140 (sizeof (entry
->vme_insertion_bt
) / sizeof (uintptr_t)));
142 entry
= entry
->vme_next
;
150 vm_map_store_copy_reset_rb( vm_map_copy_t copy
, vm_map_entry_t entry
, int nentries
)
152 struct vm_map_header
*mapHdr
= &(copy
->cpy_hdr
);
153 struct rb_head
*rbh
= &(mapHdr
->rb_head_store
);
154 struct vm_map_store
*store
;
157 while (entry
!= vm_map_copy_to_entry(copy
) && nentries
> 0) {
158 store
= &(entry
->store
);
159 RB_REMOVE( rb_head
, rbh
, store
);
160 entry
= entry
->vme_next
;
166 extern zone_t vm_map_holes_zone
; /* zone for vm map holes (vm_map_links) structures */
169 vm_map_combine_hole(vm_map_t map
, vm_map_entry_t hole_entry
);
171 vm_map_combine_hole(__unused vm_map_t map
, vm_map_entry_t hole_entry
)
174 vm_map_entry_t middle_hole_entry
, last_hole_entry
;
176 hole_entry
->vme_end
= hole_entry
->vme_next
->vme_end
;
178 middle_hole_entry
= hole_entry
->vme_next
;
179 last_hole_entry
= middle_hole_entry
->vme_next
;
181 assert(last_hole_entry
->vme_prev
== middle_hole_entry
);
182 assert(middle_hole_entry
->vme_end
!= last_hole_entry
->vme_start
);
184 last_hole_entry
->vme_prev
= hole_entry
;
185 hole_entry
->vme_next
= last_hole_entry
;
187 middle_hole_entry
->vme_prev
= NULL
;
188 middle_hole_entry
->vme_next
= NULL
;
190 zfree(vm_map_holes_zone
, middle_hole_entry
);
192 assert(hole_entry
->vme_start
< hole_entry
->vme_end
);
193 assert(last_hole_entry
->vme_start
< last_hole_entry
->vme_end
);
198 vm_map_delete_hole(vm_map_t map
, vm_map_entry_t hole_entry
);
200 vm_map_delete_hole(vm_map_t map
, vm_map_entry_t hole_entry
)
202 if (hole_entry
== (vm_map_entry_t
) map
->holes_list
) {
204 if (hole_entry
->vme_next
== (vm_map_entry_t
) map
->holes_list
) {
206 map
->holes_list
= NULL
;
207 SAVE_HINT_HOLE_WRITE(map
, NULL
);
210 vm_map_entry_t l_next
, l_prev
;
212 l_next
= (vm_map_entry_t
) map
->holes_list
->next
;
213 l_prev
= (vm_map_entry_t
) map
->holes_list
->prev
;
214 map
->holes_list
= (struct vm_map_links
*) l_next
;
216 l_next
->vme_prev
= l_prev
;
217 l_prev
->vme_next
= l_next
;
219 SAVE_HINT_HOLE_WRITE(map
, (struct vm_map_links
*) l_next
);
223 SAVE_HINT_HOLE_WRITE(map
, (struct vm_map_links
*) hole_entry
->vme_prev
);
225 hole_entry
->vme_prev
->vme_next
= hole_entry
->vme_next
;
226 hole_entry
->vme_next
->vme_prev
= hole_entry
->vme_prev
;
229 hole_entry
->vme_next
= NULL
;
230 hole_entry
->vme_prev
= NULL
;
231 zfree(vm_map_holes_zone
, hole_entry
);
241 check_map_sanity(vm_map_t map
, vm_map_entry_t old_hole_entry
)
243 vm_map_entry_t hole_entry
, next_hole_entry
;
244 vm_map_entry_t map_entry
, next_map_entry
;
246 if (map
->holes_list
== NULL
) {
251 hole_entry
= (vm_map_entry_t
) map
->holes_list
;
252 next_hole_entry
= hole_entry
->vme_next
;
254 map_entry
= vm_map_first_entry(map
);
255 next_map_entry
= map_entry
->vme_next
;
257 while(map_entry
->vme_start
> hole_entry
->vme_start
) {
258 hole_entry
= next_hole_entry
;
259 next_hole_entry
= hole_entry
->vme_next
;
261 if (hole_entry
== (vm_map_entry_t
)map
->holes_list
)
265 while (map_entry
!= vm_map_to_entry(map
)) {
267 if (map_entry
->vme_start
>= map
->max_offset
)
270 if (map_entry
->vme_end
!= map_entry
->vme_next
->vme_start
) {
272 if (map_entry
->vme_next
== vm_map_to_entry(map
))
275 if (hole_entry
->vme_start
!= map_entry
->vme_end
) {
276 panic("hole_entry not aligned %p(0x%llx), %p (0x%llx), %p", hole_entry
, (unsigned long long)hole_entry
->vme_start
, map_entry
->vme_next
, (unsigned long long)map_entry
->vme_end
, old_hole_entry
);
277 assert(hole_entry
->vme_start
== map_entry
->vme_end
);
280 if (hole_entry
->vme_end
!= map_entry
->vme_next
->vme_start
) {
281 panic("hole_entry not next aligned %p(0x%llx), %p (0x%llx), %p", hole_entry
, (unsigned long long)hole_entry
->vme_end
, map_entry
->vme_next
, (unsigned long long)map_entry
->vme_next
->vme_start
, old_hole_entry
);
282 assert(hole_entry
->vme_end
== map_entry
->vme_next
->vme_start
);
285 hole_entry
= next_hole_entry
;
286 next_hole_entry
= hole_entry
->vme_next
;
288 if (hole_entry
== (vm_map_entry_t
)map
->holes_list
)
292 map_entry
= map_entry
->vme_next
;
300 copy_hole_info(vm_map_entry_t hole_entry
, vm_map_entry_t old_hole_entry
)
302 old_hole_entry
->vme_prev
= hole_entry
->vme_prev
;
303 old_hole_entry
->vme_next
= hole_entry
->vme_next
;
304 old_hole_entry
->vme_start
= hole_entry
->vme_start
;
305 old_hole_entry
->vme_end
= hole_entry
->vme_end
;
310 update_holes_on_entry_deletion(vm_map_t map
, vm_map_entry_t old_entry
);
312 update_holes_on_entry_deletion(vm_map_t map
, vm_map_entry_t old_entry
)
315 * Dealing with the deletion of an older entry.
318 vm_map_entry_t hole_entry
, next_hole_entry
;
320 struct vm_map_entry old_hole_entry
;
322 boolean_t create_new_hole
= TRUE
;
324 hole_entry
= (vm_map_entry_t
) map
->hole_hint
;
328 if (hole_entry
->vme_end
== old_entry
->vme_start
) {
330 * Found a hole right after above our entry.
334 } else if (hole_entry
->vme_start
== old_entry
->vme_end
) {
336 if (hole_entry
!= (vm_map_entry_t
) map
->holes_list
) {
339 * Found a hole right after below our entry but
340 * make sure we don't erroneously extend backwards.
345 hole_entry
= hole_entry
->vme_prev
;
348 } else if (hole_entry
->vme_start
> old_entry
->vme_end
) {
351 * Useless hint. Start from the top.
354 hole_entry
= (vm_map_entry_t
) map
->holes_list
;
357 if (hole_entry
!= (vm_map_entry_t
) map
->holes_list
) {
358 if (hole_entry
->vme_start
> old_entry
->vme_start
) {
359 panic("Hole hint failed: Hole entry start: 0x%llx, entry start: 0x%llx, map hole start: 0x%llx, map hint start: 0x%llx\n",
360 (unsigned long long)hole_entry
->vme_start
,
361 (unsigned long long)old_entry
->vme_start
,
362 (unsigned long long)map
->holes_list
->start
,
363 (unsigned long long)map
->hole_hint
->start
);
365 if (hole_entry
->vme_end
> old_entry
->vme_start
) {
366 panic("Hole hint failed: Hole entry end: 0x%llx, entry start: 0x%llx, map hole start: 0x%llx, map hint start: 0x%llx\n",
367 (unsigned long long)hole_entry
->vme_end
,
368 (unsigned long long)old_entry
->vme_start
,
369 (unsigned long long)map
->holes_list
->start
,
370 (unsigned long long)map
->hole_hint
->start
);
376 next_hole_entry
= hole_entry
->vme_next
;
379 * Hole is right above the entry.
381 if (hole_entry
->vme_end
== old_entry
->vme_start
) {
384 copy_hole_info(hole_entry
, &old_hole_entry
);
388 * Is there another hole right below the entry?
389 * Can we combine holes?
392 if (old_entry
->vme_end
== hole_entry
->vme_next
->vme_start
) {
394 vm_map_combine_hole(map
, hole_entry
);
397 hole_entry
->vme_end
= old_entry
->vme_end
;
399 create_new_hole
= FALSE
;
401 check_map_sanity(map
, &old_hole_entry
);
407 * Hole is right below the entry.
409 if (hole_entry
->vme_start
== old_entry
->vme_end
) {
412 copy_hole_info(hole_entry
, &old_hole_entry
);
415 hole_entry
->vme_start
= old_entry
->vme_start
;
416 create_new_hole
= FALSE
;
419 check_map_sanity(map
, &old_hole_entry
);
425 * Hole is beyond our entry. Let's go back to the last hole
426 * before our entry so we have the right place to link up the
427 * new hole that will be needed.
429 if (hole_entry
->vme_start
> old_entry
->vme_end
) {
432 copy_hole_info(hole_entry
, &old_hole_entry
);
435 if (hole_entry
!= (vm_map_entry_t
) map
->holes_list
) {
436 assert(hole_entry
->vme_start
!= old_entry
->vme_start
);
437 hole_entry
= hole_entry
->vme_prev
;
442 hole_entry
= next_hole_entry
;
444 if (hole_entry
== (vm_map_entry_t
)map
->holes_list
) {
445 hole_entry
= hole_entry
->vme_prev
;
451 if (create_new_hole
) {
452 struct vm_map_links
*new_hole_entry
= NULL
;
453 vm_map_entry_t l_next
, l_prev
;
455 new_hole_entry
= zalloc(vm_map_holes_zone
);
458 * First hole in the map?
460 * A hole that is located above the current first hole in the map?
462 if (map
->holes_list
== NULL
|| (hole_entry
== (vm_map_entry_t
) map
->holes_list
&& hole_entry
->vme_start
> old_entry
->vme_start
)) {
464 if (map
->holes_list
== NULL
) {
466 map
->holes_list
= new_hole_entry
;
467 new_hole_entry
->prev
= new_hole_entry
->next
= (vm_map_entry_t
)map
->holes_list
;
470 l_next
= (vm_map_entry_t
) map
->holes_list
;
471 l_prev
= map
->holes_list
->prev
;
472 map
->holes_list
= new_hole_entry
;
473 new_hole_entry
->next
= l_next
;
474 new_hole_entry
->prev
= l_prev
;
476 l_prev
->vme_next
= l_next
->vme_prev
= (vm_map_entry_t
) new_hole_entry
;
480 l_next
= hole_entry
->vme_next
;
481 l_prev
= hole_entry
->vme_next
->vme_prev
;
483 new_hole_entry
->prev
= hole_entry
;
484 new_hole_entry
->next
= l_next
;
486 hole_entry
->vme_next
= (vm_map_entry_t
)new_hole_entry
;
487 l_next
->vme_prev
= (vm_map_entry_t
) new_hole_entry
;
490 new_hole_entry
->start
= old_entry
->vme_start
;
491 new_hole_entry
->end
= old_entry
->vme_end
;
493 hole_entry
= (vm_map_entry_t
) new_hole_entry
;
495 assert(new_hole_entry
->start
< new_hole_entry
->end
);
499 check_map_sanity(map
, &old_hole_entry
);
502 SAVE_HINT_HOLE_WRITE(map
, (struct vm_map_links
*) hole_entry
);
508 update_holes_on_entry_creation(vm_map_t map
, vm_map_entry_t new_entry
);
510 update_holes_on_entry_creation(vm_map_t map
, vm_map_entry_t new_entry
)
513 vm_map_entry_t hole_entry
, next_hole_entry
;
515 struct vm_map_entry old_hole_entry
;
516 vm_map_entry_t tmp_entry
;
517 boolean_t check_map_with_hole_sanity
= TRUE
;
521 * Case A: The entry is aligned exactly with the start and end of the hole.
522 * This will delete the hole.
524 * Case B: The entry is completely within a hole but NOT aligned with the start/end of the hole.
525 * This will split a hole.
527 * Case C: The entry overlaps with the hole. The entry could be extending upwards (C1) or downwards (C2).
528 * This will reduce the size of the hole or delete the hole completely if it is smaller than the entry.
531 hole_entry
= (vm_map_entry_t
) map
->holes_list
;
533 next_hole_entry
= hole_entry
->vme_next
;
539 * If the entry doesn't exist in the RB tree, we are likely dealing with copy maps where
540 * the entries belonging to the copy map are linked into the list of entries silently and
541 * then added to the RB-tree later on.
542 * So sanity checks are useless in that case.
544 check_map_with_hole_sanity
= vm_map_lookup_entry(map
, new_entry
->vme_start
, &tmp_entry
);
547 if (hole_entry
->vme_start
== new_entry
->vme_start
&&
548 hole_entry
->vme_end
== new_entry
->vme_end
) {
552 copy_hole_info(hole_entry
, &old_hole_entry
);
555 if (hole_entry
== (vm_map_entry_t
) map
->holes_list
) {
557 if (hole_entry
->vme_next
== (vm_map_entry_t
) map
->holes_list
) {
559 next_hole_entry
= vm_map_last_entry(map
);
560 assert(next_hole_entry
->vme_end
>= map
->max_offset
);
564 vm_map_delete_hole(map
, hole_entry
);
567 if (check_map_with_hole_sanity
)
568 check_map_sanity(map
, &old_hole_entry
);
572 } else if (hole_entry
->vme_start
< new_entry
->vme_start
&&
573 hole_entry
->vme_end
> new_entry
->vme_end
) {
576 struct vm_map_links
*new_hole_entry
= NULL
;
578 new_hole_entry
= zalloc(vm_map_holes_zone
);
581 copy_hole_info(hole_entry
, &old_hole_entry
);
584 new_hole_entry
->prev
= hole_entry
;
585 new_hole_entry
->next
= hole_entry
->vme_next
;
586 hole_entry
->vme_next
->vme_prev
= (vm_map_entry_t
)new_hole_entry
;
587 hole_entry
->vme_next
= (vm_map_entry_t
)new_hole_entry
;
589 new_hole_entry
->start
= new_entry
->vme_end
;
590 new_hole_entry
->end
= hole_entry
->vme_end
;
591 hole_entry
->vme_end
= new_entry
->vme_start
;
593 assert(hole_entry
->vme_start
< hole_entry
->vme_end
);
594 assert(new_hole_entry
->start
< new_hole_entry
->end
);
597 if (check_map_with_hole_sanity
)
598 check_map_sanity(map
, &old_hole_entry
);
601 SAVE_HINT_HOLE_WRITE(map
, (struct vm_map_links
*) hole_entry
);
604 } else if ((new_entry
->vme_start
<= hole_entry
->vme_start
) && (hole_entry
->vme_start
< new_entry
->vme_end
)) {
607 * Case C1: Entry moving upwards and a part/full hole lies within the bounds of the entry.
611 copy_hole_info(hole_entry
, &old_hole_entry
);
614 if (hole_entry
->vme_end
<= new_entry
->vme_end
) {
616 vm_map_delete_hole(map
, hole_entry
);
618 hole_entry
->vme_start
= new_entry
->vme_end
;
619 SAVE_HINT_HOLE_WRITE(map
, (struct vm_map_links
*) hole_entry
);
623 if (check_map_with_hole_sanity
)
624 check_map_sanity(map
, &old_hole_entry
);
629 } else if ((new_entry
->vme_start
< hole_entry
->vme_end
) && (hole_entry
->vme_end
<= new_entry
->vme_end
)) {
632 * Case C2: Entry moving downwards and a part/full hole lies within the bounds of the entry.
636 copy_hole_info(hole_entry
, &old_hole_entry
);
639 if (hole_entry
->vme_start
>= new_entry
->vme_start
) {
640 vm_map_delete_hole(map
, hole_entry
);
642 hole_entry
->vme_end
= new_entry
->vme_start
;
643 SAVE_HINT_HOLE_WRITE(map
, (struct vm_map_links
*) hole_entry
);
647 if (check_map_with_hole_sanity
)
648 check_map_sanity(map
, &old_hole_entry
);
654 hole_entry
= next_hole_entry
;
655 next_hole_entry
= hole_entry
->vme_next
;
657 if (hole_entry
== (vm_map_entry_t
)map
->holes_list
)
661 panic("Illegal action: h1: %p, s:0x%llx, e:0x%llx...h2:%p, s:0x%llx, e:0x%llx...h3:0x%p, s:0x%llx, e:0x%llx\n",
662 hole_entry
->vme_prev
,
663 (unsigned long long)hole_entry
->vme_prev
->vme_start
,
664 (unsigned long long)hole_entry
->vme_prev
->vme_end
,
666 (unsigned long long)hole_entry
->vme_start
,
667 (unsigned long long)hole_entry
->vme_end
,
668 hole_entry
->vme_next
,
669 (unsigned long long)hole_entry
->vme_next
->vme_start
,
670 (unsigned long long)hole_entry
->vme_next
->vme_end
);
675 update_first_free_rb(vm_map_t map
, vm_map_entry_t entry
, boolean_t new_entry_creation
)
678 if (map
->holelistenabled
) {
681 * Holes can be used to track ranges all the way up to MACH_VM_MAX_ADDRESS or more (e.g. kernel map).
683 vm_map_offset_t max_valid_offset
= (map
->max_offset
> MACH_VM_MAX_ADDRESS
) ? map
->max_offset
: MACH_VM_MAX_ADDRESS
;
686 * Clipping an entry will not result in the creation/deletion/modification of
687 * a hole. Those calls pass NULL for their target entry.
694 * Commpage is pinned beyond the map's max offset. That shouldn't affect the
695 * holes within the bounds of the map.
697 if (vm_map_trunc_page(entry
->vme_start
, VM_MAP_PAGE_MASK(map
)) >= max_valid_offset
) {
705 * - A new entry has already been added to the map
707 * - An older entry has already been deleted from the map
709 * We are updating the hole list after the fact (except in one special case involving copy maps).
713 if (new_entry_creation
) {
715 update_holes_on_entry_creation(map
, entry
);
718 update_holes_on_entry_deletion(map
, entry
);