osfmk/vm/vm_map_store_rb.c (apple/xnu, xnu-4903.241.1)
/*
 * Copyright (c) 2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <kern/backtrace.h>
#include <vm/vm_map_store_rb.h>

RB_GENERATE(rb_head, vm_map_store, entry, rb_node_compare);

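/*
 * VME_FOR_STORE maps an embedded vm_map_store node back to its enclosing
 * vm_map_entry. This relies on the store node sitting immediately after the
 * vm_map_links structure at the start of struct vm_map_entry, so stepping
 * back by sizeof(struct vm_map_links) yields the entry itself.
 */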
#define VME_FOR_STORE( store) \
    (vm_map_entry_t)(((unsigned long)store) - ((unsigned long)sizeof(struct vm_map_links)))

void
vm_map_store_init_rb( struct vm_map_header* hdr )
{
    RB_INIT(&(hdr->rb_head_store));
}

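/*
 * Comparator used by the RB_GENERATE'd tree above. Entries are keyed by
 * their address range: a node compares "less" if it starts below the parent,
 * "greater" if it starts at or beyond the parent's end, and "equal" if the
 * ranges overlap. The overlap-means-equal convention is what lets RB_FIND
 * locate the entry containing a given address and lets RB_INSERT detect a
 * colliding (overlapping) entry.
 */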
int rb_node_compare(struct vm_map_store *node, struct vm_map_store *parent)
{
    vm_map_entry_t vme_c;
    vm_map_entry_t vme_p;

    vme_c = VME_FOR_STORE(node);
    vme_p = VME_FOR_STORE(parent);
    if (vme_c->vme_start < vme_p->vme_start)
        return -1;
    if (vme_c->vme_start >= vme_p->vme_end)
        return 1;
    return 0;
}

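/*
 * Debugging aid: given an entry we expected to find, search the tree for it
 * and panic either way -- reporting that the entry is missing from the tree,
 * or dumping the node that was found along with its left and right children.
 */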
void vm_map_store_walk_rb( vm_map_t map, vm_map_entry_t *wrong_vme, vm_map_entry_t *vm_entry)
{
    struct vm_map_header hdr = map->hdr;
    struct vm_map_store *rb_entry = RB_ROOT(&(hdr.rb_head_store));
    vm_map_entry_t cur = *vm_entry;

    rb_entry = RB_FIND( rb_head, &(hdr.rb_head_store), &(cur->store));
    if (rb_entry == NULL)
        panic("NO SUCH ENTRY %p. Gave back %p", *vm_entry, *wrong_vme);
    else
        panic("Cur: %p, L: %p, R: %p", VME_FOR_STORE(rb_entry), VME_FOR_STORE(RB_LEFT(rb_entry, entry)), VME_FOR_STORE(RB_RIGHT(rb_entry, entry)));
}

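/*
 * Look up the map entry containing "address" by walking down the tree.
 * Returns TRUE with *vm_entry set to the containing entry, or FALSE with
 * *vm_entry set to the closest preceding entry (or the map header if the
 * address lies below every entry in the map).
 */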
boolean_t vm_map_store_lookup_entry_rb( vm_map_t map, vm_map_offset_t address, vm_map_entry_t *vm_entry)
{
    struct vm_map_header hdr = map->hdr;
    struct vm_map_store *rb_entry = RB_ROOT(&(hdr.rb_head_store));
    vm_map_entry_t cur = vm_map_to_entry(map);
    vm_map_entry_t prev = VM_MAP_ENTRY_NULL;

    while (rb_entry != (struct vm_map_store*)NULL) {
        cur = VME_FOR_STORE(rb_entry);
        if (cur == VM_MAP_ENTRY_NULL)
            panic("no entry");
        if (address >= cur->vme_start) {
            if (address < cur->vme_end) {
                *vm_entry = cur;
                return TRUE;
            }
            rb_entry = RB_RIGHT(rb_entry, entry);
            prev = cur;
        } else {
            rb_entry = RB_LEFT(rb_entry, entry);
        }
    }
    if (prev == VM_MAP_ENTRY_NULL) {
        prev = vm_map_to_entry(map);
    }
    *vm_entry = prev;
    return FALSE;
}

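/*
 * Insert an entry's store node into the header's red-black tree. The
 * doubly-linked entry list is maintained elsewhere by the generic store
 * layer, so "after_where" is unused here. A non-NULL return from RB_INSERT
 * means an existing node compared equal (i.e. the ranges overlap), which is
 * fatal.
 */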
void vm_map_store_entry_link_rb( struct vm_map_header *mapHdr, __unused vm_map_entry_t after_where, vm_map_entry_t entry)
{
    struct rb_head *rbh = &(mapHdr->rb_head_store);
    struct vm_map_store *store = &(entry->store);
    struct vm_map_store *tmp_store;
    if ((tmp_store = RB_INSERT( rb_head, rbh, store )) != NULL) {
        panic("VMSEL: INSERT FAILED: 0x%lx, 0x%lx, 0x%lx, 0x%lx", (uintptr_t)entry->vme_start, (uintptr_t)entry->vme_end,
              (uintptr_t)(VME_FOR_STORE(tmp_store))->vme_start, (uintptr_t)(VME_FOR_STORE(tmp_store))->vme_end);
    }
}

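/*
 * Remove an entry's store node from the tree. The node is looked up first
 * so that an entry missing from the tree panics instead of silently
 * corrupting the structure.
 */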
void vm_map_store_entry_unlink_rb( struct vm_map_header *mapHdr, vm_map_entry_t entry)
{
    struct rb_head *rbh = &(mapHdr->rb_head_store);
    struct vm_map_store *rb_entry;
    struct vm_map_store *store = &(entry->store);

    rb_entry = RB_FIND( rb_head, rbh, store);
    if (rb_entry == NULL)
        panic("NO ENTRY TO DELETE");
    RB_REMOVE( rb_head, rbh, store );
}

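/*
 * Drop up to "nentries" entries of a vm_map_copy from its red-black tree,
 * starting at "entry" and following the entry list. Only the tree is reset;
 * the linked list of entries itself is left intact.
 */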
void
vm_map_store_copy_reset_rb( vm_map_copy_t copy, vm_map_entry_t entry, int nentries )
{
    struct vm_map_header *mapHdr = &(copy->cpy_hdr);
    struct rb_head *rbh = &(mapHdr->rb_head_store);
    struct vm_map_store *store;
    int deleted = 0;

    while (entry != vm_map_copy_to_entry(copy) && nentries > 0) {
        store = &(entry->store);
        RB_REMOVE( rb_head, rbh, store );
        entry = entry->vme_next;
        deleted++;
        nentries--;
    }
}

extern zone_t vm_map_holes_zone;    /* zone for vm map holes (vm_map_links) structures */

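/*
 * Hole list maintenance.
 *
 * When map->holelistenabled is set, the map tracks its unallocated ranges
 * ("holes") in a circular, address-ordered, doubly-linked list of
 * vm_map_links allocated from vm_map_holes_zone, headed by map->holes_list
 * and cached in map->hole_hint. The helpers below keep that list in sync as
 * entries are created and deleted.
 *
 * vm_map_combine_hole() merges hole_entry with the hole immediately
 * following it: the first hole absorbs the second one's end address, and the
 * second is unlinked and freed.
 */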
void
vm_map_combine_hole(vm_map_t map, vm_map_entry_t hole_entry);
void
vm_map_combine_hole(__unused vm_map_t map, vm_map_entry_t hole_entry)
{
    vm_map_entry_t middle_hole_entry, last_hole_entry;

    hole_entry->vme_end = hole_entry->vme_next->vme_end;

    middle_hole_entry = hole_entry->vme_next;
    last_hole_entry = middle_hole_entry->vme_next;

    assert(last_hole_entry->vme_prev == middle_hole_entry);
    assert(middle_hole_entry->vme_end != last_hole_entry->vme_start);

    last_hole_entry->vme_prev = hole_entry;
    hole_entry->vme_next = last_hole_entry;

    middle_hole_entry->vme_prev = NULL;
    middle_hole_entry->vme_next = NULL;

    zfree(vm_map_holes_zone, middle_hole_entry);

    assert(hole_entry->vme_start < hole_entry->vme_end);
    assert(last_hole_entry->vme_start < last_hole_entry->vme_end);
}

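/*
 * Unlink a hole from the circular list and free it. If it is the head of the
 * list, the head (and the hint) move to the next hole, or the list becomes
 * empty when it was the only hole.
 */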
void
vm_map_delete_hole(vm_map_t map, vm_map_entry_t hole_entry);
void
vm_map_delete_hole(vm_map_t map, vm_map_entry_t hole_entry)
{
    if (hole_entry == CAST_TO_VM_MAP_ENTRY(map->holes_list)) {

        if (hole_entry->vme_next == CAST_TO_VM_MAP_ENTRY(map->holes_list)) {

            map->holes_list = NULL;
            SAVE_HINT_HOLE_WRITE(map, NULL);
        } else {

            vm_map_entry_t l_next, l_prev;

            l_next = (vm_map_entry_t) map->holes_list->next;
            l_prev = (vm_map_entry_t) map->holes_list->prev;
            map->holes_list = (struct vm_map_links*) l_next;

            l_next->vme_prev = l_prev;
            l_prev->vme_next = l_next;

            SAVE_HINT_HOLE_WRITE(map, (struct vm_map_links*) l_next);
        }
    } else {

        SAVE_HINT_HOLE_WRITE(map, (struct vm_map_links*) hole_entry->vme_prev);

        hole_entry->vme_prev->vme_next = hole_entry->vme_next;
        hole_entry->vme_next->vme_prev = hole_entry->vme_prev;
    }

    hole_entry->vme_next = NULL;
    hole_entry->vme_prev = NULL;
    zfree(vm_map_holes_zone, hole_entry);
}

/*
 * For Debugging.
 */

#if DEBUG
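/*
 * Walk the entry list and the hole list in parallel and panic if any gap
 * between consecutive map entries is not exactly described by a hole.
 */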
static void
check_map_sanity(vm_map_t map, vm_map_entry_t old_hole_entry)
{
    vm_map_entry_t hole_entry, next_hole_entry;
    vm_map_entry_t map_entry, next_map_entry;

    if (map->holes_list == NULL) {
        return;
    }

    hole_entry = (vm_map_entry_t) map->holes_list;
    next_hole_entry = hole_entry->vme_next;

    map_entry = vm_map_first_entry(map);
    next_map_entry = map_entry->vme_next;

    while (map_entry->vme_start > hole_entry->vme_start) {
        hole_entry = next_hole_entry;
        next_hole_entry = hole_entry->vme_next;

        if (hole_entry == (vm_map_entry_t)map->holes_list)
            break;
    }

    while (map_entry != vm_map_to_entry(map)) {

        if (map_entry->vme_start >= map->max_offset)
            break;

        if (map_entry->vme_end != map_entry->vme_next->vme_start) {

            if (map_entry->vme_next == vm_map_to_entry(map))
                break;

            if (hole_entry->vme_start != map_entry->vme_end) {
                panic("hole_entry not aligned %p(0x%llx), %p (0x%llx), %p", hole_entry, (unsigned long long)hole_entry->vme_start, map_entry->vme_next, (unsigned long long)map_entry->vme_end, old_hole_entry);
                assert(hole_entry->vme_start == map_entry->vme_end);
            }

            if (hole_entry->vme_end != map_entry->vme_next->vme_start) {
                panic("hole_entry not next aligned %p(0x%llx), %p (0x%llx), %p", hole_entry, (unsigned long long)hole_entry->vme_end, map_entry->vme_next, (unsigned long long)map_entry->vme_next->vme_start, old_hole_entry);
                assert(hole_entry->vme_end == map_entry->vme_next->vme_start);
            }

            hole_entry = next_hole_entry;
            next_hole_entry = hole_entry->vme_next;

            if (hole_entry == (vm_map_entry_t)map->holes_list)
                break;
        }

        map_entry = map_entry->vme_next;
    }
}

/*
 * For debugging.
 */
static void
copy_hole_info(vm_map_entry_t hole_entry, vm_map_entry_t old_hole_entry)
{
    old_hole_entry->vme_prev = hole_entry->vme_prev;
    old_hole_entry->vme_next = hole_entry->vme_next;
    old_hole_entry->vme_start = hole_entry->vme_start;
    old_hole_entry->vme_end = hole_entry->vme_end;
}
#endif /* DEBUG */

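/*
 * An entry has been deleted, so its range becomes free space again. Starting
 * from map->hole_hint (or the head of the hole list if the hint is useless),
 * either grow an adjacent hole to cover the freed range -- combining two
 * holes when the range bridges them -- or splice in a newly allocated hole.
 */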
void
update_holes_on_entry_deletion(vm_map_t map, vm_map_entry_t old_entry);
void
update_holes_on_entry_deletion(vm_map_t map, vm_map_entry_t old_entry)
{
    /*
     * Dealing with the deletion of an older entry.
     */

    vm_map_entry_t hole_entry, next_hole_entry;
#if DEBUG
    struct vm_map_entry old_hole_entry;
#endif /* DEBUG */
    boolean_t create_new_hole = TRUE;

    hole_entry = CAST_TO_VM_MAP_ENTRY(map->hole_hint);

    if (hole_entry) {

        if (hole_entry->vme_end == old_entry->vme_start) {
            /*
             * Found a hole right above our entry.
             * Hit.
             */

        } else if (hole_entry->vme_start == old_entry->vme_end) {

            if (hole_entry != CAST_TO_VM_MAP_ENTRY(map->holes_list)) {

                /*
                 * Found a hole right below our entry, but make sure
                 * we don't erroneously extend backwards.
                 *
                 * Hit.
                 */

                hole_entry = hole_entry->vme_prev;
            }

        } else if (hole_entry->vme_start > old_entry->vme_end) {

            /*
             * Useless hint. Start from the top.
             */

            hole_entry = CAST_TO_VM_MAP_ENTRY(map->holes_list);
        }

        if (hole_entry != CAST_TO_VM_MAP_ENTRY(map->holes_list)) {
            if (hole_entry->vme_start > old_entry->vme_start) {
                panic("Hole hint failed: Hole entry start: 0x%llx, entry start: 0x%llx, map hole start: 0x%llx, map hint start: 0x%llx\n",
                      (unsigned long long)hole_entry->vme_start,
                      (unsigned long long)old_entry->vme_start,
                      (unsigned long long)map->holes_list->start,
                      (unsigned long long)map->hole_hint->start);
            }
            if (hole_entry->vme_end > old_entry->vme_start) {
                panic("Hole hint failed: Hole entry end: 0x%llx, entry start: 0x%llx, map hole start: 0x%llx, map hint start: 0x%llx\n",
                      (unsigned long long)hole_entry->vme_end,
                      (unsigned long long)old_entry->vme_start,
                      (unsigned long long)map->holes_list->start,
                      (unsigned long long)map->hole_hint->start);
            }
        }

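        /*
         * Walk forward through the hole list from the chosen starting
         * point. An adjacent hole is grown in place (and combined with its
         * neighbour if the freed range bridges the two); if we pass the
         * entry without finding an adjacent hole, step back so a new hole
         * can be linked in below.
         */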
        while (1) {

            next_hole_entry = hole_entry->vme_next;

            /*
             * Hole is right above the entry.
             */
            if (hole_entry->vme_end == old_entry->vme_start) {

#if DEBUG
                copy_hole_info(hole_entry, &old_hole_entry);
#endif /* DEBUG */

                /*
                 * Is there another hole right below the entry?
                 * Can we combine holes?
                 */

                if (old_entry->vme_end == hole_entry->vme_next->vme_start) {

                    vm_map_combine_hole(map, hole_entry);
                } else {

                    hole_entry->vme_end = old_entry->vme_end;
                }
                create_new_hole = FALSE;
#if DEBUG
                check_map_sanity(map, &old_hole_entry);
#endif /* DEBUG */
                break;
            }

            /*
             * Hole is right below the entry.
             */
            if (hole_entry->vme_start == old_entry->vme_end) {

#if DEBUG
                copy_hole_info(hole_entry, &old_hole_entry);
#endif /* DEBUG */

                hole_entry->vme_start = old_entry->vme_start;
                create_new_hole = FALSE;

#if DEBUG
                check_map_sanity(map, &old_hole_entry);
#endif /* DEBUG */
                break;
            }

            /*
             * Hole is beyond our entry. Let's go back to the last hole
             * before our entry so we have the right place to link up the
             * new hole that will be needed.
             */
            if (hole_entry->vme_start > old_entry->vme_end) {

#if DEBUG
                copy_hole_info(hole_entry, &old_hole_entry);
#endif /* DEBUG */

                if (hole_entry != CAST_TO_VM_MAP_ENTRY(map->holes_list)) {
                    assert(hole_entry->vme_start != old_entry->vme_start);
                    hole_entry = hole_entry->vme_prev;
                }
                break;
            }

            hole_entry = next_hole_entry;

            if (hole_entry == CAST_TO_VM_MAP_ENTRY(map->holes_list)) {
                hole_entry = hole_entry->vme_prev;
                break;
            }
        }
    }

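    /*
     * No existing hole could absorb the freed range: allocate a fresh hole
     * covering exactly [old_entry->vme_start, old_entry->vme_end) and splice
     * it into the circular list, either as the new head or right after
     * hole_entry.
     */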
    if (create_new_hole) {
        struct vm_map_links *new_hole_entry = NULL;
        vm_map_entry_t l_next, l_prev;

        new_hole_entry = zalloc(vm_map_holes_zone);

        /*
         * First hole in the map?
         * OR
         * A hole that is located above the current first hole in the map?
         */
        if (map->holes_list == NULL || (hole_entry == CAST_TO_VM_MAP_ENTRY(map->holes_list) && hole_entry->vme_start > old_entry->vme_start)) {

            if (map->holes_list == NULL) {

                map->holes_list = new_hole_entry;
                new_hole_entry->prev = new_hole_entry->next = CAST_TO_VM_MAP_ENTRY(map->holes_list);
            } else {

                l_next = CAST_TO_VM_MAP_ENTRY(map->holes_list);
                l_prev = map->holes_list->prev;
                map->holes_list = new_hole_entry;
                new_hole_entry->next = l_next;
                new_hole_entry->prev = l_prev;

                l_prev->vme_next = l_next->vme_prev = CAST_TO_VM_MAP_ENTRY(new_hole_entry);
            }
        } else {

            l_next = hole_entry->vme_next;
            l_prev = hole_entry->vme_next->vme_prev;

            new_hole_entry->prev = hole_entry;
            new_hole_entry->next = l_next;

            hole_entry->vme_next = CAST_TO_VM_MAP_ENTRY(new_hole_entry);
            l_next->vme_prev = CAST_TO_VM_MAP_ENTRY(new_hole_entry);
        }

        new_hole_entry->start = old_entry->vme_start;
        new_hole_entry->end = old_entry->vme_end;

        hole_entry = CAST_TO_VM_MAP_ENTRY(new_hole_entry);

        assert(new_hole_entry->start < new_hole_entry->end);
    }

#if DEBUG
    check_map_sanity(map, &old_hole_entry);
#endif /* DEBUG */

    SAVE_HINT_HOLE_WRITE(map, (struct vm_map_links*) hole_entry);
    return;
}

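/*
 * A new entry has been linked into the map, consuming part of the free
 * space. Find the hole(s) it intersects and delete, split, or shrink them
 * accordingly; the case analysis is spelled out below.
 */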
void
update_holes_on_entry_creation(vm_map_t map, vm_map_entry_t new_entry);
void
update_holes_on_entry_creation(vm_map_t map, vm_map_entry_t new_entry)
{
    vm_map_entry_t hole_entry, next_hole_entry;
#if DEBUG
    struct vm_map_entry old_hole_entry;
    vm_map_entry_t tmp_entry;
    boolean_t check_map_with_hole_sanity = TRUE;
#endif /* DEBUG */

    /*
     * Case A: The entry is aligned exactly with the start and end of the hole.
     * This will delete the hole.
     *
     * Case B: The entry is completely within a hole but NOT aligned with the start/end of the hole.
     * This will split a hole.
     *
     * Case C: The entry overlaps with the hole. The entry could be extending upwards (C1) or downwards (C2).
     * This will reduce the size of the hole or delete the hole completely if it is smaller than the entry.
     */

    hole_entry = CAST_TO_VM_MAP_ENTRY(map->holes_list);
    assert(hole_entry);
    next_hole_entry = hole_entry->vme_next;

    while (1) {

#if DEBUG
        /*
         * If the entry doesn't exist in the RB tree, we are likely dealing with copy maps where
         * the entries belonging to the copy map are linked into the list of entries silently and
         * then added to the RB-tree later on.
         * So sanity checks are useless in that case.
         */
        check_map_with_hole_sanity = vm_map_lookup_entry(map, new_entry->vme_start, &tmp_entry);
#endif /* DEBUG */

        if (hole_entry->vme_start == new_entry->vme_start &&
            hole_entry->vme_end == new_entry->vme_end) {

            /* Case A */
#if DEBUG
            copy_hole_info(hole_entry, &old_hole_entry);
#endif /* DEBUG */

            /*
             * This check makes sense only for regular maps, not copy maps.
             * With a regular map, the VM entry is first linked and then
             * the hole is deleted. So the check below, which makes sure that
             * the map's bounds are being respected, is valid.
             * But for copy maps, the hole is deleted before the VM entry is
             * linked (vm_map_store_copy_insert) and so this check is invalid.
             *
            if (hole_entry == (vm_map_entry_t) map->holes_list) {

                if (hole_entry->vme_next == (vm_map_entry_t) map->holes_list) {

                    next_hole_entry = vm_map_last_entry(map);
                    assert(next_hole_entry->vme_end >= map->max_offset);
                }
            }
            */

            vm_map_delete_hole(map, hole_entry);

#if DEBUG
            if (check_map_with_hole_sanity)
                check_map_sanity(map, &old_hole_entry);
#endif /* DEBUG */
            return;

        } else if (hole_entry->vme_start < new_entry->vme_start &&
                   hole_entry->vme_end > new_entry->vme_end) {

            /* Case B */
            struct vm_map_links *new_hole_entry = NULL;

            new_hole_entry = zalloc(vm_map_holes_zone);

#if DEBUG
            copy_hole_info(hole_entry, &old_hole_entry);
#endif /* DEBUG */

            new_hole_entry->prev = hole_entry;
            new_hole_entry->next = hole_entry->vme_next;
            hole_entry->vme_next->vme_prev = CAST_TO_VM_MAP_ENTRY(new_hole_entry);
            hole_entry->vme_next = CAST_TO_VM_MAP_ENTRY(new_hole_entry);

            new_hole_entry->start = new_entry->vme_end;
            new_hole_entry->end = hole_entry->vme_end;
            hole_entry->vme_end = new_entry->vme_start;

            assert(hole_entry->vme_start < hole_entry->vme_end);
            assert(new_hole_entry->start < new_hole_entry->end);

#if DEBUG
            if (check_map_with_hole_sanity)
                check_map_sanity(map, &old_hole_entry);
#endif /* DEBUG */

            SAVE_HINT_HOLE_WRITE(map, (struct vm_map_links*) hole_entry);
            return;

        } else if ((new_entry->vme_start <= hole_entry->vme_start) && (hole_entry->vme_start < new_entry->vme_end)) {

            /*
             * Case C1: Entry moving upwards and a part/full hole lies within the bounds of the entry.
             */

#if DEBUG
            copy_hole_info(hole_entry, &old_hole_entry);
#endif /* DEBUG */

            if (hole_entry->vme_end <= new_entry->vme_end) {

                vm_map_delete_hole(map, hole_entry);
            } else {
                hole_entry->vme_start = new_entry->vme_end;
                SAVE_HINT_HOLE_WRITE(map, (struct vm_map_links*) hole_entry);
            }

#if DEBUG
            if (check_map_with_hole_sanity)
                check_map_sanity(map, &old_hole_entry);
#endif /* DEBUG */

            return;

        } else if ((new_entry->vme_start < hole_entry->vme_end) && (hole_entry->vme_end <= new_entry->vme_end)) {

            /*
             * Case C2: Entry moving downwards and a part/full hole lies within the bounds of the entry.
             */

#if DEBUG
            copy_hole_info(hole_entry, &old_hole_entry);
#endif /* DEBUG */

            if (hole_entry->vme_start >= new_entry->vme_start) {
                vm_map_delete_hole(map, hole_entry);
            } else {
                hole_entry->vme_end = new_entry->vme_start;
                SAVE_HINT_HOLE_WRITE(map, (struct vm_map_links*) hole_entry);
            }

#if DEBUG
            if (check_map_with_hole_sanity)
                check_map_sanity(map, &old_hole_entry);
#endif /* DEBUG */

            return;
        }

        hole_entry = next_hole_entry;
        next_hole_entry = hole_entry->vme_next;

        if (hole_entry == CAST_TO_VM_MAP_ENTRY(map->holes_list))
            break;
    }

    panic("Illegal action: h1: %p, s:0x%llx, e:0x%llx...h2:%p, s:0x%llx, e:0x%llx...h3:0x%p, s:0x%llx, e:0x%llx\n",
          hole_entry->vme_prev,
          (unsigned long long)hole_entry->vme_prev->vme_start,
          (unsigned long long)hole_entry->vme_prev->vme_end,
          hole_entry,
          (unsigned long long)hole_entry->vme_start,
          (unsigned long long)hole_entry->vme_end,
          hole_entry->vme_next,
          (unsigned long long)hole_entry->vme_next->vme_start,
          (unsigned long long)hole_entry->vme_next->vme_end);
}

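/*
 * In the red-black tree store there is no "first free" hint to recompute;
 * instead, when the map uses the hole list, entry creation and deletion
 * events are forwarded to the hole-update routines above. Clip operations
 * pass a NULL entry, and entries pinned beyond the map's usable range
 * (e.g. the commpage) are ignored.
 */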
void
update_first_free_rb(vm_map_t map, vm_map_entry_t entry, boolean_t new_entry_creation)
{
    if (map->holelistenabled) {

        /*
         * Holes can be used to track ranges all the way up to MACH_VM_MAX_ADDRESS or more (e.g. kernel map).
         */
        vm_map_offset_t max_valid_offset = (map->max_offset > MACH_VM_MAX_ADDRESS) ? map->max_offset : MACH_VM_MAX_ADDRESS;

        /*
         * Clipping an entry will not result in the creation/deletion/modification of
         * a hole. Those calls pass NULL for their target entry.
         */
        if (entry == NULL) {
            return;
        }

        /*
         * Commpage is pinned beyond the map's max offset. That shouldn't affect the
         * holes within the bounds of the map.
         */
        if (vm_map_trunc_page(entry->vme_start, VM_MAP_PAGE_MASK(map)) >= max_valid_offset) {
            return;
        }

        /*
         * Note:
         *
         * - A new entry has already been added to the map
         *   OR
         * - An older entry has already been deleted from the map
         *
         * We are updating the hole list after the fact (except in one special case involving copy maps).
         */

        if (new_entry_creation) {

            update_holes_on_entry_creation(map, entry);
        } else {

            update_holes_on_entry_deletion(map, entry);
        }
    }
}