/* [apple/xnu.git] osfmk/vm/vm_map_store_rb.c (xnu-6153.141.1) */
/*
 * Copyright (c) 2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <kern/backtrace.h>
#include <vm/vm_map_store_rb.h>

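/*
 * Instantiate the red-black tree operations (insert, remove, find, ...)
 * for the map store. The RB_* macros are the BSD tree.h family (pulled in
 * through the vm_map_store headers); "entry" names the rb_node field
 * embedded in struct vm_map_store, and rb_node_compare below supplies
 * the ordering.
 */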
RB_GENERATE(rb_head, vm_map_store, entry, rb_node_compare);

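/*
 * Recover the vm_map_entry that embeds a given vm_map_store node. This
 * relies on struct vm_map_entry's layout: the entry begins with a
 * struct vm_map_links, with the store node placed immediately after it,
 * so backing up by sizeof(struct vm_map_links) from the store pointer
 * yields the enclosing entry.
 */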
#define VME_FOR_STORE( store) \
    (vm_map_entry_t)(((unsigned long)store) - ((unsigned long)sizeof(struct vm_map_links)))

void
vm_map_store_init_rb( struct vm_map_header* hdr )
{
    RB_INIT(&(hdr->rb_head_store));
}

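/*
 * Ordering predicate for the tree. Nodes are compared by address range:
 * a node whose start falls within [parent->vme_start, parent->vme_end)
 * compares "equal" (returns 0). Treating overlap as equality is what lets
 * RB_FIND answer "which entry contains this address?" instead of requiring
 * an exact key match.
 */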
int
rb_node_compare(struct vm_map_store *node, struct vm_map_store *parent)
{
    vm_map_entry_t vme_c;
    vm_map_entry_t vme_p;

    vme_c = VME_FOR_STORE(node);
    vme_p = VME_FOR_STORE(parent);
    if (vme_c->vme_start < vme_p->vme_start) {
        return -1;
    }
    if (vme_c->vme_start >= vme_p->vme_end) {
        return 1;
    }
    return 0;
}

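/*
 * Debugging aid, reached when the map's linked list and RB tree disagree
 * about an entry. It never returns (hence __dead2): it re-looks the entry
 * up in the tree and panics with either a "not found" message or the
 * neighborhood (node plus left/right children) of the stale tree node.
 */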
__dead2
void
vm_map_store_walk_rb(vm_map_t map, vm_map_entry_t *wrong_vme, vm_map_entry_t *vm_entry)
{
    struct vm_map_header *hdr = &map->hdr;
    struct vm_map_store *rb_entry = RB_ROOT(&hdr->rb_head_store);
    vm_map_entry_t cur = *vm_entry;

    rb_entry = RB_FIND(rb_head, &hdr->rb_head_store, &(cur->store));
    if (rb_entry == NULL) {
        panic("NO SUCH ENTRY %p. Gave back %p", *vm_entry, *wrong_vme);
    } else {
        panic("Cur: %p, L: %p, R: %p",
            VME_FOR_STORE(rb_entry),
            VME_FOR_STORE(RB_LEFT(rb_entry, entry)),
            VME_FOR_STORE(RB_RIGHT(rb_entry, entry)));
    }
}


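/*
 * Address lookup via the tree. Walk down from the root: descend right when
 * the address is at or above the current entry's start, left otherwise.
 * On a hit, *vm_entry is the entry containing the address and TRUE is
 * returned; on a miss, *vm_entry is the closest preceding entry (or the
 * map header if there is none) and FALSE is returned, mirroring the
 * contract of vm_map_lookup_entry().
 */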
boolean_t
vm_map_store_lookup_entry_rb(vm_map_t map, vm_map_offset_t address, vm_map_entry_t *vm_entry)
{
    struct vm_map_header *hdr = &map->hdr;
    struct vm_map_store *rb_entry = RB_ROOT(&hdr->rb_head_store);
    vm_map_entry_t cur = vm_map_to_entry(map);
    vm_map_entry_t prev = VM_MAP_ENTRY_NULL;

    while (rb_entry != (struct vm_map_store*)NULL) {
        cur = VME_FOR_STORE(rb_entry);
        if (cur == VM_MAP_ENTRY_NULL) {
            panic("no entry");
        }
        if (address >= cur->vme_start) {
            if (address < cur->vme_end) {
                *vm_entry = cur;
                return TRUE;
            }
            rb_entry = RB_RIGHT(rb_entry, entry);
            prev = cur;
        } else {
            rb_entry = RB_LEFT(rb_entry, entry);
        }
    }
    if (prev == VM_MAP_ENTRY_NULL) {
        prev = vm_map_to_entry(map);
    }
    *vm_entry = prev;
    return FALSE;
}

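/*
 * Insert an entry's store node into the tree. The list-position hint
 * (after_where) is unused here: the tree is keyed purely by address, so
 * RB_INSERT finds the slot itself. A non-NULL return from RB_INSERT means
 * an existing entry overlaps the new one, which is fatal.
 */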
void
vm_map_store_entry_link_rb( struct vm_map_header *mapHdr, __unused vm_map_entry_t after_where, vm_map_entry_t entry)
{
    struct rb_head *rbh = &(mapHdr->rb_head_store);
    struct vm_map_store *store = &(entry->store);
    struct vm_map_store *tmp_store;

    if ((tmp_store = RB_INSERT( rb_head, rbh, store )) != NULL) {
        panic("VMSEL: INSERT FAILED: 0x%lx, 0x%lx, 0x%lx, 0x%lx",
            (uintptr_t)entry->vme_start, (uintptr_t)entry->vme_end,
            (uintptr_t)(VME_FOR_STORE(tmp_store))->vme_start,
            (uintptr_t)(VME_FOR_STORE(tmp_store))->vme_end);
    }
}

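/*
 * Remove an entry's store node from the tree, panicking if the entry
 * was never in the tree to begin with.
 */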
void
vm_map_store_entry_unlink_rb( struct vm_map_header *mapHdr, vm_map_entry_t entry)
{
    struct rb_head *rbh = &(mapHdr->rb_head_store);
    struct vm_map_store *rb_entry;
    struct vm_map_store *store = &(entry->store);

    rb_entry = RB_FIND( rb_head, rbh, store);
    if (rb_entry == NULL) {
        panic("NO ENTRY TO DELETE");
    }
    RB_REMOVE( rb_head, rbh, store );
}

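/*
 * Detach the store nodes of up to nentries entries, starting at "entry",
 * from a vm_map_copy's tree. The entries themselves stay on the copy's
 * linked list; only the tree bookkeeping is torn down.
 */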
void
vm_map_store_copy_reset_rb( vm_map_copy_t copy, vm_map_entry_t entry, int nentries )
{
    struct vm_map_header *mapHdr = &(copy->cpy_hdr);
    struct rb_head *rbh = &(mapHdr->rb_head_store);
    struct vm_map_store *store;
    int deleted = 0;

    while (entry != vm_map_copy_to_entry(copy) && nentries > 0) {
        store = &(entry->store);
        RB_REMOVE( rb_head, rbh, store );
        entry = entry->vme_next;
        deleted++;
        nentries--;
    }
}

extern zone_t vm_map_holes_zone; /* zone for vm map holes (vm_map_links) structures */

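/*
 * Merge a hole with the hole immediately following it: the first hole
 * absorbs its successor's end address, the successor is unlinked from the
 * circular hole list, and its vm_map_links structure is returned to the
 * holes zone.
 */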
void
vm_map_combine_hole(vm_map_t map, vm_map_entry_t hole_entry);
void
vm_map_combine_hole(__unused vm_map_t map, vm_map_entry_t hole_entry)
{
    vm_map_entry_t middle_hole_entry, last_hole_entry;

    hole_entry->vme_end = hole_entry->vme_next->vme_end;

    middle_hole_entry = hole_entry->vme_next;
    last_hole_entry = middle_hole_entry->vme_next;

    assert(last_hole_entry->vme_prev == middle_hole_entry);
    assert(middle_hole_entry->vme_end != last_hole_entry->vme_start);

    last_hole_entry->vme_prev = hole_entry;
    hole_entry->vme_next = last_hole_entry;

    middle_hole_entry->vme_prev = NULL;
    middle_hole_entry->vme_next = NULL;

    zfree(vm_map_holes_zone, middle_hole_entry);

    assert(hole_entry->vme_start < hole_entry->vme_end);
    assert(last_hole_entry->vme_start < last_hole_entry->vme_end);
}


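/*
 * Unlink a hole from the circular hole list and free it. Special-cases the
 * list head: if it is the only hole, the list becomes empty; otherwise the
 * head advances to the next hole. The hole hint is rewritten to a neighbor
 * that remains valid.
 */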
void
vm_map_delete_hole(vm_map_t map, vm_map_entry_t hole_entry);
void
vm_map_delete_hole(vm_map_t map, vm_map_entry_t hole_entry)
{
    if (hole_entry == CAST_TO_VM_MAP_ENTRY(map->holes_list)) {
        if (hole_entry->vme_next == CAST_TO_VM_MAP_ENTRY(map->holes_list)) {
            map->holes_list = NULL;
            SAVE_HINT_HOLE_WRITE(map, NULL);
        } else {
            vm_map_entry_t l_next, l_prev;

            l_next = (vm_map_entry_t) map->holes_list->next;
            l_prev = (vm_map_entry_t) map->holes_list->prev;
            map->holes_list = (struct vm_map_links*) l_next;

            l_next->vme_prev = l_prev;
            l_prev->vme_next = l_next;

            SAVE_HINT_HOLE_WRITE(map, (struct vm_map_links*) l_next);
        }
    } else {
        SAVE_HINT_HOLE_WRITE(map, (struct vm_map_links*) hole_entry->vme_prev);

        hole_entry->vme_prev->vme_next = hole_entry->vme_next;
        hole_entry->vme_next->vme_prev = hole_entry->vme_prev;
    }

    hole_entry->vme_next = NULL;
    hole_entry->vme_prev = NULL;
    zfree(vm_map_holes_zone, hole_entry);
}


/*
 * For Debugging.
 */

#if DEBUG
extern int vm_check_map_sanity;

static void
check_map_sanity(vm_map_t map, vm_map_entry_t old_hole_entry)
{
    vm_map_entry_t hole_entry, next_hole_entry;
    vm_map_entry_t map_entry, next_map_entry;

    if (map->holes_list == NULL) {
        return;
    }

    hole_entry = CAST_DOWN(vm_map_entry_t, map->holes_list);
    next_hole_entry = hole_entry->vme_next;

    map_entry = vm_map_first_entry(map);
    next_map_entry = map_entry->vme_next;

    while (map_entry->vme_start > hole_entry->vme_start) {
        hole_entry = next_hole_entry;
        next_hole_entry = hole_entry->vme_next;

        if (hole_entry == CAST_DOWN(vm_map_entry_t, map->holes_list)) {
            break;
        }
    }

    while (map_entry != vm_map_to_entry(map)) {
        if (map_entry->vme_start >= map->max_offset) {
            break;
        }

        if (map_entry->vme_end != map_entry->vme_next->vme_start) {
            if (map_entry->vme_next == vm_map_to_entry(map)) {
                break;
            }

            if (hole_entry->vme_start != map_entry->vme_end) {
                panic("hole_entry not aligned %p(0x%llx), %p (0x%llx), %p",
                    hole_entry, (unsigned long long)hole_entry->vme_start,
                    map_entry->vme_next, (unsigned long long)map_entry->vme_end,
                    old_hole_entry);
                assert(hole_entry->vme_start == map_entry->vme_end);
            }

            if (hole_entry->vme_end != map_entry->vme_next->vme_start) {
                panic("hole_entry not next aligned %p(0x%llx), %p (0x%llx), %p",
                    hole_entry, (unsigned long long)hole_entry->vme_end,
                    map_entry->vme_next, (unsigned long long)map_entry->vme_next->vme_start,
                    old_hole_entry);
                assert(hole_entry->vme_end == map_entry->vme_next->vme_start);
            }

            hole_entry = next_hole_entry;
            next_hole_entry = hole_entry->vme_next;

            if (hole_entry == CAST_DOWN(vm_map_entry_t, map->holes_list)) {
                break;
            }
        }

        map_entry = map_entry->vme_next;
    }
}

/*
 * For debugging.
 */
static void
copy_hole_info(vm_map_entry_t hole_entry, vm_map_entry_t old_hole_entry)
{
    old_hole_entry->vme_prev = hole_entry->vme_prev;
    old_hole_entry->vme_next = hole_entry->vme_next;
    old_hole_entry->vme_start = hole_entry->vme_start;
    old_hole_entry->vme_end = hole_entry->vme_end;
}
#endif /* DEBUG */

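/*
 * An entry is being removed from the map, so the range it covered becomes
 * usable again. Four outcomes are possible: the hole that ends at the
 * entry's start grows to cover it, the hole that starts at the entry's end
 * grows to cover it, two now-adjacent holes merge into one
 * (vm_map_combine_hole), or a brand-new hole is allocated and linked in.
 * The hole hint is consulted first to avoid walking the list from the top.
 */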
void
update_holes_on_entry_deletion(vm_map_t map, vm_map_entry_t old_entry);
void
update_holes_on_entry_deletion(vm_map_t map, vm_map_entry_t old_entry)
{
    /*
     * Dealing with the deletion of an older entry.
     */

    vm_map_entry_t hole_entry, next_hole_entry;
#if DEBUG
    struct vm_map_entry old_hole_entry;
#endif /* DEBUG */
    boolean_t create_new_hole = TRUE;

    hole_entry = CAST_TO_VM_MAP_ENTRY(map->hole_hint);

    if (hole_entry) {
        if (hole_entry->vme_end == old_entry->vme_start) {
            /*
             * Found a hole right above our entry (the hole ends
             * exactly where our entry starts).
             * Hit.
             */
        } else if (hole_entry->vme_start == old_entry->vme_end) {
            if (hole_entry != CAST_TO_VM_MAP_ENTRY(map->holes_list)) {
                /*
                 * Found a hole right below our entry (the hole
                 * starts exactly where our entry ends); step back
                 * to the previous hole, making sure we don't
                 * erroneously extend backwards.
                 *
                 * Hit.
                 */
                hole_entry = hole_entry->vme_prev;
            }
        } else if (hole_entry->vme_start > old_entry->vme_end) {
            /*
             * Useless hint. Start from the top.
             */
            hole_entry = CAST_TO_VM_MAP_ENTRY(map->holes_list);
        }

        if (hole_entry != CAST_TO_VM_MAP_ENTRY(map->holes_list)) {
            if (hole_entry->vme_start > old_entry->vme_start) {
                panic("Hole hint failed: Hole entry start: 0x%llx, entry start: 0x%llx, map hole start: 0x%llx, map hint start: 0x%llx\n",
                    (unsigned long long)hole_entry->vme_start,
                    (unsigned long long)old_entry->vme_start,
                    (unsigned long long)map->holes_list->start,
                    (unsigned long long)map->hole_hint->start);
            }
            if (hole_entry->vme_end > old_entry->vme_start) {
                panic("Hole hint failed: Hole entry end: 0x%llx, entry start: 0x%llx, map hole start: 0x%llx, map hint start: 0x%llx\n",
                    (unsigned long long)hole_entry->vme_end,
                    (unsigned long long)old_entry->vme_start,
                    (unsigned long long)map->holes_list->start,
                    (unsigned long long)map->hole_hint->start);
            }
        }

        while (1) {
            next_hole_entry = hole_entry->vme_next;

            /*
             * Hole is right above the entry.
             */
            if (hole_entry->vme_end == old_entry->vme_start) {
#if DEBUG
                copy_hole_info(hole_entry, &old_hole_entry);
#endif /* DEBUG */

                /*
                 * Is there another hole right below the entry?
                 * Can we combine holes?
                 */
                if (old_entry->vme_end == hole_entry->vme_next->vme_start) {
                    vm_map_combine_hole(map, hole_entry);
                } else {
                    hole_entry->vme_end = old_entry->vme_end;
                }
                create_new_hole = FALSE;
#if DEBUG
                if (vm_check_map_sanity) {
                    check_map_sanity(map, &old_hole_entry);
                }
#endif /* DEBUG */
                break;
            }

            /*
             * Hole is right below the entry.
             */
            if (hole_entry->vme_start == old_entry->vme_end) {
#if DEBUG
                copy_hole_info(hole_entry, &old_hole_entry);
#endif /* DEBUG */

                hole_entry->vme_start = old_entry->vme_start;
                create_new_hole = FALSE;

#if DEBUG
                if (vm_check_map_sanity) {
                    check_map_sanity(map, &old_hole_entry);
                }
#endif /* DEBUG */
                break;
            }

            /*
             * Hole is beyond our entry. Let's go back to the last hole
             * before our entry so we have the right place to link up the
             * new hole that will be needed.
             */
            if (hole_entry->vme_start > old_entry->vme_end) {
#if DEBUG
                copy_hole_info(hole_entry, &old_hole_entry);
#endif /* DEBUG */

                if (hole_entry != CAST_TO_VM_MAP_ENTRY(map->holes_list)) {
                    assert(hole_entry->vme_start != old_entry->vme_start);
                    hole_entry = hole_entry->vme_prev;
                }
                break;
            }

            hole_entry = next_hole_entry;

            if (hole_entry == CAST_TO_VM_MAP_ENTRY(map->holes_list)) {
                hole_entry = hole_entry->vme_prev;
                break;
            }
        }
    }

    if (create_new_hole) {
        struct vm_map_links *new_hole_entry = NULL;
        vm_map_entry_t l_next, l_prev;

        new_hole_entry = zalloc(vm_map_holes_zone);

        /*
         * First hole in the map?
         * OR
         * A hole that is located above the current first hole in the map?
         */
        if (map->holes_list == NULL || (hole_entry == CAST_TO_VM_MAP_ENTRY(map->holes_list) && hole_entry->vme_start > old_entry->vme_start)) {
            if (map->holes_list == NULL) {
                map->holes_list = new_hole_entry;
                new_hole_entry->prev = new_hole_entry->next = CAST_TO_VM_MAP_ENTRY(map->holes_list);
            } else {
                l_next = CAST_TO_VM_MAP_ENTRY(map->holes_list);
                l_prev = map->holes_list->prev;
                map->holes_list = new_hole_entry;
                new_hole_entry->next = l_next;
                new_hole_entry->prev = l_prev;

                l_prev->vme_next = l_next->vme_prev = CAST_TO_VM_MAP_ENTRY(new_hole_entry);
            }
        } else {
            l_next = hole_entry->vme_next;
            l_prev = hole_entry->vme_next->vme_prev;

            new_hole_entry->prev = hole_entry;
            new_hole_entry->next = l_next;

            hole_entry->vme_next = CAST_TO_VM_MAP_ENTRY(new_hole_entry);
            l_next->vme_prev = CAST_TO_VM_MAP_ENTRY(new_hole_entry);
        }

        new_hole_entry->start = old_entry->vme_start;
        new_hole_entry->end = old_entry->vme_end;

        hole_entry = CAST_TO_VM_MAP_ENTRY(new_hole_entry);

        assert(new_hole_entry->start < new_hole_entry->end);
    }

#if DEBUG
    if (vm_check_map_sanity) {
        check_map_sanity(map, &old_hole_entry);
    }
#endif /* DEBUG */

    SAVE_HINT_HOLE_WRITE(map, (struct vm_map_links*) hole_entry);
    return;
}


void
update_holes_on_entry_creation(vm_map_t map, vm_map_entry_t new_entry);
void
update_holes_on_entry_creation(vm_map_t map, vm_map_entry_t new_entry)
{
    vm_map_entry_t hole_entry, next_hole_entry;
#if DEBUG
    struct vm_map_entry old_hole_entry;
    vm_map_entry_t tmp_entry;
    boolean_t check_map_with_hole_sanity = TRUE;
#endif /* DEBUG */

    /*
     * Case A: The entry is aligned exactly with the start and end of the hole.
     * This will delete the hole.
     *
     * Case B: The entry is completely within a hole but NOT aligned with the start/end of the hole.
     * This will split a hole.
     *
     * Case C: The entry overlaps with the hole. The entry could be extending upwards (C1) or downwards (C2).
     * This will reduce the size of the hole or delete the hole completely if it is smaller than the entry.
     */

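    /*
     * Illustration of Case B, with made-up addresses:
     *
     *   before:  [ hole: 0x1000 - 0x9000                ]
     *   entry:           [ 0x3000 - 0x5000 ]
     *   after:   [ 0x1000 - 0x3000 ]  [ 0x5000 - 0x9000 ]
     *
     * The existing hole is trimmed to end at the entry's start, and a
     * freshly zalloc'ed vm_map_links covers the range from the entry's
     * end to the old hole's end.
     */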
    hole_entry = CAST_TO_VM_MAP_ENTRY(map->holes_list);
    assert(hole_entry);
    next_hole_entry = hole_entry->vme_next;

    while (1) {
#if DEBUG
        /*
         * If the entry doesn't exist in the RB tree, we are likely dealing with copy maps where
         * the entries belonging to the copy map are linked into the list of entries silently and
         * then added to the RB-tree later on.
         * So sanity checks are useless in that case.
         */
        check_map_with_hole_sanity = vm_map_lookup_entry(map, new_entry->vme_start, &tmp_entry);
#endif /* DEBUG */

        if (hole_entry->vme_start == new_entry->vme_start &&
            hole_entry->vme_end == new_entry->vme_end) {
            /* Case A */
#if DEBUG
            copy_hole_info(hole_entry, &old_hole_entry);
#endif /* DEBUG */

            /*
             * This check makes sense only for regular maps, not copy maps.
             * With a regular map, the VM entry is first linked and then
             * the hole is deleted. So the check below, which makes sure that
             * the map's bounds are being respected, is valid.
             * But for copy maps, the hole is deleted before the VM entry is
             * linked (vm_map_store_copy_insert) and so this check is invalid.
             *
             * if (hole_entry == (vm_map_entry_t) map->holes_list) {
             *
             *      if (hole_entry->vme_next == (vm_map_entry_t) map->holes_list) {
             *
             *              next_hole_entry = vm_map_last_entry(map);
             *              assert(next_hole_entry->vme_end >= map->max_offset);
             *      }
             * }
             */

            vm_map_delete_hole(map, hole_entry);

#if DEBUG
            if (vm_check_map_sanity && check_map_with_hole_sanity) {
                check_map_sanity(map, &old_hole_entry);
            }
#endif /* DEBUG */
            return;
        } else if (hole_entry->vme_start < new_entry->vme_start &&
            hole_entry->vme_end > new_entry->vme_end) {
            /* Case B */
            struct vm_map_links *new_hole_entry = NULL;

            new_hole_entry = zalloc(vm_map_holes_zone);

#if DEBUG
            copy_hole_info(hole_entry, &old_hole_entry);
#endif /* DEBUG */

            new_hole_entry->prev = hole_entry;
            new_hole_entry->next = hole_entry->vme_next;
            hole_entry->vme_next->vme_prev = CAST_TO_VM_MAP_ENTRY(new_hole_entry);
            hole_entry->vme_next = CAST_TO_VM_MAP_ENTRY(new_hole_entry);

            new_hole_entry->start = new_entry->vme_end;
            new_hole_entry->end = hole_entry->vme_end;
            hole_entry->vme_end = new_entry->vme_start;

            assert(hole_entry->vme_start < hole_entry->vme_end);
            assert(new_hole_entry->start < new_hole_entry->end);

#if DEBUG
            if (vm_check_map_sanity && check_map_with_hole_sanity) {
                check_map_sanity(map, &old_hole_entry);
            }
#endif /* DEBUG */

            SAVE_HINT_HOLE_WRITE(map, (struct vm_map_links*) hole_entry);
            return;
        } else if ((new_entry->vme_start <= hole_entry->vme_start) && (hole_entry->vme_start < new_entry->vme_end)) {
            /*
             * Case C1: Entry moving upwards and a part/full hole lies within the bounds of the entry.
             */
#if DEBUG
            copy_hole_info(hole_entry, &old_hole_entry);
#endif /* DEBUG */

            if (hole_entry->vme_end <= new_entry->vme_end) {
                vm_map_delete_hole(map, hole_entry);
            } else {
                hole_entry->vme_start = new_entry->vme_end;
                SAVE_HINT_HOLE_WRITE(map, (struct vm_map_links*) hole_entry);
            }

#if DEBUG
            if (vm_check_map_sanity && check_map_with_hole_sanity) {
                check_map_sanity(map, &old_hole_entry);
            }
#endif /* DEBUG */

            return;
        } else if ((new_entry->vme_start < hole_entry->vme_end) && (hole_entry->vme_end <= new_entry->vme_end)) {
            /*
             * Case C2: Entry moving downwards and a part/full hole lies within the bounds of the entry.
             */
#if DEBUG
            copy_hole_info(hole_entry, &old_hole_entry);
#endif /* DEBUG */

            if (hole_entry->vme_start >= new_entry->vme_start) {
                vm_map_delete_hole(map, hole_entry);
            } else {
                hole_entry->vme_end = new_entry->vme_start;
                SAVE_HINT_HOLE_WRITE(map, (struct vm_map_links*) hole_entry);
            }

#if DEBUG
            if (vm_check_map_sanity && check_map_with_hole_sanity) {
                check_map_sanity(map, &old_hole_entry);
            }
#endif /* DEBUG */

            return;
        }

        hole_entry = next_hole_entry;
        next_hole_entry = hole_entry->vme_next;

        if (hole_entry == CAST_TO_VM_MAP_ENTRY(map->holes_list)) {
            break;
        }
    }

    panic("Illegal action: h1: %p, s:0x%llx, e:0x%llx...h2:%p, s:0x%llx, e:0x%llx...h3:0x%p, s:0x%llx, e:0x%llx\n",
        hole_entry->vme_prev,
        (unsigned long long)hole_entry->vme_prev->vme_start,
        (unsigned long long)hole_entry->vme_prev->vme_end,
        hole_entry,
        (unsigned long long)hole_entry->vme_start,
        (unsigned long long)hole_entry->vme_end,
        hole_entry->vme_next,
        (unsigned long long)hole_entry->vme_next->vme_start,
        (unsigned long long)hole_entry->vme_next->vme_end);
}

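/*
 * Hole-list counterpart of "update first free": invoked (via the map store
 * layer) after an entry has been linked into or unlinked from the map.
 * When the map tracks holes, dispatch to the creation/deletion updater;
 * clipping calls (entry == NULL) and entries pinned beyond the map's
 * usable range are filtered out below.
 */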
void
update_first_free_rb(vm_map_t map, vm_map_entry_t entry, boolean_t new_entry_creation)
{
    if (map->holelistenabled) {
        /*
         * Holes can be used to track ranges all the way up to MACH_VM_MAX_ADDRESS or more (e.g. kernel map).
         */
        vm_map_offset_t max_valid_offset = (map->max_offset > MACH_VM_MAX_ADDRESS) ? map->max_offset : MACH_VM_MAX_ADDRESS;

        /*
         * Clipping an entry will not result in the creation/deletion/modification of
         * a hole. Those calls pass NULL for their target entry.
         */
        if (entry == NULL) {
            return;
        }

        /*
         * Commpage is pinned beyond the map's max offset. That shouldn't affect the
         * holes within the bounds of the map.
         */
        if (vm_map_trunc_page(entry->vme_start, VM_MAP_PAGE_MASK(map)) >= max_valid_offset) {
            return;
        }

        /*
         * Note:
         *
         * - A new entry has already been added to the map
         * OR
         * - An older entry has already been deleted from the map
         *
         * We are updating the hole list after the fact (except in one special case involving copy maps).
         */
        if (new_entry_creation) {
            update_holes_on_entry_creation(map, entry);
        } else {
            update_holes_on_entry_deletion(map, entry);
        }
    }
}