/*
 * Copyright (c) 2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <kern/backtrace.h>
#include <vm/vm_map_store_rb.h>

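/*
 * RB_GENERATE emits the red-black tree implementation (insert, remove,
 * find, next, etc.) for the "rb_head" tree of struct vm_map_store nodes,
 * ordered by rb_node_compare below.  The matching prototype is expected
 * to be declared in vm_map_store_rb.h.
 */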
RB_GENERATE(rb_head, vm_map_store, entry, rb_node_compare);

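/*
 * VME_FOR_STORE recovers the vm_map_entry that embeds a given
 * vm_map_store node.  A sketch of the layout this relies on (assuming
 * the vm_map_entry definition in osfmk/vm/vm_map.h, where the store
 * node immediately follows the embedded vm_map_links):
 *
 *	struct vm_map_entry {
 *		struct vm_map_links	links;	// prev/next/start/end
 *		struct vm_map_store	store;	// RB node handed to the tree
 *		...
 *	};
 *
 * so subtracting sizeof(struct vm_map_links) from the store pointer
 * yields the address of the enclosing entry.
 */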
#define VME_FOR_STORE(store) \
	(vm_map_entry_t)(((unsigned long)store) - ((unsigned long)sizeof(struct vm_map_links)))

void
vm_map_store_init_rb(struct vm_map_header *hdr)
{
	RB_INIT(&(hdr->rb_head_store));
}

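/*
 * Tree ordering: compare the candidate node's start address against the
 * resident node's [vme_start, vme_end) range.  Returning 0 on overlap
 * means RB_FIND doubles as an address-containment lookup: probing with
 * a single-address key locates the entry that maps it.  For example,
 * against a resident entry [0x1000, 0x2000), a candidate starting at
 * 0x0800 yields -1, at 0x2000 yields 1, and at 0x1800 yields 0 (a hit).
 */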
int
rb_node_compare(struct vm_map_store *node, struct vm_map_store *parent)
{
	vm_map_entry_t vme_c;
	vm_map_entry_t vme_p;

	vme_c = VME_FOR_STORE(node);
	vme_p = VME_FOR_STORE(parent);
	if (vme_c->vme_start < vme_p->vme_start) {
		return -1;
	}
	if (vme_c->vme_start >= vme_p->vme_end) {
		return 1;
	}
	return 0;
}

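/*
 * Diagnostic helper; never returns.  Called when the linked list and the
 * RB tree disagree about an entry, it panics with whatever state the
 * tree holds for the offending entry.
 */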
__dead2
void
vm_map_store_walk_rb(vm_map_t map, vm_map_entry_t *wrong_vme, vm_map_entry_t *vm_entry)
{
	struct vm_map_header *hdr = &map->hdr;
	struct vm_map_store *rb_entry = RB_ROOT(&hdr->rb_head_store);
	vm_map_entry_t cur = *vm_entry;

	rb_entry = RB_FIND(rb_head, &hdr->rb_head_store, &(cur->store));
	if (rb_entry == NULL) {
		panic("NO SUCH ENTRY %p. Gave back %p", *vm_entry, *wrong_vme);
	} else {
		panic("Cur: %p, L: %p, R: %p", VME_FOR_STORE(rb_entry), VME_FOR_STORE(RB_LEFT(rb_entry, entry)), VME_FOR_STORE(RB_RIGHT(rb_entry, entry)));
	}
}


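/*
 * Address lookup via the RB tree.  On a hit, *vm_entry is set to the
 * entry containing "address" and TRUE is returned.  On a miss, *vm_entry
 * is set to the closest preceding entry (or the map header if there is
 * none) and FALSE is returned, mirroring the contract of
 * vm_map_lookup_entry().
 */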
boolean_t
vm_map_store_lookup_entry_rb(vm_map_t map, vm_map_offset_t address, vm_map_entry_t *vm_entry)
{
	struct vm_map_header *hdr = &map->hdr;
	struct vm_map_store *rb_entry = RB_ROOT(&hdr->rb_head_store);
	vm_map_entry_t cur = vm_map_to_entry(map);
	vm_map_entry_t prev = VM_MAP_ENTRY_NULL;

	while (rb_entry != (struct vm_map_store *)NULL) {
		cur = VME_FOR_STORE(rb_entry);
		if (cur == VM_MAP_ENTRY_NULL) {
			panic("no entry");
		}
		if (address >= cur->vme_start) {
			if (address < cur->vme_end) {
				*vm_entry = cur;
				return TRUE;
			}
			rb_entry = RB_RIGHT(rb_entry, entry);
			prev = cur;
		} else {
			rb_entry = RB_LEFT(rb_entry, entry);
		}
	}
	if (prev == VM_MAP_ENTRY_NULL) {
		prev = vm_map_to_entry(map);
	}
	*vm_entry = prev;
	return FALSE;
}

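/*
 * Insert an entry's store node into the map header's tree.  RB_INSERT
 * returns the colliding node if an equal key is already present, which
 * here means two entries overlap: a fatal map-corruption condition.
 */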
void
vm_map_store_entry_link_rb(struct vm_map_header *mapHdr, __unused vm_map_entry_t after_where, vm_map_entry_t entry)
{
	struct rb_head *rbh = &(mapHdr->rb_head_store);
	struct vm_map_store *store = &(entry->store);
	struct vm_map_store *tmp_store;
	if ((tmp_store = RB_INSERT(rb_head, rbh, store)) != NULL) {
		panic("VMSEL: INSERT FAILED: 0x%lx, 0x%lx, 0x%lx, 0x%lx", (uintptr_t)entry->vme_start, (uintptr_t)entry->vme_end,
		    (uintptr_t)(VME_FOR_STORE(tmp_store))->vme_start, (uintptr_t)(VME_FOR_STORE(tmp_store))->vme_end);
	}
}

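/*
 * Remove an entry's store node from the map header's tree, panicking if
 * the entry was never in the tree.
 */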
void
vm_map_store_entry_unlink_rb(struct vm_map_header *mapHdr, vm_map_entry_t entry)
{
	struct rb_head *rbh = &(mapHdr->rb_head_store);
	struct vm_map_store *rb_entry;
	struct vm_map_store *store = &(entry->store);

	rb_entry = RB_FIND(rb_head, rbh, store);
	if (rb_entry == NULL) {
		panic("NO ENTRY TO DELETE");
	}
	RB_REMOVE(rb_head, rbh, store);
}

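/*
 * Detach up to "nentries" entries, starting at "entry", from a copy
 * map's RB tree.  The entries themselves stay on the linked list and
 * are not freed; only the tree linkage is torn down.
 */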
void
vm_map_store_copy_reset_rb(vm_map_copy_t copy, vm_map_entry_t entry, int nentries)
{
	struct vm_map_header *mapHdr = &(copy->cpy_hdr);
	struct rb_head *rbh = &(mapHdr->rb_head_store);
	struct vm_map_store *store;
	int deleted = 0;

	while (entry != vm_map_copy_to_entry(copy) && nentries > 0) {
		store = &(entry->store);
		RB_REMOVE(rb_head, rbh, store);
		entry = entry->vme_next;
		deleted++;
		nentries--;
	}
}

extern zone_t vm_map_holes_zone;        /* zone for vm map holes (vm_map_links) structures */

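/*
 * Merge a hole with the hole immediately following it: extend
 * hole_entry to the end of its successor, unlink the successor from the
 * circular hole list, and return the successor's links structure to
 * vm_map_holes_zone.
 */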
void
vm_map_combine_hole(vm_map_t map, vm_map_entry_t hole_entry);
void
vm_map_combine_hole(__unused vm_map_t map, vm_map_entry_t hole_entry)
{
	vm_map_entry_t middle_hole_entry, last_hole_entry;

	hole_entry->vme_end = hole_entry->vme_next->vme_end;

	middle_hole_entry = hole_entry->vme_next;
	last_hole_entry = middle_hole_entry->vme_next;

	assert(last_hole_entry->vme_prev == middle_hole_entry);
	assert(middle_hole_entry->vme_end != last_hole_entry->vme_start);

	last_hole_entry->vme_prev = hole_entry;
	hole_entry->vme_next = last_hole_entry;

	middle_hole_entry->vme_prev = NULL;
	middle_hole_entry->vme_next = NULL;

	zfree(vm_map_holes_zone, middle_hole_entry);

	assert(hole_entry->vme_start < hole_entry->vme_end);
	assert(last_hole_entry->vme_start < last_hole_entry->vme_end);
}


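/*
 * Unlink a hole from the map's circular hole list and free it.
 * Deleting the list head promotes its successor (or empties the list),
 * and the hole hint is rewritten so it never dangles on freed links.
 */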
void
vm_map_delete_hole(vm_map_t map, vm_map_entry_t hole_entry);
void
vm_map_delete_hole(vm_map_t map, vm_map_entry_t hole_entry)
{
	if (hole_entry == CAST_TO_VM_MAP_ENTRY(map->holes_list)) {
		if (hole_entry->vme_next == CAST_TO_VM_MAP_ENTRY(map->holes_list)) {
			map->holes_list = NULL;
			SAVE_HINT_HOLE_WRITE(map, NULL);
		} else {
			vm_map_entry_t l_next, l_prev;

			l_next = (vm_map_entry_t) map->holes_list->next;
			l_prev = (vm_map_entry_t) map->holes_list->prev;
			map->holes_list = (struct vm_map_links *) l_next;

			l_next->vme_prev = l_prev;
			l_prev->vme_next = l_next;

			SAVE_HINT_HOLE_WRITE(map, (struct vm_map_links *) l_next);
		}
	} else {
		SAVE_HINT_HOLE_WRITE(map, (struct vm_map_links *) hole_entry->vme_prev);

		hole_entry->vme_prev->vme_next = hole_entry->vme_next;
		hole_entry->vme_next->vme_prev = hole_entry->vme_prev;
	}

	hole_entry->vme_next = NULL;
	hole_entry->vme_prev = NULL;
	zfree(vm_map_holes_zone, hole_entry);
}


/*
 * For debugging.
 */

#if DEBUG
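/*
 * Walk the entry list and the hole list in lockstep and panic if any
 * hole's boundaries disagree with the gap between two consecutive
 * entries.  old_hole_entry is a caller-saved snapshot used only to give
 * the panic messages some context.
 */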
static void
check_map_sanity(vm_map_t map, vm_map_entry_t old_hole_entry)
{
	vm_map_entry_t hole_entry, next_hole_entry;
	vm_map_entry_t map_entry, next_map_entry;

	if (map->holes_list == NULL) {
		return;
	}

	hole_entry = CAST_DOWN(vm_map_entry_t, map->holes_list);
	next_hole_entry = hole_entry->vme_next;

	map_entry = vm_map_first_entry(map);
	next_map_entry = map_entry->vme_next;

	while (map_entry->vme_start > hole_entry->vme_start) {
		hole_entry = next_hole_entry;
		next_hole_entry = hole_entry->vme_next;

		if (hole_entry == CAST_DOWN(vm_map_entry_t, map->holes_list)) {
			break;
		}
	}

	while (map_entry != vm_map_to_entry(map)) {
		if (map_entry->vme_start >= map->max_offset) {
			break;
		}

		if (map_entry->vme_end != map_entry->vme_next->vme_start) {
			if (map_entry->vme_next == vm_map_to_entry(map)) {
				break;
			}

			if (hole_entry->vme_start != map_entry->vme_end) {
				panic("hole_entry not aligned %p(0x%llx), %p (0x%llx), %p", hole_entry, (unsigned long long)hole_entry->vme_start, map_entry->vme_next, (unsigned long long)map_entry->vme_end, old_hole_entry);
				assert(hole_entry->vme_start == map_entry->vme_end);
			}

			if (hole_entry->vme_end != map_entry->vme_next->vme_start) {
				panic("hole_entry not next aligned %p(0x%llx), %p (0x%llx), %p", hole_entry, (unsigned long long)hole_entry->vme_end, map_entry->vme_next, (unsigned long long)map_entry->vme_next->vme_start, old_hole_entry);
				assert(hole_entry->vme_end == map_entry->vme_next->vme_start);
			}

			hole_entry = next_hole_entry;
			next_hole_entry = hole_entry->vme_next;

			if (hole_entry == CAST_DOWN(vm_map_entry_t, map->holes_list)) {
				break;
			}
		}

		map_entry = map_entry->vme_next;
	}
}

/*
 * For debugging.
 */
static void
copy_hole_info(vm_map_entry_t hole_entry, vm_map_entry_t old_hole_entry)
{
	old_hole_entry->vme_prev = hole_entry->vme_prev;
	old_hole_entry->vme_next = hole_entry->vme_next;
	old_hole_entry->vme_start = hole_entry->vme_start;
	old_hole_entry->vme_end = hole_entry->vme_end;
}
#endif /* DEBUG */

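/*
 * An entry has just been unlinked from the map, so the range it covered
 * becomes a hole: either extend or merge a neighboring hole, or
 * allocate a fresh one, then update the hole hint.  The hint is tried
 * first so the common case avoids walking the whole list.
 */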
void
update_holes_on_entry_deletion(vm_map_t map, vm_map_entry_t old_entry);
void
update_holes_on_entry_deletion(vm_map_t map, vm_map_entry_t old_entry)
{
	/*
	 * Dealing with the deletion of an older entry.
	 */

	vm_map_entry_t hole_entry, next_hole_entry;
#if DEBUG
	struct vm_map_entry old_hole_entry;
#endif /* DEBUG */
	boolean_t create_new_hole = TRUE;

	hole_entry = CAST_TO_VM_MAP_ENTRY(map->hole_hint);

	if (hole_entry) {
		if (hole_entry->vme_end == old_entry->vme_start) {
			/*
			 * Found a hole right above our entry
			 * (it ends where the entry starts).
			 * Hit.
			 */
		} else if (hole_entry->vme_start == old_entry->vme_end) {
			if (hole_entry != CAST_TO_VM_MAP_ENTRY(map->holes_list)) {
				/*
				 * Found a hole right below our entry
				 * (it starts where the entry ends), but
				 * make sure we don't erroneously extend
				 * the list head backwards.
				 *
				 * Hit.
				 */

				hole_entry = hole_entry->vme_prev;
			}
		} else if (hole_entry->vme_start > old_entry->vme_end) {
			/*
			 * Useless hint. Start from the top.
			 */

			hole_entry = CAST_TO_VM_MAP_ENTRY(map->holes_list);
		}

		if (hole_entry != CAST_TO_VM_MAP_ENTRY(map->holes_list)) {
			if (hole_entry->vme_start > old_entry->vme_start) {
				panic("Hole hint failed: Hole entry start: 0x%llx, entry start: 0x%llx, map hole start: 0x%llx, map hint start: 0x%llx\n",
				    (unsigned long long)hole_entry->vme_start,
				    (unsigned long long)old_entry->vme_start,
				    (unsigned long long)map->holes_list->start,
				    (unsigned long long)map->hole_hint->start);
			}
			if (hole_entry->vme_end > old_entry->vme_start) {
				panic("Hole hint failed: Hole entry end: 0x%llx, entry start: 0x%llx, map hole start: 0x%llx, map hint start: 0x%llx\n",
				    (unsigned long long)hole_entry->vme_end,
				    (unsigned long long)old_entry->vme_start,
				    (unsigned long long)map->holes_list->start,
				    (unsigned long long)map->hole_hint->start);
			}
		}

		while (1) {
			next_hole_entry = hole_entry->vme_next;

			/*
			 * Hole is right above the entry.
			 */
			if (hole_entry->vme_end == old_entry->vme_start) {
#if DEBUG
				copy_hole_info(hole_entry, &old_hole_entry);
#endif /* DEBUG */

				/*
				 * Is there another hole right below the entry?
				 * Can we combine holes?
				 */

				if (old_entry->vme_end == hole_entry->vme_next->vme_start) {
					vm_map_combine_hole(map, hole_entry);
				} else {
					hole_entry->vme_end = old_entry->vme_end;
				}
				create_new_hole = FALSE;
#if DEBUG
				check_map_sanity(map, &old_hole_entry);
#endif /* DEBUG */
				break;
			}

			/*
			 * Hole is right below the entry.
			 */
			if (hole_entry->vme_start == old_entry->vme_end) {
#if DEBUG
				copy_hole_info(hole_entry, &old_hole_entry);
#endif /* DEBUG */

				hole_entry->vme_start = old_entry->vme_start;
				create_new_hole = FALSE;

#if DEBUG
				check_map_sanity(map, &old_hole_entry);
#endif /* DEBUG */
				break;
			}

			/*
			 * Hole is beyond our entry. Let's go back to the last hole
			 * before our entry so we have the right place to link up the
			 * new hole that will be needed.
			 */
			if (hole_entry->vme_start > old_entry->vme_end) {
#if DEBUG
				copy_hole_info(hole_entry, &old_hole_entry);
#endif /* DEBUG */

				if (hole_entry != CAST_TO_VM_MAP_ENTRY(map->holes_list)) {
					assert(hole_entry->vme_start != old_entry->vme_start);
					hole_entry = hole_entry->vme_prev;
				}
				break;
			}

			hole_entry = next_hole_entry;

			if (hole_entry == CAST_TO_VM_MAP_ENTRY(map->holes_list)) {
				hole_entry = hole_entry->vme_prev;
				break;
			}
		}
	}

	if (create_new_hole) {
		struct vm_map_links *new_hole_entry = NULL;
		vm_map_entry_t l_next, l_prev;

		new_hole_entry = zalloc(vm_map_holes_zone);

		/*
		 * First hole in the map?
		 * OR
		 * A hole that is located above the current first hole in the map?
		 */
		if (map->holes_list == NULL || (hole_entry == CAST_TO_VM_MAP_ENTRY(map->holes_list) && hole_entry->vme_start > old_entry->vme_start)) {
			if (map->holes_list == NULL) {
				map->holes_list = new_hole_entry;
				new_hole_entry->prev = new_hole_entry->next = CAST_TO_VM_MAP_ENTRY(map->holes_list);
			} else {
				l_next = CAST_TO_VM_MAP_ENTRY(map->holes_list);
				l_prev = map->holes_list->prev;
				map->holes_list = new_hole_entry;
				new_hole_entry->next = l_next;
				new_hole_entry->prev = l_prev;

				l_prev->vme_next = l_next->vme_prev = CAST_TO_VM_MAP_ENTRY(new_hole_entry);
			}
		} else {
			l_next = hole_entry->vme_next;
			l_prev = hole_entry->vme_next->vme_prev;

			new_hole_entry->prev = hole_entry;
			new_hole_entry->next = l_next;

			hole_entry->vme_next = CAST_TO_VM_MAP_ENTRY(new_hole_entry);
			l_next->vme_prev = CAST_TO_VM_MAP_ENTRY(new_hole_entry);
		}

		new_hole_entry->start = old_entry->vme_start;
		new_hole_entry->end = old_entry->vme_end;

		hole_entry = CAST_TO_VM_MAP_ENTRY(new_hole_entry);

		assert(new_hole_entry->start < new_hole_entry->end);
	}

#if DEBUG
	check_map_sanity(map, &old_hole_entry);
#endif /* DEBUG */

	SAVE_HINT_HOLE_WRITE(map, (struct vm_map_links *) hole_entry);
	return;
}


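/*
 * A new entry has just been linked into the map, so it must consume
 * address space from exactly the holes it overlaps: delete a hole it
 * covers precisely (Case A below), split a hole it sits strictly inside
 * (Case B), or trim a hole it overlaps at one end (Cases C1/C2).
 */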
void
update_holes_on_entry_creation(vm_map_t map, vm_map_entry_t new_entry);
void
update_holes_on_entry_creation(vm_map_t map, vm_map_entry_t new_entry)
{
	vm_map_entry_t hole_entry, next_hole_entry;
#if DEBUG
	struct vm_map_entry old_hole_entry;
	vm_map_entry_t tmp_entry;
	boolean_t check_map_with_hole_sanity = TRUE;
#endif /* DEBUG */

	/*
	 * Case A: The entry is aligned exactly with the start and end of the hole.
	 * This will delete the hole.
	 *
	 * Case B: The entry is completely within a hole but NOT aligned with the start/end of the hole.
	 * This will split a hole.
	 *
	 * Case C: The entry overlaps with the hole. The entry could be extending upwards (C1) or downwards (C2).
	 * This will reduce the size of the hole or delete the hole completely if it is smaller than the entry.
	 */

	hole_entry = CAST_TO_VM_MAP_ENTRY(map->holes_list);
	assert(hole_entry);
	next_hole_entry = hole_entry->vme_next;

	while (1) {
#if DEBUG
		/*
		 * If the entry doesn't exist in the RB tree, we are likely dealing with copy maps where
		 * the entries belonging to the copy map are linked into the list of entries silently and
		 * then added to the RB-tree later on.
		 * So sanity checks are useless in that case.
		 */
		check_map_with_hole_sanity = vm_map_lookup_entry(map, new_entry->vme_start, &tmp_entry);
#endif /* DEBUG */

		if (hole_entry->vme_start == new_entry->vme_start &&
		    hole_entry->vme_end == new_entry->vme_end) {
			/* Case A */
#if DEBUG
			copy_hole_info(hole_entry, &old_hole_entry);
#endif /* DEBUG */

			/*
			 * This check makes sense only for regular maps, not copy maps.
			 * With a regular map, the VM entry is first linked and then
			 * the hole is deleted. So the check below, which makes sure that
			 * the map's bounds are being respected, is valid.
			 * But for copy maps, the hole is deleted before the VM entry is
			 * linked (vm_map_store_copy_insert) and so this check is invalid.
			 *
			 * if (hole_entry == (vm_map_entry_t) map->holes_list) {
			 *
			 *	if (hole_entry->vme_next == (vm_map_entry_t) map->holes_list) {
			 *
			 *		next_hole_entry = vm_map_last_entry(map);
			 *		assert(next_hole_entry->vme_end >= map->max_offset);
			 *	}
			 * }
			 */

			vm_map_delete_hole(map, hole_entry);

#if DEBUG
			if (check_map_with_hole_sanity) {
				check_map_sanity(map, &old_hole_entry);
			}
#endif /* DEBUG */
			return;
		} else if (hole_entry->vme_start < new_entry->vme_start &&
		    hole_entry->vme_end > new_entry->vme_end) {
			/* Case B */
			struct vm_map_links *new_hole_entry = NULL;

			new_hole_entry = zalloc(vm_map_holes_zone);

#if DEBUG
			copy_hole_info(hole_entry, &old_hole_entry);
#endif /* DEBUG */

			new_hole_entry->prev = hole_entry;
			new_hole_entry->next = hole_entry->vme_next;
			hole_entry->vme_next->vme_prev = CAST_TO_VM_MAP_ENTRY(new_hole_entry);
			hole_entry->vme_next = CAST_TO_VM_MAP_ENTRY(new_hole_entry);

			new_hole_entry->start = new_entry->vme_end;
			new_hole_entry->end = hole_entry->vme_end;
			hole_entry->vme_end = new_entry->vme_start;

			assert(hole_entry->vme_start < hole_entry->vme_end);
			assert(new_hole_entry->start < new_hole_entry->end);

#if DEBUG
			if (check_map_with_hole_sanity) {
				check_map_sanity(map, &old_hole_entry);
			}
#endif /* DEBUG */

			SAVE_HINT_HOLE_WRITE(map, (struct vm_map_links *) hole_entry);
			return;
		} else if ((new_entry->vme_start <= hole_entry->vme_start) && (hole_entry->vme_start < new_entry->vme_end)) {
			/*
			 * Case C1: Entry moving upwards and a part/full hole lies within the bounds of the entry.
			 */

#if DEBUG
			copy_hole_info(hole_entry, &old_hole_entry);
#endif /* DEBUG */

			if (hole_entry->vme_end <= new_entry->vme_end) {
				vm_map_delete_hole(map, hole_entry);
			} else {
				hole_entry->vme_start = new_entry->vme_end;
				SAVE_HINT_HOLE_WRITE(map, (struct vm_map_links *) hole_entry);
			}

#if DEBUG
			if (check_map_with_hole_sanity) {
				check_map_sanity(map, &old_hole_entry);
			}
#endif /* DEBUG */

			return;
		} else if ((new_entry->vme_start < hole_entry->vme_end) && (hole_entry->vme_end <= new_entry->vme_end)) {
			/*
			 * Case C2: Entry moving downwards and a part/full hole lies within the bounds of the entry.
			 */

#if DEBUG
			copy_hole_info(hole_entry, &old_hole_entry);
#endif /* DEBUG */

			if (hole_entry->vme_start >= new_entry->vme_start) {
				vm_map_delete_hole(map, hole_entry);
			} else {
				hole_entry->vme_end = new_entry->vme_start;
				SAVE_HINT_HOLE_WRITE(map, (struct vm_map_links *) hole_entry);
			}

#if DEBUG
			if (check_map_with_hole_sanity) {
				check_map_sanity(map, &old_hole_entry);
			}
#endif /* DEBUG */

			return;
		}

		hole_entry = next_hole_entry;
		next_hole_entry = hole_entry->vme_next;

		if (hole_entry == CAST_TO_VM_MAP_ENTRY(map->holes_list)) {
			break;
		}
	}

	panic("Illegal action: h1: %p, s:0x%llx, e:0x%llx...h2:%p, s:0x%llx, e:0x%llx...h3:0x%p, s:0x%llx, e:0x%llx\n",
	    hole_entry->vme_prev,
	    (unsigned long long)hole_entry->vme_prev->vme_start,
	    (unsigned long long)hole_entry->vme_prev->vme_end,
	    hole_entry,
	    (unsigned long long)hole_entry->vme_start,
	    (unsigned long long)hole_entry->vme_end,
	    hole_entry->vme_next,
	    (unsigned long long)hole_entry->vme_next->vme_start,
	    (unsigned long long)hole_entry->vme_next->vme_end);
}

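/*
 * RB-store hook invoked after an entry has been linked into the map
 * (new_entry_creation == TRUE) or unlinked from it (FALSE).  With the
 * hole list enabled, this keeps the hole bookkeeping in sync; clipping
 * calls (entry == NULL) and entries pinned beyond the map's usable
 * range are ignored.
 */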
void
update_first_free_rb(vm_map_t map, vm_map_entry_t entry, boolean_t new_entry_creation)
{
	if (map->holelistenabled) {
		/*
		 * Holes can be used to track ranges all the way up to
		 * MACH_VM_MAX_ADDRESS or more (e.g. kernel map).
		 */
		vm_map_offset_t max_valid_offset = (map->max_offset > MACH_VM_MAX_ADDRESS) ? map->max_offset : MACH_VM_MAX_ADDRESS;

		/*
		 * Clipping an entry will not result in the creation/deletion/modification of
		 * a hole. Those calls pass NULL for their target entry.
		 */
		if (entry == NULL) {
			return;
		}

		/*
		 * Commpage is pinned beyond the map's max offset. That shouldn't affect the
		 * holes within the bounds of the map.
		 */
		if (vm_map_trunc_page(entry->vme_start, VM_MAP_PAGE_MASK(map)) >= max_valid_offset) {
			return;
		}

		/*
		 * Note:
		 *
		 * - A new entry has already been added to the map, OR
		 * - An older entry has already been deleted from the map.
		 *
		 * We are updating the hole list after the fact (except in one
		 * special case involving copy maps).
		 */

		if (new_entry_creation) {
			update_holes_on_entry_creation(map, entry);
		} else {
			update_holes_on_entry_deletion(map, entry);
		}
	}
}