/* apple/xnu: osfmk/vm/vm_map_store_rb.c */
/*
 * Copyright (c) 2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <kern/backtrace.h>
#include <vm/vm_map_store_rb.h>

RB_GENERATE(rb_head, vm_map_store, entry, rb_node_compare);

#define VME_FOR_STORE(store) \
	(vm_map_entry_t)(((unsigned long)store) - ((unsigned long)sizeof(struct vm_map_links)))
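
/*
 * Note: VME_FOR_STORE() relies on the layout of struct vm_map_entry, which
 * begins with a struct vm_map_links followed immediately by the embedded
 * struct vm_map_store used for the RB tree. Subtracting
 * sizeof(struct vm_map_links) from the store's address therefore recovers
 * the enclosing entry. Roughly (see vm_map.h for the real definition):
 *
 *	struct vm_map_entry {
 *		struct vm_map_links	links;	// vme_prev/vme_next/vme_start/vme_end
 *		struct vm_map_store	store;	// RB-tree linkage
 *		...
 *	};
 *
 * If that layout ever changes, this macro must change with it.
 */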

void
vm_map_store_init_rb(struct vm_map_header* hdr)
{
	RB_INIT(&(hdr->rb_head_store));
}

int
rb_node_compare(struct vm_map_store *node, struct vm_map_store *parent)
{
	vm_map_entry_t vme_c;
	vm_map_entry_t vme_p;

	vme_c = VME_FOR_STORE(node);
	vme_p = VME_FOR_STORE(parent);
	if (vme_c->vme_start < vme_p->vme_start) {
		return -1;
	}
	if (vme_c->vme_start >= vme_p->vme_end) {
		return 1;
	}
	return 0;
}
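
/*
 * Note: this comparator orders entries by address range rather than by a
 * single key: two nodes compare "equal" whenever the first node's vme_start
 * falls within [vme_start, vme_end) of the second. For lookups this means
 * RB_FIND, given a store whose entry starts anywhere inside an existing
 * mapping, returns that mapping; for insertions it means any overlapping
 * range collides with the resident node, which vm_map_store_entry_link_rb()
 * below treats as fatal.
 */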

void
vm_map_store_walk_rb(vm_map_t map, vm_map_entry_t *wrong_vme, vm_map_entry_t *vm_entry)
{
	struct vm_map_header hdr = map->hdr;
	struct vm_map_store *rb_entry = RB_ROOT(&(hdr.rb_head_store));
	vm_map_entry_t cur = *vm_entry;

	rb_entry = RB_FIND(rb_head, &(hdr.rb_head_store), &(cur->store));
	if (rb_entry == NULL) {
		panic("NO SUCH ENTRY %p. Gave back %p", *vm_entry, *wrong_vme);
	} else {
		panic("Cur: %p, L: %p, R: %p", VME_FOR_STORE(rb_entry), VME_FOR_STORE(RB_LEFT(rb_entry, entry)), VME_FOR_STORE(RB_RIGHT(rb_entry, entry)));
	}
}
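
/*
 * Note: vm_map_store_walk_rb() is a diagnostic aid rather than a traversal;
 * it panics on both paths. Judging by its arguments, it is presumably called
 * when two lookup paths disagree, to report either that the entry is missing
 * from the tree or what its neighbors in the tree look like.
 */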

boolean_t
vm_map_store_lookup_entry_rb(vm_map_t map, vm_map_offset_t address, vm_map_entry_t *vm_entry)
{
	struct vm_map_header hdr = map->hdr;
	struct vm_map_store *rb_entry = RB_ROOT(&(hdr.rb_head_store));
	vm_map_entry_t cur = vm_map_to_entry(map);
	vm_map_entry_t prev = VM_MAP_ENTRY_NULL;

	while (rb_entry != (struct vm_map_store*)NULL) {
		cur = VME_FOR_STORE(rb_entry);
		if (cur == VM_MAP_ENTRY_NULL) {
			panic("no entry");
		}
		if (address >= cur->vme_start) {
			if (address < cur->vme_end) {
				*vm_entry = cur;
				return TRUE;
			}
			rb_entry = RB_RIGHT(rb_entry, entry);
			prev = cur;
		} else {
			rb_entry = RB_LEFT(rb_entry, entry);
		}
	}
	if (prev == VM_MAP_ENTRY_NULL) {
		prev = vm_map_to_entry(map);
	}
	*vm_entry = prev;
	return FALSE;
}
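
/*
 * Note: the loop above is an open-coded binary-search descent (rather than
 * RB_FIND) so that the closest preceding entry can be tracked along the way.
 * The contract mirrors vm_map_lookup_entry(): TRUE with *vm_entry set to the
 * entry containing 'address' on a hit; FALSE with *vm_entry set to the
 * closest preceding entry (or the map header if the address precedes all
 * mappings) on a miss, which is what callers use as an insertion point.
 * A hedged usage sketch:
 *
 *	vm_map_entry_t entry;
 *	if (vm_map_store_lookup_entry_rb(map, addr, &entry)) {
 *		// addr is mapped and 'entry' covers it
 *	} else {
 *		// addr is unmapped; a new entry would be linked after 'entry'
 *	}
 */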

void
vm_map_store_entry_link_rb(struct vm_map_header *mapHdr, __unused vm_map_entry_t after_where, vm_map_entry_t entry)
{
	struct rb_head *rbh = &(mapHdr->rb_head_store);
	struct vm_map_store *store = &(entry->store);
	struct vm_map_store *tmp_store;
	if ((tmp_store = RB_INSERT(rb_head, rbh, store)) != NULL) {
		panic("VMSEL: INSERT FAILED: 0x%lx, 0x%lx, 0x%lx, 0x%lx", (uintptr_t)entry->vme_start, (uintptr_t)entry->vme_end,
		    (uintptr_t)(VME_FOR_STORE(tmp_store))->vme_start, (uintptr_t)(VME_FOR_STORE(tmp_store))->vme_end);
	}
}
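
/*
 * Note: RB_INSERT returns NULL on success and the colliding node on failure.
 * Since rb_node_compare() treats overlapping ranges as equal, a non-NULL
 * result means the new entry overlaps an existing mapping, which should be
 * impossible by the time the store layer is asked to link it; the panic logs
 * both ranges. 'after_where' only matters to the linked-list store, hence
 * __unused here.
 */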

void
vm_map_store_entry_unlink_rb(struct vm_map_header *mapHdr, vm_map_entry_t entry)
{
	struct rb_head *rbh = &(mapHdr->rb_head_store);
	struct vm_map_store *rb_entry;
	struct vm_map_store *store = &(entry->store);

	rb_entry = RB_FIND(rb_head, rbh, store);
	if (rb_entry == NULL) {
		panic("NO ENTRY TO DELETE");
	}
	RB_REMOVE(rb_head, rbh, store);
}
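
/*
 * Note: RB_REMOVE operates directly on the node it is handed; the RB_FIND
 * above is not needed for the removal itself and serves purely as a sanity
 * check that the entry really is in this tree.
 */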

void
vm_map_store_copy_reset_rb(vm_map_copy_t copy, vm_map_entry_t entry, int nentries)
{
	struct vm_map_header *mapHdr = &(copy->cpy_hdr);
	struct rb_head *rbh = &(mapHdr->rb_head_store);
	struct vm_map_store *store;
	int deleted = 0;

	while (entry != vm_map_copy_to_entry(copy) && nentries > 0) {
		store = &(entry->store);
		RB_REMOVE(rb_head, rbh, store);
		entry = entry->vme_next;
		deleted++;
		nentries--;
	}
}
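
/*
 * Note: this evicts up to 'nentries' entries, starting at 'entry', from the
 * copy map's RB tree only; the doubly-linked entry list is left intact for
 * the caller to dispose of. The 'deleted' counter is written but never read,
 * presumably kept as a debugging convenience.
 */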

extern zone_t vm_map_holes_zone; /* zone for vm map holes (vm_map_links) structures */

void
vm_map_combine_hole(vm_map_t map, vm_map_entry_t hole_entry);
void
vm_map_combine_hole(__unused vm_map_t map, vm_map_entry_t hole_entry)
{
	vm_map_entry_t middle_hole_entry, last_hole_entry;

	hole_entry->vme_end = hole_entry->vme_next->vme_end;

	middle_hole_entry = hole_entry->vme_next;
	last_hole_entry = middle_hole_entry->vme_next;

	assert(last_hole_entry->vme_prev == middle_hole_entry);
	assert(middle_hole_entry->vme_end != last_hole_entry->vme_start);

	last_hole_entry->vme_prev = hole_entry;
	hole_entry->vme_next = last_hole_entry;

	middle_hole_entry->vme_prev = NULL;
	middle_hole_entry->vme_next = NULL;

	zfree(vm_map_holes_zone, middle_hole_entry);

	assert(hole_entry->vme_start < hole_entry->vme_end);
	assert(last_hole_entry->vme_start < last_hole_entry->vme_end);
}
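
/*
 * Note: vm_map_combine_hole() merges 'hole_entry' with the hole immediately
 * following it ("middle"): the middle hole's end address is absorbed and the
 * middle hole is unlinked and freed. Schematically:
 *
 *	before:  [hole_entry)  [middle)   ...  [last)
 *	after:   [hole_entry...........)  ...  [last)
 *
 * The assert on middle/last adjacency checks that the merged hole does not
 * itself touch 'last'; two adjacent holes would mean the list had already
 * lost an entry boundary somewhere.
 */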

void
vm_map_delete_hole(vm_map_t map, vm_map_entry_t hole_entry);
void
vm_map_delete_hole(vm_map_t map, vm_map_entry_t hole_entry)
{
	if (hole_entry == CAST_TO_VM_MAP_ENTRY(map->holes_list)) {
		if (hole_entry->vme_next == CAST_TO_VM_MAP_ENTRY(map->holes_list)) {
			map->holes_list = NULL;
			SAVE_HINT_HOLE_WRITE(map, NULL);
		} else {
			vm_map_entry_t l_next, l_prev;

			l_next = (vm_map_entry_t) map->holes_list->next;
			l_prev = (vm_map_entry_t) map->holes_list->prev;
			map->holes_list = (struct vm_map_links*) l_next;

			l_next->vme_prev = l_prev;
			l_prev->vme_next = l_next;

			SAVE_HINT_HOLE_WRITE(map, (struct vm_map_links*) l_next);
		}
	} else {
		SAVE_HINT_HOLE_WRITE(map, (struct vm_map_links*) hole_entry->vme_prev);

		hole_entry->vme_prev->vme_next = hole_entry->vme_next;
		hole_entry->vme_next->vme_prev = hole_entry->vme_prev;
	}

	hole_entry->vme_next = NULL;
	hole_entry->vme_prev = NULL;
	zfree(vm_map_holes_zone, hole_entry);
}
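
/*
 * Note: the hole list is circular and doubly linked, with map->holes_list
 * pointing at its head, so deletion has three shapes: the last remaining
 * hole (the list becomes NULL), the head hole (the head advances to the
 * next hole), and an interior hole (a simple unlink). In each case the hole
 * hint is rewritten to point at a hole that survives the deletion.
 */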

/*
 * For debugging.
 */

#if DEBUG
static void
check_map_sanity(vm_map_t map, vm_map_entry_t old_hole_entry)
{
	vm_map_entry_t hole_entry, next_hole_entry;
	vm_map_entry_t map_entry, next_map_entry;

	if (map->holes_list == NULL) {
		return;
	}

	hole_entry = (vm_map_entry_t) map->holes_list;
	next_hole_entry = hole_entry->vme_next;

	map_entry = vm_map_first_entry(map);
	next_map_entry = map_entry->vme_next;

	while (map_entry->vme_start > hole_entry->vme_start) {
		hole_entry = next_hole_entry;
		next_hole_entry = hole_entry->vme_next;

		if (hole_entry == (vm_map_entry_t)map->holes_list) {
			break;
		}
	}

	while (map_entry != vm_map_to_entry(map)) {
		if (map_entry->vme_start >= map->max_offset) {
			break;
		}

		if (map_entry->vme_end != map_entry->vme_next->vme_start) {
			if (map_entry->vme_next == vm_map_to_entry(map)) {
				break;
			}

			if (hole_entry->vme_start != map_entry->vme_end) {
				panic("hole_entry not aligned %p(0x%llx), %p (0x%llx), %p", hole_entry, (unsigned long long)hole_entry->vme_start, map_entry->vme_next, (unsigned long long)map_entry->vme_end, old_hole_entry);
				assert(hole_entry->vme_start == map_entry->vme_end);
			}

			if (hole_entry->vme_end != map_entry->vme_next->vme_start) {
				panic("hole_entry not next aligned %p(0x%llx), %p (0x%llx), %p", hole_entry, (unsigned long long)hole_entry->vme_end, map_entry->vme_next, (unsigned long long)map_entry->vme_next->vme_start, old_hole_entry);
				assert(hole_entry->vme_end == map_entry->vme_next->vme_start);
			}

			hole_entry = next_hole_entry;
			next_hole_entry = hole_entry->vme_next;

			if (hole_entry == (vm_map_entry_t)map->holes_list) {
				break;
			}
		}

		map_entry = map_entry->vme_next;
	}
}
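
/*
 * Note: the asserts that follow the panics above are unreachable, since
 * panic() does not return; they read as documentation of the invariant each
 * panic enforces.
 */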

/*
 * For debugging.
 */
static void
copy_hole_info(vm_map_entry_t hole_entry, vm_map_entry_t old_hole_entry)
{
	old_hole_entry->vme_prev = hole_entry->vme_prev;
	old_hole_entry->vme_next = hole_entry->vme_next;
	old_hole_entry->vme_start = hole_entry->vme_start;
	old_hole_entry->vme_end = hole_entry->vme_end;
}
#endif /* DEBUG */

void
update_holes_on_entry_deletion(vm_map_t map, vm_map_entry_t old_entry);
void
update_holes_on_entry_deletion(vm_map_t map, vm_map_entry_t old_entry)
{
	/*
	 * Dealing with the deletion of an old entry.
	 */

	vm_map_entry_t hole_entry, next_hole_entry;
#if DEBUG
	struct vm_map_entry old_hole_entry;
#endif /* DEBUG */
	boolean_t create_new_hole = TRUE;

	hole_entry = CAST_TO_VM_MAP_ENTRY(map->hole_hint);

	if (hole_entry) {
		if (hole_entry->vme_end == old_entry->vme_start) {
			/*
			 * Found a hole right above our entry (it ends where
			 * the entry starts).
			 * Hit.
			 */
		} else if (hole_entry->vme_start == old_entry->vme_end) {
			if (hole_entry != CAST_TO_VM_MAP_ENTRY(map->holes_list)) {
				/*
				 * Found a hole right below our entry (it starts
				 * where the entry ends). Back up one hole so we
				 * don't erroneously extend backwards past the
				 * head of the list.
				 *
				 * Hit.
				 */

				hole_entry = hole_entry->vme_prev;
			}
		} else if (hole_entry->vme_start > old_entry->vme_end) {
			/*
			 * Useless hint. Start from the top.
			 */

			hole_entry = CAST_TO_VM_MAP_ENTRY(map->holes_list);
		}

		if (hole_entry != CAST_TO_VM_MAP_ENTRY(map->holes_list)) {
			if (hole_entry->vme_start > old_entry->vme_start) {
				panic("Hole hint failed: Hole entry start: 0x%llx, entry start: 0x%llx, map hole start: 0x%llx, map hint start: 0x%llx\n",
				    (unsigned long long)hole_entry->vme_start,
				    (unsigned long long)old_entry->vme_start,
				    (unsigned long long)map->holes_list->start,
				    (unsigned long long)map->hole_hint->start);
			}
			if (hole_entry->vme_end > old_entry->vme_start) {
				panic("Hole hint failed: Hole entry end: 0x%llx, entry start: 0x%llx, map hole start: 0x%llx, map hint start: 0x%llx\n",
				    (unsigned long long)hole_entry->vme_end,
				    (unsigned long long)old_entry->vme_start,
				    (unsigned long long)map->holes_list->start,
				    (unsigned long long)map->hole_hint->start);
			}
		}

		while (1) {
			next_hole_entry = hole_entry->vme_next;

			/*
			 * Hole is right above the entry.
			 */
			if (hole_entry->vme_end == old_entry->vme_start) {
#if DEBUG
				copy_hole_info(hole_entry, &old_hole_entry);
#endif /* DEBUG */

				/*
				 * Is there another hole right below the entry?
				 * Can we combine holes?
				 */

				if (old_entry->vme_end == hole_entry->vme_next->vme_start) {
					vm_map_combine_hole(map, hole_entry);
				} else {
					hole_entry->vme_end = old_entry->vme_end;
				}
				create_new_hole = FALSE;
#if DEBUG
				check_map_sanity(map, &old_hole_entry);
#endif /* DEBUG */
				break;
			}

			/*
			 * Hole is right below the entry.
			 */
			if (hole_entry->vme_start == old_entry->vme_end) {
#if DEBUG
				copy_hole_info(hole_entry, &old_hole_entry);
#endif /* DEBUG */

				hole_entry->vme_start = old_entry->vme_start;
				create_new_hole = FALSE;

#if DEBUG
				check_map_sanity(map, &old_hole_entry);
#endif /* DEBUG */
				break;
			}

			/*
			 * Hole is beyond our entry. Let's go back to the last hole
			 * before our entry so we have the right place to link up the
			 * new hole that will be needed.
			 */
			if (hole_entry->vme_start > old_entry->vme_end) {
#if DEBUG
				copy_hole_info(hole_entry, &old_hole_entry);
#endif /* DEBUG */

				if (hole_entry != CAST_TO_VM_MAP_ENTRY(map->holes_list)) {
					assert(hole_entry->vme_start != old_entry->vme_start);
					hole_entry = hole_entry->vme_prev;
				}
				break;
			}

			hole_entry = next_hole_entry;

			if (hole_entry == CAST_TO_VM_MAP_ENTRY(map->holes_list)) {
				hole_entry = hole_entry->vme_prev;
				break;
			}
		}
	}

	if (create_new_hole) {
		struct vm_map_links *new_hole_entry = NULL;
		vm_map_entry_t l_next, l_prev;

		new_hole_entry = zalloc(vm_map_holes_zone);

		/*
		 * First hole in the map?
		 * OR
		 * A hole that is located above the current first hole in the map?
		 */
		if (map->holes_list == NULL || (hole_entry == CAST_TO_VM_MAP_ENTRY(map->holes_list) && hole_entry->vme_start > old_entry->vme_start)) {
			if (map->holes_list == NULL) {
				map->holes_list = new_hole_entry;
				new_hole_entry->prev = new_hole_entry->next = CAST_TO_VM_MAP_ENTRY(map->holes_list);
			} else {
				l_next = CAST_TO_VM_MAP_ENTRY(map->holes_list);
				l_prev = map->holes_list->prev;
				map->holes_list = new_hole_entry;
				new_hole_entry->next = l_next;
				new_hole_entry->prev = l_prev;

				l_prev->vme_next = l_next->vme_prev = CAST_TO_VM_MAP_ENTRY(new_hole_entry);
			}
		} else {
			l_next = hole_entry->vme_next;
			l_prev = hole_entry->vme_next->vme_prev;

			new_hole_entry->prev = hole_entry;
			new_hole_entry->next = l_next;

			hole_entry->vme_next = CAST_TO_VM_MAP_ENTRY(new_hole_entry);
			l_next->vme_prev = CAST_TO_VM_MAP_ENTRY(new_hole_entry);
		}

		new_hole_entry->start = old_entry->vme_start;
		new_hole_entry->end = old_entry->vme_end;

		hole_entry = CAST_TO_VM_MAP_ENTRY(new_hole_entry);

		assert(new_hole_entry->start < new_hole_entry->end);
	}

#if DEBUG
	check_map_sanity(map, &old_hole_entry);
#endif /* DEBUG */

	SAVE_HINT_HOLE_WRITE(map, (struct vm_map_links*) hole_entry);
	return;
}
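
/*
 * A sketch of the deletion cases handled above, where AAAA is the range of
 * the deleted entry, '#' is mapped space and '.' is a hole:
 *
 *	hole above:     ....AAAA####  ->  ........####   (grow the hole's end)
 *	hole below:     ####AAAA....  ->  ####........   (grow the hole's start)
 *	both sides:     ....AAAA....  ->  ............   (combine the two holes)
 *	neither side:   ####AAAA####  ->  ####....####   (allocate a new hole)
 */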

void
update_holes_on_entry_creation(vm_map_t map, vm_map_entry_t new_entry);
void
update_holes_on_entry_creation(vm_map_t map, vm_map_entry_t new_entry)
{
	vm_map_entry_t hole_entry, next_hole_entry;
#if DEBUG
	struct vm_map_entry old_hole_entry;
	vm_map_entry_t tmp_entry;
	boolean_t check_map_with_hole_sanity = TRUE;
#endif /* DEBUG */

	/*
	 * Case A: The entry is aligned exactly with the start and end of the hole.
	 * This will delete the hole.
	 *
	 * Case B: The entry is completely within a hole but NOT aligned with the start/end of the hole.
	 * This will split a hole.
	 *
	 * Case C: The entry overlaps one end of the hole, extending upwards (C1) or downwards (C2).
	 * This will shrink the hole, or delete it completely if the hole is smaller than the entry.
	 */
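
	/*
	 * A sketch of the cases, where NNNN is the new entry's range and
	 * '....' the hole it lands in:
	 *
	 *	Case A:   NNNN  over  ....  (exact match)      -> delete the hole
	 *	Case B:    NN  inside ....  (strictly within)  -> split into two holes
	 *	Case C1:  NNNN covering the hole's start       -> raise the hole's start
	 *	Case C2:  NNNN covering the hole's end         -> lower the hole's end
	 *	(in C1/C2 the hole is deleted outright if the entry swallows it)
	 */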

	hole_entry = CAST_TO_VM_MAP_ENTRY(map->holes_list);
	assert(hole_entry);
	next_hole_entry = hole_entry->vme_next;

	while (1) {
#if DEBUG
		/*
		 * If the entry doesn't exist in the RB tree, we are likely dealing
		 * with copy maps, where the entries belonging to the copy map are
		 * linked into the list of entries silently and then added to the
		 * RB tree later on. Sanity checks are useless in that case.
		 */
		check_map_with_hole_sanity = vm_map_lookup_entry(map, new_entry->vme_start, &tmp_entry);
#endif /* DEBUG */

		if (hole_entry->vme_start == new_entry->vme_start &&
		    hole_entry->vme_end == new_entry->vme_end) {
			/* Case A */
#if DEBUG
			copy_hole_info(hole_entry, &old_hole_entry);
#endif /* DEBUG */

			/*
			 * This check makes sense only for regular maps, not copy maps.
			 * With a regular map, the VM entry is first linked and then
			 * the hole is deleted. So the check below, which makes sure that
			 * the map's bounds are being respected, is valid.
			 * But for copy maps, the hole is deleted before the VM entry is
			 * linked (vm_map_store_copy_insert) and so this check is invalid.
			 *
			 * if (hole_entry == (vm_map_entry_t) map->holes_list) {
			 *
			 *	if (hole_entry->vme_next == (vm_map_entry_t) map->holes_list) {
			 *
			 *		next_hole_entry = vm_map_last_entry(map);
			 *		assert(next_hole_entry->vme_end >= map->max_offset);
			 *	}
			 * }
			 */

			vm_map_delete_hole(map, hole_entry);

#if DEBUG
			if (check_map_with_hole_sanity) {
				check_map_sanity(map, &old_hole_entry);
			}
#endif /* DEBUG */
			return;
		} else if (hole_entry->vme_start < new_entry->vme_start &&
		    hole_entry->vme_end > new_entry->vme_end) {
			/* Case B */
			struct vm_map_links *new_hole_entry = NULL;

			new_hole_entry = zalloc(vm_map_holes_zone);

#if DEBUG
			copy_hole_info(hole_entry, &old_hole_entry);
#endif /* DEBUG */

			new_hole_entry->prev = hole_entry;
			new_hole_entry->next = hole_entry->vme_next;
			hole_entry->vme_next->vme_prev = CAST_TO_VM_MAP_ENTRY(new_hole_entry);
			hole_entry->vme_next = CAST_TO_VM_MAP_ENTRY(new_hole_entry);

			new_hole_entry->start = new_entry->vme_end;
			new_hole_entry->end = hole_entry->vme_end;
			hole_entry->vme_end = new_entry->vme_start;

			assert(hole_entry->vme_start < hole_entry->vme_end);
			assert(new_hole_entry->start < new_hole_entry->end);

#if DEBUG
			if (check_map_with_hole_sanity) {
				check_map_sanity(map, &old_hole_entry);
			}
#endif /* DEBUG */

			SAVE_HINT_HOLE_WRITE(map, (struct vm_map_links*) hole_entry);
			return;
		} else if ((new_entry->vme_start <= hole_entry->vme_start) && (hole_entry->vme_start < new_entry->vme_end)) {
			/*
			 * Case C1: The entry grows upward and a partial or full hole
			 * lies within its bounds.
			 */

#if DEBUG
			copy_hole_info(hole_entry, &old_hole_entry);
#endif /* DEBUG */

			if (hole_entry->vme_end <= new_entry->vme_end) {
				vm_map_delete_hole(map, hole_entry);
			} else {
				hole_entry->vme_start = new_entry->vme_end;
				SAVE_HINT_HOLE_WRITE(map, (struct vm_map_links*) hole_entry);
			}

#if DEBUG
			if (check_map_with_hole_sanity) {
				check_map_sanity(map, &old_hole_entry);
			}
#endif /* DEBUG */

			return;
		} else if ((new_entry->vme_start < hole_entry->vme_end) && (hole_entry->vme_end <= new_entry->vme_end)) {
			/*
			 * Case C2: The entry grows downward and a partial or full hole
			 * lies within its bounds.
			 */

#if DEBUG
			copy_hole_info(hole_entry, &old_hole_entry);
#endif /* DEBUG */

			if (hole_entry->vme_start >= new_entry->vme_start) {
				vm_map_delete_hole(map, hole_entry);
			} else {
				hole_entry->vme_end = new_entry->vme_start;
				SAVE_HINT_HOLE_WRITE(map, (struct vm_map_links*) hole_entry);
			}

#if DEBUG
			if (check_map_with_hole_sanity) {
				check_map_sanity(map, &old_hole_entry);
			}
#endif /* DEBUG */

			return;
		}

		hole_entry = next_hole_entry;
		next_hole_entry = hole_entry->vme_next;

		if (hole_entry == CAST_TO_VM_MAP_ENTRY(map->holes_list)) {
			break;
		}
	}

	panic("Illegal action: h1: %p, s:0x%llx, e:0x%llx...h2:%p, s:0x%llx, e:0x%llx...h3:0x%p, s:0x%llx, e:0x%llx\n",
	    hole_entry->vme_prev,
	    (unsigned long long)hole_entry->vme_prev->vme_start,
	    (unsigned long long)hole_entry->vme_prev->vme_end,
	    hole_entry,
	    (unsigned long long)hole_entry->vme_start,
	    (unsigned long long)hole_entry->vme_end,
	    hole_entry->vme_next,
	    (unsigned long long)hole_entry->vme_next->vme_start,
	    (unsigned long long)hole_entry->vme_next->vme_end);
}

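/*
 * Note: update_first_free_rb() is this store's hook for keeping the hole
 * list (used for fast free-space lookups) consistent with the entry list;
 * it runs after an entry has been linked into or unlinked from the map.
 */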
void
update_first_free_rb(vm_map_t map, vm_map_entry_t entry, boolean_t new_entry_creation)
{
	if (map->holelistenabled) {
		/*
		 * Holes can be used to track ranges all the way up to
		 * MACH_VM_MAX_ADDRESS or more (e.g. the kernel map).
		 */
		vm_map_offset_t max_valid_offset = (map->max_offset > MACH_VM_MAX_ADDRESS) ? map->max_offset : MACH_VM_MAX_ADDRESS;

		/*
		 * Clipping an entry will not result in the creation/deletion/modification
		 * of a hole. Those calls pass NULL for their target entry.
		 */
		if (entry == NULL) {
			return;
		}

		/*
		 * The commpage is pinned beyond the map's max offset. That shouldn't
		 * affect the holes within the bounds of the map.
		 */
		if (vm_map_trunc_page(entry->vme_start, VM_MAP_PAGE_MASK(map)) >= max_valid_offset) {
			return;
		}

		/*
		 * Note:
		 *
		 * - A new entry has already been added to the map, OR
		 * - An old entry has already been deleted from the map.
		 *
		 * We are updating the hole list after the fact (except in one
		 * special case involving copy maps).
		 */

		if (new_entry_creation) {
			update_holes_on_entry_creation(map, entry);
		} else {
			update_holes_on_entry_deletion(map, entry);
		}
	}
}