/*
 * Copyright (c) 2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <vm/vm_map_store_rb.h>

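/*
 * Instantiate the red-black tree operations (insert, remove, find, ...)
 * for "struct rb_head" trees of "struct vm_map_store" nodes, ordered by
 * rb_node_compare() below.  RB_GENERATE is one of the BSD tree.h-style
 * macros pulled in via vm_map_store_rb.h.
 */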
RB_GENERATE(rb_head, vm_map_store, entry, rb_node_compare);

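/*
 * A vm_map_store is embedded in its vm_map_entry, immediately after the
 * vm_map_links that sit at the start of the entry, so subtracting the
 * size of the links recovers the enclosing vm_map_entry from a tree node.
 * (This mirrors the layout of struct vm_map_entry in vm_map.h.)
 */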
#define VME_FOR_STORE( store) \
    (vm_map_entry_t)(((unsigned long)store) - ((unsigned long)sizeof(struct vm_map_links)))

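/*
 * Initialize the (empty) red-black tree hanging off a map header.
 */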
void
vm_map_store_init_rb( struct vm_map_header* hdr )
{
    RB_INIT(&(hdr->rb_head_store));
}

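/*
 * Tree ordering predicate.  Entries are keyed by their address range:
 * return -1 if "node" starts below "parent", 1 if it starts at or above
 * the end of "parent", and 0 if the start of "node" falls inside
 * [vme_start, vme_end) of "parent".  Returning 0 for any overlap is what
 * lets RB_FIND locate the entry containing a given address and what makes
 * RB_INSERT report a collision for overlapping ranges.
 */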
int rb_node_compare(struct vm_map_store *node, struct vm_map_store *parent)
{
    vm_map_entry_t vme_c;
    vm_map_entry_t vme_p;

    vme_c = VME_FOR_STORE(node);
    vme_p = VME_FOR_STORE(parent);
    if (vme_c->vme_start < vme_p->vme_start)
        return -1;
    if (vme_c->vme_start >= vme_p->vme_end)
        return 1;
    return 0;
}

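/*
 * Debugging aid, presumably invoked when the entry list and the tree
 * disagree about an entry.  Looks the entry up in the tree and panics
 * unconditionally, reporting either the missing entry or the node that
 * was found along with its left and right children.
 */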
void vm_map_store_walk_rb( vm_map_t map, vm_map_entry_t *wrong_vme, vm_map_entry_t *vm_entry)
{
    struct vm_map_header hdr = map->hdr;
    struct vm_map_store *rb_entry = RB_ROOT(&(hdr.rb_head_store));
    vm_map_entry_t cur = *vm_entry;

    rb_entry = RB_FIND( rb_head, &(hdr.rb_head_store), &(cur->store));
    if(rb_entry == NULL)
        panic("NO SUCH ENTRY %p. Gave back %p", *vm_entry, *wrong_vme);
    else
        panic("Cur: %p, L: %p, R: %p", VME_FOR_STORE(rb_entry), VME_FOR_STORE(RB_LEFT(rb_entry,entry)), VME_FOR_STORE(RB_RIGHT(rb_entry,entry)));
}


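/*
 * Find the map entry containing "address".  On a hit, *vm_entry is set to
 * that entry and TRUE is returned.  On a miss, *vm_entry is set to the
 * closest preceding entry (or the map header if the address lies below
 * every entry) and FALSE is returned.  The walk keeps "prev" current
 * whenever it descends right, so the predecessor is already known when
 * the search bottoms out.
 */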
boolean_t vm_map_store_lookup_entry_rb( vm_map_t map, vm_map_offset_t address, vm_map_entry_t *vm_entry)
{
    struct vm_map_header hdr = map->hdr;
    struct vm_map_store *rb_entry = RB_ROOT(&(hdr.rb_head_store));
    vm_map_entry_t cur = vm_map_to_entry(map);
    vm_map_entry_t prev = VM_MAP_ENTRY_NULL;

    while (rb_entry != (struct vm_map_store*)NULL) {
        cur = VME_FOR_STORE(rb_entry);
        if(cur == VM_MAP_ENTRY_NULL)
            panic("no entry");
        if (address >= cur->vme_start) {
            if (address < cur->vme_end) {
                *vm_entry = cur;
                return TRUE;
            }
            rb_entry = RB_RIGHT(rb_entry, entry);
            prev = cur;
        } else {
            rb_entry = RB_LEFT(rb_entry, entry);
        }
    }
    if( prev == VM_MAP_ENTRY_NULL){
        prev = vm_map_to_entry(map);
    }
    *vm_entry = prev;
    return FALSE;
}

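/*
 * Insert an entry's embedded store node into the map header's tree.  The
 * list position ("after_where") is irrelevant here since the tree orders
 * itself by address; a non-NULL return from RB_INSERT means an existing
 * entry already covers this range, which is fatal.
 */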
void vm_map_store_entry_link_rb( struct vm_map_header *mapHdr, __unused vm_map_entry_t after_where, vm_map_entry_t entry)
{
    struct rb_head *rbh = &(mapHdr->rb_head_store);
    struct vm_map_store *store = &(entry->store);
    struct vm_map_store *tmp_store;
    if((tmp_store = RB_INSERT( rb_head, rbh, store )) != NULL) {
        panic("VMSEL: INSERT FAILED: 0x%lx, 0x%lx, 0x%lx, 0x%lx", (uintptr_t)entry->vme_start, (uintptr_t)entry->vme_end,
            (uintptr_t)(VME_FOR_STORE(tmp_store))->vme_start, (uintptr_t)(VME_FOR_STORE(tmp_store))->vme_end);
    }
}

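/*
 * Remove an entry's store node from the tree, panicking if the entry is
 * not actually present.
 */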
void vm_map_store_entry_unlink_rb( struct vm_map_header *mapHdr, vm_map_entry_t entry)
{
    struct rb_head *rbh = &(mapHdr->rb_head_store);
    struct vm_map_store *rb_entry;
    struct vm_map_store *store = &(entry->store);

    rb_entry = RB_FIND( rb_head, rbh, store);
    if(rb_entry == NULL)
        panic("NO ENTRY TO DELETE");
    RB_REMOVE( rb_head, rbh, store );
}

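/*
 * Bulk-insert the entries of a vm_map_copy into the map's tree.  The
 * entries are already chained on the copy's linked list; this walks that
 * list and adds each node to the tree, panicking with full context if any
 * insertion collides with an existing range.  Under
 * MAP_ENTRY_INSERTION_DEBUG, a backtrace of the inserting code path is
 * recorded in each entry.
 */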
void vm_map_store_copy_insert_rb( vm_map_t map, __unused vm_map_entry_t after_where, vm_map_copy_t copy)
{
    struct vm_map_header *mapHdr = &(map->hdr);
    struct rb_head *rbh = &(mapHdr->rb_head_store);
    struct vm_map_store *store;
    vm_map_entry_t entry = vm_map_copy_first_entry(copy);
    int inserted=0, nentries = copy->cpy_hdr.nentries;

    while (entry != vm_map_copy_to_entry(copy) && nentries > 0) {
        vm_map_entry_t prev = entry;
        store = &(entry->store);
        if( RB_INSERT( rb_head, rbh, store ) != NULL){
            panic("VMSCIR1: INSERT FAILED: %d: %p, %p, %p, 0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%lx",inserted, prev, entry, vm_map_copy_to_entry(copy),
                (uintptr_t)prev->vme_start, (uintptr_t)prev->vme_end, (uintptr_t)entry->vme_start, (uintptr_t)entry->vme_end,
                (uintptr_t)(VME_FOR_STORE(rbh->rbh_root))->vme_start, (uintptr_t)(VME_FOR_STORE(rbh->rbh_root))->vme_end);
        } else {
#if MAP_ENTRY_INSERTION_DEBUG
            fastbacktrace(&entry->vme_insertion_bt[0],
                (sizeof (entry->vme_insertion_bt) / sizeof (uintptr_t)));
#endif
            entry = entry->vme_next;
            inserted++;
            nentries--;
        }
    }
}

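/*
 * Undo a bulk insert: remove up to "nentries" entries, starting at
 * "entry", from the copy's own tree.  The entries stay on the copy's
 * linked list; only the tree linkage is torn down.
 */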
void
vm_map_store_copy_reset_rb( vm_map_copy_t copy, vm_map_entry_t entry, int nentries )
{
    struct vm_map_header *mapHdr = &(copy->cpy_hdr);
    struct rb_head *rbh = &(mapHdr->rb_head_store);
    struct vm_map_store *store;
    int deleted=0;

    while (entry != vm_map_copy_to_entry(copy) && nentries > 0) {
        store = &(entry->store);
        RB_REMOVE( rb_head, rbh, store );
        entry = entry->vme_next;
        deleted++;
        nentries--;
    }
}

extern zone_t vm_map_holes_zone; /* zone for vm map holes (vm_map_links) structures */

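/*
 * Merge a hole with the hole immediately following it in the hole list.
 * The first hole absorbs the address range of its successor ("middle"),
 * the successor is unlinked from the doubly-linked hole list, and its
 * links structure is returned to the holes zone.
 */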
void
vm_map_combine_hole(vm_map_t map, vm_map_entry_t hole_entry);
void
vm_map_combine_hole(__unused vm_map_t map, vm_map_entry_t hole_entry)
{

    vm_map_entry_t middle_hole_entry, last_hole_entry;

    hole_entry->vme_end = hole_entry->vme_next->vme_end;

    middle_hole_entry = hole_entry->vme_next;
    last_hole_entry = middle_hole_entry->vme_next;

    assert(last_hole_entry->vme_prev == middle_hole_entry);
    assert(middle_hole_entry->vme_end != last_hole_entry->vme_start);

    last_hole_entry->vme_prev = hole_entry;
    hole_entry->vme_next = last_hole_entry;

    middle_hole_entry->vme_prev = NULL;
    middle_hole_entry->vme_next = NULL;

    zfree(vm_map_holes_zone, middle_hole_entry);

    assert(hole_entry->vme_start < hole_entry->vme_end);
    assert(last_hole_entry->vme_start < last_hole_entry->vme_end);
}


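/*
 * Unlink a hole from the map's hole list and free it.  Deleting the head
 * of the list needs extra care: if it is the only hole, the list becomes
 * empty and the hole hint is cleared; otherwise the head pointer moves to
 * the next hole.  The hole hint is updated in every case.
 */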
void
vm_map_delete_hole(vm_map_t map, vm_map_entry_t hole_entry);
void
vm_map_delete_hole(vm_map_t map, vm_map_entry_t hole_entry)
{
    if (hole_entry == (vm_map_entry_t) map->holes_list) {

        if (hole_entry->vme_next == (vm_map_entry_t) map->holes_list) {

            map->holes_list = NULL;
            SAVE_HINT_HOLE_WRITE(map, NULL);
        } else {

            vm_map_entry_t l_next, l_prev;

            l_next = (vm_map_entry_t) map->holes_list->next;
            l_prev = (vm_map_entry_t) map->holes_list->prev;
            map->holes_list = (struct vm_map_links*) l_next;

            l_next->vme_prev = l_prev;
            l_prev->vme_next = l_next;

            SAVE_HINT_HOLE_WRITE(map, (struct vm_map_links*) l_next);
        }
    } else {

        SAVE_HINT_HOLE_WRITE(map, (struct vm_map_links*) hole_entry->vme_prev);

        hole_entry->vme_prev->vme_next = hole_entry->vme_next;
        hole_entry->vme_next->vme_prev = hole_entry->vme_prev;
    }

    hole_entry->vme_next = NULL;
    hole_entry->vme_prev = NULL;
    zfree(vm_map_holes_zone, hole_entry);
}


/*
 * For debugging.
 */

#if DEBUG
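/*
 * Walk the hole list in lockstep with the entry list and check that every
 * gap between consecutive entries is described by a hole whose start and
 * end addresses line up with the surrounding entries.
 */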
static void
check_map_sanity(vm_map_t map, vm_map_entry_t old_hole_entry)
{
    vm_map_entry_t hole_entry, next_hole_entry;
    vm_map_entry_t map_entry, next_map_entry;

    if (map->holes_list == NULL) {

        return;
    }

    hole_entry = (vm_map_entry_t) map->holes_list;
    next_hole_entry = hole_entry->vme_next;

    map_entry = vm_map_first_entry(map);
    next_map_entry = map_entry->vme_next;

    while(map_entry->vme_start > hole_entry->vme_start) {
        hole_entry = next_hole_entry;
        next_hole_entry = hole_entry->vme_next;

        if (hole_entry == (vm_map_entry_t)map->holes_list)
            break;
    }

    while (map_entry != vm_map_to_entry(map)) {

        if (map_entry->vme_start >= map->max_offset)
            break;

        if (map_entry->vme_end != map_entry->vme_next->vme_start) {

            if (map_entry->vme_next == vm_map_to_entry(map))
                break;

            if (hole_entry->vme_start != map_entry->vme_end) {
                panic("hole_entry not aligned %p(0x%llx), %p (0x%llx), %p", hole_entry, (unsigned long long)hole_entry->vme_start, map_entry->vme_next, (unsigned long long)map_entry->vme_end, old_hole_entry);
                assert(hole_entry->vme_start == map_entry->vme_end);
            }

            if (hole_entry->vme_end != map_entry->vme_next->vme_start) {
                panic("hole_entry not next aligned %p(0x%llx), %p (0x%llx), %p", hole_entry, (unsigned long long)hole_entry->vme_end, map_entry->vme_next, (unsigned long long)map_entry->vme_next->vme_start, old_hole_entry);
                assert(hole_entry->vme_end == map_entry->vme_next->vme_start);
            }

            hole_entry = next_hole_entry;
            next_hole_entry = hole_entry->vme_next;

            if (hole_entry == (vm_map_entry_t)map->holes_list)
                break;
        }

        map_entry = map_entry->vme_next;
    }
}

/*
 * For debugging.
 */
static void
copy_hole_info(vm_map_entry_t hole_entry, vm_map_entry_t old_hole_entry)
{
    old_hole_entry->vme_prev = hole_entry->vme_prev;
    old_hole_entry->vme_next = hole_entry->vme_next;
    old_hole_entry->vme_start = hole_entry->vme_start;
    old_hole_entry->vme_end = hole_entry->vme_end;
}
#endif /* DEBUG */

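/*
 * An entry is being removed from the map, so the address range it covered
 * becomes a hole.  Either grow an adjacent hole (coalescing two holes if
 * the deleted entry exactly bridged them), or allocate and link a brand
 * new hole.  The hole hint is consulted first to avoid walking the whole
 * hole list, and is updated before returning.
 */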
void
update_holes_on_entry_deletion(vm_map_t map, vm_map_entry_t old_entry);
void
update_holes_on_entry_deletion(vm_map_t map, vm_map_entry_t old_entry)
{
    /*
     * Dealing with the deletion of an older entry.
     */

    vm_map_entry_t hole_entry, next_hole_entry;
#if DEBUG
    struct vm_map_entry old_hole_entry;
#endif /* DEBUG */
    boolean_t create_new_hole = TRUE;

    hole_entry = (vm_map_entry_t) map->hole_hint;

    if (hole_entry) {

        if (hole_entry->vme_end == old_entry->vme_start) {
            /*
             * Found a hole ending right at the start of our entry.
             * Hit.
             */

        } else if (hole_entry->vme_start == old_entry->vme_end) {

            if (hole_entry != (vm_map_entry_t) map->holes_list) {

                /*
                 * Found a hole starting right at the end of our entry.
                 * Step back one hole so the scan below starts before the
                 * entry, but never step back past the head of the list,
                 * which would erroneously extend backwards.
                 *
                 * Hit.
                 */

                hole_entry = hole_entry->vme_prev;
            }

        } else if (hole_entry->vme_start > old_entry->vme_end) {

            /*
             * Useless hint. Start from the top.
             */

            hole_entry = (vm_map_entry_t) map->holes_list;
        }

        if (hole_entry != (vm_map_entry_t) map->holes_list) {
            if (hole_entry->vme_start > old_entry->vme_start) {
                panic("Hole hint failed: Hole entry start: 0x%llx, entry start: 0x%llx, map hole start: 0x%llx, map hint start: 0x%llx\n",
                    (unsigned long long)hole_entry->vme_start,
                    (unsigned long long)old_entry->vme_start,
                    (unsigned long long)map->holes_list->start,
                    (unsigned long long)map->hole_hint->start);
            }
            if (hole_entry->vme_end > old_entry->vme_start) {
                panic("Hole hint failed: Hole entry end: 0x%llx, entry start: 0x%llx, map hole start: 0x%llx, map hint start: 0x%llx\n",
                    (unsigned long long)hole_entry->vme_end,
                    (unsigned long long)old_entry->vme_start,
                    (unsigned long long)map->holes_list->start,
                    (unsigned long long)map->hole_hint->start);
            }
        }

        while (1) {

            next_hole_entry = hole_entry->vme_next;

            /*
             * Hole is right above the entry.
             */
            if (hole_entry->vme_end == old_entry->vme_start) {

#if DEBUG
                copy_hole_info(hole_entry, &old_hole_entry);
#endif /* DEBUG */

                /*
                 * Is there another hole right below the entry?
                 * Can we combine holes?
                 */

                if (old_entry->vme_end == hole_entry->vme_next->vme_start) {

                    vm_map_combine_hole(map, hole_entry);
                } else {

                    hole_entry->vme_end = old_entry->vme_end;
                }
                create_new_hole = FALSE;
#if DEBUG
                check_map_sanity(map, &old_hole_entry);
#endif /* DEBUG */
                break;
            }

            /*
             * Hole is right below the entry.
             */
            if (hole_entry->vme_start == old_entry->vme_end) {

#if DEBUG
                copy_hole_info(hole_entry, &old_hole_entry);
#endif /* DEBUG */

                hole_entry->vme_start = old_entry->vme_start;
                create_new_hole = FALSE;

#if DEBUG
                check_map_sanity(map, &old_hole_entry);
#endif /* DEBUG */
                break;
            }

            /*
             * Hole is beyond our entry. Let's go back to the last hole
             * before our entry so we have the right place to link up the
             * new hole that will be needed.
             */
            if (hole_entry->vme_start > old_entry->vme_end) {

#if DEBUG
                copy_hole_info(hole_entry, &old_hole_entry);
#endif /* DEBUG */

                if (hole_entry != (vm_map_entry_t) map->holes_list) {
                    assert(hole_entry->vme_start != old_entry->vme_start);
                    hole_entry = hole_entry->vme_prev;
                }
                break;
            }

            hole_entry = next_hole_entry;

            if (hole_entry == (vm_map_entry_t)map->holes_list) {
                hole_entry = hole_entry->vme_prev;
                break;
            }
        }
    }

    if (create_new_hole) {
        struct vm_map_links *new_hole_entry = NULL;
        vm_map_entry_t l_next, l_prev;

        new_hole_entry = zalloc(vm_map_holes_zone);

        /*
         * First hole in the map?
         * OR
         * A hole that is located above the current first hole in the map?
         */
        if (map->holes_list == NULL || (hole_entry == (vm_map_entry_t) map->holes_list && hole_entry->vme_start > old_entry->vme_start)) {

            if (map->holes_list == NULL) {

                map->holes_list = new_hole_entry;
                new_hole_entry->prev = new_hole_entry->next = (vm_map_entry_t)map->holes_list;
            } else {

                l_next = (vm_map_entry_t) map->holes_list;
                l_prev = map->holes_list->prev;
                map->holes_list = new_hole_entry;
                new_hole_entry->next = l_next;
                new_hole_entry->prev = l_prev;

                l_prev->vme_next = l_next->vme_prev = (vm_map_entry_t) new_hole_entry;
            }
        } else {

            l_next = hole_entry->vme_next;
            l_prev = hole_entry->vme_next->vme_prev;

            new_hole_entry->prev = hole_entry;
            new_hole_entry->next = l_next;

            hole_entry->vme_next = (vm_map_entry_t)new_hole_entry;
            l_next->vme_prev = (vm_map_entry_t) new_hole_entry;
        }

        new_hole_entry->start = old_entry->vme_start;
        new_hole_entry->end = old_entry->vme_end;

        hole_entry = (vm_map_entry_t) new_hole_entry;

        assert(new_hole_entry->start < new_hole_entry->end);
    }

#if DEBUG
    check_map_sanity(map, &old_hole_entry);
#endif /* DEBUG */

    SAVE_HINT_HOLE_WRITE(map, (struct vm_map_links*) hole_entry);
    return;
}


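/*
 * An entry is being added to the map, so it consumes part or all of one
 * of the existing holes.  Find the affected hole and shrink, split, or
 * delete it as described by the case analysis below; panic if no hole
 * covers the new entry.
 */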
void
update_holes_on_entry_creation(vm_map_t map, vm_map_entry_t new_entry);
void
update_holes_on_entry_creation(vm_map_t map, vm_map_entry_t new_entry)
{

    vm_map_entry_t hole_entry, next_hole_entry;
#if DEBUG
    struct vm_map_entry old_hole_entry;
    vm_map_entry_t tmp_entry;
    boolean_t check_map_with_hole_sanity = TRUE;
#endif /* DEBUG */

    /*
     * Case A: The entry is aligned exactly with the start and end of the hole.
     *         This will delete the hole.
     *
     * Case B: The entry is completely within a hole but NOT aligned with the start/end of the hole.
     *         This will split a hole.
     *
     * Case C: The entry overlaps with the hole. The entry could be extending upwards (C1) or downwards (C2).
     *         This will reduce the size of the hole or delete the hole completely if it is smaller than the entry.
     */

    hole_entry = (vm_map_entry_t) map->holes_list;
    assert(hole_entry);
    next_hole_entry = hole_entry->vme_next;

    while (1) {

#if DEBUG
        /*
         * If the entry doesn't exist in the RB tree, we are likely dealing with copy maps where
         * the entries belonging to the copy map are linked into the list of entries silently and
         * then added to the RB-tree later on.
         * So sanity checks are useless in that case.
         */
        check_map_with_hole_sanity = vm_map_lookup_entry(map, new_entry->vme_start, &tmp_entry);
#endif /* DEBUG */

        if (hole_entry->vme_start == new_entry->vme_start &&
            hole_entry->vme_end == new_entry->vme_end) {

            /* Case A */
#if DEBUG
            copy_hole_info(hole_entry, &old_hole_entry);
#endif /* DEBUG */

            if (hole_entry == (vm_map_entry_t) map->holes_list) {

                if (hole_entry->vme_next == (vm_map_entry_t) map->holes_list) {

                    next_hole_entry = vm_map_last_entry(map);
                    assert(next_hole_entry->vme_end >= map->max_offset);
                }
            }

            vm_map_delete_hole(map, hole_entry);

#if DEBUG
            if (check_map_with_hole_sanity)
                check_map_sanity(map, &old_hole_entry);
#endif /* DEBUG */
            return;

        } else if (hole_entry->vme_start < new_entry->vme_start &&
                   hole_entry->vme_end > new_entry->vme_end) {

            /* Case B */
            struct vm_map_links *new_hole_entry = NULL;

            new_hole_entry = zalloc(vm_map_holes_zone);

#if DEBUG
            copy_hole_info(hole_entry, &old_hole_entry);
#endif /* DEBUG */

            new_hole_entry->prev = hole_entry;
            new_hole_entry->next = hole_entry->vme_next;
            hole_entry->vme_next->vme_prev = (vm_map_entry_t)new_hole_entry;
            hole_entry->vme_next = (vm_map_entry_t)new_hole_entry;

            new_hole_entry->start = new_entry->vme_end;
            new_hole_entry->end = hole_entry->vme_end;
            hole_entry->vme_end = new_entry->vme_start;

            assert(hole_entry->vme_start < hole_entry->vme_end);
            assert(new_hole_entry->start < new_hole_entry->end);

#if DEBUG
            if (check_map_with_hole_sanity)
                check_map_sanity(map, &old_hole_entry);
#endif /* DEBUG */

            SAVE_HINT_HOLE_WRITE(map, (struct vm_map_links*) hole_entry);
            return;

        } else if ((new_entry->vme_start <= hole_entry->vme_start) && (hole_entry->vme_start < new_entry->vme_end)) {

            /*
             * Case C1: Entry moving upwards and a part/full hole lies within the bounds of the entry.
             */

#if DEBUG
            copy_hole_info(hole_entry, &old_hole_entry);
#endif /* DEBUG */

            if (hole_entry->vme_end <= new_entry->vme_end) {

                vm_map_delete_hole(map, hole_entry);
            } else {
                hole_entry->vme_start = new_entry->vme_end;
                SAVE_HINT_HOLE_WRITE(map, (struct vm_map_links*) hole_entry);
            }

#if DEBUG
            if (check_map_with_hole_sanity)
                check_map_sanity(map, &old_hole_entry);
#endif /* DEBUG */

            return;

        } else if ((new_entry->vme_start < hole_entry->vme_end) && (hole_entry->vme_end <= new_entry->vme_end)) {

            /*
             * Case C2: Entry moving downwards and a part/full hole lies within the bounds of the entry.
             */

#if DEBUG
            copy_hole_info(hole_entry, &old_hole_entry);
#endif /* DEBUG */

            if (hole_entry->vme_start >= new_entry->vme_start) {
                vm_map_delete_hole(map, hole_entry);
            } else {
                hole_entry->vme_end = new_entry->vme_start;
                SAVE_HINT_HOLE_WRITE(map, (struct vm_map_links*) hole_entry);
            }

#if DEBUG
            if (check_map_with_hole_sanity)
                check_map_sanity(map, &old_hole_entry);
#endif /* DEBUG */

            return;
        }

        hole_entry = next_hole_entry;
        next_hole_entry = hole_entry->vme_next;

        if (hole_entry == (vm_map_entry_t)map->holes_list)
            break;
    }

    panic("Illegal action: h1: %p, s:0x%llx, e:0x%llx...h2:%p, s:0x%llx, e:0x%llx...h3:0x%p, s:0x%llx, e:0x%llx\n",
        hole_entry->vme_prev,
        (unsigned long long)hole_entry->vme_prev->vme_start,
        (unsigned long long)hole_entry->vme_prev->vme_end,
        hole_entry,
        (unsigned long long)hole_entry->vme_start,
        (unsigned long long)hole_entry->vme_end,
        hole_entry->vme_next,
        (unsigned long long)hole_entry->vme_next->vme_start,
        (unsigned long long)hole_entry->vme_next->vme_end);

}
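/*
 * Update the hole list after an entry has been created in or deleted from
 * a map that tracks free space with a hole list ("holelistenabled").
 * Dispatches to the creation/deletion handlers above after filtering out
 * clips (which pass a NULL entry) and entries beyond the map's usable
 * range (e.g. the pinned commpage).
 */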
void
update_first_free_rb(vm_map_t map, vm_map_entry_t entry, boolean_t new_entry_creation)
{

    if (map->holelistenabled) {

        /*
         * Holes can be used to track ranges all the way up to MACH_VM_MAX_ADDRESS or more (e.g. kernel map).
         */
        vm_map_offset_t max_valid_offset = (map->max_offset > MACH_VM_MAX_ADDRESS) ? map->max_offset : MACH_VM_MAX_ADDRESS;

        /*
         * Clipping an entry will not result in the creation/deletion/modification of
         * a hole. Those calls pass NULL for their target entry.
         */
        if (entry == NULL) {
            return;
        }

        /*
         * Commpage is pinned beyond the map's max offset. That shouldn't affect the
         * holes within the bounds of the map.
         */
        if (vm_map_trunc_page(entry->vme_start, VM_MAP_PAGE_MASK(map)) >= max_valid_offset) {
            return;
        }

        /*
         * Note:
         *
         * - A new entry has already been added to the map
         * OR
         * - An older entry has already been deleted from the map
         *
         * We are updating the hole list after the fact (except in one special case involving copy maps).
         */

        if (new_entry_creation) {

            update_holes_on_entry_creation(map, entry);
        } else {

            update_holes_on_entry_deletion(map, entry);
        }
    }
}
721 }