/*
 * Copyright (c) 2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <kern/backtrace.h>
#include <vm/vm_map_store_rb.h>

RB_GENERATE(rb_head, vm_map_store, entry, rb_node_compare);

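/*
 * Map a vm_map_store node back to its containing vm_map_entry. This relies
 * on the layout of struct vm_map_entry (see vm_map.h), where the store node
 * immediately follows the leading struct vm_map_links, so stepping back by
 * sizeof(struct vm_map_links) from the store lands on the entry itself.
 */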
#define VME_FOR_STORE( store)	\
	(vm_map_entry_t)(((unsigned long)store) - ((unsigned long)sizeof(struct vm_map_links)))

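/*
 * Initialize the red-black tree root embedded in a map header.
 */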
void
vm_map_store_init_rb( struct vm_map_header* hdr )
{
	RB_INIT(&(hdr->rb_head_store));
}

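/*
 * Ordering callback for RB_GENERATE above. Entries are ordered by address
 * range: a node compares equal (0) to its parent when its start address
 * falls within the parent's [vme_start, vme_end) range. This is what lets
 * RB_FIND locate the entry covering a given address.
 */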
int rb_node_compare(struct vm_map_store *node, struct vm_map_store *parent)
{
	vm_map_entry_t vme_c;
	vm_map_entry_t vme_p;

	vme_c = VME_FOR_STORE(node);
	vme_p = VME_FOR_STORE(parent);
	if (vme_c->vme_start < vme_p->vme_start)
		return -1;
	if (vme_c->vme_start >= vme_p->vme_end)
		return 1;
	return 0;
}

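/*
 * Debugging aid: this routine always panics, reporting either that the
 * entry is missing from the tree or, if present, the entry along with its
 * left and right children.
 */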
void vm_map_store_walk_rb( vm_map_t map, vm_map_entry_t *wrong_vme, vm_map_entry_t *vm_entry)
{
	struct vm_map_header hdr = map->hdr;
	struct vm_map_store *rb_entry = RB_ROOT(&(hdr.rb_head_store));
	vm_map_entry_t cur = *vm_entry;

	rb_entry = RB_FIND( rb_head, &(hdr.rb_head_store), &(cur->store));
	if (rb_entry == NULL)
		panic("NO SUCH ENTRY %p. Gave back %p", *vm_entry, *wrong_vme);
	else
		panic("Cur: %p, L: %p, R: %p", VME_FOR_STORE(rb_entry), VME_FOR_STORE(RB_LEFT(rb_entry, entry)), VME_FOR_STORE(RB_RIGHT(rb_entry, entry)));
}

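/*
 * Standard binary-search descent over the tree. On a hit, *vm_entry is set
 * to the entry containing "address" and TRUE is returned. On a miss,
 * *vm_entry is set to the closest preceding entry (or to the map header if
 * the address is below every entry) and FALSE is returned.
 */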
boolean_t vm_map_store_lookup_entry_rb( vm_map_t map, vm_map_offset_t address, vm_map_entry_t *vm_entry)
{
	struct vm_map_header hdr = map->hdr;
	struct vm_map_store *rb_entry = RB_ROOT(&(hdr.rb_head_store));
	vm_map_entry_t cur = vm_map_to_entry(map);
	vm_map_entry_t prev = VM_MAP_ENTRY_NULL;

	while (rb_entry != (struct vm_map_store*)NULL) {
		cur = VME_FOR_STORE(rb_entry);
		if (cur == VM_MAP_ENTRY_NULL)
			panic("no entry");
		if (address >= cur->vme_start) {
			if (address < cur->vme_end) {
				*vm_entry = cur;
				return TRUE;
			}
			rb_entry = RB_RIGHT(rb_entry, entry);
			prev = cur;
		} else {
			rb_entry = RB_LEFT(rb_entry, entry);
		}
	}
	if (prev == VM_MAP_ENTRY_NULL) {
		prev = vm_map_to_entry(map);
	}
	*vm_entry = prev;
	return FALSE;
}

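/*
 * Link an entry's store node into the header's tree. A non-NULL return
 * from RB_INSERT means some existing node compared equal, i.e. the new
 * entry overlaps an entry already in the tree, which is fatal.
 */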
void vm_map_store_entry_link_rb( struct vm_map_header *mapHdr, __unused vm_map_entry_t after_where, vm_map_entry_t entry)
{
	struct rb_head *rbh = &(mapHdr->rb_head_store);
	struct vm_map_store *store = &(entry->store);
	struct vm_map_store *tmp_store;

	if ((tmp_store = RB_INSERT( rb_head, rbh, store )) != NULL) {
		panic("VMSEL: INSERT FAILED: 0x%lx, 0x%lx, 0x%lx, 0x%lx", (uintptr_t)entry->vme_start, (uintptr_t)entry->vme_end,
		    (uintptr_t)(VME_FOR_STORE(tmp_store))->vme_start, (uintptr_t)(VME_FOR_STORE(tmp_store))->vme_end);
	}
}

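/*
 * Unlink an entry's store node from the header's tree, panicking if the
 * entry was never linked.
 */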
void vm_map_store_entry_unlink_rb( struct vm_map_header *mapHdr, vm_map_entry_t entry)
{
	struct rb_head *rbh = &(mapHdr->rb_head_store);
	struct vm_map_store *rb_entry;
	struct vm_map_store *store = &(entry->store);

	rb_entry = RB_FIND( rb_head, rbh, store);
	if (rb_entry == NULL)
		panic("NO ENTRY TO DELETE");
	RB_REMOVE( rb_head, rbh, store );
}

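/*
 * Link up to "nentries" entries of a vm_map_copy into the target map's
 * tree. The entries are assumed to already be chained into the map's
 * linked list of entries; this pass only adds them to the RB tree (see
 * the copy-map note in update_holes_on_entry_creation below).
 */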
void vm_map_store_copy_insert_rb( vm_map_t map, __unused vm_map_entry_t after_where, vm_map_copy_t copy)
{
	struct vm_map_header *mapHdr = &(map->hdr);
	struct rb_head *rbh = &(mapHdr->rb_head_store);
	struct vm_map_store *store;
	vm_map_entry_t entry = vm_map_copy_first_entry(copy);
	int inserted = 0, nentries = copy->cpy_hdr.nentries;

	while (entry != vm_map_copy_to_entry(copy) && nentries > 0) {
		vm_map_entry_t prev = entry;
		store = &(entry->store);
		if (RB_INSERT( rb_head, rbh, store ) != NULL) {
			panic("VMSCIR1: INSERT FAILED: %d: %p, %p, %p, 0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%lx, 0x%lx", inserted, prev, entry, vm_map_copy_to_entry(copy),
			    (uintptr_t)prev->vme_start, (uintptr_t)prev->vme_end, (uintptr_t)entry->vme_start, (uintptr_t)entry->vme_end,
			    (uintptr_t)(VME_FOR_STORE(rbh->rbh_root))->vme_start, (uintptr_t)(VME_FOR_STORE(rbh->rbh_root))->vme_end);
		} else {
#if MAP_ENTRY_INSERTION_DEBUG
			backtrace(&entry->vme_insertion_bt[0],
			    (sizeof (entry->vme_insertion_bt) / sizeof (uintptr_t)));
#endif
			entry = entry->vme_next;
			inserted++;
			nentries--;
		}
	}
}

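/*
 * Remove up to "nentries" entries, starting at "entry", from the copy
 * map's own tree.
 */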
void
vm_map_store_copy_reset_rb( vm_map_copy_t copy, vm_map_entry_t entry, int nentries )
{
	struct vm_map_header *mapHdr = &(copy->cpy_hdr);
	struct rb_head *rbh = &(mapHdr->rb_head_store);
	struct vm_map_store *store;
	int deleted = 0;

	while (entry != vm_map_copy_to_entry(copy) && nentries > 0) {
		store = &(entry->store);
		RB_REMOVE( rb_head, rbh, store );
		entry = entry->vme_next;
		deleted++;
		nentries--;
	}
}

extern zone_t	vm_map_holes_zone;	/* zone for vm map holes (vm_map_links) structures */

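/*
 * Merge a hole with the hole that follows it: the first hole absorbs its
 * successor's range, and the now-redundant middle hole is unlinked from
 * the circular hole list and freed back to vm_map_holes_zone.
 */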
void
vm_map_combine_hole(vm_map_t map, vm_map_entry_t hole_entry);
void
vm_map_combine_hole(__unused vm_map_t map, vm_map_entry_t hole_entry)
{
	vm_map_entry_t middle_hole_entry, last_hole_entry;

	hole_entry->vme_end = hole_entry->vme_next->vme_end;

	middle_hole_entry = hole_entry->vme_next;
	last_hole_entry = middle_hole_entry->vme_next;

	assert(last_hole_entry->vme_prev == middle_hole_entry);
	assert(middle_hole_entry->vme_end != last_hole_entry->vme_start);

	last_hole_entry->vme_prev = hole_entry;
	hole_entry->vme_next = last_hole_entry;

	middle_hole_entry->vme_prev = NULL;
	middle_hole_entry->vme_next = NULL;

	zfree(vm_map_holes_zone, middle_hole_entry);

	assert(hole_entry->vme_start < hole_entry->vme_end);
	assert(last_hole_entry->vme_start < last_hole_entry->vme_end);
}

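/*
 * Unlink a hole from the map's circular hole list and free it, handling
 * the cases where the hole is the list head (possibly the only hole) and
 * keeping the hole hint pointing at a live hole.
 */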
void
vm_map_delete_hole(vm_map_t map, vm_map_entry_t hole_entry);
void
vm_map_delete_hole(vm_map_t map, vm_map_entry_t hole_entry)
{
	if (hole_entry == (vm_map_entry_t) map->holes_list) {

		if (hole_entry->vme_next == (vm_map_entry_t) map->holes_list) {

			map->holes_list = NULL;
			SAVE_HINT_HOLE_WRITE(map, NULL);
		} else {

			vm_map_entry_t l_next, l_prev;

			l_next = (vm_map_entry_t) map->holes_list->next;
			l_prev = (vm_map_entry_t) map->holes_list->prev;
			map->holes_list = (struct vm_map_links*) l_next;

			l_next->vme_prev = l_prev;
			l_prev->vme_next = l_next;

			SAVE_HINT_HOLE_WRITE(map, (struct vm_map_links*) l_next);
		}
	} else {

		SAVE_HINT_HOLE_WRITE(map, (struct vm_map_links*) hole_entry->vme_prev);

		hole_entry->vme_prev->vme_next = hole_entry->vme_next;
		hole_entry->vme_next->vme_prev = hole_entry->vme_prev;
	}

	hole_entry->vme_next = NULL;
	hole_entry->vme_prev = NULL;
	zfree(vm_map_holes_zone, hole_entry);
}

/*
 * For Debugging.
 */

#if DEBUG
static void
check_map_sanity(vm_map_t map, vm_map_entry_t old_hole_entry)
{
	vm_map_entry_t hole_entry, next_hole_entry;
	vm_map_entry_t map_entry, next_map_entry;

	if (map->holes_list == NULL) {

		return;
	}

	hole_entry = (vm_map_entry_t) map->holes_list;
	next_hole_entry = hole_entry->vme_next;

	map_entry = vm_map_first_entry(map);
	next_map_entry = map_entry->vme_next;

	while (map_entry->vme_start > hole_entry->vme_start) {
		hole_entry = next_hole_entry;
		next_hole_entry = hole_entry->vme_next;

		if (hole_entry == (vm_map_entry_t)map->holes_list)
			break;
	}

	while (map_entry != vm_map_to_entry(map)) {

		if (map_entry->vme_start >= map->max_offset)
			break;

		if (map_entry->vme_end != map_entry->vme_next->vme_start) {

			if (map_entry->vme_next == vm_map_to_entry(map))
				break;

			if (hole_entry->vme_start != map_entry->vme_end) {
				panic("hole_entry not aligned %p(0x%llx), %p (0x%llx), %p", hole_entry, (unsigned long long)hole_entry->vme_start, map_entry->vme_next, (unsigned long long)map_entry->vme_end, old_hole_entry);
				assert(hole_entry->vme_start == map_entry->vme_end);
			}

			if (hole_entry->vme_end != map_entry->vme_next->vme_start) {
				panic("hole_entry not next aligned %p(0x%llx), %p (0x%llx), %p", hole_entry, (unsigned long long)hole_entry->vme_end, map_entry->vme_next, (unsigned long long)map_entry->vme_next->vme_start, old_hole_entry);
				assert(hole_entry->vme_end == map_entry->vme_next->vme_start);
			}

			hole_entry = next_hole_entry;
			next_hole_entry = hole_entry->vme_next;

			if (hole_entry == (vm_map_entry_t)map->holes_list)
				break;
		}

		map_entry = map_entry->vme_next;
	}
}

/*
 * For debugging.
 */
static void
copy_hole_info(vm_map_entry_t hole_entry, vm_map_entry_t old_hole_entry)
{
	old_hole_entry->vme_prev = hole_entry->vme_prev;
	old_hole_entry->vme_next = hole_entry->vme_next;
	old_hole_entry->vme_start = hole_entry->vme_start;
	old_hole_entry->vme_end = hole_entry->vme_end;
}
#endif /* DEBUG */

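/*
 * Deleting a map entry opens its address range back up as a hole. Try the
 * hole hint first, then walk the hole list: either grow an adjacent hole
 * (possibly merging the two holes that the deleted entry separated), or
 * create a brand-new hole for the freed range.
 */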
void
update_holes_on_entry_deletion(vm_map_t map, vm_map_entry_t old_entry);
void
update_holes_on_entry_deletion(vm_map_t map, vm_map_entry_t old_entry)
{
	/*
	 * Dealing with the deletion of an older entry.
	 */

	vm_map_entry_t hole_entry, next_hole_entry;
#if DEBUG
	struct vm_map_entry old_hole_entry;
#endif /* DEBUG */
	boolean_t create_new_hole = TRUE;

	hole_entry = (vm_map_entry_t) map->hole_hint;

	if (hole_entry) {

		if (hole_entry->vme_end == old_entry->vme_start) {
			/*
			 * Found a hole that ends right where our entry starts.
			 * Hit.
			 */

		} else if (hole_entry->vme_start == old_entry->vme_end) {

			if (hole_entry != (vm_map_entry_t) map->holes_list) {

				/*
				 * Found a hole that starts right where our
				 * entry ends. Back up to the previous hole,
				 * unless we're already at the head of the
				 * list, so we don't erroneously extend
				 * backwards.
				 *
				 * Hit.
				 */

				hole_entry = hole_entry->vme_prev;
			}

		} else if (hole_entry->vme_start > old_entry->vme_end) {

			/*
			 * Useless hint. Start from the top.
			 */

			hole_entry = (vm_map_entry_t) map->holes_list;
		}

		if (hole_entry != (vm_map_entry_t) map->holes_list) {
			if (hole_entry->vme_start > old_entry->vme_start) {
				panic("Hole hint failed: Hole entry start: 0x%llx, entry start: 0x%llx, map hole start: 0x%llx, map hint start: 0x%llx\n",
				    (unsigned long long)hole_entry->vme_start,
				    (unsigned long long)old_entry->vme_start,
				    (unsigned long long)map->holes_list->start,
				    (unsigned long long)map->hole_hint->start);
			}
			if (hole_entry->vme_end > old_entry->vme_start) {
				panic("Hole hint failed: Hole entry end: 0x%llx, entry start: 0x%llx, map hole start: 0x%llx, map hint start: 0x%llx\n",
				    (unsigned long long)hole_entry->vme_end,
				    (unsigned long long)old_entry->vme_start,
				    (unsigned long long)map->holes_list->start,
				    (unsigned long long)map->hole_hint->start);
			}
		}

		while (1) {

			next_hole_entry = hole_entry->vme_next;

			/*
			 * Hole ends exactly where the deleted entry started,
			 * i.e. it sits right above (before) the entry.
			 */
			if (hole_entry->vme_end == old_entry->vme_start) {

#if DEBUG
				copy_hole_info(hole_entry, &old_hole_entry);
#endif /* DEBUG */

				/*
				 * Is there another hole right below (after)
				 * the entry? If so, the deleted entry was all
				 * that separated the two holes, so combine
				 * them; otherwise just extend this hole.
				 */

				if (old_entry->vme_end == hole_entry->vme_next->vme_start) {

					vm_map_combine_hole(map, hole_entry);
				} else {

					hole_entry->vme_end = old_entry->vme_end;
				}
				create_new_hole = FALSE;
#if DEBUG
				check_map_sanity(map, &old_hole_entry);
#endif /* DEBUG */
				break;
			}

			/*
			 * Hole starts exactly where the deleted entry ended,
			 * i.e. it sits right below (after) the entry. Extend
			 * the hole backwards over the freed range.
			 */
			if (hole_entry->vme_start == old_entry->vme_end) {

#if DEBUG
				copy_hole_info(hole_entry, &old_hole_entry);
#endif /* DEBUG */

				hole_entry->vme_start = old_entry->vme_start;
				create_new_hole = FALSE;

#if DEBUG
				check_map_sanity(map, &old_hole_entry);
#endif /* DEBUG */
				break;
			}

			/*
			 * Hole is beyond our entry. Let's go back to the last hole
			 * before our entry so we have the right place to link up the
			 * new hole that will be needed.
			 */
			if (hole_entry->vme_start > old_entry->vme_end) {

#if DEBUG
				copy_hole_info(hole_entry, &old_hole_entry);
#endif /* DEBUG */

				if (hole_entry != (vm_map_entry_t) map->holes_list) {
					assert(hole_entry->vme_start != old_entry->vme_start);
					hole_entry = hole_entry->vme_prev;
				}
				break;
			}

			hole_entry = next_hole_entry;

			if (hole_entry == (vm_map_entry_t)map->holes_list) {
				hole_entry = hole_entry->vme_prev;
				break;
			}
		}
	}

	if (create_new_hole) {
		struct vm_map_links *new_hole_entry = NULL;
		vm_map_entry_t l_next, l_prev;

		new_hole_entry = zalloc(vm_map_holes_zone);

		/*
		 * First hole in the map?
		 * OR
		 * A hole that is located above the current first hole in the map?
		 */
		if (map->holes_list == NULL || (hole_entry == (vm_map_entry_t) map->holes_list && hole_entry->vme_start > old_entry->vme_start)) {

			if (map->holes_list == NULL) {

				map->holes_list = new_hole_entry;
				new_hole_entry->prev = new_hole_entry->next = (vm_map_entry_t)map->holes_list;
			} else {

				l_next = (vm_map_entry_t) map->holes_list;
				l_prev = map->holes_list->prev;
				map->holes_list = new_hole_entry;
				new_hole_entry->next = l_next;
				new_hole_entry->prev = l_prev;

				l_prev->vme_next = l_next->vme_prev = (vm_map_entry_t) new_hole_entry;
			}
		} else {

			l_next = hole_entry->vme_next;
			l_prev = hole_entry->vme_next->vme_prev;

			new_hole_entry->prev = hole_entry;
			new_hole_entry->next = l_next;

			hole_entry->vme_next = (vm_map_entry_t)new_hole_entry;
			l_next->vme_prev = (vm_map_entry_t) new_hole_entry;
		}

		new_hole_entry->start = old_entry->vme_start;
		new_hole_entry->end = old_entry->vme_end;

		hole_entry = (vm_map_entry_t) new_hole_entry;

		assert(new_hole_entry->start < new_hole_entry->end);
	}

#if DEBUG
	check_map_sanity(map, &old_hole_entry);
#endif /* DEBUG */

	SAVE_HINT_HOLE_WRITE(map, (struct vm_map_links*) hole_entry);
	return;
}

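/*
 * A newly created entry consumes part or all of one of the map's holes.
 * Walk the hole list and shrink, split, or delete the hole(s) the entry
 * overlaps, per cases A, B, C1, and C2 described below. Panics if the
 * entry doesn't intersect any hole, since that means the hole list no
 * longer reflects the map.
 */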
void
update_holes_on_entry_creation(vm_map_t map, vm_map_entry_t new_entry);
void
update_holes_on_entry_creation(vm_map_t map, vm_map_entry_t new_entry)
{

	vm_map_entry_t hole_entry, next_hole_entry;
#if DEBUG
	struct vm_map_entry old_hole_entry;
	vm_map_entry_t tmp_entry;
	boolean_t check_map_with_hole_sanity = TRUE;
#endif /* DEBUG */

	/*
	 * Case A: The entry is aligned exactly with the start and end of the hole.
	 * This will delete the hole.
	 *
	 * Case B: The entry is completely within a hole but NOT aligned with the start/end of the hole.
	 * This will split a hole.
	 *
	 * Case C: The entry overlaps with the hole. The entry could be extending upwards (C1) or downwards (C2).
	 * This will reduce the size of the hole or delete the hole completely if it is smaller than the entry.
	 */

	hole_entry = (vm_map_entry_t) map->holes_list;
	assert(hole_entry);
	next_hole_entry = hole_entry->vme_next;

	while (1) {

#if DEBUG
		/*
		 * If the entry doesn't exist in the RB tree, we are likely dealing with copy maps where
		 * the entries belonging to the copy map are linked into the list of entries silently and
		 * then added to the RB-tree later on.
		 * So sanity checks are useless in that case.
		 */
		check_map_with_hole_sanity = vm_map_lookup_entry(map, new_entry->vme_start, &tmp_entry);
#endif /* DEBUG */

		if (hole_entry->vme_start == new_entry->vme_start &&
		    hole_entry->vme_end == new_entry->vme_end) {

			/* Case A */
#if DEBUG
			copy_hole_info(hole_entry, &old_hole_entry);
#endif /* DEBUG */

			/*
			 * This check makes sense only for regular maps, not copy maps.
			 * With a regular map, the VM entry is first linked and then
			 * the hole is deleted. So the check below, which makes sure that
			 * the map's bounds are being respected, is valid.
			 * But for copy maps, the hole is deleted before the VM entry is
			 * linked (vm_map_store_copy_insert) and so this check is invalid.
			 *
			if (hole_entry == (vm_map_entry_t) map->holes_list) {

				if (hole_entry->vme_next == (vm_map_entry_t) map->holes_list) {

					next_hole_entry = vm_map_last_entry(map);
					assert(next_hole_entry->vme_end >= map->max_offset);
				}
			}
			*/

			vm_map_delete_hole(map, hole_entry);

#if DEBUG
			if (check_map_with_hole_sanity)
				check_map_sanity(map, &old_hole_entry);
#endif /* DEBUG */
			return;

		} else if (hole_entry->vme_start < new_entry->vme_start &&
			   hole_entry->vme_end > new_entry->vme_end) {

			/* Case B */
			struct vm_map_links *new_hole_entry = NULL;

			new_hole_entry = zalloc(vm_map_holes_zone);

#if DEBUG
			copy_hole_info(hole_entry, &old_hole_entry);
#endif /* DEBUG */

			new_hole_entry->prev = hole_entry;
			new_hole_entry->next = hole_entry->vme_next;
			hole_entry->vme_next->vme_prev = (vm_map_entry_t)new_hole_entry;
			hole_entry->vme_next = (vm_map_entry_t)new_hole_entry;

			new_hole_entry->start = new_entry->vme_end;
			new_hole_entry->end = hole_entry->vme_end;
			hole_entry->vme_end = new_entry->vme_start;

			assert(hole_entry->vme_start < hole_entry->vme_end);
			assert(new_hole_entry->start < new_hole_entry->end);

#if DEBUG
			if (check_map_with_hole_sanity)
				check_map_sanity(map, &old_hole_entry);
#endif /* DEBUG */

			SAVE_HINT_HOLE_WRITE(map, (struct vm_map_links*) hole_entry);
			return;

		} else if ((new_entry->vme_start <= hole_entry->vme_start) && (hole_entry->vme_start < new_entry->vme_end)) {

			/*
			 * Case C1: Entry moving upwards and a part/full hole lies within the bounds of the entry.
			 */

#if DEBUG
			copy_hole_info(hole_entry, &old_hole_entry);
#endif /* DEBUG */

			if (hole_entry->vme_end <= new_entry->vme_end) {

				vm_map_delete_hole(map, hole_entry);
			} else {
				hole_entry->vme_start = new_entry->vme_end;
				SAVE_HINT_HOLE_WRITE(map, (struct vm_map_links*) hole_entry);
			}

#if DEBUG
			if (check_map_with_hole_sanity)
				check_map_sanity(map, &old_hole_entry);
#endif /* DEBUG */

			return;

		} else if ((new_entry->vme_start < hole_entry->vme_end) && (hole_entry->vme_end <= new_entry->vme_end)) {

			/*
			 * Case C2: Entry moving downwards and a part/full hole lies within the bounds of the entry.
			 */

#if DEBUG
			copy_hole_info(hole_entry, &old_hole_entry);
#endif /* DEBUG */

			if (hole_entry->vme_start >= new_entry->vme_start) {
				vm_map_delete_hole(map, hole_entry);
			} else {
				hole_entry->vme_end = new_entry->vme_start;
				SAVE_HINT_HOLE_WRITE(map, (struct vm_map_links*) hole_entry);
			}

#if DEBUG
			if (check_map_with_hole_sanity)
				check_map_sanity(map, &old_hole_entry);
#endif /* DEBUG */

			return;
		}

		hole_entry = next_hole_entry;
		next_hole_entry = hole_entry->vme_next;

		if (hole_entry == (vm_map_entry_t)map->holes_list)
			break;
	}

	panic("Illegal action: h1: %p, s:0x%llx, e:0x%llx...h2:%p, s:0x%llx, e:0x%llx...h3:0x%p, s:0x%llx, e:0x%llx\n",
	    hole_entry->vme_prev,
	    (unsigned long long)hole_entry->vme_prev->vme_start,
	    (unsigned long long)hole_entry->vme_prev->vme_end,
	    hole_entry,
	    (unsigned long long)hole_entry->vme_start,
	    (unsigned long long)hole_entry->vme_end,
	    hole_entry->vme_next,
	    (unsigned long long)hole_entry->vme_next->vme_start,
	    (unsigned long long)hole_entry->vme_next->vme_end);
}

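/*
 * Entry point from the vm_map_store layer: keep the hole list in sync
 * after an entry has been added to (new_entry_creation == TRUE) or deleted
 * from the map. A no-op for maps that don't have the hole list enabled.
 */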
void
update_first_free_rb(vm_map_t map, vm_map_entry_t entry, boolean_t new_entry_creation)
{

	if (map->holelistenabled) {

		/*
		 * Holes can be used to track ranges all the way up to MACH_VM_MAX_ADDRESS or more (e.g. kernel map).
		 */
		vm_map_offset_t max_valid_offset = (map->max_offset > MACH_VM_MAX_ADDRESS) ? map->max_offset : MACH_VM_MAX_ADDRESS;

		/*
		 * Clipping an entry will not result in the creation/deletion/modification of
		 * a hole. Those calls pass NULL for their target entry.
		 */
		if (entry == NULL) {
			return;
		}

		/*
		 * The commpage is pinned beyond the map's max offset. That shouldn't affect
		 * the holes within the bounds of the map.
		 */
		if (vm_map_trunc_page(entry->vme_start, VM_MAP_PAGE_MASK(map)) >= max_valid_offset) {
			return;
		}

		/*
		 * Note:
		 *
		 * - A new entry has already been added to the map
		 * OR
		 * - An older entry has already been deleted from the map
		 *
		 * We are updating the hole list after the fact (except in one special case involving copy maps).
		 */

		if (new_entry_creation) {

			update_holes_on_entry_creation(map, entry);
		} else {

			update_holes_on_entry_deletion(map, entry);
		}
	}
}