]> git.saurik.com Git - apple/xnu.git/blame - osfmk/vm/vm_map_store_rb.c
xnu-6153.41.3.tar.gz
[apple/xnu.git] / osfmk / vm / vm_map_store_rb.c
CommitLineData
6d2010ae
A
1/*
2 * Copyright (c) 2009 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
0a7de745 5 *
6d2010ae
A
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
0a7de745 14 *
6d2010ae
A
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
0a7de745 17 *
6d2010ae
A
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
0a7de745 25 *
6d2010ae
A
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
39037602 29#include <kern/backtrace.h>
6d2010ae
A
30#include <vm/vm_map_store_rb.h>
31
/*
 * Instantiate the red-black tree operations (insert/find/remove/...) for
 * the store nodes embedded in VM map entries, ordered by rb_node_compare.
 */
RB_GENERATE(rb_head, vm_map_store, entry, rb_node_compare);

/*
 * Recover the vm_map_entry_t that embeds a given vm_map_store node.
 * NOTE(review): assumes the store field sits exactly
 * sizeof(struct vm_map_links) bytes into struct vm_map_entry — confirm
 * against the entry layout in vm_map.h.
 */
#define VME_FOR_STORE( store) \
	(vm_map_entry_t)(((unsigned long)store) - ((unsigned long)sizeof(struct vm_map_links)))
36
/*
 * Initialize the red-black tree head embedded in a VM map header to the
 * empty tree.
 */
void
vm_map_store_init_rb( struct vm_map_header* hdr )
{
	RB_INIT(&(hdr->rb_head_store));
}
42
0a7de745
A
43int
44rb_node_compare(struct vm_map_store *node, struct vm_map_store *parent)
6d2010ae
A
45{
46 vm_map_entry_t vme_c;
47 vm_map_entry_t vme_p;
48
49 vme_c = VME_FOR_STORE(node);
50 vme_p = VME_FOR_STORE(parent);
0a7de745 51 if (vme_c->vme_start < vme_p->vme_start) {
6d2010ae 52 return -1;
0a7de745
A
53 }
54 if (vme_c->vme_start >= vme_p->vme_end) {
6d2010ae 55 return 1;
0a7de745 56 }
6d2010ae
A
57 return 0;
58}
59
cb323159 60__dead2
0a7de745 61void
cb323159 62vm_map_store_walk_rb(vm_map_t map, vm_map_entry_t *wrong_vme, vm_map_entry_t *vm_entry)
6d2010ae 63{
cb323159
A
64 struct vm_map_header *hdr = &map->hdr;
65 struct vm_map_store *rb_entry = RB_ROOT(&hdr->rb_head_store);
66 vm_map_entry_t cur = *vm_entry;
6d2010ae 67
cb323159 68 rb_entry = RB_FIND(rb_head, &hdr->rb_head_store, &(cur->store));
0a7de745 69 if (rb_entry == NULL) {
6d2010ae 70 panic("NO SUCH ENTRY %p. Gave back %p", *vm_entry, *wrong_vme);
0a7de745
A
71 } else {
72 panic("Cur: %p, L: %p, R: %p", VME_FOR_STORE(rb_entry), VME_FOR_STORE(RB_LEFT(rb_entry, entry)), VME_FOR_STORE(RB_RIGHT(rb_entry, entry)));
73 }
6d2010ae
A
74}
75
76
0a7de745 77boolean_t
cb323159 78vm_map_store_lookup_entry_rb(vm_map_t map, vm_map_offset_t address, vm_map_entry_t *vm_entry)
6d2010ae 79{
cb323159
A
80 struct vm_map_header *hdr = &map->hdr;
81 struct vm_map_store *rb_entry = RB_ROOT(&hdr->rb_head_store);
82 vm_map_entry_t cur = vm_map_to_entry(map);
83 vm_map_entry_t prev = VM_MAP_ENTRY_NULL;
6d2010ae
A
84
85 while (rb_entry != (struct vm_map_store*)NULL) {
0a7de745
A
86 cur = VME_FOR_STORE(rb_entry);
87 if (cur == VM_MAP_ENTRY_NULL) {
6d2010ae 88 panic("no entry");
0a7de745 89 }
6d2010ae
A
90 if (address >= cur->vme_start) {
91 if (address < cur->vme_end) {
92 *vm_entry = cur;
93 return TRUE;
94 }
95 rb_entry = RB_RIGHT(rb_entry, entry);
96 prev = cur;
97 } else {
98 rb_entry = RB_LEFT(rb_entry, entry);
99 }
100 }
0a7de745 101 if (prev == VM_MAP_ENTRY_NULL) {
6d2010ae
A
102 prev = vm_map_to_entry(map);
103 }
104 *vm_entry = prev;
105 return FALSE;
106}
107
0a7de745
A
108void
109vm_map_store_entry_link_rb( struct vm_map_header *mapHdr, __unused vm_map_entry_t after_where, vm_map_entry_t entry)
6d2010ae
A
110{
111 struct rb_head *rbh = &(mapHdr->rb_head_store);
112 struct vm_map_store *store = &(entry->store);
113 struct vm_map_store *tmp_store;
0a7de745 114 if ((tmp_store = RB_INSERT( rb_head, rbh, store )) != NULL) {
6d2010ae 115 panic("VMSEL: INSERT FAILED: 0x%lx, 0x%lx, 0x%lx, 0x%lx", (uintptr_t)entry->vme_start, (uintptr_t)entry->vme_end,
0a7de745 116 (uintptr_t)(VME_FOR_STORE(tmp_store))->vme_start, (uintptr_t)(VME_FOR_STORE(tmp_store))->vme_end);
6d2010ae
A
117 }
118}
119
0a7de745
A
120void
121vm_map_store_entry_unlink_rb( struct vm_map_header *mapHdr, vm_map_entry_t entry)
6d2010ae
A
122{
123 struct rb_head *rbh = &(mapHdr->rb_head_store);
124 struct vm_map_store *rb_entry;
125 struct vm_map_store *store = &(entry->store);
0a7de745
A
126
127 rb_entry = RB_FIND( rb_head, rbh, store);
128 if (rb_entry == NULL) {
6d2010ae 129 panic("NO ENTRY TO DELETE");
0a7de745 130 }
6d2010ae
A
131 RB_REMOVE( rb_head, rbh, store );
132}
133
6d2010ae
A
134void
135vm_map_store_copy_reset_rb( vm_map_copy_t copy, vm_map_entry_t entry, int nentries )
136{
137 struct vm_map_header *mapHdr = &(copy->cpy_hdr);
138 struct rb_head *rbh = &(mapHdr->rb_head_store);
139 struct vm_map_store *store;
0a7de745
A
140 int deleted = 0;
141
142 while (entry != vm_map_copy_to_entry(copy) && nentries > 0) {
6d2010ae
A
143 store = &(entry->store);
144 RB_REMOVE( rb_head, rbh, store );
145 entry = entry->vme_next;
146 deleted++;
147 nentries--;
148 }
149}
150
0a7de745 151extern zone_t vm_map_holes_zone; /* zone for vm map holes (vm_map_links) structures */
3e170ce0
A
152
void
vm_map_combine_hole(vm_map_t map, vm_map_entry_t hole_entry);
/*
 * Merge hole_entry with the hole immediately following it on the map's
 * circular hole list: hole_entry absorbs its successor's range, the
 * successor (middle hole) is unlinked and freed back to the holes zone.
 * The map argument is unused here (the list is reached via the entry).
 */
void
vm_map_combine_hole(__unused vm_map_t map, vm_map_entry_t hole_entry)
{
	vm_map_entry_t middle_hole_entry, last_hole_entry;

	/* Grow this hole to cover the next hole's full extent. */
	hole_entry->vme_end = hole_entry->vme_next->vme_end;

	middle_hole_entry = hole_entry->vme_next;
	last_hole_entry = middle_hole_entry->vme_next;

	assert(last_hole_entry->vme_prev == middle_hole_entry);
	/* Adjacent holes should have been combined already. */
	assert(middle_hole_entry->vme_end != last_hole_entry->vme_start);

	/* Unlink the absorbed (middle) hole from the doubly-linked list. */
	last_hole_entry->vme_prev = hole_entry;
	hole_entry->vme_next = last_hole_entry;

	middle_hole_entry->vme_prev = NULL;
	middle_hole_entry->vme_next = NULL;

	zfree(vm_map_holes_zone, middle_hole_entry);

	assert(hole_entry->vme_start < hole_entry->vme_end);
	assert(last_hole_entry->vme_start < last_hole_entry->vme_end);
}
179
180
void
vm_map_delete_hole(vm_map_t map, vm_map_entry_t hole_entry);
/*
 * Unlink hole_entry from the map's circular hole list and free it back
 * to the holes zone, updating the list head (map->holes_list) and the
 * hole hint as required.
 */
void
vm_map_delete_hole(vm_map_t map, vm_map_entry_t hole_entry)
{
	if (hole_entry == CAST_TO_VM_MAP_ENTRY(map->holes_list)) {
		if (hole_entry->vme_next == CAST_TO_VM_MAP_ENTRY(map->holes_list)) {
			/* Deleting the only hole: the list becomes empty. */
			map->holes_list = NULL;
			SAVE_HINT_HOLE_WRITE(map, NULL);
		} else {
			/* Deleting the head: advance the head to the next hole. */
			vm_map_entry_t l_next, l_prev;

			l_next = (vm_map_entry_t) map->holes_list->next;
			l_prev = (vm_map_entry_t) map->holes_list->prev;
			map->holes_list = (struct vm_map_links*) l_next;

			l_next->vme_prev = l_prev;
			l_prev->vme_next = l_next;

			SAVE_HINT_HOLE_WRITE(map, (struct vm_map_links*) l_next);
		}
	} else {
		/* Interior hole: hint at the predecessor, then unlink. */
		SAVE_HINT_HOLE_WRITE(map, (struct vm_map_links*) hole_entry->vme_prev);

		hole_entry->vme_prev->vme_next = hole_entry->vme_next;
		hole_entry->vme_next->vme_prev = hole_entry->vme_prev;
	}

	/* Poison the linkage before returning the hole to the zone. */
	hole_entry->vme_next = NULL;
	hole_entry->vme_prev = NULL;
	zfree(vm_map_holes_zone, hole_entry);
}
213
214
215/*
216 * For Debugging.
217 */
218
219#if DEBUG
/*
 * DEBUG-only consistency check: walk the map's entry list and hole list
 * in parallel and verify that every gap between consecutive map entries
 * is exactly described by a hole (matching start and end).  Panics (with
 * the pre-modification hole snapshot old_hole_entry for context) on any
 * mismatch.  No-op if the map has no holes.
 */
static void
check_map_sanity(vm_map_t map, vm_map_entry_t old_hole_entry)
{
	vm_map_entry_t hole_entry, next_hole_entry;
	vm_map_entry_t map_entry, next_map_entry;

	if (map->holes_list == NULL) {
		return;
	}

	hole_entry = CAST_DOWN(vm_map_entry_t, map->holes_list);
	next_hole_entry = hole_entry->vme_next;

	map_entry = vm_map_first_entry(map);
	next_map_entry = map_entry->vme_next;

	/* Skip holes that lie entirely below the first map entry. */
	while (map_entry->vme_start > hole_entry->vme_start) {
		hole_entry = next_hole_entry;
		next_hole_entry = hole_entry->vme_next;

		/* Wrapped around the circular hole list: stop. */
		if (hole_entry == CAST_DOWN(vm_map_entry_t, map->holes_list)) {
			break;
		}
	}

	while (map_entry != vm_map_to_entry(map)) {
		if (map_entry->vme_start >= map->max_offset) {
			break;
		}

		/* A gap between this entry and the next must match a hole. */
		if (map_entry->vme_end != map_entry->vme_next->vme_start) {
			if (map_entry->vme_next == vm_map_to_entry(map)) {
				break;
			}

			if (hole_entry->vme_start != map_entry->vme_end) {
				panic("hole_entry not aligned %p(0x%llx), %p (0x%llx), %p", hole_entry, (unsigned long long)hole_entry->vme_start, map_entry->vme_next, (unsigned long long)map_entry->vme_end, old_hole_entry);
				assert(hole_entry->vme_start == map_entry->vme_end);
			}

			if (hole_entry->vme_end != map_entry->vme_next->vme_start) {
				panic("hole_entry not next aligned %p(0x%llx), %p (0x%llx), %p", hole_entry, (unsigned long long)hole_entry->vme_end, map_entry->vme_next, (unsigned long long)map_entry->vme_next->vme_start, old_hole_entry);
				assert(hole_entry->vme_end == map_entry->vme_next->vme_start);
			}

			hole_entry = next_hole_entry;
			next_hole_entry = hole_entry->vme_next;

			if (hole_entry == CAST_DOWN(vm_map_entry_t, map->holes_list)) {
				break;
			}
		}

		map_entry = map_entry->vme_next;
	}
}
276
/*
 * For debugging.
 * Snapshot the linkage and bounds of hole_entry into old_hole_entry so a
 * later check_map_sanity() panic can report the pre-modification state.
 */
static void
copy_hole_info(vm_map_entry_t hole_entry, vm_map_entry_t old_hole_entry)
{
	old_hole_entry->vme_prev = hole_entry->vme_prev;
	old_hole_entry->vme_next = hole_entry->vme_next;
	old_hole_entry->vme_start = hole_entry->vme_start;
	old_hole_entry->vme_end = hole_entry->vme_end;
}
3e170ce0
A
288#endif /* DEBUG */
289
void
update_holes_on_entry_deletion(vm_map_t map, vm_map_entry_t old_entry);
/*
 * Update the map's hole list after old_entry has been deleted from the
 * map: the deleted range becomes free space, so either an adjacent hole
 * is extended (and possibly merged with its neighbor), or a brand-new
 * hole covering [old_entry->vme_start, old_entry->vme_end) is allocated
 * and linked into the circular hole list.  Finishes by updating the
 * hole hint.
 */
void
update_holes_on_entry_deletion(vm_map_t map, vm_map_entry_t old_entry)
{
	/*
	 * Dealing with the deletion of an older entry.
	 */

	vm_map_entry_t hole_entry, next_hole_entry;
#if DEBUG
	struct vm_map_entry old_hole_entry;
#endif /* DEBUG */
	boolean_t create_new_hole = TRUE;

	/* Start from the hole hint; it is often adjacent to the change. */
	hole_entry = CAST_TO_VM_MAP_ENTRY(map->hole_hint);

	if (hole_entry) {
		if (hole_entry->vme_end == old_entry->vme_start) {
			/*
			 * Found a hole right after above our entry.
			 * Hit.
			 */
		} else if (hole_entry->vme_start == old_entry->vme_end) {
			if (hole_entry != CAST_TO_VM_MAP_ENTRY(map->holes_list)) {
				/*
				 * Found a hole right after below our entry but
				 * make sure we don't erroneously extend backwards.
				 *
				 * Hit.
				 */

				hole_entry = hole_entry->vme_prev;
			}
		} else if (hole_entry->vme_start > old_entry->vme_end) {
			/*
			 * Useless hint. Start from the top.
			 */

			hole_entry = CAST_TO_VM_MAP_ENTRY(map->holes_list);
		}

		/* Sanity: the chosen starting hole must not lie above the entry. */
		if (hole_entry != CAST_TO_VM_MAP_ENTRY(map->holes_list)) {
			if (hole_entry->vme_start > old_entry->vme_start) {
				panic("Hole hint failed: Hole entry start: 0x%llx, entry start: 0x%llx, map hole start: 0x%llx, map hint start: 0x%llx\n",
				    (unsigned long long)hole_entry->vme_start,
				    (unsigned long long)old_entry->vme_start,
				    (unsigned long long)map->holes_list->start,
				    (unsigned long long)map->hole_hint->start);
			}
			if (hole_entry->vme_end > old_entry->vme_start) {
				panic("Hole hint failed: Hole entry end: 0x%llx, entry start: 0x%llx, map hole start: 0x%llx, map hint start: 0x%llx\n",
				    (unsigned long long)hole_entry->vme_end,
				    (unsigned long long)old_entry->vme_start,
				    (unsigned long long)map->holes_list->start,
				    (unsigned long long)map->hole_hint->start);
			}
		}

		while (1) {
			next_hole_entry = hole_entry->vme_next;

			/*
			 * Hole is right above the entry.
			 */
			if (hole_entry->vme_end == old_entry->vme_start) {
#if DEBUG
				copy_hole_info(hole_entry, &old_hole_entry);
#endif /* DEBUG */

				/*
				 * Is there another hole right below the entry?
				 * Can we combine holes?
				 */

				if (old_entry->vme_end == hole_entry->vme_next->vme_start) {
					vm_map_combine_hole(map, hole_entry);
				} else {
					hole_entry->vme_end = old_entry->vme_end;
				}
				create_new_hole = FALSE;
#if DEBUG
				check_map_sanity(map, &old_hole_entry);
#endif /* DEBUG */
				break;
			}

			/*
			 * Hole is right below the entry.
			 */
			if (hole_entry->vme_start == old_entry->vme_end) {
#if DEBUG
				copy_hole_info(hole_entry, &old_hole_entry);
#endif /* DEBUG */

				hole_entry->vme_start = old_entry->vme_start;
				create_new_hole = FALSE;

#if DEBUG
				check_map_sanity(map, &old_hole_entry);
#endif /* DEBUG */
				break;
			}

			/*
			 * Hole is beyond our entry. Let's go back to the last hole
			 * before our entry so we have the right place to link up the
			 * new hole that will be needed.
			 */
			if (hole_entry->vme_start > old_entry->vme_end) {
#if DEBUG
				copy_hole_info(hole_entry, &old_hole_entry);
#endif /* DEBUG */

				if (hole_entry != CAST_TO_VM_MAP_ENTRY(map->holes_list)) {
					assert(hole_entry->vme_start != old_entry->vme_start);
					hole_entry = hole_entry->vme_prev;
				}
				break;
			}

			hole_entry = next_hole_entry;

			/* Wrapped around: insert after the last hole in the list. */
			if (hole_entry == CAST_TO_VM_MAP_ENTRY(map->holes_list)) {
				hole_entry = hole_entry->vme_prev;
				break;
			}
		}
	}

	if (create_new_hole) {
		struct vm_map_links *new_hole_entry = NULL;
		vm_map_entry_t l_next, l_prev;

		new_hole_entry = zalloc(vm_map_holes_zone);

		/*
		 * First hole in the map?
		 * OR
		 * A hole that is located above the current first hole in the map?
		 */
		if (map->holes_list == NULL || (hole_entry == CAST_TO_VM_MAP_ENTRY(map->holes_list) && hole_entry->vme_start > old_entry->vme_start)) {
			if (map->holes_list == NULL) {
				/* Singleton circular list: points at itself. */
				map->holes_list = new_hole_entry;
				new_hole_entry->prev = new_hole_entry->next = CAST_TO_VM_MAP_ENTRY(map->holes_list);
			} else {
				/* Link the new hole in as the new list head. */
				l_next = CAST_TO_VM_MAP_ENTRY(map->holes_list);
				l_prev = map->holes_list->prev;
				map->holes_list = new_hole_entry;
				new_hole_entry->next = l_next;
				new_hole_entry->prev = l_prev;

				l_prev->vme_next = l_next->vme_prev = CAST_TO_VM_MAP_ENTRY(new_hole_entry);
			}
		} else {
			/* Link the new hole immediately after hole_entry. */
			l_next = hole_entry->vme_next;
			l_prev = hole_entry->vme_next->vme_prev;

			new_hole_entry->prev = hole_entry;
			new_hole_entry->next = l_next;

			hole_entry->vme_next = CAST_TO_VM_MAP_ENTRY(new_hole_entry);
			l_next->vme_prev = CAST_TO_VM_MAP_ENTRY(new_hole_entry);
		}

		new_hole_entry->start = old_entry->vme_start;
		new_hole_entry->end = old_entry->vme_end;

		hole_entry = CAST_TO_VM_MAP_ENTRY(new_hole_entry);

		assert(new_hole_entry->start < new_hole_entry->end);
	}

#if DEBUG
	check_map_sanity(map, &old_hole_entry);
#endif /* DEBUG */

	SAVE_HINT_HOLE_WRITE(map, (struct vm_map_links*) hole_entry);
	return;
}
470
471
void
update_holes_on_entry_creation(vm_map_t map, vm_map_entry_t new_entry);
/*
 * Update the map's hole list after new_entry has been added to the map:
 * the new entry consumes free space, so the hole covering it must be
 * deleted (case A), split in two (case B), or trimmed (cases C1/C2).
 * Panics if no hole overlaps the new entry, since that means the hole
 * list no longer matches the map.
 */
void
update_holes_on_entry_creation(vm_map_t map, vm_map_entry_t new_entry)
{
	vm_map_entry_t hole_entry, next_hole_entry;
#if DEBUG
	struct vm_map_entry old_hole_entry;
	vm_map_entry_t tmp_entry;
	boolean_t check_map_with_hole_sanity = TRUE;
#endif /* DEBUG */

	/*
	 * Case A: The entry is aligned exactly with the start and end of the hole.
	 * This will delete the hole.
	 *
	 * Case B: The entry is completely within a hole but NOT aligned with the start/end of the hole.
	 * This will split a hole.
	 *
	 * Case C: The entry overlaps with the hole. The entry could be extending upwards (C1) or downwards (C2).
	 * This will reduce the size of the hole or delete the hole completely if it is smaller than the entry.
	 */

	hole_entry = CAST_TO_VM_MAP_ENTRY(map->holes_list);
	assert(hole_entry);
	next_hole_entry = hole_entry->vme_next;

	while (1) {
#if DEBUG
		/*
		 * If the entry doesn't exist in the RB tree, we are likely dealing with copy maps where
		 * the entries belonging to the copy map are linked into the list of entries silently and
		 * then added to the RB-tree later on.
		 * So sanity checks are useless in that case.
		 */
		check_map_with_hole_sanity = vm_map_lookup_entry(map, new_entry->vme_start, &tmp_entry);
#endif /* DEBUG */

		if (hole_entry->vme_start == new_entry->vme_start &&
		    hole_entry->vme_end == new_entry->vme_end) {
			/* Case A */
#if DEBUG
			copy_hole_info(hole_entry, &old_hole_entry);
#endif /* DEBUG */

			/*
			 * This check makes sense only for regular maps, not copy maps.
			 * With a regular map, the VM entry is first linked and then
			 * the hole is deleted. So the check below, which makes sure that
			 * the map's bounds are being respected, is valid.
			 * But for copy maps, the hole is deleted before the VM entry is
			 * linked (vm_map_store_copy_insert) and so this check is invalid.
			 *
			 * if (hole_entry == (vm_map_entry_t) map->holes_list) {
			 *
			 * if (hole_entry->vme_next == (vm_map_entry_t) map->holes_list) {
			 *
			 * next_hole_entry = vm_map_last_entry(map);
			 * assert(next_hole_entry->vme_end >= map->max_offset);
			 * }
			 * }
			 */

			vm_map_delete_hole(map, hole_entry);

#if DEBUG
			if (check_map_with_hole_sanity) {
				check_map_sanity(map, &old_hole_entry);
			}
#endif /* DEBUG */
			return;
		} else if (hole_entry->vme_start < new_entry->vme_start &&
		    hole_entry->vme_end > new_entry->vme_end) {
			/* Case B */
			struct vm_map_links *new_hole_entry = NULL;

			new_hole_entry = zalloc(vm_map_holes_zone);

#if DEBUG
			copy_hole_info(hole_entry, &old_hole_entry);
#endif /* DEBUG */

			/* Link the new (upper) hole right after the existing one. */
			new_hole_entry->prev = hole_entry;
			new_hole_entry->next = hole_entry->vme_next;
			hole_entry->vme_next->vme_prev = CAST_TO_VM_MAP_ENTRY(new_hole_entry);
			hole_entry->vme_next = CAST_TO_VM_MAP_ENTRY(new_hole_entry);

			/* Split: existing hole keeps the lower part, new hole the upper. */
			new_hole_entry->start = new_entry->vme_end;
			new_hole_entry->end = hole_entry->vme_end;
			hole_entry->vme_end = new_entry->vme_start;

			assert(hole_entry->vme_start < hole_entry->vme_end);
			assert(new_hole_entry->start < new_hole_entry->end);

#if DEBUG
			if (check_map_with_hole_sanity) {
				check_map_sanity(map, &old_hole_entry);
			}
#endif /* DEBUG */

			SAVE_HINT_HOLE_WRITE(map, (struct vm_map_links*) hole_entry);
			return;
		} else if ((new_entry->vme_start <= hole_entry->vme_start) && (hole_entry->vme_start < new_entry->vme_end)) {
			/*
			 * Case C1: Entry moving upwards and a part/full hole lies within the bounds of the entry.
			 */

#if DEBUG
			copy_hole_info(hole_entry, &old_hole_entry);
#endif /* DEBUG */

			if (hole_entry->vme_end <= new_entry->vme_end) {
				/* Hole fully consumed by the entry. */
				vm_map_delete_hole(map, hole_entry);
			} else {
				/* Trim the bottom of the hole. */
				hole_entry->vme_start = new_entry->vme_end;
				SAVE_HINT_HOLE_WRITE(map, (struct vm_map_links*) hole_entry);
			}

#if DEBUG
			if (check_map_with_hole_sanity) {
				check_map_sanity(map, &old_hole_entry);
			}
#endif /* DEBUG */

			return;
		} else if ((new_entry->vme_start < hole_entry->vme_end) && (hole_entry->vme_end <= new_entry->vme_end)) {
			/*
			 * Case C2: Entry moving downwards and a part/full hole lies within the bounds of the entry.
			 */

#if DEBUG
			copy_hole_info(hole_entry, &old_hole_entry);
#endif /* DEBUG */

			if (hole_entry->vme_start >= new_entry->vme_start) {
				/* Hole fully consumed by the entry. */
				vm_map_delete_hole(map, hole_entry);
			} else {
				/* Trim the top of the hole. */
				hole_entry->vme_end = new_entry->vme_start;
				SAVE_HINT_HOLE_WRITE(map, (struct vm_map_links*) hole_entry);
			}

#if DEBUG
			if (check_map_with_hole_sanity) {
				check_map_sanity(map, &old_hole_entry);
			}
#endif /* DEBUG */

			return;
		}

		hole_entry = next_hole_entry;
		next_hole_entry = hole_entry->vme_next;

		/* Wrapped around the circular list without finding a match. */
		if (hole_entry == CAST_TO_VM_MAP_ENTRY(map->holes_list)) {
			break;
		}
	}

	panic("Illegal action: h1: %p, s:0x%llx, e:0x%llx...h2:%p, s:0x%llx, e:0x%llx...h3:0x%p, s:0x%llx, e:0x%llx\n",
	    hole_entry->vme_prev,
	    (unsigned long long)hole_entry->vme_prev->vme_start,
	    (unsigned long long)hole_entry->vme_prev->vme_end,
	    hole_entry,
	    (unsigned long long)hole_entry->vme_start,
	    (unsigned long long)hole_entry->vme_end,
	    hole_entry->vme_next,
	    (unsigned long long)hole_entry->vme_next->vme_start,
	    (unsigned long long)hole_entry->vme_next->vme_end);
}
641
642void
643update_first_free_rb(vm_map_t map, vm_map_entry_t entry, boolean_t new_entry_creation)
644{
3e170ce0 645 if (map->holelistenabled) {
3e170ce0
A
646 /*
647 * Holes can be used to track ranges all the way up to MACH_VM_MAX_ADDRESS or more (e.g. kernel map).
648 */
649 vm_map_offset_t max_valid_offset = (map->max_offset > MACH_VM_MAX_ADDRESS) ? map->max_offset : MACH_VM_MAX_ADDRESS;
650
651 /*
652 * Clipping an entry will not result in the creation/deletion/modification of
653 * a hole. Those calls pass NULL for their target entry.
654 */
655 if (entry == NULL) {
656 return;
657 }
658
659 /*
660 * Commpage is pinned beyond the map's max offset. That shouldn't affect the
661 * holes within the bounds of the map.
662 */
663 if (vm_map_trunc_page(entry->vme_start, VM_MAP_PAGE_MASK(map)) >= max_valid_offset) {
664 return;
665 }
666
667 /*
668 *
669 * Note:
670 *
671 * - A new entry has already been added to the map
672 * OR
673 * - An older entry has already been deleted from the map
674 *
675 * We are updating the hole list after the fact (except in one special case involving copy maps).
676 *
677 */
678
679 if (new_entry_creation) {
3e170ce0
A
680 update_holes_on_entry_creation(map, entry);
681 } else {
3e170ce0
A
682 update_holes_on_entry_deletion(map, entry);
683 }
684 }
685}