]>
Commit | Line | Data |
---|---|---|
6d2010ae A |
1 | /* |
2 | * Copyright (c) 2009 Apple Inc. All rights reserved. | |
3 | * | |
4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ | |
0a7de745 | 5 | * |
6d2010ae A |
6 | * This file contains Original Code and/or Modifications of Original Code |
7 | * as defined in and that are subject to the Apple Public Source License | |
8 | * Version 2.0 (the 'License'). You may not use this file except in | |
9 | * compliance with the License. The rights granted to you under the License | |
10 | * may not be used to create, or enable the creation or redistribution of, | |
11 | * unlawful or unlicensed copies of an Apple operating system, or to | |
12 | * circumvent, violate, or enable the circumvention or violation of, any | |
13 | * terms of an Apple operating system software license agreement. | |
0a7de745 | 14 | * |
6d2010ae A |
15 | * Please obtain a copy of the License at |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. | |
0a7de745 | 17 | * |
6d2010ae A |
18 | * The Original Code and all software distributed under the License are |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER | |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, | |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, | |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. | |
23 | * Please see the License for the specific language governing rights and | |
24 | * limitations under the License. | |
0a7de745 | 25 | * |
6d2010ae A |
26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ |
27 | */ | |
28 | ||
39037602 | 29 | #include <kern/backtrace.h> |
6d2010ae A |
30 | #include <vm/vm_map_store_rb.h> |
31 | ||
/* Instantiate the red-black tree operations for the VM map store. */
RB_GENERATE(rb_head, vm_map_store, entry, rb_node_compare);

/*
 * Recover the vm_map_entry that embeds a given vm_map_store node.
 * The store node sits sizeof(struct vm_map_links) bytes past the start
 * of the entry, so back up by that amount to reach the entry itself.
 */
#define VME_FOR_STORE( store) \
	(vm_map_entry_t)(((unsigned long)store) - ((unsigned long)sizeof(struct vm_map_links)))
36 | ||
/*
 * Initialize the (empty) red-black tree store embedded in a VM map header.
 */
void
vm_map_store_init_rb( struct vm_map_header* hdr )
{
	RB_INIT(&(hdr->rb_head_store));
}
42 | ||
0a7de745 A |
43 | int |
44 | rb_node_compare(struct vm_map_store *node, struct vm_map_store *parent) | |
6d2010ae A |
45 | { |
46 | vm_map_entry_t vme_c; | |
47 | vm_map_entry_t vme_p; | |
48 | ||
49 | vme_c = VME_FOR_STORE(node); | |
50 | vme_p = VME_FOR_STORE(parent); | |
0a7de745 | 51 | if (vme_c->vme_start < vme_p->vme_start) { |
6d2010ae | 52 | return -1; |
0a7de745 A |
53 | } |
54 | if (vme_c->vme_start >= vme_p->vme_end) { | |
6d2010ae | 55 | return 1; |
0a7de745 | 56 | } |
6d2010ae A |
57 | return 0; |
58 | } | |
59 | ||
cb323159 | 60 | __dead2 |
0a7de745 | 61 | void |
cb323159 | 62 | vm_map_store_walk_rb(vm_map_t map, vm_map_entry_t *wrong_vme, vm_map_entry_t *vm_entry) |
6d2010ae | 63 | { |
cb323159 A |
64 | struct vm_map_header *hdr = &map->hdr; |
65 | struct vm_map_store *rb_entry = RB_ROOT(&hdr->rb_head_store); | |
66 | vm_map_entry_t cur = *vm_entry; | |
6d2010ae | 67 | |
cb323159 | 68 | rb_entry = RB_FIND(rb_head, &hdr->rb_head_store, &(cur->store)); |
0a7de745 | 69 | if (rb_entry == NULL) { |
6d2010ae | 70 | panic("NO SUCH ENTRY %p. Gave back %p", *vm_entry, *wrong_vme); |
0a7de745 A |
71 | } else { |
72 | panic("Cur: %p, L: %p, R: %p", VME_FOR_STORE(rb_entry), VME_FOR_STORE(RB_LEFT(rb_entry, entry)), VME_FOR_STORE(RB_RIGHT(rb_entry, entry))); | |
73 | } | |
6d2010ae A |
74 | } |
75 | ||
76 | ||
0a7de745 | 77 | boolean_t |
cb323159 | 78 | vm_map_store_lookup_entry_rb(vm_map_t map, vm_map_offset_t address, vm_map_entry_t *vm_entry) |
6d2010ae | 79 | { |
cb323159 A |
80 | struct vm_map_header *hdr = &map->hdr; |
81 | struct vm_map_store *rb_entry = RB_ROOT(&hdr->rb_head_store); | |
82 | vm_map_entry_t cur = vm_map_to_entry(map); | |
83 | vm_map_entry_t prev = VM_MAP_ENTRY_NULL; | |
6d2010ae A |
84 | |
85 | while (rb_entry != (struct vm_map_store*)NULL) { | |
0a7de745 A |
86 | cur = VME_FOR_STORE(rb_entry); |
87 | if (cur == VM_MAP_ENTRY_NULL) { | |
6d2010ae | 88 | panic("no entry"); |
0a7de745 | 89 | } |
6d2010ae A |
90 | if (address >= cur->vme_start) { |
91 | if (address < cur->vme_end) { | |
92 | *vm_entry = cur; | |
93 | return TRUE; | |
94 | } | |
95 | rb_entry = RB_RIGHT(rb_entry, entry); | |
96 | prev = cur; | |
97 | } else { | |
98 | rb_entry = RB_LEFT(rb_entry, entry); | |
99 | } | |
100 | } | |
0a7de745 | 101 | if (prev == VM_MAP_ENTRY_NULL) { |
6d2010ae A |
102 | prev = vm_map_to_entry(map); |
103 | } | |
104 | *vm_entry = prev; | |
105 | return FALSE; | |
106 | } | |
107 | ||
0a7de745 A |
/*
 * Insert "entry" into the map header's red-black tree.
 * The rb tree is ordered by address, so unlike the linked-list store
 * the insertion position ("after_where") is not needed here.
 * Panics if an overlapping entry is already present in the tree.
 */
void
vm_map_store_entry_link_rb( struct vm_map_header *mapHdr, __unused vm_map_entry_t after_where, vm_map_entry_t entry)
{
	struct rb_head *rbh = &(mapHdr->rb_head_store);
	struct vm_map_store *store = &(entry->store);
	struct vm_map_store *tmp_store;
	/* RB_INSERT returns the colliding node (non-NULL) on conflict. */
	if ((tmp_store = RB_INSERT( rb_head, rbh, store )) != NULL) {
		panic("VMSEL: INSERT FAILED: 0x%lx, 0x%lx, 0x%lx, 0x%lx", (uintptr_t)entry->vme_start, (uintptr_t)entry->vme_end,
		    (uintptr_t)(VME_FOR_STORE(tmp_store))->vme_start, (uintptr_t)(VME_FOR_STORE(tmp_store))->vme_end);
	}
}
119 | ||
0a7de745 A |
/*
 * Remove "entry" from the map header's red-black tree.
 * Looks the node up first as a sanity check and panics if the entry is
 * not actually present in the tree.
 */
void
vm_map_store_entry_unlink_rb( struct vm_map_header *mapHdr, vm_map_entry_t entry)
{
	struct rb_head *rbh = &(mapHdr->rb_head_store);
	struct vm_map_store *rb_entry;
	struct vm_map_store *store = &(entry->store);

	rb_entry = RB_FIND( rb_head, rbh, store);
	if (rb_entry == NULL) {
		panic("NO ENTRY TO DELETE");
	}
	RB_REMOVE( rb_head, rbh, store );
}
133 | ||
6d2010ae A |
134 | void |
135 | vm_map_store_copy_reset_rb( vm_map_copy_t copy, vm_map_entry_t entry, int nentries ) | |
136 | { | |
137 | struct vm_map_header *mapHdr = &(copy->cpy_hdr); | |
138 | struct rb_head *rbh = &(mapHdr->rb_head_store); | |
139 | struct vm_map_store *store; | |
0a7de745 A |
140 | int deleted = 0; |
141 | ||
142 | while (entry != vm_map_copy_to_entry(copy) && nentries > 0) { | |
6d2010ae A |
143 | store = &(entry->store); |
144 | RB_REMOVE( rb_head, rbh, store ); | |
145 | entry = entry->vme_next; | |
146 | deleted++; | |
147 | nentries--; | |
148 | } | |
149 | } | |
150 | ||
0a7de745 | 151 | extern zone_t vm_map_holes_zone; /* zone for vm map holes (vm_map_links) structures */ |
3e170ce0 A |
152 | |
153 | void | |
154 | vm_map_combine_hole(vm_map_t map, vm_map_entry_t hole_entry); | |
155 | void | |
156 | vm_map_combine_hole(__unused vm_map_t map, vm_map_entry_t hole_entry) | |
157 | { | |
3e170ce0 A |
158 | vm_map_entry_t middle_hole_entry, last_hole_entry; |
159 | ||
160 | hole_entry->vme_end = hole_entry->vme_next->vme_end; | |
161 | ||
162 | middle_hole_entry = hole_entry->vme_next; | |
163 | last_hole_entry = middle_hole_entry->vme_next; | |
164 | ||
165 | assert(last_hole_entry->vme_prev == middle_hole_entry); | |
166 | assert(middle_hole_entry->vme_end != last_hole_entry->vme_start); | |
167 | ||
168 | last_hole_entry->vme_prev = hole_entry; | |
169 | hole_entry->vme_next = last_hole_entry; | |
170 | ||
171 | middle_hole_entry->vme_prev = NULL; | |
172 | middle_hole_entry->vme_next = NULL; | |
173 | ||
174 | zfree(vm_map_holes_zone, middle_hole_entry); | |
175 | ||
176 | assert(hole_entry->vme_start < hole_entry->vme_end); | |
177 | assert(last_hole_entry->vme_start < last_hole_entry->vme_end); | |
178 | } | |
179 | ||
180 | ||
void
vm_map_delete_hole(vm_map_t map, vm_map_entry_t hole_entry);
/*
 * Unlink "hole_entry" from the map's circular hole list and free it.
 * Maintains map->holes_list (the list head) and the hole hint:
 *  - deleting the only hole empties the list and clears the hint;
 *  - deleting the head advances holes_list to the next hole;
 *  - otherwise the hint is moved to the predecessor hole.
 */
void
vm_map_delete_hole(vm_map_t map, vm_map_entry_t hole_entry)
{
	if (hole_entry == CAST_TO_VM_MAP_ENTRY(map->holes_list)) {
		if (hole_entry->vme_next == CAST_TO_VM_MAP_ENTRY(map->holes_list)) {
			/* Last remaining hole: the list becomes empty. */
			map->holes_list = NULL;
			SAVE_HINT_HOLE_WRITE(map, NULL);
		} else {
			/* Deleting the head: promote the next hole to head. */
			vm_map_entry_t l_next, l_prev;

			l_next = (vm_map_entry_t) map->holes_list->next;
			l_prev = (vm_map_entry_t) map->holes_list->prev;
			map->holes_list = (struct vm_map_links*) l_next;

			l_next->vme_prev = l_prev;
			l_prev->vme_next = l_next;

			SAVE_HINT_HOLE_WRITE(map, (struct vm_map_links*) l_next);
		}
	} else {
		/* Interior hole: hint the predecessor, then splice out. */
		SAVE_HINT_HOLE_WRITE(map, (struct vm_map_links*) hole_entry->vme_prev);

		hole_entry->vme_prev->vme_next = hole_entry->vme_next;
		hole_entry->vme_next->vme_prev = hole_entry->vme_prev;
	}

	/* Poison the links before freeing to catch stale use. */
	hole_entry->vme_next = NULL;
	hole_entry->vme_prev = NULL;
	zfree(vm_map_holes_zone, hole_entry);
}
213 | ||
214 | ||
/*
 * For Debugging.
 */

#if DEBUG
extern int vm_check_map_sanity;

/*
 * Walk the map's entry list and hole list in lock step and verify that
 * every gap between consecutive map entries is described by exactly one
 * hole (matching start and end).  Panics (with the pre-modification
 * hole snapshot "old_hole_entry" for context) on any mismatch.
 * No-op when the map has no hole list.
 */
static void
check_map_sanity(vm_map_t map, vm_map_entry_t old_hole_entry)
{
	vm_map_entry_t hole_entry, next_hole_entry;
	vm_map_entry_t map_entry, next_map_entry;

	if (map->holes_list == NULL) {
		return;
	}

	hole_entry = CAST_DOWN(vm_map_entry_t, map->holes_list);
	next_hole_entry = hole_entry->vme_next;

	map_entry = vm_map_first_entry(map);
	next_map_entry = map_entry->vme_next;

	/* Skip holes that lie entirely below the first map entry. */
	while (map_entry->vme_start > hole_entry->vme_start) {
		hole_entry = next_hole_entry;
		next_hole_entry = hole_entry->vme_next;

		/* Wrapped around the circular hole list: stop skipping. */
		if (hole_entry == CAST_DOWN(vm_map_entry_t, map->holes_list)) {
			break;
		}
	}

	while (map_entry != vm_map_to_entry(map)) {
		if (map_entry->vme_start >= map->max_offset) {
			break;
		}

		/* A gap after this entry must match the current hole exactly. */
		if (map_entry->vme_end != map_entry->vme_next->vme_start) {
			if (map_entry->vme_next == vm_map_to_entry(map)) {
				break;
			}

			if (hole_entry->vme_start != map_entry->vme_end) {
				panic("hole_entry not aligned %p(0x%llx), %p (0x%llx), %p", hole_entry, (unsigned long long)hole_entry->vme_start, map_entry->vme_next, (unsigned long long)map_entry->vme_end, old_hole_entry);
				assert(hole_entry->vme_start == map_entry->vme_end);
			}

			if (hole_entry->vme_end != map_entry->vme_next->vme_start) {
				panic("hole_entry not next aligned %p(0x%llx), %p (0x%llx), %p", hole_entry, (unsigned long long)hole_entry->vme_end, map_entry->vme_next, (unsigned long long)map_entry->vme_next->vme_start, old_hole_entry);
				assert(hole_entry->vme_end == map_entry->vme_next->vme_start);
			}

			/* Gap accounted for: advance to the next hole. */
			hole_entry = next_hole_entry;
			next_hole_entry = hole_entry->vme_next;

			if (hole_entry == CAST_DOWN(vm_map_entry_t, map->holes_list)) {
				break;
			}
		}

		map_entry = map_entry->vme_next;
	}
}

/*
 * For debugging.
 * Snapshot a hole's links and bounds into "old_hole_entry" so the
 * pre-modification state can be reported if a sanity check fails later.
 */
static void
copy_hole_info(vm_map_entry_t hole_entry, vm_map_entry_t old_hole_entry)
{
	old_hole_entry->vme_prev = hole_entry->vme_prev;
	old_hole_entry->vme_next = hole_entry->vme_next;
	old_hole_entry->vme_start = hole_entry->vme_start;
	old_hole_entry->vme_end = hole_entry->vme_end;
}
#endif /* DEBUG */
291 | ||
void
update_holes_on_entry_deletion(vm_map_t map, vm_map_entry_t old_entry);
/*
 * Update the map's hole list after "old_entry" has been deleted from
 * the map: the address range [old_entry->vme_start, old_entry->vme_end)
 * becomes a hole.  The new hole is merged with an adjacent hole on
 * either side when possible; otherwise a fresh hole is allocated and
 * linked into the circular hole list.  The hole hint is updated to the
 * hole that was extended/created.
 */
void
update_holes_on_entry_deletion(vm_map_t map, vm_map_entry_t old_entry)
{
	/*
	 * Dealing with the deletion of an older entry.
	 */

	vm_map_entry_t hole_entry, next_hole_entry;
#if DEBUG
	struct vm_map_entry old_hole_entry;
#endif /* DEBUG */
	boolean_t create_new_hole = TRUE;

	/* Start from the hint; fall back to the list head if it's useless. */
	hole_entry = CAST_TO_VM_MAP_ENTRY(map->hole_hint);

	if (hole_entry) {
		if (hole_entry->vme_end == old_entry->vme_start) {
			/*
			 * Found a hole right after above our entry.
			 * Hit.
			 */
		} else if (hole_entry->vme_start == old_entry->vme_end) {
			if (hole_entry != CAST_TO_VM_MAP_ENTRY(map->holes_list)) {
				/*
				 * Found a hole right after below our entry but
				 * make sure we don't erroneously extend backwards.
				 *
				 * Hit.
				 */

				hole_entry = hole_entry->vme_prev;
			}
		} else if (hole_entry->vme_start > old_entry->vme_end) {
			/*
			 * Useless hint. Start from the top.
			 */

			hole_entry = CAST_TO_VM_MAP_ENTRY(map->holes_list);
		}

		/* The walk below only moves forward, so the starting hole
		 * must not already lie above the deleted range. */
		if (hole_entry != CAST_TO_VM_MAP_ENTRY(map->holes_list)) {
			if (hole_entry->vme_start > old_entry->vme_start) {
				panic("Hole hint failed: Hole entry start: 0x%llx, entry start: 0x%llx, map hole start: 0x%llx, map hint start: 0x%llx\n",
				    (unsigned long long)hole_entry->vme_start,
				    (unsigned long long)old_entry->vme_start,
				    (unsigned long long)map->holes_list->start,
				    (unsigned long long)map->hole_hint->start);
			}
			if (hole_entry->vme_end > old_entry->vme_start) {
				panic("Hole hint failed: Hole entry end: 0x%llx, entry start: 0x%llx, map hole start: 0x%llx, map hint start: 0x%llx\n",
				    (unsigned long long)hole_entry->vme_end,
				    (unsigned long long)old_entry->vme_start,
				    (unsigned long long)map->holes_list->start,
				    (unsigned long long)map->hole_hint->start);
			}
		}

		while (1) {
			next_hole_entry = hole_entry->vme_next;

			/*
			 * Hole is right above the entry.
			 */
			if (hole_entry->vme_end == old_entry->vme_start) {
#if DEBUG
				copy_hole_info(hole_entry, &old_hole_entry);
#endif /* DEBUG */

				/*
				 * Is there another hole right below the entry?
				 * Can we combine holes?
				 */

				if (old_entry->vme_end == hole_entry->vme_next->vme_start) {
					/* Holes on both sides: merge them across the range. */
					vm_map_combine_hole(map, hole_entry);
				} else {
					/* Extend the upper bound to swallow the range. */
					hole_entry->vme_end = old_entry->vme_end;
				}
				create_new_hole = FALSE;
#if DEBUG
				if (vm_check_map_sanity) {
					check_map_sanity(map, &old_hole_entry);
				}
#endif /* DEBUG */
				break;
			}

			/*
			 * Hole is right below the entry.
			 */
			if (hole_entry->vme_start == old_entry->vme_end) {
#if DEBUG
				copy_hole_info(hole_entry, &old_hole_entry);
#endif /* DEBUG */

				/* Extend the lower bound to swallow the range. */
				hole_entry->vme_start = old_entry->vme_start;
				create_new_hole = FALSE;

#if DEBUG
				if (vm_check_map_sanity) {
					check_map_sanity(map, &old_hole_entry);
				}
#endif /* DEBUG */
				break;
			}

			/*
			 * Hole is beyond our entry. Let's go back to the last hole
			 * before our entry so we have the right place to link up the
			 * new hole that will be needed.
			 */
			if (hole_entry->vme_start > old_entry->vme_end) {
#if DEBUG
				copy_hole_info(hole_entry, &old_hole_entry);
#endif /* DEBUG */

				if (hole_entry != CAST_TO_VM_MAP_ENTRY(map->holes_list)) {
					assert(hole_entry->vme_start != old_entry->vme_start);
					hole_entry = hole_entry->vme_prev;
				}
				break;
			}

			hole_entry = next_hole_entry;

			/* Wrapped around: stop at the last hole in the list. */
			if (hole_entry == CAST_TO_VM_MAP_ENTRY(map->holes_list)) {
				hole_entry = hole_entry->vme_prev;
				break;
			}
		}
	}

	if (create_new_hole) {
		struct vm_map_links *new_hole_entry = NULL;
		vm_map_entry_t l_next, l_prev;

		new_hole_entry = zalloc(vm_map_holes_zone);

		/*
		 * First hole in the map?
		 * OR
		 * A hole that is located above the current first hole in the map?
		 */
		if (map->holes_list == NULL || (hole_entry == CAST_TO_VM_MAP_ENTRY(map->holes_list) && hole_entry->vme_start > old_entry->vme_start)) {
			if (map->holes_list == NULL) {
				/* Sole hole: it links to itself. */
				map->holes_list = new_hole_entry;
				new_hole_entry->prev = new_hole_entry->next = CAST_TO_VM_MAP_ENTRY(map->holes_list);
			} else {
				/* Insert before the current head and become the head. */
				l_next = CAST_TO_VM_MAP_ENTRY(map->holes_list);
				l_prev = map->holes_list->prev;
				map->holes_list = new_hole_entry;
				new_hole_entry->next = l_next;
				new_hole_entry->prev = l_prev;

				l_prev->vme_next = l_next->vme_prev = CAST_TO_VM_MAP_ENTRY(new_hole_entry);
			}
		} else {
			/* Insert immediately after "hole_entry". */
			l_next = hole_entry->vme_next;
			l_prev = hole_entry->vme_next->vme_prev;

			new_hole_entry->prev = hole_entry;
			new_hole_entry->next = l_next;

			hole_entry->vme_next = CAST_TO_VM_MAP_ENTRY(new_hole_entry);
			l_next->vme_prev = CAST_TO_VM_MAP_ENTRY(new_hole_entry);
		}

		new_hole_entry->start = old_entry->vme_start;
		new_hole_entry->end = old_entry->vme_end;

		hole_entry = CAST_TO_VM_MAP_ENTRY(new_hole_entry);

		assert(new_hole_entry->start < new_hole_entry->end);
	}

#if DEBUG
	/*
	 * NOTE(review): on the create_new_hole path with a NULL hint,
	 * old_hole_entry was never filled in by copy_hole_info() and is
	 * read uninitialized here (DEBUG builds only) — confirm intended.
	 */
	if (vm_check_map_sanity) {
		check_map_sanity(map, &old_hole_entry);
	}
#endif /* DEBUG */

	SAVE_HINT_HOLE_WRITE(map, (struct vm_map_links*) hole_entry);
	return;
}
478 | ||
479 | ||
void
update_holes_on_entry_creation(vm_map_t map, vm_map_entry_t new_entry);
/*
 * Update the map's hole list after "new_entry" has been added to the
 * map: shrink, split, or delete whichever hole(s) the new entry's range
 * overlaps.  Panics if the entry's range is not covered by any hole,
 * since that would mean the hole list disagrees with the entry list.
 */
void
update_holes_on_entry_creation(vm_map_t map, vm_map_entry_t new_entry)
{
	vm_map_entry_t hole_entry, next_hole_entry;
#if DEBUG
	struct vm_map_entry old_hole_entry;
	vm_map_entry_t tmp_entry;
	boolean_t check_map_with_hole_sanity = TRUE;
#endif /* DEBUG */

	/*
	 * Case A: The entry is aligned exactly with the start and end of the hole.
	 * This will delete the hole.
	 *
	 * Case B: The entry is completely within a hole but NOT aligned with the start/end of the hole.
	 * This will split a hole.
	 *
	 * Case C: The entry overlaps with the hole. The entry could be extending upwards (C1) or downwards (C2).
	 * This will reduce the size of the hole or delete the hole completely if it is smaller than the entry.
	 */

	hole_entry = CAST_TO_VM_MAP_ENTRY(map->holes_list);
	assert(hole_entry);
	next_hole_entry = hole_entry->vme_next;

	while (1) {
#if DEBUG
		/*
		 * If the entry doesn't exist in the RB tree, we are likely dealing with copy maps where
		 * the entries belonging to the copy map are linked into the list of entries silently and
		 * then added to the RB-tree later on.
		 * So sanity checks are useless in that case.
		 */
		check_map_with_hole_sanity = vm_map_lookup_entry(map, new_entry->vme_start, &tmp_entry);
#endif /* DEBUG */

		if (hole_entry->vme_start == new_entry->vme_start &&
		    hole_entry->vme_end == new_entry->vme_end) {
			/* Case A */
#if DEBUG
			copy_hole_info(hole_entry, &old_hole_entry);
#endif /* DEBUG */

			/*
			 * This check makes sense only for regular maps, not copy maps.
			 * With a regular map, the VM entry is first linked and then
			 * the hole is deleted. So the check below, which makes sure that
			 * the map's bounds are being respected, is valid.
			 * But for copy maps, the hole is deleted before the VM entry is
			 * linked (vm_map_store_copy_insert) and so this check is invalid.
			 *
			 * if (hole_entry == (vm_map_entry_t) map->holes_list) {
			 *
			 *      if (hole_entry->vme_next == (vm_map_entry_t) map->holes_list) {
			 *
			 *              next_hole_entry = vm_map_last_entry(map);
			 *              assert(next_hole_entry->vme_end >= map->max_offset);
			 *      }
			 * }
			 */

			vm_map_delete_hole(map, hole_entry);

#if DEBUG
			if (vm_check_map_sanity && check_map_with_hole_sanity) {
				check_map_sanity(map, &old_hole_entry);
			}
#endif /* DEBUG */
			return;
		} else if (hole_entry->vme_start < new_entry->vme_start &&
		    hole_entry->vme_end > new_entry->vme_end) {
			/* Case B */
			struct vm_map_links *new_hole_entry = NULL;

			new_hole_entry = zalloc(vm_map_holes_zone);

#if DEBUG
			copy_hole_info(hole_entry, &old_hole_entry);
#endif /* DEBUG */

			/* Link the new (upper) fragment right after the old hole. */
			new_hole_entry->prev = hole_entry;
			new_hole_entry->next = hole_entry->vme_next;
			hole_entry->vme_next->vme_prev = CAST_TO_VM_MAP_ENTRY(new_hole_entry);
			hole_entry->vme_next = CAST_TO_VM_MAP_ENTRY(new_hole_entry);

			/* Old hole keeps the range below the entry; new hole takes the range above. */
			new_hole_entry->start = new_entry->vme_end;
			new_hole_entry->end = hole_entry->vme_end;
			hole_entry->vme_end = new_entry->vme_start;

			assert(hole_entry->vme_start < hole_entry->vme_end);
			assert(new_hole_entry->start < new_hole_entry->end);

#if DEBUG
			if (vm_check_map_sanity && check_map_with_hole_sanity) {
				check_map_sanity(map, &old_hole_entry);
			}
#endif /* DEBUG */

			SAVE_HINT_HOLE_WRITE(map, (struct vm_map_links*) hole_entry);
			return;
		} else if ((new_entry->vme_start <= hole_entry->vme_start) && (hole_entry->vme_start < new_entry->vme_end)) {
			/*
			 * Case C1: Entry moving upwards and a part/full hole lies within the bounds of the entry.
			 */

#if DEBUG
			copy_hole_info(hole_entry, &old_hole_entry);
#endif /* DEBUG */

			if (hole_entry->vme_end <= new_entry->vme_end) {
				/* Hole fully consumed by the entry. */
				vm_map_delete_hole(map, hole_entry);
			} else {
				/* Trim the hole's lower bound up to the entry's end. */
				hole_entry->vme_start = new_entry->vme_end;
				SAVE_HINT_HOLE_WRITE(map, (struct vm_map_links*) hole_entry);
			}

#if DEBUG
			if (vm_check_map_sanity && check_map_with_hole_sanity) {
				check_map_sanity(map, &old_hole_entry);
			}
#endif /* DEBUG */

			return;
		} else if ((new_entry->vme_start < hole_entry->vme_end) && (hole_entry->vme_end <= new_entry->vme_end)) {
			/*
			 * Case C2: Entry moving downwards and a part/full hole lies within the bounds of the entry.
			 */

#if DEBUG
			copy_hole_info(hole_entry, &old_hole_entry);
#endif /* DEBUG */

			if (hole_entry->vme_start >= new_entry->vme_start) {
				/* Hole fully consumed by the entry. */
				vm_map_delete_hole(map, hole_entry);
			} else {
				/* Trim the hole's upper bound down to the entry's start. */
				hole_entry->vme_end = new_entry->vme_start;
				SAVE_HINT_HOLE_WRITE(map, (struct vm_map_links*) hole_entry);
			}

#if DEBUG
			if (vm_check_map_sanity && check_map_with_hole_sanity) {
				check_map_sanity(map, &old_hole_entry);
			}
#endif /* DEBUG */

			return;
		}

		hole_entry = next_hole_entry;
		next_hole_entry = hole_entry->vme_next;

		/* Wrapped around the circular list: no hole covers the entry. */
		if (hole_entry == CAST_TO_VM_MAP_ENTRY(map->holes_list)) {
			break;
		}
	}

	panic("Illegal action: h1: %p, s:0x%llx, e:0x%llx...h2:%p, s:0x%llx, e:0x%llx...h3:0x%p, s:0x%llx, e:0x%llx\n",
	    hole_entry->vme_prev,
	    (unsigned long long)hole_entry->vme_prev->vme_start,
	    (unsigned long long)hole_entry->vme_prev->vme_end,
	    hole_entry,
	    (unsigned long long)hole_entry->vme_start,
	    (unsigned long long)hole_entry->vme_end,
	    hole_entry->vme_next,
	    (unsigned long long)hole_entry->vme_next->vme_start,
	    (unsigned long long)hole_entry->vme_next->vme_end);
}
649 | ||
650 | void | |
651 | update_first_free_rb(vm_map_t map, vm_map_entry_t entry, boolean_t new_entry_creation) | |
652 | { | |
3e170ce0 | 653 | if (map->holelistenabled) { |
3e170ce0 A |
654 | /* |
655 | * Holes can be used to track ranges all the way up to MACH_VM_MAX_ADDRESS or more (e.g. kernel map). | |
656 | */ | |
657 | vm_map_offset_t max_valid_offset = (map->max_offset > MACH_VM_MAX_ADDRESS) ? map->max_offset : MACH_VM_MAX_ADDRESS; | |
658 | ||
659 | /* | |
660 | * Clipping an entry will not result in the creation/deletion/modification of | |
661 | * a hole. Those calls pass NULL for their target entry. | |
662 | */ | |
663 | if (entry == NULL) { | |
664 | return; | |
665 | } | |
666 | ||
667 | /* | |
668 | * Commpage is pinned beyond the map's max offset. That shouldn't affect the | |
669 | * holes within the bounds of the map. | |
670 | */ | |
671 | if (vm_map_trunc_page(entry->vme_start, VM_MAP_PAGE_MASK(map)) >= max_valid_offset) { | |
672 | return; | |
673 | } | |
674 | ||
675 | /* | |
676 | * | |
677 | * Note: | |
678 | * | |
679 | * - A new entry has already been added to the map | |
680 | * OR | |
681 | * - An older entry has already been deleted from the map | |
682 | * | |
683 | * We are updating the hole list after the fact (except in one special case involving copy maps). | |
684 | * | |
685 | */ | |
686 | ||
687 | if (new_entry_creation) { | |
3e170ce0 A |
688 | update_holes_on_entry_creation(map, entry); |
689 | } else { | |
3e170ce0 A |
690 | update_holes_on_entry_deletion(map, entry); |
691 | } | |
692 | } | |
693 | } |