/*
 * Copyright (c) 2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <vm/vm_map_store_ll.h>

boolean_t
first_free_is_valid_ll( vm_map_t map )
{
    vm_map_entry_t entry, next;
    entry = vm_map_to_entry(map);
    next = entry->vme_next;
    while (vm_map_trunc_page(next->vme_start,
        VM_MAP_PAGE_MASK(map)) ==
        vm_map_trunc_page(entry->vme_end,
        VM_MAP_PAGE_MASK(map)) ||
        (vm_map_trunc_page(next->vme_start,
        VM_MAP_PAGE_MASK(map)) ==
        vm_map_trunc_page(entry->vme_start,
        VM_MAP_PAGE_MASK(map)) &&
        next != vm_map_to_entry(map))) {
        entry = next;
        next = entry->vme_next;
        if (entry == vm_map_to_entry(map)) {
            break;
        }
    }
    if (map->first_free != entry) {
        printf("Bad first_free for map %p: %p should be %p\n",
            map, map->first_free, entry);
        return FALSE;
    }
    return TRUE;
}

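/*
 * A minimal usage sketch (hypothetical debug caller, not taken from this
 * file): the walk above mirrors UPDATE_FIRST_FREE_LL below, so the check
 * is meant to run with the map locked, e.g. as a consistency assertion
 * after the hint has been recomputed:
 *
 *     vm_map_lock(map);
 *     update_first_free_ll(map, map->first_free);
 *     assert(first_free_is_valid_ll(map));
 *     vm_map_unlock(map);
 */
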
/*
 * UPDATE_FIRST_FREE:
 *
 * Updates the map->first_free pointer to the
 * entry immediately before the first hole in the map.
 * The map should be locked.
 */
#define UPDATE_FIRST_FREE_LL(map, new_first_free) \
    MACRO_BEGIN \
    if( map->disable_vmentry_reuse == FALSE){ \
        vm_map_t UFF_map; \
        vm_map_entry_t UFF_first_free; \
        vm_map_entry_t UFF_next_entry; \
        UFF_map = (map); \
        UFF_first_free = (new_first_free); \
        UFF_next_entry = UFF_first_free->vme_next; \
        while (vm_map_trunc_page(UFF_next_entry->vme_start, \
            VM_MAP_PAGE_MASK(UFF_map)) == \
            vm_map_trunc_page(UFF_first_free->vme_end, \
            VM_MAP_PAGE_MASK(UFF_map)) || \
            (vm_map_trunc_page(UFF_next_entry->vme_start, \
            VM_MAP_PAGE_MASK(UFF_map)) == \
            vm_map_trunc_page(UFF_first_free->vme_start, \
            VM_MAP_PAGE_MASK(UFF_map)) && \
            UFF_next_entry != vm_map_to_entry(UFF_map))) { \
            UFF_first_free = UFF_next_entry; \
            UFF_next_entry = UFF_first_free->vme_next; \
            if (UFF_first_free == vm_map_to_entry(UFF_map)) \
                break; \
        } \
        UFF_map->first_free = UFF_first_free; \
        assert(first_free_is_valid(UFF_map)); \
    } \
    MACRO_END

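/*
 * Usage sketch (hypothetical caller, map locked): after an entry is
 * clipped or removed, the hint is re-derived by walking forward from a
 * known-good predecessor until the first gap between one entry's
 * vme_end and the next entry's vme_start appears:
 *
 *     vm_map_store_entry_unlink_ll(&map->hdr, entry);
 *     UPDATE_FIRST_FREE_LL(map, entry->vme_prev);
 */
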
#define _vm_map_entry_link_ll(hdr, after_where, entry) \
    MACRO_BEGIN \
    if (entry->map_aligned) { \
        assert(VM_MAP_PAGE_ALIGNED((entry->vme_start), \
            VM_MAP_HDR_PAGE_MASK((hdr)))); \
        assert(VM_MAP_PAGE_ALIGNED((entry->vme_end), \
            VM_MAP_HDR_PAGE_MASK((hdr)))); \
    } \
    (hdr)->nentries++; \
    (entry)->vme_prev = (after_where); \
    (entry)->vme_next = (after_where)->vme_next; \
    (entry)->vme_prev->vme_next = (entry)->vme_next->vme_prev = (entry); \
    MACRO_END

#define _vm_map_entry_unlink_ll(hdr, entry) \
    MACRO_BEGIN \
    (hdr)->nentries--; \
    (entry)->vme_next->vme_prev = (entry)->vme_prev; \
    (entry)->vme_prev->vme_next = (entry)->vme_next; \
    MACRO_END
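
/*
 * Illustrative invariant for the two macros above (entry names are made
 * up): linking E after A in the circular list ... A <-> B ... yields
 * ... A <-> E <-> B ..., and unlinking E restores the original chain,
 * with (hdr)->nentries adjusted in each direction:
 *
 *     _vm_map_entry_link_ll(hdr, A, E);    // A->vme_next == E, E->vme_next == B
 *     _vm_map_entry_unlink_ll(hdr, E);     // A->vme_next == B again
 */
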
/*
 * Macro:       vm_map_copy_insert
 *
 * Description:
 *      Link a copy chain ("copy") into a map at the
 *      specified location (after "where").
 * Side effects:
 *      The copy chain is destroyed.
 * Warning:
 *      The arguments are evaluated multiple times.
 */
#define _vm_map_copy_insert_ll(map, where, copy) \
MACRO_BEGIN \
    vm_map_t VMCI_map; \
    vm_map_entry_t VMCI_where; \
    vm_map_copy_t VMCI_copy; \
    VMCI_map = (map); \
    VMCI_where = (where); \
    VMCI_copy = (copy); \
    ((VMCI_where->vme_next)->vme_prev = vm_map_copy_last_entry(VMCI_copy)) \
        ->vme_next = (VMCI_where->vme_next); \
    ((VMCI_where)->vme_next = vm_map_copy_first_entry(VMCI_copy)) \
        ->vme_prev = VMCI_where; \
    VMCI_map->hdr.nentries += VMCI_copy->cpy_hdr.nentries; \
    update_first_free_ll(VMCI_map, VMCI_map->first_free); \
MACRO_END
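
/*
 * Splice sketch (illustrative): given map entries ... W <-> N ... and a
 * copy chain C1 <-> ... <-> Cn, the macro above produces
 * ... W <-> C1 <-> ... <-> Cn <-> N ... in O(1), then recomputes the
 * free-space hint. A hypothetical caller, with the map locked:
 *
 *     _vm_map_copy_insert_ll(map, where, copy);
 *     // "copy"'s entry chain now belongs to "map", per the
 *     // "copy chain is destroyed" side effect noted above
 */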


void
vm_map_store_init_ll( __unused struct vm_map_header *hdr)
{
    return;
}

/*
 * vm_map_lookup_entry_ll:      [ internal use only ]
 *
 * Use the linked list to find the map entry containing (or
 * immediately preceding) the specified address in the given map;
 * the entry is returned in the "entry" parameter. The boolean
 * result indicates whether the address is actually contained
 * in the map.
 */
boolean_t
vm_map_store_lookup_entry_ll(
    vm_map_t        map,
    vm_map_offset_t address,
    vm_map_entry_t  *entry)         /* OUT */
{
    vm_map_entry_t cur;
    vm_map_entry_t last;

    /*
     * Start looking either from the head of the
     * list, or from the hint.
     */
    cur = map->hint;

    if (cur == vm_map_to_entry(map)) {
        cur = cur->vme_next;
    }

    if (address >= cur->vme_start) {
        /*
         * Go from hint to end of list.
         *
         * But first, make a quick check to see if
         * we are already looking at the entry we
         * want (which is usually the case).
         * Note also that we don't need to save the hint
         * here... it is the same hint (unless we are
         * at the header, in which case the hint didn't
         * buy us anything anyway).
         */
        last = vm_map_to_entry(map);
        if ((cur != last) && (cur->vme_end > address)) {
            *entry = cur;
            return TRUE;
        }
    } else {
        /*
         * Go from start to hint, *inclusively*
         */
        last = cur->vme_next;
        cur = vm_map_first_entry(map);
    }

    /*
     * Search linearly
     */

    while (cur != last) {
        if (cur->vme_end > address) {
            if (address >= cur->vme_start) {
                /*
                 * Save this lookup for future
                 * hints, and return
                 */

                *entry = cur;
                SAVE_HINT_MAP_READ(map, cur);

                return TRUE;
            }
            break;
        }
        cur = cur->vme_next;
    }
    *entry = cur->vme_prev;
    SAVE_HINT_MAP_READ(map, *entry);

    return FALSE;
}

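/*
 * Usage sketch (hypothetical caller, map locked): on a hit, *entry is
 * the entry containing the address; on a miss, it is the immediate
 * predecessor, so a new entry can be linked right after it:
 *
 *     vm_map_entry_t entry;
 *     if (vm_map_store_lookup_entry_ll(map, addr, &entry)) {
 *         // addr lies in [entry->vme_start, entry->vme_end)
 *     } else {
 *         // entry precedes addr; a new entry may be linked after it
 *     }
 */
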
/*
 * vm_map_store_find_last_free_ll:
 *
 * Finds and returns in O_ENTRY the entry *after* the last hole (if one
 * exists) in MAP.
 * Returns NULL if the map is full and no hole can be found.
 */
void
vm_map_store_find_last_free_ll(
    vm_map_t        map,
    vm_map_entry_t  *o_entry)       /* OUT */
{
    vm_map_entry_t entry, prev_entry;
    vm_offset_t end;

    entry = vm_map_to_entry(map);
    end = map->max_offset;

    /* Skip over contiguous entries from the end of the map until the walk wraps around */
    while ((prev_entry = entry->vme_prev) != vm_map_to_entry(map)
        && (prev_entry->vme_end == end || prev_entry->vme_end > map->max_offset)) {
        entry = prev_entry;
        end = entry->vme_start;
    }

    if (entry != vm_map_to_entry(map) && entry->vme_start == map->min_offset) {
        /* Map is completely full, no holes exist */
        *o_entry = NULL;
        return;
    }

    *o_entry = entry;
}

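/*
 * Result sketch (illustrative layout, names are made up): for a map
 * whose tail is a contiguous run [E1][E2][E3] ending at max_offset,
 * the backward walk above stops at E1, so *o_entry bounds the last
 * hole from above and the hole itself ends at E1->vme_start:
 *
 *     vm_map_entry_t after_hole;
 *     vm_map_store_find_last_free_ll(map, &after_hole);
 *     if (after_hole != NULL) {
 *         // the last hole ends at after_hole->vme_start
 *     }
 */
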
void
vm_map_store_entry_link_ll( struct vm_map_header *mapHdr, vm_map_entry_t after_where, vm_map_entry_t entry)
{
    _vm_map_entry_link_ll( mapHdr, after_where, entry);
}

void
vm_map_store_entry_unlink_ll( struct vm_map_header *mapHdr, vm_map_entry_t entry)
{
    _vm_map_entry_unlink_ll( mapHdr, entry);
}

void
vm_map_store_copy_reset_ll( vm_map_copy_t copy, __unused vm_map_entry_t entry, __unused int nentries)
{
    copy->cpy_hdr.nentries = 0;
    vm_map_copy_first_entry(copy) =
        vm_map_copy_last_entry(copy) =
        vm_map_copy_to_entry(copy);
}

void
update_first_free_ll( vm_map_t map, vm_map_entry_t new_first_free)
{
    if (map->holelistenabled) {
        return;
    }

    UPDATE_FIRST_FREE_LL( map, new_first_free);
}
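
/*
 * When map->holelistenabled is set, free space is tracked by the map's
 * hole list rather than the first_free hint, so there is nothing for
 * the linked-list store to update here.
 */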