/*
 * Copyright (c) 2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <vm/vm_map_store_ll.h>
32 first_free_is_valid_ll( vm_map_t map
)
34 vm_map_entry_t entry
, next
;
35 entry
= vm_map_to_entry(map
);
36 next
= entry
->vme_next
;
37 while (vm_map_trunc_page(next
->vme_start
,
38 VM_MAP_PAGE_MASK(map
)) ==
39 vm_map_trunc_page(entry
->vme_end
,
40 VM_MAP_PAGE_MASK(map
)) ||
41 (vm_map_trunc_page(next
->vme_start
,
42 VM_MAP_PAGE_MASK(map
)) ==
43 vm_map_trunc_page(entry
->vme_start
,
44 VM_MAP_PAGE_MASK(map
)) &&
45 next
!= vm_map_to_entry(map
))) {
47 next
= entry
->vme_next
;
48 if (entry
== vm_map_to_entry(map
))
51 if (map
->first_free
!= entry
) {
52 printf("Bad first_free for map %p: %p should be %p\n",
53 map
, map
->first_free
, entry
);
/*
 *	UPDATE_FIRST_FREE_LL:
 *
 *	Updates the map->first_free pointer to the
 *	entry immediately before the first hole in the map.
 *	The map should be locked.
 *
 *	NOTE: arguments are evaluated multiple times; they are copied
 *	into UFF_* locals before use to limit re-evaluation.
 */
#define UPDATE_FIRST_FREE_LL(map, new_first_free)			\
	MACRO_BEGIN							\
	if( map->disable_vmentry_reuse == FALSE){			\
		vm_map_t	UFF_map;				\
		vm_map_entry_t	UFF_first_free;				\
		vm_map_entry_t	UFF_next_entry;				\
									\
		UFF_map = (map);					\
		UFF_first_free = (new_first_free);			\
		UFF_next_entry = UFF_first_free->vme_next;		\
		/* Same scan as first_free_is_valid_ll(): advance while	\
		 * no page-level hole separates the two entries. */	\
		while (vm_map_trunc_page(UFF_next_entry->vme_start,	\
					 VM_MAP_PAGE_MASK(UFF_map)) ==	\
		       vm_map_trunc_page(UFF_first_free->vme_end,	\
					 VM_MAP_PAGE_MASK(UFF_map)) ||	\
		       (vm_map_trunc_page(UFF_next_entry->vme_start,	\
					  VM_MAP_PAGE_MASK(UFF_map)) ==	\
			vm_map_trunc_page(UFF_first_free->vme_start,	\
					  VM_MAP_PAGE_MASK(UFF_map)) &&	\
			UFF_next_entry != vm_map_to_entry(UFF_map))) {	\
			UFF_first_free = UFF_next_entry;		\
			UFF_next_entry = UFF_first_free->vme_next;	\
			if (UFF_first_free == vm_map_to_entry(UFF_map))	\
				break;					\
		}							\
		UFF_map->first_free = UFF_first_free;			\
		assert(first_free_is_valid(UFF_map));			\
	}								\
	MACRO_END
/*
 * Insert "entry" into the doubly-linked entry list of "hdr",
 * immediately after "after_where".  When the entry is marked
 * map-aligned, assert that its bounds honor the header's page mask.
 *
 * NOTE: "entry" and "hdr" are evaluated multiple times.
 */
#define _vm_map_entry_link_ll(hdr, after_where, entry)			\
	MACRO_BEGIN							\
	if (entry->map_aligned) {					\
		assert(VM_MAP_PAGE_ALIGNED((entry->vme_start),		\
					   VM_MAP_HDR_PAGE_MASK((hdr))));\
		assert(VM_MAP_PAGE_ALIGNED((entry->vme_end),		\
					   VM_MAP_HDR_PAGE_MASK((hdr))));\
	}								\
	(entry)->vme_prev = (after_where);				\
	(entry)->vme_next = (after_where)->vme_next;			\
	(entry)->vme_prev->vme_next = (entry)->vme_next->vme_prev = (entry); \
	MACRO_END
/*
 * Remove "entry" from the doubly-linked entry list; the neighbors
 * are re-linked around it.  The entry's own links are left stale.
 *
 * NOTE: "entry" is evaluated multiple times.
 */
#define _vm_map_entry_unlink_ll(hdr, entry)				\
	MACRO_BEGIN							\
	(entry)->vme_next->vme_prev = (entry)->vme_prev;		\
	(entry)->vme_prev->vme_next = (entry)->vme_next;		\
	MACRO_END
/*
 *	Macro:		vm_map_copy_insert
 *
 *	Description:
 *		Link a copy chain ("copy") into a map at the
 *		specified location (after "where").
 *	In/out conditions:
 *		The copy chain is destroyed.
 *	WARNING:
 *		The arguments are evaluated multiple times.
 */
#define _vm_map_copy_insert_ll(map, where, copy)			\
	MACRO_BEGIN							\
	vm_map_t VMCI_map;						\
	vm_map_entry_t VMCI_where;					\
	vm_map_copy_t VMCI_copy;					\
									\
	VMCI_map = (map);						\
	VMCI_where = (where);						\
	VMCI_copy = (copy);						\
	/* Splice the copy's whole entry chain between VMCI_where	\
	 * and its successor, then fold in the entry count and		\
	 * recompute first_free. */					\
	((VMCI_where->vme_next)->vme_prev = vm_map_copy_last_entry(VMCI_copy))\
	 ->vme_next = (VMCI_where->vme_next);				\
	((VMCI_where)->vme_next = vm_map_copy_first_entry(VMCI_copy))	\
	 ->vme_prev = VMCI_where;					\
	VMCI_map->hdr.nentries += VMCI_copy->cpy_hdr.nentries;		\
	update_first_free_ll(VMCI_map, VMCI_map->first_free);		\
	MACRO_END
144 vm_map_store_init_ll( __unused
struct vm_map_header
*hdr
)
150 * vm_map_lookup_entry_ll: [ internal use only ]
151 * Use the linked list to find the map entry containing (or
152 * immediately preceding) the specified address
153 * in the given map; the entry is returned
154 * in the "entry" parameter. The boolean
155 * result indicates whether the address is
156 * actually contained in the map.
159 vm_map_store_lookup_entry_ll(
160 register vm_map_t map
,
161 register vm_map_offset_t address
,
162 vm_map_entry_t
*entry
) /* OUT */
164 register vm_map_entry_t cur
;
165 register vm_map_entry_t last
;
168 * Start looking either from the head of the
169 * list, or from the hint.
173 if (cur
== vm_map_to_entry(map
))
176 if (address
>= cur
->vme_start
) {
178 * Go from hint to end of list.
180 * But first, make a quick check to see if
181 * we are already looking at the entry we
182 * want (which is usually the case).
183 * Note also that we don't need to save the hint
184 * here... it is the same hint (unless we are
185 * at the header, in which case the hint didn't
186 * buy us anything anyway).
188 last
= vm_map_to_entry(map
);
189 if ((cur
!= last
) && (cur
->vme_end
> address
)) {
196 * Go from start to hint, *inclusively*
198 last
= cur
->vme_next
;
199 cur
= vm_map_first_entry(map
);
206 while (cur
!= last
) {
207 if (cur
->vme_end
> address
) {
208 if (address
>= cur
->vme_start
) {
210 * Save this lookup for future
215 SAVE_HINT_MAP_READ(map
, cur
);
223 *entry
= cur
->vme_prev
;
224 SAVE_HINT_MAP_READ(map
, *entry
);
230 vm_map_store_entry_link_ll( struct vm_map_header
*mapHdr
, vm_map_entry_t after_where
, vm_map_entry_t entry
)
232 _vm_map_entry_link_ll( mapHdr
, after_where
, entry
);
236 vm_map_store_entry_unlink_ll( struct vm_map_header
*mapHdr
, vm_map_entry_t entry
)
238 _vm_map_entry_unlink_ll( mapHdr
, entry
);
242 vm_map_store_copy_insert_ll( vm_map_t map
, vm_map_entry_t after_where
, vm_map_copy_t copy
)
244 _vm_map_copy_insert_ll( map
, after_where
, copy
);
248 vm_map_store_copy_reset_ll( vm_map_copy_t copy
, __unused vm_map_entry_t entry
, __unused
int nentries
)
250 copy
->cpy_hdr
.nentries
= 0;
251 vm_map_copy_first_entry(copy
) =
252 vm_map_copy_last_entry(copy
) =
253 vm_map_copy_to_entry(copy
);
258 update_first_free_ll( vm_map_t map
, vm_map_entry_t new_first_free
)
260 UPDATE_FIRST_FREE_LL( map
, new_first_free
);