/*
 * Copyright (c) 2009-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
29 #include <kern/backtrace.h>
31 #include <vm/vm_map_store.h>
32 #include <vm/vm_pageout.h> /* for vm_debug_events */
36 first_free_is_valid_store( vm_map_t map
)
38 return first_free_is_valid_ll( map
);
43 vm_map_store_has_RB_support( struct vm_map_header
*hdr
)
45 if ((void*)hdr
->rb_head_store
.rbh_root
== (void*)(int)SKIP_RB_TREE
) {
/*
 * vm_map_store_init:
 *
 * Initialize both backing stores for a map header: the linked list
 * always, and the red-black tree only when compiled in and the header
 * has not opted out of RB support.
 */
void
vm_map_store_init( struct vm_map_header *hdr )
{
	vm_map_store_init_ll( hdr );
#ifdef VM_MAP_STORE_USE_RB
	if (vm_map_store_has_RB_support( hdr )) {
		vm_map_store_init_rb( hdr );
	}
#endif
}
62 __attribute__((noinline
))
64 vm_map_store_lookup_entry(
66 vm_map_offset_t address
,
67 vm_map_entry_t
*entry
) /* OUT */
69 #ifdef VM_MAP_STORE_USE_LL
70 return vm_map_store_lookup_entry_ll( map
, address
, entry
);
71 #elif defined VM_MAP_STORE_USE_RB
72 if (vm_map_store_has_RB_support( &map
->hdr
)) {
73 return vm_map_store_lookup_entry_rb( map
, address
, entry
);
75 panic("VM map lookups need RB tree support.\n");
76 return FALSE
; /* For compiler warning.*/
82 vm_map_store_update( vm_map_t map
, vm_map_entry_t entry
, int update_type
)
84 switch (update_type
) {
85 case VM_MAP_ENTRY_CREATE
:
87 case VM_MAP_ENTRY_DELETE
:
88 if ((map
->holelistenabled
== FALSE
) && ((entry
) == (map
)->first_free
)) {
89 (map
)->first_free
= vm_map_to_entry(map
);
91 if ((entry
) == (map
)->hint
) {
92 (map
)->hint
= vm_map_to_entry(map
);
101 * vm_map_store_find_last_free:
103 * Finds and returns in O_ENTRY the entry *after* the last hole (if one exists) in MAP.
104 * Returns NULL if map is full and no hole can be found.
107 vm_map_store_find_last_free(
109 vm_map_entry_t
*o_entry
) /* OUT */
111 /* TODO: Provide a RB implementation for this routine. */
112 vm_map_store_find_last_free_ll(map
, o_entry
);
/*
 *	vm_map_entry_{un,}link:
 *
 *	Insert/remove entries from maps (or map copies).
 *	The _vm_map_store_entry_{un,}link variants are used at
 *	some places where updating first_free is not needed &
 *	copy maps are being modified. Also note the first argument
 *	is a map header.
 *	Modifying the vm_map_store_entry_{un,}link functions to
 *	deal with these call sites made the interface confusing
 *	and clunky.
 */
129 _vm_map_store_entry_link( struct vm_map_header
* mapHdr
, vm_map_entry_t after_where
, vm_map_entry_t entry
)
131 assert(entry
->vme_start
< entry
->vme_end
);
132 if (__improbable(vm_debug_events
)) {
133 DTRACE_VM4(map_entry_link
, vm_map_t
, (char *)mapHdr
- sizeof(lck_rw_t
), vm_map_entry_t
, entry
, vm_address_t
, entry
->links
.start
, vm_address_t
, entry
->links
.end
);
136 vm_map_store_entry_link_ll(mapHdr
, after_where
, entry
);
137 #ifdef VM_MAP_STORE_USE_RB
138 if (vm_map_store_has_RB_support( mapHdr
)) {
139 vm_map_store_entry_link_rb(mapHdr
, after_where
, entry
);
142 #if MAP_ENTRY_INSERTION_DEBUG
143 if (entry
->vme_start_original
== 0 && entry
->vme_end_original
== 0) {
144 entry
->vme_start_original
= entry
->vme_start
;
145 entry
->vme_end_original
= entry
->vme_end
;
147 backtrace(&entry
->vme_insertion_bt
[0],
148 (sizeof(entry
->vme_insertion_bt
) / sizeof(uintptr_t)), NULL
);
153 vm_map_store_entry_link(
155 vm_map_entry_t after_where
,
156 vm_map_entry_t entry
,
157 vm_map_kernel_flags_t vmk_flags
)
160 vm_map_entry_t VMEL_entry
;
162 VMEL_entry
= (entry
);
164 if (entry
->is_sub_map
) {
165 assertf(VM_MAP_PAGE_SHIFT(VME_SUBMAP(entry
)) >= VM_MAP_PAGE_SHIFT(map
),
166 "map %p (%d) entry %p submap %p (%d)\n",
167 map
, VM_MAP_PAGE_SHIFT(map
), entry
,
168 VME_SUBMAP(entry
), VM_MAP_PAGE_SHIFT(VME_SUBMAP(entry
)));
171 _vm_map_store_entry_link(&VMEL_map
->hdr
, after_where
, VMEL_entry
);
172 if (VMEL_map
->disable_vmentry_reuse
== TRUE
) {
173 UPDATE_HIGHEST_ENTRY_END( VMEL_map
, VMEL_entry
);
175 update_first_free_ll(VMEL_map
, VMEL_map
->first_free
);
176 #ifdef VM_MAP_STORE_USE_RB
177 if (vm_map_store_has_RB_support( &VMEL_map
->hdr
)) {
178 update_first_free_rb(VMEL_map
, entry
, TRUE
);
183 (void) vm_map_entry_cs_associate(map
, entry
, vmk_flags
);
190 _vm_map_store_entry_unlink( struct vm_map_header
* mapHdr
, vm_map_entry_t entry
)
192 if (__improbable(vm_debug_events
)) {
193 DTRACE_VM4(map_entry_unlink
, vm_map_t
, (char *)mapHdr
- sizeof(lck_rw_t
), vm_map_entry_t
, entry
, vm_address_t
, entry
->links
.start
, vm_address_t
, entry
->links
.end
);
196 vm_map_store_entry_unlink_ll(mapHdr
, entry
);
197 #ifdef VM_MAP_STORE_USE_RB
198 if (vm_map_store_has_RB_support( mapHdr
)) {
199 vm_map_store_entry_unlink_rb(mapHdr
, entry
);
205 vm_map_store_entry_unlink( vm_map_t map
, vm_map_entry_t entry
)
208 vm_map_entry_t VMEU_entry
= NULL
;
209 vm_map_entry_t VMEU_first_free
= NULL
;
211 VMEU_entry
= (entry
);
213 if (map
->holelistenabled
== FALSE
) {
214 if (VMEU_entry
->vme_start
<= VMEU_map
->first_free
->vme_start
) {
215 VMEU_first_free
= VMEU_entry
->vme_prev
;
217 VMEU_first_free
= VMEU_map
->first_free
;
220 _vm_map_store_entry_unlink(&VMEU_map
->hdr
, VMEU_entry
);
221 vm_map_store_update( map
, entry
, VM_MAP_ENTRY_DELETE
);
222 update_first_free_ll(VMEU_map
, VMEU_first_free
);
223 #ifdef VM_MAP_STORE_USE_RB
224 if (vm_map_store_has_RB_support( &VMEU_map
->hdr
)) {
225 update_first_free_rb(VMEU_map
, entry
, FALSE
);
231 vm_map_store_copy_reset( vm_map_copy_t copy
, vm_map_entry_t entry
)
233 int nentries
= copy
->cpy_hdr
.nentries
;
234 vm_map_store_copy_reset_ll(copy
, entry
, nentries
);
235 #ifdef VM_MAP_STORE_USE_RB
236 if (vm_map_store_has_RB_support( ©
->c_u
.hdr
)) {
237 vm_map_store_copy_reset_rb(copy
, entry
, nentries
);
243 vm_map_store_update_first_free( vm_map_t map
, vm_map_entry_t first_free_entry
, boolean_t new_entry_creation
)
245 update_first_free_ll(map
, first_free_entry
);
246 #ifdef VM_MAP_STORE_USE_RB
247 if (vm_map_store_has_RB_support( &map
->hdr
)) {
248 update_first_free_rb(map
, first_free_entry
, new_entry_creation
);