/*
 * Copyright (c) 2009-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <kern/backtrace.h>
#include <mach/sdt.h>
#include <vm/vm_map_store.h>
#include <vm/vm_pageout.h> /* for vm_debug_events */

#if MACH_ASSERT
/*
 * Debug-only sanity check: verify that the map's cached "first_free"
 * entry agrees with the linked-list store.
 */
boolean_t
first_free_is_valid_store( vm_map_t map )
{
	return first_free_is_valid_ll( map );
}
#endif

/*
 * A header opts out of red-black tree maintenance by storing the
 * SKIP_RB_TREE sentinel in its root pointer; every other header gets
 * RB support.
 */
boolean_t
vm_map_store_has_RB_support( struct vm_map_header *hdr )
{
	if ((void*)hdr->rb_head_store.rbh_root == (void*)(int)SKIP_RB_TREE) {
		return FALSE;
	}
	return TRUE;
}
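
/*
 * Illustrative sketch (not part of the original file, compiled out):
 * how a header would opt out of the red-black tree before any entries
 * are linked, e.g. for a transient copy map.  The double cast mirrors
 * the comparison above.
 */
#if 0 /* example only */
static void
example_opt_out_of_rb_tree(struct vm_map_header *hdr)
{
	/* Plant the sentinel; later store operations skip the RB tree. */
	hdr->rb_head_store.rbh_root = (void *)(int)SKIP_RB_TREE;
	assert(vm_map_store_has_RB_support(hdr) == FALSE);
}
#endif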

void
vm_map_store_init( struct vm_map_header *hdr )
{
	vm_map_store_init_ll( hdr );
#ifdef VM_MAP_STORE_USE_RB
	if (vm_map_store_has_RB_support( hdr )) {
		vm_map_store_init_rb( hdr );
	}
#endif
}

/*
 * vm_map_store_lookup_entry:
 *
 * Finds the entry containing ADDRESS, if any, and returns it in *ENTRY.
 * Returns TRUE on a hit; on a miss, the store implementations leave the
 * entry immediately preceding ADDRESS (or the map header, if none) in
 * *ENTRY.
 */
__attribute__((noinline))
boolean_t
vm_map_store_lookup_entry(
	vm_map_t        map,
	vm_map_offset_t address,
	vm_map_entry_t  *entry)         /* OUT */
{
#ifdef VM_MAP_STORE_USE_LL
	return vm_map_store_lookup_entry_ll( map, address, entry );
#elif defined VM_MAP_STORE_USE_RB
	if (vm_map_store_has_RB_support( &map->hdr )) {
		return vm_map_store_lookup_entry_rb( map, address, entry );
	} else {
		panic("VM map lookups need RB tree support.\n");
		return FALSE; /* For compiler warning.*/
	}
#endif
}
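
/*
 * Illustrative sketch (not part of the original file, compiled out):
 * the typical lookup pattern.  Callers hold the map lock; on a miss
 * the returned entry is the predecessor of the address, which is what
 * insertion paths want for "after_where".
 */
#if 0 /* example only */
static boolean_t
example_lookup(vm_map_t map, vm_map_offset_t addr)
{
	vm_map_entry_t  entry;
	boolean_t       hit;

	vm_map_lock_read(map);
	hit = vm_map_store_lookup_entry(map, addr, &entry);
	if (hit) {
		assert(entry->vme_start <= addr && addr < entry->vme_end);
	}
	vm_map_unlock_read(map);
	return hit;
}
#endif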

void
vm_map_store_update( vm_map_t map, vm_map_entry_t entry, int update_type )
{
	switch (update_type) {
	case VM_MAP_ENTRY_CREATE:
		break;
	case VM_MAP_ENTRY_DELETE:
		/*
		 * Drop any cached pointers that are about to go stale:
		 * fall back to the map-header sentinel entry.
		 */
		if ((map->holelistenabled == FALSE) && (entry == map->first_free)) {
			map->first_free = vm_map_to_entry(map);
		}
		if (entry == map->hint) {
			map->hint = vm_map_to_entry(map);
		}
		break;
	default:
		break;
	}
}

/*
 * vm_map_store_find_last_free:
 *
 * Finds and returns in O_ENTRY the entry *after* the last hole (if one
 * exists) in MAP.  Returns NULL in O_ENTRY if the map is full and no
 * hole can be found.
 */
void
vm_map_store_find_last_free(
	vm_map_t        map,
	vm_map_entry_t  *o_entry) /* OUT */
{
	/* TODO: Provide a RB implementation for this routine. */
	vm_map_store_find_last_free_ll(map, o_entry);
}
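
/*
 * Illustrative sketch (not part of the original file, compiled out):
 * one way a caller could use the routine for top-down placement.  The
 * entry handed back bounds the last hole from above, so a hypothetical
 * placement could start just below its vme_start.
 */
#if 0 /* example only */
static vm_map_offset_t
example_top_down_candidate(vm_map_t map, vm_map_size_t size)
{
	vm_map_entry_t last_free;

	vm_map_store_find_last_free(map, &last_free);
	if (last_free == NULL) {
		return 0; /* map is full, no hole to place into */
	}
	/* Hypothetical candidate address below the bounding entry. */
	return last_free->vme_start - size;
}
#endif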

/*
 * vm_map_entry_{un,}link:
 *
 * Insert/remove entries from maps (or map copies).
 * The _vm_map_store_entry_{un,}link variants are used in places where
 * updating first_free is not needed and where copy maps are being
 * modified; note that their first argument is the map header rather
 * than the map.  Teaching the vm_map_store_entry_{un,}link functions
 * to handle those call sites made the interface confusing and clunky,
 * hence the two variants.
 */

void
_vm_map_store_entry_link( struct vm_map_header * mapHdr, vm_map_entry_t after_where, vm_map_entry_t entry)
{
	assert(entry->vme_start < entry->vme_end);
	if (__improbable(vm_debug_events)) {
		/* The vm_map lock sits just in front of the header, so back up to the map. */
		DTRACE_VM4(map_entry_link, vm_map_t, (char *)mapHdr - sizeof(lck_rw_t), vm_map_entry_t, entry, vm_address_t, entry->links.start, vm_address_t, entry->links.end);
	}

	vm_map_store_entry_link_ll(mapHdr, after_where, entry);
#ifdef VM_MAP_STORE_USE_RB
	if (vm_map_store_has_RB_support( mapHdr )) {
		vm_map_store_entry_link_rb(mapHdr, after_where, entry);
	}
#endif
#if MAP_ENTRY_INSERTION_DEBUG
	if (entry->vme_start_original == 0 && entry->vme_end_original == 0) {
		entry->vme_start_original = entry->vme_start;
		entry->vme_end_original = entry->vme_end;
	}
	backtrace(&entry->vme_insertion_bt[0],
	    (sizeof(entry->vme_insertion_bt) / sizeof(uintptr_t)), NULL);
#endif
}

void
vm_map_store_entry_link(
	vm_map_t                map,
	vm_map_entry_t          after_where,
	vm_map_entry_t          entry,
	vm_map_kernel_flags_t   vmk_flags)
{
	/* Local names kept from this function's former macro incarnation. */
	vm_map_t VMEL_map;
	vm_map_entry_t VMEL_entry;
	VMEL_map = (map);
	VMEL_entry = (entry);

	if (entry->is_sub_map) {
		assertf(VM_MAP_PAGE_SHIFT(VME_SUBMAP(entry)) >= VM_MAP_PAGE_SHIFT(map),
		    "map %p (%d) entry %p submap %p (%d)\n",
		    map, VM_MAP_PAGE_SHIFT(map), entry,
		    VME_SUBMAP(entry), VM_MAP_PAGE_SHIFT(VME_SUBMAP(entry)));
	}

	_vm_map_store_entry_link(&VMEL_map->hdr, after_where, VMEL_entry);
	if (VMEL_map->disable_vmentry_reuse == TRUE) {
		UPDATE_HIGHEST_ENTRY_END( VMEL_map, VMEL_entry);
	} else {
		update_first_free_ll(VMEL_map, VMEL_map->first_free);
#ifdef VM_MAP_STORE_USE_RB
		if (vm_map_store_has_RB_support( &VMEL_map->hdr )) {
			update_first_free_rb(VMEL_map, entry, TRUE);
		}
#endif
	}
#if PMAP_CS
	(void) vm_map_entry_cs_associate(map, entry, vmk_flags);
#else /* PMAP_CS */
	(void) vmk_flags; /* unused without PMAP_CS */
#endif /* PMAP_CS */
}
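
/*
 * Illustrative sketch (not part of the original file, compiled out):
 * the usual insertion sequence.  A caller holding the map lock looks
 * up the predecessor of the new range and links the entry after it;
 * the store keeps the list, the RB tree, and first_free coherent.
 * "new_entry" is assumed to be fully initialized by the caller.
 */
#if 0 /* example only */
static void
example_insert(vm_map_t map, vm_map_entry_t new_entry)
{
	vm_map_entry_t after_where;

	vm_map_lock(map);
	/* A miss leaves the predecessor entry, the right insertion point. */
	if (!vm_map_store_lookup_entry(map, new_entry->vme_start, &after_where)) {
		vm_map_store_entry_link(map, after_where, new_entry,
		    VM_MAP_KERNEL_FLAGS_NONE);
	}
	vm_map_unlock(map);
}
#endif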

void
_vm_map_store_entry_unlink( struct vm_map_header * mapHdr, vm_map_entry_t entry)
{
	if (__improbable(vm_debug_events)) {
		DTRACE_VM4(map_entry_unlink, vm_map_t, (char *)mapHdr - sizeof(lck_rw_t), vm_map_entry_t, entry, vm_address_t, entry->links.start, vm_address_t, entry->links.end);
	}

	vm_map_store_entry_unlink_ll(mapHdr, entry);
#ifdef VM_MAP_STORE_USE_RB
	if (vm_map_store_has_RB_support( mapHdr )) {
		vm_map_store_entry_unlink_rb(mapHdr, entry);
	}
#endif
}

void
vm_map_store_entry_unlink( vm_map_t map, vm_map_entry_t entry)
{
	/* Local names kept from this function's former macro incarnation. */
	vm_map_t VMEU_map;
	vm_map_entry_t VMEU_entry = NULL;
	vm_map_entry_t VMEU_first_free = NULL;
	VMEU_map = (map);
	VMEU_entry = (entry);

	if (map->holelistenabled == FALSE) {
		/*
		 * If the departing entry sits at or below the cached
		 * first_free, first_free must back up to the entry's
		 * predecessor; otherwise the cached value still stands.
		 */
		if (VMEU_entry->vme_start <= VMEU_map->first_free->vme_start) {
			VMEU_first_free = VMEU_entry->vme_prev;
		} else {
			VMEU_first_free = VMEU_map->first_free;
		}
	}
	_vm_map_store_entry_unlink(&VMEU_map->hdr, VMEU_entry);
	vm_map_store_update( map, entry, VM_MAP_ENTRY_DELETE);
	update_first_free_ll(VMEU_map, VMEU_first_free);
#ifdef VM_MAP_STORE_USE_RB
	if (vm_map_store_has_RB_support( &VMEU_map->hdr )) {
		update_first_free_rb(VMEU_map, entry, FALSE);
	}
#endif
}
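
/*
 * Illustrative sketch (not part of the original file, compiled out):
 * removing an entry.  The caller holds the map lock exclusively;
 * unlinking updates every store flavor and invalidates the hint and
 * first_free caches as needed.  Freeing the entry afterwards is the
 * caller's job (shown here with a hypothetical helper).
 */
#if 0 /* example only */
static void
example_remove(vm_map_t map, vm_map_offset_t addr)
{
	vm_map_entry_t entry;

	vm_map_lock(map);
	if (vm_map_store_lookup_entry(map, addr, &entry)) {
		vm_map_store_entry_unlink(map, entry);
		example_free_entry(entry); /* hypothetical cleanup helper */
	}
	vm_map_unlock(map);
}
#endif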

void
vm_map_store_copy_reset( vm_map_copy_t copy, vm_map_entry_t entry)
{
	int nentries = copy->cpy_hdr.nentries;
	vm_map_store_copy_reset_ll(copy, entry, nentries);
#ifdef VM_MAP_STORE_USE_RB
	if (vm_map_store_has_RB_support( &copy->c_u.hdr )) {
		vm_map_store_copy_reset_rb(copy, entry, nentries);
	}
#endif
}

void
vm_map_store_update_first_free( vm_map_t map, vm_map_entry_t first_free_entry, boolean_t new_entry_creation)
{
	update_first_free_ll(map, first_free_entry);
#ifdef VM_MAP_STORE_USE_RB
	if (vm_map_store_has_RB_support( &map->hdr )) {
		update_first_free_rb(map, first_free_entry, new_entry_creation);
	}
#endif
}
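
/*
 * Illustrative sketch (not part of the original file, compiled out):
 * a plausible caller pattern.  After an operation reshapes entries in
 * place (clipping, coalescing), first_free is re-derived from its
 * current cached value rather than from a newly created entry, so
 * new_entry_creation is FALSE.
 */
#if 0 /* example only */
static void
example_refresh_first_free(vm_map_t map)
{
	vm_map_store_update_first_free(map, map->first_free, FALSE);
}
#endif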