/*
 * Copyright (c) 2009 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <kern/backtrace.h>
#include <mach/sdt.h>
#include <vm/vm_map_store.h>
#include <vm/vm_pageout.h>	/* for vm_debug_events */

#if MACH_ASSERT
/*
 * Debug-only (MACH_ASSERT) sanity check: defer to the linked-list store
 * to validate the map's cached first_free hint.
 */
boolean_t
first_free_is_valid_store( vm_map_t map )
{
	return(first_free_is_valid_ll( map ));
}
#endif

/*
 * A map header opts out of the red-black tree store by stashing the
 * SKIP_RB_TREE sentinel as its rb_head_store root.
 */
boolean_t
vm_map_store_has_RB_support( struct vm_map_header *hdr )
{
	if ((void*)hdr->rb_head_store.rbh_root == (void*)(int)SKIP_RB_TREE) {
		return FALSE;
	}
	return TRUE;
}

/*
 * Initialize the backing stores for a map header: always the doubly
 * linked list, and the red-black tree only when it is compiled in and
 * the header supports it.
 */
void
vm_map_store_init( struct vm_map_header *hdr )
{
	vm_map_store_init_ll( hdr );
#ifdef VM_MAP_STORE_USE_RB
	if (vm_map_store_has_RB_support( hdr )) {
		vm_map_store_init_rb( hdr );
	}
#endif
}

/*
 * Look up the map entry containing "address" (returns TRUE), or set
 * *entry to the entry preceding that address (returns FALSE).
 * Dispatches to whichever store is configured; when the RB tree is the
 * configured store it must be present.
 */
boolean_t
vm_map_store_lookup_entry(
	vm_map_t		map,
	vm_map_offset_t		address,
	vm_map_entry_t		*entry)		/* OUT */
{
#ifdef VM_MAP_STORE_USE_LL
	return (vm_map_store_lookup_entry_ll( map, address, entry ));
#elif defined VM_MAP_STORE_USE_RB
	if (vm_map_store_has_RB_support( &map->hdr )) {
		return (vm_map_store_lookup_entry_rb( map, address, entry ));
	} else {
		panic("VM map lookups need RB tree support.\n");
		return FALSE; /* For compiler warning.*/
	}
#endif
}

/*
 * Keep the map's cached hints consistent when an entry is created or
 * deleted. On deletion, the first_free hint (when the hole list is not
 * in use) and the lookup hint are reset to the map header if they
 * pointed at the departing entry.
 */
void
vm_map_store_update( vm_map_t map, vm_map_entry_t entry, int update_type )
{
	switch (update_type) {
		case VM_MAP_ENTRY_CREATE:
			break;
		case VM_MAP_ENTRY_DELETE:
			if((map->holelistenabled == FALSE) && ((entry) == (map)->first_free)) {
				(map)->first_free = vm_map_to_entry(map);
			}
			if((entry) == (map)->hint) {
				(map)->hint = vm_map_to_entry(map);
			}
			break;
		default:
			break;
	}
}

/*
 * Splice the entries of a vm_map_copy_t into "map" after "after_where":
 * update the map's free-space accounting for each copied entry when the
 * hole list is enabled, then link the copy's entries into both stores.
 */
void
vm_map_store_copy_insert( vm_map_t map, vm_map_entry_t after_where, vm_map_copy_t copy)
{
	if (__improbable(vm_debug_events)) {
		vm_map_entry_t entry;
		for (entry = vm_map_copy_first_entry(copy); entry != vm_map_copy_to_entry(copy); entry = entry->vme_next) {
			DTRACE_VM4(map_entry_link_copy, vm_map_t, map, vm_map_entry_t, entry, vm_address_t, entry->links.start, vm_address_t, entry->links.end);
		}
	}

	if (map->holelistenabled) {
		vm_map_entry_t entry = NULL;

		entry = vm_map_copy_first_entry(copy);
		while (entry != vm_map_copy_to_entry(copy)) {
			vm_map_store_update_first_free(map, entry, TRUE);
			entry = entry->vme_next;
		}
	}

	vm_map_store_copy_insert_ll(map, after_where, copy);
#ifdef VM_MAP_STORE_USE_RB
	if (vm_map_store_has_RB_support( &map->hdr )) {
		vm_map_store_copy_insert_rb(map, after_where, copy);
	}
#endif
}

/*
 * vm_map_entry_{un,}link:
 *
 * Insert/remove entries from maps (or map copies).
 *
 * The _vm_map_store_entry_{un,}link variants take the map header as
 * their first argument. They are used at call sites, chiefly those
 * that modify copy maps, where the first_free hint does not need to
 * be updated. Teaching vm_map_store_entry_{un,}link themselves to
 * handle those call sites would have made the interface confusing
 * and clunky, hence the two flavors. A sketch of typical usage
 * follows this comment.
 */
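
/*
 * Illustrative sketch of the two flavors. These call sites, and the
 * names "map", "copy", "insertion_point" and "new_entry", are
 * hypothetical, not taken from this file:
 *
 *	// Regular map: the map-level variant also refreshes the
 *	// first_free hint (or the hole list) after linking.
 *	vm_map_store_entry_link(map, insertion_point, new_entry);
 *
 *	// Copy map: pass the copy's header directly; copy maps have
 *	// no first_free hint to maintain. Linking after the header
 *	// sentinel places new_entry at the head of the copy's list.
 *	_vm_map_store_entry_link(&copy->cpy_hdr,
 *	    vm_map_copy_to_entry(copy), new_entry);
 */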

void
_vm_map_store_entry_link( struct vm_map_header * mapHdr, vm_map_entry_t after_where, vm_map_entry_t entry)
{
	assert(entry->vme_start < entry->vme_end);
	/*
	 * The owning vm_map_t is recovered from the header pointer for the
	 * probe: the header sits immediately after the map's rw lock.
	 */
	if (__improbable(vm_debug_events))
		DTRACE_VM4(map_entry_link, vm_map_t, (char *)mapHdr - sizeof (lck_rw_t), vm_map_entry_t, entry, vm_address_t, entry->links.start, vm_address_t, entry->links.end);

	vm_map_store_entry_link_ll(mapHdr, after_where, entry);
#ifdef VM_MAP_STORE_USE_RB
	if (vm_map_store_has_RB_support( mapHdr )) {
		vm_map_store_entry_link_rb(mapHdr, after_where, entry);
	}
#endif
#if MAP_ENTRY_INSERTION_DEBUG
	backtrace(&entry->vme_insertion_bt[0],
		  (sizeof (entry->vme_insertion_bt) / sizeof (uintptr_t)));
#endif
}

/*
 * Map-level link: insert the entry via the header-level helper, then
 * refresh either the highest-entry-end hint (when vm entry reuse is
 * disabled) or the first_free / hole-list accounting.
 */
void
vm_map_store_entry_link( vm_map_t map, vm_map_entry_t after_where, vm_map_entry_t entry)
{
	vm_map_t VMEL_map;
	vm_map_entry_t VMEL_entry;
	VMEL_map = (map);
	VMEL_entry = (entry);

	_vm_map_store_entry_link(&VMEL_map->hdr, after_where, VMEL_entry);
	if( VMEL_map->disable_vmentry_reuse == TRUE ) {
		UPDATE_HIGHEST_ENTRY_END( VMEL_map, VMEL_entry);
	} else {
		update_first_free_ll(VMEL_map, VMEL_map->first_free);
#ifdef VM_MAP_STORE_USE_RB
		if (vm_map_store_has_RB_support( &VMEL_map->hdr )) {
			update_first_free_rb(VMEL_map, entry, TRUE);
		}
#endif
	}
}

/*
 * Header-level unlink: remove the entry from both stores without
 * touching the map's first_free accounting.
 */
void
_vm_map_store_entry_unlink( struct vm_map_header * mapHdr, vm_map_entry_t entry)
{
	if (__improbable(vm_debug_events))
		DTRACE_VM4(map_entry_unlink, vm_map_t, (char *)mapHdr - sizeof (lck_rw_t), vm_map_entry_t, entry, vm_address_t, entry->links.start, vm_address_t, entry->links.end);

	vm_map_store_entry_unlink_ll(mapHdr, entry);
#ifdef VM_MAP_STORE_USE_RB
	if (vm_map_store_has_RB_support( mapHdr )) {
		vm_map_store_entry_unlink_rb(mapHdr, entry);
	}
#endif
}

/*
 * Map-level unlink: when the hole list is not in use, pick the entry
 * that should become the new first_free, then unlink the entry from
 * both stores, reset stale hints, and refresh the free-space
 * accounting.
 */
void
vm_map_store_entry_unlink( vm_map_t map, vm_map_entry_t entry)
{
	vm_map_t VMEU_map;
	vm_map_entry_t VMEU_entry = NULL;
	vm_map_entry_t VMEU_first_free = NULL;
	VMEU_map = (map);
	VMEU_entry = (entry);

	if (map->holelistenabled == FALSE) {
		if (VMEU_entry->vme_start <= VMEU_map->first_free->vme_start){
			VMEU_first_free = VMEU_entry->vme_prev;
		} else {
			VMEU_first_free = VMEU_map->first_free;
		}
	}
	_vm_map_store_entry_unlink(&VMEU_map->hdr, VMEU_entry);
	vm_map_store_update( map, entry, VM_MAP_ENTRY_DELETE);
	update_first_free_ll(VMEU_map, VMEU_first_free);
#ifdef VM_MAP_STORE_USE_RB
	if (vm_map_store_has_RB_support( &VMEU_map->hdr )) {
		update_first_free_rb(VMEU_map, entry, FALSE);
	}
#endif
}

/*
 * Reset a copy object's backing stores; the entries themselves are not
 * freed here.
 */
void
vm_map_store_copy_reset( vm_map_copy_t copy, vm_map_entry_t entry)
{
	int nentries = copy->cpy_hdr.nentries;
	vm_map_store_copy_reset_ll(copy, entry, nentries);
#ifdef VM_MAP_STORE_USE_RB
	if (vm_map_store_has_RB_support( &copy->c_u.hdr )) {
		vm_map_store_copy_reset_rb(copy, entry, nentries);
	}
#endif
}

/*
 * Refresh the map's first_free / free-space accounting in both backing
 * stores.
 */
void
vm_map_store_update_first_free( vm_map_t map, vm_map_entry_t first_free_entry, boolean_t new_entry_creation)
{
	update_first_free_ll(map, first_free_entry);
#ifdef VM_MAP_STORE_USE_RB
	if (vm_map_store_has_RB_support( &map->hdr )) {
		update_first_free_rb(map, first_free_entry, new_entry_creation);
	}
#endif
}