]>
Commit | Line | Data |
---|---|---|
f427ee49 A |
1 | /* |
2 | * Copyright (c) 2020 Apple Inc. All rights reserved. | |
3 | * | |
4 | * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ | |
5 | * | |
6 | * This file contains Original Code and/or Modifications of Original Code | |
7 | * as defined in and that are subject to the Apple Public Source License | |
8 | * Version 2.0 (the 'License'). You may not use this file except in | |
9 | * compliance with the License. The rights granted to you under the License | |
10 | * may not be used to create, or enable the creation or redistribution of, | |
11 | * unlawful or unlicensed copies of an Apple operating system, or to | |
12 | * circumvent, violate, or enable the circumvention or violation of, any | |
13 | * terms of an Apple operating system software license agreement. | |
14 | * | |
15 | * Please obtain a copy of the License at | |
16 | * http://www.opensource.apple.com/apsl/ and read it before using this file. | |
17 | * | |
18 | * The Original Code and all software distributed under the License are | |
19 | * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER | |
20 | * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES, | |
21 | * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY, | |
22 | * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT. | |
23 | * Please see the License for the specific language governing rights and | |
24 | * limitations under the License. | |
25 | * | |
26 | * @APPLE_OSREFERENCE_LICENSE_HEADER_END@ | |
27 | */ | |
28 | ||
29 | #include <mach_assert.h> | |
30 | ||
31 | #include <mach/mach_types.h> | |
32 | #include <mach/memory_object.h> | |
33 | #include <mach/vm_map.h> | |
34 | ||
35 | #include <vm/memory_object.h> | |
36 | #include <vm/vm_fault.h> | |
37 | #include <vm/vm_map.h> | |
38 | #include <vm/vm_object.h> | |
39 | #include <vm/vm_protos.h> | |
40 | ||
/*
 * Prototype imported from the VM map code (not exposed in a header).
 * Adjusts a vm_map_copy_t to fit a sub-range [offset, offset+size) for
 * mapping into target_map; reports how much was over-mapped at either
 * end and how much was trimmed from the start.
 * NOTE(review): duplicated declaration -- keep in sync with the
 * definition in vm_map.c.
 */
extern kern_return_t
vm_map_copy_adjust_to_target(
	vm_map_copy_t copy_map,
	vm_map_offset_t offset,          /* offset into copy_map of the range to keep */
	vm_map_size_t size,              /* size of the range to keep */
	vm_map_t target_map,             /* map whose page size we adjust to */
	boolean_t copy,                  /* TRUE: copy the entries rather than share */
	vm_map_copy_t *target_copy_map_p,    /* OUT: adjusted copy map */
	vm_map_offset_t *overmap_start_p,    /* OUT: extra coverage before "offset" */
	vm_map_offset_t *overmap_end_p,      /* OUT: extra coverage past the end */
	vm_map_offset_t *trimmed_start_p);   /* OUT: amount trimmed from the start */
52 | ||
/*
 * Compile-time switches for the VM self-tests below.  Each test body is
 * compiled in only when its switch is non-zero; otherwise the matching
 * vm_test_*() entry point is stubbed out to an empty macro in the #else
 * branch next to each test.
 */
#define VM_TEST_COLLAPSE_COMPRESSOR 0
#define VM_TEST_WIRE_AND_EXTRACT 0
#define VM_TEST_PAGE_WIRE_OVERFLOW_PANIC 0
#if __arm64__
#define VM_TEST_KERNEL_OBJECT_FAULT 0
#endif /* __arm64__ */
/* enabled on DEVELOPMENT/DEBUG kernels only */
#define VM_TEST_DEVICE_PAGER_TRANSPOSE (DEVELOPMENT || DEBUG)
60 | ||
#if VM_TEST_COLLAPSE_COMPRESSOR
extern boolean_t vm_object_collapse_compressor_allowed;
#include <IOKit/IOLib.h>
/*
 * Regression test for vm_object_collapse() when the backing object of a
 * shadow chain has pages in the compressor.
 *
 * Setup: a 15-page backing object and a 9-page top object, each with a
 * mix of resident, compressed, and absent pages.  The top object shadows
 * the backing object at offset 3 pages, and the backing object's
 * paging_offset is also skewed by 3 pages to exercise the offset math.
 * After collapsing the top object, each page of the mapping is checked
 * against the value expected from whichever object should supply it.
 * Panics on failure (when collapse-with-compressor is allowed).
 */
static void
vm_test_collapse_compressor(void)
{
	vm_object_size_t backing_size, top_size;
	vm_object_t backing_object, top_object;
	vm_map_offset_t backing_offset, top_offset;
	unsigned char *backing_address, *top_address;
	kern_return_t kr;

	printf("VM_TEST_COLLAPSE_COMPRESSOR:\n");

	/* create backing object */
	backing_size = 15 * PAGE_SIZE;
	backing_object = vm_object_allocate(backing_size);
	assert(backing_object != VM_OBJECT_NULL);
	printf("VM_TEST_COLLAPSE_COMPRESSOR: created backing object %p\n",
	    backing_object);
	/* map backing object */
	backing_offset = 0;
	kr = vm_map_enter(kernel_map, &backing_offset, backing_size, 0,
	    VM_FLAGS_ANYWHERE, VM_MAP_KERNEL_FLAGS_NONE,
	    backing_object, 0, FALSE,
	    VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
	assert(kr == KERN_SUCCESS);
	backing_address = (unsigned char *) backing_offset;
	printf("VM_TEST_COLLAPSE_COMPRESSOR: "
	    "mapped backing object %p at 0x%llx\n",
	    backing_object, (uint64_t) backing_offset);
	/* populate with pages to be compressed in backing object */
	backing_address[0x1 * PAGE_SIZE] = 0xB1;
	backing_address[0x4 * PAGE_SIZE] = 0xB4;
	backing_address[0x7 * PAGE_SIZE] = 0xB7;
	backing_address[0xa * PAGE_SIZE] = 0xBA;
	backing_address[0xd * PAGE_SIZE] = 0xBD;
	printf("VM_TEST_COLLAPSE_COMPRESSOR: "
	    "populated pages to be compressed in "
	    "backing_object %p\n", backing_object);
	/* compress backing object */
	vm_object_pageout(backing_object);
	printf("VM_TEST_COLLAPSE_COMPRESSOR: compressing backing_object %p\n",
	    backing_object);
	/* wait for all the pages to be gone */
	/* volatile read: the count is decremented by the pageout machinery */
	while (*(volatile int *)&backing_object->resident_page_count != 0) {
		IODelay(10);
	}
	printf("VM_TEST_COLLAPSE_COMPRESSOR: backing_object %p compressed\n",
	    backing_object);
	/* populate with pages to be resident in backing object */
	backing_address[0x0 * PAGE_SIZE] = 0xB0;
	backing_address[0x3 * PAGE_SIZE] = 0xB3;
	backing_address[0x6 * PAGE_SIZE] = 0xB6;
	backing_address[0x9 * PAGE_SIZE] = 0xB9;
	backing_address[0xc * PAGE_SIZE] = 0xBC;
	printf("VM_TEST_COLLAPSE_COMPRESSOR: "
	    "populated pages to be resident in "
	    "backing_object %p\n", backing_object);
	/* leave the other pages absent */
	/* mess with the paging_offset of the backing_object */
	assert(backing_object->paging_offset == 0);
	backing_object->paging_offset = 3 * PAGE_SIZE;

	/* create top object */
	top_size = 9 * PAGE_SIZE;
	top_object = vm_object_allocate(top_size);
	assert(top_object != VM_OBJECT_NULL);
	printf("VM_TEST_COLLAPSE_COMPRESSOR: created top object %p\n",
	    top_object);
	/* map top object */
	top_offset = 0;
	kr = vm_map_enter(kernel_map, &top_offset, top_size, 0,
	    VM_FLAGS_ANYWHERE, VM_MAP_KERNEL_FLAGS_NONE,
	    top_object, 0, FALSE,
	    VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
	assert(kr == KERN_SUCCESS);
	top_address = (unsigned char *) top_offset;
	printf("VM_TEST_COLLAPSE_COMPRESSOR: "
	    "mapped top object %p at 0x%llx\n",
	    top_object, (uint64_t) top_offset);
	/* populate with pages to be compressed in top object */
	top_address[0x3 * PAGE_SIZE] = 0xA3;
	top_address[0x4 * PAGE_SIZE] = 0xA4;
	top_address[0x5 * PAGE_SIZE] = 0xA5;
	printf("VM_TEST_COLLAPSE_COMPRESSOR: "
	    "populated pages to be compressed in "
	    "top_object %p\n", top_object);
	/* compress top object */
	vm_object_pageout(top_object);
	printf("VM_TEST_COLLAPSE_COMPRESSOR: compressing top_object %p\n",
	    top_object);
	/* wait for all the pages to be gone */
	/* NOTE(review): no volatile here, unlike the backing-object wait above;
	 * presumably the compiler re-reads the field anyway -- confirm */
	while (top_object->resident_page_count != 0) {
		IODelay(10);
	}
	printf("VM_TEST_COLLAPSE_COMPRESSOR: top_object %p compressed\n",
	    top_object);
	/* populate with pages to be resident in top object */
	top_address[0x0 * PAGE_SIZE] = 0xA0;
	top_address[0x1 * PAGE_SIZE] = 0xA1;
	top_address[0x2 * PAGE_SIZE] = 0xA2;
	printf("VM_TEST_COLLAPSE_COMPRESSOR: "
	    "populated pages to be resident in "
	    "top_object %p\n", top_object);
	/* leave the other pages absent */

	/* link the 2 objects */
	vm_object_reference(backing_object);
	top_object->shadow = backing_object;
	top_object->vo_shadow_offset = 3 * PAGE_SIZE;
	printf("VM_TEST_COLLAPSE_COMPRESSOR: linked %p and %p\n",
	    top_object, backing_object);

	/* unmap backing object */
	vm_map_remove(kernel_map,
	    backing_offset,
	    backing_offset + backing_size,
	    VM_MAP_REMOVE_NO_FLAGS);
	printf("VM_TEST_COLLAPSE_COMPRESSOR: "
	    "unmapped backing_object %p [0x%llx:0x%llx]\n",
	    backing_object,
	    (uint64_t) backing_offset,
	    (uint64_t) (backing_offset + backing_size));

	/* collapse */
	printf("VM_TEST_COLLAPSE_COMPRESSOR: collapsing %p\n", top_object);
	vm_object_lock(top_object);
	vm_object_collapse(top_object, 0, FALSE);
	vm_object_unlock(top_object);
	printf("VM_TEST_COLLAPSE_COMPRESSOR: collapsed %p\n", top_object);

	/* did it work? */
	if (top_object->shadow != VM_OBJECT_NULL) {
		printf("VM_TEST_COLLAPSE_COMPRESSOR: not collapsed\n");
		printf("VM_TEST_COLLAPSE_COMPRESSOR: FAIL\n");
		if (vm_object_collapse_compressor_allowed) {
			panic("VM_TEST_COLLAPSE_COMPRESSOR: FAIL\n");
		}
	} else {
		/* check the contents of the mapping */
		unsigned char expect[9] =
		{ 0xA0, 0xA1, 0xA2,     /* resident in top */
		  0xA3, 0xA4, 0xA5,     /* compressed in top */
		  0xB9,                 /* resident in backing + shadow_offset */
		  0xBD,                 /* compressed in backing + shadow_offset + paging_offset */
		  0x00 };               /* absent in both */
		unsigned char actual[9];
		unsigned int i, errors;

		errors = 0;
		for (i = 0; i < sizeof(actual); i++) {
			actual[i] = (unsigned char) top_address[i * PAGE_SIZE];
			if (actual[i] != expect[i]) {
				errors++;
			}
		}
		printf("VM_TEST_COLLAPSE_COMPRESSOR: "
		    "actual [%x %x %x %x %x %x %x %x %x] "
		    "expect [%x %x %x %x %x %x %x %x %x] "
		    "%d errors\n",
		    actual[0], actual[1], actual[2], actual[3],
		    actual[4], actual[5], actual[6], actual[7],
		    actual[8],
		    expect[0], expect[1], expect[2], expect[3],
		    expect[4], expect[5], expect[6], expect[7],
		    expect[8],
		    errors);
		if (errors) {
			panic("VM_TEST_COLLAPSE_COMPRESSOR: FAIL\n");
		} else {
			printf("VM_TEST_COLLAPSE_COMPRESSOR: PASS\n");
		}
	}
}
#else /* VM_TEST_COLLAPSE_COMPRESSOR */
#define vm_test_collapse_compressor()
#endif /* VM_TEST_COLLAPSE_COMPRESSOR */
239 | ||
#if VM_TEST_WIRE_AND_EXTRACT
extern ledger_template_t task_ledger_template;
#include <mach/mach_vm.h>
extern ppnum_t vm_map_get_phys_page(vm_map_t map,
    vm_offset_t offset);
/*
 * Test that vm_map_wire_and_extract() returns the same physical page
 * number as the page backing the original mapping.
 *
 * Setup: allocate a 0x10000-byte region in a fresh "user" map, then
 * share it into a second "wire" map via mach_vm_remap().  For every page
 * of the shared range, wire it through the wire map and compare the
 * extracted ppnum against vm_map_get_phys_page() on the user map.
 * Finally re-wire the last page to exercise the already-wired path.
 * Panics on any mismatch.
 */
static void
vm_test_wire_and_extract(void)
{
	ledger_t ledger;
	vm_map_t user_map, wire_map;
	mach_vm_address_t user_addr, wire_addr;
	mach_vm_size_t user_size, wire_size;
	mach_vm_offset_t cur_offset;
	vm_prot_t cur_prot, max_prot;
	ppnum_t user_ppnum, wire_ppnum;
	kern_return_t kr;

	ledger = ledger_instantiate(task_ledger_template,
	    LEDGER_CREATE_ACTIVE_ENTRIES);
	/* user map gets a real pmap; wire map has none (NULL) */
	user_map = vm_map_create(pmap_create_options(ledger, 0, PMAP_CREATE_64BIT),
	    0x100000000ULL,
	    0x200000000ULL,
	    TRUE);
	wire_map = vm_map_create(NULL,
	    0x100000000ULL,
	    0x200000000ULL,
	    TRUE);
	user_addr = 0;
	user_size = 0x10000;
	kr = mach_vm_allocate(user_map,
	    &user_addr,
	    user_size,
	    VM_FLAGS_ANYWHERE);
	assert(kr == KERN_SUCCESS);
	wire_addr = 0;
	wire_size = user_size;
	/* share (copy=FALSE) the user range into the wire map */
	kr = mach_vm_remap(wire_map,
	    &wire_addr,
	    wire_size,
	    0,
	    VM_FLAGS_ANYWHERE,
	    user_map,
	    user_addr,
	    FALSE,
	    &cur_prot,
	    &max_prot,
	    VM_INHERIT_NONE);
	assert(kr == KERN_SUCCESS);
	for (cur_offset = 0;
	    cur_offset < wire_size;
	    cur_offset += PAGE_SIZE) {
		kr = vm_map_wire_and_extract(wire_map,
		    wire_addr + cur_offset,
		    VM_PROT_DEFAULT | VM_PROT_MEMORY_TAG_MAKE(VM_KERN_MEMORY_OSFMK),
		    TRUE,
		    &wire_ppnum);
		assert(kr == KERN_SUCCESS);
		user_ppnum = vm_map_get_phys_page(user_map,
		    user_addr + cur_offset);
		printf("VM_TEST_WIRE_AND_EXTRACT: kr=0x%x "
		    "user[%p:0x%llx:0x%x] wire[%p:0x%llx:0x%x]\n",
		    kr,
		    user_map, user_addr + cur_offset, user_ppnum,
		    wire_map, wire_addr + cur_offset, wire_ppnum);
		if (kr != KERN_SUCCESS ||
		    wire_ppnum == 0 ||
		    wire_ppnum != user_ppnum) {
			panic("VM_TEST_WIRE_AND_EXTRACT: FAIL\n");
		}
	}
	/* re-wire the last page; user_ppnum still holds that page's number */
	cur_offset -= PAGE_SIZE;
	kr = vm_map_wire_and_extract(wire_map,
	    wire_addr + cur_offset,
	    VM_PROT_DEFAULT,
	    TRUE,
	    &wire_ppnum);
	assert(kr == KERN_SUCCESS);
	printf("VM_TEST_WIRE_AND_EXTRACT: re-wire kr=0x%x "
	    "user[%p:0x%llx:0x%x] wire[%p:0x%llx:0x%x]\n",
	    kr,
	    user_map, user_addr + cur_offset, user_ppnum,
	    wire_map, wire_addr + cur_offset, wire_ppnum);
	if (kr != KERN_SUCCESS ||
	    wire_ppnum == 0 ||
	    wire_ppnum != user_ppnum) {
		panic("VM_TEST_WIRE_AND_EXTRACT: FAIL\n");
	}

	printf("VM_TEST_WIRE_AND_EXTRACT: PASS\n");
}
#else /* VM_TEST_WIRE_AND_EXTRACT */
#define vm_test_wire_and_extract()
#endif /* VM_TEST_WIRE_AND_EXTRACT */
333 | ||
#if VM_TEST_PAGE_WIRE_OVERFLOW_PANIC
/*
 * Verify that overflowing a page's wire_count is detected by the VM.
 *
 * Repeatedly wires a single page until wire_count would wrap to zero;
 * the expectation is that vm_page_wire() panics before that happens.
 * Reaching the final panic() below therefore means the overflow check
 * is missing -- i.e. the test failed.
 */
static void
vm_test_page_wire_overflow_panic(void)
{
	vm_object_t obj;
	vm_page_t pg;

	printf("VM_TEST_PAGE_WIRE_OVERFLOW_PANIC: starting...\n");

	obj = vm_object_allocate(PAGE_SIZE);
	vm_object_lock(obj);
	pg = vm_page_alloc(obj, 0x0);
	vm_page_lock_queues();
	for (;;) {
		/* keep wiring until the counter wraps (or the VM panics first) */
		vm_page_wire(pg, 1, FALSE);
		if (pg->wire_count == 0) {
			break;
		}
	}
	vm_page_unlock_queues();
	vm_object_unlock(obj);
	panic("FBDP(%p,%p): wire_count overflow not detected\n",
	    obj, pg);
}
#else /* VM_TEST_PAGE_WIRE_OVERFLOW_PANIC */
#define vm_test_page_wire_overflow_panic()
#endif /* VM_TEST_PAGE_WIRE_OVERFLOW_PANIC */
358 | ||
#if __arm64__ && VM_TEST_KERNEL_OBJECT_FAULT
extern int copyinframe(vm_address_t fp, char *frame, boolean_t is64bit);
/*
 * Test that a copyin-style access to a guarded kernel-object address
 * fails gracefully instead of faulting the kernel.
 *
 * Allocates a kernel stack with guard pages at both ends, then calls
 * copyinframe() on its base address (which lands in the first guard
 * page).  A non-zero return (the access was rejected) is a PASS; a zero
 * return means the guarded access slipped through -- FAIL.
 */
static void
vm_test_kernel_object_fault(void)
{
	kern_return_t kr;
	vm_offset_t stack;
	uintptr_t frameb[2];
	int ret;

	/* guard page below and above the stack proper */
	kr = kernel_memory_allocate(kernel_map, &stack,
	    kernel_stack_size + (2 * PAGE_SIZE),
	    0,
	    (KMA_KSTACK | KMA_KOBJECT |
	    KMA_GUARD_FIRST | KMA_GUARD_LAST),
	    VM_KERN_MEMORY_STACK);
	if (kr != KERN_SUCCESS) {
		panic("VM_TEST_KERNEL_OBJECT_FAULT: kernel_memory_allocate kr 0x%x\n", kr);
	}
	ret = copyinframe((uintptr_t)stack, (char *)frameb, TRUE);
	if (ret != 0) {
		printf("VM_TEST_KERNEL_OBJECT_FAULT: PASS\n");
	} else {
		printf("VM_TEST_KERNEL_OBJECT_FAULT: FAIL\n");
	}
	vm_map_remove(kernel_map,
	    stack,
	    stack + kernel_stack_size + (2 * PAGE_SIZE),
	    VM_MAP_REMOVE_KUNWIRE);
	stack = 0;
}
#else /* __arm64__ && VM_TEST_KERNEL_OBJECT_FAULT */
#define vm_test_kernel_object_fault()
#endif /* __arm64__ && VM_TEST_KERNEL_OBJECT_FAULT */
393 | ||
#if VM_TEST_DEVICE_PAGER_TRANSPOSE
/*
 * Test vm_object_transpose() between an anonymous object and a
 * device-pager-backed object.
 *
 * Creates both objects (3 pages each), maps the device object in the
 * kernel map, then transposes the two objects while both have activity
 * in progress and access blocked.  Verifies via ref_count/named
 * assertions that the identities were swapped: before the transpose the
 * device object carries the extra (named) reference; afterwards the
 * anonymous object does.  Panics via assert on any mismatch.
 */
static void
vm_test_device_pager_transpose(void)
{
	memory_object_t device_pager;
	vm_object_t anon_object, device_object;
	vm_size_t size;
	vm_map_offset_t device_mapping;
	kern_return_t kr;

	size = 3 * PAGE_SIZE;
	anon_object = vm_object_allocate(size);
	assert(anon_object != VM_OBJECT_NULL);
	device_pager = device_pager_setup(NULL, 0, size, 0);
	assert(device_pager != NULL);
	device_object = memory_object_to_vm_object(device_pager);
	assert(device_object != VM_OBJECT_NULL);
#if 0
	/*
	 * Can't actually map this, since another thread might do a
	 * vm_map_enter() that gets coalesced into this object, which
	 * would cause the test to fail.
	 */
	vm_map_offset_t anon_mapping = 0;
	kr = vm_map_enter(kernel_map, &anon_mapping, size, 0,
	    VM_FLAGS_ANYWHERE, VM_MAP_KERNEL_FLAGS_NONE, VM_KERN_MEMORY_NONE,
	    anon_object, 0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL,
	    VM_INHERIT_DEFAULT);
	assert(kr == KERN_SUCCESS);
#endif
	device_mapping = 0;
	kr = vm_map_enter_mem_object(kernel_map, &device_mapping, size, 0,
	    VM_FLAGS_ANYWHERE,
	    VM_MAP_KERNEL_FLAGS_NONE,
	    VM_KERN_MEMORY_NONE,
	    (void *)device_pager, 0, FALSE,
	    VM_PROT_DEFAULT, VM_PROT_ALL,
	    VM_INHERIT_DEFAULT);
	assert(kr == KERN_SUCCESS);
	/* mapping holds a reference; drop ours on the pager */
	memory_object_deallocate(device_pager);

	/* block access to both objects for the duration of the transpose */
	vm_object_lock(anon_object);
	vm_object_activity_begin(anon_object);
	anon_object->blocked_access = TRUE;
	vm_object_unlock(anon_object);
	vm_object_lock(device_object);
	vm_object_activity_begin(device_object);
	device_object->blocked_access = TRUE;
	vm_object_unlock(device_object);

	/* pre-transpose state: device object is the named one */
	assert(anon_object->ref_count == 1);
	assert(!anon_object->named);
	assert(device_object->ref_count == 2);
	assert(device_object->named);

	kr = vm_object_transpose(device_object, anon_object, size);
	assert(kr == KERN_SUCCESS);

	/* unblock access to both objects */
	vm_object_lock(anon_object);
	vm_object_activity_end(anon_object);
	anon_object->blocked_access = FALSE;
	vm_object_unlock(anon_object);
	vm_object_lock(device_object);
	vm_object_activity_end(device_object);
	device_object->blocked_access = FALSE;
	vm_object_unlock(device_object);

	/* post-transpose state: roles swapped -- anon object is now named */
	assert(anon_object->ref_count == 2);
	assert(anon_object->named);
#if 0
	kr = vm_deallocate(kernel_map, anon_mapping, size);
	assert(kr == KERN_SUCCESS);
#endif
	assert(device_object->ref_count == 1);
	assert(!device_object->named);
	kr = vm_deallocate(kernel_map, device_mapping, size);
	assert(kr == KERN_SUCCESS);

	printf("VM_TEST_DEVICE_PAGER_TRANSPOSE: PASS\n");
}
#else /* VM_TEST_DEVICE_PAGER_TRANSPOSE */
#define vm_test_device_pager_transpose()
#endif /* VM_TEST_DEVICE_PAGER_TRANSPOSE */
477 | ||
#if PMAP_CREATE_FORCE_4K_PAGES && MACH_ASSERT
extern kern_return_t vm_allocate_external(vm_map_t map,
    vm_offset_t *addr,
    vm_size_t size,
    int flags);
extern kern_return_t vm_remap_external(vm_map_t target_map,
    vm_offset_t *address,
    vm_size_t size,
    vm_offset_t mask,
    int flags,
    vm_map_t src_map,
    vm_offset_t memory_address,
    boolean_t copy,
    vm_prot_t *cur_protection,
    vm_prot_t *max_protection,
    vm_inherit_t inheritance);
extern int debug4k_panic_on_misaligned_sharing;

/*
 * Exercise a forced-4K-page VM map on a 16K-page kernel.
 *
 * Builds a map whose page size is FOURK_PAGE_SIZE and walks through:
 *   - back-to-back vm_allocate()s, asserting each lands at the expected
 *     4K-aligned address;
 *   - vm_protect(READ) on one 4K page, then write faults across the
 *     whole range, expecting KERN_PROTECTION_FAILURE on the read-only
 *     page, KERN_INVALID_ADDRESS past the end, KERN_SUCCESS elsewhere;
 *   - a partial (sub-16K) deallocation;
 *   - vm_remap() within the 4K map, then 4K -> 16K remaps into the
 *     kernel map, shared and copy-on-write;
 *   - a deliberately misaligned shared remap, which must fail (the
 *     debug4k panic is temporarily disabled around it);
 *   - a misaligned copy remap, which is expected to succeed.
 * All checks are assertf()s, so any deviation panics.
 *
 * FIX(review): the trace printf after the alloc4 allocation printed
 * alloc3_addr (copy-paste from the previous step); it now prints
 * alloc4_addr, the address just allocated.
 */
void vm_test_4k(void);
void
vm_test_4k(void)
{
	pmap_t test_pmap;
	vm_map_t test_map;
	kern_return_t kr;
	vm_address_t expected_addr;
	vm_address_t alloc1_addr, alloc2_addr, alloc3_addr, alloc4_addr;
	vm_address_t alloc5_addr, dealloc_addr, remap_src_addr, remap_dst_addr;
	vm_size_t alloc1_size, alloc2_size, alloc3_size, alloc4_size;
	vm_size_t alloc5_size, remap_src_size;
	vm_address_t fault_addr;
	vm_prot_t cur_prot, max_prot;
	int saved_debug4k_panic_on_misaligned_sharing;

	/* create a map whose effective page size is 4K */
	printf("\n\n\nVM_TEST_4K:%d creating 4K map...\n", __LINE__);
	test_pmap = pmap_create_options(NULL, 0, PMAP_CREATE_64BIT | PMAP_CREATE_FORCE_4K_PAGES);
	assert(test_pmap != NULL);
	test_map = vm_map_create(test_pmap,
	    MACH_VM_MIN_ADDRESS,
	    MACH_VM_MAX_ADDRESS,
	    TRUE);
	assert(test_map != VM_MAP_NULL);
	vm_map_set_page_shift(test_map, FOURK_PAGE_SHIFT);
	printf("VM_TEST_4K:%d map %p pmap %p page_size 0x%x\n", __LINE__, test_map, test_pmap, VM_MAP_PAGE_SIZE(test_map));

	/* allocate 1 page; first allocation is expected at 0x1000 */
	alloc1_addr = 0;
	alloc1_size = 1 * FOURK_PAGE_SIZE;
	expected_addr = 0x1000;
	printf("VM_TEST_4K:%d vm_allocate(%p, 0x%lx, 0x%lx)...\n", __LINE__, test_map, alloc1_addr, alloc1_size);
	kr = vm_allocate_external(test_map,
	    &alloc1_addr,
	    alloc1_size,
	    VM_FLAGS_ANYWHERE);
	assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);
	assertf(alloc1_addr == expected_addr, "alloc1_addr = 0x%lx expected 0x%lx", alloc1_addr, expected_addr);
	printf("VM_TEST_4K:%d -> 0x%lx\n", __LINE__, alloc1_addr);
	expected_addr += alloc1_size;

	/* free it again... */
	printf("VM_TEST_4K:%d vm_deallocate(%p, 0x%lx, 0x%lx)...\n", __LINE__, test_map, alloc1_addr, alloc1_size);
	kr = vm_deallocate(test_map, alloc1_addr, alloc1_size);
	assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);
	printf("VM_TEST_4K:%d -> 0x%lx\n", __LINE__, alloc1_addr);

	/* ...and re-allocate: should land at the same spot */
	alloc1_addr = 0;
	alloc1_size = 1 * FOURK_PAGE_SIZE;
	expected_addr = 0x1000;
	printf("VM_TEST_4K:%d vm_allocate(%p, 0x%lx, 0x%lx)...\n", __LINE__, test_map, alloc1_addr, alloc1_size);
	kr = vm_allocate_external(test_map,
	    &alloc1_addr,
	    alloc1_size,
	    VM_FLAGS_ANYWHERE);
	assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);
	assertf(alloc1_addr == expected_addr, "alloc1_addr = 0x%lx expected 0x%lx", alloc1_addr, expected_addr);
	printf("VM_TEST_4K:%d -> 0x%lx\n", __LINE__, alloc1_addr);
	expected_addr += alloc1_size;

	/* 3 pages, expected directly after alloc1 */
	alloc2_addr = 0;
	alloc2_size = 3 * FOURK_PAGE_SIZE;
	printf("VM_TEST_4K:%d vm_allocate(%p, 0x%lx, 0x%lx)...\n", __LINE__, test_map, alloc2_addr, alloc2_size);
	kr = vm_allocate_external(test_map,
	    &alloc2_addr,
	    alloc2_size,
	    VM_FLAGS_ANYWHERE);
	assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);
	assertf(alloc2_addr == expected_addr, "alloc2_addr = 0x%lx expected 0x%lx", alloc2_addr, expected_addr);
	printf("VM_TEST_4K:%d -> 0x%lx\n", __LINE__, alloc2_addr);
	expected_addr += alloc2_size;

	/* 18 pages (more than one 16K page worth), expected contiguous */
	alloc3_addr = 0;
	alloc3_size = 18 * FOURK_PAGE_SIZE;
	printf("VM_TEST_4K:%d vm_allocate(%p, 0x%lx, 0x%lx)...\n", __LINE__, test_map, alloc3_addr, alloc3_size);
	kr = vm_allocate_external(test_map,
	    &alloc3_addr,
	    alloc3_size,
	    VM_FLAGS_ANYWHERE);
	assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);
	assertf(alloc3_addr == expected_addr, "alloc3_addr = 0x%lx expected 0x%lx\n", alloc3_addr, expected_addr);
	printf("VM_TEST_4K:%d -> 0x%lx\n", __LINE__, alloc3_addr);
	expected_addr += alloc3_size;

	/* 1 more page, expected contiguous */
	alloc4_addr = 0;
	alloc4_size = 1 * FOURK_PAGE_SIZE;
	printf("VM_TEST_4K:%d vm_allocate(%p, 0x%lx, 0x%lx)...\n", __LINE__, test_map, alloc4_addr, alloc4_size);
	kr = vm_allocate_external(test_map,
	    &alloc4_addr,
	    alloc4_size,
	    VM_FLAGS_ANYWHERE);
	assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);
	assertf(alloc4_addr == expected_addr, "alloc4_addr = 0x%lx expected 0x%lx", alloc4_addr, expected_addr);
	/* was alloc3_addr: trace now reports the address just allocated */
	printf("VM_TEST_4K:%d -> 0x%lx\n", __LINE__, alloc4_addr);
	expected_addr += alloc4_size;

	/* make the first 4K page of alloc2 read-only */
	printf("VM_TEST_4K:%d vm_protect(%p, 0x%lx, 0x%lx, READ)...\n", __LINE__, test_map, alloc2_addr, (1UL * FOURK_PAGE_SIZE));
	kr = vm_protect(test_map,
	    alloc2_addr,
	    (1UL * FOURK_PAGE_SIZE),
	    FALSE,
	    VM_PROT_READ);
	assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);

	/*
	 * Write-fault every 4K page from alloc1 through 2 pages past the end
	 * of alloc4 and check the expected outcome for each page.
	 */
	for (fault_addr = alloc1_addr;
	    fault_addr < alloc4_addr + alloc4_size + (2 * FOURK_PAGE_SIZE);
	    fault_addr += FOURK_PAGE_SIZE) {
		printf("VM_TEST_4K:%d write fault at 0x%lx...\n", __LINE__, fault_addr);
		kr = vm_fault(test_map,
		    fault_addr,
		    VM_PROT_WRITE,
		    FALSE,
		    VM_KERN_MEMORY_NONE,
		    THREAD_UNINT,
		    NULL,
		    0);
		printf("VM_TEST_4K:%d -> 0x%x\n", __LINE__, kr);
		if (fault_addr == alloc2_addr) {
			/* read-only page: write must fail, read must succeed */
			assertf(kr == KERN_PROTECTION_FAILURE, "fault_addr = 0x%lx kr = 0x%x expected 0x%x", fault_addr, kr, KERN_PROTECTION_FAILURE);
			printf("VM_TEST_4K:%d read fault at 0x%lx...\n", __LINE__, fault_addr);
			kr = vm_fault(test_map,
			    fault_addr,
			    VM_PROT_READ,
			    FALSE,
			    VM_KERN_MEMORY_NONE,
			    THREAD_UNINT,
			    NULL,
			    0);
			assertf(kr == KERN_SUCCESS, "fault_addr = 0x%lx kr = 0x%x expected 0x%x", fault_addr, kr, KERN_SUCCESS);
			printf("VM_TEST_4K:%d -> 0x%x\n", __LINE__, kr);
		} else if (fault_addr >= alloc4_addr + alloc4_size) {
			/* past the last allocation: hole */
			assertf(kr == KERN_INVALID_ADDRESS, "fault_addr = 0x%lx kr = 0x%x expected 0x%x", fault_addr, kr, KERN_INVALID_ADDRESS);
		} else {
			assertf(kr == KERN_SUCCESS, "fault_addr = 0x%lx kr = 0x%x expected 0x%x", fault_addr, kr, KERN_SUCCESS);
		}
	}

	/* 7 more pages, expected contiguous */
	alloc5_addr = 0;
	alloc5_size = 7 * FOURK_PAGE_SIZE;
	printf("VM_TEST_4K:%d vm_allocate(%p, 0x%lx, 0x%lx)...\n", __LINE__, test_map, alloc5_addr, alloc5_size);
	kr = vm_allocate_external(test_map,
	    &alloc5_addr,
	    alloc5_size,
	    VM_FLAGS_ANYWHERE);
	assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);
	assertf(alloc5_addr == expected_addr, "alloc5_addr = 0x%lx expected 0x%lx", alloc5_addr, expected_addr);
	printf("VM_TEST_4K:%d -> 0x%lx\n", __LINE__, alloc5_addr);
	expected_addr += alloc5_size;

	/*
	 * Deallocate a single 4K page that sits one 4K page past a native
	 * (16K) page boundary inside alloc5 -- a sub-native-page punch.
	 */
	dealloc_addr = vm_map_round_page(alloc5_addr, PAGE_SHIFT);
	dealloc_addr += FOURK_PAGE_SIZE;
	printf("VM_TEST_4K:%d vm_deallocate(%p, 0x%lx, 0x%x)...\n", __LINE__, test_map, dealloc_addr, FOURK_PAGE_SIZE);
	kr = vm_deallocate(test_map, dealloc_addr, FOURK_PAGE_SIZE);
	assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);
	printf("VM_TEST_4K:%d -> 0x%x\n", __LINE__, kr);

	/* share 2 4K pages of alloc3 within the same 4K map */
	remap_src_addr = vm_map_round_page(alloc3_addr, PAGE_SHIFT);
	remap_src_addr += FOURK_PAGE_SIZE;
	remap_src_size = 2 * FOURK_PAGE_SIZE;
	remap_dst_addr = 0;
	printf("VM_TEST_4K:%d vm_remap(%p, 0x%lx, 0x%lx, 0x%lx, copy=0)...\n", __LINE__, test_map, remap_dst_addr, remap_src_size, remap_src_addr);
	kr = vm_remap_external(test_map,
	    &remap_dst_addr,
	    remap_src_size,
	    0,         /* mask */
	    VM_FLAGS_ANYWHERE,
	    test_map,
	    remap_src_addr,
	    FALSE,     /* copy */
	    &cur_prot,
	    &max_prot,
	    VM_INHERIT_DEFAULT);
	assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);
	assertf(remap_dst_addr == expected_addr, "remap_dst_addr = 0x%lx expected 0x%lx", remap_dst_addr, expected_addr);
	printf("VM_TEST_4K:%d -> 0x%lx\n", __LINE__, remap_dst_addr);
	expected_addr += remap_src_size;

	/* write-fault every 4K page of the remapped range */
	for (fault_addr = remap_dst_addr;
	    fault_addr < remap_dst_addr + remap_src_size;
	    fault_addr += 4096) {
		printf("VM_TEST_4K:%d write fault at 0x%lx...\n", __LINE__, fault_addr);
		kr = vm_fault(test_map,
		    fault_addr,
		    VM_PROT_WRITE,
		    FALSE,
		    VM_KERN_MEMORY_NONE,
		    THREAD_UNINT,
		    NULL,
		    0);
		assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);
		printf("VM_TEST_4K:%d -> 0x%x\n", __LINE__, kr);
	}

	/* shared remap from the 4K map into the 16K kernel map */
	printf("VM_TEST_4K:\n");
	remap_dst_addr = 0;
	remap_src_addr = alloc3_addr + 0xc000;
	remap_src_size = 0x5000;
	printf("VM_TEST_4K: vm_remap(%p, 0x%lx, 0x%lx, %p, copy=0) from 4K to 16K\n", test_map, remap_src_addr, remap_src_size, kernel_map);
	kr = vm_remap_external(kernel_map,
	    &remap_dst_addr,
	    remap_src_size,
	    0,         /* mask */
	    VM_FLAGS_ANYWHERE | VM_FLAGS_RETURN_DATA_ADDR,
	    test_map,
	    remap_src_addr,
	    FALSE,     /* copy */
	    &cur_prot,
	    &max_prot,
	    VM_INHERIT_DEFAULT);
	assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);
	printf("VM_TEST_4K: -> remapped (shared) in map %p at addr 0x%lx\n", kernel_map, remap_dst_addr);

	/* same remap but copy-on-write */
	printf("VM_TEST_4K:\n");
	remap_dst_addr = 0;
	remap_src_addr = alloc3_addr + 0xc000;
	remap_src_size = 0x5000;
	printf("VM_TEST_4K: vm_remap(%p, 0x%lx, 0x%lx, %p, copy=1) from 4K to 16K\n", test_map, remap_src_addr, remap_src_size, kernel_map);
	kr = vm_remap_external(kernel_map,
	    &remap_dst_addr,
	    remap_src_size,
	    0,         /* mask */
	    VM_FLAGS_ANYWHERE | VM_FLAGS_RETURN_DATA_ADDR,
	    test_map,
	    remap_src_addr,
	    TRUE,      /* copy */
	    &cur_prot,
	    &max_prot,
	    VM_INHERIT_DEFAULT);
	assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);
	printf("VM_TEST_4K: -> remapped (COW) in map %p at addr 0x%lx\n", kernel_map, remap_dst_addr);

	/*
	 * Misaligned (not 16K-aligned) shared remap into the kernel map:
	 * must fail.  The debug4k panic for this case is disabled around
	 * the call so we get the error return instead of a panic.
	 */
	printf("VM_TEST_4K:\n");
	saved_debug4k_panic_on_misaligned_sharing = debug4k_panic_on_misaligned_sharing;
	debug4k_panic_on_misaligned_sharing = 0;
	remap_dst_addr = 0;
	remap_src_addr = alloc1_addr;
	remap_src_size = alloc1_size + alloc2_size;
	printf("VM_TEST_4K: vm_remap(%p, 0x%lx, 0x%lx, %p, copy=0) from 4K to 16K\n", test_map, remap_src_addr, remap_src_size, kernel_map);
	kr = vm_remap_external(kernel_map,
	    &remap_dst_addr,
	    remap_src_size,
	    0,         /* mask */
	    VM_FLAGS_ANYWHERE | VM_FLAGS_RETURN_DATA_ADDR,
	    test_map,
	    remap_src_addr,
	    FALSE,     /* copy */
	    &cur_prot,
	    &max_prot,
	    VM_INHERIT_DEFAULT);
	assertf(kr != KERN_SUCCESS, "kr = 0x%x", kr);
	printf("VM_TEST_4K: -> remap (SHARED) in map %p at addr 0x%lx kr=0x%x\n", kernel_map, remap_dst_addr, kr);
	debug4k_panic_on_misaligned_sharing = saved_debug4k_panic_on_misaligned_sharing;

	/* same misaligned range but as a copy: expected to succeed */
	printf("VM_TEST_4K:\n");
	remap_dst_addr = 0;
	remap_src_addr = alloc1_addr;
	remap_src_size = alloc1_size + alloc2_size;
	printf("VM_TEST_4K: vm_remap(%p, 0x%lx, 0x%lx, %p, copy=1) from 4K to 16K\n", test_map, remap_src_addr, remap_src_size, kernel_map);
	kr = vm_remap_external(kernel_map,
	    &remap_dst_addr,
	    remap_src_size,
	    0,         /* mask */
	    VM_FLAGS_ANYWHERE | VM_FLAGS_RETURN_DATA_ADDR,
	    test_map,
	    remap_src_addr,
	    TRUE,      /* copy */
	    &cur_prot,
	    &max_prot,
	    VM_INHERIT_DEFAULT);
#if 000
	assertf(kr != KERN_SUCCESS, "kr = 0x%x", kr);
	printf("VM_TEST_4K: -> remap (COPY) in map %p at addr 0x%lx kr=0x%x\n", kernel_map, remap_dst_addr, kr);
#else /* 000 */
	assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);
	printf("VM_TEST_4K: -> remap (COPY) in map %p at addr 0x%lx kr=0x%x\n", kernel_map, remap_dst_addr, kr);
#endif /* 000 */


#if 00
	printf("VM_TEST_4K:%d vm_map_remove(%p, 0x%llx, 0x%llx)...\n", __LINE__, test_map, test_map->min_offset, test_map->max_offset);
	kr = vm_map_remove(test_map,
	    test_map->min_offset,
	    test_map->max_offset,
	    VM_MAP_REMOVE_GAPS_OK);
	assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);
#endif

	printf("VM_TEST_4K: PASS\n\n\n\n");
}
#endif /* PMAP_CREATE_FORCE_4K_PAGES && MACH_ASSERT */
784 | ||
785 | #if MACH_ASSERT | |
786 | static void | |
787 | vm_test_map_copy_adjust_to_target_one( | |
788 | vm_map_copy_t copy_map, | |
789 | vm_map_t target_map) | |
790 | { | |
791 | kern_return_t kr; | |
792 | vm_map_copy_t target_copy; | |
793 | vm_map_offset_t overmap_start, overmap_end, trimmed_start; | |
794 | ||
795 | target_copy = VM_MAP_COPY_NULL; | |
796 | /* size is 2 (4k) pages but range covers 3 pages */ | |
797 | kr = vm_map_copy_adjust_to_target(copy_map, | |
798 | 0x0 + 0xfff, | |
799 | 0x1002, | |
800 | target_map, | |
801 | FALSE, | |
802 | &target_copy, | |
803 | &overmap_start, | |
804 | &overmap_end, | |
805 | &trimmed_start); | |
806 | assert(kr == KERN_SUCCESS); | |
807 | assert(overmap_start == 0); | |
808 | assert(overmap_end == 0); | |
809 | assert(trimmed_start == 0); | |
810 | assertf(target_copy->size == 0x3000, | |
811 | "target_copy %p size 0x%llx\n", | |
812 | target_copy, (uint64_t)target_copy->size); | |
813 | vm_map_copy_discard(target_copy); | |
814 | ||
815 | /* 1. adjust_to_target() for bad offset -> error */ | |
816 | /* 2. adjust_to_target() for bad size -> error */ | |
817 | /* 3. adjust_to_target() for the whole thing -> unchanged */ | |
818 | /* 4. adjust_to_target() to trim start by less than 1 page */ | |
819 | /* 5. adjust_to_target() to trim end by less than 1 page */ | |
820 | /* 6. adjust_to_target() to trim start and end by less than 1 page */ | |
821 | /* 7. adjust_to_target() to trim start by more than 1 page */ | |
822 | /* 8. adjust_to_target() to trim end by more than 1 page */ | |
823 | /* 9. adjust_to_target() to trim start and end by more than 1 page */ | |
824 | /* 10. adjust_to_target() to trim start by more than 1 entry */ | |
825 | /* 11. adjust_to_target() to trim start by more than 1 entry */ | |
826 | /* 12. adjust_to_target() to trim start and end by more than 1 entry */ | |
827 | /* 13. adjust_to_target() to trim start and end down to 1 entry */ | |
828 | } | |
829 | ||
/*
 * Exercise vm_map_copy_adjust_to_target() with copy maps and target maps
 * of different page sizes (4k vs 16k), plus a regression test for
 * <rdar://60959809> going through mach_make_memory_entry_64() /
 * mach_memory_entry_map_size().  Failures assert rather than return.
 */
static void
vm_test_map_copy_adjust_to_target(void)
{
	kern_return_t kr;
	vm_map_t map4k, map16k;
	vm_object_t obj1, obj2, obj3, obj4;
	vm_map_offset_t addr4k, addr16k;
	vm_map_size_t size4k, size16k;
	vm_map_copy_t copy4k, copy16k;
	vm_prot_t curprot, maxprot;

	/* create a 4k map (page shift 12) with no pmap backing */
	map4k = vm_map_create(PMAP_NULL, 0, (uint32_t)-1, TRUE);
	vm_map_set_page_shift(map4k, 12);

	/* create a 16k map (page shift 14) */
	map16k = vm_map_create(PMAP_NULL, 0, (uint32_t)-1, TRUE);
	vm_map_set_page_shift(map16k, 14);

	/*
	 * create 4 VM objects; only obj1 is mapped by the currently
	 * enabled test combination — obj2..obj4 are just leak-checked
	 * at the end (presumably reserved for the commented-out
	 * combinations below).
	 */
	obj1 = vm_object_allocate(0x100000);
	obj2 = vm_object_allocate(0x100000);
	obj3 = vm_object_allocate(0x100000);
	obj4 = vm_object_allocate(0x100000);

	/* map objects in 4k map: 3 (4k) pages of obj1 at 0x1000 */
	vm_object_reference(obj1);
	addr4k = 0x1000;
	size4k = 0x3000;
	kr = vm_map_enter(map4k, &addr4k, size4k, 0, VM_FLAGS_ANYWHERE,
	    VM_MAP_KERNEL_FLAGS_NONE, 0, obj1, 0,
	    FALSE, VM_PROT_DEFAULT, VM_PROT_DEFAULT,
	    VM_INHERIT_DEFAULT);
	assert(kr == KERN_SUCCESS);
	assert(addr4k == 0x1000); /* ANYWHERE is expected to honor the hint here */

	/* map objects in 16k map: 2 (16k) pages of obj1 at 0x4000 */
	vm_object_reference(obj1);
	addr16k = 0x4000;
	size16k = 0x8000;
	kr = vm_map_enter(map16k, &addr16k, size16k, 0, VM_FLAGS_ANYWHERE,
	    VM_MAP_KERNEL_FLAGS_NONE, 0, obj1, 0,
	    FALSE, VM_PROT_DEFAULT, VM_PROT_DEFAULT,
	    VM_INHERIT_DEFAULT);
	assert(kr == KERN_SUCCESS);
	assert(addr16k == 0x4000);

	/* test for <rdar://60959809> */
	ipc_port_t mem_entry;
	memory_object_size_t mem_entry_size;
	mach_vm_size_t map_size;
	/* unaligned 0x1002-byte range starting mid-page in the 16k map */
	mem_entry_size = 0x1002;
	mem_entry = IPC_PORT_NULL;
	kr = mach_make_memory_entry_64(map16k, &mem_entry_size, addr16k + 0x2fff,
	    MAP_MEM_VM_SHARE | MAP_MEM_USE_DATA_ADDR | VM_PROT_READ,
	    &mem_entry, IPC_PORT_NULL);
	assertf(kr == KERN_SUCCESS, "kr 0x%x\n", kr);
	/* entry size is rounded out to the 16k map's page boundaries */
	assertf(mem_entry_size == 0x5001, "mem_entry_size 0x%llx\n", (uint64_t) mem_entry_size);
	map_size = 0;
	/* mapping the same 0x1002 bytes in a 4k map should span 3 (4k) pages */
	kr = mach_memory_entry_map_size(mem_entry, map4k, 0, 0x1002, &map_size);
	assertf(kr == KERN_SUCCESS, "kr 0x%x\n", kr);
	assertf(map_size == 0x3000, "mem_entry %p map_size 0x%llx\n", mem_entry, (uint64_t)map_size);
	mach_memory_entry_port_release(mem_entry);

	/* create 4k copy map (shared, not copied: FALSE) */
	curprot = VM_PROT_NONE;
	maxprot = VM_PROT_NONE;
	kr = vm_map_copy_extract(map4k, addr4k, 0x3000,
	    FALSE, &copy4k, &curprot, &maxprot,
	    VM_INHERIT_DEFAULT, VM_MAP_KERNEL_FLAGS_NONE);
	assert(kr == KERN_SUCCESS);
	assert(copy4k->size == 0x3000);

	/* create 16k copy map */
	curprot = VM_PROT_NONE;
	maxprot = VM_PROT_NONE;
	kr = vm_map_copy_extract(map16k, addr16k, 0x4000,
	    FALSE, &copy16k, &curprot, &maxprot,
	    VM_INHERIT_DEFAULT, VM_MAP_KERNEL_FLAGS_NONE);
	assert(kr == KERN_SUCCESS);
	assert(copy16k->size == 0x4000);

	/*
	 * test each combination; only 16k-copy-into-4k-target is
	 * currently enabled — the other three are commented out
	 * (NOTE(review): unclear whether they fail or were never
	 * finished; confirm before enabling).
	 */
	// vm_test_map_copy_adjust_to_target_one(copy4k, map4k);
	// vm_test_map_copy_adjust_to_target_one(copy16k, map16k);
	// vm_test_map_copy_adjust_to_target_one(copy4k, map16k);
	vm_test_map_copy_adjust_to_target_one(copy16k, map4k);

	/* assert 1 ref on 4k map */
	assert(os_ref_get_count(&map4k->map_refcnt) == 1);
	/* release 4k map */
	vm_map_deallocate(map4k);
	/* assert 1 ref on 16k map */
	assert(os_ref_get_count(&map16k->map_refcnt) == 1);
	/* release 16k map */
	vm_map_deallocate(map16k);
	/* deallocate copy maps (these still hold the refs on obj1) */
	vm_map_copy_discard(copy4k);
	vm_map_copy_discard(copy16k);
	/* assert 1 ref on all VM objects: everything above has been released */
	assert(obj1->ref_count == 1);
	assert(obj2->ref_count == 1);
	assert(obj3->ref_count == 1);
	assert(obj4->ref_count == 1);
	/* release all VM objects */
	vm_object_deallocate(obj1);
	vm_object_deallocate(obj2);
	vm_object_deallocate(obj3);
	vm_object_deallocate(obj4);
}
940 | #endif /* MACH_ASSERT */ | |
941 | ||
/*
 * TRUE only while vm_tests() is running — presumably consulted elsewhere
 * so other subsystems can tell the VM self-tests are in progress
 * (TODO confirm readers).
 */
boolean_t vm_tests_in_progress = FALSE;
943 | ||
/*
 * Entry point for the VM self-tests.  Runs each test in sequence with
 * vm_tests_in_progress set for the duration.  The individual tests
 * assert()/assertf() on failure rather than returning an error, so
 * reaching the end always yields KERN_SUCCESS.
 */
kern_return_t
vm_tests(void)
{
	vm_tests_in_progress = TRUE;

	vm_test_collapse_compressor();
	vm_test_wire_and_extract();
	vm_test_page_wire_overflow_panic();
	vm_test_kernel_object_fault();
	vm_test_device_pager_transpose();
#if MACH_ASSERT
	vm_test_map_copy_adjust_to_target();
#endif /* MACH_ASSERT */
#if PMAP_CREATE_FORCE_4K_PAGES && MACH_ASSERT
	vm_test_4k();
#endif /* PMAP_CREATE_FORCE_4K_PAGES && MACH_ASSERT */

	vm_tests_in_progress = FALSE;

	return KERN_SUCCESS;
}