/*
 * Copyright (c) 2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach_assert.h>

#include <mach/mach_types.h>
#include <mach/memory_object.h>
#include <mach/vm_map.h>

#include <vm/memory_object.h>
#include <vm/vm_fault.h>
#include <vm/vm_map.h>
#include <vm/vm_object.h>
#include <vm/vm_protos.h>

extern kern_return_t vm_map_copy_adjust_to_target(
    vm_map_copy_t copy_map,
    vm_map_offset_t offset,
    vm_map_size_t size,
    vm_map_t target_map,
    boolean_t copy,
    vm_map_copy_t *target_copy_map_p,
    vm_map_offset_t *overmap_start_p,
    vm_map_offset_t *overmap_end_p,
    vm_map_offset_t *trimmed_start_p);
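
/*
 * vm_map_copy_adjust_to_target() takes a copy map plus an offset/size
 * range within it and produces a copy map suitable for mapping into
 * target_map, whose page size may differ from the page size the copy
 * was made with.  As exercised by vm_test_map_copy_adjust_to_target_one()
 * below, the out parameters report how far the adjusted copy overmaps
 * the requested range at either end and how much was trimmed from its
 * start.
 */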

#define VM_TEST_COLLAPSE_COMPRESSOR             0
#define VM_TEST_WIRE_AND_EXTRACT                0
#define VM_TEST_PAGE_WIRE_OVERFLOW_PANIC        0
#if __arm64__
#define VM_TEST_KERNEL_OBJECT_FAULT             0
#endif /* __arm64__ */
#define VM_TEST_DEVICE_PAGER_TRANSPOSE  (DEVELOPMENT || DEBUG)

#if VM_TEST_COLLAPSE_COMPRESSOR
extern boolean_t vm_object_collapse_compressor_allowed;
#include <IOKit/IOLib.h>
static void
vm_test_collapse_compressor(void)
{
    vm_object_size_t backing_size, top_size;
    vm_object_t backing_object, top_object;
    vm_map_offset_t backing_offset, top_offset;
    unsigned char *backing_address, *top_address;
    kern_return_t kr;

    printf("VM_TEST_COLLAPSE_COMPRESSOR:\n");

    /* create backing object */
    backing_size = 15 * PAGE_SIZE;
    backing_object = vm_object_allocate(backing_size);
    assert(backing_object != VM_OBJECT_NULL);
    printf("VM_TEST_COLLAPSE_COMPRESSOR: created backing object %p\n",
        backing_object);
    /* map backing object */
    backing_offset = 0;
    kr = vm_map_enter(kernel_map, &backing_offset, backing_size, 0,
        VM_FLAGS_ANYWHERE, VM_MAP_KERNEL_FLAGS_NONE,
        backing_object, 0, FALSE,
        VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
    assert(kr == KERN_SUCCESS);
    backing_address = (unsigned char *) backing_offset;
    printf("VM_TEST_COLLAPSE_COMPRESSOR: "
        "mapped backing object %p at 0x%llx\n",
        backing_object, (uint64_t) backing_offset);
    /* populate with pages to be compressed in backing object */
    backing_address[0x1 * PAGE_SIZE] = 0xB1;
    backing_address[0x4 * PAGE_SIZE] = 0xB4;
    backing_address[0x7 * PAGE_SIZE] = 0xB7;
    backing_address[0xa * PAGE_SIZE] = 0xBA;
    backing_address[0xd * PAGE_SIZE] = 0xBD;
    printf("VM_TEST_COLLAPSE_COMPRESSOR: "
        "populated pages to be compressed in "
        "backing_object %p\n", backing_object);
    /* compress backing object */
    vm_object_pageout(backing_object);
    printf("VM_TEST_COLLAPSE_COMPRESSOR: compressing backing_object %p\n",
        backing_object);
    /* wait for all the pages to be gone */
    while (*(volatile int *)&backing_object->resident_page_count != 0) {
        IODelay(10);
    }
    printf("VM_TEST_COLLAPSE_COMPRESSOR: backing_object %p compressed\n",
        backing_object);
    /* populate with pages to be resident in backing object */
    backing_address[0x0 * PAGE_SIZE] = 0xB0;
    backing_address[0x3 * PAGE_SIZE] = 0xB3;
    backing_address[0x6 * PAGE_SIZE] = 0xB6;
    backing_address[0x9 * PAGE_SIZE] = 0xB9;
    backing_address[0xc * PAGE_SIZE] = 0xBC;
    printf("VM_TEST_COLLAPSE_COMPRESSOR: "
        "populated pages to be resident in "
        "backing_object %p\n", backing_object);
    /* leave the other pages absent */
    /* mess with the paging_offset of the backing_object */
    assert(backing_object->paging_offset == 0);
    backing_object->paging_offset = 3 * PAGE_SIZE;

    /* create top object */
    top_size = 9 * PAGE_SIZE;
    top_object = vm_object_allocate(top_size);
    assert(top_object != VM_OBJECT_NULL);
    printf("VM_TEST_COLLAPSE_COMPRESSOR: created top object %p\n",
        top_object);
    /* map top object */
    top_offset = 0;
    kr = vm_map_enter(kernel_map, &top_offset, top_size, 0,
        VM_FLAGS_ANYWHERE, VM_MAP_KERNEL_FLAGS_NONE,
        top_object, 0, FALSE,
        VM_PROT_DEFAULT, VM_PROT_DEFAULT, VM_INHERIT_DEFAULT);
    assert(kr == KERN_SUCCESS);
    top_address = (unsigned char *) top_offset;
    printf("VM_TEST_COLLAPSE_COMPRESSOR: "
        "mapped top object %p at 0x%llx\n",
        top_object, (uint64_t) top_offset);
    /* populate with pages to be compressed in top object */
    top_address[0x3 * PAGE_SIZE] = 0xA3;
    top_address[0x4 * PAGE_SIZE] = 0xA4;
    top_address[0x5 * PAGE_SIZE] = 0xA5;
    printf("VM_TEST_COLLAPSE_COMPRESSOR: "
        "populated pages to be compressed in "
        "top_object %p\n", top_object);
    /* compress top object */
    vm_object_pageout(top_object);
    printf("VM_TEST_COLLAPSE_COMPRESSOR: compressing top_object %p\n",
        top_object);
    /* wait for all the pages to be gone */
    while (top_object->resident_page_count != 0) {
        IODelay(10);
    }
    printf("VM_TEST_COLLAPSE_COMPRESSOR: top_object %p compressed\n",
        top_object);
    /* populate with pages to be resident in top object */
    top_address[0x0 * PAGE_SIZE] = 0xA0;
    top_address[0x1 * PAGE_SIZE] = 0xA1;
    top_address[0x2 * PAGE_SIZE] = 0xA2;
    printf("VM_TEST_COLLAPSE_COMPRESSOR: "
        "populated pages to be resident in "
        "top_object %p\n", top_object);
    /* leave the other pages absent */

    /* link the 2 objects */
    vm_object_reference(backing_object);
    top_object->shadow = backing_object;
    top_object->vo_shadow_offset = 3 * PAGE_SIZE;
    printf("VM_TEST_COLLAPSE_COMPRESSOR: linked %p and %p\n",
        top_object, backing_object);

    /* unmap backing object */
    vm_map_remove(kernel_map,
        backing_offset,
        backing_offset + backing_size,
        VM_MAP_REMOVE_NO_FLAGS);
    printf("VM_TEST_COLLAPSE_COMPRESSOR: "
        "unmapped backing_object %p [0x%llx:0x%llx]\n",
        backing_object,
        (uint64_t) backing_offset,
        (uint64_t) (backing_offset + backing_size));

    /* collapse */
    printf("VM_TEST_COLLAPSE_COMPRESSOR: collapsing %p\n", top_object);
    vm_object_lock(top_object);
    vm_object_collapse(top_object, 0, FALSE);
    vm_object_unlock(top_object);
    printf("VM_TEST_COLLAPSE_COMPRESSOR: collapsed %p\n", top_object);

    if (top_object->shadow != VM_OBJECT_NULL) {
        printf("VM_TEST_COLLAPSE_COMPRESSOR: not collapsed\n");
        printf("VM_TEST_COLLAPSE_COMPRESSOR: FAIL\n");
        if (vm_object_collapse_compressor_allowed) {
            panic("VM_TEST_COLLAPSE_COMPRESSOR: FAIL\n");
        }
    } else {
        /* check the contents of the mapping */
        unsigned char expect[9] =
        { 0xA0, 0xA1, 0xA2,   /* resident in top */
          0xA3, 0xA4, 0xA5,   /* compressed in top */
          0xB9,               /* resident in backing + shadow_offset */
          0xBD,               /* compressed in backing + shadow_offset + paging_offset */
          0x00 };             /* absent in both */
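        /*
         * How the backing entries are reached: page 6 of top_object is
         * absent, so the lookup falls through to backing_object at
         * 6 * PAGE_SIZE + vo_shadow_offset (3 pages) = page 9, which is
         * resident and holds 0xB9.  For page 7, backing page 0xa is not
         * resident either; the compressor lookup then adds paging_offset
         * (3 more pages) and finds the data compressed from page 0xd: 0xBD.
         */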
        unsigned char actual[9];
        unsigned int i, errors;

        errors = 0;
        for (i = 0; i < sizeof(actual); i++) {
            actual[i] = (unsigned char) top_address[i * PAGE_SIZE];
            if (actual[i] != expect[i]) {
                errors++;
            }
        }
        printf("VM_TEST_COLLAPSE_COMPRESSOR: "
            "actual [%x %x %x %x %x %x %x %x %x] "
            "expect [%x %x %x %x %x %x %x %x %x] "
            "%d errors\n",
            actual[0], actual[1], actual[2], actual[3],
            actual[4], actual[5], actual[6], actual[7],
            actual[8],
            expect[0], expect[1], expect[2], expect[3],
            expect[4], expect[5], expect[6], expect[7],
            expect[8],
            errors);
        if (errors) {
            panic("VM_TEST_COLLAPSE_COMPRESSOR: FAIL\n");
        } else {
            printf("VM_TEST_COLLAPSE_COMPRESSOR: PASS\n");
        }
    }
}
#else /* VM_TEST_COLLAPSE_COMPRESSOR */
#define vm_test_collapse_compressor()
#endif /* VM_TEST_COLLAPSE_COMPRESSOR */

#if VM_TEST_WIRE_AND_EXTRACT
extern ledger_template_t task_ledger_template;
#include <mach/mach_vm.h>
extern ppnum_t vm_map_get_phys_page(vm_map_t map,
    vm_offset_t offset);
static void
vm_test_wire_and_extract(void)
{
    ledger_t ledger;
    vm_map_t user_map, wire_map;
    mach_vm_address_t user_addr, wire_addr;
    mach_vm_size_t user_size, wire_size;
    mach_vm_offset_t cur_offset;
    vm_prot_t cur_prot, max_prot;
    ppnum_t user_ppnum, wire_ppnum;
    kern_return_t kr;

    ledger = ledger_instantiate(task_ledger_template,
        LEDGER_CREATE_ACTIVE_ENTRIES);
    user_map = vm_map_create(pmap_create_options(ledger, 0, PMAP_CREATE_64BIT),
        0x100000000ULL,
        0x200000000ULL,
        TRUE);
    wire_map = vm_map_create(NULL,
        0x100000000ULL,
        0x200000000ULL,
        TRUE);
    user_addr = 0;
    user_size = 0x10000;
    kr = mach_vm_allocate(user_map,
        &user_addr,
        user_size,
        VM_FLAGS_ANYWHERE);
    assert(kr == KERN_SUCCESS);
    wire_addr = 0;
    wire_size = user_size;
    kr = mach_vm_remap(wire_map,
        &wire_addr,
        wire_size,
        0,
        VM_FLAGS_ANYWHERE,
        user_map,
        user_addr,
        FALSE,
        &cur_prot,
        &max_prot,
        VM_INHERIT_NONE);
    assert(kr == KERN_SUCCESS);
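    /*
     * Every page wired through wire_map must resolve to the same
     * physical page that backs the original user_map mapping; the loop
     * below panics if vm_map_wire_and_extract() ever returns a
     * different ppnum than vm_map_get_phys_page().
     */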
    for (cur_offset = 0;
        cur_offset < wire_size;
        cur_offset += PAGE_SIZE) {
        kr = vm_map_wire_and_extract(wire_map,
            wire_addr + cur_offset,
            VM_PROT_DEFAULT | VM_PROT_MEMORY_TAG_MAKE(VM_KERN_MEMORY_OSFMK),
            TRUE,
            &wire_ppnum);
        assert(kr == KERN_SUCCESS);
        user_ppnum = vm_map_get_phys_page(user_map,
            user_addr + cur_offset);
        printf("VM_TEST_WIRE_AND_EXTRACT: kr=0x%x "
            "user[%p:0x%llx:0x%x] wire[%p:0x%llx:0x%x]\n",
            kr,
            user_map, user_addr + cur_offset, user_ppnum,
            wire_map, wire_addr + cur_offset, wire_ppnum);
        if (kr != KERN_SUCCESS ||
            wire_ppnum == 0 ||
            wire_ppnum != user_ppnum) {
            panic("VM_TEST_WIRE_AND_EXTRACT: FAIL\n");
        }
    }
    cur_offset -= PAGE_SIZE;
    kr = vm_map_wire_and_extract(wire_map,
        wire_addr + cur_offset,
        VM_PROT_DEFAULT | VM_PROT_MEMORY_TAG_MAKE(VM_KERN_MEMORY_OSFMK),
        TRUE,
        &wire_ppnum);
    assert(kr == KERN_SUCCESS);
    printf("VM_TEST_WIRE_AND_EXTRACT: re-wire kr=0x%x "
        "user[%p:0x%llx:0x%x] wire[%p:0x%llx:0x%x]\n",
        kr,
        user_map, user_addr + cur_offset, user_ppnum,
        wire_map, wire_addr + cur_offset, wire_ppnum);
    if (kr != KERN_SUCCESS ||
        wire_ppnum == 0 ||
        wire_ppnum != user_ppnum) {
        panic("VM_TEST_WIRE_AND_EXTRACT: FAIL\n");
    }

    printf("VM_TEST_WIRE_AND_EXTRACT: PASS\n");
}
#else /* VM_TEST_WIRE_AND_EXTRACT */
#define vm_test_wire_and_extract()
#endif /* VM_TEST_WIRE_AND_EXTRACT */

#if VM_TEST_PAGE_WIRE_OVERFLOW_PANIC
static void
vm_test_page_wire_overflow_panic(void)
{
    vm_object_t object;
    vm_page_t page;

    printf("VM_TEST_PAGE_WIRE_OVERFLOW_PANIC: starting...\n");

    object = vm_object_allocate(PAGE_SIZE);
    vm_object_lock(object);
    page = vm_page_alloc(object, 0x0);
    vm_page_lock_queues();
    do {
        vm_page_wire(page, 1, FALSE);
    } while (page->wire_count != 0);
    vm_page_unlock_queues();
    vm_object_unlock(object);
    panic("FBDP(%p,%p): wire_count overflow not detected\n",
        object, page);
}
#else /* VM_TEST_PAGE_WIRE_OVERFLOW_PANIC */
#define vm_test_page_wire_overflow_panic()
#endif /* VM_TEST_PAGE_WIRE_OVERFLOW_PANIC */

#if __arm64__ && VM_TEST_KERNEL_OBJECT_FAULT
extern int copyinframe(vm_address_t fp, char *frame, boolean_t is64bit);
static void
vm_test_kernel_object_fault(void)
{
    kern_return_t kr;
    vm_offset_t stack;
    uintptr_t frameb[2];
    int ret;

    kr = kernel_memory_allocate(kernel_map, &stack,
        kernel_stack_size + (2 * PAGE_SIZE),
        0,
        (KMA_KSTACK | KMA_KOBJECT |
        KMA_GUARD_FIRST | KMA_GUARD_LAST),
        VM_KERN_MEMORY_STACK);
    if (kr != KERN_SUCCESS) {
        panic("VM_TEST_KERNEL_OBJECT_FAULT: kernel_memory_allocate kr 0x%x\n", kr);
    }
    ret = copyinframe((uintptr_t)stack, (char *)frameb, TRUE);
    if (ret != 0) {
        printf("VM_TEST_KERNEL_OBJECT_FAULT: PASS\n");
    } else {
        printf("VM_TEST_KERNEL_OBJECT_FAULT: FAIL\n");
    }
    vm_map_remove(kernel_map,
        stack,
        stack + kernel_stack_size + (2 * PAGE_SIZE),
        VM_MAP_REMOVE_KUNWIRE);
}
#else /* __arm64__ && VM_TEST_KERNEL_OBJECT_FAULT */
#define vm_test_kernel_object_fault()
#endif /* __arm64__ && VM_TEST_KERNEL_OBJECT_FAULT */

#if VM_TEST_DEVICE_PAGER_TRANSPOSE
static void
vm_test_device_pager_transpose(void)
{
    memory_object_t device_pager;
    vm_object_t anon_object, device_object;
    vm_size_t size;
    vm_map_offset_t device_mapping;
    kern_return_t kr;

    size = 3 * PAGE_SIZE;
    anon_object = vm_object_allocate(size);
    assert(anon_object != VM_OBJECT_NULL);
    device_pager = device_pager_setup(NULL, 0, size, 0);
    assert(device_pager != NULL);
    device_object = memory_object_to_vm_object(device_pager);
    assert(device_object != VM_OBJECT_NULL);
#if 0
    /*
     * Can't actually map this, since another thread might do a
     * vm_map_enter() that gets coalesced into this object, which
     * would cause the test to fail.
     */
    vm_map_offset_t anon_mapping = 0;
    kr = vm_map_enter(kernel_map, &anon_mapping, size, 0,
        VM_FLAGS_ANYWHERE, VM_MAP_KERNEL_FLAGS_NONE, VM_KERN_MEMORY_NONE,
        anon_object, 0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL,
        VM_INHERIT_DEFAULT);
    assert(kr == KERN_SUCCESS);
#endif
    device_mapping = 0;
    kr = vm_map_enter_mem_object(kernel_map, &device_mapping, size, 0,
        VM_FLAGS_ANYWHERE,
        VM_MAP_KERNEL_FLAGS_NONE,
        VM_KERN_MEMORY_NONE,
        (void *)device_pager, 0, FALSE,
        VM_PROT_DEFAULT, VM_PROT_ALL,
        VM_INHERIT_DEFAULT);
    assert(kr == KERN_SUCCESS);
    memory_object_deallocate(device_pager);

    vm_object_lock(anon_object);
    vm_object_activity_begin(anon_object);
    anon_object->blocked_access = TRUE;
    vm_object_unlock(anon_object);
    vm_object_lock(device_object);
    vm_object_activity_begin(device_object);
    device_object->blocked_access = TRUE;
    vm_object_unlock(device_object);

    assert(anon_object->ref_count == 1);
    assert(!anon_object->named);
    assert(device_object->ref_count == 2);
    assert(device_object->named);
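
    /*
     * vm_object_transpose() swaps the two objects' contents and
     * identity: afterwards the anonymous object should hold the named
     * reference (ref_count 2) and the device object should be back to a
     * single unnamed reference, which is what the asserts after the
     * transpose check.
     */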
    kr = vm_object_transpose(device_object, anon_object, size);
    assert(kr == KERN_SUCCESS);

    vm_object_lock(anon_object);
    vm_object_activity_end(anon_object);
    anon_object->blocked_access = FALSE;
    vm_object_unlock(anon_object);
    vm_object_lock(device_object);
    vm_object_activity_end(device_object);
    device_object->blocked_access = FALSE;
    vm_object_unlock(device_object);

    assert(anon_object->ref_count == 2);
    assert(anon_object->named);
#if 0
    kr = vm_deallocate(kernel_map, anon_mapping, size);
    assert(kr == KERN_SUCCESS);
#endif
    assert(device_object->ref_count == 1);
    assert(!device_object->named);
    kr = vm_deallocate(kernel_map, device_mapping, size);
    assert(kr == KERN_SUCCESS);

    printf("VM_TEST_DEVICE_PAGER_TRANSPOSE: PASS\n");
}
#else /* VM_TEST_DEVICE_PAGER_TRANSPOSE */
#define vm_test_device_pager_transpose()
#endif /* VM_TEST_DEVICE_PAGER_TRANSPOSE */

#if PMAP_CREATE_FORCE_4K_PAGES && MACH_ASSERT
extern kern_return_t vm_allocate_external(vm_map_t map,
    vm_offset_t *addr,
    vm_size_t size,
    int flags);
extern kern_return_t vm_remap_external(vm_map_t target_map,
    vm_offset_t *address,
    vm_size_t size,
    vm_offset_t mask,
    int flags,
    vm_map_t src_map,
    vm_offset_t memory_address,
    boolean_t copy,
    vm_prot_t *cur_protection,
    vm_prot_t *max_protection,
    vm_inherit_t inheritance);
extern int debug4k_panic_on_misaligned_sharing;

void vm_test_4k(void);
void
vm_test_4k(void)
{
    pmap_t test_pmap;
    vm_map_t test_map;
    kern_return_t kr;
    vm_address_t expected_addr;
    vm_address_t alloc1_addr, alloc2_addr, alloc3_addr, alloc4_addr;
    vm_address_t alloc5_addr, dealloc_addr, remap_src_addr, remap_dst_addr;
    vm_size_t alloc1_size, alloc2_size, alloc3_size, alloc4_size;
    vm_size_t alloc5_size, remap_src_size;
    vm_address_t fault_addr;
    vm_prot_t cur_prot, max_prot;
    int saved_debug4k_panic_on_misaligned_sharing;

    printf("\n\n\nVM_TEST_4K:%d creating 4K map...\n", __LINE__);
    test_pmap = pmap_create_options(NULL, 0, PMAP_CREATE_64BIT | PMAP_CREATE_FORCE_4K_PAGES);
    assert(test_pmap != NULL);
    test_map = vm_map_create(test_pmap,
        MACH_VM_MIN_ADDRESS,
        MACH_VM_MAX_ADDRESS,
        TRUE);
    assert(test_map != VM_MAP_NULL);
    vm_map_set_page_shift(test_map, FOURK_PAGE_SHIFT);
    printf("VM_TEST_4K:%d map %p pmap %p page_size 0x%x\n", __LINE__, test_map, test_pmap, VM_MAP_PAGE_SIZE(test_map));
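
    /*
     * test_map now uses 4K pages (FOURK_PAGE_SHIFT) even when the
     * kernel map uses 16K pages; everything below exercises allocation,
     * protection, fault and remap paths across the two page sizes.
     */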
    alloc1_addr = 0;
    alloc1_size = 1 * FOURK_PAGE_SIZE;
    expected_addr = 0x1000;
    printf("VM_TEST_4K:%d vm_allocate(%p, 0x%lx, 0x%lx)...\n", __LINE__, test_map, alloc1_addr, alloc1_size);
    kr = vm_allocate_external(test_map,
        &alloc1_addr,
        alloc1_size,
        VM_FLAGS_ANYWHERE);
    assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);
    assertf(alloc1_addr == expected_addr, "alloc1_addr = 0x%lx expected 0x%lx", alloc1_addr, expected_addr);
    printf("VM_TEST_4K:%d -> 0x%lx\n", __LINE__, alloc1_addr);
    expected_addr += alloc1_size;

    printf("VM_TEST_4K:%d vm_deallocate(%p, 0x%lx, 0x%lx)...\n", __LINE__, test_map, alloc1_addr, alloc1_size);
    kr = vm_deallocate(test_map, alloc1_addr, alloc1_size);
    assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);
    printf("VM_TEST_4K:%d -> 0x%lx\n", __LINE__, alloc1_addr);

    alloc1_addr = 0;
    alloc1_size = 1 * FOURK_PAGE_SIZE;
    expected_addr = 0x1000;
    printf("VM_TEST_4K:%d vm_allocate(%p, 0x%lx, 0x%lx)...\n", __LINE__, test_map, alloc1_addr, alloc1_size);
    kr = vm_allocate_external(test_map,
        &alloc1_addr,
        alloc1_size,
        VM_FLAGS_ANYWHERE);
    assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);
    assertf(alloc1_addr == expected_addr, "alloc1_addr = 0x%lx expected 0x%lx", alloc1_addr, expected_addr);
    printf("VM_TEST_4K:%d -> 0x%lx\n", __LINE__, alloc1_addr);
    expected_addr += alloc1_size;

    alloc2_addr = 0;
    alloc2_size = 3 * FOURK_PAGE_SIZE;
    printf("VM_TEST_4K:%d vm_allocate(%p, 0x%lx, 0x%lx)...\n", __LINE__, test_map, alloc2_addr, alloc2_size);
    kr = vm_allocate_external(test_map,
        &alloc2_addr,
        alloc2_size,
        VM_FLAGS_ANYWHERE);
    assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);
    assertf(alloc2_addr == expected_addr, "alloc2_addr = 0x%lx expected 0x%lx", alloc2_addr, expected_addr);
    printf("VM_TEST_4K:%d -> 0x%lx\n", __LINE__, alloc2_addr);
    expected_addr += alloc2_size;

    alloc3_addr = 0;
    alloc3_size = 18 * FOURK_PAGE_SIZE;
    printf("VM_TEST_4K:%d vm_allocate(%p, 0x%lx, 0x%lx)...\n", __LINE__, test_map, alloc3_addr, alloc3_size);
    kr = vm_allocate_external(test_map,
        &alloc3_addr,
        alloc3_size,
        VM_FLAGS_ANYWHERE);
    assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);
    assertf(alloc3_addr == expected_addr, "alloc3_addr = 0x%lx expected 0x%lx\n", alloc3_addr, expected_addr);
    printf("VM_TEST_4K:%d -> 0x%lx\n", __LINE__, alloc3_addr);
    expected_addr += alloc3_size;

    alloc4_addr = 0;
    alloc4_size = 1 * FOURK_PAGE_SIZE;
    printf("VM_TEST_4K:%d vm_allocate(%p, 0x%lx, 0x%lx)...\n", __LINE__, test_map, alloc4_addr, alloc4_size);
    kr = vm_allocate_external(test_map,
        &alloc4_addr,
        alloc4_size,
        VM_FLAGS_ANYWHERE);
    assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);
    assertf(alloc4_addr == expected_addr, "alloc4_addr = 0x%lx expected 0x%lx", alloc4_addr, expected_addr);
    printf("VM_TEST_4K:%d -> 0x%lx\n", __LINE__, alloc4_addr);
    expected_addr += alloc4_size;

    printf("VM_TEST_4K:%d vm_protect(%p, 0x%lx, 0x%lx, READ)...\n", __LINE__, test_map, alloc2_addr, (1UL * FOURK_PAGE_SIZE));
    kr = vm_protect(test_map,
        alloc2_addr,
        (1UL * FOURK_PAGE_SIZE),
        FALSE,
        VM_PROT_READ);
    assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);

    for (fault_addr = alloc1_addr;
        fault_addr < alloc4_addr + alloc4_size + (2 * FOURK_PAGE_SIZE);
        fault_addr += FOURK_PAGE_SIZE) {
        printf("VM_TEST_4K:%d write fault at 0x%lx...\n", __LINE__, fault_addr);
        kr = vm_fault(test_map,
            fault_addr,
            VM_PROT_WRITE,
            FALSE,
            VM_KERN_MEMORY_NONE,
            THREAD_UNINT,
            NULL,
            0);
        printf("VM_TEST_4K:%d -> 0x%x\n", __LINE__, kr);
        if (fault_addr == alloc2_addr) {
            assertf(kr == KERN_PROTECTION_FAILURE, "fault_addr = 0x%lx kr = 0x%x expected 0x%x", fault_addr, kr, KERN_PROTECTION_FAILURE);
            printf("VM_TEST_4K:%d read fault at 0x%lx...\n", __LINE__, fault_addr);
            kr = vm_fault(test_map,
                fault_addr,
                VM_PROT_READ,
                FALSE,
                VM_KERN_MEMORY_NONE,
                THREAD_UNINT,
                NULL,
                0);
            assertf(kr == KERN_SUCCESS, "fault_addr = 0x%lx kr = 0x%x expected 0x%x", fault_addr, kr, KERN_SUCCESS);
            printf("VM_TEST_4K:%d -> 0x%x\n", __LINE__, kr);
        } else if (fault_addr >= alloc4_addr + alloc4_size) {
            assertf(kr == KERN_INVALID_ADDRESS, "fault_addr = 0x%lx kr = 0x%x expected 0x%x", fault_addr, kr, KERN_INVALID_ADDRESS);
        } else {
            assertf(kr == KERN_SUCCESS, "fault_addr = 0x%lx kr = 0x%x expected 0x%x", fault_addr, kr, KERN_SUCCESS);
        }
    }

    alloc5_addr = 0;
    alloc5_size = 7 * FOURK_PAGE_SIZE;
    printf("VM_TEST_4K:%d vm_allocate(%p, 0x%lx, 0x%lx)...\n", __LINE__, test_map, alloc5_addr, alloc5_size);
    kr = vm_allocate_external(test_map,
        &alloc5_addr,
        alloc5_size,
        VM_FLAGS_ANYWHERE);
    assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);
    assertf(alloc5_addr == expected_addr, "alloc5_addr = 0x%lx expected 0x%lx", alloc5_addr, expected_addr);
    printf("VM_TEST_4K:%d -> 0x%lx\n", __LINE__, alloc5_addr);
    expected_addr += alloc5_size;

    dealloc_addr = vm_map_round_page(alloc5_addr, PAGE_SHIFT);
    dealloc_addr += FOURK_PAGE_SIZE;
    printf("VM_TEST_4K:%d vm_deallocate(%p, 0x%lx, 0x%x)...\n", __LINE__, test_map, dealloc_addr, FOURK_PAGE_SIZE);
    kr = vm_deallocate(test_map, dealloc_addr, FOURK_PAGE_SIZE);
    assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);
    printf("VM_TEST_4K:%d -> 0x%x\n", __LINE__, kr);

    remap_src_addr = vm_map_round_page(alloc3_addr, PAGE_SHIFT);
    remap_src_addr += FOURK_PAGE_SIZE;
    remap_src_size = 2 * FOURK_PAGE_SIZE;
    remap_dst_addr = 0;
    printf("VM_TEST_4K:%d vm_remap(%p, 0x%lx, 0x%lx, 0x%lx, copy=0)...\n", __LINE__, test_map, remap_dst_addr, remap_src_size, remap_src_addr);
    kr = vm_remap_external(test_map,
        &remap_dst_addr,
        remap_src_size,
        0,
        VM_FLAGS_ANYWHERE,
        test_map,
        remap_src_addr,
        FALSE,
        &cur_prot,
        &max_prot,
        VM_INHERIT_DEFAULT);
    assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);
    assertf(remap_dst_addr == expected_addr, "remap_dst_addr = 0x%lx expected 0x%lx", remap_dst_addr, expected_addr);
    printf("VM_TEST_4K:%d -> 0x%lx\n", __LINE__, remap_dst_addr);
    expected_addr += remap_src_size;

    for (fault_addr = remap_dst_addr;
        fault_addr < remap_dst_addr + remap_src_size;
        fault_addr += 4096) {
        printf("VM_TEST_4K:%d write fault at 0x%lx...\n", __LINE__, fault_addr);
        kr = vm_fault(test_map,
            fault_addr,
            VM_PROT_WRITE,
            FALSE,
            VM_KERN_MEMORY_NONE,
            THREAD_UNINT,
            NULL,
            0);
        assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);
        printf("VM_TEST_4K:%d -> 0x%x\n", __LINE__, kr);
    }

    printf("VM_TEST_4K:\n");

    remap_src_addr = alloc3_addr + 0xc000;
    remap_src_size = 0x5000;
    printf("VM_TEST_4K: vm_remap(%p, 0x%lx, 0x%lx, %p, copy=0) from 4K to 16K\n", test_map, remap_src_addr, remap_src_size, kernel_map);
    kr = vm_remap_external(kernel_map,
        &remap_dst_addr,
        remap_src_size,
        0,
        VM_FLAGS_ANYWHERE | VM_FLAGS_RETURN_DATA_ADDR,
        test_map,
        remap_src_addr,
        FALSE,
        &cur_prot,
        &max_prot,
        VM_INHERIT_DEFAULT);
    assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);
    printf("VM_TEST_4K: -> remapped (shared) in map %p at addr 0x%lx\n", kernel_map, remap_dst_addr);

    printf("VM_TEST_4K:\n");

    remap_src_addr = alloc3_addr + 0xc000;
    remap_src_size = 0x5000;
    printf("VM_TEST_4K: vm_remap(%p, 0x%lx, 0x%lx, %p, copy=1) from 4K to 16K\n", test_map, remap_src_addr, remap_src_size, kernel_map);
    kr = vm_remap_external(kernel_map,
        &remap_dst_addr,
        remap_src_size,
        0,
        VM_FLAGS_ANYWHERE | VM_FLAGS_RETURN_DATA_ADDR,
        test_map,
        remap_src_addr,
        TRUE,
        &cur_prot,
        &max_prot,
        VM_INHERIT_DEFAULT);
    assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);
    printf("VM_TEST_4K: -> remapped (COW) in map %p at addr 0x%lx\n", kernel_map, remap_dst_addr);

    printf("VM_TEST_4K:\n");
    saved_debug4k_panic_on_misaligned_sharing = debug4k_panic_on_misaligned_sharing;
    debug4k_panic_on_misaligned_sharing = 0;
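
    /*
     * The next remap tries to share a range whose boundaries are not
     * aligned to the 16K pages of kernel_map; this is expected to fail,
     * so the debug panic is disabled to let the error come back as a
     * return code instead.
     */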
    remap_src_addr = alloc1_addr;
    remap_src_size = alloc1_size + alloc2_size;
    printf("VM_TEST_4K: vm_remap(%p, 0x%lx, 0x%lx, %p, copy=0) from 4K to 16K\n", test_map, remap_src_addr, remap_src_size, kernel_map);
    kr = vm_remap_external(kernel_map,
        &remap_dst_addr,
        remap_src_size,
        0,
        VM_FLAGS_ANYWHERE | VM_FLAGS_RETURN_DATA_ADDR,
        test_map,
        remap_src_addr,
        FALSE,
        &cur_prot,
        &max_prot,
        VM_INHERIT_DEFAULT);
    assertf(kr != KERN_SUCCESS, "kr = 0x%x", kr);
    printf("VM_TEST_4K: -> remap (SHARED) in map %p at addr 0x%lx kr=0x%x\n", kernel_map, remap_dst_addr, kr);
    debug4k_panic_on_misaligned_sharing = saved_debug4k_panic_on_misaligned_sharing;

    printf("VM_TEST_4K:\n");

    remap_src_addr = alloc1_addr;
    remap_src_size = alloc1_size + alloc2_size;
    printf("VM_TEST_4K: vm_remap(%p, 0x%lx, 0x%lx, %p, copy=1) from 4K to 16K\n", test_map, remap_src_addr, remap_src_size, kernel_map);
    kr = vm_remap_external(kernel_map,
        &remap_dst_addr,
        remap_src_size,
        0,
        VM_FLAGS_ANYWHERE | VM_FLAGS_RETURN_DATA_ADDR,
        test_map,
        remap_src_addr,
        TRUE,
        &cur_prot,
        &max_prot,
        VM_INHERIT_DEFAULT);
#if 0
    assertf(kr != KERN_SUCCESS, "kr = 0x%x", kr);
    printf("VM_TEST_4K: -> remap (COPY) in map %p at addr 0x%lx kr=0x%x\n", kernel_map, remap_dst_addr, kr);
#else
    assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);
    printf("VM_TEST_4K: -> remap (COPY) in map %p at addr 0x%lx kr=0x%x\n", kernel_map, remap_dst_addr, kr);
#endif

    printf("VM_TEST_4K:%d vm_map_remove(%p, 0x%llx, 0x%llx)...\n", __LINE__, test_map, test_map->min_offset, test_map->max_offset);
    kr = vm_map_remove(test_map,
        test_map->min_offset,
        test_map->max_offset,
        VM_MAP_REMOVE_GAPS_OK);
    assertf(kr == KERN_SUCCESS, "kr = 0x%x", kr);

    printf("VM_TEST_4K: PASS\n\n\n\n");
}
#endif /* PMAP_CREATE_FORCE_4K_PAGES && MACH_ASSERT */

#if MACH_ASSERT
static void
vm_test_map_copy_adjust_to_target_one(
    vm_map_copy_t copy_map,
    vm_map_t target_map)
{
    kern_return_t kr;
    vm_map_copy_t target_copy;
    vm_map_offset_t overmap_start, overmap_end, trimmed_start;

    target_copy = VM_MAP_COPY_NULL;
    /* size is 2 (4k) pages but range covers 3 pages */
    kr = vm_map_copy_adjust_to_target(copy_map,
        0x800,
        0x2000,
        target_map,
        FALSE,
        &target_copy,
        &overmap_start,
        &overmap_end,
        &trimmed_start);
    assert(kr == KERN_SUCCESS);
    assert(overmap_start == 0);
    assert(overmap_end == 0);
    assert(trimmed_start == 0);
    assertf(target_copy->size == 0x3000,
        "target_copy %p size 0x%llx\n",
        target_copy, (uint64_t)target_copy->size);
    vm_map_copy_discard(target_copy);
}

/* 1. adjust_to_target() for bad offset -> error */
/* 2. adjust_to_target() for bad size -> error */
/* 3. adjust_to_target() for the whole thing -> unchanged (sketched below) */
/* 4. adjust_to_target() to trim start by less than 1 page */
/* 5. adjust_to_target() to trim end by less than 1 page */
/* 6. adjust_to_target() to trim start and end by less than 1 page */
/* 7. adjust_to_target() to trim start by more than 1 page */
/* 8. adjust_to_target() to trim end by more than 1 page */
/* 9. adjust_to_target() to trim start and end by more than 1 page */
/* 10. adjust_to_target() to trim start by more than 1 entry */
/* 11. adjust_to_target() to trim end by more than 1 entry */
/* 12. adjust_to_target() to trim start and end by more than 1 entry */
/* 13. adjust_to_target() to trim start and end down to 1 entry */
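
/*
 * A minimal sketch of scenario 3 above ("adjust to the whole thing ->
 * unchanged"), written against the declaration of
 * vm_map_copy_adjust_to_target() at the top of this file.  The expected
 * out-values assume the copy's boundaries are already aligned for the
 * target map; this helper is illustrative and is not called by
 * vm_test_map_copy_adjust_to_target() below.
 */
static void __unused
vm_test_map_copy_adjust_to_target_whole(
    vm_map_copy_t copy_map,
    vm_map_t target_map)
{
    kern_return_t kr;
    vm_map_copy_t target_copy = VM_MAP_COPY_NULL;
    vm_map_offset_t overmap_start, overmap_end, trimmed_start;

    /* ask for the entire copy map, starting at offset 0 */
    kr = vm_map_copy_adjust_to_target(copy_map,
        0,
        copy_map->size,
        target_map,
        FALSE,
        &target_copy,
        &overmap_start,
        &overmap_end,
        &trimmed_start);
    assert(kr == KERN_SUCCESS);
    /* an exact, aligned fit: nothing overmapped or trimmed */
    assert(overmap_start == 0);
    assert(overmap_end == 0);
    assert(trimmed_start == 0);
    assert(target_copy->size == copy_map->size);
    vm_map_copy_discard(target_copy);
}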

static void
vm_test_map_copy_adjust_to_target(void)
{
    kern_return_t kr;
    vm_map_t map4k, map16k;
    vm_object_t obj1, obj2, obj3, obj4;
    vm_map_offset_t addr4k, addr16k;
    vm_map_size_t size4k, size16k;
    vm_map_copy_t copy4k, copy16k;
    vm_prot_t curprot, maxprot;

    /* create a 4k map */
    map4k = vm_map_create(PMAP_NULL, 0, (uint32_t)-1, TRUE);
    vm_map_set_page_shift(map4k, 12);

    /* create a 16k map */
    map16k = vm_map_create(PMAP_NULL, 0, (uint32_t)-1, TRUE);
    vm_map_set_page_shift(map16k, 14);

    /* create 4 VM objects */
    obj1 = vm_object_allocate(0x100000);
    obj2 = vm_object_allocate(0x100000);
    obj3 = vm_object_allocate(0x100000);
    obj4 = vm_object_allocate(0x100000);

    /* map objects in 4k map */
    vm_object_reference(obj1);
    addr4k = 0x1000;
    size4k = 0x3000;
    kr = vm_map_enter(map4k, &addr4k, size4k, 0, VM_FLAGS_ANYWHERE,
        VM_MAP_KERNEL_FLAGS_NONE, 0, obj1, 0,
        FALSE, VM_PROT_DEFAULT, VM_PROT_DEFAULT,
        VM_INHERIT_DEFAULT);
    assert(kr == KERN_SUCCESS);
    assert(addr4k == 0x1000);

    /* map objects in 16k map */
    vm_object_reference(obj1);
    addr16k = 0x4000;
    size16k = 0x8000;
    kr = vm_map_enter(map16k, &addr16k, size16k, 0, VM_FLAGS_ANYWHERE,
        VM_MAP_KERNEL_FLAGS_NONE, 0, obj1, 0,
        FALSE, VM_PROT_DEFAULT, VM_PROT_DEFAULT,
        VM_INHERIT_DEFAULT);
    assert(kr == KERN_SUCCESS);
    assert(addr16k == 0x4000);

    /* test for <rdar://60959809> */
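    /*
     * Size arithmetic: addr16k + 0x2fff for 0x1002 bytes spans
     * [0x2fff, 0x4001) within the mapping, so in the 16K map the entry
     * runs to the end of the second 16K page and, measured from the
     * data address, is 0x8000 - 0x2fff = 0x5001 bytes.  Mapped with 4K
     * pages, the same data range rounds out to [0x2000, 0x5000),
     * i.e. 0x3000 bytes.
     */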
    ipc_port_t mem_entry;
    memory_object_size_t mem_entry_size;
    mach_vm_size_t map_size;
    mem_entry_size = 0x1002;
    mem_entry = IPC_PORT_NULL;
    kr = mach_make_memory_entry_64(map16k, &mem_entry_size, addr16k + 0x2fff,
        MAP_MEM_VM_SHARE | MAP_MEM_USE_DATA_ADDR | VM_PROT_READ,
        &mem_entry, IPC_PORT_NULL);
    assertf(kr == KERN_SUCCESS, "kr 0x%x\n", kr);
    assertf(mem_entry_size == 0x5001, "mem_entry_size 0x%llx\n", (uint64_t) mem_entry_size);

    kr = mach_memory_entry_map_size(mem_entry, map4k, 0, 0x1002, &map_size);
    assertf(kr == KERN_SUCCESS, "kr 0x%x\n", kr);
    assertf(map_size == 0x3000, "mem_entry %p map_size 0x%llx\n", mem_entry, (uint64_t)map_size);
    mach_memory_entry_port_release(mem_entry);

    /* create 4k copy map */
    kr = vm_map_copy_extract(map4k, addr4k, 0x3000,
        FALSE,
        &copy4k, &curprot, &maxprot,
        VM_INHERIT_DEFAULT, VM_MAP_KERNEL_FLAGS_NONE);
    assert(kr == KERN_SUCCESS);
    assert(copy4k->size == 0x3000);

    /* create 16k copy map */
    kr = vm_map_copy_extract(map16k, addr16k, 0x4000,
        FALSE,
        &copy16k, &curprot, &maxprot,
        VM_INHERIT_DEFAULT, VM_MAP_KERNEL_FLAGS_NONE);
    assert(kr == KERN_SUCCESS);
    assert(copy16k->size == 0x4000);

    /* test each combination */
//  vm_test_map_copy_adjust_to_target_one(copy4k, map4k);
//  vm_test_map_copy_adjust_to_target_one(copy16k, map16k);
//  vm_test_map_copy_adjust_to_target_one(copy4k, map16k);
    vm_test_map_copy_adjust_to_target_one(copy16k, map4k);

    /* assert 1 ref on 4k map */
    assert(os_ref_get_count(&map4k->map_refcnt) == 1);
    /* release 4k map */
    vm_map_deallocate(map4k);
    /* assert 1 ref on 16k map */
    assert(os_ref_get_count(&map16k->map_refcnt) == 1);
    /* release 16k map */
    vm_map_deallocate(map16k);
    /* deallocate copy maps */
    vm_map_copy_discard(copy4k);
    vm_map_copy_discard(copy16k);
    /* assert 1 ref on all VM objects */
    assert(obj1->ref_count == 1);
    assert(obj2->ref_count == 1);
    assert(obj3->ref_count == 1);
    assert(obj4->ref_count == 1);
    /* release all VM objects */
    vm_object_deallocate(obj1);
    vm_object_deallocate(obj2);
    vm_object_deallocate(obj3);
    vm_object_deallocate(obj4);
}
#endif /* MACH_ASSERT */

boolean_t vm_tests_in_progress = FALSE;

kern_return_t
vm_tests(void)
{
    vm_tests_in_progress = TRUE;

    vm_test_collapse_compressor();
    vm_test_wire_and_extract();
    vm_test_page_wire_overflow_panic();
    vm_test_kernel_object_fault();
    vm_test_device_pager_transpose();
#if MACH_ASSERT
    vm_test_map_copy_adjust_to_target();
#endif /* MACH_ASSERT */
#if PMAP_CREATE_FORCE_4K_PAGES && MACH_ASSERT
    vm_test_4k();
#endif /* PMAP_CREATE_FORCE_4K_PAGES && MACH_ASSERT */

    vm_tests_in_progress = FALSE;

    return KERN_SUCCESS;
}