/*
 * Mach vm map miscellaneous unit tests
 *
 * This test program serves as a regression test suite for legacy
 * vm issues; ideally each test is linked to a radar number and
 * performs a set of certain validations.
 */
#include <darwintest.h>

#include <errno.h>
#include <ptrauth.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <time.h>

#include <mach/mach_error.h>
#include <mach/mach_init.h>
#include <mach/mach_port.h>
#include <mach/mach_vm.h>
#include <mach/vm_map.h>
#include <mach/task.h>
#include <mach/task_info.h>
#include <mach/shared_region.h>
#include <machine/cpu_capabilities.h>

T_GLOBAL_META(T_META_NAMESPACE("xnu.vm"),
    T_META_RUN_CONCURRENTLY(true));
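
/*
 * rdar://problem/23334087
 * Allocate a region out of chunks carrying distinct VM tags, wrap it in
 * a memory entry (copied or shared, picked pseudo-randomly), map that
 * entry back in, and verify that each chunk of the new mapping reports
 * the expected user_tag, optionally overridden at mach_vm_map() time.
 */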
static void
test_memory_entry_tagging(int override_tag)
{
	kern_return_t kr;
	int i;
	int do_copy;
	int vm_flags;
	int expected_tag;
	unsigned int depth;
	mach_vm_address_t vmaddr_orig, vmaddr_shared, vmaddr_copied;
	mach_vm_size_t vmsize_orig, vmsize_shared, vmsize_copied;
	mach_vm_address_t *vmaddr_ptr;
	mach_vm_size_t *vmsize_ptr;
	mach_vm_address_t vmaddr_chunk;
	mach_vm_size_t vmsize_chunk;
	mach_vm_offset_t vmoff;
	mach_port_t mem_entry_copied, mem_entry_shared;
	mach_port_t *mem_entry_ptr;
	vm_region_submap_short_info_data_64_t ri;
	mach_msg_type_number_t ri_count;
	vmaddr_orig = 0;
	vmaddr_copied = 0;
	vmaddr_shared = 0;
	vmsize_chunk = 16 * 1024;
	vmsize_orig = 3 * vmsize_chunk;
	mem_entry_copied = MACH_PORT_NULL;
	mem_entry_shared = MACH_PORT_NULL;

	kr = mach_vm_allocate(mach_task_self(),
	    &vmaddr_orig,
	    vmsize_orig,
	    VM_FLAGS_ANYWHERE);
	T_QUIET;
	T_EXPECT_MACH_SUCCESS(kr, "[override_tag:%d] vm_allocate(%lld)",
	    override_tag, vmsize_orig);
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	for (i = 0; i < vmsize_orig / vmsize_chunk; i++) {
		vmaddr_chunk = vmaddr_orig + (i * vmsize_chunk);
		kr = mach_vm_allocate(mach_task_self(),
		    &vmaddr_chunk,
		    vmsize_chunk,
		    (VM_FLAGS_FIXED |
		    VM_FLAGS_OVERWRITE |
		    VM_MAKE_TAG(100 + i)));
		T_QUIET;
		T_EXPECT_MACH_SUCCESS(kr, "[override_tag:%d] vm_allocate(%lld)",
		    override_tag, vmsize_chunk);
		if (T_RESULT == T_RESULT_FAIL) {
			goto done;
		}
	}

	/* fault in all the pages */
	for (vmoff = 0; vmoff < vmsize_orig; vmoff += vm_kernel_page_size) {
		*((unsigned char *)(uintptr_t)(vmaddr_orig + vmoff)) = 'x';
	}

	/* pick "copy" or "share" pseudo-randomly */
	do_copy = time(NULL) & 1;
	*((unsigned char *)(uintptr_t)vmaddr_orig) = 'x';
	if (do_copy) {
		mem_entry_ptr = &mem_entry_copied;
		vmsize_copied = vmsize_orig;
		vmsize_ptr = &vmsize_copied;
		vmaddr_copied = 0;
		vmaddr_ptr = &vmaddr_copied;
		vm_flags = MAP_MEM_VM_COPY;
	} else {
		mem_entry_ptr = &mem_entry_shared;
		vmsize_shared = vmsize_orig;
		vmsize_ptr = &vmsize_shared;
		vmaddr_shared = 0;
		vmaddr_ptr = &vmaddr_shared;
		vm_flags = MAP_MEM_VM_SHARE;
	}
	kr = mach_make_memory_entry_64(mach_task_self(),
	    vmsize_ptr,
	    vmaddr_orig, /* offset */
	    (vm_flags |
	    VM_PROT_READ | VM_PROT_WRITE),
	    mem_entry_ptr,
	    MACH_PORT_NULL);
	T_QUIET;
	T_EXPECT_MACH_SUCCESS(kr, "[override_tag:%d][do_copy:%d] mach_make_memory_entry()",
	    override_tag, do_copy);
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	T_QUIET;
	T_EXPECT_EQ(*vmsize_ptr, vmsize_orig, "[override_tag:%d][do_copy:%d] vmsize (0x%llx) != vmsize_orig (0x%llx)",
	    override_tag, do_copy, (uint64_t) *vmsize_ptr, (uint64_t) vmsize_orig);
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	T_QUIET;
	T_EXPECT_NOTNULL(*mem_entry_ptr, "[override_tag:%d][do_copy:%d] mem_entry == 0x%x",
	    override_tag, do_copy, *mem_entry_ptr);
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}
	if (override_tag) {
		vm_flags = VM_MAKE_TAG(200);
	} else {
		vm_flags = 0;
	}
	*vmaddr_ptr = 0;
	kr = mach_vm_map(mach_task_self(),
	    vmaddr_ptr,
	    vmsize_orig,
	    0, /* mask */
	    vm_flags | VM_FLAGS_ANYWHERE,
	    *mem_entry_ptr,
	    0, /* offset */
	    FALSE, /* copy */
	    VM_PROT_READ | VM_PROT_WRITE,
	    VM_PROT_READ | VM_PROT_WRITE,
	    VM_INHERIT_DEFAULT);
	T_QUIET;
	T_EXPECT_MACH_SUCCESS(kr, "[override_tag:%d][do_copy:%d] mach_vm_map()",
	    override_tag, do_copy);
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}
	*((unsigned char *)(uintptr_t)vmaddr_orig) = 'X';
	if (*(unsigned char *)(uintptr_t)*vmaddr_ptr == 'X') {
		/* the new mapping sees the write: memory was shared */
		T_QUIET;
		T_EXPECT_EQ(do_copy, 0, "[override_tag:%d][do_copy:%d] memory shared instead of copied",
		    override_tag, do_copy);
		if (T_RESULT == T_RESULT_FAIL) {
			goto done;
		}
	} else {
		/* the new mapping still sees the old value: memory was copied */
		T_QUIET;
		T_EXPECT_NE(do_copy, 0, "[override_tag:%d][do_copy:%d] memory copied instead of shared",
		    override_tag, do_copy);
		if (T_RESULT == T_RESULT_FAIL) {
			goto done;
		}
	}
	for (i = 0; i < vmsize_orig / vmsize_chunk; i++) {
		mach_vm_address_t vmaddr_info;
		mach_vm_size_t vmsize_info;

		vmaddr_info = *vmaddr_ptr + (i * vmsize_chunk);
		vmsize_info = 0;
		depth = 0;
		ri_count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
		kr = mach_vm_region_recurse(mach_task_self(),
		    &vmaddr_info,
		    &vmsize_info,
		    &depth,
		    (vm_region_recurse_info_t) &ri,
		    &ri_count);
		T_QUIET;
		T_EXPECT_MACH_SUCCESS(kr, "[override_tag:%d][do_copy:%d] mach_vm_region_recurse(0x%llx+0x%llx)",
		    override_tag, do_copy, *vmaddr_ptr, i * vmsize_chunk);
		if (T_RESULT == T_RESULT_FAIL) {
			goto done;
		}

		T_QUIET;
		T_EXPECT_EQ(vmaddr_info, *vmaddr_ptr + (i * vmsize_chunk), "[override_tag:%d][do_copy:%d] mach_vm_region_recurse(0x%llx+0x%llx) returned addr 0x%llx",
		    override_tag, do_copy, *vmaddr_ptr, i * vmsize_chunk, vmaddr_info);
		if (T_RESULT == T_RESULT_FAIL) {
			goto done;
		}

		T_QUIET;
		T_EXPECT_EQ(vmsize_info, vmsize_chunk, "[override_tag:%d][do_copy:%d] mach_vm_region_recurse(0x%llx+0x%llx) returned size 0x%llx expected 0x%llx",
		    override_tag, do_copy, *vmaddr_ptr, i * vmsize_chunk, vmsize_info, vmsize_chunk);
		if (T_RESULT == T_RESULT_FAIL) {
			goto done;
		}

		if (override_tag) {
			expected_tag = 200;
		} else {
			expected_tag = 100 + i;
		}
		T_QUIET;
		T_EXPECT_EQ(ri.user_tag, expected_tag, "[override_tag:%d][do_copy:%d] i=%d tag=%d expected %d",
		    override_tag, do_copy, i, ri.user_tag, expected_tag);
		if (T_RESULT == T_RESULT_FAIL) {
			goto done;
		}
	}
done:
	if (vmaddr_orig != 0) {
		mach_vm_deallocate(mach_task_self(),
		    vmaddr_orig,
		    vmsize_orig);
		vmaddr_orig = 0;
	}
	if (vmaddr_copied != 0) {
		mach_vm_deallocate(mach_task_self(),
		    vmaddr_copied,
		    vmsize_copied);
		vmaddr_copied = 0;
	}
	if (vmaddr_shared != 0) {
		mach_vm_deallocate(mach_task_self(),
		    vmaddr_shared,
		    vmsize_shared);
		vmaddr_shared = 0;
	}
	if (mem_entry_copied != MACH_PORT_NULL) {
		mach_port_deallocate(mach_task_self(), mem_entry_copied);
		mem_entry_copied = MACH_PORT_NULL;
	}
	if (mem_entry_shared != MACH_PORT_NULL) {
		mach_port_deallocate(mach_task_self(), mem_entry_shared);
		mem_entry_shared = MACH_PORT_NULL;
	}
}
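
/*
 * rdar://problem/22611816
 * Map a MAP_MEM_VM_COPY memory entry of a small allocation and verify
 * copy semantics: both mappings initially read the same value, and a
 * write through the new mapping must not be visible through the original.
 */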
static void
test_map_memory_entry(void)
{
	kern_return_t kr;
	mach_vm_address_t vmaddr1, vmaddr2;
	mach_vm_size_t vmsize1, vmsize2;
	mach_port_t mem_entry;
	unsigned char *cp1, *cp2;

	vmaddr1 = 0;
	vmsize1 = 1;
	vmaddr2 = 0;
	vmsize2 = 0;
	mem_entry = MACH_PORT_NULL;
	kr = mach_vm_allocate(mach_task_self(),
	    &vmaddr1,
	    vmsize1,
	    VM_FLAGS_ANYWHERE);
	T_QUIET;
	T_EXPECT_MACH_SUCCESS(kr, "vm_allocate(%lld)", vmsize1);
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	cp1 = (unsigned char *)(uintptr_t)vmaddr1;
	*cp1 = '1';

	vmsize2 = vmsize1;
	mem_entry = MACH_PORT_NULL;
	kr = mach_make_memory_entry_64(mach_task_self(),
	    &vmsize2,
	    vmaddr1, /* offset */
	    (MAP_MEM_VM_COPY |
	    VM_PROT_READ | VM_PROT_WRITE),
	    &mem_entry,
	    MACH_PORT_NULL);
	T_QUIET;
	T_EXPECT_MACH_SUCCESS(kr, "mach_make_memory_entry()");
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}
	T_QUIET;
	T_EXPECT_GE(vmsize2, vmsize1, "vmsize2 (0x%llx) < vmsize1 (0x%llx)",
	    (uint64_t) vmsize2, (uint64_t) vmsize1);
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	T_QUIET;
	T_EXPECT_NOTNULL(mem_entry, "mem_entry == 0x%x", mem_entry);
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	vmaddr2 = 0;
	kr = mach_vm_map(mach_task_self(),
	    &vmaddr2,
	    vmsize2,
	    0, /* mask */
	    VM_FLAGS_ANYWHERE,
	    mem_entry,
	    0, /* offset */
	    FALSE, /* copy */
	    VM_PROT_READ | VM_PROT_WRITE,
	    VM_PROT_READ | VM_PROT_WRITE,
	    VM_INHERIT_DEFAULT);
	T_QUIET;
	T_EXPECT_MACH_SUCCESS(kr, "mach_vm_map()");
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}
	cp2 = (unsigned char *)(uintptr_t)vmaddr2;
	T_QUIET;
	T_EXPECT_TRUE(((*cp1 == '1') && (*cp2 == '1')), "*cp1/*cp2 0x%x/0x%x expected 0x%x/0x%x",
	    *cp1, *cp2, '1', '1');
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	/* writing through the new mapping must not alter the original */
	*cp2 = '2';
	T_QUIET;
	T_EXPECT_TRUE(((*cp1 == '1') && (*cp2 == '2')), "*cp1/*cp2 0x%x/0x%x expected 0x%x/0x%x",
	    *cp1, *cp2, '1', '2');
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}
done:
	if (vmaddr1 != 0) {
		mach_vm_deallocate(mach_task_self(), vmaddr1, vmsize1);
		vmaddr1 = 0;
	}
	if (vmaddr2 != 0) {
		mach_vm_deallocate(mach_task_self(), vmaddr2, vmsize2);
		vmaddr2 = 0;
	}
	if (mem_entry != MACH_PORT_NULL) {
		mach_port_deallocate(mach_task_self(), mem_entry);
		mem_entry = MACH_PORT_NULL;
	}
}
T_DECL(memory_entry_tagging, "test mem entry tag for rdar://problem/23334087 \
    VM memory tags should be propagated through memory entries",
    T_META_ALL_VALID_ARCHS(true))
{
	test_memory_entry_tagging(0);
	test_memory_entry_tagging(1);
}

T_DECL(map_memory_entry, "test mapping mem entry for rdar://problem/22611816 \
    mach_make_memory_entry(MAP_MEM_VM_COPY) should never use a KERNEL_BUFFER \
    copy", T_META_ALL_VALID_ARCHS(true))
{
	test_map_memory_entry();
}
static char *vm_purgable_state[4] = { "NONVOLATILE", "VOLATILE", "EMPTY", "DENY" };
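
/*
 * Helper: return the task's current physical footprint, as reported by
 * task_info(TASK_VM_INFO).
 */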
static uint64_t
task_footprint(void)
{
	task_vm_info_data_t ti;
	kern_return_t kr;
	mach_msg_type_number_t count;

	count = TASK_VM_INFO_COUNT;
	kr = task_info(mach_task_self(),
	    TASK_VM_INFO,
	    (task_info_t) &ti,
	    &count);
	T_QUIET;
	T_ASSERT_MACH_SUCCESS(kr, "task_info()");
#if defined(__arm64__) || defined(__arm__)
	T_QUIET;
	T_ASSERT_EQ(count, TASK_VM_INFO_COUNT, "task_info() count = %d (expected %d)",
	    count, TASK_VM_INFO_COUNT);
#endif /* defined(__arm64__) || defined(__arm__) */
	return ti.phys_footprint;
}
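
/*
 * Walk a 1MB purgeable allocation through NONVOLATILE -> VOLATILE ->
 * EMPTY -> VOLATILE -> NONVOLATILE (with half of it mlock()ed part of
 * the way) and log the task's physical footprint at each step; only
 * warn on mismatches, since memory pressure can skew the numbers.
 */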
T_DECL(purgeable_empty_to_volatile, "test task physical footprint when \
    emptying, volatilizing purgeable vm")
{
	kern_return_t kr;
	mach_vm_address_t vm_addr;
	mach_vm_size_t vm_size;
	char *cp;
	int ret;
	int state;
	uint64_t footprint[8];

	vm_size = 1 * 1024 * 1024;
	T_LOG("--> allocate %llu bytes", vm_size);
	vm_addr = 0;
	kr = mach_vm_allocate(mach_task_self(),
	    &vm_addr,
	    vm_size,
	    VM_FLAGS_ANYWHERE | VM_FLAGS_PURGABLE);
	T_ASSERT_MACH_SUCCESS(kr, "vm_allocate()");
	footprint[0] = task_footprint();
	T_LOG(" footprint[0] = %llu", footprint[0]);

	T_LOG("--> access %llu bytes", vm_size);
	for (cp = (char *) vm_addr;
	    cp < (char *) (vm_addr + vm_size);
	    cp += vm_kernel_page_size) {
		*cp = 'x';
	}
	/* footprint1 == footprint0 + vm_size */
	footprint[1] = task_footprint();
	T_LOG(" footprint[1] = %llu", footprint[1]);
	if (footprint[1] != footprint[0] + vm_size) {
		T_LOG("WARN: footprint[1] != footprint[0] + vm_size");
	}
	T_LOG("--> wire %llu bytes", vm_size / 2);
	ret = mlock((char *)vm_addr, (size_t) (vm_size / 2));
	T_ASSERT_POSIX_SUCCESS(ret, "mlock()");

	/* footprint2 == footprint1 */
	footprint[2] = task_footprint();
	T_LOG(" footprint[2] = %llu", footprint[2]);
	if (footprint[2] != footprint[1]) {
		T_LOG("WARN: footprint[2] != footprint[1]");
	}
	T_LOG("--> VOLATILE");
	state = VM_PURGABLE_VOLATILE;
	kr = mach_vm_purgable_control(mach_task_self(),
	    vm_addr,
	    VM_PURGABLE_SET_STATE,
	    &state);
	T_ASSERT_MACH_SUCCESS(kr, "vm_purgable_control(VOLATILE)");
	T_ASSERT_EQ(state, VM_PURGABLE_NONVOLATILE, "NONVOLATILE->VOLATILE: state was %s",
	    vm_purgable_state[state]);
	/* footprint3 == footprint2 - (vm_size / 2) */
	footprint[3] = task_footprint();
	T_LOG(" footprint[3] = %llu", footprint[3]);
	if (footprint[3] != footprint[2] - (vm_size / 2)) {
		T_LOG("WARN: footprint[3] != footprint[2] - (vm_size / 2)");
	}
	T_LOG("--> EMPTY");
	state = VM_PURGABLE_EMPTY;
	kr = mach_vm_purgable_control(mach_task_self(),
	    vm_addr,
	    VM_PURGABLE_SET_STATE,
	    &state);
	T_ASSERT_MACH_SUCCESS(kr, "vm_purgable_control(EMPTY)");
	if (state != VM_PURGABLE_VOLATILE &&
	    state != VM_PURGABLE_EMPTY) {
		T_ASSERT_FAIL("VOLATILE->EMPTY: state was %s",
		    vm_purgable_state[state]);
	}
	/* footprint4 == footprint3 */
	footprint[4] = task_footprint();
	T_LOG(" footprint[4] = %llu", footprint[4]);
	if (footprint[4] != footprint[3]) {
		T_LOG("WARN: footprint[4] != footprint[3]");
	}
	T_LOG("--> unwire %llu bytes", vm_size / 2);
	ret = munlock((char *)vm_addr, (size_t) (vm_size / 2));
	T_ASSERT_POSIX_SUCCESS(ret, "munlock()");

	/* footprint5 == footprint4 - (vm_size/2) (unless memory pressure) */
	/* footprint5 == footprint0 */
	footprint[5] = task_footprint();
	T_LOG(" footprint[5] = %llu", footprint[5]);
	if (footprint[5] != footprint[4] - (vm_size / 2)) {
		T_LOG("WARN: footprint[5] != footprint[4] - (vm_size/2)");
	}
	if (footprint[5] != footprint[0]) {
		T_LOG("WARN: footprint[5] != footprint[0]");
	}
	T_LOG("--> VOLATILE");
	state = VM_PURGABLE_VOLATILE;
	kr = mach_vm_purgable_control(mach_task_self(),
	    vm_addr,
	    VM_PURGABLE_SET_STATE,
	    &state);
	T_ASSERT_MACH_SUCCESS(kr, "vm_purgable_control(VOLATILE)");
	T_ASSERT_EQ(state, VM_PURGABLE_EMPTY, "EMPTY->VOLATILE: state == %s",
	    vm_purgable_state[state]);
	/* footprint6 == footprint5 */
	/* footprint6 == footprint0 */
	footprint[6] = task_footprint();
	T_LOG(" footprint[6] = %llu", footprint[6]);
	if (footprint[6] != footprint[5]) {
		T_LOG("WARN: footprint[6] != footprint[5]");
	}
	if (footprint[6] != footprint[0]) {
		T_LOG("WARN: footprint[6] != footprint[0]");
	}
	T_LOG("--> NONVOLATILE");
	state = VM_PURGABLE_NONVOLATILE;
	kr = mach_vm_purgable_control(mach_task_self(),
	    vm_addr,
	    VM_PURGABLE_SET_STATE,
	    &state);
	T_ASSERT_MACH_SUCCESS(kr, "vm_purgable_control(NONVOLATILE)");
	T_ASSERT_EQ(state, VM_PURGABLE_EMPTY, "EMPTY->NONVOLATILE: state == %s",
	    vm_purgable_state[state]);
	/* footprint7 == footprint6 */
	/* footprint7 == footprint0 */
	footprint[7] = task_footprint();
	T_LOG(" footprint[7] = %llu", footprint[7]);
	if (footprint[7] != footprint[6]) {
		T_LOG("WARN: footprint[7] != footprint[6]");
	}
	if (footprint[7] != footprint[0]) {
		T_LOG("WARN: footprint[7] != footprint[0]");
	}
}
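
/*
 * rdar://problem/2295713
 * Share a 10MB region via vm_remap(), verify both mappings see each
 * other's writes, then madvise(MADV_FREE_REUSABLE) one mapping and
 * check that TASK_VM_INFO reports both mappings' pages as reusable.
 */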
T_DECL(madvise_shared, "test madvise shared for rdar://problem/2295713 logging \
    rethink needs madvise(MADV_FREE_HARDER)",
    T_META_ALL_VALID_ARCHS(true))
{
	vm_address_t vmaddr = 0, vmaddr2 = 0;
	vm_size_t vmsize;
	kern_return_t kr;
	char *cp;
	vm_prot_t curprot, maxprot;
	int ret;
	task_vm_info_data_t ti;
	mach_msg_type_number_t ti_count;

	vmsize = 10 * 1024 * 1024; /* 10MB */
	kr = vm_allocate(mach_task_self(),
	    &vmaddr,
	    vmsize,
	    VM_FLAGS_ANYWHERE);
	T_QUIET;
	T_EXPECT_MACH_SUCCESS(kr, "vm_allocate()");
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}
	for (cp = (char *)(uintptr_t)vmaddr;
	    cp < (char *)(uintptr_t)(vmaddr + vmsize);
	    cp++) {
		*cp = 'x';
	}

	kr = vm_remap(mach_task_self(),
	    &vmaddr2,
	    vmsize,
	    0, /* mask */
	    VM_FLAGS_ANYWHERE,
	    mach_task_self(),
	    vmaddr,
	    FALSE, /* copy */
	    &curprot,
	    &maxprot,
	    VM_INHERIT_DEFAULT);
	T_QUIET;
	T_EXPECT_MACH_SUCCESS(kr, "vm_remap()");
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}
	for (cp = (char *)(uintptr_t)vmaddr2;
	    cp < (char *)(uintptr_t)(vmaddr2 + vmsize);
	    cp++) {
		T_QUIET;
		T_EXPECT_EQ(*cp, 'x', "vmaddr=%p vmaddr2=%p %p:0x%x",
		    (void *)(uintptr_t)vmaddr,
		    (void *)(uintptr_t)vmaddr2,
		    (void *)cp, *cp);
		if (T_RESULT == T_RESULT_FAIL) {
			goto done;
		}
	}

	cp = (char *)(uintptr_t)vmaddr;
	*cp = 'X';
	cp = (char *)(uintptr_t)vmaddr2;
	T_QUIET;
	T_EXPECT_EQ(*cp, 'X', "memory was not properly shared");
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}
#if defined(__x86_64__) || defined(__i386__)
	if (*((uint64_t *)_COMM_PAGE_CPU_CAPABILITIES64) & kIsTranslated) {
		T_LOG("Skipping madvise reusable tests because we're running under translation.");
		goto done;
	}
#endif /* defined(__x86_64__) || defined(__i386__) */
	ret = madvise((char *)(uintptr_t)vmaddr,
	    vmsize,
	    MADV_FREE_REUSABLE);
	T_QUIET;
	T_EXPECT_POSIX_SUCCESS(ret, "madvise()");
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}
	ti_count = TASK_VM_INFO_COUNT;
	kr = task_info(mach_task_self(),
	    TASK_VM_INFO,
	    (task_info_t) &ti,
	    &ti_count);
	T_QUIET;
	T_EXPECT_MACH_SUCCESS(kr, "task_info()");
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

	T_QUIET;
	T_EXPECT_EQ(ti.reusable, 2ULL * vmsize, "ti.reusable=%lld expected %lld",
	    ti.reusable, (uint64_t)(2 * vmsize));
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}
done:
	if (vmaddr != 0) {
		vm_deallocate(mach_task_self(), vmaddr, vmsize);
		vmaddr = 0;
	}
	if (vmaddr2 != 0) {
		vm_deallocate(mach_task_self(), vmaddr2, vmsize);
		vmaddr2 = 0;
	}
}
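
/*
 * rdar://problem/37476183
 * madvise(MADV_CAN_REUSE) on a purgeable malloc-tagged region must be
 * rejected with EINVAL: purgeable memory may not become eligible for
 * reuse.
 */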
T_DECL(madvise_purgeable_can_reuse, "test madvise purgeable can reuse for \
    rdar://problem/37476183 Preview Footprint memory regressions ~100MB \
    [ purgeable_malloc became eligible for reuse ]",
    T_META_ALL_VALID_ARCHS(true))
{
#if defined(__x86_64__) || defined(__i386__)
	if (*((uint64_t *)_COMM_PAGE_CPU_CAPABILITIES64) & kIsTranslated) {
		T_SKIP("madvise reusable is not supported under Rosetta translation. Skipping.");
	}
#endif /* defined(__x86_64__) || defined(__i386__) */
	vm_address_t vmaddr = 0;
	vm_size_t vmsize;
	kern_return_t kr;
	char *cp;
	int ret;

	vmsize = 10 * 1024 * 1024; /* 10MB */
	kr = vm_allocate(mach_task_self(),
	    &vmaddr,
	    vmsize,
	    (VM_FLAGS_ANYWHERE |
	    VM_FLAGS_PURGABLE |
	    VM_MAKE_TAG(VM_MEMORY_MALLOC)));
	T_QUIET;
	T_EXPECT_MACH_SUCCESS(kr, "vm_allocate()");
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}
	for (cp = (char *)(uintptr_t)vmaddr;
	    cp < (char *)(uintptr_t)(vmaddr + vmsize);
	    cp++) {
		*cp = 'x';
	}

	ret = madvise((char *)(uintptr_t)vmaddr,
	    vmsize,
	    MADV_CAN_REUSE);
	T_QUIET;
	T_EXPECT_TRUE(((ret == -1) && (errno == EINVAL)), "madvise(): purgeable vm can't be advised to reuse");
	if (T_RESULT == T_RESULT_FAIL) {
		goto done;
	}

done:
	if (vmaddr != 0) {
		vm_deallocate(mach_task_self(), vmaddr, vmsize);
		vmaddr = 0;
	}
}
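
/*
 * Fill a source and a destination buffer with distinct patterns, then
 * mach_vm_read_overwrite() the interior of the source over the interior
 * of the destination and check that only the interior was overwritten.
 */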
#define DEST_PATTERN 0xFEDCBA98

T_DECL(map_read_overwrite, "test overwriting vm map from other map",
    T_META_ALL_VALID_ARCHS(true))
{
	kern_return_t kr;
	mach_vm_address_t vmaddr1, vmaddr2;
	mach_vm_size_t vmsize1, vmsize2;
	int *ip;
	int i;

	vmaddr1 = 0;
	vmsize1 = 4 * 4096;
	kr = mach_vm_allocate(mach_task_self(),
	    &vmaddr1,
	    vmsize1,
	    VM_FLAGS_ANYWHERE);
	T_ASSERT_MACH_SUCCESS(kr, "vm_allocate()");
	ip = (int *)(uintptr_t)vmaddr1;
	for (i = 0; i < vmsize1 / sizeof(*ip); i++) {
		ip[i] = i;
	}

	vmaddr2 = 0;
	kr = mach_vm_allocate(mach_task_self(),
	    &vmaddr2,
	    vmsize1,
	    VM_FLAGS_ANYWHERE);
	T_ASSERT_MACH_SUCCESS(kr, "vm_allocate()");

	ip = (int *)(uintptr_t)vmaddr2;
	for (i = 0; i < vmsize1 / sizeof(*ip); i++) {
		ip[i] = DEST_PATTERN;
	}
	vmsize2 = vmsize1 - 2 * (sizeof(*ip));
	kr = mach_vm_read_overwrite(mach_task_self(),
	    vmaddr1 + sizeof(*ip),
	    vmsize2,
	    vmaddr2 + sizeof(*ip),
	    &vmsize2);
	T_ASSERT_MACH_SUCCESS(kr, "vm_read_overwrite()");
	ip = (int *)(uintptr_t)vmaddr2;
	for (i = 0; i < 1; i++) {
		T_QUIET;
		T_ASSERT_EQ(ip[i], DEST_PATTERN, "vmaddr2[%d] = 0x%x instead of 0x%x",
		    i, ip[i], DEST_PATTERN);
	}
	for (; i < (vmsize1 - 2) / sizeof(*ip); i++) {
		T_QUIET;
		T_ASSERT_EQ(ip[i], i, "vmaddr2[%d] = 0x%x instead of 0x%x",
		    i, ip[i], i);
	}
	for (; i < vmsize1 / sizeof(*ip); i++) {
		T_QUIET;
		T_ASSERT_EQ(ip[i], DEST_PATTERN, "vmaddr2[%d] = 0x%x instead of 0x%x",
		    i, ip[i], DEST_PATTERN);
	}
}
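
/*
 * rdar://35610377
 * Chain two copy-on-write vm_remap() calls off a purgeable (COPY_NONE)
 * allocation; both remaps must succeed.
 */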
T_DECL(copy_none_use_pmap, "test copy-on-write remapping of COPY_NONE vm \
    objects - rdar://35610377",
    T_META_ALL_VALID_ARCHS(true))
{
	kern_return_t kr;
	mach_vm_address_t vmaddr1, vmaddr2, vmaddr3;
	mach_vm_size_t vmsize;
	vm_prot_t curprot, maxprot;

	vmsize = 32 * 1024 * 1024;

	vmaddr1 = 0;
	kr = mach_vm_allocate(mach_task_self(),
	    &vmaddr1,
	    vmsize,
	    VM_FLAGS_ANYWHERE | VM_FLAGS_PURGABLE);
	T_ASSERT_MACH_SUCCESS(kr, "vm_allocate()");

	memset((void *)(uintptr_t)vmaddr1, 'x', vmsize);

	vmaddr2 = 0;
	kr = mach_vm_remap(mach_task_self(),
	    &vmaddr2,
	    vmsize,
	    0, /* mask */
	    VM_FLAGS_ANYWHERE,
	    mach_task_self(),
	    vmaddr1,
	    TRUE, /* copy */
	    &curprot,
	    &maxprot,
	    VM_INHERIT_DEFAULT);
	T_ASSERT_MACH_SUCCESS(kr, "vm_remap() #1");

	vmaddr3 = 0;
	kr = mach_vm_remap(mach_task_self(),
	    &vmaddr3,
	    vmsize,
	    0, /* mask */
	    VM_FLAGS_ANYWHERE,
	    mach_task_self(),
	    vmaddr2,
	    TRUE, /* copy */
	    &curprot,
	    &maxprot,
	    VM_INHERIT_DEFAULT);
	T_ASSERT_MACH_SUCCESS(kr, "vm_remap() #2");
}
T_DECL(purgable_deny, "test purgeable memory is not allowed to be converted to \
    non-purgeable - rdar://31990033",
    T_META_ALL_VALID_ARCHS(true))
{
	kern_return_t kr;
	vm_address_t vmaddr;
	int state;

	vmaddr = 0;
	kr = vm_allocate(mach_task_self(), &vmaddr, 1,
	    VM_FLAGS_ANYWHERE | VM_FLAGS_PURGABLE);
	T_ASSERT_MACH_SUCCESS(kr, "vm_allocate()");

	state = VM_PURGABLE_DENY;
	kr = vm_purgable_control(mach_task_self(), vmaddr,
	    VM_PURGABLE_SET_STATE, &state);
	T_ASSERT_EQ(kr, KERN_INVALID_ARGUMENT,
	    "vm_purgable_control(VM_PURGABLE_DENY) -> 0x%x (%s)",
	    kr, mach_error_string(kr));

	kr = vm_deallocate(mach_task_self(), vmaddr, 1);
	T_ASSERT_MACH_SUCCESS(kr, "vm_deallocate()");
}
#define VMSIZE 0x10000

T_DECL(vm_remap_zero, "test vm map of zero size - rdar://33114981",
    T_META_ALL_VALID_ARCHS(true))
{
	kern_return_t kr;
	mach_vm_address_t vmaddr1, vmaddr2;
	mach_vm_size_t vmsize;
	vm_prot_t curprot, maxprot;

	vmaddr1 = 0;
	vmsize = VMSIZE;
	kr = mach_vm_allocate(mach_task_self(),
	    &vmaddr1,
	    vmsize,
	    VM_FLAGS_ANYWHERE);
	T_ASSERT_MACH_SUCCESS(kr, "vm_allocate()");

	vmaddr2 = 0;
	vmsize = 0;
	kr = mach_vm_remap(mach_task_self(),
	    &vmaddr2,
	    vmsize,
	    0, /* mask */
	    VM_FLAGS_ANYWHERE,
	    mach_task_self(),
	    vmaddr1,
	    FALSE, /* copy */
	    &curprot,
	    &maxprot,
	    VM_INHERIT_DEFAULT);
	T_ASSERT_EQ(kr, KERN_INVALID_ARGUMENT, "vm_remap(size=0x%llx) 0x%x (%s)",
	    vmsize, kr, mach_error_string(kr));

	vmaddr2 = 0;
	vmsize = (mach_vm_size_t)-2;
	kr = mach_vm_remap(mach_task_self(),
	    &vmaddr2,
	    vmsize,
	    0, /* mask */
	    VM_FLAGS_ANYWHERE,
	    mach_task_self(),
	    vmaddr1,
	    FALSE, /* copy */
	    &curprot,
	    &maxprot,
	    VM_INHERIT_DEFAULT);
	T_ASSERT_EQ(kr, KERN_INVALID_ARGUMENT, "vm_remap(size=0x%llx) 0x%x (%s)",
	    vmsize, kr, mach_error_string(kr));
}
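
/*
 * rdar://problem/41481703
 * Re-mapping shared-region memory through a MAP_MEM_VM_SHARE memory
 * entry from userspace must succeed without triggering a nested pmap;
 * nested pmaps should only be set up by the kernel.
 */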
extern int __shared_region_check_np(uint64_t *);

T_DECL(nested_pmap_trigger, "nested pmap should only be triggered from kernel \
    - rdar://problem/41481703",
    T_META_ALL_VALID_ARCHS(true))
{
	int ret;
	int saved_errno;
	kern_return_t kr;
	mach_vm_address_t sr_start;
	mach_vm_size_t vmsize;
	mach_vm_address_t vmaddr;
	mach_port_t mem_entry;

	ret = __shared_region_check_np(&sr_start);
	if (ret != 0) {
		saved_errno = errno;
		T_ASSERT_EQ(saved_errno, ENOMEM, "__shared_region_check_np() %d (%s)",
		    saved_errno, strerror(saved_errno));
		T_END;
	}

	vmsize = vm_kernel_page_size;
	mem_entry = MACH_PORT_NULL;
	kr = mach_make_memory_entry_64(mach_task_self(),
	    &vmsize,
	    sr_start,
	    MAP_MEM_VM_SHARE | VM_PROT_READ,
	    &mem_entry,
	    MACH_PORT_NULL);
	T_ASSERT_MACH_SUCCESS(kr, "make_memory_entry(0x%llx)", sr_start);

	vmaddr = 0;
	kr = mach_vm_map(mach_task_self(),
	    &vmaddr,
	    vmsize,
	    0, /* mask */
	    VM_FLAGS_ANYWHERE,
	    mem_entry,
	    0, /* offset */
	    FALSE, /* copy */
	    VM_PROT_READ,
	    VM_PROT_READ,
	    VM_INHERIT_DEFAULT);
	T_ASSERT_MACH_SUCCESS(kr, "vm_map()");
}
T_DECL(copyoverwrite_submap_protection, "test copy-overwrite vm region submap \
    protection", T_META_ALL_VALID_ARCHS(true))
{
	kern_return_t kr;
	mach_vm_address_t vmaddr;
	mach_vm_size_t vmsize;
	unsigned int depth;
	vm_region_submap_short_info_data_64_t region_info;
	mach_msg_type_number_t region_info_count;

	for (vmaddr = SHARED_REGION_BASE;
	    vmaddr < SHARED_REGION_BASE + SHARED_REGION_SIZE;
	    vmaddr += vmsize) {
		depth = 99;
		region_info_count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
		kr = mach_vm_region_recurse(mach_task_self(),
		    &vmaddr,
		    &vmsize,
		    &depth,
		    (vm_region_info_t) &region_info,
		    &region_info_count);
		if (kr == KERN_INVALID_ADDRESS) {
			break;
		}
		T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse(0x%llx)", vmaddr);
		T_ASSERT_EQ(region_info_count,
		    VM_REGION_SUBMAP_SHORT_INFO_COUNT_64,
		    "vm_region_recurse(0x%llx) count = %d expected %d",
		    vmaddr, region_info_count,
		    VM_REGION_SUBMAP_SHORT_INFO_COUNT_64);

		T_LOG("--> region: vmaddr 0x%llx depth %d prot 0x%x/0x%x",
		    vmaddr, depth, region_info.protection,
		    region_info.max_protection);
		if (depth == 0) {
			/* not a submap mapping: next mapping */
			continue;
		}
		if (vmaddr >= SHARED_REGION_BASE + SHARED_REGION_SIZE) {
			break;
		}
		kr = mach_vm_copy(mach_task_self(),
		    vmaddr,
		    vmsize,
		    vmaddr);
		if (kr == KERN_PROTECTION_FAILURE) {
			T_PASS("vm_copy(0x%llx,0x%llx) expected prot error 0x%x (%s)",
			    vmaddr, vmsize, kr, mach_error_string(kr));
			continue;
		}
		T_ASSERT_MACH_SUCCESS(kr, "vm_copy(0x%llx,0x%llx) prot 0x%x",
		    vmaddr, vmsize, region_info.protection);

		region_info_count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
		kr = mach_vm_region_recurse(mach_task_self(),
		    &vmaddr,
		    &vmsize,
		    &depth,
		    (vm_region_info_t) &region_info,
		    &region_info_count);
		T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse(0x%llx)", vmaddr);
		T_ASSERT_EQ(region_info_count,
		    VM_REGION_SUBMAP_SHORT_INFO_COUNT_64,
		    "vm_region_recurse() count = %d expected %d",
		    region_info_count, VM_REGION_SUBMAP_SHORT_INFO_COUNT_64);
		T_ASSERT_EQ(depth, 0, "vm_region_recurse(0x%llx): depth = %d expected 0",
		    vmaddr, depth);
		T_ASSERT_EQ((region_info.protection & VM_PROT_EXECUTE),
		    0, "vm_region_recurse(0x%llx): prot 0x%x",
		    vmaddr, region_info.protection);
	}
}
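
/*
 * rdar://problem/16783546
 * mlock() on shared-region text (printf, fprintf) and on this test's
 * own text must either succeed or fail cleanly with EACCES; it must
 * not trigger a code-signing violation.
 */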
T_DECL(wire_text, "test wired text for rdar://problem/16783546 Wiring code in \
    the shared region triggers code-signing violations",
    T_META_ALL_VALID_ARCHS(true))
{
	char *addr;
	int retval;
	int saved_errno;
	kern_return_t kr;
	vm_address_t map_addr, remap_addr;
	vm_prot_t curprot, maxprot;

	addr = (char *)&printf;
#if __has_feature(ptrauth_calls)
	map_addr = (vm_address_t)(uintptr_t)ptrauth_strip(addr, ptrauth_key_function_pointer);
#else /* __has_feature(ptrauth_calls) */
	map_addr = (vm_address_t)(uintptr_t)addr;
#endif /* __has_feature(ptrauth_calls) */
	remap_addr = 0;
	kr = vm_remap(mach_task_self(), &remap_addr, 4096,
	    0, /* mask */
	    VM_FLAGS_ANYWHERE,
	    mach_task_self(), map_addr,
	    FALSE, /* copy */
	    &curprot, &maxprot,
	    VM_INHERIT_DEFAULT);
	T_ASSERT_EQ(kr, KERN_SUCCESS, "vm_remap error 0x%x (%s)",
	    kr, mach_error_string(kr));
	retval = mlock(addr, 4096);
	if (retval != 0) {
		saved_errno = errno;
		T_ASSERT_EQ(saved_errno, EACCES, "wire shared text error %d (%s), expected: %d",
		    saved_errno, strerror(saved_errno), EACCES);
	} else {
		T_PASS("wire shared text");
	}

	addr = (char *) &fprintf;
	retval = mlock(addr, 4096);
	if (retval != 0) {
		saved_errno = errno;
		T_ASSERT_EQ(saved_errno, EACCES, "wire shared text error %d (%s), expected: %d",
		    saved_errno, strerror(saved_errno), EACCES);
	} else {
		T_PASS("wire shared text");
	}

	addr = (char *) &testmain_wire_text;
	retval = mlock(addr, 4096);
	if (retval != 0) {
		saved_errno = errno;
		T_ASSERT_EQ(saved_errno, EACCES, "wire text error return error %d (%s)",
		    saved_errno, strerror(saved_errno));
	} else {
		T_PASS("wire text");
	}
}