/* Mach vm map miscellaneous unit tests
 *
 * This test program serves as a regression test suite for legacy
 * vm issues. Ideally, each test is linked to a radar number and
 * performs a specific set of validations.
 */
#include <darwintest.h>

#include <errno.h>
#include <ptrauth.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>

#include <sys/mman.h>

#include <mach/mach_error.h>
#include <mach/mach_init.h>
#include <mach/mach_port.h>
#include <mach/mach_vm.h>
#include <mach/vm_map.h>
#include <mach/task.h>
#include <mach/task_info.h>
#include <mach/shared_region.h>
#include <machine/cpu_capabilities.h>

T_GLOBAL_META(T_META_NAMESPACE("xnu.vm"),
    T_META_RUN_CONCURRENTLY(true));
static void
test_memory_entry_tagging(int override_tag)
{
    int pass;
    int do_copy;
    kern_return_t kr;
    mach_vm_address_t vmaddr_orig, vmaddr_shared, vmaddr_copied;
    mach_vm_size_t vmsize_orig, vmsize_shared, vmsize_copied;
    mach_vm_address_t *vmaddr_ptr;
    mach_vm_size_t *vmsize_ptr;
    mach_vm_address_t vmaddr_chunk;
    mach_vm_size_t vmsize_chunk;
    mach_vm_offset_t vmoff;
    mach_port_t mem_entry_copied, mem_entry_shared;
    mach_port_t *mem_entry_ptr;
    int i;
    vm_region_submap_short_info_data_64_t ri;
    mach_msg_type_number_t ri_count;
    unsigned int depth;
    int vm_flags;
    int expected_tag;

    vmaddr_copied = 0;
    vmaddr_shared = 0;
    vmsize_copied = 0;
    vmsize_shared = 0;
    vmaddr_chunk = 0;
    vmsize_chunk = 16 * 1024;
    vmaddr_orig = 0;
    vmsize_orig = 3 * vmsize_chunk;
    mem_entry_copied = MACH_PORT_NULL;
    mem_entry_shared = MACH_PORT_NULL;
    pass = 0;

    vmaddr_orig = 0;
    kr = mach_vm_allocate(mach_task_self(),
        &vmaddr_orig,
        vmsize_orig,
        VM_FLAGS_ANYWHERE);
    T_QUIET;
    T_EXPECT_MACH_SUCCESS(kr, "[override_tag:%d] vm_allocate(%lld)",
        override_tag, vmsize_orig);
    if (T_RESULT == T_RESULT_FAIL) {
        goto done;
    }

    for (i = 0; i < vmsize_orig / vmsize_chunk; i++) {
        vmaddr_chunk = vmaddr_orig + (i * vmsize_chunk);
        kr = mach_vm_allocate(mach_task_self(),
            &vmaddr_chunk,
            vmsize_chunk,
            (VM_FLAGS_FIXED |
            VM_FLAGS_OVERWRITE |
            VM_MAKE_TAG(100 + i)));
        T_QUIET;
        T_EXPECT_MACH_SUCCESS(kr, "[override_tag:%d] vm_allocate(%lld)",
            override_tag, vmsize_chunk);
        if (T_RESULT == T_RESULT_FAIL) {
            goto done;
        }
    }

    for (vmoff = 0;
        vmoff < vmsize_orig;
        vmoff += PAGE_SIZE) {
        *((unsigned char *)(uintptr_t)(vmaddr_orig + vmoff)) = 'x';
    }

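    /*
     * Randomly pick whether the copy or the share case runs first;
     * the two-pass loop below (see "again:") exercises both.
     */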
    do_copy = time(NULL) & 1;
again:
    *((unsigned char *)(uintptr_t)vmaddr_orig) = 'x';
    if (do_copy) {
        mem_entry_ptr = &mem_entry_copied;
        vmsize_copied = vmsize_orig;
        vmsize_ptr = &vmsize_copied;
        vmaddr_copied = 0;
        vmaddr_ptr = &vmaddr_copied;
        vm_flags = MAP_MEM_VM_COPY;
    } else {
        mem_entry_ptr = &mem_entry_shared;
        vmsize_shared = vmsize_orig;
        vmsize_ptr = &vmsize_shared;
        vmaddr_shared = 0;
        vmaddr_ptr = &vmaddr_shared;
        vm_flags = MAP_MEM_VM_SHARE;
    }
    kr = mach_make_memory_entry_64(mach_task_self(),
        vmsize_ptr,
        vmaddr_orig, /* offset */
        (vm_flags |
        VM_PROT_READ | VM_PROT_WRITE),
        mem_entry_ptr,
        MACH_PORT_NULL);
    T_QUIET;
    T_EXPECT_MACH_SUCCESS(kr, "[override_tag:%d][do_copy:%d] mach_make_memory_entry()",
        override_tag, do_copy);
    if (T_RESULT == T_RESULT_FAIL) {
        goto done;
    }
    T_QUIET;
    T_EXPECT_EQ(*vmsize_ptr, vmsize_orig, "[override_tag:%d][do_copy:%d] vmsize (0x%llx) != vmsize_orig (0x%llx)",
        override_tag, do_copy, (uint64_t) *vmsize_ptr, (uint64_t) vmsize_orig);
    if (T_RESULT == T_RESULT_FAIL) {
        goto done;
    }
    T_QUIET;
    T_EXPECT_NOTNULL(*mem_entry_ptr, "[override_tag:%d][do_copy:%d] mem_entry == 0x%x",
        override_tag, do_copy, *mem_entry_ptr);
    if (T_RESULT == T_RESULT_FAIL) {
        goto done;
    }

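    /*
     * Map the memory entry into the address space, optionally forcing
     * the new mapping's user tag to 200 via VM_MAKE_TAG().
     */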
    *vmaddr_ptr = 0;
    if (override_tag) {
        vm_flags = VM_MAKE_TAG(200);
    } else {
        vm_flags = 0;
    }
    kr = mach_vm_map(mach_task_self(),
        vmaddr_ptr,
        vmsize_orig,
        0, /* mask */
        vm_flags | VM_FLAGS_ANYWHERE,
        *mem_entry_ptr,
        0, /* offset */
        FALSE, /* copy */
        VM_PROT_READ | VM_PROT_WRITE,
        VM_PROT_READ | VM_PROT_WRITE,
        VM_INHERIT_DEFAULT);
    T_QUIET;
    T_EXPECT_MACH_SUCCESS(kr, "[override_tag:%d][do_copy:%d] mach_vm_map()",
        override_tag, do_copy);
    if (T_RESULT == T_RESULT_FAIL) {
        goto done;
    }

    *((unsigned char *)(uintptr_t)vmaddr_orig) = 'X';
    if (*(unsigned char *)(uintptr_t)*vmaddr_ptr == 'X') {
        T_QUIET;
        T_EXPECT_EQ(do_copy, 0, "[override_tag:%d][do_copy:%d] memory shared instead of copied",
            override_tag, do_copy);
        if (T_RESULT == T_RESULT_FAIL) {
            goto done;
        }
    } else {
        T_QUIET;
        T_EXPECT_NE(do_copy, 0, "[override_tag:%d][do_copy:%d] memory copied instead of shared",
            override_tag, do_copy);
        if (T_RESULT == T_RESULT_FAIL) {
            goto done;
        }
    }

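    /*
     * Walk each chunk of the new mapping with mach_vm_region_recurse()
     * and check that its user tag matches the expected one.
     */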
    for (i = 0; i < vmsize_orig / vmsize_chunk; i++) {
        mach_vm_address_t vmaddr_info;
        mach_vm_size_t vmsize_info;

        vmaddr_info = *vmaddr_ptr + (i * vmsize_chunk);
        vmsize_info = 0;
        depth = 1;
        ri_count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
        kr = mach_vm_region_recurse(mach_task_self(),
            &vmaddr_info,
            &vmsize_info,
            &depth,
            (vm_region_recurse_info_t) &ri,
            &ri_count);
        T_QUIET;
        T_EXPECT_MACH_SUCCESS(kr, "[override_tag:%d][do_copy:%d] mach_vm_region_recurse(0x%llx+0x%llx)",
            override_tag, do_copy, *vmaddr_ptr, i * vmsize_chunk);
        if (T_RESULT == T_RESULT_FAIL) {
            goto done;
        }
        T_QUIET;
        T_EXPECT_EQ(vmaddr_info, *vmaddr_ptr + (i * vmsize_chunk), "[override_tag:%d][do_copy:%d] mach_vm_region_recurse(0x%llx+0x%llx) returned addr 0x%llx",
            override_tag, do_copy, *vmaddr_ptr, i * vmsize_chunk, vmaddr_info);
        if (T_RESULT == T_RESULT_FAIL) {
            goto done;
        }
        T_QUIET;
        T_EXPECT_EQ(vmsize_info, vmsize_chunk, "[override_tag:%d][do_copy:%d] mach_vm_region_recurse(0x%llx+0x%llx) returned size 0x%llx expected 0x%llx",
            override_tag, do_copy, *vmaddr_ptr, i * vmsize_chunk, vmsize_info, vmsize_chunk);
        if (T_RESULT == T_RESULT_FAIL) {
            goto done;
        }
        if (override_tag) {
            expected_tag = 200;
        } else {
            expected_tag = 100 + i;
        }
        T_QUIET;
        T_EXPECT_EQ(ri.user_tag, expected_tag, "[override_tag:%d][do_copy:%d] i=%d tag=%d expected %d",
            override_tag, do_copy, i, ri.user_tag, expected_tag);
        if (T_RESULT == T_RESULT_FAIL) {
            goto done;
        }
    }

    if (++pass < 2) {
        do_copy = !do_copy;
        goto again;
    }

done:
    if (vmaddr_orig != 0) {
        mach_vm_deallocate(mach_task_self(),
            vmaddr_orig,
            vmsize_orig);
        vmaddr_orig = 0;
        vmsize_orig = 0;
    }
    if (vmaddr_copied != 0) {
        mach_vm_deallocate(mach_task_self(),
            vmaddr_copied,
            vmsize_copied);
        vmaddr_copied = 0;
        vmsize_copied = 0;
    }
    if (vmaddr_shared != 0) {
        mach_vm_deallocate(mach_task_self(),
            vmaddr_shared,
            vmsize_shared);
        vmaddr_shared = 0;
        vmsize_shared = 0;
    }
    if (mem_entry_copied != MACH_PORT_NULL) {
        mach_port_deallocate(mach_task_self(), mem_entry_copied);
        mem_entry_copied = MACH_PORT_NULL;
    }
    if (mem_entry_shared != MACH_PORT_NULL) {
        mach_port_deallocate(mach_task_self(), mem_entry_shared);
        mem_entry_shared = MACH_PORT_NULL;
    }

    return;
}

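/*
 * Make a MAP_MEM_VM_COPY memory entry for a one-byte allocation, map it
 * with copy semantics, and verify the two mappings are truly independent:
 * a write through one must not be visible through the other.
 */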
static void
test_map_memory_entry(void)
{
    kern_return_t kr;
    mach_vm_address_t vmaddr1, vmaddr2;
    mach_vm_size_t vmsize1, vmsize2;
    mach_port_t mem_entry;
    unsigned char *cp1, *cp2;

    vmaddr1 = 0;
    vmsize1 = 0;
    vmaddr2 = 0;
    vmsize2 = 0;
    mem_entry = MACH_PORT_NULL;

    vmsize1 = 1;
    vmaddr1 = 0;
    kr = mach_vm_allocate(mach_task_self(),
        &vmaddr1,
        vmsize1,
        VM_FLAGS_ANYWHERE);
    T_QUIET;
    T_EXPECT_MACH_SUCCESS(kr, "vm_allocate(%lld)", vmsize1);
    if (T_RESULT == T_RESULT_FAIL) {
        goto done;
    }

    cp1 = (unsigned char *)(uintptr_t)vmaddr1;
    *cp1 = '1';

    vmsize2 = 1;
    mem_entry = MACH_PORT_NULL;
    kr = mach_make_memory_entry_64(mach_task_self(),
        &vmsize2,
        vmaddr1, /* offset */
        (MAP_MEM_VM_COPY |
        VM_PROT_READ | VM_PROT_WRITE),
        &mem_entry,
        MACH_PORT_NULL);
    T_QUIET;
    T_EXPECT_MACH_SUCCESS(kr, "mach_make_memory_entry()");
    if (T_RESULT == T_RESULT_FAIL) {
        goto done;
    }
    T_QUIET;
    T_EXPECT_GE(vmsize2, vmsize1, "vmsize2 (0x%llx) < vmsize1 (0x%llx)",
        (uint64_t) vmsize2, (uint64_t) vmsize1);
    if (T_RESULT == T_RESULT_FAIL) {
        goto done;
    }
    T_QUIET;
    T_EXPECT_NOTNULL(mem_entry, "mem_entry == 0x%x", mem_entry);
    if (T_RESULT == T_RESULT_FAIL) {
        goto done;
    }

    vmaddr2 = 0;
    kr = mach_vm_map(mach_task_self(),
        &vmaddr2,
        vmsize2,
        0, /* mask */
        VM_FLAGS_ANYWHERE,
        mem_entry,
        0, /* offset */
        TRUE, /* copy */
        VM_PROT_READ | VM_PROT_WRITE,
        VM_PROT_READ | VM_PROT_WRITE,
        VM_INHERIT_DEFAULT);
    T_QUIET;
    T_EXPECT_MACH_SUCCESS(kr, "mach_vm_map()");
    if (T_RESULT == T_RESULT_FAIL) {
        goto done;
    }

    cp2 = (unsigned char *)(uintptr_t)vmaddr2;
    T_QUIET;
    T_EXPECT_TRUE(((*cp1 == '1') && (*cp2 == '1')), "*cp1/*cp2 0x%x/0x%x expected 0x%x/0x%x",
        *cp1, *cp2, '1', '1');
    if (T_RESULT == T_RESULT_FAIL) {
        goto done;
    }

    *cp2 = '2';
    T_QUIET;
    T_EXPECT_TRUE(((*cp1 == '1') && (*cp2 == '2')), "*cp1/*cp2 0x%x/0x%x expected 0x%x/0x%x",
        *cp1, *cp2, '1', '2');
    if (T_RESULT == T_RESULT_FAIL) {
        goto done;
    }

done:
    if (vmaddr1 != 0) {
        mach_vm_deallocate(mach_task_self(), vmaddr1, vmsize1);
        vmaddr1 = 0;
        vmsize1 = 0;
    }
    if (vmaddr2 != 0) {
        mach_vm_deallocate(mach_task_self(), vmaddr2, vmsize2);
        vmaddr2 = 0;
        vmsize2 = 0;
    }
    if (mem_entry != MACH_PORT_NULL) {
        mach_port_deallocate(mach_task_self(), mem_entry);
        mem_entry = MACH_PORT_NULL;
    }

    return;
}

T_DECL(memory_entry_tagging, "test mem entry tag for rdar://problem/23334087 \
    VM memory tags should be propagated through memory entries",
    T_META_ALL_VALID_ARCHS(true))
{
    test_memory_entry_tagging(0);
    test_memory_entry_tagging(1);
}

T_DECL(map_memory_entry, "test mapping mem entry for rdar://problem/22611816 \
    mach_make_memory_entry(MAP_MEM_VM_COPY) should never use a KERNEL_BUFFER \
    copy", T_META_ALL_VALID_ARCHS(true))
{
    test_map_memory_entry();
}

static char *vm_purgable_state[4] = { "NONVOLATILE", "VOLATILE", "EMPTY", "DENY" };

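/* Return the task's current physical memory footprint, in bytes. */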
static uint64_t
task_footprint(void)
{
    task_vm_info_data_t ti;
    kern_return_t kr;
    mach_msg_type_number_t count;

    count = TASK_VM_INFO_COUNT;
    kr = task_info(mach_task_self(),
        TASK_VM_INFO,
        (task_info_t) &ti,
        &count);
    T_QUIET;
    T_ASSERT_MACH_SUCCESS(kr, "task_info()");
#if defined(__arm64__) || defined(__arm__)
    T_QUIET;
    T_ASSERT_EQ(count, TASK_VM_INFO_COUNT, "task_info() count = %d (expected %d)",
        count, TASK_VM_INFO_COUNT);
#endif /* defined(__arm64__) || defined(__arm__) */
    return ti.phys_footprint;
}

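/*
 * Expected footprint trajectory (see the footprint[] checkpoints below):
 * touching the pages adds vm_size; wiring half of them changes nothing;
 * VOLATILE drops the unwired half; EMPTY changes nothing further; and
 * unwiring returns the footprint to its starting value.
 */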
T_DECL(purgeable_empty_to_volatile, "test task physical footprint when \
    emptying and volatilizing purgeable vm")
{
    kern_return_t kr;
    mach_vm_address_t vm_addr;
    mach_vm_size_t vm_size;
    char *cp;
    int ret;
    vm_purgable_t state;
    uint64_t footprint[8];

    vm_addr = 0;
    vm_size = 1 * 1024 * 1024;
    T_LOG("--> allocate %llu bytes", vm_size);
    kr = mach_vm_allocate(mach_task_self(),
        &vm_addr,
        vm_size,
        VM_FLAGS_ANYWHERE | VM_FLAGS_PURGABLE);
    T_ASSERT_MACH_SUCCESS(kr, "vm_allocate()");

    /* footprint0 */
    footprint[0] = task_footprint();
    T_LOG(" footprint[0] = %llu", footprint[0]);

    T_LOG("--> access %llu bytes", vm_size);
    for (cp = (char *) vm_addr;
        cp < (char *) (vm_addr + vm_size);
        cp += vm_kernel_page_size) {
        *cp = 'x';
    }
    /* footprint1 == footprint0 + vm_size */
    footprint[1] = task_footprint();
    T_LOG(" footprint[1] = %llu", footprint[1]);
    if (footprint[1] != footprint[0] + vm_size) {
        T_LOG("WARN: footprint[1] != footprint[0] + vm_size");
    }

    T_LOG("--> wire %llu bytes", vm_size / 2);
    ret = mlock((char *)vm_addr, (size_t) (vm_size / 2));
    T_ASSERT_POSIX_SUCCESS(ret, "mlock()");

    /* footprint2 == footprint1 */
    footprint[2] = task_footprint();
    T_LOG(" footprint[2] = %llu", footprint[2]);
    if (footprint[2] != footprint[1]) {
        T_LOG("WARN: footprint[2] != footprint[1]");
    }

    T_LOG("--> VOLATILE");
    state = VM_PURGABLE_VOLATILE;
    kr = mach_vm_purgable_control(mach_task_self(),
        vm_addr,
        VM_PURGABLE_SET_STATE,
        &state);
    T_ASSERT_MACH_SUCCESS(kr, "vm_purgable_control(VOLATILE)");
    T_ASSERT_EQ(state, VM_PURGABLE_NONVOLATILE, "NONVOLATILE->VOLATILE: state was %s",
        vm_purgable_state[state]);
    /* footprint3 == footprint2 - (vm_size / 2) */
    footprint[3] = task_footprint();
    T_LOG(" footprint[3] = %llu", footprint[3]);
    if (footprint[3] != footprint[2] - (vm_size / 2)) {
        T_LOG("WARN: footprint[3] != footprint[2] - (vm_size / 2)");
    }

    T_LOG("--> EMPTY");
    state = VM_PURGABLE_EMPTY;
    kr = mach_vm_purgable_control(mach_task_self(),
        vm_addr,
        VM_PURGABLE_SET_STATE,
        &state);
    T_ASSERT_MACH_SUCCESS(kr, "vm_purgable_control(EMPTY)");
    if (state != VM_PURGABLE_VOLATILE &&
        state != VM_PURGABLE_EMPTY) {
        T_ASSERT_FAIL("VOLATILE->EMPTY: state was %s",
            vm_purgable_state[state]);
    }
    /* footprint4 == footprint3 */
    footprint[4] = task_footprint();
    T_LOG(" footprint[4] = %llu", footprint[4]);
    if (footprint[4] != footprint[3]) {
        T_LOG("WARN: footprint[4] != footprint[3]");
    }

    T_LOG("--> unwire %llu bytes", vm_size / 2);
    ret = munlock((char *)vm_addr, (size_t) (vm_size / 2));
    T_ASSERT_POSIX_SUCCESS(ret, "munlock()");

    /* footprint5 == footprint4 - (vm_size/2) (unless memory pressure) */
    /* footprint5 == footprint0 */
    footprint[5] = task_footprint();
    T_LOG(" footprint[5] = %llu", footprint[5]);
    if (footprint[5] != footprint[4] - (vm_size / 2)) {
        T_LOG("WARN: footprint[5] != footprint[4] - (vm_size/2)");
    }
    if (footprint[5] != footprint[0]) {
        T_LOG("WARN: footprint[5] != footprint[0]");
    }

    T_LOG("--> VOLATILE");
    state = VM_PURGABLE_VOLATILE;
    kr = mach_vm_purgable_control(mach_task_self(),
        vm_addr,
        VM_PURGABLE_SET_STATE,
        &state);
    T_ASSERT_MACH_SUCCESS(kr, "vm_purgable_control(VOLATILE)");
    T_ASSERT_EQ(state, VM_PURGABLE_EMPTY, "EMPTY->VOLATILE: state == %s",
        vm_purgable_state[state]);
    /* footprint6 == footprint5 */
    /* footprint6 == footprint0 */
    footprint[6] = task_footprint();
    T_LOG(" footprint[6] = %llu", footprint[6]);
    if (footprint[6] != footprint[5]) {
        T_LOG("WARN: footprint[6] != footprint[5]");
    }
    if (footprint[6] != footprint[0]) {
        T_LOG("WARN: footprint[6] != footprint[0]");
    }

    T_LOG("--> NONVOLATILE");
    state = VM_PURGABLE_NONVOLATILE;
    kr = mach_vm_purgable_control(mach_task_self(),
        vm_addr,
        VM_PURGABLE_SET_STATE,
        &state);
    T_ASSERT_MACH_SUCCESS(kr, "vm_purgable_control(NONVOLATILE)");
    T_ASSERT_EQ(state, VM_PURGABLE_EMPTY, "EMPTY->NONVOLATILE: state == %s",
        vm_purgable_state[state]);
    /* footprint7 == footprint6 */
    /* footprint7 == footprint0 */
    footprint[7] = task_footprint();
    T_LOG(" footprint[7] = %llu", footprint[7]);
    if (footprint[7] != footprint[6]) {
        T_LOG("WARN: footprint[7] != footprint[6]");
    }
    if (footprint[7] != footprint[0]) {
        T_LOG("WARN: footprint[7] != footprint[0]");
    }
}

T_DECL(madvise_shared, "test madvise shared for rdar://problem/2295713 logging \
    rethink needs madvise(MADV_FREE_HARDER)",
    T_META_ALL_VALID_ARCHS(true))
{
    vm_address_t vmaddr = 0, vmaddr2 = 0;
    vm_size_t vmsize;
    kern_return_t kr;
    char *cp;
    vm_prot_t curprot, maxprot;
    int ret;
    task_vm_info_data_t ti;
    mach_msg_type_number_t ti_count;

    vmsize = 10 * 1024 * 1024; /* 10MB */
    kr = vm_allocate(mach_task_self(),
        &vmaddr,
        vmsize,
        VM_FLAGS_ANYWHERE);
    T_QUIET;
    T_EXPECT_MACH_SUCCESS(kr, "vm_allocate()");
    if (T_RESULT == T_RESULT_FAIL) {
        goto done;
    }

    for (cp = (char *)(uintptr_t)vmaddr;
        cp < (char *)(uintptr_t)(vmaddr + vmsize);
        cp++) {
        *cp = 'x';
    }

    kr = vm_remap(mach_task_self(),
        &vmaddr2,
        vmsize,
        0, /* mask */
        VM_FLAGS_ANYWHERE,
        mach_task_self(),
        vmaddr,
        FALSE, /* copy */
        &curprot,
        &maxprot,
        VM_INHERIT_DEFAULT);
    T_QUIET;
    T_EXPECT_MACH_SUCCESS(kr, "vm_remap()");
    if (T_RESULT == T_RESULT_FAIL) {
        goto done;
    }

    for (cp = (char *)(uintptr_t)vmaddr2;
        cp < (char *)(uintptr_t)(vmaddr2 + vmsize);
        cp++) {
        T_QUIET;
        T_EXPECT_EQ(*cp, 'x', "vmaddr=%p vmaddr2=%p %p:0x%x",
            (void *)(uintptr_t)vmaddr,
            (void *)(uintptr_t)vmaddr2,
            (void *)cp,
            (unsigned char)*cp);
        if (T_RESULT == T_RESULT_FAIL) {
            goto done;
        }
    }
    cp = (char *)(uintptr_t)vmaddr;
    *cp = 'X';
    cp = (char *)(uintptr_t)vmaddr2;
    T_QUIET;
    T_EXPECT_EQ(*cp, 'X', "memory was not properly shared");
    if (T_RESULT == T_RESULT_FAIL) {
        goto done;
    }

#if defined(__x86_64__) || defined(__i386__)
    if (*((uint64_t *)_COMM_PAGE_CPU_CAPABILITIES64) & kIsTranslated) {
        T_LOG("Skipping madvise reusable tests because we're running under translation.");
        goto done;
    }
#endif /* defined(__x86_64__) || defined(__i386__) */
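    /*
     * MADV_FREE_REUSABLE on one of the two mappings should mark the pages
     * reusable in both, so the task's "reusable" counter is expected to
     * account for 2 * vmsize.
     */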
    ret = madvise((char *)(uintptr_t)vmaddr,
        vmsize,
        MADV_FREE_REUSABLE);
    T_QUIET;
    T_EXPECT_POSIX_SUCCESS(ret, "madvise()");
    if (T_RESULT == T_RESULT_FAIL) {
        goto done;
    }

    ti_count = TASK_VM_INFO_COUNT;
    kr = task_info(mach_task_self(),
        TASK_VM_INFO,
        (task_info_t) &ti,
        &ti_count);
    T_QUIET;
    T_EXPECT_MACH_SUCCESS(kr, "task_info()");
    if (T_RESULT == T_RESULT_FAIL) {
        goto done;
    }

    T_QUIET;
    T_EXPECT_EQ(ti.reusable, 2ULL * vmsize, "ti.reusable=%lld expected %lld",
        ti.reusable, (uint64_t)(2 * vmsize));
    if (T_RESULT == T_RESULT_FAIL) {
        goto done;
    }

done:
    if (vmaddr != 0) {
        vm_deallocate(mach_task_self(), vmaddr, vmsize);
        vmaddr = 0;
    }
    if (vmaddr2 != 0) {
        vm_deallocate(mach_task_self(), vmaddr2, vmsize);
        vmaddr2 = 0;
    }
}

T_DECL(madvise_purgeable_can_reuse, "test madvise purgeable can reuse for \
    rdar://problem/37476183 Preview Footprint memory regressions ~100MB \
    [ purgeable_malloc became eligible for reuse ]",
    T_META_ALL_VALID_ARCHS(true))
{
#if defined(__x86_64__) || defined(__i386__)
    if (*((uint64_t *)_COMM_PAGE_CPU_CAPABILITIES64) & kIsTranslated) {
        T_SKIP("madvise reusable is not supported under Rosetta translation. Skipping.");
    }
#endif /* defined(__x86_64__) || defined(__i386__) */
    vm_address_t vmaddr = 0;
    vm_size_t vmsize;
    kern_return_t kr;
    char *cp;
    int ret;

    vmsize = 10 * 1024 * 1024; /* 10MB */
    kr = vm_allocate(mach_task_self(),
        &vmaddr,
        vmsize,
        (VM_FLAGS_ANYWHERE |
        VM_FLAGS_PURGABLE |
        VM_MAKE_TAG(VM_MEMORY_MALLOC)));
    T_QUIET;
    T_EXPECT_MACH_SUCCESS(kr, "vm_allocate()");
    if (T_RESULT == T_RESULT_FAIL) {
        goto done;
    }

    for (cp = (char *)(uintptr_t)vmaddr;
        cp < (char *)(uintptr_t)(vmaddr + vmsize);
        cp++) {
        *cp = 'x';
    }

    ret = madvise((char *)(uintptr_t)vmaddr,
        vmsize,
        MADV_CAN_REUSE);
    T_QUIET;
    T_EXPECT_TRUE(((ret == -1) && (errno == EINVAL)), "madvise(): purgeable vm can't be advised to reuse");
    if (T_RESULT == T_RESULT_FAIL) {
        goto done;
    }

done:
    if (vmaddr != 0) {
        vm_deallocate(mach_task_self(), vmaddr, vmsize);
        vmaddr = 0;
    }
}

#define DEST_PATTERN 0xFEDCBA98

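/*
 * The destination buffer keeps DEST_PATTERN in its first and last words;
 * mach_vm_read_overwrite() must fill everything in between with the
 * source's counter pattern, and nothing outside that window.
 */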
T_DECL(map_read_overwrite, "test overwriting vm map from other map - \
    rdar://31075370",
    T_META_ALL_VALID_ARCHS(true))
{
    kern_return_t kr;
    mach_vm_address_t vmaddr1, vmaddr2;
    mach_vm_size_t vmsize1, vmsize2;
    int *ip;
    int i;

    vmaddr1 = 0;
    vmsize1 = 4 * 4096;
    kr = mach_vm_allocate(mach_task_self(),
        &vmaddr1,
        vmsize1,
        VM_FLAGS_ANYWHERE);
    T_ASSERT_MACH_SUCCESS(kr, "vm_allocate()");

    ip = (int *)(uintptr_t)vmaddr1;
    for (i = 0; i < vmsize1 / sizeof(*ip); i++) {
        ip[i] = i;
    }

    vmaddr2 = 0;
    kr = mach_vm_allocate(mach_task_self(),
        &vmaddr2,
        vmsize1,
        VM_FLAGS_ANYWHERE);
    T_ASSERT_MACH_SUCCESS(kr, "vm_allocate()");

    ip = (int *)(uintptr_t)vmaddr2;
    for (i = 0; i < vmsize1 / sizeof(*ip); i++) {
        ip[i] = DEST_PATTERN;
    }

    vmsize2 = vmsize1 - 2 * (sizeof(*ip));
    kr = mach_vm_read_overwrite(mach_task_self(),
        vmaddr1 + sizeof(*ip),
        vmsize2,
        vmaddr2 + sizeof(*ip),
        &vmsize2);
    T_ASSERT_MACH_SUCCESS(kr, "vm_read_overwrite()");

    ip = (int *)(uintptr_t)vmaddr2;
    for (i = 0; i < 1; i++) {
        T_QUIET;
        T_ASSERT_EQ(ip[i], DEST_PATTERN, "vmaddr2[%d] = 0x%x instead of 0x%x",
            i, ip[i], DEST_PATTERN);
    }
    for (; i < (vmsize1 - 2) / sizeof(*ip); i++) {
        T_QUIET;
        T_ASSERT_EQ(ip[i], i, "vmaddr2[%d] = 0x%x instead of 0x%x",
            i, ip[i], i);
    }
    for (; i < vmsize1 / sizeof(*ip); i++) {
        T_QUIET;
        T_ASSERT_EQ(ip[i], DEST_PATTERN, "vmaddr2[%d] = 0x%x instead of 0x%x",
            i, ip[i], DEST_PATTERN);
    }
}

T_DECL(copy_none_use_pmap, "test copy-on-write remapping of COPY_NONE vm \
    objects - rdar://35610377",
    T_META_ALL_VALID_ARCHS(true))
{
    kern_return_t kr;
    mach_vm_address_t vmaddr1, vmaddr2, vmaddr3;
    mach_vm_size_t vmsize;
    vm_prot_t curprot, maxprot;

    vmsize = 32 * 1024 * 1024;

    vmaddr1 = 0;
    kr = mach_vm_allocate(mach_task_self(),
        &vmaddr1,
        vmsize,
        VM_FLAGS_ANYWHERE | VM_FLAGS_PURGABLE);
    T_ASSERT_MACH_SUCCESS(kr, "vm_allocate()");

    memset((void *)(uintptr_t)vmaddr1, 'x', vmsize);

    vmaddr2 = 0;
    kr = mach_vm_remap(mach_task_self(),
        &vmaddr2,
        vmsize,
        0, /* mask */
        VM_FLAGS_ANYWHERE,
        mach_task_self(),
        vmaddr1,
        TRUE, /* copy */
        &curprot,
        &maxprot,
        VM_INHERIT_DEFAULT);
    T_ASSERT_MACH_SUCCESS(kr, "vm_remap() #1");

    vmaddr3 = 0;
    kr = mach_vm_remap(mach_task_self(),
        &vmaddr3,
        vmsize,
        0, /* mask */
        VM_FLAGS_ANYWHERE,
        mach_task_self(),
        vmaddr2,
        TRUE, /* copy */
        &curprot,
        &maxprot,
        VM_INHERIT_DEFAULT);
    T_ASSERT_MACH_SUCCESS(kr, "vm_remap() #2");
}

T_DECL(purgable_deny, "test purgeable memory is not allowed to be converted to \
    non-purgeable - rdar://31990033",
    T_META_ALL_VALID_ARCHS(true))
{
    kern_return_t kr;
    vm_address_t vmaddr;
    vm_purgable_t state;

    vmaddr = 0;
    kr = vm_allocate(mach_task_self(), &vmaddr, 1,
        VM_FLAGS_ANYWHERE | VM_FLAGS_PURGABLE);
    T_ASSERT_MACH_SUCCESS(kr, "vm_allocate()");

    state = VM_PURGABLE_DENY;
    kr = vm_purgable_control(mach_task_self(), vmaddr,
        VM_PURGABLE_SET_STATE, &state);
    T_ASSERT_EQ(kr, KERN_INVALID_ARGUMENT,
        "vm_purgable_control(VM_PURGABLE_DENY) -> 0x%x (%s)",
        kr, mach_error_string(kr));

    kr = vm_deallocate(mach_task_self(), vmaddr, 1);
    T_ASSERT_MACH_SUCCESS(kr, "vm_deallocate()");
}

#define VMSIZE 0x10000

T_DECL(vm_remap_zero, "test vm map of zero size - rdar://33114981",
    T_META_ALL_VALID_ARCHS(true))
{
    kern_return_t kr;
    mach_vm_address_t vmaddr1, vmaddr2;
    mach_vm_size_t vmsize;
    vm_prot_t curprot, maxprot;

    vmaddr1 = 0;
    vmsize = VMSIZE;
    kr = mach_vm_allocate(mach_task_self(),
        &vmaddr1,
        vmsize,
        VM_FLAGS_ANYWHERE);
    T_ASSERT_MACH_SUCCESS(kr, "vm_allocate()");

    vmaddr2 = 0;
    vmsize = 0;
    kr = mach_vm_remap(mach_task_self(),
        &vmaddr2,
        vmsize,
        0,
        VM_FLAGS_ANYWHERE,
        mach_task_self(),
        vmaddr1,
        FALSE,
        &curprot,
        &maxprot,
        VM_INHERIT_DEFAULT);
    T_ASSERT_EQ(kr, KERN_INVALID_ARGUMENT, "vm_remap(size=0x%llx) 0x%x (%s)",
        vmsize, kr, mach_error_string(kr));

    vmaddr2 = 0;
    vmsize = (mach_vm_size_t)-2;
    kr = mach_vm_remap(mach_task_self(),
        &vmaddr2,
        vmsize,
        0,
        VM_FLAGS_ANYWHERE,
        mach_task_self(),
        vmaddr1,
        FALSE,
        &curprot,
        &maxprot,
        VM_INHERIT_DEFAULT);
    T_ASSERT_EQ(kr, KERN_INVALID_ARGUMENT, "vm_remap(size=0x%llx) 0x%x (%s)",
        vmsize, kr, mach_error_string(kr));
}

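/*
 * __shared_region_check_np() stores the base address of the task's
 * shared region in its out-parameter and returns 0; if no shared region
 * is mapped, it fails and the test expects errno to be ENOMEM.
 */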
extern int __shared_region_check_np(uint64_t *);

T_DECL(nested_pmap_trigger, "nested pmap should only be triggered from kernel \
    - rdar://problem/41481703",
    T_META_ALL_VALID_ARCHS(true))
{
    int ret;
    kern_return_t kr;
    mach_vm_address_t sr_start;
    mach_vm_size_t vmsize;
    mach_vm_address_t vmaddr;
    mach_port_t mem_entry;

    ret = __shared_region_check_np(&sr_start);
    if (ret != 0) {
        int saved_errno;
        saved_errno = errno;

        T_ASSERT_EQ(saved_errno, ENOMEM, "__shared_region_check_np() %d (%s)",
            saved_errno, strerror(saved_errno));
        T_END;
    }

    vmsize = PAGE_SIZE;
    kr = mach_make_memory_entry_64(mach_task_self(),
        &vmsize,
        sr_start,
        MAP_MEM_VM_SHARE | VM_PROT_READ,
        &mem_entry,
        MACH_PORT_NULL);
    T_ASSERT_MACH_SUCCESS(kr, "make_memory_entry(0x%llx)", sr_start);

    vmaddr = 0;
    kr = mach_vm_map(mach_task_self(),
        &vmaddr,
        vmsize,
        0,
        VM_FLAGS_ANYWHERE,
        mem_entry,
        0,
        FALSE,
        VM_PROT_READ,
        VM_PROT_READ,
        VM_INHERIT_DEFAULT);
    T_ASSERT_MACH_SUCCESS(kr, "vm_map()");
}

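/*
 * Walk every mapping in the shared region and attempt to vm_copy() it
 * onto itself: the copy must either fail with KERN_PROTECTION_FAILURE or,
 * if it succeeds, leave a top-level (depth 0) mapping with no execute
 * permission in its place.
 */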
T_DECL(copyoverwrite_submap_protection, "test copy-overwrite vm region submap \
    protection", T_META_ALL_VALID_ARCHS(true))
{
    kern_return_t kr;
    mach_vm_address_t vmaddr;
    mach_vm_size_t vmsize;
    natural_t depth;
    vm_region_submap_short_info_data_64_t region_info;
    mach_msg_type_number_t region_info_count;

    for (vmaddr = SHARED_REGION_BASE;
        vmaddr < SHARED_REGION_BASE + SHARED_REGION_SIZE;
        vmaddr += vmsize) {
        depth = 99;
        region_info_count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
        kr = mach_vm_region_recurse(mach_task_self(),
            &vmaddr,
            &vmsize,
            &depth,
            (vm_region_info_t) &region_info,
            &region_info_count);
        if (kr == KERN_INVALID_ADDRESS) {
            break;
        }
        T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse(0x%llx)", vmaddr);
        T_ASSERT_EQ(region_info_count,
            VM_REGION_SUBMAP_SHORT_INFO_COUNT_64,
            "vm_region_recurse(0x%llx) count = %d expected %d",
            vmaddr, region_info_count,
            VM_REGION_SUBMAP_SHORT_INFO_COUNT_64);

        T_LOG("--> region: vmaddr 0x%llx depth %d prot 0x%x/0x%x",
            vmaddr, depth, region_info.protection,
            region_info.max_protection);
        if (depth == 0) {
            /* not a submap mapping: next mapping */
            continue;
        }
        if (vmaddr >= SHARED_REGION_BASE + SHARED_REGION_SIZE) {
            break;
        }
        kr = mach_vm_copy(mach_task_self(),
            vmaddr,
            vmsize,
            vmaddr);
        if (kr == KERN_PROTECTION_FAILURE) {
            T_PASS("vm_copy(0x%llx,0x%llx) expected prot error 0x%x (%s)",
                vmaddr, vmsize, kr, mach_error_string(kr));
            continue;
        }
        T_ASSERT_MACH_SUCCESS(kr, "vm_copy(0x%llx,0x%llx) prot 0x%x",
            vmaddr, vmsize, region_info.protection);
        depth = 0;
        region_info_count = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64;
        kr = mach_vm_region_recurse(mach_task_self(),
            &vmaddr,
            &vmsize,
            &depth,
            (vm_region_info_t) &region_info,
            &region_info_count);
        T_ASSERT_MACH_SUCCESS(kr, "vm_region_recurse(0x%llx)", vmaddr);
        T_ASSERT_EQ(region_info_count,
            VM_REGION_SUBMAP_SHORT_INFO_COUNT_64,
            "vm_region_recurse() count = %d expected %d",
            region_info_count, VM_REGION_SUBMAP_SHORT_INFO_COUNT_64);

        T_ASSERT_EQ(depth, 0, "vm_region_recurse(0x%llx): depth = %d expected 0",
            vmaddr, depth);
        T_ASSERT_EQ((region_info.protection & VM_PROT_EXECUTE),
            0, "vm_region_recurse(0x%llx): prot 0x%x",
            vmaddr, region_info.protection);
    }
}

T_DECL(wire_text, "test wired text for rdar://problem/16783546 Wiring code in \
    the shared region triggers code-signing violations",
    T_META_ALL_VALID_ARCHS(true))
{
    char *addr;
    int retval;
    int saved_errno;
    kern_return_t kr;
    vm_address_t map_addr, remap_addr;
    vm_prot_t curprot, maxprot;

    addr = (char *)&printf;
#if __has_feature(ptrauth_calls)
    map_addr = (vm_address_t)(uintptr_t)ptrauth_strip(addr, ptrauth_key_function_pointer);
#else /* __has_feature(ptrauth_calls) */
    map_addr = (vm_address_t)(uintptr_t)addr;
#endif /* __has_feature(ptrauth_calls) */
    remap_addr = 0;
    kr = vm_remap(mach_task_self(), &remap_addr, 4096,
        0, /* mask */
        VM_FLAGS_ANYWHERE,
        mach_task_self(), map_addr,
        FALSE, /* copy */
        &curprot, &maxprot,
        VM_INHERIT_DEFAULT);
    T_ASSERT_EQ(kr, KERN_SUCCESS, "vm_remap error 0x%x (%s)",
        kr, mach_error_string(kr));
    retval = mlock(addr, 4096);
    if (retval != 0) {
        saved_errno = errno;
        T_ASSERT_EQ(saved_errno, EACCES, "wire shared text error %d (%s), expected: %d",
            saved_errno, strerror(saved_errno), EACCES);
    } else {
        T_PASS("wire shared text");
    }

    addr = (char *) &fprintf;
    retval = mlock(addr, 4096);
    if (retval != 0) {
        saved_errno = errno;
        T_ASSERT_EQ(saved_errno, EACCES, "wire shared text error %d (%s), expected: %d",
            saved_errno, strerror(saved_errno), EACCES);
    } else {
        T_PASS("wire shared text");
    }

    addr = (char *) &testmain_wire_text;
    retval = mlock(addr, 4096);
    if (retval != 0) {
        saved_errno = errno;
        T_ASSERT_EQ(saved_errno, EACCES, "wire text error return error %d (%s)",
            saved_errno, strerror(saved_errno));
    } else {
        T_PASS("wire text");
    }
}