1 /* Mach virtual memory unit tests
3 * The main goal of this code is to facilitate the construction,
4 * running, result logging and clean up of a test suite, taking care
5 * of all the scaffolding. A test suite is a sequence of very targeted
6 * unit tests, each running as a separate process to isolate its
8 * A unit test is abstracted as a unit_test_t structure, consisting of
9 * a test function and a logging identifier. A test suite is a suite_t
10 * structure, consisting of an unit_test_t array, fixture set up and
11 * tear down functions.
12 * Test suites are created dynamically. Each of its unit test runs in
13 * its own fork()d process, with the fixture set up and tear down
14 * running before and after each test. The parent process will log a
15 * pass result if the child exits normally, and a fail result in any
16 * other case (non-zero exit status, abnormal signal). The suite
17 * results are then aggregated and logged after the [SUMMARY] keyword,
18 * and finally the test suite is destroyed.
19 * The included test suites cover the Mach memory allocators,
20 * mach_vm_allocate() and mach_vm_map() with various options, and
21 * mach_vm_deallocate(), mach_vm_read(), mach_vm_write(),
22 * mach_vm_protect(), mach_vm_copy().
24 * Author: Renaud Dreyer (rdreyer@apple.com)
26 * Transformed to libdarwintest by Tristan Ye (tristan_ye@apple.com) */
28 #include <darwintest.h>
38 #include <mach/mach.h>
39 #include <mach/mach_init.h>
40 #include <mach/mach_vm.h>
41 #include <sys/sysctl.h>
44 T_GLOBAL_META(T_META_NAMESPACE("xnu.vm"));
46 /**************************/
47 /**************************/
48 /* Unit Testing Framework */
49 /**************************/
50 /**************************/
52 /*********************/
53 /* Private interface */
54 /*********************/
56 static const char frameworkname
[] = "vm_unitester";
58 /* Type for test, fixture set up and fixture tear down functions. */
59 typedef void (*test_fn_t
)();
61 /* Unit test structure. */
67 /* Test suite structure. */
77 int _expected_signal
= 0;
81 uintmax_t passed_tests
;
84 #define logr(format, ...) \
86 if (_quietness <= 1) { \
87 T_LOG(format, ## __VA_ARGS__); \
91 #define logv(format, ...) \
93 if (_quietness == 0) { \
94 T_LOG(format, ## __VA_ARGS__); \
99 create_suite(const char * name
, int numoftests
, test_fn_t set_up
, unit_test_t
* tests
, test_fn_t tear_down
)
101 suite_t
* suite
= (suite_t
*)malloc(sizeof(suite_t
));
102 T_QUIET
; T_WITH_ERRNO
; T_ASSERT_NOTNULL(suite
, "malloc()");
105 suite
->numoftests
= numoftests
;
106 suite
->set_up
= set_up
;
107 suite
->tests
= tests
;
108 suite
->tear_down
= tear_down
;
113 destroy_suite(suite_t
* suite
)
119 log_suite_info(suite_t
* suite
)
121 logr("[TEST] %s", suite
->name
);
122 logr("Number of tests: %d\n", suite
->numoftests
);
126 log_suite_results(suite_t
* suite
, int passed_tests
)
128 results
.numoftests
+= (uintmax_t)suite
->numoftests
;
129 results
.passed_tests
+= (uintmax_t)passed_tests
;
133 log_test_info(unit_test_t
* unit_test
, unsigned test_num
)
135 logr("[BEGIN] #%04d: %s", test_num
, unit_test
->name
);
139 log_test_result(unit_test_t
* unit_test
, boolean_t test_passed
, unsigned test_num
)
141 logr("[%s] #%04d: %s\n", test_passed
? "PASS" : "FAIL", test_num
, unit_test
->name
);
144 /* Run a test with fixture set up and teardown, while enforcing the
145 * time out constraint. */
147 run_test(suite_t
* suite
, unit_test_t
* unit_test
, unsigned test_num
)
149 log_test_info(unit_test
, test_num
);
156 /* Check a child return status. */
158 child_terminated_normally(int child_status
)
160 boolean_t normal_exit
= FALSE
;
162 if (WIFEXITED(child_status
)) {
163 int exit_status
= WEXITSTATUS(child_status
);
165 T_LOG("Child process unexpectedly exited with code %d.",
167 } else if (!_expected_signal
) {
170 } else if (WIFSIGNALED(child_status
)) {
171 int signal
= WTERMSIG(child_status
);
172 if (signal
== _expected_signal
||
173 (_expected_signal
== -1 && (signal
== SIGBUS
|| signal
== SIGSEGV
))) {
174 if (_quietness
<= 0) {
175 T_LOG("Child process died with expected signal "
180 T_LOG("Child process unexpectedly died with signal %d.",
184 T_LOG("Child process unexpectedly did not exit nor die");
190 /* Run a test in its own process, and report the result. */
192 child_test_passed(suite_t
* suite
, unit_test_t
* unit_test
)
195 static unsigned test_num
= 0;
199 pid_t test_pid
= fork();
200 T_QUIET
; T_ASSERT_POSIX_SUCCESS(test_pid
, "fork()");
202 run_test(suite
, unit_test
, test_num
);
205 while (waitpid(test_pid
, &test_status
, 0) != test_pid
) {
208 boolean_t test_result
= child_terminated_normally(test_status
);
209 log_test_result(unit_test
, test_result
, test_num
);
213 /* Run each test in a suite, and report the results. */
215 count_passed_suite_tests(suite_t
* suite
)
217 int passed_tests
= 0;
220 for (i
= 0; i
< suite
->numoftests
; i
++) {
221 passed_tests
+= child_test_passed(suite
, &(suite
->tests
[i
]));
226 /********************/
227 /* Public interface */
228 /********************/
230 #define DEFAULT_QUIETNESS 0 /* verbose */
231 #define RESULT_ERR_QUIETNESS 1 /* result and error */
232 #define ERROR_ONLY_QUIETNESS 2 /* error only */
234 #define run_suite(set_up, tests, tear_down, ...) \
235 _run_suite((sizeof(tests) / sizeof(tests[0])), (set_up), (tests), (tear_down), __VA_ARGS__)
237 typedef unit_test_t UnitTests
[];
239 void _run_suite(int numoftests
, test_fn_t set_up
, UnitTests tests
, test_fn_t tear_down
, const char * format
, ...)
243 _run_suite(int numoftests
, test_fn_t set_up
, UnitTests tests
, test_fn_t tear_down
, const char * format
, ...)
248 va_start(ap
, format
);
249 T_QUIET
; T_ASSERT_POSIX_SUCCESS(vasprintf(&name
, format
, ap
), "vasprintf()");
251 suite_t
* suite
= create_suite(name
, numoftests
, set_up
, tests
, tear_down
);
252 log_suite_info(suite
);
253 log_suite_results(suite
, count_passed_suite_tests(suite
));
255 destroy_suite(suite
);
258 /* Setters and getters for various test framework global
259 * variables. Should only be used outside of the test, set up and tear
262 /* Expected signal for a test, default is 0. */
264 set_expected_signal(int signal
)
266 _expected_signal
= signal
;
270 get_expected_signal()
272 return _expected_signal
;
275 /* Logging verbosity. */
277 set_quietness(int value
)
288 /* For fixture set up and tear down functions, and units tests. */
295 log_aggregated_results()
297 T_LOG("[SUMMARY] Aggregated Test Results\n");
298 T_LOG("Total: %ju", results
.numoftests
);
299 T_LOG("Passed: %ju", results
.passed_tests
);
300 T_LOG("Failed: %ju\n", results
.numoftests
- results
.passed_tests
);
302 T_QUIET
; T_ASSERT_EQ(results
.passed_tests
, results
.numoftests
,
303 "%d passed of total %d tests",
304 results
.passed_tests
, results
.numoftests
);
307 /*******************************/
308 /*******************************/
309 /* Virtual memory unit testing */
310 /*******************************/
311 /*******************************/
315 * 1: fail, generic unexpected failure
316 * 2: fail, unexpected Mach return value
317 * 3: fail, time out */
319 #define DEFAULT_VM_SIZE ((mach_vm_size_t)(1024ULL * 4096ULL))
321 #define POINTER(address) ((char *)(uintptr_t)(address))
322 #define MACH_VM_ADDRESS_T(address) (*((mach_vm_address_t *)(uintptr_t)(address)))
324 static int vm_address_size
= sizeof(mach_vm_address_t
);
326 static char *progname
= "";
328 /*************************/
329 /* xnu version functions */
330 /*************************/
332 /* Find the xnu version string. */
339 mib
[1] = KERN_VERSION
;
342 T_ASSERT_POSIX_SUCCESS(sysctl(mib
, 2, NULL
, &length
, NULL
, 0), "sysctl()");
343 char * version
= (char *)malloc(length
);
346 T_ASSERT_NOTNULL(version
, "malloc()");
348 T_EXPECT_POSIX_SUCCESS(sysctl(mib
, 2, version
, &length
, NULL
, 0), "sysctl()");
349 if (T_RESULT
== T_RESULT_FAIL
) {
353 char * xnu_string
= strstr(version
, "xnu-");
356 T_ASSERT_NOTNULL(xnu_string
, "%s: error finding xnu version string.", progname
);
360 /* Find the xnu major version number. */
365 char * xnu_substring
= xnu_version_string() + 4;
368 unsigned int xnu_version
= strtoul(xnu_substring
, &endptr
, 0);
370 T_ASSERT_TRUE((errno
!= ERANGE
&& endptr
!= xnu_substring
),
371 "%s: error finding xnu major version number.", progname
);
375 /*************************/
376 /* Mach assert functions */
377 /*************************/
380 assert_mach_return(kern_return_t kr
, kern_return_t expected_kr
, const char * mach_routine
)
382 T_QUIET
; T_ASSERT_EQ(kr
, expected_kr
,
383 "%s unexpectedly returned: %s."
384 "Should have returned: %s.",
385 mach_routine
, mach_error_string(kr
),
386 mach_error_string(expected_kr
));
389 /*******************************/
390 /* Arrays for test suite loops */
391 /*******************************/
393 /* Memory allocators */
394 typedef kern_return_t (*allocate_fn_t
)(vm_map_t
, mach_vm_address_t
*, mach_vm_size_t
, int);
398 * Remember any pre-reserved fixed address, which needs to be released prior to an allocation.
400 static mach_vm_address_t fixed_vm_address
= 0x0;
401 static mach_vm_size_t fixed_vm_size
= 0;
404 void assert_deallocate_success(mach_vm_address_t address
, mach_vm_size_t size
);
407 * If trying to allocate at a fixed address, we need to do the delayed deallocate first.
410 check_fixed_address(mach_vm_address_t
*address
, mach_vm_size_t size
)
412 if (fixed_vm_address
!= 0 &&
413 fixed_vm_address
<= *address
&&
414 *address
+ size
<= fixed_vm_address
+ fixed_vm_size
) {
415 assert_deallocate_success(fixed_vm_address
, fixed_vm_size
);
416 fixed_vm_address
= 0;
422 wrapper_mach_vm_allocate(vm_map_t map
, mach_vm_address_t
* address
, mach_vm_size_t size
, int flags
)
424 check_fixed_address(address
, size
);
425 return mach_vm_allocate(map
, address
, size
, flags
);
429 wrapper_mach_vm_map(vm_map_t map
, mach_vm_address_t
* address
, mach_vm_size_t size
, int flags
)
431 check_fixed_address(address
, size
);
432 return mach_vm_map(map
, address
, size
, (mach_vm_offset_t
)0, flags
, MACH_PORT_NULL
, (memory_object_offset_t
)0, FALSE
,
433 VM_PROT_DEFAULT
, VM_PROT_ALL
, VM_INHERIT_DEFAULT
);
436 /* Should have the same behavior as when mask is zero. */
438 wrapper_mach_vm_map_4kB(vm_map_t map
, mach_vm_address_t
* address
, mach_vm_size_t size
, int flags
)
440 check_fixed_address(address
, size
);
441 return mach_vm_map(map
, address
, size
, (mach_vm_offset_t
)0xFFF, flags
, MACH_PORT_NULL
, (memory_object_offset_t
)0, FALSE
,
442 VM_PROT_DEFAULT
, VM_PROT_ALL
, VM_INHERIT_DEFAULT
);
446 wrapper_mach_vm_map_2MB(vm_map_t map
, mach_vm_address_t
* address
, mach_vm_size_t size
, int flags
)
448 check_fixed_address(address
, size
);
449 return mach_vm_map(map
, address
, size
, (mach_vm_offset_t
)0x1FFFFF, flags
, MACH_PORT_NULL
, (memory_object_offset_t
)0, FALSE
,
450 VM_PROT_DEFAULT
, VM_PROT_ALL
, VM_INHERIT_DEFAULT
);
454 memory_entry(mach_vm_size_t
* size
)
456 mach_port_t object_handle
= MACH_PORT_NULL
;
457 mach_vm_size_t original_size
= *size
;
459 T_QUIET
; T_ASSERT_MACH_SUCCESS(mach_make_memory_entry_64(mach_task_self(), size
, (memory_object_offset_t
)0,
460 (MAP_MEM_NAMED_CREATE
| VM_PROT_ALL
), &object_handle
, 0),
461 "mach_make_memory_entry_64()");
462 T_QUIET
; T_ASSERT_EQ(*size
, round_page_kernel(original_size
),
463 "mach_make_memory_entry_64() unexpectedly returned a named "
464 "entry of size 0x%jx (%ju).\n"
465 "Should have returned a "
466 "named entry of size 0x%jx (%ju).",
467 (uintmax_t)*size
, (uintmax_t)*size
, (uintmax_t)original_size
, (uintmax_t)original_size
);
468 return object_handle
;
472 wrapper_mach_vm_map_named_entry(vm_map_t map
, mach_vm_address_t
* address
, mach_vm_size_t size
, int flags
)
474 mach_port_t object_handle
= memory_entry(&size
);
475 check_fixed_address(address
, size
);
476 kern_return_t kr
= mach_vm_map(map
, address
, size
, (mach_vm_offset_t
)0, flags
, object_handle
, (memory_object_offset_t
)0, FALSE
,
477 VM_PROT_DEFAULT
, VM_PROT_ALL
, VM_INHERIT_DEFAULT
);
478 T_QUIET
; T_ASSERT_MACH_SUCCESS(mach_port_deallocate(mach_task_self(), object_handle
), "mach_port_deallocate()");
483 allocate_fn_t allocate
;
484 const char * description
;
486 {wrapper_mach_vm_allocate
, "mach_vm_allocate()"},
487 {wrapper_mach_vm_map
, "mach_vm_map() (zero mask)"},
488 {wrapper_mach_vm_map_4kB
,
490 "(4 kB address alignment)"},
491 {wrapper_mach_vm_map_2MB
,
493 "(2 MB address alignment)"},
494 {wrapper_mach_vm_map_named_entry
,
495 "mach_vm_map() (named "
496 "entry, zero mask)"},
498 static int numofallocators
= sizeof(allocators
) / sizeof(allocators
[0]);
499 static int allocators_idx
;
500 enum { MACH_VM_ALLOCATE
, MACH_VM_MAP
, MACH_VM_MAP_4kB
, MACH_VM_MAP_2MB
, MACH_VM_MAP_NAMED_ENTRY
};
505 const char * description
;
507 {DEFAULT_VM_SIZE
, "default/input"},
509 {4096ULL, "aligned"},
511 {4095ULL, "unaligned"},
512 {4097ULL, "unaligned"},
514 static int numofsizes
= sizeof(vm_sizes
) / sizeof(vm_sizes
[0]);
515 static int sizes_idx
;
516 static int buffer_sizes_idx
;
517 enum { DEFAULT_INPUT
, ZERO_BYTES
, ONE_PAGE
, ONE_BYTE
, ONE_PAGE_MINUS_ONE_BYTE
, ONE_PAGE_AND_ONE_BYTE
};
519 /* Unspecified/fixed address */
522 const char * description
;
523 } address_flags
[] = {
524 {VM_FLAGS_ANYWHERE
, "unspecified"}, {VM_FLAGS_FIXED
, "fixed"},
526 static int numofflags
= sizeof(address_flags
) / sizeof(address_flags
[0]);
527 static int flags_idx
;
528 enum { ANYWHERE
, FIXED
};
530 /* Address alignment */
533 const char * description
;
534 } address_alignments
[] = {
535 {TRUE
, " aligned"}, {FALSE
, " unaligned"},
537 static int numofalignments
= sizeof(address_alignments
) / sizeof(*address_alignments
);
538 static int alignments_idx
;
539 enum { ALIGNED
, UNALIGNED
};
544 const char * description
;
545 } buffer_offsets
[] = {
546 {0, ""}, {1, ""}, {2, ""},
548 static int numofoffsets
= sizeof(buffer_offsets
) / sizeof(buffer_offsets
[0]);
549 static int offsets_idx
;
550 enum { ZERO
, ONE
, TWO
};
552 /* mach_vm_copy() post actions */
553 enum { VMCOPY_MODIFY_SRC
, VMCOPY_MODIFY_DST
, VMCOPY_MODIFY_SHARED_COPIED
};
557 const char * description
;
558 } vmcopy_actions
[] = {
559 {VMCOPY_MODIFY_SRC
, "modify vm_copy() source"},
560 {VMCOPY_MODIFY_DST
, "modify vm_copy() destination"},
561 {VMCOPY_MODIFY_SHARED_COPIED
,
562 "modify vm_copy source's shared "
563 "or copied from/to region"},
565 static int numofvmcopyactions
= sizeof(vmcopy_actions
) / sizeof(vmcopy_actions
[0]);
566 static int vmcopy_action_idx
;
568 /************************************/
569 /* Setters and getters for fixtures */
570 /************************************/
572 /* Allocation memory range. */
573 static allocate_fn_t _allocator
= wrapper_mach_vm_allocate
;
574 static mach_vm_size_t _vm_size
= DEFAULT_VM_SIZE
;
575 static int _address_flag
= VM_FLAGS_ANYWHERE
;
576 static boolean_t _address_alignment
= TRUE
;
577 static mach_vm_address_t _vm_address
= 0x0;
579 /* Buffer for mach_vm_write(). */
580 static mach_vm_size_t _buffer_size
= DEFAULT_VM_SIZE
;
581 static mach_vm_address_t _buffer_address
= 0x0;
582 static int _buffer_offset
= 0;
584 /* Post action for mach_vm_copy(). */
585 static int _vmcopy_post_action
= VMCOPY_MODIFY_SRC
;
588 set_allocator(allocate_fn_t allocate
)
590 _allocator
= allocate
;
600 set_vm_size(mach_vm_size_t size
)
605 static mach_vm_size_t
612 set_address_flag(int flag
)
614 _address_flag
= flag
;
620 return _address_flag
;
624 set_address_alignment(boolean_t alignment
)
626 _address_alignment
= alignment
;
630 get_address_alignment()
632 return _address_alignment
;
636 set_vm_address(mach_vm_address_t address
)
638 _vm_address
= address
;
641 static mach_vm_address_t
648 set_buffer_size(mach_vm_size_t size
)
653 static mach_vm_size_t
660 set_buffer_address(mach_vm_address_t address
)
662 _buffer_address
= address
;
665 static mach_vm_address_t
668 return _buffer_address
;
672 set_buffer_offset(int offset
)
674 _buffer_offset
= offset
;
680 return _buffer_offset
;
684 set_vmcopy_post_action(int action
)
686 _vmcopy_post_action
= action
;
690 get_vmcopy_post_action()
692 return _vmcopy_post_action
;
695 /*******************************/
696 /* Usage and option processing */
697 /*******************************/
698 static boolean_t flag_run_allocate_test
= FALSE
;
699 static boolean_t flag_run_deallocate_test
= FALSE
;
700 static boolean_t flag_run_read_test
= FALSE
;
701 static boolean_t flag_run_write_test
= FALSE
;
702 static boolean_t flag_run_protect_test
= FALSE
;
703 static boolean_t flag_run_copy_test
= FALSE
;
705 #define VM_TEST_ALLOCATE 0x00000001
706 #define VM_TEST_DEALLOCATE 0x00000002
707 #define VM_TEST_READ 0x00000004
708 #define VM_TEST_WRITE 0x00000008
709 #define VM_TEST_PROTECT 0x00000010
710 #define VM_TEST_COPY 0x00000020
712 typedef struct test_option
{
715 mach_vm_size_t to_vmsize
;
718 typedef struct test_info
{
723 static test_option_t test_options
;
725 enum {ALLOCATE
= 0, DEALLOCATE
, READ
, WRITE
, PROTECT
, COPY
};
727 static test_info_t test_info
[] = {
728 {"allocate", &flag_run_allocate_test
},
729 {"deallocate", &flag_run_deallocate_test
},
730 {"read", &flag_run_read_test
},
731 {"write", &flag_run_write_test
},
732 {"protect", &flag_run_protect_test
},
733 {"copy", &flag_run_copy_test
},
738 die_on_invalid_value(int condition
, const char * value_string
)
741 T_ASSERT_EQ(condition
, 0, "%s: invalid value: %s.",
742 progname
, value_string
);
746 process_options(test_option_t options
)
750 setvbuf(stdout
, NULL
, _IONBF
, 0);
752 set_vm_size(DEFAULT_VM_SIZE
);
753 set_quietness(DEFAULT_QUIETNESS
);
755 if (NULL
!= getenv("LTERDOS")) {
756 logr("LTERDOS=YES this is LeanTestEnvironment\nIncreasing quietness by 1.");
757 set_quietness(get_quietness() + 1);
759 if (options
.to_quietness
> 0) {
760 set_quietness(options
.to_quietness
);
764 if (options
.to_vmsize
!= 0) {
765 vm_sizes
[0].size
= options
.to_vmsize
;
768 if (options
.to_flags
== 0) {
769 for (tp
= test_info
; tp
->ti_name
!= NULL
; ++tp
) {
773 if (options
.to_flags
& VM_TEST_ALLOCATE
) {
774 *(test_info
[ALLOCATE
].ti_flag
) = TRUE
;
777 if (options
.to_flags
& VM_TEST_DEALLOCATE
) {
778 *(test_info
[DEALLOCATE
].ti_flag
) = TRUE
;
781 if (options
.to_flags
& VM_TEST_READ
) {
782 *(test_info
[READ
].ti_flag
) = TRUE
;
785 if (options
.to_flags
& VM_TEST_WRITE
) {
786 *(test_info
[WRITE
].ti_flag
) = TRUE
;
789 if (options
.to_flags
& VM_TEST_PROTECT
) {
790 *(test_info
[PROTECT
].ti_flag
) = TRUE
;
793 if (options
.to_flags
& VM_TEST_COPY
) {
794 *(test_info
[COPY
].ti_flag
) = TRUE
;
803 /* Find the allocator address alignment mask. */
807 mach_vm_address_t mask
;
809 if (get_allocator() == wrapper_mach_vm_map_2MB
) {
810 mask
= (mach_vm_address_t
)0x1FFFFF;
812 mask
= vm_page_size
- 1;
817 /* Find the size of the smallest aligned region containing a given
820 aligned_size(mach_vm_address_t address
, mach_vm_size_t size
)
822 return round_page_kernel(address
- mach_vm_trunc_page(address
) + size
);
825 /********************/
826 /* Assert functions */
827 /********************/
829 /* Address is aligned on allocator boundary. */
831 assert_aligned_address(mach_vm_address_t address
)
833 T_QUIET
; T_ASSERT_EQ((address
& get_mask()), 0,
834 "Address 0x%jx is unexpectedly "
839 /* Address is truncated to allocator boundary. */
841 assert_trunc_address(mach_vm_address_t address
, mach_vm_address_t trunc_address
)
843 T_QUIET
; T_ASSERT_EQ(trunc_address
, (address
& ~get_mask()),
845 "0x%jx is unexpectedly not truncated to address 0x%jx.",
846 (uintmax_t)address
, (uintmax_t)trunc_address
);
850 assert_address_value(mach_vm_address_t address
, mach_vm_address_t marker
)
852 /* this assert is used so frequently so that we simply judge on
853 * its own instead of leaving this to LD macro for efficiency
855 if (MACH_VM_ADDRESS_T(address
) != marker
) {
856 T_ASSERT_FAIL("Address 0x%jx unexpectedly has value 0x%jx, "
857 "instead of 0x%jx.", (uintmax_t)address
,
858 (uintmax_t)MACH_VM_ADDRESS_T(address
), (uintmax_t)marker
);
863 assert_allocate_return(mach_vm_address_t
* address
, mach_vm_size_t size
, int address_flag
, kern_return_t expected_kr
)
865 assert_mach_return(get_allocator()(mach_task_self(), address
, size
, address_flag
), expected_kr
, "Allocator");
869 assert_allocate_success(mach_vm_address_t
* address
, mach_vm_size_t size
, int address_flag
)
871 assert_allocate_return(address
, size
, address_flag
, KERN_SUCCESS
);
875 assert_deallocate_return(mach_vm_address_t address
, mach_vm_size_t size
, kern_return_t expected_kr
)
877 assert_mach_return(mach_vm_deallocate(mach_task_self(), address
, size
), expected_kr
, "mach_vm_deallocate()");
881 assert_deallocate_success(mach_vm_address_t address
, mach_vm_size_t size
)
883 assert_deallocate_return(address
, size
, KERN_SUCCESS
);
887 assert_read_return(mach_vm_address_t address
,
890 mach_msg_type_number_t
* data_size
,
891 kern_return_t expected_kr
)
893 assert_mach_return(mach_vm_read(mach_task_self(), address
, size
, data
, data_size
), expected_kr
, "mach_vm_read()");
897 assert_read_success(mach_vm_address_t address
, mach_vm_size_t size
, vm_offset_t
* data
, mach_msg_type_number_t
* data_size
)
899 assert_read_return(address
, size
, data
, data_size
, KERN_SUCCESS
);
900 T_QUIET
; T_ASSERT_EQ(*data_size
, size
,
901 "Returned buffer size 0x%jx "
902 "(%ju) is unexpectedly different from source size 0x%jx "
904 (uintmax_t)*data_size
, (uintmax_t)*data_size
, (uintmax_t)size
, (uintmax_t)size
);
908 assert_write_return(mach_vm_address_t address
, vm_offset_t data
, mach_msg_type_number_t data_size
, kern_return_t expected_kr
)
910 assert_mach_return(mach_vm_write(mach_task_self(), address
, data
, data_size
), expected_kr
, "mach_vm_write()");
914 assert_write_success(mach_vm_address_t address
, vm_offset_t data
, mach_msg_type_number_t data_size
)
916 assert_write_return(address
, data
, data_size
, KERN_SUCCESS
);
920 assert_allocate_copy_return(mach_vm_address_t source
, mach_vm_size_t size
, mach_vm_address_t
* dest
, kern_return_t expected_kr
)
922 assert_allocate_success(dest
, size
, VM_FLAGS_ANYWHERE
);
923 assert_mach_return(mach_vm_copy(mach_task_self(), source
, size
, *dest
), expected_kr
, "mach_vm_copy()");
926 assert_allocate_copy_success(mach_vm_address_t source
, mach_vm_size_t size
, mach_vm_address_t
* dest
)
928 assert_allocate_copy_return(source
, size
, dest
, KERN_SUCCESS
);
932 assert_copy_return(mach_vm_address_t source
, mach_vm_size_t size
, mach_vm_address_t dest
, kern_return_t expected_kr
)
934 assert_mach_return(mach_vm_copy(mach_task_self(), source
, size
, dest
), expected_kr
, "mach_vm_copy()");
938 assert_copy_success(mach_vm_address_t source
, mach_vm_size_t size
, mach_vm_address_t dest
)
940 assert_copy_return(source
, size
, dest
, KERN_SUCCESS
);
943 /*******************/
944 /* Memory patterns */
945 /*******************/
947 typedef boolean_t (*address_filter_t
)(mach_vm_address_t
);
948 typedef void (*address_action_t
)(mach_vm_address_t
, mach_vm_address_t
);
950 /* Map over a memory region pattern and its complement, through a
951 * (possibly reversed) boolean filter and a starting value. */
953 filter_addresses_do_else(address_filter_t filter
,
955 mach_vm_address_t address
,
957 address_action_t if_action
,
958 address_action_t else_action
,
959 mach_vm_address_t start_value
)
962 for (i
= 0; i
+ vm_address_size
< size
; i
+= vm_address_size
) {
963 if (filter(address
+ i
) != reversed
) {
964 if_action(address
+ i
, start_value
+ i
);
966 else_action(address
+ i
, start_value
+ i
);
971 /* Various pattern actions. */
973 no_action(mach_vm_address_t i
, mach_vm_address_t value
)
978 read_zero(mach_vm_address_t i
, mach_vm_address_t value
)
980 assert_address_value(i
, 0);
984 verify_address(mach_vm_address_t i
, mach_vm_address_t value
)
986 assert_address_value(i
, value
);
990 write_address(mach_vm_address_t i
, mach_vm_address_t value
)
992 MACH_VM_ADDRESS_T(i
) = value
;
995 /* Various patterns. */
997 empty(mach_vm_address_t i
)
1003 checkerboard(mach_vm_address_t i
)
1005 return !((i
/ vm_address_size
) & 0x1);
1009 page_ends(mach_vm_address_t i
)
1011 mach_vm_address_t residue
= i
% vm_page_size
;
1013 return residue
== 0 || residue
== vm_page_size
- vm_address_size
;
1016 /*************************************/
1017 /* Global variables set up functions */
1018 /*************************************/
1023 T_QUIET
; T_ASSERT_TRUE(allocators_idx
>= 0 && allocators_idx
< numofallocators
, "Invalid allocators[] index: %d.", allocators_idx
);
1024 set_allocator(allocators
[allocators_idx
].allocate
);
1027 /* Find a fixed allocatable address by retrieving the address
1028 * populated by mach_vm_allocate() with VM_FLAGS_ANYWHERE. */
1030 get_fixed_address(mach_vm_size_t size
)
1032 /* mach_vm_map() starts looking for an address at 0x0. */
1033 mach_vm_address_t address
= 0x0;
1036 * The tests seem to have some funky off by one allocations. To avoid problems, we'll bump anything
1037 * non-zero to have at least an extra couple pages.
1040 size
= round_page_kernel(size
+ 2 * vm_page_size
);
1043 assert_allocate_success(&address
, size
, VM_FLAGS_ANYWHERE
);
1046 * Keep the memory allocated, otherwise the logv()/printf() activity sprinkled in these tests can
1047 * cause malloc() to use the desired range and tests will randomly fail. The allocate routines will
1048 * do the delayed vm_deallocate() to free the fixed memory just before allocation testing in the wrapper.
1050 T_QUIET
; T_ASSERT_EQ(fixed_vm_address
, 0, "previous fixed address not used");
1051 T_QUIET
; T_ASSERT_EQ(fixed_vm_size
, 0, "previous fixed size not used");
1052 fixed_vm_address
= address
;
1053 fixed_vm_size
= size
;
1055 assert_aligned_address(address
);
1059 /* If needed, find an address at which a region of the specified size
1060 * can be allocated. Otherwise, set the address to 0x0. */
1062 set_up_vm_address(mach_vm_size_t size
)
1064 T_QUIET
; T_ASSERT_TRUE(flags_idx
>= 0 && flags_idx
< numofflags
, "Invalid address_flags[] index: %d.", flags_idx
);
1065 T_QUIET
; T_ASSERT_TRUE(alignments_idx
>= 0 && alignments_idx
< numofalignments
, "Invalid address_alignments[] index: %d.", alignments_idx
);
1066 set_address_flag(address_flags
[flags_idx
].flag
);
1067 set_address_alignment(address_alignments
[alignments_idx
].alignment
);
1069 if (!(get_address_flag() & VM_FLAGS_ANYWHERE
)) {
1070 boolean_t aligned
= get_address_alignment();
1072 "Looking for fixed %saligned address for allocation "
1073 "of 0x%jx (%ju) byte%s...",
1074 aligned
? "" : "un", (uintmax_t)size
, (uintmax_t)size
, (size
== 1) ? "" : "s");
1075 mach_vm_address_t address
= get_fixed_address(size
);
1079 set_vm_address(address
);
1080 logv("Found %saligned fixed address 0x%jx.", aligned
? "" : "un", (uintmax_t)address
);
1082 /* mach_vm_map() with VM_FLAGS_ANYWHERE starts looking for
1083 * an address at the one supplied and goes up, without
1084 * wrapping around. */
1085 set_vm_address(0x0);
1092 T_QUIET
; T_ASSERT_TRUE(sizes_idx
>= 0 && sizes_idx
< numofsizes
, "Invalid vm_sizes[] index: %d.", sizes_idx
);
1093 set_vm_size(vm_sizes
[sizes_idx
].size
);
1097 set_up_buffer_size()
1099 T_QUIET
; T_ASSERT_TRUE(buffer_sizes_idx
>= 0 && buffer_sizes_idx
< numofsizes
, "Invalid vm_sizes[] index: %d.", buffer_sizes_idx
);
1100 set_buffer_size(vm_sizes
[buffer_sizes_idx
].size
);
1104 set_up_buffer_offset()
1106 T_QUIET
; T_ASSERT_TRUE(offsets_idx
>= 0 && offsets_idx
< numofoffsets
, "Invalid buffer_offsets[] index: %d.", offsets_idx
);
1107 set_buffer_offset(buffer_offsets
[offsets_idx
].offset
);
1111 set_up_vmcopy_action()
1113 T_QUIET
; T_ASSERT_TRUE(vmcopy_action_idx
>= 0 && vmcopy_action_idx
< numofvmcopyactions
, "Invalid vmcopy_actions[] index: %d.",
1115 set_vmcopy_post_action(vmcopy_actions
[vmcopy_action_idx
].action
);
1119 set_up_allocator_and_vm_size()
1126 set_up_vm_variables()
1129 set_up_vm_address(get_vm_size());
1133 set_up_allocator_and_vm_variables()
1136 set_up_vm_variables();
1140 set_up_buffer_variables()
1142 set_up_buffer_size();
1143 set_up_buffer_offset();
1147 set_up_copy_shared_mode_variables()
1149 set_up_vmcopy_action();
1152 /*******************************/
1153 /* Allocation set up functions */
1154 /*******************************/
1156 /* Allocate VM region of given size. */
1158 allocate(mach_vm_size_t size
)
1160 mach_vm_address_t address
= get_vm_address();
1161 int flag
= get_address_flag();
1163 logv("Allocating 0x%jx (%ju) byte%s", (uintmax_t)size
, (uintmax_t)size
, (size
== 1) ? "" : "s");
1164 if (!(flag
& VM_FLAGS_ANYWHERE
)) {
1165 logv(" at address 0x%jx", (uintmax_t)address
);
1168 assert_allocate_success(&address
, size
, flag
);
1170 "Memory of rounded size 0x%jx (%ju) allocated at "
1172 (uintmax_t)round_page_kernel(size
), (uintmax_t)round_page_kernel(size
), (uintmax_t)address
);
1173 /* Fixed allocation address is truncated to the allocator
1175 if (!(flag
& VM_FLAGS_ANYWHERE
)) {
1176 mach_vm_address_t old_address
= get_vm_address();
1177 assert_trunc_address(old_address
, address
);
1179 "Address 0x%jx is correctly truncated to allocated "
1181 (uintmax_t)old_address
, (uintmax_t)address
);
1183 set_vm_address(address
);
1187 allocate_buffer(mach_vm_size_t buffer_size
)
1189 mach_vm_address_t data
= 0x0;
1191 logv("Allocating 0x%jx (%ju) byte%s...", (uintmax_t)buffer_size
, (uintmax_t)buffer_size
, (buffer_size
== 1) ? "" : "s");
1192 assert_allocate_success(&data
, buffer_size
, VM_FLAGS_ANYWHERE
);
1194 "Memory of rounded size 0x%jx (%ju) allocated at "
1196 (uintmax_t)round_page_kernel(buffer_size
), (uintmax_t)round_page_kernel(buffer_size
), (uintmax_t)data
);
1197 data
+= get_buffer_offset();
1198 T_QUIET
; T_ASSERT_EQ((vm_offset_t
)data
, data
,
1200 "unexpectedly overflows to 0x%jx when cast as "
1201 "vm_offset_t type.",
1202 (uintmax_t)data
, (uintmax_t)(vm_offset_t
)data
);
1203 set_buffer_address(data
);
1206 /****************************************************/
1207 /* Global variables and allocation set up functions */
1208 /****************************************************/
1211 set_up_vm_variables_and_allocate()
1213 set_up_vm_variables();
1214 allocate(get_vm_size());
1218 set_up_allocator_and_vm_variables_and_allocate()
1221 set_up_vm_variables_and_allocate();
1225 set_up_vm_variables_and_allocate_extra_page()
1228 /* Increment the size to insure we get an extra allocated page
1229 * for unaligned start addresses. */
1230 mach_vm_size_t allocation_size
= get_vm_size() + 1;
1231 set_up_vm_address(allocation_size
);
1233 allocate(allocation_size
);
1234 /* In the fixed unaligned address case, restore the returned
1235 * (truncated) allocation address to its unaligned value. */
1236 if (!get_address_alignment()) {
1237 set_vm_address(get_vm_address() + 1);
1242 set_up_buffer_variables_and_allocate_extra_page()
1244 set_up_buffer_variables();
1245 /* Increment the size to insure we get an extra allocated page
1246 * for unaligned start addresses. */
1247 allocate_buffer(get_buffer_size() + get_buffer_offset());
1250 /* Allocate some destination and buffer memory for subsequent
1251 * writing, including extra pages for non-aligned start addresses. */
1253 set_up_vm_and_buffer_variables_allocate_for_writing()
1255 set_up_vm_variables_and_allocate_extra_page();
1256 set_up_buffer_variables_and_allocate_extra_page();
1259 /* Allocate some destination and source regions for subsequent
1260 * copying, including extra pages for non-aligned start addresses. */
1262 set_up_vm_and_buffer_variables_allocate_for_copying()
1264 set_up_vm_and_buffer_variables_allocate_for_writing();
1267 /************************************/
1268 /* Deallocation tear down functions */
1269 /************************************/
1272 deallocate_range(mach_vm_address_t address
, mach_vm_size_t size
)
1274 logv("Deallocating 0x%jx (%ju) byte%s at address 0x%jx...", (uintmax_t)size
, (uintmax_t)size
, (size
== 1) ? "" : "s",
1275 (uintmax_t)address
);
1276 assert_deallocate_success(address
, size
);
1282 deallocate_range(get_vm_address(), get_vm_size());
1285 /* Deallocate source memory, including the extra page for unaligned
1286 * start addresses. */
1288 deallocate_extra_page()
1290 /* Set the address and size to their original allocation
1292 deallocate_range(mach_vm_trunc_page(get_vm_address()), get_vm_size() + 1);
1295 /* Deallocate buffer and destination memory for mach_vm_write(),
1296 * including the extra page for unaligned start addresses. */
1298 deallocate_vm_and_buffer()
1300 deallocate_range(mach_vm_trunc_page(get_vm_address()), get_vm_size() + 1);
1301 deallocate_range(mach_vm_trunc_page(get_buffer_address()), get_buffer_size() + get_buffer_offset());
1304 /***********************************/
1305 /* mach_vm_read() set up functions */
1306 /***********************************/
1308 /* Read the source memory into a buffer, deallocate the source, set
1309 * the global address and size from the buffer's. */
1313 mach_vm_size_t size
= get_vm_size();
1314 mach_vm_address_t address
= get_vm_address();
1315 vm_offset_t read_address
;
1316 mach_msg_type_number_t read_size
;
1318 logv("Reading 0x%jx (%ju) byte%s at address 0x%jx...", (uintmax_t)size
, (uintmax_t)size
, (size
== 1) ? "" : "s",
1319 (uintmax_t)address
);
1320 assert_read_success(address
, size
, &read_address
, &read_size
);
1322 "Memory of size 0x%jx (%ju) read into buffer of "
1324 (uintmax_t)read_size
, (uintmax_t)read_size
, (uintmax_t)read_address
);
1325 /* Deallocate the originally allocated memory, including the
1326 * extra allocated page in
1327 * set_up_vm_variables_and_allocate_extra_page(). */
1328 deallocate_range(mach_vm_trunc_page(address
), size
+ 1);
1330 /* Promoting to mach_vm types after checking for overflow, and
1331 * setting the global address from the buffer's. */
1332 T_QUIET
; T_ASSERT_EQ((mach_vm_address_t
)read_address
, read_address
,
1333 "Address 0x%jx unexpectedly overflows to 0x%jx when cast "
1334 "as mach_vm_address_t type.",
1335 (uintmax_t)read_address
, (uintmax_t)(mach_vm_address_t
)read_address
);
1336 T_QUIET
; T_ASSERT_EQ((mach_vm_size_t
)read_size
, read_size
,
1337 "Size 0x%jx (%ju) unexpectedly overflows to 0x%jx (%ju) "
1338 "when cast as mach_vm_size_t type.",
1339 (uintmax_t)read_size
, (uintmax_t)read_size
, (uintmax_t)(mach_vm_size_t
)read_size
, (uintmax_t)(mach_vm_size_t
)read_size
);
1340 set_vm_address((mach_vm_address_t
)read_address
);
1341 set_vm_size((mach_vm_size_t
)read_size
);
1344 /* Allocate some source memory, read it into a buffer, deallocate the
1345 * source, set the global address and size from the buffer's. */
1347 set_up_vm_variables_allocate_read_deallocate()
1349 set_up_vm_variables_and_allocate_extra_page();
1353 /************************************/
1354 /* mach_vm_write() set up functions */
1355 /************************************/
1357 /* Write the buffer into the destination memory. */
1361 mach_vm_address_t address
= get_vm_address();
1362 vm_offset_t data
= (vm_offset_t
)get_buffer_address();
1363 mach_msg_type_number_t buffer_size
= (mach_msg_type_number_t
)get_buffer_size();
1366 "Writing buffer of address 0x%jx and size 0x%jx (%ju), on "
1367 "memory at address 0x%jx...",
1368 (uintmax_t)data
, (uintmax_t)buffer_size
, (uintmax_t)buffer_size
, (uintmax_t)address
);
1369 assert_write_success(address
, data
, buffer_size
);
1370 logv("Buffer written.");
1373 /* Allocate some destination and buffer memory, and write the buffer
1374 * into the destination memory. */
1376 set_up_vm_and_buffer_variables_allocate_write()
1378 set_up_vm_and_buffer_variables_allocate_for_writing();
1382 /***********************************/
1383 /* mach_vm_copy() set up functions */
1384 /***********************************/
1387 copy_deallocate(void)
1389 mach_vm_size_t size
= get_vm_size();
1390 mach_vm_address_t source
= get_vm_address();
1391 mach_vm_address_t dest
= 0;
1393 logv("Copying 0x%jx (%ju) byte%s at address 0x%jx...", (uintmax_t)size
, (uintmax_t)size
, (size
== 1) ? "" : "s",
1395 assert_allocate_copy_success(source
, size
, &dest
);
1397 "Memory of size 0x%jx (%ju) copy into region of "
1399 (uintmax_t)size
, (uintmax_t)size
, (uintmax_t)dest
);
1400 /* Deallocate the originally allocated memory, including the
1401 * extra allocated page in
1402 * set_up_vm_variables_and_allocate_extra_page(). */
1403 deallocate_range(mach_vm_trunc_page(source
), size
+ 1);
1404 /* Promoting to mach_vm types after checking for overflow, and
1405 * setting the global address from the buffer's. */
1406 T_QUIET
; T_ASSERT_EQ((vm_offset_t
)dest
, dest
,
1407 "Address 0x%jx unexpectedly overflows to 0x%jx when cast "
1408 "as mach_vm_address_t type.",
1409 (uintmax_t)dest
, (uintmax_t)(vm_offset_t
)dest
);
1410 set_vm_address(dest
);
1414 /* Copy the source region into the destination region. */
1418 mach_vm_address_t source
= get_vm_address();
1419 mach_vm_address_t dest
= get_buffer_address();
1420 mach_msg_type_number_t size
= (mach_msg_type_number_t
)get_buffer_size();
1423 "Copying memory region of address 0x%jx and size 0x%jx (%ju), on "
1424 "memory at address 0x%jx...",
1425 (uintmax_t)source
, (uintmax_t)size
, (uintmax_t)size
, (uintmax_t)dest
);
1426 assert_copy_success(source
, size
, dest
);
1427 logv("Buffer written.");
1430 /* Allocate some source memory, copy it to another region, deallocate the
1431 * source, set the global address and size from the designation region. */
1433 set_up_vm_variables_allocate_copy_deallocate()
1435 set_up_vm_variables_and_allocate_extra_page();
1439 /* Allocate some destination and source memory, and copy the source
1440 * into the destination memory. */
1442 set_up_source_and_dest_variables_allocate_copy()
1444 set_up_vm_and_buffer_variables_allocate_for_copying();
1448 /**************************************/
1449 /* mach_vm_protect() set up functions */
1450 /**************************************/
1453 set_up_vm_variables_allocate_protect(vm_prot_t protection
, const char * protection_name
)
1455 set_up_vm_variables_and_allocate_extra_page();
1456 mach_vm_size_t size
= get_vm_size();
1457 mach_vm_address_t address
= get_vm_address();
1460 "Setting %s-protection on 0x%jx (%ju) byte%s at address "
1462 protection_name
, (uintmax_t)size
, (uintmax_t)size
, (size
== 1) ? "" : "s", (uintmax_t)address
);
1463 T_QUIET
; T_ASSERT_MACH_SUCCESS(mach_vm_protect(mach_task_self(), address
, size
, FALSE
, protection
), "mach_vm_protect()");
1464 logv("Region %s-protected.", protection_name
);
1468 set_up_vm_variables_allocate_readprotect()
1470 set_up_vm_variables_allocate_protect(VM_PROT_WRITE
, "read");
1474 set_up_vm_variables_allocate_writeprotect()
1476 set_up_vm_variables_allocate_protect(VM_PROT_READ
, "write");
1483 /* Allocated address is nonzero iff size is nonzero. */
1485 test_nonzero_address_iff_nonzero_size()
1487 mach_vm_address_t address
= get_vm_address();
1488 mach_vm_size_t size
= get_vm_size();
1490 T_QUIET
; T_ASSERT_TRUE((address
&& size
) || (!address
&& !size
), "Address 0x%jx is unexpectedly %szero.", (uintmax_t)address
,
1491 address
? "non" : "");
1492 logv("Address 0x%jx is %szero as expected.", (uintmax_t)address
, size
? "non" : "");
1495 /* Allocated address is aligned. */
1497 test_aligned_address()
1499 mach_vm_address_t address
= get_vm_address();
1501 assert_aligned_address(address
);
1502 logv("Address 0x%jx is aligned.", (uintmax_t)address
);
1505 /************************/
1506 /* Read and write tests */
1507 /************************/
1511 address_filter_t filter
, boolean_t reversed
, mach_vm_address_t address
, mach_vm_size_t size
, const char * pattern_name
)
1514 "Verifying %s pattern on region of address 0x%jx "
1515 "and size 0x%jx (%ju)...",
1516 pattern_name
, (uintmax_t)address
, (uintmax_t)size
, (uintmax_t)size
);
1517 filter_addresses_do_else(filter
, reversed
, address
, size
, verify_address
, read_zero
, address
);
1518 logv("Pattern verified.");
1523 address_filter_t filter
, boolean_t reversed
, mach_vm_address_t address
, mach_vm_size_t size
, const char * pattern_name
)
1526 "Writing %s pattern on region of address 0x%jx "
1527 "and size 0x%jx (%ju)...",
1528 pattern_name
, (uintmax_t)address
, (uintmax_t)size
, (uintmax_t)size
);
1529 filter_addresses_do_else(filter
, reversed
, address
, size
, write_address
, no_action
, address
);
1530 logv("Pattern writen.");
1534 write_and_verify_pattern(
1535 address_filter_t filter
, boolean_t reversed
, mach_vm_address_t address
, mach_vm_size_t size
, const char * pattern_name
)
1538 "Writing and verifying %s pattern on region of "
1539 "address 0x%jx and size 0x%jx (%ju)...",
1540 pattern_name
, (uintmax_t)address
, (uintmax_t)size
, (uintmax_t)size
);
1541 filter_addresses_do_else(filter
, reversed
, address
, size
, write_address
, no_action
, address
);
1542 filter_addresses_do_else(filter
, reversed
, address
, size
, verify_address
, read_zero
, address
);
1543 logv("Pattern written and verified.");
1546 /* Verify that the smallest aligned region containing the
1547 * given range is zero-filled. */
1551 verify_pattern(empty
, FALSE
, mach_vm_trunc_page(get_vm_address()), aligned_size(get_vm_address(), get_vm_size()),
1556 test_write_address_filled()
1558 write_and_verify_pattern(empty
, TRUE
, get_vm_address(), round_page_kernel(get_vm_size()), "address-filled");
1562 test_write_checkerboard()
1564 write_and_verify_pattern(checkerboard
, FALSE
, get_vm_address(), round_page_kernel(get_vm_size()), "checkerboard");
1568 test_write_reverse_checkerboard()
1570 write_and_verify_pattern(checkerboard
, TRUE
, get_vm_address(), round_page_kernel(get_vm_size()), "reverse checkerboard");
1574 test_write_page_ends()
1576 write_and_verify_pattern(page_ends
, FALSE
, get_vm_address(), round_page_kernel(get_vm_size()), "page ends");
1580 test_write_page_interiors()
1582 write_and_verify_pattern(page_ends
, TRUE
, get_vm_address(), round_page_kernel(get_vm_size()), "page interiors");
1585 /*********************************/
1586 /* Allocation error return tests */
1587 /*********************************/
1589 /* Reallocating a page in the smallest aligned region containing the
1590 * given allocated range fails. */
1592 test_reallocate_pages()
1594 allocate_fn_t allocator
= get_allocator();
1595 vm_map_t this_task
= mach_task_self();
1596 mach_vm_address_t address
= mach_vm_trunc_page(get_vm_address());
1597 mach_vm_size_t size
= aligned_size(get_vm_address(), get_vm_size());
1598 mach_vm_address_t i
;
1602 "Reallocating pages in allocated region of address 0x%jx "
1603 "and size 0x%jx (%ju)...",
1604 (uintmax_t)address
, (uintmax_t)size
, (uintmax_t)size
);
1605 for (i
= address
; i
< address
+ size
; i
+= vm_page_size
) {
1606 kr
= allocator(this_task
, &i
, vm_page_size
, VM_FLAGS_FIXED
);
1607 T_QUIET
; T_ASSERT_EQ(kr
, KERN_NO_SPACE
,
1609 "at address 0x%jx unexpectedly returned: %s.\n"
1610 "Should have returned: %s.",
1611 (uintmax_t)address
, mach_error_string(kr
), mach_error_string(KERN_NO_SPACE
));
1613 logv("Returned expected error at each page: %s.", mach_error_string(KERN_NO_SPACE
));
1616 /* Allocating in VM_MAP_NULL fails. */
1618 test_allocate_in_null_map()
1620 mach_vm_address_t address
= get_vm_address();
1621 mach_vm_size_t size
= get_vm_size();
1622 int flag
= get_address_flag();
1624 logv("Allocating 0x%jx (%ju) byte%s", (uintmax_t)size
, (uintmax_t)size
, (size
== 1) ? "" : "s");
1625 if (!(flag
& VM_FLAGS_ANYWHERE
)) {
1626 logv(" at address 0x%jx", (uintmax_t)address
);
1628 logv(" in NULL VM map...");
1629 assert_mach_return(get_allocator()(VM_MAP_NULL
, &address
, size
, flag
), MACH_SEND_INVALID_DEST
, "Allocator");
1630 logv("Returned expected error: %s.", mach_error_string(MACH_SEND_INVALID_DEST
));
1633 /* Allocating with non-user flags fails. */
1635 test_allocate_with_kernel_flags()
1637 allocate_fn_t allocator
= get_allocator();
1638 vm_map_t this_task
= mach_task_self();
1639 mach_vm_address_t address
= get_vm_address();
1640 mach_vm_size_t size
= get_vm_size();
1641 int flag
= get_address_flag();
1644 int kernel_flags
[] = {0x100, 0x200, 0x400, 0x800, 0x1000, 0x2000, 0x8000, INT_MAX
};
1645 int numofflags
= sizeof(kernel_flags
) / sizeof(kernel_flags
[0]);
1647 logv("Allocating 0x%jx (%ju) byte%s", (uintmax_t)size
, (uintmax_t)size
, (size
== 1) ? "" : "s");
1648 if (!(flag
& VM_FLAGS_ANYWHERE
)) {
1649 logv(" at address 0x%jx", (uintmax_t)address
);
1651 logv(" with various kernel flags...");
1652 for (i
= 0; i
< numofflags
; i
++) {
1653 bad_flag
= kernel_flags
[i
] | flag
;
1654 kr
= allocator(this_task
, &address
, size
, bad_flag
);
1655 T_QUIET
; T_ASSERT_EQ(kr
, KERN_INVALID_ARGUMENT
,
1657 "with kernel flag 0x%x unexpectedly returned: %s.\n"
1658 "Should have returned: %s.",
1659 bad_flag
, mach_error_string(kr
), mach_error_string(KERN_INVALID_ARGUMENT
));
1661 logv("Returned expected error with each kernel flag: %s.", mach_error_string(KERN_INVALID_ARGUMENT
));
1664 /*****************************/
1665 /* mach_vm_map() error tests */
1666 /*****************************/
1668 /* mach_vm_map() fails with invalid protection or inheritance
1671 test_mach_vm_map_protection_inheritance_error()
1674 vm_map_t my_task
= mach_task_self();
1675 mach_vm_address_t address
= get_vm_address();
1676 mach_vm_size_t size
= get_vm_size();
1677 vm_map_offset_t mask
= (get_allocator() == wrapper_mach_vm_map
|| get_allocator() == wrapper_mach_vm_map_named_entry
)
1678 ? (mach_vm_offset_t
)0
1679 : (mach_vm_offset_t
)get_mask();
1680 int flag
= get_address_flag();
1681 mach_port_t object_handle
= (get_allocator() == wrapper_mach_vm_map_named_entry
) ? memory_entry(&size
) : MACH_PORT_NULL
;
1682 vm_prot_t cur_protections
[] = {VM_PROT_DEFAULT
, VM_PROT_ALL
+ 1, ~VM_PROT_IS_MASK
, INT_MAX
};
1683 vm_prot_t max_protections
[] = {VM_PROT_ALL
, VM_PROT_ALL
+ 1, ~VM_PROT_IS_MASK
, INT_MAX
};
1684 vm_inherit_t inheritances
[] = {VM_INHERIT_DEFAULT
, VM_INHERIT_LAST_VALID
+ 1, UINT_MAX
};
1687 logv("Allocating 0x%jx (%ju) byte%s", (uintmax_t)size
, (uintmax_t)size
, (size
== 1) ? "" : "s");
1688 if (!(flag
& VM_FLAGS_ANYWHERE
)) {
1689 logv(" at address 0x%jx", (uintmax_t)address
);
1692 " with various invalid protection/inheritance "
1695 for (i
= 0; i
< 4; i
++) {
1696 for (j
= 0; j
< 4; j
++) {
1697 for (k
= 0; k
< 3; k
++) {
1698 /* Skip the case with all valid arguments. */
1699 if (i
== (j
== (k
== 0))) {
1702 kr
= mach_vm_map(my_task
, &address
, size
, mask
, flag
, object_handle
, (memory_object_offset_t
)0, FALSE
,
1703 cur_protections
[i
], max_protections
[j
], inheritances
[k
]);
1704 T_QUIET
; T_ASSERT_EQ(kr
, KERN_INVALID_ARGUMENT
,
1706 "with cur_protection 0x%x, max_protection 0x%x, "
1707 "inheritance 0x%x unexpectedly returned: %s.\n"
1708 "Should have returned: %s.",
1709 cur_protections
[i
], max_protections
[j
], inheritances
[k
], mach_error_string(kr
),
1710 mach_error_string(KERN_INVALID_ARGUMENT
));
1714 logv("Returned expected error in each case: %s.", mach_error_string(KERN_INVALID_ARGUMENT
));
1717 /* mach_vm_map() with unspecified address fails if the starting
1718 * address overflows when rounded up to a boundary value. */
1720 test_mach_vm_map_large_mask_overflow_error()
1722 mach_vm_address_t address
= 0x1;
1723 mach_vm_size_t size
= get_vm_size();
1724 mach_vm_offset_t mask
= (mach_vm_offset_t
)UINTMAX_MAX
;
1725 /* mach_vm_map() cannot allocate 0 bytes at an unspecified
1726 * address, see 8003930. */
1727 kern_return_t kr_expected
= size
? KERN_NO_SPACE
: KERN_INVALID_ARGUMENT
;
1730 "Allocating 0x%jx (%ju) byte%s at an unspecified address "
1731 "starting at 0x%jx with mask 0x%jx...",
1732 (uintmax_t)size
, (uintmax_t)size
, (size
== 1) ? "" : "s", (uintmax_t)address
, (uintmax_t)mask
);
1733 assert_mach_return(mach_vm_map(mach_task_self(), &address
, size
, mask
, VM_FLAGS_ANYWHERE
, MACH_PORT_NULL
,
1734 (memory_object_offset_t
)0, FALSE
, VM_PROT_DEFAULT
, VM_PROT_ALL
, VM_INHERIT_DEFAULT
),
1735 kr_expected
, "mach_vm_map()");
1736 logv("Returned expected error: %s.", mach_error_string(kr_expected
));
1739 /************************/
1740 /* Size edge case tests */
1741 /************************/
1744 allocate_edge_size(mach_vm_address_t
* address
, mach_vm_size_t size
, kern_return_t expected_kr
)
1746 logv("Allocating 0x%jx (%ju) bytes...", (uintmax_t)size
, (uintmax_t)size
);
1747 assert_allocate_return(address
, size
, VM_FLAGS_ANYWHERE
, expected_kr
);
1748 logv("Returned expected value: %s.", mach_error_string(expected_kr
));
1752 test_allocate_zero_size()
1754 mach_vm_address_t address
= 0x0;
1755 /* mach_vm_map() cannot allocate 0 bytes at an unspecified
1756 * address, see 8003930. Other allocators succeed. */
1757 kern_return_t kr_expected
= (get_allocator() != wrapper_mach_vm_allocate
) ? KERN_INVALID_ARGUMENT
: KERN_SUCCESS
;
1759 allocate_edge_size(&address
, 0, kr_expected
);
1760 if (kr_expected
== KERN_SUCCESS
) {
1761 deallocate_range(address
, 0);
1765 /* Testing the allocation of the largest size that does not overflow
1766 * when rounded up to a page-aligned value. */
1768 test_allocate_invalid_large_size()
1770 mach_vm_size_t size
= (mach_vm_size_t
)UINTMAX_MAX
- vm_page_size
+ 1;
1771 if (get_allocator() != wrapper_mach_vm_map_named_entry
) {
1772 mach_vm_address_t address
= 0x0;
1773 allocate_edge_size(&address
, size
, KERN_NO_SPACE
);
1775 /* Named entries cannot currently be bigger than 4 GB
1777 mach_port_t object_handle
= MACH_PORT_NULL
;
1778 logv("Creating named entry of 0x%jx (%ju) bytes...", (uintmax_t)size
, (uintmax_t)size
);
1779 assert_mach_return(mach_make_memory_entry_64(mach_task_self(), &size
, (memory_object_offset_t
)0,
1780 (MAP_MEM_NAMED_CREATE
| VM_PROT_ALL
), &object_handle
, 0),
1781 KERN_FAILURE
, "mach_make_memory_entry_64()");
1782 logv("Returned expected error: %s.", mach_error_string(KERN_FAILURE
));
1786 /* A UINTMAX_MAX VM size will overflow to 0 when rounded up to a
1787 * page-aligned value. */
1789 test_allocate_overflowing_size()
1791 mach_vm_address_t address
= 0x0;
1793 allocate_edge_size(&address
, (mach_vm_size_t
)UINTMAX_MAX
, KERN_INVALID_ARGUMENT
);
1796 /****************************/
1797 /* Address allocation tests */
1798 /****************************/
1800 /* Allocation at address zero fails iff size is nonzero. */
1802 test_allocate_at_zero()
1804 mach_vm_address_t address
= 0x0;
1805 mach_vm_size_t size
= get_vm_size();
1807 kern_return_t kr_expected
=
1808 size
? KERN_INVALID_ADDRESS
: (get_allocator() != wrapper_mach_vm_allocate
) ? KERN_INVALID_ARGUMENT
: KERN_SUCCESS
;
1810 logv("Allocating 0x%jx (%ju) byte%s at address 0x0...", (uintmax_t)size
, (uintmax_t)size
, (size
== 1) ? "" : "s");
1811 assert_allocate_return(&address
, size
, VM_FLAGS_FIXED
, kr_expected
);
1812 logv("Returned expected value: %s.", mach_error_string(kr_expected
));
1813 if (kr_expected
== KERN_SUCCESS
) {
1814 T_QUIET
; T_ASSERT_EQ(address
, 0,
1815 "Address 0x%jx is unexpectedly "
1817 (uintmax_t)address
);
1818 logv("Allocated address 0x%jx is zero.", (uintmax_t)address
);
1819 deallocate_range(address
, size
);
1823 /* Allocation at page-aligned but 2 MB boundary-unaligned address
1824 * fails with KERN_NO_SPACE. */
1826 test_allocate_2MB_boundary_unaligned_page_aligned_address()
1828 mach_vm_size_t size
= get_vm_size();
1830 mach_vm_address_t address
= get_fixed_address(size
+ vm_page_size
) + vm_page_size
;
1832 "Found 2 MB boundary-unaligned, page aligned address "
1834 (uintmax_t)address
);
1836 /* mach_vm_allocate() cannot allocate 0 bytes, and fails with a
1837 * fixed boundary-unaligned truncated address. */
1838 kern_return_t kr_expected
= (!size
&& get_allocator() != wrapper_mach_vm_allocate
)
1839 ? KERN_INVALID_ARGUMENT
1840 : (get_allocator() == wrapper_mach_vm_map_2MB
) ? KERN_NO_SPACE
: KERN_SUCCESS
;
1841 logv("Allocating 0x%jx (%ju) byte%s at address 0x%jx...", (uintmax_t)size
, (uintmax_t)size
, (size
== 1) ? "" : "s",
1842 (uintmax_t)address
);
1843 assert_allocate_return(&address
, size
, VM_FLAGS_FIXED
, kr_expected
);
1844 logv("Returned expected value: %s.", mach_error_string(kr_expected
));
1845 if (kr_expected
== KERN_SUCCESS
) {
1846 deallocate_range(address
, size
);
1850 /* With VM_FLAGS_ANYWHERE set, mach_vm_allocate() starts looking for
1851 * an allocation address at 0x0, while mach_vm_map() starts at the
1852 * supplied address and does not wrap around. See 8016663. */
1854 test_allocate_page_with_highest_address_hint()
1856 /* Highest valid page-aligned address. */
1857 mach_vm_address_t address
= (mach_vm_size_t
)UINTMAX_MAX
- vm_page_size
+ 1;
1860 "Allocating one page with unspecified address, but hint at "
1862 (uintmax_t)address
);
1863 if (get_allocator() == wrapper_mach_vm_allocate
) {
1864 /* mach_vm_allocate() starts from 0x0 and succeeds. */
1865 assert_allocate_success(&address
, vm_page_size
, VM_FLAGS_ANYWHERE
);
1866 logv("Memory allocated at address 0x%jx.", (uintmax_t)address
);
1867 assert_aligned_address(address
);
1868 deallocate_range(address
, vm_page_size
);
1870 /* mach_vm_map() starts from the supplied address, and fails
1871 * with KERN_NO_SPACE, see 8016663. */
1872 assert_allocate_return(&address
, vm_page_size
, VM_FLAGS_ANYWHERE
, KERN_NO_SPACE
);
1873 logv("Returned expected error: %s.", mach_error_string(KERN_NO_SPACE
));
1877 /* Allocators find an allocation address with a first fit strategy. */
1879 test_allocate_first_fit_pages()
1881 allocate_fn_t allocator
= get_allocator();
1882 mach_vm_address_t address1
= 0x0;
1883 mach_vm_address_t i
;
1885 vm_map_t this_task
= mach_task_self();
1888 "Looking for first fit address for allocating one "
1890 assert_allocate_success(&address1
, vm_page_size
, VM_FLAGS_ANYWHERE
);
1891 logv("Found address 0x%jx.", (uintmax_t)address1
);
1892 assert_aligned_address(address1
);
1893 mach_vm_address_t address2
= address1
;
1895 "Looking for next higher first fit address for allocating "
1897 assert_allocate_success(&address2
, vm_page_size
, VM_FLAGS_ANYWHERE
);
1898 logv("Found address 0x%jx.", (uintmax_t)address2
);
1899 assert_aligned_address(address2
);
1900 T_QUIET
; T_ASSERT_GT(address2
, address1
,
1901 "Second address 0x%jx is "
1902 "unexpectedly not higher than first address 0x%jx.",
1903 (uintmax_t)address2
, (uintmax_t)address1
);
1905 logv("Allocating pages between 0x%jx and 0x%jx...", (uintmax_t)address1
, (uintmax_t)address2
);
1906 for (i
= address1
; i
<= address2
; i
+= vm_page_size
) {
1907 kr
= allocator(this_task
, &i
, vm_page_size
, VM_FLAGS_FIXED
);
1908 T_QUIET
; T_ASSERT_NE(kr
, KERN_SUCCESS
,
1909 "Allocator at address 0x%jx "
1910 "unexpectedly succeeded.",
1913 logv("Expectedly returned error at each page.");
1914 deallocate_range(address1
, vm_page_size
);
1915 deallocate_range(address2
, vm_page_size
);
1918 /*******************************/
1919 /* Deallocation segfault tests */
1920 /*******************************/
1922 /* mach_vm_deallocate() deallocates the smallest aligned region
1923 * (integral number of pages) containing the given range. */
1925 /* Addresses in deallocated range are inaccessible. */
1927 access_deallocated_range_address(mach_vm_address_t address
, const char * position
)
1929 logv("Will deallocate and read from %s 0x%jx of deallocated range...", position
, (uintmax_t)address
);
1931 mach_vm_address_t bad_value
= MACH_VM_ADDRESS_T(address
);
1932 T_ASSERT_FAIL("Unexpectedly read value 0x%jx at address 0x%jx.\n"
1933 "Should have died with signal SIGSEGV.",
1934 (uintmax_t)bad_value
, (uintmax_t)address
);
1937 /* Start of deallocated range is inaccessible. */
1939 test_access_deallocated_range_start()
1941 access_deallocated_range_address(get_vm_address(), "start");
1944 /* Middle of deallocated range is inaccessible. */
1946 test_access_deallocated_range_middle()
1948 access_deallocated_range_address(get_vm_address() + (round_page_kernel(get_vm_size()) >> 1), "middle");
1951 /* End of deallocated range is inaccessible. */
1953 test_access_deallocated_range_end()
1955 access_deallocated_range_address(round_page_kernel(get_vm_size()) - vm_address_size
+ get_vm_address(), "end");
1958 /* Deallocating almost the whole address space causes a SIGSEGV or SIGBUS. We
1959 * deallocate the largest valid aligned size to avoid overflowing when
1962 test_deallocate_suicide()
1964 mach_vm_address_t address
= 0x0;
1965 mach_vm_size_t size
= (mach_vm_size_t
)UINTMAX_MAX
- vm_page_size
+ 1;
1967 logv("Deallocating 0x%jx (%ju) bytes at address 0x%jx...", (uintmax_t)size
, (uintmax_t)size
, (uintmax_t)address
);
1968 kern_return_t kr
= mach_vm_deallocate(mach_task_self(), address
, size
);
1969 T_ASSERT_FAIL("mach_vm_deallocate() with address 0x%jx and "
1970 "size 0x%jx (%ju) unexpectedly returned: %s.\n"
1971 "Should have died with signal SIGSEGV or SIGBUS.",
1972 (uintmax_t)address
, (uintmax_t)size
, (uintmax_t)size
, mach_error_string(kr
));
1975 /***************************************/
1976 /* Deallocation and reallocation tests */
1977 /***************************************/
1979 /* Deallocating memory twice succeeds. */
1981 test_deallocate_twice()
1987 /* Deallocated and reallocated memory is zero-filled. Deallocated
1988 * memory is inaccessible since it can be reallocated. */
1990 test_write_pattern_deallocate_reallocate_zero_filled()
1992 mach_vm_address_t address
= get_vm_address();
1993 mach_vm_size_t size
= get_vm_size();
1995 write_pattern(page_ends
, FALSE
, address
, size
, "page ends");
1996 logv("Deallocating, then Allocating 0x%jx (%ju) byte%s at address 0x%jx...", (uintmax_t)size
, (uintmax_t)size
, (size
== 1) ? "" : "s",
1997 (uintmax_t)address
);
1999 assert_allocate_success(&address
, size
, VM_FLAGS_FIXED
);
2000 logv("Memory allocated.");
2001 verify_pattern(empty
, FALSE
, address
, size
, "zero-filled");
2005 /********************************/
2006 /* Deallocation edge case tests */
2007 /********************************/
2009 /* Zero size deallocation always succeeds. */
2011 test_deallocate_zero_size_ranges()
2015 vm_map_t this_task
= mach_task_self();
2016 mach_vm_address_t addresses
[] = {0x0,
2021 (mach_vm_address_t
)UINT_MAX
- vm_page_size
+ 1,
2022 (mach_vm_address_t
)UINT_MAX
,
2023 (mach_vm_address_t
)UINTMAX_MAX
- vm_page_size
+ 1,
2024 (mach_vm_address_t
)UINTMAX_MAX
};
2025 int numofaddresses
= sizeof(addresses
) / sizeof(addresses
[0]);
2027 logv("Deallocating 0x0 (0) bytes at various addresses...");
2028 for (i
= 0; i
< numofaddresses
; i
++) {
2029 kr
= mach_vm_deallocate(this_task
, addresses
[i
], 0);
2030 T_QUIET
; T_ASSERT_MACH_SUCCESS(kr
, "mach_vm_deallocate() at "
2031 "address 0x%jx unexpectedly failed: %s.",
2032 (uintmax_t)addresses
[i
], mach_error_string(kr
));
2034 logv("Deallocations successful.");
2037 /* Deallocation succeeds if the end of the range rounds to 0x0. */
2039 test_deallocate_rounded_zero_end_ranges()
2043 vm_map_t this_task
= mach_task_self();
2045 mach_vm_address_t address
;
2046 mach_vm_size_t size
;
2048 {0x0, (mach_vm_size_t
)UINTMAX_MAX
},
2049 {0x0, (mach_vm_size_t
)UINTMAX_MAX
- vm_page_size
+ 2},
2050 {0x1, (mach_vm_size_t
)UINTMAX_MAX
- 1},
2051 {0x1, (mach_vm_size_t
)UINTMAX_MAX
- vm_page_size
+ 1},
2052 {0x2, (mach_vm_size_t
)UINTMAX_MAX
- 2},
2053 {0x2, (mach_vm_size_t
)UINTMAX_MAX
- vm_page_size
},
2054 {(mach_vm_address_t
)UINTMAX_MAX
- vm_page_size
+ 1, vm_page_size
- 1},
2055 {(mach_vm_address_t
)UINTMAX_MAX
- vm_page_size
+ 1, 1},
2056 {(mach_vm_address_t
)UINTMAX_MAX
- 1, 1},
2058 int numofranges
= sizeof(ranges
) / sizeof(ranges
[0]);
2061 "Deallocating various memory ranges whose end rounds to "
2063 for (i
= 0; i
< numofranges
; i
++) {
2064 kr
= mach_vm_deallocate(this_task
, ranges
[i
].address
, ranges
[i
].size
);
2065 T_QUIET
; T_ASSERT_MACH_SUCCESS(kr
,
2066 "mach_vm_deallocate() with address 0x%jx and size "
2067 "0x%jx (%ju) unexpectedly returned: %s.\n"
2068 "Should have succeeded.",
2069 (uintmax_t)ranges
[i
].address
, (uintmax_t)ranges
[i
].size
, (uintmax_t)ranges
[i
].size
, mach_error_string(kr
));
2071 logv("Deallocations successful.");
2074 /* Deallocating a range wrapped around the address space fails. */
2076 test_deallocate_wrapped_around_ranges()
2080 vm_map_t this_task
= mach_task_self();
2082 mach_vm_address_t address
;
2083 mach_vm_size_t size
;
2085 {0x1, (mach_vm_size_t
)UINTMAX_MAX
},
2086 {vm_page_size
, (mach_vm_size_t
)UINTMAX_MAX
- vm_page_size
+ 1},
2087 {(mach_vm_address_t
)UINTMAX_MAX
- vm_page_size
+ 1, vm_page_size
},
2088 {(mach_vm_address_t
)UINTMAX_MAX
, 1},
2090 int numofranges
= sizeof(ranges
) / sizeof(ranges
[0]);
2093 "Deallocating various memory ranges wrapping around the "
2094 "address space...");
2095 for (i
= 0; i
< numofranges
; i
++) {
2096 kr
= mach_vm_deallocate(this_task
, ranges
[i
].address
, ranges
[i
].size
);
2097 T_QUIET
; T_ASSERT_EQ(kr
, KERN_INVALID_ARGUMENT
,
2098 "mach_vm_deallocate() with address 0x%jx and size "
2099 "0x%jx (%ju) unexpectedly returned: %s.\n"
2100 "Should have returned: %s.",
2101 (uintmax_t)ranges
[i
].address
, (uintmax_t)ranges
[i
].size
, (uintmax_t)ranges
[i
].size
, mach_error_string(kr
),
2102 mach_error_string(KERN_INVALID_ARGUMENT
));
2104 logv("Returned expected error on each range: %s.", mach_error_string(KERN_INVALID_ARGUMENT
));
2107 /* Deallocating in VM_MAP_NULL fails. */
2109 test_deallocate_in_null_map()
2111 mach_vm_address_t address
= get_vm_address();
2112 mach_vm_size_t size
= get_vm_size();
2113 int flag
= get_address_flag();
2115 logv("Deallocating 0x%jx (%ju) byte%s", (uintmax_t)size
, (uintmax_t)size
, (size
== 1) ? "" : "s");
2116 if (!(flag
& VM_FLAGS_ANYWHERE
)) {
2117 logv(" at address 0x%jx", (uintmax_t)address
);
2119 logv(" in NULL VM map...");
2120 assert_mach_return(mach_vm_deallocate(VM_MAP_NULL
, address
, size
), MACH_SEND_INVALID_DEST
, "mach_vm_deallocate()");
2121 logv("Returned expected error: %s.", mach_error_string(MACH_SEND_INVALID_DEST
));
2124 /*****************************/
2125 /* mach_vm_read() main tests */
2126 /*****************************/
2128 /* Read memory of size less than a page has aligned starting
2129 * address. Otherwise, the destination buffer's starting address has
2130 * the same boundary offset as the source region's. */
2132 test_read_address_offset()
2134 mach_vm_address_t address
= get_vm_address();
2135 mach_vm_size_t size
= get_vm_size();
2137 if (size
< vm_page_size
* 2 || get_address_alignment()) {
2138 assert_aligned_address(address
);
2139 logv("Buffer address 0x%jx is aligned as expected.", (uintmax_t)address
);
2141 T_QUIET
; T_ASSERT_EQ(((address
- 1) & (vm_page_size
- 1)), 0,
2143 "address 0x%jx does not have the expected boundary "
2145 (uintmax_t)address
);
2147 "Buffer address 0x%jx has the expected boundary "
2149 (uintmax_t)address
);
2153 /* Reading from VM_MAP_NULL fails. */
2155 test_read_null_map()
2157 mach_vm_address_t address
= get_vm_address();
2158 mach_vm_size_t size
= get_vm_size();
2159 vm_offset_t read_address
;
2160 mach_msg_type_number_t read_size
;
2163 "Reading 0x%jx (%ju) byte%s at address 0x%jx in NULL VM "
2165 (uintmax_t)size
, (uintmax_t)size
, (size
== 1) ? "" : "s", (uintmax_t)address
);
2166 assert_mach_return(mach_vm_read(VM_MAP_NULL
, address
, size
, &read_address
, &read_size
), MACH_SEND_INVALID_DEST
,
2168 logv("Returned expected error: %s.", mach_error_string(MACH_SEND_INVALID_DEST
));
2171 /* Reading partially deallocated memory fails. */
2173 test_read_partially_deallocated_range()
2175 mach_vm_address_t address
= get_vm_address();
2176 mach_vm_size_t size
= get_vm_size();
2177 mach_vm_address_t mid_point
= mach_vm_trunc_page(address
+ size
/ 2);
2178 vm_offset_t read_address
;
2179 mach_msg_type_number_t read_size
;
2181 logv("Deallocating a mid-range page at address 0x%jx...", (uintmax_t)mid_point
);
2182 assert_deallocate_success(mid_point
, vm_page_size
);
2183 logv("Page deallocated.");
2185 logv("Reading 0x%jx (%ju) byte%s at address 0x%jx...", (uintmax_t)size
, (uintmax_t)size
, (size
== 1) ? "" : "s",
2186 (uintmax_t)address
);
2187 assert_read_return(address
, size
, &read_address
, &read_size
, KERN_INVALID_ADDRESS
);
2188 logv("Returned expected error: %s.", mach_error_string(KERN_INVALID_ADDRESS
));
2191 /* Reading partially read-protected memory fails. */
2193 test_read_partially_unreadable_range()
2195 mach_vm_address_t address
= get_vm_address();
2196 mach_vm_size_t size
= get_vm_size();
2197 mach_vm_address_t mid_point
= mach_vm_trunc_page(address
+ size
/ 2);
2198 vm_offset_t read_address
;
2199 mach_msg_type_number_t read_size
;
2201 /* For sizes < msg_ool_size_small, vm_map_copyin_common() uses
2202 * vm_map_copyin_kernel_buffer() to read in the memory,
2203 * returning different errors, see 8182239. */
2204 kern_return_t kr_expected
= (size
< vm_page_size
* 2) ? KERN_INVALID_ADDRESS
: KERN_PROTECTION_FAILURE
;
2206 logv("Read-protecting a mid-range page at address 0x%jx...", (uintmax_t)mid_point
);
2207 T_QUIET
; T_ASSERT_MACH_SUCCESS(mach_vm_protect(mach_task_self(), mid_point
, vm_page_size
, FALSE
, VM_PROT_WRITE
), "mach_vm_protect()");
2208 logv("Page read-protected.");
2210 logv("Reading 0x%jx (%ju) byte%s at address 0x%jx...", (uintmax_t)size
, (uintmax_t)size
, (size
== 1) ? "" : "s",
2211 (uintmax_t)address
);
2212 assert_read_return(address
, size
, &read_address
, &read_size
, kr_expected
);
2213 logv("Returned expected error: %s.", mach_error_string(kr_expected
));
2216 /**********************************/
2217 /* mach_vm_read() edge case tests */
2218 /**********************************/
2221 read_edge_size(mach_vm_size_t size
, kern_return_t expected_kr
)
2225 vm_map_t this_task
= mach_task_self();
2226 mach_vm_address_t addresses
[] = {vm_page_size
- 1,
2229 (mach_vm_address_t
)UINT_MAX
- vm_page_size
+ 1,
2230 (mach_vm_address_t
)UINT_MAX
,
2231 (mach_vm_address_t
)UINTMAX_MAX
- vm_page_size
+ 1,
2232 (mach_vm_address_t
)UINTMAX_MAX
};
2233 int numofaddresses
= sizeof(addresses
) / sizeof(addresses
[0]);
2234 vm_offset_t read_address
;
2235 mach_msg_type_number_t read_size
;
2237 logv("Reading 0x%jx (%ju) bytes at various addresses...", (uintmax_t)size
, (uintmax_t)size
);
2238 for (i
= 0; i
< numofaddresses
; i
++) {
2239 kr
= mach_vm_read(this_task
, addresses
[i
], size
, &read_address
, &read_size
);
2240 T_QUIET
; T_ASSERT_EQ(kr
, expected_kr
,
2241 "mach_vm_read() at "
2242 "address 0x%jx unexpectedly returned: %s.\n"
2243 "Should have returned: %s.",
2244 (uintmax_t)addresses
[i
], mach_error_string(kr
), mach_error_string(expected_kr
));
2247 "mach_vm_read() returned expected value in each case: "
2249 mach_error_string(expected_kr
));
2252 /* Reading 0 bytes always succeeds. */
2254 test_read_zero_size()
2256 read_edge_size(0, KERN_SUCCESS
);
2259 /* Reading 4GB or higher always fails. */
2261 test_read_invalid_large_size()
2263 read_edge_size((mach_vm_size_t
)UINT_MAX
+ 1, KERN_INVALID_ARGUMENT
);
2266 /* Reading a range wrapped around the address space fails. */
2268 test_read_wrapped_around_ranges()
2272 vm_map_t this_task
= mach_task_self();
2274 mach_vm_address_t address
;
2275 mach_vm_size_t size
;
2277 {(mach_vm_address_t
)(UINTMAX_MAX
- UINT_MAX
+ 1), (mach_vm_size_t
)UINT_MAX
},
2278 {(mach_vm_address_t
)(UINTMAX_MAX
- UINT_MAX
+ vm_page_size
), (mach_vm_size_t
)(UINT_MAX
- vm_page_size
+ 1)},
2279 {(mach_vm_address_t
)UINTMAX_MAX
- vm_page_size
+ 1, vm_page_size
},
2280 {(mach_vm_address_t
)UINTMAX_MAX
, 1},
2282 int numofranges
= sizeof(ranges
) / sizeof(ranges
[0]);
2283 vm_offset_t read_address
;
2284 mach_msg_type_number_t read_size
;
2287 "Reading various memory ranges wrapping around the "
2288 "address space...");
2289 for (i
= 0; i
< numofranges
; i
++) {
2290 kr
= mach_vm_read(this_task
, ranges
[i
].address
, ranges
[i
].size
, &read_address
, &read_size
);
2291 T_QUIET
; T_ASSERT_EQ(kr
, KERN_INVALID_ADDRESS
,
2292 "mach_vm_read() at address 0x%jx with size "
2293 "0x%jx (%ju) unexpectedly returned: %s.\n"
2294 "Should have returned: %s.",
2295 (uintmax_t)ranges
[i
].address
, (uintmax_t)ranges
[i
].size
, (uintmax_t)ranges
[i
].size
, mach_error_string(kr
),
2296 mach_error_string(KERN_INVALID_ADDRESS
));
2298 logv("Returned expected error on each range: %s.", mach_error_string(KERN_INVALID_ADDRESS
));
2301 /********************************/
2302 /* mach_vm_read() pattern tests */
2303 /********************************/
2305 /* Write a pattern on pre-allocated memory, read into a buffer and
2306 * verify the pattern on the buffer. */
2308 write_read_verify_pattern(address_filter_t filter
, boolean_t reversed
, const char * pattern_name
)
2310 mach_vm_address_t address
= get_vm_address();
2312 write_pattern(filter
, reversed
, address
, get_vm_size(), pattern_name
);
2314 /* Getting the address and size of the read buffer. */
2315 mach_vm_address_t read_address
= get_vm_address();
2316 mach_vm_size_t read_size
= get_vm_size();
2318 "Verifying %s pattern on buffer of "
2319 "address 0x%jx and size 0x%jx (%ju)...",
2320 pattern_name
, (uintmax_t)read_address
, (uintmax_t)read_size
, (uintmax_t)read_size
);
2321 filter_addresses_do_else(filter
, reversed
, read_address
, read_size
, verify_address
, read_zero
, address
);
2322 logv("Pattern verified on destination buffer.");
2326 test_read_address_filled()
2328 write_read_verify_pattern(empty
, TRUE
, "address-filled");
2332 test_read_checkerboard()
2334 write_read_verify_pattern(checkerboard
, FALSE
, "checkerboard");
2338 test_read_reverse_checkerboard()
2340 write_read_verify_pattern(checkerboard
, TRUE
, "reverse checkerboard");
2343 /***********************************/
2344 /* mach_vm_write() edge case tests */
2345 /***********************************/
2347 /* Writing in VM_MAP_NULL fails. */
2349 test_write_null_map()
2351 mach_vm_address_t address
= get_vm_address();
2352 vm_offset_t data
= (vm_offset_t
)get_buffer_address();
2353 mach_msg_type_number_t buffer_size
= (mach_msg_type_number_t
)get_buffer_size();
2356 "Writing buffer of address 0x%jx and size 0x%jx (%ju), on "
2357 "memory at address 0x%jx in NULL VM MAP...",
2358 (uintmax_t)data
, (uintmax_t)buffer_size
, (uintmax_t)buffer_size
, (uintmax_t)address
);
2359 assert_mach_return(mach_vm_write(VM_MAP_NULL
, address
, data
, buffer_size
), MACH_SEND_INVALID_DEST
, "mach_vm_write()");
2360 logv("Returned expected error: %s.", mach_error_string(MACH_SEND_INVALID_DEST
));
2363 /* Writing 0 bytes always succeeds. */
2365 test_write_zero_size()
2371 /*****************************************/
2372 /* mach_vm_write() inaccessibility tests */
2373 /*****************************************/
2375 /* Writing a partially deallocated buffer fails. */
2377 test_write_partially_deallocated_buffer()
2379 mach_vm_address_t address
= get_vm_address();
2380 vm_offset_t data
= (vm_offset_t
)get_buffer_address();
2381 mach_msg_type_number_t buffer_size
= (mach_msg_type_number_t
)get_buffer_size();
2382 mach_vm_address_t buffer_mid_point
= (mach_vm_address_t
)mach_vm_trunc_page(data
+ buffer_size
/ 2);
2385 "Deallocating a mid-range buffer page at address "
2387 (uintmax_t)buffer_mid_point
);
2388 assert_deallocate_success(buffer_mid_point
, vm_page_size
);
2389 logv("Page deallocated.");
2392 "Writing buffer of address 0x%jx and size 0x%jx (%ju), on "
2393 "memory at address 0x%jx...",
2394 (uintmax_t)data
, (uintmax_t)buffer_size
, (uintmax_t)buffer_size
, (uintmax_t)address
);
2395 assert_write_return(address
, data
, buffer_size
, MACH_SEND_INVALID_MEMORY
);
2396 logv("Returned expected error: %s.", mach_error_string(MACH_SEND_INVALID_MEMORY
));
2399 /* Writing a partially read-protected buffer fails. */
2401 test_write_partially_unreadable_buffer()
2403 mach_vm_address_t address
= get_vm_address();
2404 vm_offset_t data
= (vm_offset_t
)get_buffer_address();
2405 mach_msg_type_number_t buffer_size
= (mach_msg_type_number_t
)get_buffer_size();
2406 mach_vm_address_t buffer_mid_point
= (mach_vm_address_t
)mach_vm_trunc_page(data
+ buffer_size
/ 2);
2409 "Read-protecting a mid-range buffer page at address "
2411 (uintmax_t)buffer_mid_point
);
2412 T_QUIET
; T_ASSERT_MACH_SUCCESS(mach_vm_protect(mach_task_self(), buffer_mid_point
, vm_page_size
, FALSE
, VM_PROT_WRITE
),
2413 "mach_vm_protect()");
2414 logv("Page read-protected.");
2417 "Writing buffer of address 0x%jx and size 0x%jx (%ju), on "
2418 "memory at address 0x%jx...",
2419 (uintmax_t)data
, (uintmax_t)buffer_size
, (uintmax_t)buffer_size
, (uintmax_t)address
);
2420 assert_write_return(address
, data
, buffer_size
, MACH_SEND_INVALID_MEMORY
);
2421 logv("Returned expected error: %s.", mach_error_string(MACH_SEND_INVALID_MEMORY
));
2424 /* Writing on partially deallocated memory fails. */
2426 test_write_on_partially_deallocated_range()
2428 mach_vm_address_t address
= get_vm_address();
2429 mach_vm_address_t start
= mach_vm_trunc_page(address
);
2430 vm_offset_t data
= (vm_offset_t
)get_buffer_address();
2431 mach_msg_type_number_t buffer_size
= (mach_msg_type_number_t
)get_buffer_size();
2434 "Deallocating the first destination page at address "
2437 assert_deallocate_success(start
, vm_page_size
);
2438 logv("Page deallocated.");
2441 "Writing buffer of address 0x%jx and size 0x%jx (%ju), on "
2442 "memory at address 0x%jx...",
2443 (uintmax_t)data
, (uintmax_t)buffer_size
, (uintmax_t)buffer_size
, (uintmax_t)address
);
2444 assert_write_return(address
, data
, buffer_size
, KERN_INVALID_ADDRESS
);
2445 logv("Returned expected error: %s.", mach_error_string(KERN_INVALID_ADDRESS
));
2448 /* Writing on partially unwritable memory fails. */
2450 test_write_on_partially_unwritable_range()
2452 mach_vm_address_t address
= get_vm_address();
2453 mach_vm_address_t start
= mach_vm_trunc_page(address
);
2454 vm_offset_t data
= (vm_offset_t
)get_buffer_address();
2455 mach_msg_type_number_t buffer_size
= (mach_msg_type_number_t
)get_buffer_size();
2457 /* For sizes < msg_ool_size_small,
2458 * vm_map_copy_overwrite_nested() uses
2459 * vm_map_copyout_kernel_buffer() to read in the memory,
2460 * returning different errors, see 8217123. */
2461 kern_return_t kr_expected
= (buffer_size
< vm_page_size
* 2) ? KERN_INVALID_ADDRESS
: KERN_PROTECTION_FAILURE
;
2464 "Write-protecting the first destination page at address "
2467 T_QUIET
; T_ASSERT_MACH_SUCCESS(mach_vm_protect(mach_task_self(), start
, vm_page_size
, FALSE
, VM_PROT_READ
), "mach_vm_protect()");
2468 logv("Page write-protected.");
2471 "Writing buffer of address 0x%jx and size 0x%jx (%ju), on "
2472 "memory at address 0x%jx...",
2473 (uintmax_t)data
, (uintmax_t)buffer_size
, (uintmax_t)buffer_size
, (uintmax_t)address
);
2474 assert_write_return(address
, data
, buffer_size
, kr_expected
);
2475 logv("Returned expected error: %s.", mach_error_string(kr_expected
));
2478 /*********************************/
2479 /* mach_vm_write() pattern tests */
2480 /*********************************/
2482 /* Verify that a zero-filled buffer and destination memory are still
2483 * zero-filled after writing. */
2485 test_zero_filled_write()
2487 verify_pattern(empty
, FALSE
, mach_vm_trunc_page(get_vm_address()), round_page_kernel(get_vm_size() + 1), "zero-filled");
2488 verify_pattern(empty
, FALSE
, mach_vm_trunc_page(get_buffer_address()),
2489 round_page_kernel(get_buffer_size() + get_buffer_offset()), "zero-filled");
2492 /* Write a pattern on a buffer, write the buffer into some destination
2493 * memory, and verify the pattern on both buffer and destination. */
2495 pattern_write(address_filter_t filter
, boolean_t reversed
, const char * pattern_name
)
2497 mach_vm_address_t address
= get_vm_address();
2498 mach_vm_size_t size
= get_vm_size();
2499 mach_vm_address_t buffer_address
= get_buffer_address();
2500 mach_vm_size_t buffer_size
= get_buffer_size();
2502 write_pattern(filter
, reversed
, buffer_address
, buffer_size
, pattern_name
);
2504 verify_pattern(filter
, reversed
, buffer_address
, buffer_size
, pattern_name
);
2506 "Verifying %s pattern on destination of "
2507 "address 0x%jx and size 0x%jx (%ju)...",
2508 pattern_name
, (uintmax_t)address
, (uintmax_t)buffer_size
, (uintmax_t)size
);
2509 filter_addresses_do_else(filter
, reversed
, address
, buffer_size
, verify_address
, read_zero
, buffer_address
);
2510 logv("Pattern verified on destination.");
2514 test_address_filled_write()
2516 pattern_write(empty
, TRUE
, "address-filled");
2520 test_checkerboard_write()
2522 pattern_write(checkerboard
, FALSE
, "checkerboard");
2526 test_reverse_checkerboard_write()
2528 pattern_write(checkerboard
, TRUE
, "reverse checkerboard");
2531 /**********************************/
2532 /* mach_vm_copy() edge case tests */
2533 /**********************************/
2535 /* Copying in VM_MAP_NULL fails. */
2537 test_copy_null_map()
2539 mach_vm_address_t source
= get_vm_address();
2540 mach_vm_address_t dest
= get_buffer_address();
2541 mach_msg_type_number_t size
= (mach_msg_type_number_t
)get_buffer_size();
2544 "Copying buffer of address 0x%jx and size 0x%jx (%ju), on "
2545 "memory at address 0x%jx in NULL VM MAP...",
2546 (uintmax_t)dest
, (uintmax_t)size
, (uintmax_t)size
, (uintmax_t)source
);
2547 assert_mach_return(mach_vm_copy(VM_MAP_NULL
, source
, size
, dest
), MACH_SEND_INVALID_DEST
, "mach_vm_copy()");
2548 logv("Returned expected error: %s.", mach_error_string(MACH_SEND_INVALID_DEST
));
2552 copy_edge_size(mach_vm_size_t size
, kern_return_t expected_kr
)
2556 vm_map_t this_task
= mach_task_self();
2557 mach_vm_address_t addresses
[] = {0x0,
2562 (mach_vm_address_t
)UINT_MAX
- vm_page_size
+ 1,
2563 (mach_vm_address_t
)UINT_MAX
,
2564 (mach_vm_address_t
)UINTMAX_MAX
- vm_page_size
+ 1,
2565 (mach_vm_address_t
)UINTMAX_MAX
};
2566 int numofaddresses
= sizeof(addresses
) / sizeof(addresses
[0]);
2567 mach_vm_address_t dest
= 0;
2569 logv("Allocating 0x%jx (%ju) byte%s...", (uintmax_t)size
, (uintmax_t)size
, (size
== 1) ? "" : "s");
2570 assert_allocate_success(&dest
, 4096, VM_FLAGS_ANYWHERE
);
2571 logv("Copying 0x%jx (%ju) bytes at various addresses...", (uintmax_t)size
, (uintmax_t)size
);
2572 for (i
= 0; i
< numofaddresses
; i
++) {
2573 kr
= mach_vm_copy(this_task
, addresses
[i
], size
, dest
);
2574 T_QUIET
; T_ASSERT_EQ(kr
, expected_kr
,
2575 "mach_vm_copy() at "
2576 "address 0x%jx unexpectedly returned: %s.\n"
2577 "Should have returned: %s.",
2578 (uintmax_t)addresses
[i
], mach_error_string(kr
), mach_error_string(expected_kr
));
2581 "mach_vm_copy() returned expected value in each case: "
2583 mach_error_string(expected_kr
));
2585 deallocate_range(dest
, 4096);
2588 /* Copying 0 bytes always succeeds. */
2590 test_copy_zero_size()
2592 copy_edge_size(0, KERN_SUCCESS
);
2595 /* Copying 4GB or higher always fails. */
2597 test_copy_invalid_large_size()
2599 copy_edge_size((mach_vm_size_t
)UINT_MAX
- 1, KERN_INVALID_ADDRESS
);
2602 /* Reading a range wrapped around the address space fails. */
2604 test_copy_wrapped_around_ranges()
2608 vm_map_t this_task
= mach_task_self();
2610 mach_vm_address_t address
;
2611 mach_vm_size_t size
;
2613 {(mach_vm_address_t
)(UINTMAX_MAX
- UINT_MAX
+ 1), (mach_vm_size_t
)UINT_MAX
},
2614 {(mach_vm_address_t
)(UINTMAX_MAX
- UINT_MAX
+ vm_page_size
), (mach_vm_size_t
)(UINT_MAX
- vm_page_size
+ 1)},
2615 {(mach_vm_address_t
)UINTMAX_MAX
- vm_page_size
+ 1, vm_page_size
},
2616 {(mach_vm_address_t
)UINTMAX_MAX
, 1},
2618 int numofranges
= sizeof(ranges
) / sizeof(ranges
[0]);
2619 mach_vm_address_t dest
= 0;
2621 logv("Allocating 0x1000 (4096) bytes...");
2622 assert_allocate_success(&dest
, 4096, VM_FLAGS_ANYWHERE
);
2625 "Copying various memory ranges wrapping around the "
2626 "address space...");
2627 for (i
= 0; i
< numofranges
; i
++) {
2628 kr
= mach_vm_copy(this_task
, ranges
[i
].address
, ranges
[i
].size
, dest
);
2629 T_QUIET
; T_ASSERT_EQ(kr
, KERN_INVALID_ADDRESS
,
2630 "mach_vm_copy() at address 0x%jx with size "
2631 "0x%jx (%ju) unexpectedly returned: %s.\n"
2632 "Should have returned: %s.",
2633 (uintmax_t)ranges
[i
].address
, (uintmax_t)ranges
[i
].size
, (uintmax_t)ranges
[i
].size
, mach_error_string(kr
),
2634 mach_error_string(KERN_INVALID_ADDRESS
));
2636 logv("Returned expected error on each range: %s.", mach_error_string(KERN_INVALID_ADDRESS
));
2638 deallocate_range(dest
, 4096);
2641 /********************************/
2642 /* mach_vm_copy() pattern tests */
2643 /********************************/
2645 /* Write a pattern on pre-allocated region, copy into another region
2646 * and verify the pattern in the region. */
2648 write_copy_verify_pattern(address_filter_t filter
, boolean_t reversed
, const char * pattern_name
)
2650 mach_vm_address_t source
= get_vm_address();
2651 mach_vm_size_t src_size
= get_vm_size();
2652 write_pattern(filter
, reversed
, source
, src_size
, pattern_name
);
2653 /* Getting the address and size of the dest region */
2654 mach_vm_address_t dest
= get_buffer_address();
2655 mach_vm_size_t dst_size
= get_buffer_size();
2658 "Copying memory region of address 0x%jx and size 0x%jx (%ju), on "
2659 "memory at address 0x%jx...",
2660 (uintmax_t)source
, (uintmax_t)dst_size
, (uintmax_t)dst_size
, (uintmax_t)dest
);
2661 assert_copy_success(source
, dst_size
, dest
);
2663 "Verifying %s pattern in region of "
2664 "address 0x%jx and size 0x%jx (%ju)...",
2665 pattern_name
, (uintmax_t)dest
, (uintmax_t)dst_size
, (uintmax_t)dst_size
);
2666 filter_addresses_do_else(filter
, reversed
, dest
, dst_size
, verify_address
, read_zero
, source
);
2667 logv("Pattern verified on destination region.");
2671 test_copy_address_filled()
2673 write_copy_verify_pattern(empty
, TRUE
, "address-filled");
2677 test_copy_checkerboard()
2679 write_copy_verify_pattern(checkerboard
, FALSE
, "checkerboard");
2683 test_copy_reverse_checkerboard()
2685 write_copy_verify_pattern(checkerboard
, TRUE
, "reverse checkerboard");
2688 /* Verify that a zero-filled source and destination memory are still
2689 * zero-filled after writing. */
2691 test_zero_filled_copy_dest()
2693 verify_pattern(empty
, FALSE
, mach_vm_trunc_page(get_vm_address()), round_page_kernel(get_vm_size() + 1), "zero-filled");
2694 verify_pattern(empty
, FALSE
, mach_vm_trunc_page(get_buffer_address()),
2695 round_page_kernel(get_buffer_size() + get_buffer_offset()), "zero-filled");
2698 /****************************************/
2699 /* mach_vm_copy() inaccessibility tests */
2700 /****************************************/
2702 /* Copying partially deallocated memory fails. */
2704 test_copy_partially_deallocated_range()
2706 mach_vm_address_t source
= get_vm_address();
2707 mach_vm_size_t size
= get_vm_size();
2708 mach_vm_address_t mid_point
= mach_vm_trunc_page(source
+ size
/ 2);
2709 mach_vm_address_t dest
= 0;
2711 logv("Deallocating a mid-range page at address 0x%jx...", (uintmax_t)mid_point
);
2712 assert_deallocate_success(mid_point
, vm_page_size
);
2713 logv("Page deallocated.");
2715 logv("Copying 0x%jx (%ju) byte%s at address 0x%jx...", (uintmax_t)size
, (uintmax_t)size
, (size
== 1) ? "" : "s",
2718 assert_allocate_copy_return(source
, size
, &dest
, KERN_INVALID_ADDRESS
);
2720 logv("Returned expected error: %s.", mach_error_string(KERN_INVALID_ADDRESS
));
2722 deallocate_range(dest
, size
);
2725 /* Copy partially read-protected memory fails. */
2727 test_copy_partially_unreadable_range()
2729 mach_vm_address_t source
= get_vm_address();
2730 mach_vm_size_t size
= get_vm_size();
2731 mach_vm_address_t mid_point
= mach_vm_trunc_page(source
+ size
/ 2);
2732 mach_vm_address_t dest
= 0;
2734 /* For sizes < 1 page, vm_map_copyin_common() uses
2735 * vm_map_copyin_kernel_buffer() to read in the memory,
2736 * returning different errors, see 8182239. */
2737 kern_return_t kr_expected
= (size
< vm_page_size
) ? KERN_INVALID_ADDRESS
: KERN_PROTECTION_FAILURE
;
2739 logv("Read-protecting a mid-range page at address 0x%jx...", (uintmax_t)mid_point
);
2740 T_QUIET
; T_ASSERT_MACH_SUCCESS(mach_vm_protect(mach_task_self(), mid_point
, vm_page_size
, FALSE
, VM_PROT_WRITE
), "mach_vm_protect()");
2741 logv("Page read-protected.");
2743 logv("Copying 0x%jx (%ju) byte%s at address 0x%jx...", (uintmax_t)size
, (uintmax_t)size
, (size
== 1) ? "" : "s",
2745 assert_allocate_copy_return(source
, size
, &dest
, kr_expected
);
2746 logv("Returned expected error: %s.", mach_error_string(kr_expected
));
2748 deallocate_range(dest
, size
);
2751 /* Copying to a partially deallocated region fails. */
2753 test_copy_dest_partially_deallocated_region()
2755 mach_vm_address_t dest
= get_vm_address();
2756 mach_vm_address_t source
= get_buffer_address();
2757 mach_msg_type_number_t size
= (mach_msg_type_number_t
)get_buffer_size();
2758 mach_vm_address_t source_mid_point
= (mach_vm_address_t
)mach_vm_trunc_page(dest
+ size
/ 2);
2759 #if __MAC_OX_X_VERSION_MIN_REQUIRED > 1080
2761 "Deallocating a mid-range source page at address "
2763 (uintmax_t)source_mid_point
);
2764 assert_deallocate_success(source_mid_point
, vm_page_size
);
2765 logv("Page deallocated.");
2768 "Copying region of address 0x%jx and size 0x%jx (%ju), on "
2769 "memory at address 0x%jx...",
2770 (uintmax_t)source
, (uintmax_t)size
, (uintmax_t)size
, (uintmax_t)dest
);
2771 assert_copy_return(source
, size
, dest
, KERN_INVALID_ADDRESS
);
2772 logv("Returned expected error: %s.", mach_error_string(KERN_INVALID_ADDRESS
));
2775 "Bypassing partially deallocated region test "
2776 "(See <rdar://problem/12190999>)");
2777 #endif /* __MAC_OX_X_VERSION_MIN_REQUIRED > 1080 */
2780 /* Copying from a partially deallocated region fails. */
2782 test_copy_source_partially_deallocated_region()
2784 mach_vm_address_t source
= get_vm_address();
2785 mach_vm_address_t dest
= get_buffer_address();
2786 mach_msg_type_number_t size
= (mach_msg_type_number_t
)get_buffer_size();
2787 mach_vm_address_t source_mid_point
= (mach_vm_address_t
)mach_vm_trunc_page(source
+ size
/ 2);
2790 "Deallocating a mid-range source page at address "
2792 (uintmax_t)source_mid_point
);
2793 assert_deallocate_success(source_mid_point
, vm_page_size
);
2794 logv("Page deallocated.");
2797 "Copying region of address 0x%jx and size 0x%jx (%ju), on "
2798 "memory at address 0x%jx...",
2799 (uintmax_t)source
, (uintmax_t)size
, (uintmax_t)size
, (uintmax_t)dest
);
2800 assert_copy_return(source
, size
, dest
, KERN_INVALID_ADDRESS
);
2801 logv("Returned expected error: %s.", mach_error_string(KERN_INVALID_ADDRESS
));
2804 /* Copying from a partially read-protected region fails. */
2806 test_copy_source_partially_unreadable_region()
2808 mach_vm_address_t source
= get_vm_address();
2809 mach_vm_address_t dest
= get_buffer_address();
2810 mach_msg_type_number_t size
= (mach_msg_type_number_t
)get_buffer_size();
2811 mach_vm_address_t mid_point
= (mach_vm_address_t
)mach_vm_trunc_page(source
+ size
/ 2);
2812 kern_return_t kr
= (size
< vm_page_size
* 2) ? KERN_INVALID_ADDRESS
: KERN_PROTECTION_FAILURE
;
2815 "Read-protecting a mid-range buffer page at address "
2817 (uintmax_t)mid_point
);
2818 T_QUIET
; T_ASSERT_MACH_SUCCESS(mach_vm_protect(mach_task_self(), mid_point
, vm_page_size
, FALSE
, VM_PROT_WRITE
), "mach_vm_protect()");
2819 logv("Page read-protected.");
2822 "Copying region at address 0x%jx and size 0x%jx (%ju), on "
2823 "memory at address 0x%jx...",
2824 (uintmax_t)source
, (uintmax_t)size
, (uintmax_t)size
, (uintmax_t)dest
);
2826 assert_copy_return(source
, size
, dest
, kr
);
2827 logv("Returned expected error: %s.", mach_error_string(kr
));
2830 /* Copying to a partially write-protected region fails. */
2832 test_copy_dest_partially_unwriteable_region()
2835 mach_vm_address_t dest
= get_vm_address();
2836 mach_vm_address_t source
= get_buffer_address();
2837 mach_msg_type_number_t size
= (mach_msg_type_number_t
)get_buffer_size();
2838 mach_vm_address_t mid_point
= (mach_vm_address_t
)mach_vm_trunc_page(dest
+ size
/ 2);
2840 #if __MAC_OX_X_VERSION_MIN_REQUIRED > 1080
2842 "Read-protecting a mid-range buffer page at address "
2844 (uintmax_t)mid_point
);
2845 T_QUIET
; T_ASSERT_MACH_SUCCESS(mach_vm_protect(mach_task_self(), mid_point
, vm_page_size
, FALSE
, VM_PROT_READ
), "mach_vm_protect()");
2846 logv("Page read-protected.");
2848 "Copying region at address 0x%jx and size 0x%jx (%ju), on "
2849 "memory at address 0x%jx...",
2850 (uintmax_t)source
, (uintmax_t)size
, (uintmax_t)size
, (uintmax_t)dest
);
2851 if (size
>= vm_page_size
) {
2852 kr
= KERN_PROTECTION_FAILURE
;
2854 kr
= KERN_INVALID_ADDRESS
;
2856 assert_copy_return(source
, size
, dest
, kr
);
2857 logv("Returned expected error: %s.", mach_error_string(kr
));
2860 "Bypassing partially unwriteable region test "
2861 "(See <rdar://problem/12190999>)");
2862 #endif /* __MAC_OX_X_VERSION_MIN_REQUIRED > 1080 */
2865 /* Copying on partially deallocated memory fails. */
2867 test_copy_source_on_partially_deallocated_range()
2869 mach_vm_address_t source
= get_vm_address();
2870 mach_vm_address_t dest
= get_buffer_address();
2871 mach_vm_address_t start
= mach_vm_trunc_page(source
);
2872 mach_msg_type_number_t size
= (mach_msg_type_number_t
)get_buffer_size();
2875 "Deallocating the first source page at address "
2878 assert_deallocate_success(start
, vm_page_size
);
2879 logv("Page deallocated.");
2882 "Writing buffer of address 0x%jx and size 0x%jx (%ju), on "
2883 "memory at address 0x%jx...",
2884 (uintmax_t)dest
, (uintmax_t)size
, (uintmax_t)size
, (uintmax_t)source
);
2885 assert_copy_return(source
, size
, dest
, KERN_INVALID_ADDRESS
);
2886 logv("Returned expected error: %s.", mach_error_string(KERN_INVALID_ADDRESS
));
2889 /* Copying on partially deallocated memory fails. */
2891 test_copy_dest_on_partially_deallocated_range()
2893 mach_vm_address_t source
= get_vm_address();
2894 mach_vm_address_t dest
= get_buffer_address();
2895 mach_vm_address_t start
= mach_vm_trunc_page(dest
);
2896 mach_msg_type_number_t size
= (mach_msg_type_number_t
)get_buffer_size();
2899 "Deallocating the first destination page at address "
2902 assert_deallocate_success(start
, vm_page_size
);
2903 logv("Page deallocated.");
2906 "Writing buffer of address 0x%jx and size 0x%jx (%ju), on "
2907 "memory at address 0x%jx...",
2908 (uintmax_t)dest
, (uintmax_t)size
, (uintmax_t)size
, (uintmax_t)source
);
2909 assert_copy_return(source
, size
, dest
, KERN_INVALID_ADDRESS
);
2910 logv("Returned expected error: %s.", mach_error_string(KERN_INVALID_ADDRESS
));
2913 /* Copying on partially unwritable memory fails. */
2915 test_copy_dest_on_partially_unwritable_range()
2917 mach_vm_address_t source
= get_vm_address();
2918 mach_vm_address_t dest
= get_buffer_address();
2919 mach_vm_address_t start
= mach_vm_trunc_page(dest
);
2920 mach_msg_type_number_t size
= (mach_msg_type_number_t
)get_buffer_size();
2922 /* For sizes < msg_ool_size_small,
2923 * vm_map_copy_overwrite_nested() uses
2924 * vm_map_copyout_kernel_buffer() to read in the memory,
2925 * returning different errors, see 8217123. */
2926 kern_return_t kr_expected
= (size
< vm_page_size
* 2) ? KERN_INVALID_ADDRESS
: KERN_PROTECTION_FAILURE
;
2929 "Write-protecting the first destination page at address "
2932 T_QUIET
; T_ASSERT_MACH_SUCCESS(mach_vm_protect(mach_task_self(), start
, vm_page_size
, FALSE
, VM_PROT_READ
), "mach_vm_protect()");
2933 logv("Page write-protected.");
2936 "Writing buffer of address 0x%jx and size 0x%jx (%ju), on "
2937 "memory at address 0x%jx...",
2938 (uintmax_t)dest
, (uintmax_t)size
, (uintmax_t)size
, (uintmax_t)source
);
2939 assert_copy_return(source
, size
, dest
, kr_expected
);
2940 logv("Returned expected error: %s.", mach_error_string(kr_expected
));
2943 /* Copying on partially unreadable memory fails. */
2945 test_copy_source_on_partially_unreadable_range()
2947 mach_vm_address_t source
= get_vm_address();
2948 mach_vm_address_t dest
= get_buffer_address();
2949 mach_vm_address_t start
= mach_vm_trunc_page(source
);
2950 mach_msg_type_number_t size
= (mach_msg_type_number_t
)get_buffer_size();
2952 /* For sizes < msg_ool_size_small,
2953 * vm_map_copy_overwrite_nested() uses
2954 * vm_map_copyout_kernel_buffer() to read in the memory,
2955 * returning different errors, see 8217123. */
2956 kern_return_t kr_expected
= (size
< vm_page_size
* 2) ? KERN_INVALID_ADDRESS
: KERN_PROTECTION_FAILURE
;
2959 "Read-protecting the first destination page at address "
2962 T_QUIET
; T_ASSERT_MACH_SUCCESS(mach_vm_protect(mach_task_self(), start
, vm_page_size
, FALSE
, VM_PROT_WRITE
), "mach_vm_protect()");
2963 logv("Page read-protected.");
2966 "Writing buffer of address 0x%jx and size 0x%jx (%ju), on "
2967 "memory at address 0x%jx...",
2968 (uintmax_t)dest
, (uintmax_t)size
, (uintmax_t)size
, (uintmax_t)source
);
2969 assert_copy_return(source
, size
, dest
, kr_expected
);
2970 logv("Returned expected error: %s.", mach_error_string(kr_expected
));
2973 /********************************/
2974 /* mach_vm_protect() main tests */
2975 /********************************/
2978 test_zero_filled_extended()
2980 verify_pattern(empty
, FALSE
, mach_vm_trunc_page(get_vm_address()), round_page_kernel(get_vm_size() + 1), "zero-filled");
2983 /* Allocated region is still zero-filled after read-protecting it and
2984 * then restoring read-access. */
2986 test_zero_filled_readprotect()
2988 mach_vm_address_t address
= get_vm_address();
2989 mach_vm_size_t size
= get_vm_size();
2991 logv("Setting read access on 0x%jx (%ju) byte%s at address 0x%jx...", (uintmax_t)size
, (uintmax_t)size
,
2992 (size
== 1) ? "" : "s", (uintmax_t)address
);
2993 T_QUIET
; T_ASSERT_MACH_SUCCESS(mach_vm_protect(mach_task_self(), address
, size
, FALSE
, VM_PROT_DEFAULT
), "mach_vm_protect()");
2994 logv("Region has read access.");
2995 test_zero_filled_extended();
2999 verify_protection(vm_prot_t protection
, const char * protection_name
)
3001 mach_vm_address_t address
= get_vm_address();
3002 mach_vm_size_t size
= get_vm_size();
3003 mach_vm_size_t original_size
= size
;
3004 vm_region_basic_info_data_64_t info
;
3005 mach_msg_type_number_t count
= VM_REGION_BASIC_INFO_COUNT_64
;
3009 "Verifying %s-protection on region of address 0x%jx and "
3010 "size 0x%jx (%ju) with mach_vm_region()...",
3011 protection_name
, (uintmax_t)address
, (uintmax_t)size
, (uintmax_t)size
);
3012 T_QUIET
; T_ASSERT_MACH_SUCCESS(
3013 mach_vm_region(mach_task_self(), &address
, &size
, VM_REGION_BASIC_INFO_64
, (vm_region_info_t
)&info
, &count
, &unused
),
3014 "mach_vm_region()");
3015 if (original_size
) {
3016 T_QUIET
; T_ASSERT_EQ((info
.protection
& protection
), 0,
3018 "is unexpectedly %s-unprotected.",
3020 logv("Region is %s-protected as expected.", protection_name
);
3022 T_QUIET
; T_ASSERT_NE(info
.protection
& protection
, 0,
3024 "unexpectedly %s-protected.",
3026 logv("Region is %s-unprotected as expected.", protection_name
);
3031 test_verify_readprotection()
3033 verify_protection(VM_PROT_READ
, "read");
3037 test_verify_writeprotection()
3039 verify_protection(VM_PROT_WRITE
, "write");
3042 /******************************/
3043 /* Protection bus error tests */
3044 /******************************/
3046 /* mach_vm_protect() affects the smallest aligned region (integral
3047 * number of pages) containing the given range. */
3049 /* Addresses in read-protected range are inaccessible. */
3051 access_readprotected_range_address(mach_vm_address_t address
, const char * position
)
3053 logv("Reading from %s 0x%jx of read-protected range...", position
, (uintmax_t)address
);
3054 mach_vm_address_t bad_value
= MACH_VM_ADDRESS_T(address
);
3055 T_ASSERT_FAIL("Unexpectedly read value 0x%jx at address 0x%jx."
3056 "Should have died with signal SIGBUS.",
3057 (uintmax_t)bad_value
, (uintmax_t)address
);
3060 /* Start of read-protected range is inaccessible. */
3062 test_access_readprotected_range_start()
3064 access_readprotected_range_address(mach_vm_trunc_page(get_vm_address()), "start");
3067 /* Middle of read-protected range is inaccessible. */
3069 test_access_readprotected_range_middle()
3071 mach_vm_address_t address
= get_vm_address();
3072 access_readprotected_range_address(mach_vm_trunc_page(address
) + (aligned_size(address
, get_vm_size()) >> 1), "middle");
3075 /* End of read-protected range is inaccessible. */
3077 test_access_readprotected_range_end()
3079 access_readprotected_range_address(round_page_kernel(get_vm_address() + get_vm_size()) - vm_address_size
, "end");
3082 /* Addresses in write-protected range are unwritable. */
3084 write_writeprotected_range_address(mach_vm_address_t address
, const char * position
)
3086 logv("Writing on %s 0x%jx of write-protected range...", position
, (uintmax_t)address
);
3087 MACH_VM_ADDRESS_T(address
) = 0x0;
3088 T_ASSERT_FAIL("Unexpectedly wrote value 0x0 value at address 0x%jx."
3089 "Should have died with signal SIGBUS.",
3090 (uintmax_t)address
);
3093 /* Start of write-protected range is unwritable. */
3095 test_write_writeprotected_range_start()
3097 write_writeprotected_range_address(mach_vm_trunc_page(get_vm_address()), "start");
3100 /* Middle of write-protected range is unwritable. */
3102 test_write_writeprotected_range_middle()
3104 mach_vm_address_t address
= get_vm_address();
3105 write_writeprotected_range_address(mach_vm_trunc_page(address
) + (aligned_size(address
, get_vm_size()) >> 1), "middle");
3108 /* End of write-protected range is unwritable. */
3110 test_write_writeprotected_range_end()
3112 write_writeprotected_range_address(round_page_kernel(get_vm_address() + get_vm_size()) - vm_address_size
, "end");
3115 /*************************************/
3116 /* mach_vm_protect() edge case tests */
3117 /*************************************/
3120 protect_zero_size(vm_prot_t protection
, const char * protection_name
)
3124 vm_map_t this_task
= mach_task_self();
3125 mach_vm_address_t addresses
[] = {0x0,
3130 (mach_vm_address_t
)UINT_MAX
- vm_page_size
+ 1,
3131 (mach_vm_address_t
)UINT_MAX
,
3132 (mach_vm_address_t
)UINTMAX_MAX
- vm_page_size
+ 1,
3133 (mach_vm_address_t
)UINTMAX_MAX
};
3134 int numofaddresses
= sizeof(addresses
) / sizeof(addresses
[0]);
3136 logv("%s-protecting 0x0 (0) bytes at various addresses...", protection_name
);
3137 for (i
= 0; i
< numofaddresses
; i
++) {
3138 kr
= mach_vm_protect(this_task
, addresses
[i
], 0, FALSE
, protection
);
3139 T_QUIET
; T_ASSERT_MACH_SUCCESS(kr
,
3140 "mach_vm_protect() at "
3141 "address 0x%jx unexpectedly failed: %s.",
3142 (uintmax_t)addresses
[i
], mach_error_string(kr
));
3144 logv("Protection successful.");
3148 test_readprotect_zero_size()
3150 protect_zero_size(VM_PROT_READ
, "Read");
3154 test_writeprotect_zero_size()
3156 protect_zero_size(VM_PROT_WRITE
, "Write");
3159 /* Protecting a range wrapped around the address space fails. */
3161 protect_wrapped_around_ranges(vm_prot_t protection
, const char * protection_name
)
3165 vm_map_t this_task
= mach_task_self();
3167 mach_vm_address_t address
;
3168 mach_vm_size_t size
;
3170 {0x1, (mach_vm_size_t
)UINTMAX_MAX
},
3171 {vm_page_size
, (mach_vm_size_t
)UINTMAX_MAX
- vm_page_size
+ 1},
3172 {(mach_vm_address_t
)UINTMAX_MAX
- vm_page_size
+ 1, vm_page_size
},
3173 {(mach_vm_address_t
)UINTMAX_MAX
, 1},
3175 int numofranges
= sizeof(ranges
) / sizeof(ranges
[0]);
3178 "%s-protecting various memory ranges wrapping around the "
3181 for (i
= 0; i
< numofranges
; i
++) {
3182 kr
= mach_vm_protect(this_task
, ranges
[i
].address
, ranges
[i
].size
, FALSE
, protection
);
3183 T_QUIET
; T_ASSERT_EQ(kr
, KERN_INVALID_ARGUMENT
,
3184 "mach_vm_protect() with address 0x%jx and size "
3185 "0x%jx (%ju) unexpectedly returned: %s.\n"
3186 "Should have returned: %s.",
3187 (uintmax_t)ranges
[i
].address
, (uintmax_t)ranges
[i
].size
, (uintmax_t)ranges
[i
].size
, mach_error_string(kr
),
3188 mach_error_string(KERN_INVALID_ARGUMENT
));
3190 logv("Returned expected error on each range: %s.", mach_error_string(KERN_INVALID_ARGUMENT
));
3194 test_readprotect_wrapped_around_ranges()
3196 protect_wrapped_around_ranges(VM_PROT_READ
, "Read");
3200 test_writeprotect_wrapped_around_ranges()
3202 protect_wrapped_around_ranges(VM_PROT_WRITE
, "Write");
3205 /*******************/
3206 /* vm_copy() tests */
3207 /*******************/
3209 /* Verify the address space is being shared. */
3211 assert_share_mode(mach_vm_address_t address
, unsigned share_mode
, const char * share_mode_name
)
3213 mach_vm_size_t size
= get_vm_size();
3214 vm_region_extended_info_data_t info
;
3215 mach_msg_type_number_t count
= VM_REGION_EXTENDED_INFO_COUNT
;
3219 * XXX Fails on UVM kernel. See <rdar://problem/12164664>
3221 #if notyet /* __MAC_OS_X_VERSION_MIN_REQUIRED < 1090 */
3223 "Verifying %s share mode on region of address 0x%jx and "
3224 "size 0x%jx (%ju)...",
3225 share_mode_name
, (uintmax_t)address
, (uintmax_t)size
, (uintmax_t)size
);
3226 T_QUIET
; T_ASSERT_MACH_SUCCESS(
3227 mach_vm_region(mach_task_self(), &address
, &size
, VM_REGION_EXTENDED_INFO
, (vm_region_info_t
)&info
, &count
, &unused
),
3228 "mach_vm_region()");
3229 T_QUIET
; T_ASSERT_EQ(info
.share_mode
, share_mode
,
3230 "Region's share mode "
3231 " unexpectedly is not %s but %d.",
3232 share_mode_name
, info
.share_mode
);
3233 logv("Region has a share mode of %s as expected.", share_mode_name
);
3235 logv("Bypassing share_mode verification (See <rdar://problem/12164664>)");
3236 #endif /* __MAC_OS_X_VERSION_MIN_REQUIRED < 1090 */
3239 /* Do the vm_copy() and verify its success. */
3241 assert_vmcopy_success(vm_address_t src
, vm_address_t dst
, const char * source_name
)
3244 mach_vm_size_t size
= get_vm_size();
3246 logv("Copying (using mach_vm_copy()) from a %s source...", source_name
);
3247 kr
= mach_vm_copy(mach_task_self(), src
, size
, dst
);
3248 T_QUIET
; T_ASSERT_MACH_SUCCESS(kr
,
3249 "mach_vm_copy() with the source address "
3250 "0x%jx, designation address 0x%jx, and size 0x%jx (%ju) unexpectly "
3251 "returned %s.\n Should have returned: %s.",
3252 (uintmax_t)src
, (uintmax_t)dst
, (uintmax_t)size
, (uintmax_t)size
, mach_error_string(kr
),
3253 mach_error_string(KERN_SUCCESS
));
3254 logv("Copy (mach_vm_copy()) was successful as expected.");
3258 write_region(mach_vm_address_t address
, mach_vm_size_t start
)
3260 mach_vm_size_t size
= get_vm_size();
3262 filter_addresses_do_else(empty
, FALSE
, address
, size
, write_address
, write_address
, start
);
3266 verify_region(mach_vm_address_t address
, mach_vm_address_t start
)
3268 mach_vm_size_t size
= get_vm_size();
3270 filter_addresses_do_else(empty
, FALSE
, address
, size
, verify_address
, verify_address
, start
);
3273 /* Perform the post vm_copy() action and verify its results. */
3275 modify_one_and_verify_all_regions(vm_address_t src
, vm_address_t dst
, vm_address_t shared_copied
, boolean_t shared
)
3277 mach_vm_size_t size
= get_vm_size();
3278 int action
= get_vmcopy_post_action();
3280 /* Do the post vm_copy() action. */
3282 case VMCOPY_MODIFY_SRC
:
3283 logv("Modifying: source%s...", shared
? " (shared with other region)" : "");
3284 write_region(src
, 1);
3287 case VMCOPY_MODIFY_DST
:
3288 logv("Modifying: destination...");
3289 write_region(dst
, 1);
3292 case VMCOPY_MODIFY_SHARED_COPIED
:
3293 /* If no shared_copied then no need to verify (nothing changed). */
3294 if (!shared_copied
) {
3297 logv("Modifying: shared/copied%s...", shared
? " (shared with source region)" : "");
3298 write_region(shared_copied
, 1);
3302 T_ASSERT_FAIL("Unknown post vm_copy() action (%d)", action
);
3304 logv("Modification was successful as expected.");
3306 /* Verify all the regions with what is expected. */
3307 logv("Verifying: source... ");
3308 verify_region(src
, (VMCOPY_MODIFY_SRC
== action
|| (shared
&& VMCOPY_MODIFY_SHARED_COPIED
== action
)) ? 1 : 0);
3309 logv("destination... ");
3310 verify_region(dst
, (VMCOPY_MODIFY_DST
== action
) ? 1 : 0);
3311 if (shared_copied
) {
3312 logv("shared/copied... ");
3313 verify_region(shared_copied
, (VMCOPY_MODIFY_SHARED_COPIED
== action
|| (shared
&& VMCOPY_MODIFY_SRC
== action
)) ? 1 : 0);
3315 logv("Verification was successful as expected.");
3318 /* Test source being a simple fresh region. */
3320 test_vmcopy_fresh_source()
3322 mach_vm_size_t size
= get_vm_size();
3323 mach_vm_address_t src
, dst
;
3325 if (get_vmcopy_post_action() == VMCOPY_MODIFY_SHARED_COPIED
) {
3326 /* No shared/copied region to modify so just return. */
3327 logv("No shared/copied region as expected.");
3331 assert_allocate_success(&src
, size
, TRUE
);
3333 assert_share_mode(src
, SM_EMPTY
, "SM_EMPTY");
3335 write_region(src
, 0);
3337 assert_allocate_success(&dst
, size
, TRUE
);
3339 assert_vmcopy_success(src
, dst
, "freshly allocated");
3341 modify_one_and_verify_all_regions(src
, dst
, 0, FALSE
);
3343 assert_deallocate_success(src
, size
);
3344 assert_deallocate_success(dst
, size
);
3347 /* Test source copied from a shared region. */
3349 test_vmcopy_shared_source()
3351 mach_vm_size_t size
= get_vm_size();
3352 mach_vm_address_t src
, dst
, shared
;
3353 int action
= get_vmcopy_post_action();
3356 assert_allocate_success(&src
, size
, TRUE
);
3358 T_QUIET
; T_ASSERT_MACH_SUCCESS(mach_vm_inherit(mach_task_self(), src
, size
, VM_INHERIT_SHARE
), "mach_vm_inherit()");
3360 write_region(src
, 0);
3364 /* Verify that the child's 'src' is shared with the
3366 assert_share_mode(src
, SM_SHARED
, "SM_SHARED");
3367 assert_allocate_success(&dst
, size
, TRUE
);
3368 assert_vmcopy_success(src
, dst
, "shared");
3369 if (VMCOPY_MODIFY_SHARED_COPIED
== action
) {
3370 logv("Modifying: shared...");
3371 write_region(src
, 1);
3372 logv("Modification was successsful as expected.");
3373 logv("Verifying: source... ");
3374 verify_region(src
, 1);
3375 logv("destination...");
3376 verify_region(dst
, (VMCOPY_MODIFY_DST
== action
) ? 1 : 0);
3377 logv("Verification was successful as expected.");
3379 modify_one_and_verify_all_regions(src
, dst
, 0, TRUE
);
3381 assert_deallocate_success(dst
, size
);
3383 } else if (pid
> 0) {
3384 /* In the parent the src becomes the shared */
3387 if (WEXITSTATUS(status
) != 0) {
3390 /* verify shared (shared with child's src) */
3391 logv("Verifying: shared...");
3392 verify_region(shared
, (VMCOPY_MODIFY_SHARED_COPIED
== action
|| VMCOPY_MODIFY_SRC
== action
) ? 1 : 0);
3393 logv("Verification was successful as expected.");
3395 T_WITH_ERRNO
; T_ASSERT_FAIL("fork failed");
3398 assert_deallocate_success(src
, size
);
3401 /* Test source copied from another mapping. */
3403 test_vmcopy_copied_from_source()
3405 mach_vm_size_t size
= get_vm_size();
3406 mach_vm_address_t src
, dst
, copied
;
3408 assert_allocate_success(&copied
, size
, TRUE
);
3409 write_region(copied
, 0);
3411 assert_allocate_success(&src
, size
, TRUE
);
3413 T_QUIET
; T_ASSERT_MACH_SUCCESS(mach_vm_copy(mach_task_self(), copied
, size
, src
), "mach_vm_copy()");
3415 assert_share_mode(src
, SM_COW
, "SM_COW");
3417 assert_allocate_success(&dst
, size
, TRUE
);
3419 assert_vmcopy_success(src
, dst
, "copied from");
3421 modify_one_and_verify_all_regions(src
, dst
, copied
, FALSE
);
3423 assert_deallocate_success(src
, size
);
3424 assert_deallocate_success(dst
, size
);
3425 assert_deallocate_success(copied
, size
);
3428 /* Test source copied to another mapping. */
3430 test_vmcopy_copied_to_source()
3432 mach_vm_size_t size
= get_vm_size();
3433 mach_vm_address_t src
, dst
, copied
;
3435 assert_allocate_success(&src
, size
, TRUE
);
3436 write_region(src
, 0);
3438 assert_allocate_success(&copied
, size
, TRUE
);
3440 T_QUIET
; T_ASSERT_MACH_SUCCESS(mach_vm_copy(mach_task_self(), src
, size
, copied
), "mach_vm_copy()");
3442 assert_share_mode(src
, SM_COW
, "SM_COW");
3444 assert_allocate_success(&dst
, size
, TRUE
);
3446 assert_vmcopy_success(src
, dst
, "copied to");
3448 modify_one_and_verify_all_regions(src
, dst
, copied
, FALSE
);
3450 assert_deallocate_success(src
, size
);
3451 assert_deallocate_success(dst
, size
);
3452 assert_deallocate_success(copied
, size
);
3455 /* Test a truedshared source copied. */
3457 test_vmcopy_trueshared_source()
3459 mach_vm_size_t size
= get_vm_size();
3460 mach_vm_address_t src
= 0x0, dst
, shared
;
3461 vm_prot_t cur_protect
= (VM_PROT_READ
| VM_PROT_WRITE
);
3462 vm_prot_t max_protect
= (VM_PROT_READ
| VM_PROT_WRITE
);
3463 mem_entry_name_port_t mem_obj
;
3465 assert_allocate_success(&shared
, size
, TRUE
);
3466 write_region(shared
, 0);
3468 T_QUIET
; T_ASSERT_MACH_SUCCESS(mach_make_memory_entry_64(mach_task_self(), &size
, (memory_object_offset_t
)shared
, cur_protect
, &mem_obj
,
3469 (mem_entry_name_port_t
)NULL
),
3470 "mach_make_memory_entry_64()");
3471 T_QUIET
; T_ASSERT_MACH_SUCCESS(
3472 mach_vm_map(mach_task_self(), &src
, size
, 0, TRUE
, mem_obj
, 0, FALSE
, cur_protect
, max_protect
, VM_INHERIT_NONE
),
3475 assert_share_mode(src
, SM_TRUESHARED
, "SM_TRUESHARED");
3477 assert_allocate_success(&dst
, size
, TRUE
);
3479 assert_vmcopy_success(src
, dst
, "true shared");
3481 modify_one_and_verify_all_regions(src
, dst
, shared
, TRUE
);
3483 assert_deallocate_success(src
, size
);
3484 assert_deallocate_success(dst
, size
);
3485 assert_deallocate_success(shared
, size
);
3488 /* Test a private aliazed source copied. */
3490 test_vmcopy_private_aliased_source()
3492 mach_vm_size_t size
= get_vm_size();
3493 mach_vm_address_t src
= 0x0, dst
, shared
;
3494 vm_prot_t cur_protect
= (VM_PROT_READ
| VM_PROT_WRITE
);
3495 vm_prot_t max_protect
= (VM_PROT_READ
| VM_PROT_WRITE
);
3497 assert_allocate_success(&shared
, size
, TRUE
);
3498 write_region(shared
, 0);
3500 T_QUIET
; T_ASSERT_MACH_SUCCESS(mach_vm_remap(mach_task_self(), &src
, size
, 0, TRUE
, mach_task_self(), shared
, FALSE
, &cur_protect
,
3501 &max_protect
, VM_INHERIT_NONE
),
3504 assert_share_mode(src
, SM_PRIVATE_ALIASED
, "SM_PRIVATE_ALIASED");
3506 assert_allocate_success(&dst
, size
, TRUE
);
3508 assert_vmcopy_success(src
, dst
, "true shared");
3510 modify_one_and_verify_all_regions(src
, dst
, shared
, TRUE
);
3512 assert_deallocate_success(src
, size
);
3513 assert_deallocate_success(dst
, size
);
3514 assert_deallocate_success(shared
, size
);
3522 run_allocate_test_suites()
3524 /* <rdar://problem/10304215> CoreOSZin 12Z30: VMUnitTest fails:
3525 * error finding xnu major version number. */
3526 /* unsigned int xnu_version = xnu_major_version(); */
3528 UnitTests allocate_main_tests
= {
3529 {"Allocated address is nonzero iff size is nonzero", test_nonzero_address_iff_nonzero_size
},
3530 {"Allocated address is page-aligned", test_aligned_address
},
3531 {"Allocated memory is zero-filled", test_zero_filled
},
3532 {"Write and verify address-filled pattern", test_write_address_filled
},
3533 {"Write and verify checkerboard pattern", test_write_checkerboard
},
3534 {"Write and verify reverse checkerboard pattern", test_write_reverse_checkerboard
},
3535 {"Write and verify page ends pattern", test_write_page_ends
},
3536 {"Write and verify page interiors pattern", test_write_page_interiors
},
3537 {"Reallocate allocated pages", test_reallocate_pages
},
3539 UnitTests allocate_address_error_tests
= {
3540 {"Allocate at address zero", test_allocate_at_zero
},
3541 {"Allocate at a 2 MB boundary-unaligned, page-aligned "
3543 test_allocate_2MB_boundary_unaligned_page_aligned_address
},
3545 UnitTests allocate_argument_error_tests
= {
3546 {"Allocate in NULL VM map", test_allocate_in_null_map
}, {"Allocate with kernel flags", test_allocate_with_kernel_flags
},
3548 UnitTests allocate_fixed_size_tests
= {
3549 {"Allocate zero size", test_allocate_zero_size
},
3550 {"Allocate overflowing size", test_allocate_overflowing_size
},
3551 {"Allocate a page with highest address hint", test_allocate_page_with_highest_address_hint
},
3552 {"Allocate two pages and verify first fit strategy", test_allocate_first_fit_pages
},
3554 UnitTests allocate_invalid_large_size_test
= {
3555 {"Allocate invalid large size", test_allocate_invalid_large_size
},
3557 UnitTests mach_vm_map_protection_inheritance_error_test
= {
3558 {"mach_vm_map() with invalid protection/inheritance "
3560 test_mach_vm_map_protection_inheritance_error
},
3562 UnitTests mach_vm_map_large_mask_overflow_error_test
= {
3563 {"mach_vm_map() with large address mask", test_mach_vm_map_large_mask_overflow_error
},
3566 /* Run the test suites with various allocators and VM sizes, and
3567 * unspecified or fixed (page-aligned or page-unaligned),
3569 for (allocators_idx
= 0; allocators_idx
< numofallocators
; allocators_idx
++) {
3570 for (sizes_idx
= 0; sizes_idx
< numofsizes
; sizes_idx
++) {
3571 for (flags_idx
= 0; flags_idx
< numofflags
; flags_idx
++) {
3572 for (alignments_idx
= 0; alignments_idx
< numofalignments
; alignments_idx
++) {
3573 /* An allocated address will be page-aligned. */
3574 /* Only run the zero size mach_vm_map() error tests in the
3575 * unspecified address case, since we won't be able to retrieve a
3576 * fixed address for allocation. See 8003930. */
3577 if ((flags_idx
== ANYWHERE
&& alignments_idx
== UNALIGNED
) ||
3578 (allocators_idx
!= MACH_VM_ALLOCATE
&& sizes_idx
== ZERO_BYTES
&& flags_idx
== FIXED
)) {
3581 run_suite(set_up_allocator_and_vm_variables
, allocate_argument_error_tests
, do_nothing
,
3582 "%s argument error tests, %s%s address, "
3583 "%s size: 0x%jx (%ju)",
3584 allocators
[allocators_idx
].description
, address_flags
[flags_idx
].description
,
3585 (flags_idx
== ANYWHERE
) ? "" : address_alignments
[alignments_idx
].description
,
3586 vm_sizes
[sizes_idx
].description
, (uintmax_t)vm_sizes
[sizes_idx
].size
,
3587 (uintmax_t)vm_sizes
[sizes_idx
].size
);
3588 /* mach_vm_map() only protection and inheritance error
3590 if (allocators_idx
!= MACH_VM_ALLOCATE
) {
3591 run_suite(set_up_allocator_and_vm_variables
, mach_vm_map_protection_inheritance_error_test
, do_nothing
,
3592 "%s protection and inheritance "
3593 "error test, %s%s address, %s size: 0x%jx "
3595 allocators
[allocators_idx
].description
, address_flags
[flags_idx
].description
,
3596 (flags_idx
== ANYWHERE
) ? "" : address_alignments
[alignments_idx
].description
,
3597 vm_sizes
[sizes_idx
].description
, (uintmax_t)vm_sizes
[sizes_idx
].size
,
3598 (uintmax_t)vm_sizes
[sizes_idx
].size
);
3600 /* mach_vm_map() cannot allocate 0 bytes, see 8003930. */
3601 if (allocators_idx
== MACH_VM_ALLOCATE
|| sizes_idx
!= ZERO_BYTES
) {
3602 run_suite(set_up_allocator_and_vm_variables_and_allocate
, allocate_main_tests
, deallocate
,
3604 "allocation tests, %s%s address, %s size: 0x%jx "
3606 allocators
[allocators_idx
].description
, address_flags
[flags_idx
].description
,
3607 (flags_idx
== ANYWHERE
) ? "" : address_alignments
[alignments_idx
].description
,
3608 vm_sizes
[sizes_idx
].description
, (uintmax_t)vm_sizes
[sizes_idx
].size
,
3609 (uintmax_t)vm_sizes
[sizes_idx
].size
);
3613 run_suite(set_up_allocator_and_vm_size
, allocate_address_error_tests
, do_nothing
,
3615 "error allocation tests, %s size: 0x%jx (%ju)",
3616 allocators
[allocators_idx
].description
, vm_sizes
[sizes_idx
].description
, (uintmax_t)vm_sizes
[sizes_idx
].size
,
3617 (uintmax_t)vm_sizes
[sizes_idx
].size
);
3619 run_suite(set_up_allocator
, allocate_fixed_size_tests
, do_nothing
, "%s fixed size allocation tests",
3620 allocators
[allocators_idx
].description
);
3621 /* <rdar://problem/10304215> CoreOSZin 12Z30: VMUnitTest fails:
3622 * error finding xnu major version number. */
3623 /* mach_vm_map() with a named entry triggers a panic with this test
3624 * unless under xnu-1598 or later, see 8048580. */
3625 /* if (allocators_idx != MACH_VM_MAP_NAMED_ENTRY
3626 || xnu_version >= 1598) { */
3627 if (allocators_idx
!= MACH_VM_MAP_NAMED_ENTRY
) {
3628 run_suite(set_up_allocator
, allocate_invalid_large_size_test
, do_nothing
, "%s invalid large size allocation test",
3629 allocators
[allocators_idx
].description
);
3632 /* mach_vm_map() only large mask overflow tests. */
3633 for (sizes_idx
= 0; sizes_idx
< numofsizes
; sizes_idx
++) {
3634 run_suite(set_up_vm_size
, mach_vm_map_large_mask_overflow_error_test
, do_nothing
,
3635 "mach_vm_map() large mask overflow "
3636 "error test, size: 0x%jx (%ju)",
3637 (uintmax_t)vm_sizes
[sizes_idx
].size
, (uintmax_t)vm_sizes
[sizes_idx
].size
);
3642 run_deallocate_test_suites()
3644 UnitTests access_deallocated_memory_tests
= {
3645 {"Read start of deallocated range", test_access_deallocated_range_start
},
3646 {"Read middle of deallocated range", test_access_deallocated_range_middle
},
3647 {"Read end of deallocated range", test_access_deallocated_range_end
},
3649 UnitTests deallocate_reallocate_tests
= {
3650 {"Deallocate twice", test_deallocate_twice
},
3651 {"Write pattern, deallocate, reallocate (deallocated "
3652 "memory is inaccessible), and verify memory is "
3654 test_write_pattern_deallocate_reallocate_zero_filled
},
3656 UnitTests deallocate_null_map_test
= {
3657 {"Deallocate in NULL VM map", test_deallocate_in_null_map
},
3659 UnitTests deallocate_edge_case_tests
= {
3660 {"Deallocate zero size ranges", test_deallocate_zero_size_ranges
},
3661 {"Deallocate memory ranges whose end rounds to 0x0", test_deallocate_rounded_zero_end_ranges
},
3662 {"Deallocate wrapped around memory ranges", test_deallocate_wrapped_around_ranges
},
3664 UnitTests deallocate_suicide_test
= {
3665 {"Deallocate whole address space", test_deallocate_suicide
},
3668 /* All allocations done with mach_vm_allocate(). */
3669 set_allocator(wrapper_mach_vm_allocate
);
3671 /* Run the test suites with various VM sizes, and unspecified or
3672 * fixed (page-aligned or page-unaligned), addresses. */
3673 for (sizes_idx
= 0; sizes_idx
< numofsizes
; sizes_idx
++) {
3674 for (flags_idx
= 0; flags_idx
< numofflags
; flags_idx
++) {
3675 for (alignments_idx
= 0; alignments_idx
< numofalignments
; alignments_idx
++) {
3676 /* An allocated address will be page-aligned. */
3677 if (flags_idx
== ANYWHERE
&& alignments_idx
== UNALIGNED
) {
3680 /* Accessing deallocated memory should cause a segmentation
3682 /* Nothing gets deallocated if size is zero. */
3683 if (sizes_idx
!= ZERO_BYTES
) {
3684 set_expected_signal(SIGSEGV
);
3685 run_suite(set_up_vm_variables_and_allocate
, access_deallocated_memory_tests
, do_nothing
,
3686 "Deallocated memory access tests, "
3687 "%s%s address, %s size: 0x%jx (%ju)",
3688 address_flags
[flags_idx
].description
,
3689 (flags_idx
== ANYWHERE
) ? "" : address_alignments
[alignments_idx
].description
,
3690 vm_sizes
[sizes_idx
].description
, (uintmax_t)vm_sizes
[sizes_idx
].size
,
3691 (uintmax_t)vm_sizes
[sizes_idx
].size
);
3692 set_expected_signal(0);
3694 run_suite(set_up_vm_variables_and_allocate
, deallocate_reallocate_tests
, do_nothing
,
3695 "Deallocation and reallocation tests, %s%s "
3696 "address, %s size: 0x%jx (%ju)",
3697 address_flags
[flags_idx
].description
,
3698 (flags_idx
== ANYWHERE
) ? "" : address_alignments
[alignments_idx
].description
,
3699 vm_sizes
[sizes_idx
].description
, (uintmax_t)vm_sizes
[sizes_idx
].size
,
3700 (uintmax_t)vm_sizes
[sizes_idx
].size
);
3701 run_suite(set_up_vm_variables
, deallocate_null_map_test
, do_nothing
,
3702 "mach_vm_deallocate() null map test, "
3703 "%s%s address, %s size: 0x%jx (%ju)",
3704 address_flags
[flags_idx
].description
,
3705 (flags_idx
== ANYWHERE
) ? "" : address_alignments
[alignments_idx
].description
,
3706 vm_sizes
[sizes_idx
].description
, (uintmax_t)vm_sizes
[sizes_idx
].size
,
3707 (uintmax_t)vm_sizes
[sizes_idx
].size
);
3711 run_suite(do_nothing
, deallocate_edge_case_tests
, do_nothing
, "Edge case deallocation tests");
3713 set_expected_signal(-1); /* SIGSEGV or SIGBUS */
3714 run_suite(do_nothing
, deallocate_suicide_test
, do_nothing
, "Whole address space deallocation test");
3715 set_expected_signal(0);
3719 run_read_test_suites()
3721 UnitTests read_main_tests
= {
3722 {"Read address is nonzero iff size is nonzero", test_nonzero_address_iff_nonzero_size
},
3723 {"Read address has the correct boundary offset", test_read_address_offset
},
3724 {"Reallocate read pages", test_reallocate_pages
},
3725 {"Read and verify zero-filled memory", test_zero_filled
},
3727 UnitTests read_pattern_tests
= {
3728 {"Read address-filled pattern", test_read_address_filled
},
3729 {"Read checkerboard pattern", test_read_checkerboard
},
3730 {"Read reverse checkerboard pattern", test_read_reverse_checkerboard
},
3732 UnitTests read_null_map_test
= {
3733 {"Read from NULL VM map", test_read_null_map
},
3735 UnitTests read_edge_case_tests
= {
3736 {"Read zero size", test_read_zero_size
},
3737 {"Read invalid large size", test_read_invalid_large_size
},
3738 {"Read wrapped around memory ranges", test_read_wrapped_around_ranges
},
3740 UnitTests read_inaccessible_tests
= {
3741 {"Read partially decallocated memory", test_read_partially_deallocated_range
},
3742 {"Read partially read-protected memory", test_read_partially_unreadable_range
},
3745 /* All allocations done with mach_vm_allocate(). */
3746 set_allocator(wrapper_mach_vm_allocate
);
3748 /* Run the test suites with various VM sizes, and unspecified or
3749 * fixed (page-aligned or page-unaligned) addresses. */
3750 for (sizes_idx
= 0; sizes_idx
< numofsizes
; sizes_idx
++) {
3751 for (flags_idx
= 0; flags_idx
< numofflags
; flags_idx
++) {
3752 for (alignments_idx
= 0; alignments_idx
< numofalignments
; alignments_idx
++) {
3753 /* An allocated address will be page-aligned. */
3754 if (flags_idx
== ANYWHERE
&& alignments_idx
== UNALIGNED
) {
3757 run_suite(set_up_vm_variables_allocate_read_deallocate
, read_main_tests
, deallocate
,
3759 "main tests, %s%s address, %s size: 0x%jx (%ju)",
3760 address_flags
[flags_idx
].description
,
3761 (flags_idx
== ANYWHERE
) ? "" : address_alignments
[alignments_idx
].description
,
3762 vm_sizes
[sizes_idx
].description
, (uintmax_t)vm_sizes
[sizes_idx
].size
,
3763 (uintmax_t)vm_sizes
[sizes_idx
].size
);
3764 run_suite(set_up_vm_variables_and_allocate_extra_page
, read_pattern_tests
, deallocate
,
3765 "mach_vm_read() pattern tests, %s%s address, %s "
3766 "size: 0x%jx (%ju)",
3767 address_flags
[flags_idx
].description
,
3768 (flags_idx
== ANYWHERE
) ? "" : address_alignments
[alignments_idx
].description
,
3769 vm_sizes
[sizes_idx
].description
, (uintmax_t)vm_sizes
[sizes_idx
].size
,
3770 (uintmax_t)vm_sizes
[sizes_idx
].size
);
3771 run_suite(set_up_vm_variables_and_allocate_extra_page
, read_null_map_test
, deallocate_extra_page
,
3772 "mach_vm_read() null map test, "
3773 "%s%s address, %s size: 0x%jx (%ju)",
3774 address_flags
[flags_idx
].description
,
3775 (flags_idx
== ANYWHERE
) ? "" : address_alignments
[alignments_idx
].description
,
3776 vm_sizes
[sizes_idx
].description
, (uintmax_t)vm_sizes
[sizes_idx
].size
,
3777 (uintmax_t)vm_sizes
[sizes_idx
].size
);
3778 /* A zero size range is always accessible. */
3779 if (sizes_idx
!= ZERO_BYTES
) {
3780 run_suite(set_up_vm_variables_and_allocate_extra_page
, read_inaccessible_tests
, deallocate_extra_page
,
3781 "mach_vm_read() inaccessibility tests, %s%s "
3782 "address, %s size: 0x%jx (%ju)",
3783 address_flags
[flags_idx
].description
,
3784 (flags_idx
== ANYWHERE
) ? "" : address_alignments
[alignments_idx
].description
,
3785 vm_sizes
[sizes_idx
].description
, (uintmax_t)vm_sizes
[sizes_idx
].size
,
3786 (uintmax_t)vm_sizes
[sizes_idx
].size
);
3791 run_suite(do_nothing
, read_edge_case_tests
, do_nothing
, "mach_vm_read() fixed size tests");
3795 run_write_test_suites()
3797 UnitTests write_main_tests
= {
3798 {"Write and verify zero-filled memory", test_zero_filled_write
},
3800 UnitTests write_pattern_tests
= {
3801 {"Write address-filled pattern", test_address_filled_write
},
3802 {"Write checkerboard pattern", test_checkerboard_write
},
3803 {"Write reverse checkerboard pattern", test_reverse_checkerboard_write
},
3805 UnitTests write_edge_case_tests
= {
3806 {"Write into NULL VM map", test_write_null_map
}, {"Write zero size", test_write_zero_size
},
3808 UnitTests write_inaccessible_tests
= {
3809 {"Write partially decallocated buffer", test_write_partially_deallocated_buffer
},
3810 {"Write partially read-protected buffer", test_write_partially_unreadable_buffer
},
3811 {"Write on partially deallocated range", test_write_on_partially_deallocated_range
},
3812 {"Write on partially write-protected range", test_write_on_partially_unwritable_range
},
3815 /* All allocations done with mach_vm_allocate(). */
3816 set_allocator(wrapper_mach_vm_allocate
);
3818 /* Run the test suites with various destination sizes and
3819 * unspecified or fixed (page-aligned or page-unaligned)
3820 * addresses, and various buffer sizes and boundary offsets. */
3821 for (sizes_idx
= 0; sizes_idx
< numofsizes
; sizes_idx
++) {
3822 for (flags_idx
= 0; flags_idx
< numofflags
; flags_idx
++) {
3823 for (alignments_idx
= 0; alignments_idx
< numofalignments
; alignments_idx
++) {
3824 for (buffer_sizes_idx
= 0; buffer_sizes_idx
< numofsizes
; buffer_sizes_idx
++) {
3825 for (offsets_idx
= 0; offsets_idx
< numofoffsets
; offsets_idx
++) {
3826 /* An allocated address will be page-aligned. */
3827 if ((flags_idx
== ANYWHERE
&& alignments_idx
== UNALIGNED
)) {
3830 run_suite(set_up_vm_and_buffer_variables_allocate_for_writing
, write_edge_case_tests
,
3831 deallocate_vm_and_buffer
,
3832 "mach_vm_write() edge case tests, %s%s address, %s "
3833 "size: 0x%jx (%ju), buffer %s size: 0x%jx (%ju), "
3834 "buffer boundary offset: %d",
3835 address_flags
[flags_idx
].description
,
3836 (flags_idx
== ANYWHERE
) ? "" : address_alignments
[alignments_idx
].description
,
3837 vm_sizes
[sizes_idx
].description
, (uintmax_t)vm_sizes
[sizes_idx
].size
,
3838 (uintmax_t)vm_sizes
[sizes_idx
].size
, vm_sizes
[buffer_sizes_idx
].description
,
3839 (uintmax_t)vm_sizes
[buffer_sizes_idx
].size
, (uintmax_t)vm_sizes
[buffer_sizes_idx
].size
,
3840 buffer_offsets
[offsets_idx
].offset
);
3841 /* A zero size buffer is always accessible. */
3842 if (buffer_sizes_idx
!= ZERO_BYTES
) {
3843 run_suite(set_up_vm_and_buffer_variables_allocate_for_writing
, write_inaccessible_tests
,
3844 deallocate_vm_and_buffer
,
3845 "mach_vm_write() inaccessibility tests, "
3846 "%s%s address, %s size: 0x%jx (%ju), buffer "
3847 "%s size: 0x%jx (%ju), buffer boundary "
3849 address_flags
[flags_idx
].description
,
3850 (flags_idx
== ANYWHERE
) ? "" : address_alignments
[alignments_idx
].description
,
3851 vm_sizes
[sizes_idx
].description
, (uintmax_t)vm_sizes
[sizes_idx
].size
,
3852 (uintmax_t)vm_sizes
[sizes_idx
].size
, vm_sizes
[buffer_sizes_idx
].description
,
3853 (uintmax_t)vm_sizes
[buffer_sizes_idx
].size
, (uintmax_t)vm_sizes
[buffer_sizes_idx
].size
,
3854 buffer_offsets
[offsets_idx
].offset
);
3856 /* The buffer cannot be larger than the destination. */
3857 if (vm_sizes
[sizes_idx
].size
< vm_sizes
[buffer_sizes_idx
].size
) {
3860 run_suite(set_up_vm_and_buffer_variables_allocate_write
, write_main_tests
, deallocate_vm_and_buffer
,
3861 "mach_vm_write() main tests, %s%s address, %s "
3862 "size: 0x%jx (%ju), buffer %s size: 0x%jx (%ju), "
3863 "buffer boundary offset: %d",
3864 address_flags
[flags_idx
].description
,
3865 (flags_idx
== ANYWHERE
) ? "" : address_alignments
[alignments_idx
].description
,
3866 vm_sizes
[sizes_idx
].description
, (uintmax_t)vm_sizes
[sizes_idx
].size
,
3867 (uintmax_t)vm_sizes
[sizes_idx
].size
, vm_sizes
[buffer_sizes_idx
].description
,
3868 (uintmax_t)vm_sizes
[buffer_sizes_idx
].size
, (uintmax_t)vm_sizes
[buffer_sizes_idx
].size
,
3869 buffer_offsets
[offsets_idx
].offset
);
3870 run_suite(set_up_vm_and_buffer_variables_allocate_for_writing
, write_pattern_tests
,
3871 deallocate_vm_and_buffer
,
3872 "mach_vm_write() pattern tests, %s%s address, %s "
3873 "size: 0x%jx (%ju), buffer %s size: 0x%jx (%ju), "
3874 "buffer boundary offset: %d",
3875 address_flags
[flags_idx
].description
,
3876 (flags_idx
== ANYWHERE
) ? "" : address_alignments
[alignments_idx
].description
,
3877 vm_sizes
[sizes_idx
].description
, (uintmax_t)vm_sizes
[sizes_idx
].size
,
3878 (uintmax_t)vm_sizes
[sizes_idx
].size
, vm_sizes
[buffer_sizes_idx
].description
,
3879 (uintmax_t)vm_sizes
[buffer_sizes_idx
].size
, (uintmax_t)vm_sizes
[buffer_sizes_idx
].size
,
3880 buffer_offsets
[offsets_idx
].offset
);
3889 run_protect_test_suites()
3891 UnitTests readprotection_main_tests
= {
3892 {"Read-protect, read-allow and verify zero-filled memory", test_zero_filled_readprotect
},
3893 {"Verify that region is read-protected iff size is "
3895 test_verify_readprotection
},
3897 UnitTests access_readprotected_memory_tests
= {
3898 {"Read start of read-protected range", test_access_readprotected_range_start
},
3899 {"Read middle of read-protected range", test_access_readprotected_range_middle
},
3900 {"Read end of read-protected range", test_access_readprotected_range_end
},
3902 UnitTests writeprotection_main_tests
= {
3903 {"Write-protect and verify zero-filled memory", test_zero_filled_extended
},
3904 {"Verify that region is write-protected iff size is "
3906 test_verify_writeprotection
},
3908 UnitTests write_writeprotected_memory_tests
= {
3909 {"Write at start of write-protected range", test_write_writeprotected_range_start
},
3910 {"Write in middle of write-protected range", test_write_writeprotected_range_middle
},
3911 {"Write at end of write-protected range", test_write_writeprotected_range_end
},
3913 UnitTests protect_edge_case_tests
= {
3914 {"Read-protect zero size ranges", test_readprotect_zero_size
},
3915 {"Write-protect zero size ranges", test_writeprotect_zero_size
},
3916 {"Read-protect wrapped around memory ranges", test_readprotect_wrapped_around_ranges
},
3917 {"Write-protect wrapped around memory ranges", test_writeprotect_wrapped_around_ranges
},
3920 /* All allocations done with mach_vm_allocate(). */
3921 set_allocator(wrapper_mach_vm_allocate
);
3923 /* Run the test suites with various VM sizes, and unspecified or
3924 * fixed (page-aligned or page-unaligned), addresses. */
3925 for (sizes_idx
= 0; sizes_idx
< numofsizes
; sizes_idx
++) {
3926 for (flags_idx
= 0; flags_idx
< numofflags
; flags_idx
++) {
3927 for (alignments_idx
= 0; alignments_idx
< numofalignments
; alignments_idx
++) {
3928 /* An allocated address will be page-aligned. */
3929 if (flags_idx
== ANYWHERE
&& alignments_idx
== UNALIGNED
) {
3932 run_suite(set_up_vm_variables_allocate_readprotect
, readprotection_main_tests
, deallocate_extra_page
,
3933 "Main read-protection tests, %s%s address, %s "
3934 "size: 0x%jx (%ju)",
3935 address_flags
[flags_idx
].description
,
3936 (flags_idx
== ANYWHERE
) ? "" : address_alignments
[alignments_idx
].description
,
3937 vm_sizes
[sizes_idx
].description
, (uintmax_t)vm_sizes
[sizes_idx
].size
,
3938 (uintmax_t)vm_sizes
[sizes_idx
].size
);
3939 run_suite(set_up_vm_variables_allocate_writeprotect
, writeprotection_main_tests
, deallocate_extra_page
,
3940 "Main write-protection tests, %s%s address, %s "
3941 "size: 0x%jx (%ju)",
3942 address_flags
[flags_idx
].description
,
3943 (flags_idx
== ANYWHERE
) ? "" : address_alignments
[alignments_idx
].description
,
3944 vm_sizes
[sizes_idx
].description
, (uintmax_t)vm_sizes
[sizes_idx
].size
,
3945 (uintmax_t)vm_sizes
[sizes_idx
].size
);
3946 /* Nothing gets protected if size is zero. */
3947 if (sizes_idx
!= ZERO_BYTES
) {
3948 set_expected_signal(SIGBUS
);
3949 /* Accessing read-protected memory should cause a bus
3951 run_suite(set_up_vm_variables_allocate_readprotect
, access_readprotected_memory_tests
, deallocate_extra_page
,
3952 "Read-protected memory access tests, %s%s "
3953 "address, %s size: 0x%jx (%ju)",
3954 address_flags
[flags_idx
].description
,
3955 (flags_idx
== ANYWHERE
) ? "" : address_alignments
[alignments_idx
].description
,
3956 vm_sizes
[sizes_idx
].description
, (uintmax_t)vm_sizes
[sizes_idx
].size
,
3957 (uintmax_t)vm_sizes
[sizes_idx
].size
);
3958 /* Writing on write-protected memory should cause a bus
3960 run_suite(set_up_vm_variables_allocate_writeprotect
, write_writeprotected_memory_tests
, deallocate_extra_page
,
3961 "Write-protected memory writing tests, %s%s "
3962 "address, %s size: 0x%jx (%ju)",
3963 address_flags
[flags_idx
].description
,
3964 (flags_idx
== ANYWHERE
) ? "" : address_alignments
[alignments_idx
].description
,
3965 vm_sizes
[sizes_idx
].description
, (uintmax_t)vm_sizes
[sizes_idx
].size
,
3966 (uintmax_t)vm_sizes
[sizes_idx
].size
);
3967 set_expected_signal(0);
3972 run_suite(do_nothing
, protect_edge_case_tests
, do_nothing
, "Edge case protection tests");
3976 run_copy_test_suites()
3979 UnitTests copy_main_tests
= {
3980 {"Copy and verify zero-filled memory", test_zero_filled_copy_dest
},
3982 UnitTests copy_pattern_tests
= {
3983 {"Copy address-filled pattern", test_copy_address_filled
},
3984 {"Copy checkerboard pattern", test_copy_checkerboard
},
3985 {"Copy reverse checkerboard pattern", test_copy_reverse_checkerboard
},
3987 UnitTests copy_edge_case_tests
= {
3988 {"Copy with NULL VM map", test_copy_null_map
},
3989 {"Copy zero size", test_copy_zero_size
},
3990 {"Copy invalid large size", test_copy_invalid_large_size
},
3991 {"Read wrapped around memory ranges", test_copy_wrapped_around_ranges
},
3993 UnitTests copy_inaccessible_tests
= {
3994 {"Copy source partially decallocated region", test_copy_source_partially_deallocated_region
},
3996 {"Copy destination partially decallocated region", test_copy_dest_partially_deallocated_region
},
3997 {"Copy source partially read-protected region", test_copy_source_partially_unreadable_region
},
3999 {"Copy destination partially write-protected region", test_copy_dest_partially_unwriteable_region
},
4000 {"Copy source on partially deallocated range", test_copy_source_on_partially_deallocated_range
},
4001 {"Copy destination on partially deallocated range", test_copy_dest_on_partially_deallocated_range
},
4002 {"Copy source on partially read-protected range", test_copy_source_on_partially_unreadable_range
},
4003 {"Copy destination on partially write-protected range", test_copy_dest_on_partially_unwritable_range
},
4006 UnitTests copy_shared_mode_tests
= {
4007 {"Copy using freshly allocated source", test_vmcopy_fresh_source
},
4008 {"Copy using shared source", test_vmcopy_shared_source
},
4009 {"Copy using a \'copied from\' source", test_vmcopy_copied_from_source
},
4010 {"Copy using a \'copied to\' source", test_vmcopy_copied_to_source
},
4011 {"Copy using a true shared source", test_vmcopy_trueshared_source
},
4012 {"Copy using a private aliased source", test_vmcopy_private_aliased_source
},
4015 /* All allocations done with mach_vm_allocate(). */
4016 set_allocator(wrapper_mach_vm_allocate
);
4018 /* All the tests are done with page size regions. */
4019 set_vm_size(vm_page_size
);
4021 /* Run the test suites with various shared modes for source */
4022 for (vmcopy_action_idx
= 0; vmcopy_action_idx
< numofvmcopyactions
; vmcopy_action_idx
++) {
4023 run_suite(set_up_copy_shared_mode_variables
, copy_shared_mode_tests
, do_nothing
, "Copy shared mode tests, %s",
4024 vmcopy_actions
[vmcopy_action_idx
].description
);
4027 for (sizes_idx
= 0; sizes_idx
< numofsizes
; sizes_idx
++) {
4028 for (flags_idx
= 0; flags_idx
< numofflags
; flags_idx
++) {
4029 for (alignments_idx
= 0; alignments_idx
< numofalignments
; alignments_idx
++) {
4030 for (buffer_sizes_idx
= 0; buffer_sizes_idx
< numofsizes
; buffer_sizes_idx
++) {
4031 for (offsets_idx
= 0; offsets_idx
< numofoffsets
; offsets_idx
++) {
4032 /* An allocated address will be page-aligned. */
4033 if ((flags_idx
== ANYWHERE
&& alignments_idx
== UNALIGNED
)) {
4036 run_suite(set_up_vm_and_buffer_variables_allocate_for_copying
, copy_edge_case_tests
,
4037 deallocate_vm_and_buffer
,
4038 "mach_vm_copy() edge case tests, %s%s address, %s "
4039 "size: 0x%jx (%ju), buffer %s size: 0x%jx (%ju), "
4040 "buffer boundary offset: %d",
4041 address_flags
[flags_idx
].description
,
4042 (flags_idx
== ANYWHERE
) ? "" : address_alignments
[alignments_idx
].description
,
4043 vm_sizes
[sizes_idx
].description
, (uintmax_t)vm_sizes
[sizes_idx
].size
,
4044 (uintmax_t)vm_sizes
[sizes_idx
].size
, vm_sizes
[buffer_sizes_idx
].description
,
4045 (uintmax_t)vm_sizes
[buffer_sizes_idx
].size
, (uintmax_t)vm_sizes
[buffer_sizes_idx
].size
,
4046 buffer_offsets
[offsets_idx
].offset
);
4047 /* The buffer cannot be larger than the destination. */
4048 if (vm_sizes
[sizes_idx
].size
< vm_sizes
[buffer_sizes_idx
].size
) {
4052 /* A zero size buffer is always accessible. */
4053 if (buffer_sizes_idx
!= ZERO_BYTES
) {
4054 run_suite(set_up_vm_and_buffer_variables_allocate_for_copying
, copy_inaccessible_tests
,
4055 deallocate_vm_and_buffer
,
4056 "mach_vm_copy() inaccessibility tests, "
4057 "%s%s address, %s size: 0x%jx (%ju), buffer "
4058 "%s size: 0x%jx (%ju), buffer boundary "
4060 address_flags
[flags_idx
].description
,
4061 (flags_idx
== ANYWHERE
) ? "" : address_alignments
[alignments_idx
].description
,
4062 vm_sizes
[sizes_idx
].description
, (uintmax_t)vm_sizes
[sizes_idx
].size
,
4063 (uintmax_t)vm_sizes
[sizes_idx
].size
, vm_sizes
[buffer_sizes_idx
].description
,
4064 (uintmax_t)vm_sizes
[buffer_sizes_idx
].size
, (uintmax_t)vm_sizes
[buffer_sizes_idx
].size
,
4065 buffer_offsets
[offsets_idx
].offset
);
4067 run_suite(set_up_source_and_dest_variables_allocate_copy
, copy_main_tests
, deallocate_vm_and_buffer
,
4068 "mach_vm_copy() main tests, %s%s address, %s "
4069 "size: 0x%jx (%ju), destination %s size: 0x%jx (%ju), "
4070 "destination boundary offset: %d",
4071 address_flags
[flags_idx
].description
,
4072 (flags_idx
== ANYWHERE
) ? "" : address_alignments
[alignments_idx
].description
,
4073 vm_sizes
[sizes_idx
].description
, (uintmax_t)vm_sizes
[sizes_idx
].size
,
4074 (uintmax_t)vm_sizes
[sizes_idx
].size
, vm_sizes
[buffer_sizes_idx
].description
,
4075 (uintmax_t)vm_sizes
[buffer_sizes_idx
].size
, (uintmax_t)vm_sizes
[buffer_sizes_idx
].size
,
4076 buffer_offsets
[offsets_idx
].offset
);
4077 run_suite(set_up_source_and_dest_variables_allocate_copy
, copy_pattern_tests
, deallocate_vm_and_buffer
,
4078 "mach_vm_copy() pattern tests, %s%s address, %s "
4079 "size: 0x%jx (%ju) destination %s size: 0x%jx (%ju), "
4080 "destination boundary offset: %d",
4081 address_flags
[flags_idx
].description
,
4082 (flags_idx
== ANYWHERE
) ? "" : address_alignments
[alignments_idx
].description
,
4083 vm_sizes
[sizes_idx
].description
, (uintmax_t)vm_sizes
[sizes_idx
].size
,
4084 (uintmax_t)vm_sizes
[sizes_idx
].size
, vm_sizes
[buffer_sizes_idx
].description
,
4085 (uintmax_t)vm_sizes
[buffer_sizes_idx
].size
, (uintmax_t)vm_sizes
[buffer_sizes_idx
].size
,
4086 buffer_offsets
[offsets_idx
].offset
);
4095 perform_test_with_options(test_option_t options
)
4097 process_options(options
);
4099 /* <rdar://problem/10304215> CoreOSZin 12Z30: VMUnitTest fails:
4100 * error finding xnu major version number. */
4101 /* printf("xnu version is %s.\n\n", xnu_version_string()); */
4103 if (flag_run_allocate_test
) {
4104 run_allocate_test_suites();
4107 if (flag_run_deallocate_test
) {
4108 run_deallocate_test_suites();
4111 if (flag_run_read_test
) {
4112 run_read_test_suites();
4115 if (flag_run_write_test
) {
4116 run_write_test_suites();
4119 if (flag_run_protect_test
) {
4120 run_protect_test_suites();
4123 if (flag_run_copy_test
) {
4124 run_copy_test_suites();
4127 log_aggregated_results();
4130 T_DECL(vm_test_allocate
, "Allocate VM unit test")
4132 test_options
.to_flags
= VM_TEST_ALLOCATE
;
4133 test_options
.to_vmsize
= 0;
4134 test_options
.to_quietness
= ERROR_ONLY_QUIETNESS
;
4136 perform_test_with_options(test_options
);
4139 T_DECL(vm_test_deallocate
, "Deallocate VM unit test",
4140 T_META_IGNORECRASHES(".*vm_allocation.*"))
4142 test_options
.to_flags
= VM_TEST_DEALLOCATE
;
4143 test_options
.to_vmsize
= 0;
4144 test_options
.to_quietness
= ERROR_ONLY_QUIETNESS
;
4146 perform_test_with_options(test_options
);
4149 T_DECL(vm_test_read
, "Read VM unit test")
4151 test_options
.to_flags
= VM_TEST_READ
;
4152 test_options
.to_vmsize
= 0;
4153 test_options
.to_quietness
= ERROR_ONLY_QUIETNESS
;
4155 perform_test_with_options(test_options
);
4158 T_DECL(vm_test_write
, "Write VM unit test")
4160 test_options
.to_flags
= VM_TEST_WRITE
;
4161 test_options
.to_vmsize
= 0;
4162 test_options
.to_quietness
= ERROR_ONLY_QUIETNESS
;
4164 perform_test_with_options(test_options
);
4167 T_DECL(vm_test_protect
, "Protect VM unit test",
4168 T_META_IGNORECRASHES(".*vm_allocation.*"))
4170 test_options
.to_flags
= VM_TEST_PROTECT
;
4171 test_options
.to_vmsize
= 0;
4172 test_options
.to_quietness
= ERROR_ONLY_QUIETNESS
;
4174 perform_test_with_options(test_options
);
4177 T_DECL(vm_test_copy
, "Copy VM unit test")
4179 test_options
.to_flags
= VM_TEST_COPY
;
4180 test_options
.to_vmsize
= 0;
4181 test_options
.to_quietness
= ERROR_ONLY_QUIETNESS
;
4183 perform_test_with_options(test_options
);