]> git.saurik.com Git - apple/xnu.git/blame - tests/vm/vm_allocation.c
xnu-7195.101.1.tar.gz
[apple/xnu.git] / tests / vm / vm_allocation.c
CommitLineData
f427ee49
A
1/* Mach virtual memory unit tests
2 *
3 * The main goal of this code is to facilitate the construction,
4 * running, result logging and clean up of a test suite, taking care
5 * of all the scaffolding. A test suite is a sequence of very targeted
6 * unit tests, each running as a separate process to isolate its
7 * address space.
8 * A unit test is abstracted as a unit_test_t structure, consisting of
9 * a test function and a logging identifier. A test suite is a suite_t
10 * structure, consisting of a unit_test_t array, fixture set up
11 * tear down functions.
12 * Test suites are created dynamically. Each of its unit test runs in
13 * its own fork()d process, with the fixture set up and tear down
14 * running before and after each test. The parent process will log a
15 * pass result if the child exits normally, and a fail result in any
16 * other case (non-zero exit status, abnormal signal). The suite
17 * results are then aggregated and logged after the [SUMMARY] keyword,
18 * and finally the test suite is destroyed.
19 * The included test suites cover the Mach memory allocators,
20 * mach_vm_allocate() and mach_vm_map() with various options, and
21 * mach_vm_deallocate(), mach_vm_read(), mach_vm_write(),
22 * mach_vm_protect(), mach_vm_copy().
23 *
24 * Author: Renaud Dreyer (rdreyer@apple.com)
25 *
26 * Transformed to libdarwintest by Tristan Ye (tristan_ye@apple.com) */
27
#include <darwintest.h>

#include <stdlib.h>
#include <ctype.h>
#include <inttypes.h>
#include <stdio.h>
#include <math.h>
#include <errno.h>
#include <signal.h>
#include <string.h>
#include <getopt.h>
#include <mach/mach.h>
#include <mach/mach_init.h>
#include <mach/mach_vm.h>
#include <sys/sysctl.h>
#include <time.h>
43
44T_GLOBAL_META(T_META_NAMESPACE("xnu.vm"));
45
46/**************************/
47/**************************/
48/* Unit Testing Framework */
49/**************************/
50/**************************/
51
52/*********************/
53/* Private interface */
54/*********************/
55
56static const char frameworkname[] = "vm_unitester";
57
/* Type for test, fixture set up and fixture tear down functions. */
typedef void (*test_fn_t)();

/* Unit test structure: one test function plus its logging identifier. */
typedef struct {
	const char * name;
	test_fn_t test;
} unit_test_t;

/* Test suite structure: a named array of unit tests sharing fixture
 * set up and tear down functions. */
typedef struct {
	const char * name;
	int numoftests;       /* number of entries in tests[] */
	test_fn_t set_up;     /* run before each unit test */
	unit_test_t * tests;
	test_fn_t tear_down;  /* run after each unit test */
} suite_t;
75
/* Logging verbosity: 0 = verbose, 1 = result and error, 2 = error only
 * (see the *_QUIETNESS defines below). */
int _quietness = 0;
/* Signal the child is expected to die with: 0 = none expected,
 * -1 = either SIGBUS or SIGSEGV (see child_terminated_normally()). */
int _expected_signal = 0;

/* Aggregate pass/fail counters across all suites run in this process. */
struct {
	uintmax_t numoftests;
	uintmax_t passed_tests;
} results = {0, 0};
83
84#define logr(format, ...) \
85 do { \
86 if (_quietness <= 1) { \
87 T_LOG(format, ## __VA_ARGS__); \
88 } \
89 } while (0)
90
91#define logv(format, ...) \
92 do { \
93 if (_quietness == 0) { \
94 T_LOG(format, ## __VA_ARGS__); \
95 } \
96 } while (0)
97
98static suite_t *
99create_suite(const char * name, int numoftests, test_fn_t set_up, unit_test_t * tests, test_fn_t tear_down)
100{
101 suite_t * suite = (suite_t *)malloc(sizeof(suite_t));
102 T_QUIET; T_WITH_ERRNO; T_ASSERT_NOTNULL(suite, "malloc()");
103
104 suite->name = name;
105 suite->numoftests = numoftests;
106 suite->set_up = set_up;
107 suite->tests = tests;
108 suite->tear_down = tear_down;
109 return suite;
110}
111
/* Release a suite allocated by create_suite(). Does not free the
 * suite's name or tests array (those are borrowed). */
static void
destroy_suite(suite_t * suite)
{
	free(suite);
}

/* Log the suite banner: its name and test count (quietness <= 1). */
static void
log_suite_info(suite_t * suite)
{
	logr("[TEST] %s", suite->name);
	logr("Number of tests: %d\n", suite->numoftests);
}

/* Fold this suite's totals into the global `results` accumulator.
 * NOTE(review): despite the name, nothing is printed here; the
 * aggregate is reported later by log_aggregated_results(). */
static void
log_suite_results(suite_t * suite, int passed_tests)
{
	results.numoftests += (uintmax_t)suite->numoftests;
	results.passed_tests += (uintmax_t)passed_tests;
}
131
132static void
133log_test_info(unit_test_t * unit_test, unsigned test_num)
134{
135 logr("[BEGIN] #%04d: %s", test_num, unit_test->name);
136}
137
138static void
139log_test_result(unit_test_t * unit_test, boolean_t test_passed, unsigned test_num)
140{
141 logr("[%s] #%04d: %s\n", test_passed ? "PASS" : "FAIL", test_num, unit_test->name);
142}
143
/* Run one unit test between its suite's fixture set up and tear down.
 * NOTE(review): the original comment mentioned enforcing a time out
 * constraint, but no timeout is implemented here — the parent only
 * judges the child's exit status; confirm whether a timeout was
 * intended. */
static void
run_test(suite_t * suite, unit_test_t * unit_test, unsigned test_num)
{
	log_test_info(unit_test, test_num);

	suite->set_up();
	unit_test->test();
	suite->tear_down();
}
155
156/* Check a child return status. */
157static boolean_t
158child_terminated_normally(int child_status)
159{
160 boolean_t normal_exit = FALSE;
161
162 if (WIFEXITED(child_status)) {
163 int exit_status = WEXITSTATUS(child_status);
164 if (exit_status) {
165 T_LOG("Child process unexpectedly exited with code %d.",
166 exit_status);
167 } else if (!_expected_signal) {
168 normal_exit = TRUE;
169 }
170 } else if (WIFSIGNALED(child_status)) {
171 int signal = WTERMSIG(child_status);
172 if (signal == _expected_signal ||
173 (_expected_signal == -1 && (signal == SIGBUS || signal == SIGSEGV))) {
174 if (_quietness <= 0) {
175 T_LOG("Child process died with expected signal "
176 "%d.", signal);
177 }
178 normal_exit = TRUE;
179 } else {
180 T_LOG("Child process unexpectedly died with signal %d.",
181 signal);
182 }
183 } else {
184 T_LOG("Child process unexpectedly did not exit nor die");
185 }
186
187 return normal_exit;
188}
189
/* Run a test in its own process, and report the result. */
static boolean_t
child_test_passed(suite_t * suite, unit_test_t * unit_test)
{
	int test_status;
	/* Monotonic test counter across all suites in this process. */
	static unsigned test_num = 0;

	test_num++;

	pid_t test_pid = fork();
	T_QUIET; T_ASSERT_POSIX_SUCCESS(test_pid, "fork()");
	if (!test_pid) {
		/* Child: run the test; any crash or non-zero exit is
		 * observed by the parent via the wait status. */
		run_test(suite, unit_test, test_num);
		exit(0);
	}
	/* Parent: loop until our child is the one reaped. */
	while (waitpid(test_pid, &test_status, 0) != test_pid) {
		continue;
	}
	boolean_t test_result = child_terminated_normally(test_status);
	log_test_result(unit_test, test_result, test_num);
	return test_result;
}
212
213/* Run each test in a suite, and report the results. */
214static int
215count_passed_suite_tests(suite_t * suite)
216{
217 int passed_tests = 0;
218 int i;
219
220 for (i = 0; i < suite->numoftests; i++) {
221 passed_tests += child_test_passed(suite, &(suite->tests[i]));
222 }
223 return passed_tests;
224}
225
226/********************/
227/* Public interface */
228/********************/
229
230#define DEFAULT_QUIETNESS 0 /* verbose */
231#define RESULT_ERR_QUIETNESS 1 /* result and error */
232#define ERROR_ONLY_QUIETNESS 2 /* error only */
233
234#define run_suite(set_up, tests, tear_down, ...) \
235 _run_suite((sizeof(tests) / sizeof(tests[0])), (set_up), (tests), (tear_down), __VA_ARGS__)
236
237typedef unit_test_t UnitTests[];
238
239void _run_suite(int numoftests, test_fn_t set_up, UnitTests tests, test_fn_t tear_down, const char * format, ...)
240__printflike(5, 6);
241
/* Build a suite from the given tests and a printf-style name, run it
 * (each test in its own child process), fold its results into the
 * global tally, and destroy it. Normally invoked via the run_suite()
 * macro, which supplies the test count. */
void
_run_suite(int numoftests, test_fn_t set_up, UnitTests tests, test_fn_t tear_down, const char * format, ...)
{
	va_list ap;
	char * name;

	/* Format the suite name from the trailing arguments. */
	va_start(ap, format);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(vasprintf(&name, format, ap), "vasprintf()");
	va_end(ap);
	suite_t * suite = create_suite(name, numoftests, set_up, tests, tear_down);
	log_suite_info(suite);
	log_suite_results(suite, count_passed_suite_tests(suite));
	/* Safe to free: suite->name is only read during the calls above. */
	free(name);
	destroy_suite(suite);
}
257
258/* Setters and getters for various test framework global
259 * variables. Should only be used outside of the test, set up and tear
260 * down functions. */
261
262/* Expected signal for a test, default is 0. */
void
set_expected_signal(int signal)
{
	_expected_signal = signal;
}

int
get_expected_signal()
{
	return _expected_signal;
}

/* Logging verbosity. */
void
set_quietness(int value)
{
	_quietness = value;
}

int
get_quietness()
{
	return _quietness;
}

/* No-op placeholder for fixture set up/tear down slots and unit
 * tests that need no work. */
void
do_nothing()
{
}
293
294void
295log_aggregated_results()
296{
297 T_LOG("[SUMMARY] Aggregated Test Results\n");
298 T_LOG("Total: %ju", results.numoftests);
299 T_LOG("Passed: %ju", results.passed_tests);
300 T_LOG("Failed: %ju\n", results.numoftests - results.passed_tests);
301
302 T_QUIET; T_ASSERT_EQ(results.passed_tests, results.numoftests,
303 "%d passed of total %d tests",
304 results.passed_tests, results.numoftests);
305}
306
307/*******************************/
308/*******************************/
309/* Virtual memory unit testing */
310/*******************************/
311/*******************************/
312
313/* Test exit values:
314 * 0: pass
315 * 1: fail, generic unexpected failure
316 * 2: fail, unexpected Mach return value
317 * 3: fail, time out */
318
319#define DEFAULT_VM_SIZE ((mach_vm_size_t)(1024ULL * 4096ULL))
320
321#define POINTER(address) ((char *)(uintptr_t)(address))
322#define MACH_VM_ADDRESS_T(address) (*((mach_vm_address_t *)(uintptr_t)(address)))
323
324static int vm_address_size = sizeof(mach_vm_address_t);
325
326static char *progname = "";
327
328/*************************/
329/* xnu version functions */
330/*************************/
331
332/* Find the xnu version string. */
333char *
334xnu_version_string()
335{
336 size_t length;
337 int mib[2];
338 mib[0] = CTL_KERN;
339 mib[1] = KERN_VERSION;
340
341 T_QUIET;
342 T_ASSERT_POSIX_SUCCESS(sysctl(mib, 2, NULL, &length, NULL, 0), "sysctl()");
343 char * version = (char *)malloc(length);
344 T_QUIET;
345 T_WITH_ERRNO;
346 T_ASSERT_NOTNULL(version, "malloc()");
347 T_QUIET;
348 T_EXPECT_POSIX_SUCCESS(sysctl(mib, 2, version, &length, NULL, 0), "sysctl()");
349 if (T_RESULT == T_RESULT_FAIL) {
350 free(version);
351 T_END;
352 }
353 char * xnu_string = strstr(version, "xnu-");
354 free(version);
355 T_QUIET;
356 T_ASSERT_NOTNULL(xnu_string, "%s: error finding xnu version string.", progname);
357 return xnu_string;
358}
359
/* Find the xnu major version number: the integer immediately after
 * the "xnu-" prefix in the version string. */
unsigned int
xnu_major_version()
{
	char * endptr;
	/* Skip the 4-character "xnu-" prefix. */
	char * xnu_substring = xnu_version_string() + 4;

	errno = 0;
	unsigned int xnu_version = strtoul(xnu_substring, &endptr, 0);
	/* strtoul() error protocol: ERANGE on overflow, endptr left at
	 * the input start when no digits were consumed. */
	T_QUIET;
	T_ASSERT_TRUE((errno != ERANGE && endptr != xnu_substring),
	    "%s: error finding xnu major version number.", progname);
	return xnu_version;
}
374
375/*************************/
376/* Mach assert functions */
377/*************************/
378
/* Assert that a Mach routine returned exactly the expected
 * kern_return_t, reporting both values as error strings on mismatch. */
static inline void
assert_mach_return(kern_return_t kr, kern_return_t expected_kr, const char * mach_routine)
{
	T_QUIET; T_ASSERT_EQ(kr, expected_kr,
	    "%s unexpectedly returned: %s."
	    "Should have returned: %s.",
	    mach_routine, mach_error_string(kr),
	    mach_error_string(expected_kr));
}
388
389/*******************************/
390/* Arrays for test suite loops */
391/*******************************/
392
393/* Memory allocators */
394typedef kern_return_t (*allocate_fn_t)(vm_map_t, mach_vm_address_t *, mach_vm_size_t, int);
395
396
/*
 * Remember any pre-reserved fixed address, which needs to be released prior to an allocation.
 */
static mach_vm_address_t fixed_vm_address = 0x0;
static mach_vm_size_t fixed_vm_size = 0;

/* forward decl */
void assert_deallocate_success(mach_vm_address_t address, mach_vm_size_t size);

/*
 * If trying to allocate at a fixed address, we need to do the delayed deallocate first.
 */
static void
check_fixed_address(mach_vm_address_t *address, mach_vm_size_t size)
{
	/* Release the reservation only when the requested range lies
	 * entirely inside it, then clear the bookkeeping so it is
	 * released at most once. */
	if (fixed_vm_address != 0 &&
	    fixed_vm_address <= *address &&
	    *address + size <= fixed_vm_address + fixed_vm_size) {
		assert_deallocate_success(fixed_vm_address, fixed_vm_size);
		fixed_vm_address = 0;
		fixed_vm_size = 0;
	}
}
420
/* mach_vm_allocate() wrapper: first releases any overlapping
 * pre-reserved fixed range (see check_fixed_address()). */
kern_return_t
wrapper_mach_vm_allocate(vm_map_t map, mach_vm_address_t * address, mach_vm_size_t size, int flags)
{
	check_fixed_address(address, size);
	return mach_vm_allocate(map, address, size, flags);
}

/* mach_vm_map() wrapper: zero alignment mask, no backing object
 * (anonymous memory). */
kern_return_t
wrapper_mach_vm_map(vm_map_t map, mach_vm_address_t * address, mach_vm_size_t size, int flags)
{
	check_fixed_address(address, size);
	return mach_vm_map(map, address, size, (mach_vm_offset_t)0, flags, MACH_PORT_NULL, (memory_object_offset_t)0, FALSE,
	    VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
}

/* Should have the same behavior as when mask is zero. */
kern_return_t
wrapper_mach_vm_map_4kB(vm_map_t map, mach_vm_address_t * address, mach_vm_size_t size, int flags)
{
	check_fixed_address(address, size);
	return mach_vm_map(map, address, size, (mach_vm_offset_t)0xFFF, flags, MACH_PORT_NULL, (memory_object_offset_t)0, FALSE,
	    VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
}

/* mach_vm_map() wrapper with a 2 MB alignment mask. */
kern_return_t
wrapper_mach_vm_map_2MB(vm_map_t map, mach_vm_address_t * address, mach_vm_size_t size, int flags)
{
	check_fixed_address(address, size);
	return mach_vm_map(map, address, size, (mach_vm_offset_t)0x1FFFFF, flags, MACH_PORT_NULL, (memory_object_offset_t)0, FALSE,
	    VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
}
452
/* Create a named memory entry for (at least) *size bytes; on return
 * *size holds the page-rounded size actually granted. The caller owns
 * the returned port right. */
mach_port_t
memory_entry(mach_vm_size_t * size)
{
	mach_port_t object_handle = MACH_PORT_NULL;
	mach_vm_size_t original_size = *size;

	T_QUIET; T_ASSERT_MACH_SUCCESS(mach_make_memory_entry_64(mach_task_self(), size, (memory_object_offset_t)0,
	    (MAP_MEM_NAMED_CREATE | VM_PROT_ALL), &object_handle, 0),
	    "mach_make_memory_entry_64()");
	/* The kernel rounds the entry size up to a whole page. */
	T_QUIET; T_ASSERT_EQ(*size, round_page_kernel(original_size),
	    "mach_make_memory_entry_64() unexpectedly returned a named "
	    "entry of size 0x%jx (%ju).\n"
	    "Should have returned a "
	    "named entry of size 0x%jx (%ju).",
	    (uintmax_t)*size, (uintmax_t)*size, (uintmax_t)original_size, (uintmax_t)original_size);
	return object_handle;
}
470
/* mach_vm_map() wrapper backed by a freshly created named memory
 * entry sized to the (page-rounded) request. */
kern_return_t
wrapper_mach_vm_map_named_entry(vm_map_t map, mach_vm_address_t * address, mach_vm_size_t size, int flags)
{
	mach_port_t object_handle = memory_entry(&size);
	check_fixed_address(address, size);
	kern_return_t kr = mach_vm_map(map, address, size, (mach_vm_offset_t)0, flags, object_handle, (memory_object_offset_t)0, FALSE,
	    VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT);
	/* Drop our send right; presumably the mapping (if created) keeps
	 * the entry alive on its own — standard Mach semantics. */
	T_QUIET; T_ASSERT_MACH_SUCCESS(mach_port_deallocate(mach_task_self(), object_handle), "mach_port_deallocate()");
	return kr;
}
481
/* Table of allocator wrappers exercised by the suites, with
 * human-readable descriptions for logging. */
static struct {
	allocate_fn_t allocate;
	const char * description;
} allocators[] = {
	{wrapper_mach_vm_allocate, "mach_vm_allocate()"},
	{wrapper_mach_vm_map, "mach_vm_map() (zero mask)"},
	{wrapper_mach_vm_map_4kB,
	 "mach_vm_map() "
	 "(4 kB address alignment)"},
	{wrapper_mach_vm_map_2MB,
	 "mach_vm_map() "
	 "(2 MB address alignment)"},
	{wrapper_mach_vm_map_named_entry,
	 "mach_vm_map() (named "
	 "entry, zero mask)"},
};
static int numofallocators = sizeof(allocators) / sizeof(allocators[0]);
static int allocators_idx;
/* Indices into allocators[]; must match the table order above. */
enum { MACH_VM_ALLOCATE, MACH_VM_MAP, MACH_VM_MAP_4kB, MACH_VM_MAP_2MB, MACH_VM_MAP_NAMED_ENTRY };

/* VM size */
static struct {
	mach_vm_size_t size;
	const char * description;
} vm_sizes[] = {
	{DEFAULT_VM_SIZE, "default/input"},
	{0, "zero"},
	{4096ULL, "aligned"},
	{1ULL, "unaligned"},
	{4095ULL, "unaligned"},
	{4097ULL, "unaligned"},
};
static int numofsizes = sizeof(vm_sizes) / sizeof(vm_sizes[0]);
static int sizes_idx;
static int buffer_sizes_idx;
/* Indices into vm_sizes[]; must match the table order above. */
enum { DEFAULT_INPUT, ZERO_BYTES, ONE_PAGE, ONE_BYTE, ONE_PAGE_MINUS_ONE_BYTE, ONE_PAGE_AND_ONE_BYTE };

/* Unspecified/fixed address */
static struct {
	int flag;
	const char * description;
} address_flags[] = {
	{VM_FLAGS_ANYWHERE, "unspecified"}, {VM_FLAGS_FIXED, "fixed"},
};
static int numofflags = sizeof(address_flags) / sizeof(address_flags[0]);
static int flags_idx;
/* Indices into address_flags[]. */
enum { ANYWHERE, FIXED };

/* Address alignment */
static struct {
	boolean_t alignment;
	const char * description;
} address_alignments[] = {
	{TRUE, " aligned"}, {FALSE, " unaligned"},
};
static int numofalignments = sizeof(address_alignments) / sizeof(*address_alignments);
static int alignments_idx;
/* Indices into address_alignments[]. */
enum { ALIGNED, UNALIGNED };

/* Buffer offset */
static struct {
	int offset;
	const char * description;
} buffer_offsets[] = {
	{0, ""}, {1, ""}, {2, ""},
};
static int numofoffsets = sizeof(buffer_offsets) / sizeof(buffer_offsets[0]);
static int offsets_idx;
/* Indices into buffer_offsets[]. */
enum { ZERO, ONE, TWO };

/* mach_vm_copy() post actions */
enum { VMCOPY_MODIFY_SRC, VMCOPY_MODIFY_DST, VMCOPY_MODIFY_SHARED_COPIED };

static struct {
	int action;
	const char * description;
} vmcopy_actions[] = {
	{VMCOPY_MODIFY_SRC, "modify vm_copy() source"},
	{VMCOPY_MODIFY_DST, "modify vm_copy() destination"},
	{VMCOPY_MODIFY_SHARED_COPIED,
	 "modify vm_copy source's shared "
	 "or copied from/to region"},
};
static int numofvmcopyactions = sizeof(vmcopy_actions) / sizeof(vmcopy_actions[0]);
static int vmcopy_action_idx;
567
568/************************************/
569/* Setters and getters for fixtures */
570/************************************/
571
572/* Allocation memory range. */
573static allocate_fn_t _allocator = wrapper_mach_vm_allocate;
574static mach_vm_size_t _vm_size = DEFAULT_VM_SIZE;
575static int _address_flag = VM_FLAGS_ANYWHERE;
576static boolean_t _address_alignment = TRUE;
577static mach_vm_address_t _vm_address = 0x0;
578
579/* Buffer for mach_vm_write(). */
580static mach_vm_size_t _buffer_size = DEFAULT_VM_SIZE;
581static mach_vm_address_t _buffer_address = 0x0;
582static int _buffer_offset = 0;
583
584/* Post action for mach_vm_copy(). */
585static int _vmcopy_post_action = VMCOPY_MODIFY_SRC;
586
/* Trivial setters and getters for the per-test fixture globals above.
 * Used by the set_up_* helpers before each forked unit test. */

static void
set_allocator(allocate_fn_t allocate)
{
	_allocator = allocate;
}

static allocate_fn_t
get_allocator()
{
	return _allocator;
}

static void
set_vm_size(mach_vm_size_t size)
{
	_vm_size = size;
}

static mach_vm_size_t
get_vm_size()
{
	return _vm_size;
}

static void
set_address_flag(int flag)
{
	_address_flag = flag;
}

static int
get_address_flag()
{
	return _address_flag;
}

static void
set_address_alignment(boolean_t alignment)
{
	_address_alignment = alignment;
}

static boolean_t
get_address_alignment()
{
	return _address_alignment;
}

static void
set_vm_address(mach_vm_address_t address)
{
	_vm_address = address;
}

static mach_vm_address_t
get_vm_address()
{
	return _vm_address;
}

/* Buffer for mach_vm_write() tests. */
static void
set_buffer_size(mach_vm_size_t size)
{
	_buffer_size = size;
}

static mach_vm_size_t
get_buffer_size()
{
	return _buffer_size;
}

static void
set_buffer_address(mach_vm_address_t address)
{
	_buffer_address = address;
}

static mach_vm_address_t
get_buffer_address()
{
	return _buffer_address;
}

static void
set_buffer_offset(int offset)
{
	_buffer_offset = offset;
}

static int
get_buffer_offset()
{
	return _buffer_offset;
}

/* Post action for mach_vm_copy() tests. */
static void
set_vmcopy_post_action(int action)
{
	_vmcopy_post_action = action;
}

static int
get_vmcopy_post_action()
{
	return _vmcopy_post_action;
}
694
695/*******************************/
696/* Usage and option processing */
697/*******************************/
698static boolean_t flag_run_allocate_test = FALSE;
699static boolean_t flag_run_deallocate_test = FALSE;
700static boolean_t flag_run_read_test = FALSE;
701static boolean_t flag_run_write_test = FALSE;
702static boolean_t flag_run_protect_test = FALSE;
703static boolean_t flag_run_copy_test = FALSE;
704
705#define VM_TEST_ALLOCATE 0x00000001
706#define VM_TEST_DEALLOCATE 0x00000002
707#define VM_TEST_READ 0x00000004
708#define VM_TEST_WRITE 0x00000008
709#define VM_TEST_PROTECT 0x00000010
710#define VM_TEST_COPY 0x00000020
711
/* Parsed options: a bit mask of VM_TEST_* families to run, the
 * requested quietness, and an override for the default VM size. */
typedef struct test_option {
	uint32_t to_flags;
	int to_quietness;
	mach_vm_size_t to_vmsize;
} test_option_t;

/* Maps a test family name to the global enable flag it controls. */
typedef struct test_info {
	char *ti_name;
	boolean_t *ti_flag;
} test_info_t;

static test_option_t test_options;

/* Indices into test_info[]; order matches the VM_TEST_* bit order. */
enum {ALLOCATE = 0, DEALLOCATE, READ, WRITE, PROTECT, COPY};

/* NULL-name terminated. */
static test_info_t test_info[] = {
	{"allocate", &flag_run_allocate_test},
	{"deallocate", &flag_run_deallocate_test},
	{"read", &flag_run_read_test},
	{"write", &flag_run_write_test},
	{"protect", &flag_run_protect_test},
	{"copy", &flag_run_copy_test},
	{NULL, NULL}
};
736
/* Abort the test run when `condition` is non-zero, reporting the
 * offending option value string. */
static void
die_on_invalid_value(int condition, const char * value_string)
{
	T_QUIET;
	T_ASSERT_EQ(condition, 0, "%s: invalid value: %s.",
	    progname, value_string);
}
744
745static void
746process_options(test_option_t options)
747{
748 test_info_t *tp;
749
750 setvbuf(stdout, NULL, _IONBF, 0);
751
752 set_vm_size(DEFAULT_VM_SIZE);
753 set_quietness(DEFAULT_QUIETNESS);
754
755 if (NULL != getenv("LTERDOS")) {
756 logr("LTERDOS=YES this is LeanTestEnvironment\nIncreasing quietness by 1.");
757 set_quietness(get_quietness() + 1);
758 } else {
759 if (options.to_quietness > 0) {
760 set_quietness(options.to_quietness);
761 }
762 }
763
764 if (options.to_vmsize != 0) {
765 vm_sizes[0].size = options.to_vmsize;
766 }
767
768 if (options.to_flags == 0) {
769 for (tp = test_info; tp->ti_name != NULL; ++tp) {
770 *tp->ti_flag = TRUE;
771 }
772 } else {
773 if (options.to_flags & VM_TEST_ALLOCATE) {
774 *(test_info[ALLOCATE].ti_flag) = TRUE;
775 }
776
777 if (options.to_flags & VM_TEST_DEALLOCATE) {
778 *(test_info[DEALLOCATE].ti_flag) = TRUE;
779 }
780
781 if (options.to_flags & VM_TEST_READ) {
782 *(test_info[READ].ti_flag) = TRUE;
783 }
784
785 if (options.to_flags & VM_TEST_WRITE) {
786 *(test_info[WRITE].ti_flag) = TRUE;
787 }
788
789 if (options.to_flags & VM_TEST_PROTECT) {
790 *(test_info[PROTECT].ti_flag) = TRUE;
791 }
792
793 if (options.to_flags & VM_TEST_COPY) {
794 *(test_info[COPY].ti_flag) = TRUE;
795 }
796 }
797}
798
799/*****************/
800/* Various tools */
801/*****************/
802
803/* Find the allocator address alignment mask. */
804mach_vm_address_t
805get_mask()
806{
807 mach_vm_address_t mask;
808
809 if (get_allocator() == wrapper_mach_vm_map_2MB) {
810 mask = (mach_vm_address_t)0x1FFFFF;
811 } else {
812 mask = vm_page_size - 1;
813 }
814 return mask;
815}
816
/* Find the size of the smallest aligned region containing a given
 * memory range. */
mach_vm_size_t
aligned_size(mach_vm_address_t address, mach_vm_size_t size)
{
	/* Distance from the page base to the end of the range, rounded
	 * up to a whole number of pages. */
	return round_page_kernel(address - mach_vm_trunc_page(address) + size);
}
824
825/********************/
826/* Assert functions */
827/********************/
828
/* Address is aligned on allocator boundary. */
static inline void
assert_aligned_address(mach_vm_address_t address)
{
	T_QUIET; T_ASSERT_EQ((address & get_mask()), 0,
	    "Address 0x%jx is unexpectedly "
	    "unaligned.",
	    (uintmax_t)address);
}

/* Address is truncated to allocator boundary. */
static inline void
assert_trunc_address(mach_vm_address_t address, mach_vm_address_t trunc_address)
{
	T_QUIET; T_ASSERT_EQ(trunc_address, (address & ~get_mask()),
	    "Address "
	    "0x%jx is unexpectedly not truncated to address 0x%jx.",
	    (uintmax_t)address, (uintmax_t)trunc_address);
}

/* Assert that the word stored at `address` equals `marker`. */
static inline void
assert_address_value(mach_vm_address_t address, mach_vm_address_t marker)
{
	/* this assert is used so frequently so that we simply judge on
	 * its own instead of leaving this to LD macro for efficiency
	 */
	if (MACH_VM_ADDRESS_T(address) != marker) {
		T_ASSERT_FAIL("Address 0x%jx unexpectedly has value 0x%jx, "
		    "instead of 0x%jx.", (uintmax_t)address,
		    (uintmax_t)MACH_VM_ADDRESS_T(address), (uintmax_t)marker);
	}
}
861
/* Run the currently selected allocator and assert its return value. */
void
assert_allocate_return(mach_vm_address_t * address, mach_vm_size_t size, int address_flag, kern_return_t expected_kr)
{
	assert_mach_return(get_allocator()(mach_task_self(), address, size, address_flag), expected_kr, "Allocator");
}

void
assert_allocate_success(mach_vm_address_t * address, mach_vm_size_t size, int address_flag)
{
	assert_allocate_return(address, size, address_flag, KERN_SUCCESS);
}

/* mach_vm_deallocate() with an expected return value. */
void
assert_deallocate_return(mach_vm_address_t address, mach_vm_size_t size, kern_return_t expected_kr)
{
	assert_mach_return(mach_vm_deallocate(mach_task_self(), address, size), expected_kr, "mach_vm_deallocate()");
}

void
assert_deallocate_success(mach_vm_address_t address, mach_vm_size_t size)
{
	assert_deallocate_return(address, size, KERN_SUCCESS);
}

/* mach_vm_read() with an expected return value. */
void
assert_read_return(mach_vm_address_t address,
    mach_vm_size_t size,
    vm_offset_t * data,
    mach_msg_type_number_t * data_size,
    kern_return_t expected_kr)
{
	assert_mach_return(mach_vm_read(mach_task_self(), address, size, data, data_size), expected_kr, "mach_vm_read()");
}

/* A successful mach_vm_read() must also return a buffer of exactly
 * the requested size. */
void
assert_read_success(mach_vm_address_t address, mach_vm_size_t size, vm_offset_t * data, mach_msg_type_number_t * data_size)
{
	assert_read_return(address, size, data, data_size, KERN_SUCCESS);
	T_QUIET; T_ASSERT_EQ(*data_size, size,
	    "Returned buffer size 0x%jx "
	    "(%ju) is unexpectedly different from source size 0x%jx "
	    "(%ju).",
	    (uintmax_t)*data_size, (uintmax_t)*data_size, (uintmax_t)size, (uintmax_t)size);
}

/* mach_vm_write() with an expected return value. */
void
assert_write_return(mach_vm_address_t address, vm_offset_t data, mach_msg_type_number_t data_size, kern_return_t expected_kr)
{
	assert_mach_return(mach_vm_write(mach_task_self(), address, data, data_size), expected_kr, "mach_vm_write()");
}

void
assert_write_success(mach_vm_address_t address, vm_offset_t data, mach_msg_type_number_t data_size)
{
	assert_write_return(address, data, data_size, KERN_SUCCESS);
}

/* Allocate a fresh destination region, then mach_vm_copy() into it
 * with an expected return value; *dest receives the new region. */
void
assert_allocate_copy_return(mach_vm_address_t source, mach_vm_size_t size, mach_vm_address_t * dest, kern_return_t expected_kr)
{
	assert_allocate_success(dest, size, VM_FLAGS_ANYWHERE);
	assert_mach_return(mach_vm_copy(mach_task_self(), source, size, *dest), expected_kr, "mach_vm_copy()");
}
void
assert_allocate_copy_success(mach_vm_address_t source, mach_vm_size_t size, mach_vm_address_t * dest)
{
	assert_allocate_copy_return(source, size, dest, KERN_SUCCESS);
}

/* mach_vm_copy() into an existing region with an expected return value. */
void
assert_copy_return(mach_vm_address_t source, mach_vm_size_t size, mach_vm_address_t dest, kern_return_t expected_kr)
{
	assert_mach_return(mach_vm_copy(mach_task_self(), source, size, dest), expected_kr, "mach_vm_copy()");
}

void
assert_copy_success(mach_vm_address_t source, mach_vm_size_t size, mach_vm_address_t dest)
{
	assert_copy_return(source, size, dest, KERN_SUCCESS);
}
942
943/*******************/
944/* Memory patterns */
945/*******************/
946
947typedef boolean_t (*address_filter_t)(mach_vm_address_t);
948typedef void (*address_action_t)(mach_vm_address_t, mach_vm_address_t);
949
950/* Map over a memory region pattern and its complement, through a
951 * (possibly reversed) boolean filter and a starting value. */
void
filter_addresses_do_else(address_filter_t filter,
    boolean_t reversed,
    mach_vm_address_t address,
    mach_vm_size_t size,
    address_action_t if_action,
    address_action_t else_action,
    mach_vm_address_t start_value)
{
	mach_vm_address_t i;
	/* Walk the region one word (vm_address_size bytes) at a time,
	 * applying if_action where filter() != reversed and else_action
	 * elsewhere.
	 * NOTE(review): the `i + vm_address_size < size` bound stops one
	 * word short — the final word at offset size - vm_address_size
	 * is never visited; confirm this is intentional before relying
	 * on full-region coverage. */
	for (i = 0; i + vm_address_size < size; i += vm_address_size) {
		if (filter(address + i) != reversed) {
			if_action(address + i, start_value + i);
		} else {
			else_action(address + i, start_value + i);
		}
	}
}
970
/* Various pattern actions. */

/* Action that ignores both arguments. */
void
no_action(mach_vm_address_t i, mach_vm_address_t value)
{
}

/* Assert the word at address i reads as zero; `value` is unused. */
void
read_zero(mach_vm_address_t i, mach_vm_address_t value)
{
	assert_address_value(i, 0);
}

/* Assert the word at address i equals `value`. */
void
verify_address(mach_vm_address_t i, mach_vm_address_t value)
{
	assert_address_value(i, value);
}

/* Store `value` into the word at address i. */
void
write_address(mach_vm_address_t i, mach_vm_address_t value)
{
	MACH_VM_ADDRESS_T(i) = value;
}
994
/* Various patterns. */

/* Pattern that selects no address. */
boolean_t
empty(mach_vm_address_t i)
{
	return FALSE;
}

/* Pattern that selects every other word, starting with the first. */
boolean_t
checkerboard(mach_vm_address_t i)
{
	return !((i / vm_address_size) & 0x1);
}

/* Pattern that selects the first and last word of each page. */
boolean_t
page_ends(mach_vm_address_t i)
{
	mach_vm_address_t residue = i % vm_page_size;

	return residue == 0 || residue == vm_page_size - vm_address_size;
}
1015
1016/*************************************/
1017/* Global variables set up functions */
1018/*************************************/
1019
/* Select the allocator under test from allocators[allocators_idx]. */
void
set_up_allocator()
{
	T_QUIET; T_ASSERT_TRUE(allocators_idx >= 0 && allocators_idx < numofallocators, "Invalid allocators[] index: %d.", allocators_idx);
	set_allocator(allocators[allocators_idx].allocate);
}
1026
/* Find a fixed allocatable address by retrieving the address
 * populated by mach_vm_allocate() with VM_FLAGS_ANYWHERE. The region
 * is left allocated and recorded in fixed_vm_address/fixed_vm_size;
 * check_fixed_address() releases it just before the real allocation. */
mach_vm_address_t
get_fixed_address(mach_vm_size_t size)
{
	/* mach_vm_map() starts looking for an address at 0x0. */
	mach_vm_address_t address = 0x0;

	/*
	 * The tests seem to have some funky off by one allocations. To avoid problems, we'll bump anything
	 * non-zero to have at least an extra couple pages.
	 */
	if (size != 0) {
		size = round_page_kernel(size + 2 * vm_page_size);
	}

	assert_allocate_success(&address, size, VM_FLAGS_ANYWHERE);

	/*
	 * Keep the memory allocated, otherwise the logv()/printf() activity sprinkled in these tests can
	 * cause malloc() to use the desired range and tests will randomly fail. The allocate routines will
	 * do the delayed vm_deallocate() to free the fixed memory just before allocation testing in the wrapper.
	 */
	T_QUIET; T_ASSERT_EQ(fixed_vm_address, 0, "previous fixed address not used");
	T_QUIET; T_ASSERT_EQ(fixed_vm_size, 0, "previous fixed size not used");
	fixed_vm_address = address;
	fixed_vm_size = size;

	assert_aligned_address(address);
	return address;
}
1058
/* If needed, find an address at which a region of the specified size
 * can be allocated. Otherwise, set the address to 0x0. */
void
set_up_vm_address(mach_vm_size_t size)
{
	T_QUIET; T_ASSERT_TRUE(flags_idx >= 0 && flags_idx < numofflags, "Invalid address_flags[] index: %d.", flags_idx);
	T_QUIET; T_ASSERT_TRUE(alignments_idx >= 0 && alignments_idx < numofalignments, "Invalid address_alignments[] index: %d.", alignments_idx);
	set_address_flag(address_flags[flags_idx].flag);
	set_address_alignment(address_alignments[alignments_idx].alignment);

	if (!(get_address_flag() & VM_FLAGS_ANYWHERE)) {
		boolean_t aligned = get_address_alignment();
		logv(
			"Looking for fixed %saligned address for allocation "
			"of 0x%jx (%ju) byte%s...",
			aligned ? "" : "un", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s");
		mach_vm_address_t address = get_fixed_address(size);
		/* The unaligned variant is the fixed address bumped by one
		 * byte. */
		if (!aligned) {
			address++;
		}
		set_vm_address(address);
		logv("Found %saligned fixed address 0x%jx.", aligned ? "" : "un", (uintmax_t)address);
	} else {
		/* mach_vm_map() with VM_FLAGS_ANYWHERE starts looking for
		 * an address at the one supplied and goes up, without
		 * wrapping around. */
		set_vm_address(0x0);
	}
}
1088
/* Set the global VM size from the vm_sizes[] entry selected by the
 * current sizes_idx, after bounds-checking the index. */
void
set_up_vm_size()
{
	T_QUIET; T_ASSERT_TRUE(sizes_idx >= 0 && sizes_idx < numofsizes, "Invalid vm_sizes[] index: %d.", sizes_idx);
	set_vm_size(vm_sizes[sizes_idx].size);
}
1095
/* Set the global buffer size from vm_sizes[] (buffer sizes share the
 * vm_sizes[] table), after bounds-checking the index. */
void
set_up_buffer_size()
{
	T_QUIET; T_ASSERT_TRUE(buffer_sizes_idx >= 0 && buffer_sizes_idx < numofsizes, "Invalid vm_sizes[] index: %d.", buffer_sizes_idx);
	set_buffer_size(vm_sizes[buffer_sizes_idx].size);
}
1102
/* Set the global buffer offset from the buffer_offsets[] entry
 * selected by the current offsets_idx, after bounds-checking. */
void
set_up_buffer_offset()
{
	T_QUIET; T_ASSERT_TRUE(offsets_idx >= 0 && offsets_idx < numofoffsets, "Invalid buffer_offsets[] index: %d.", offsets_idx);
	set_buffer_offset(buffer_offsets[offsets_idx].offset);
}
1109
/* Set the global mach_vm_copy() post-action from the vmcopy_actions[]
 * entry selected by the current vmcopy_action_idx. */
void
set_up_vmcopy_action()
{
	T_QUIET; T_ASSERT_TRUE(vmcopy_action_idx >= 0 && vmcopy_action_idx < numofvmcopyactions, "Invalid vmcopy_actions[] index: %d.",
	    vmcopy_action_idx);
	set_vmcopy_post_action(vmcopy_actions[vmcopy_action_idx].action);
}
1117
/* Convenience fixture: select the allocator, then the VM size. */
void
set_up_allocator_and_vm_size()
{
	set_up_allocator();
	set_up_vm_size();
}
1124
/* Convenience fixture: select the VM size, then find an address
 * suitable for an allocation of that size. */
void
set_up_vm_variables()
{
	set_up_vm_size();
	set_up_vm_address(get_vm_size());
}
1131
/* Convenience fixture: select the allocator, then size and address. */
void
set_up_allocator_and_vm_variables()
{
	set_up_allocator();
	set_up_vm_variables();
}
1138
/* Convenience fixture: select the buffer size and buffer offset. */
void
set_up_buffer_variables()
{
	set_up_buffer_size();
	set_up_buffer_offset();
}
1145
/* Fixture for the copy/shared-mode tests: select the post-copy
 * action only. */
void
set_up_copy_shared_mode_variables()
{
	set_up_vmcopy_action();
}
1151
1152/*******************************/
1153/* Allocation set up functions */
1154/*******************************/
1155
1156/* Allocate VM region of given size. */
1157void
1158allocate(mach_vm_size_t size)
1159{
1160 mach_vm_address_t address = get_vm_address();
1161 int flag = get_address_flag();
1162
1163 logv("Allocating 0x%jx (%ju) byte%s", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s");
1164 if (!(flag & VM_FLAGS_ANYWHERE)) {
1165 logv(" at address 0x%jx", (uintmax_t)address);
1166 }
1167 logv("...");
1168 assert_allocate_success(&address, size, flag);
1169 logv(
1170 "Memory of rounded size 0x%jx (%ju) allocated at "
1171 "address 0x%jx.",
1172 (uintmax_t)round_page_kernel(size), (uintmax_t)round_page_kernel(size), (uintmax_t)address);
1173 /* Fixed allocation address is truncated to the allocator
1174 * boundary. */
1175 if (!(flag & VM_FLAGS_ANYWHERE)) {
1176 mach_vm_address_t old_address = get_vm_address();
1177 assert_trunc_address(old_address, address);
1178 logv(
1179 "Address 0x%jx is correctly truncated to allocated "
1180 "address 0x%jx.",
1181 (uintmax_t)old_address, (uintmax_t)address);
1182 }
1183 set_vm_address(address);
1184}
1185
1186void
1187allocate_buffer(mach_vm_size_t buffer_size)
1188{
1189 mach_vm_address_t data = 0x0;
1190
1191 logv("Allocating 0x%jx (%ju) byte%s...", (uintmax_t)buffer_size, (uintmax_t)buffer_size, (buffer_size == 1) ? "" : "s");
1192 assert_allocate_success(&data, buffer_size, VM_FLAGS_ANYWHERE);
1193 logv(
1194 "Memory of rounded size 0x%jx (%ju) allocated at "
1195 "address 0x%jx.",
1196 (uintmax_t)round_page_kernel(buffer_size), (uintmax_t)round_page_kernel(buffer_size), (uintmax_t)data);
1197 data += get_buffer_offset();
1198 T_QUIET; T_ASSERT_EQ((vm_offset_t)data, data,
1199 "Address 0x%jx "
1200 "unexpectedly overflows to 0x%jx when cast as "
1201 "vm_offset_t type.",
1202 (uintmax_t)data, (uintmax_t)(vm_offset_t)data);
1203 set_buffer_address(data);
1204}
1205
1206/****************************************************/
1207/* Global variables and allocation set up functions */
1208/****************************************************/
1209
/* Fixture: pick size and address, then allocate the region. */
void
set_up_vm_variables_and_allocate()
{
	set_up_vm_variables();
	allocate(get_vm_size());
}
1216
/* Fixture: pick the allocator, then size/address, then allocate. */
void
set_up_allocator_and_vm_variables_and_allocate()
{
	set_up_allocator();
	set_up_vm_variables_and_allocate();
}
1223
/* Fixture: allocate one byte more than the configured size so that an
 * unaligned start address still has a full extra page behind it. */
void
set_up_vm_variables_and_allocate_extra_page()
{
	set_up_vm_size();
	/* Increment the size to insure we get an extra allocated page
	 * for unaligned start addresses. */
	mach_vm_size_t allocation_size = get_vm_size() + 1;
	set_up_vm_address(allocation_size);

	allocate(allocation_size);
	/* In the fixed unaligned address case, restore the returned
	 * (truncated) allocation address to its unaligned value. */
	if (!get_address_alignment()) {
		set_vm_address(get_vm_address() + 1);
	}
}
1240
/* Fixture: pick buffer size/offset, then allocate the buffer with the
 * offset added so an offset start address is fully backed. */
void
set_up_buffer_variables_and_allocate_extra_page()
{
	set_up_buffer_variables();
	/* Increment the size to insure we get an extra allocated page
	 * for unaligned start addresses. */
	allocate_buffer(get_buffer_size() + get_buffer_offset());
}
1249
/* Allocate some destination and buffer memory for subsequent
 * writing, including extra pages for non-aligned start addresses. */
void
set_up_vm_and_buffer_variables_allocate_for_writing()
{
	set_up_vm_variables_and_allocate_extra_page();
	set_up_buffer_variables_and_allocate_extra_page();
}
1258
/* Allocate some destination and source regions for subsequent
 * copying, including extra pages for non-aligned start addresses.
 * The copy layout is identical to the write layout, so reuse it. */
void
set_up_vm_and_buffer_variables_allocate_for_copying()
{
	set_up_vm_and_buffer_variables_allocate_for_writing();
}
1266
1267/************************************/
1268/* Deallocation tear down functions */
1269/************************************/
1270
/* Deallocate the given range, asserting success; logs before the
 * call so a crash inside deallocation is attributable. */
void
deallocate_range(mach_vm_address_t address, mach_vm_size_t size)
{
	logv("Deallocating 0x%jx (%ju) byte%s at address 0x%jx...", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s",
	    (uintmax_t)address);
	assert_deallocate_success(address, size);
}
1278
/* Deallocate the globally-configured region. */
void
deallocate()
{
	deallocate_range(get_vm_address(), get_vm_size());
}
1284
/* Deallocate source memory, including the extra page for unaligned
 * start addresses. */
void
deallocate_extra_page()
{
	/* Set the address and size to their original allocation
	 * values: truncate the (possibly bumped) address back to its
	 * page, and add back the extra byte allocated by
	 * set_up_vm_variables_and_allocate_extra_page(). */
	deallocate_range(mach_vm_trunc_page(get_vm_address()), get_vm_size() + 1);
}
1294
/* Deallocate buffer and destination memory for mach_vm_write(),
 * including the extra page for unaligned start addresses. Both
 * ranges are restored to their original allocation bounds first. */
void
deallocate_vm_and_buffer()
{
	deallocate_range(mach_vm_trunc_page(get_vm_address()), get_vm_size() + 1);
	deallocate_range(mach_vm_trunc_page(get_buffer_address()), get_buffer_size() + get_buffer_offset());
}
1303
1304/***********************************/
1305/* mach_vm_read() set up functions */
1306/***********************************/
1307
1308/* Read the source memory into a buffer, deallocate the source, set
1309 * the global address and size from the buffer's. */
1310void
1311read_deallocate()
1312{
1313 mach_vm_size_t size = get_vm_size();
1314 mach_vm_address_t address = get_vm_address();
1315 vm_offset_t read_address;
1316 mach_msg_type_number_t read_size;
1317
1318 logv("Reading 0x%jx (%ju) byte%s at address 0x%jx...", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s",
1319 (uintmax_t)address);
1320 assert_read_success(address, size, &read_address, &read_size);
1321 logv(
1322 "Memory of size 0x%jx (%ju) read into buffer of "
1323 "address 0x%jx.",
1324 (uintmax_t)read_size, (uintmax_t)read_size, (uintmax_t)read_address);
1325 /* Deallocate the originally allocated memory, including the
1326 * extra allocated page in
1327 * set_up_vm_variables_and_allocate_extra_page(). */
1328 deallocate_range(mach_vm_trunc_page(address), size + 1);
1329
1330 /* Promoting to mach_vm types after checking for overflow, and
1331 * setting the global address from the buffer's. */
1332 T_QUIET; T_ASSERT_EQ((mach_vm_address_t)read_address, read_address,
1333 "Address 0x%jx unexpectedly overflows to 0x%jx when cast "
1334 "as mach_vm_address_t type.",
1335 (uintmax_t)read_address, (uintmax_t)(mach_vm_address_t)read_address);
1336 T_QUIET; T_ASSERT_EQ((mach_vm_size_t)read_size, read_size,
1337 "Size 0x%jx (%ju) unexpectedly overflows to 0x%jx (%ju) "
1338 "when cast as mach_vm_size_t type.",
1339 (uintmax_t)read_size, (uintmax_t)read_size, (uintmax_t)(mach_vm_size_t)read_size, (uintmax_t)(mach_vm_size_t)read_size);
1340 set_vm_address((mach_vm_address_t)read_address);
1341 set_vm_size((mach_vm_size_t)read_size);
1342}
1343
/* Allocate some source memory, read it into a buffer, deallocate the
 * source, set the global address and size from the buffer's. */
void
set_up_vm_variables_allocate_read_deallocate()
{
	set_up_vm_variables_and_allocate_extra_page();
	read_deallocate();
}
1352
1353/************************************/
1354/* mach_vm_write() set up functions */
1355/************************************/
1356
1357/* Write the buffer into the destination memory. */
1358void
1359write_buffer()
1360{
1361 mach_vm_address_t address = get_vm_address();
1362 vm_offset_t data = (vm_offset_t)get_buffer_address();
1363 mach_msg_type_number_t buffer_size = (mach_msg_type_number_t)get_buffer_size();
1364
1365 logv(
1366 "Writing buffer of address 0x%jx and size 0x%jx (%ju), on "
1367 "memory at address 0x%jx...",
1368 (uintmax_t)data, (uintmax_t)buffer_size, (uintmax_t)buffer_size, (uintmax_t)address);
1369 assert_write_success(address, data, buffer_size);
1370 logv("Buffer written.");
1371}
1372
/* Allocate some destination and buffer memory, and write the buffer
 * into the destination memory. */
void
set_up_vm_and_buffer_variables_allocate_write()
{
	set_up_vm_and_buffer_variables_allocate_for_writing();
	write_buffer();
}
1381
1382/***********************************/
1383/* mach_vm_copy() set up functions */
1384/***********************************/
1385
1386void
1387copy_deallocate(void)
1388{
1389 mach_vm_size_t size = get_vm_size();
1390 mach_vm_address_t source = get_vm_address();
1391 mach_vm_address_t dest = 0;
1392
1393 logv("Copying 0x%jx (%ju) byte%s at address 0x%jx...", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s",
1394 (uintmax_t)source);
1395 assert_allocate_copy_success(source, size, &dest);
1396 logv(
1397 "Memory of size 0x%jx (%ju) copy into region of "
1398 "address 0x%jx.",
1399 (uintmax_t)size, (uintmax_t)size, (uintmax_t)dest);
1400 /* Deallocate the originally allocated memory, including the
1401 * extra allocated page in
1402 * set_up_vm_variables_and_allocate_extra_page(). */
1403 deallocate_range(mach_vm_trunc_page(source), size + 1);
1404 /* Promoting to mach_vm types after checking for overflow, and
1405 * setting the global address from the buffer's. */
1406 T_QUIET; T_ASSERT_EQ((vm_offset_t)dest, dest,
1407 "Address 0x%jx unexpectedly overflows to 0x%jx when cast "
1408 "as mach_vm_address_t type.",
1409 (uintmax_t)dest, (uintmax_t)(vm_offset_t)dest);
1410 set_vm_address(dest);
1411 set_vm_size(size);
1412}
1413
1414/* Copy the source region into the destination region. */
1415void
1416copy_region()
1417{
1418 mach_vm_address_t source = get_vm_address();
1419 mach_vm_address_t dest = get_buffer_address();
1420 mach_msg_type_number_t size = (mach_msg_type_number_t)get_buffer_size();
1421
1422 logv(
1423 "Copying memory region of address 0x%jx and size 0x%jx (%ju), on "
1424 "memory at address 0x%jx...",
1425 (uintmax_t)source, (uintmax_t)size, (uintmax_t)size, (uintmax_t)dest);
1426 assert_copy_success(source, size, dest);
1427 logv("Buffer written.");
1428}
1429
/* Allocate some source memory, copy it to another region, deallocate the
 * source, set the global address and size from the destination region. */
void
set_up_vm_variables_allocate_copy_deallocate()
{
	set_up_vm_variables_and_allocate_extra_page();
	copy_deallocate();
}
1438
/* Allocate some destination and source memory, and copy the source
 * into the destination memory. */
void
set_up_source_and_dest_variables_allocate_copy()
{
	set_up_vm_and_buffer_variables_allocate_for_copying();
	copy_region();
}
1447
1448/**************************************/
1449/* mach_vm_protect() set up functions */
1450/**************************************/
1451
1452void
1453set_up_vm_variables_allocate_protect(vm_prot_t protection, const char * protection_name)
1454{
1455 set_up_vm_variables_and_allocate_extra_page();
1456 mach_vm_size_t size = get_vm_size();
1457 mach_vm_address_t address = get_vm_address();
1458
1459 logv(
1460 "Setting %s-protection on 0x%jx (%ju) byte%s at address "
1461 "0x%jx...",
1462 protection_name, (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s", (uintmax_t)address);
1463 T_QUIET; T_ASSERT_MACH_SUCCESS(mach_vm_protect(mach_task_self(), address, size, FALSE, protection), "mach_vm_protect()");
1464 logv("Region %s-protected.", protection_name);
1465}
1466
/* Read-protect the region: granting only VM_PROT_WRITE removes read
 * permission. */
void
set_up_vm_variables_allocate_readprotect()
{
	set_up_vm_variables_allocate_protect(VM_PROT_WRITE, "read");
}
1472
/* Write-protect the region: granting only VM_PROT_READ removes write
 * permission. */
void
set_up_vm_variables_allocate_writeprotect()
{
	set_up_vm_variables_allocate_protect(VM_PROT_READ, "write");
}
1478
1479/*****************/
1480/* Address tests */
1481/*****************/
1482
1483/* Allocated address is nonzero iff size is nonzero. */
1484void
1485test_nonzero_address_iff_nonzero_size()
1486{
1487 mach_vm_address_t address = get_vm_address();
1488 mach_vm_size_t size = get_vm_size();
1489
1490 T_QUIET; T_ASSERT_TRUE((address && size) || (!address && !size), "Address 0x%jx is unexpectedly %szero.", (uintmax_t)address,
1491 address ? "non" : "");
1492 logv("Address 0x%jx is %szero as expected.", (uintmax_t)address, size ? "non" : "");
1493}
1494
/* Allocated address is aligned. */
void
test_aligned_address()
{
	mach_vm_address_t address = get_vm_address();

	/* assert_aligned_address() aborts the test on misalignment. */
	assert_aligned_address(address);
	logv("Address 0x%jx is aligned.", (uintmax_t)address);
}
1504
1505/************************/
1506/* Read and write tests */
1507/************************/
1508
/* Verify that addresses selected by filter (or its complement when
 * reversed) hold their own address value, and that all others read
 * as zero; pattern_name is used only for logging. */
void
verify_pattern(
	address_filter_t filter, boolean_t reversed, mach_vm_address_t address, mach_vm_size_t size, const char * pattern_name)
{
	logv(
	    "Verifying %s pattern on region of address 0x%jx "
	    "and size 0x%jx (%ju)...",
	    pattern_name, (uintmax_t)address, (uintmax_t)size, (uintmax_t)size);
	filter_addresses_do_else(filter, reversed, address, size, verify_address, read_zero, address);
	logv("Pattern verified.");
}
1520
1521void
1522write_pattern(
1523 address_filter_t filter, boolean_t reversed, mach_vm_address_t address, mach_vm_size_t size, const char * pattern_name)
1524{
1525 logv(
1526 "Writing %s pattern on region of address 0x%jx "
1527 "and size 0x%jx (%ju)...",
1528 pattern_name, (uintmax_t)address, (uintmax_t)size, (uintmax_t)size);
1529 filter_addresses_do_else(filter, reversed, address, size, write_address, no_action, address);
1530 logv("Pattern writen.");
1531}
1532
/* Write the filter-selected pattern and immediately verify it:
 * selected addresses must hold their own value, others read zero. */
void
write_and_verify_pattern(
	address_filter_t filter, boolean_t reversed, mach_vm_address_t address, mach_vm_size_t size, const char * pattern_name)
{
	logv(
	    "Writing and verifying %s pattern on region of "
	    "address 0x%jx and size 0x%jx (%ju)...",
	    pattern_name, (uintmax_t)address, (uintmax_t)size, (uintmax_t)size);
	filter_addresses_do_else(filter, reversed, address, size, write_address, no_action, address);
	filter_addresses_do_else(filter, reversed, address, size, verify_address, read_zero, address);
	logv("Pattern written and verified.");
}
1545
/* Verify that the smallest aligned region containing the
 * given range is zero-filled. */
void
test_zero_filled()
{
	verify_pattern(empty, FALSE, mach_vm_trunc_page(get_vm_address()), aligned_size(get_vm_address(), get_vm_size()),
	    "zero-filled");
}
1554
/* Write and verify an address-filled pattern over the whole rounded
 * region (empty filter reversed selects every address). */
void
test_write_address_filled()
{
	write_and_verify_pattern(empty, TRUE, get_vm_address(), round_page_kernel(get_vm_size()), "address-filled");
}
1560
/* Write and verify a checkerboard pattern over the rounded region. */
void
test_write_checkerboard()
{
	write_and_verify_pattern(checkerboard, FALSE, get_vm_address(), round_page_kernel(get_vm_size()), "checkerboard");
}
1566
/* Write and verify the complementary checkerboard pattern. */
void
test_write_reverse_checkerboard()
{
	write_and_verify_pattern(checkerboard, TRUE, get_vm_address(), round_page_kernel(get_vm_size()), "reverse checkerboard");
}
1572
/* Write and verify a pattern touching only page-end addresses. */
void
test_write_page_ends()
{
	write_and_verify_pattern(page_ends, FALSE, get_vm_address(), round_page_kernel(get_vm_size()), "page ends");
}
1578
/* Write and verify a pattern touching only page-interior addresses
 * (complement of the page-ends filter). */
void
test_write_page_interiors()
{
	write_and_verify_pattern(page_ends, TRUE, get_vm_address(), round_page_kernel(get_vm_size()), "page interiors");
}
1584
1585/*********************************/
1586/* Allocation error return tests */
1587/*********************************/
1588
1589/* Reallocating a page in the smallest aligned region containing the
1590 * given allocated range fails. */
1591void
1592test_reallocate_pages()
1593{
1594 allocate_fn_t allocator = get_allocator();
1595 vm_map_t this_task = mach_task_self();
1596 mach_vm_address_t address = mach_vm_trunc_page(get_vm_address());
1597 mach_vm_size_t size = aligned_size(get_vm_address(), get_vm_size());
1598 mach_vm_address_t i;
1599 kern_return_t kr;
1600
1601 logv(
1602 "Reallocating pages in allocated region of address 0x%jx "
1603 "and size 0x%jx (%ju)...",
1604 (uintmax_t)address, (uintmax_t)size, (uintmax_t)size);
1605 for (i = address; i < address + size; i += vm_page_size) {
1606 kr = allocator(this_task, &i, vm_page_size, VM_FLAGS_FIXED);
1607 T_QUIET; T_ASSERT_EQ(kr, KERN_NO_SPACE,
1608 "Allocator "
1609 "at address 0x%jx unexpectedly returned: %s.\n"
1610 "Should have returned: %s.",
1611 (uintmax_t)address, mach_error_string(kr), mach_error_string(KERN_NO_SPACE));
1612 }
1613 logv("Returned expected error at each page: %s.", mach_error_string(KERN_NO_SPACE));
1614}
1615
/* Allocating in VM_MAP_NULL fails. */
void
test_allocate_in_null_map()
{
	mach_vm_address_t address = get_vm_address();
	mach_vm_size_t size = get_vm_size();
	int flag = get_address_flag();

	logv("Allocating 0x%jx (%ju) byte%s", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s");
	if (!(flag & VM_FLAGS_ANYWHERE)) {
		logv(" at address 0x%jx", (uintmax_t)address);
	}
	logv(" in NULL VM map...");
	/* The MIG stub rejects the null task port before the kernel
	 * sees the call, hence MACH_SEND_INVALID_DEST rather than a
	 * KERN_* error. */
	assert_mach_return(get_allocator()(VM_MAP_NULL, &address, size, flag), MACH_SEND_INVALID_DEST, "Allocator");
	logv("Returned expected error: %s.", mach_error_string(MACH_SEND_INVALID_DEST));
}
1632
1633/* Allocating with non-user flags fails. */
1634void
1635test_allocate_with_kernel_flags()
1636{
1637 allocate_fn_t allocator = get_allocator();
1638 vm_map_t this_task = mach_task_self();
1639 mach_vm_address_t address = get_vm_address();
1640 mach_vm_size_t size = get_vm_size();
1641 int flag = get_address_flag();
1642 int bad_flag, i;
1643 kern_return_t kr;
1644 int kernel_flags[] = {0x100, 0x200, 0x400, 0x800, 0x1000, 0x2000, 0x8000, INT_MAX};
1645 int numofflags = sizeof(kernel_flags) / sizeof(kernel_flags[0]);
1646
1647 logv("Allocating 0x%jx (%ju) byte%s", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s");
1648 if (!(flag & VM_FLAGS_ANYWHERE)) {
1649 logv(" at address 0x%jx", (uintmax_t)address);
1650 }
1651 logv(" with various kernel flags...");
1652 for (i = 0; i < numofflags; i++) {
1653 bad_flag = kernel_flags[i] | flag;
1654 kr = allocator(this_task, &address, size, bad_flag);
1655 T_QUIET; T_ASSERT_EQ(kr, KERN_INVALID_ARGUMENT,
1656 "Allocator "
1657 "with kernel flag 0x%x unexpectedly returned: %s.\n"
1658 "Should have returned: %s.",
1659 bad_flag, mach_error_string(kr), mach_error_string(KERN_INVALID_ARGUMENT));
1660 }
1661 logv("Returned expected error with each kernel flag: %s.", mach_error_string(KERN_INVALID_ARGUMENT));
1662}
1663
1664/*****************************/
1665/* mach_vm_map() error tests */
1666/*****************************/
1667
/* mach_vm_map() fails with invalid protection or inheritance
 * arguments. */
void
test_mach_vm_map_protection_inheritance_error()
{
	kern_return_t kr;
	vm_map_t my_task = mach_task_self();
	mach_vm_address_t address = get_vm_address();
	mach_vm_size_t size = get_vm_size();
	/* The plain mach_vm_map() wrappers pass a zero mask; the other
	 * allocators use the configured mask. */
	vm_map_offset_t mask = (get_allocator() == wrapper_mach_vm_map || get_allocator() == wrapper_mach_vm_map_named_entry)
	    ? (mach_vm_offset_t)0
	    : (mach_vm_offset_t)get_mask();
	int flag = get_address_flag();
	mach_port_t object_handle = (get_allocator() == wrapper_mach_vm_map_named_entry) ? memory_entry(&size) : MACH_PORT_NULL;
	/* Index 0 of each array is the sole valid value; the rest are
	 * invalid protections/inheritances. */
	vm_prot_t cur_protections[] = {VM_PROT_DEFAULT, VM_PROT_ALL + 1, ~VM_PROT_IS_MASK, INT_MAX};
	vm_prot_t max_protections[] = {VM_PROT_ALL, VM_PROT_ALL + 1, ~VM_PROT_IS_MASK, INT_MAX};
	vm_inherit_t inheritances[] = {VM_INHERIT_DEFAULT, VM_INHERIT_LAST_VALID + 1, UINT_MAX};
	int i, j, k;

	logv("Allocating 0x%jx (%ju) byte%s", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s");
	if (!(flag & VM_FLAGS_ANYWHERE)) {
		logv(" at address 0x%jx", (uintmax_t)address);
	}
	logv(
	    " with various invalid protection/inheritance "
	    "arguments...");

	for (i = 0; i < 4; i++) {
		for (j = 0; j < 4; j++) {
			for (k = 0; k < 3; k++) {
				/* Skip the case with all valid arguments. */
				/* NOTE(review): this condition skips more
				 * combinations than i == j == k == 0 (e.g.
				 * i == 0, j == 2, k == 0 is also skipped);
				 * presumably the intent was
				 * (i == 0 && j == 0 && k == 0) — confirm
				 * against kernel behavior before changing. */
				if (i == (j == (k == 0))) {
					continue;
				}
				kr = mach_vm_map(my_task, &address, size, mask, flag, object_handle, (memory_object_offset_t)0, FALSE,
				    cur_protections[i], max_protections[j], inheritances[k]);
				T_QUIET; T_ASSERT_EQ(kr, KERN_INVALID_ARGUMENT,
				    "mach_vm_map() "
				    "with cur_protection 0x%x, max_protection 0x%x, "
				    "inheritance 0x%x unexpectedly returned: %s.\n"
				    "Should have returned: %s.",
				    cur_protections[i], max_protections[j], inheritances[k], mach_error_string(kr),
				    mach_error_string(KERN_INVALID_ARGUMENT));
			}
		}
	}
	logv("Returned expected error in each case: %s.", mach_error_string(KERN_INVALID_ARGUMENT));
}
1716
/* mach_vm_map() with unspecified address fails if the starting
 * address overflows when rounded up to a boundary value. */
void
test_mach_vm_map_large_mask_overflow_error()
{
	mach_vm_address_t address = 0x1;
	mach_vm_size_t size = get_vm_size();
	/* All-ones mask forces the start address to round up past the
	 * end of the address space. */
	mach_vm_offset_t mask = (mach_vm_offset_t)UINTMAX_MAX;
	/* mach_vm_map() cannot allocate 0 bytes at an unspecified
	 * address, see 8003930. */
	kern_return_t kr_expected = size ? KERN_NO_SPACE : KERN_INVALID_ARGUMENT;

	logv(
	    "Allocating 0x%jx (%ju) byte%s at an unspecified address "
	    "starting at 0x%jx with mask 0x%jx...",
	    (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s", (uintmax_t)address, (uintmax_t)mask);
	assert_mach_return(mach_vm_map(mach_task_self(), &address, size, mask, VM_FLAGS_ANYWHERE, MACH_PORT_NULL,
	    (memory_object_offset_t)0, FALSE, VM_PROT_DEFAULT, VM_PROT_ALL, VM_INHERIT_DEFAULT),
	    kr_expected, "mach_vm_map()");
	logv("Returned expected error: %s.", mach_error_string(kr_expected));
}
1738
1739/************************/
1740/* Size edge case tests */
1741/************************/
1742
/* Attempt an anywhere-allocation of an edge-case size and assert the
 * expected kern_return_t. */
void
allocate_edge_size(mach_vm_address_t * address, mach_vm_size_t size, kern_return_t expected_kr)
{
	logv("Allocating 0x%jx (%ju) bytes...", (uintmax_t)size, (uintmax_t)size);
	assert_allocate_return(address, size, VM_FLAGS_ANYWHERE, expected_kr);
	logv("Returned expected value: %s.", mach_error_string(expected_kr));
}
1750
/* Zero-size allocation succeeds for mach_vm_allocate() only. */
void
test_allocate_zero_size()
{
	mach_vm_address_t address = 0x0;
	/* mach_vm_map() cannot allocate 0 bytes at an unspecified
	 * address, see 8003930. Other allocators succeed. */
	kern_return_t kr_expected = (get_allocator() != wrapper_mach_vm_allocate) ? KERN_INVALID_ARGUMENT : KERN_SUCCESS;

	allocate_edge_size(&address, 0, kr_expected);
	if (kr_expected == KERN_SUCCESS) {
		deallocate_range(address, 0);
	}
}
1764
/* Testing the allocation of the largest size that does not overflow
 * when rounded up to a page-aligned value. */
void
test_allocate_invalid_large_size()
{
	mach_vm_size_t size = (mach_vm_size_t)UINTMAX_MAX - vm_page_size + 1;
	if (get_allocator() != wrapper_mach_vm_map_named_entry) {
		mach_vm_address_t address = 0x0;
		allocate_edge_size(&address, size, KERN_NO_SPACE);
	} else {
		/* Named entries cannot currently be bigger than 4 GB
		 * - 4 kb, so entry creation itself fails here. */
		mach_port_t object_handle = MACH_PORT_NULL;
		logv("Creating named entry of 0x%jx (%ju) bytes...", (uintmax_t)size, (uintmax_t)size);
		assert_mach_return(mach_make_memory_entry_64(mach_task_self(), &size, (memory_object_offset_t)0,
		    (MAP_MEM_NAMED_CREATE | VM_PROT_ALL), &object_handle, 0),
		    KERN_FAILURE, "mach_make_memory_entry_64()");
		logv("Returned expected error: %s.", mach_error_string(KERN_FAILURE));
	}
}
1785
/* A UINTMAX_MAX VM size will overflow to 0 when rounded up to a
 * page-aligned value, so the allocators must reject it. */
void
test_allocate_overflowing_size()
{
	mach_vm_address_t address = 0x0;

	allocate_edge_size(&address, (mach_vm_size_t)UINTMAX_MAX, KERN_INVALID_ARGUMENT);
}
1795
1796/****************************/
1797/* Address allocation tests */
1798/****************************/
1799
/* Allocation at address zero fails iff size is nonzero. */
void
test_allocate_at_zero()
{
	mach_vm_address_t address = 0x0;
	mach_vm_size_t size = get_vm_size();

	/* Nonzero size at 0x0: invalid address. Zero size: only
	 * mach_vm_allocate() accepts it (see 8003930). */
	kern_return_t kr_expected =
	    size ? KERN_INVALID_ADDRESS : (get_allocator() != wrapper_mach_vm_allocate) ? KERN_INVALID_ARGUMENT : KERN_SUCCESS;

	logv("Allocating 0x%jx (%ju) byte%s at address 0x0...", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s");
	assert_allocate_return(&address, size, VM_FLAGS_FIXED, kr_expected);
	logv("Returned expected value: %s.", mach_error_string(kr_expected));
	if (kr_expected == KERN_SUCCESS) {
		T_QUIET; T_ASSERT_EQ(address, 0,
		    "Address 0x%jx is unexpectedly "
		    "nonzero.\n",
		    (uintmax_t)address);
		logv("Allocated address 0x%jx is zero.", (uintmax_t)address);
		deallocate_range(address, size);
	}
}
1822
/* Allocation at page-aligned but 2 MB boundary-unaligned address
 * fails with KERN_NO_SPACE. */
void
test_allocate_2MB_boundary_unaligned_page_aligned_address()
{
	mach_vm_size_t size = get_vm_size();

	/* Reserve one extra page so that address + vm_page_size is
	 * page-aligned but off the 2 MB boundary. */
	mach_vm_address_t address = get_fixed_address(size + vm_page_size) + vm_page_size;
	logv(
	    "Found 2 MB boundary-unaligned, page aligned address "
	    "0x%jx.",
	    (uintmax_t)address);

	/* mach_vm_allocate() cannot allocate 0 bytes, and fails with a
	 * fixed boundary-unaligned truncated address. */
	kern_return_t kr_expected = (!size && get_allocator() != wrapper_mach_vm_allocate)
	    ? KERN_INVALID_ARGUMENT
	    : (get_allocator() == wrapper_mach_vm_map_2MB) ? KERN_NO_SPACE : KERN_SUCCESS;
	logv("Allocating 0x%jx (%ju) byte%s at address 0x%jx...", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s",
	    (uintmax_t)address);
	assert_allocate_return(&address, size, VM_FLAGS_FIXED, kr_expected);
	logv("Returned expected value: %s.", mach_error_string(kr_expected));
	if (kr_expected == KERN_SUCCESS) {
		deallocate_range(address, size);
	}
}
1849
/* With VM_FLAGS_ANYWHERE set, mach_vm_allocate() starts looking for
 * an allocation address at 0x0, while mach_vm_map() starts at the
 * supplied address and does not wrap around. See 8016663. */
void
test_allocate_page_with_highest_address_hint()
{
	/* Highest valid page-aligned address. */
	mach_vm_address_t address = (mach_vm_size_t)UINTMAX_MAX - vm_page_size + 1;

	logv(
	    "Allocating one page with unspecified address, but hint at "
	    "0x%jx...",
	    (uintmax_t)address);
	if (get_allocator() == wrapper_mach_vm_allocate) {
		/* mach_vm_allocate() starts from 0x0 and succeeds. */
		assert_allocate_success(&address, vm_page_size, VM_FLAGS_ANYWHERE);
		logv("Memory allocated at address 0x%jx.", (uintmax_t)address);
		assert_aligned_address(address);
		deallocate_range(address, vm_page_size);
	} else {
		/* mach_vm_map() starts from the supplied address, and fails
		 * with KERN_NO_SPACE, see 8016663. */
		assert_allocate_return(&address, vm_page_size, VM_FLAGS_ANYWHERE, KERN_NO_SPACE);
		logv("Returned expected error: %s.", mach_error_string(KERN_NO_SPACE));
	}
}
1876
1877/* Allocators find an allocation address with a first fit strategy. */
1878void
1879test_allocate_first_fit_pages()
1880{
1881 allocate_fn_t allocator = get_allocator();
1882 mach_vm_address_t address1 = 0x0;
1883 mach_vm_address_t i;
1884 kern_return_t kr;
1885 vm_map_t this_task = mach_task_self();
1886
1887 logv(
1888 "Looking for first fit address for allocating one "
1889 "page...");
1890 assert_allocate_success(&address1, vm_page_size, VM_FLAGS_ANYWHERE);
1891 logv("Found address 0x%jx.", (uintmax_t)address1);
1892 assert_aligned_address(address1);
1893 mach_vm_address_t address2 = address1;
1894 logv(
1895 "Looking for next higher first fit address for allocating "
1896 "one page...");
1897 assert_allocate_success(&address2, vm_page_size, VM_FLAGS_ANYWHERE);
1898 logv("Found address 0x%jx.", (uintmax_t)address2);
1899 assert_aligned_address(address2);
1900 T_QUIET; T_ASSERT_GT(address2, address1,
1901 "Second address 0x%jx is "
1902 "unexpectedly not higher than first address 0x%jx.",
1903 (uintmax_t)address2, (uintmax_t)address1);
1904
1905 logv("Allocating pages between 0x%jx and 0x%jx...", (uintmax_t)address1, (uintmax_t)address2);
1906 for (i = address1; i <= address2; i += vm_page_size) {
1907 kr = allocator(this_task, &i, vm_page_size, VM_FLAGS_FIXED);
1908 T_QUIET; T_ASSERT_NE(kr, KERN_SUCCESS,
1909 "Allocator at address 0x%jx "
1910 "unexpectedly succeeded.",
1911 (uintmax_t)i);
1912 }
1913 logv("Expectedly returned error at each page.");
1914 deallocate_range(address1, vm_page_size);
1915 deallocate_range(address2, vm_page_size);
1916}
1917
1918/*******************************/
1919/* Deallocation segfault tests */
1920/*******************************/
1921
1922/* mach_vm_deallocate() deallocates the smallest aligned region
1923 * (integral number of pages) containing the given range. */
1924
/* Addresses in deallocated range are inaccessible: deallocate the
 * global region, then dereference the given address. The test is
 * expected to die with SIGSEGV; reaching the assert means the read
 * unexpectedly succeeded. */
void
access_deallocated_range_address(mach_vm_address_t address, const char * position)
{
	logv("Will deallocate and read from %s 0x%jx of deallocated range...", position, (uintmax_t)address);
	deallocate();
	mach_vm_address_t bad_value = MACH_VM_ADDRESS_T(address);
	T_ASSERT_FAIL("Unexpectedly read value 0x%jx at address 0x%jx.\n"
	    "Should have died with signal SIGSEGV.",
	    (uintmax_t)bad_value, (uintmax_t)address);
}
1936
/* Start of deallocated range is inaccessible. */
void
test_access_deallocated_range_start()
{
	access_deallocated_range_address(get_vm_address(), "start");
}
1943
/* Middle of deallocated range is inaccessible. */
void
test_access_deallocated_range_middle()
{
	access_deallocated_range_address(get_vm_address() + (round_page_kernel(get_vm_size()) >> 1), "middle");
}
1950
/* End of deallocated range is inaccessible (last readable slot of
 * the rounded region). */
void
test_access_deallocated_range_end()
{
	access_deallocated_range_address(round_page_kernel(get_vm_size()) - vm_address_size + get_vm_address(), "end");
}
1957
/* Deallocating almost the whole address space causes a SIGSEGV or SIGBUS. We
 * deallocate the largest valid aligned size to avoid overflowing when
 * rounding up. */
void
test_deallocate_suicide()
{
	mach_vm_address_t address = 0x0;
	mach_vm_size_t size = (mach_vm_size_t)UINTMAX_MAX - vm_page_size + 1;

	logv("Deallocating 0x%jx (%ju) bytes at address 0x%jx...", (uintmax_t)size, (uintmax_t)size, (uintmax_t)address);
	/* This unmaps the process's own code and stack; surviving the
	 * call is the failure mode. */
	kern_return_t kr = mach_vm_deallocate(mach_task_self(), address, size);
	T_ASSERT_FAIL("mach_vm_deallocate() with address 0x%jx and "
	    "size 0x%jx (%ju) unexpectedly returned: %s.\n"
	    "Should have died with signal SIGSEGV or SIGBUS.",
	    (uintmax_t)address, (uintmax_t)size, (uintmax_t)size, mach_error_string(kr));
}
1974
1975/***************************************/
1976/* Deallocation and reallocation tests */
1977/***************************************/
1978
1979/* Deallocating memory twice succeeds. */
/* Deallocating the same region a second time must also succeed. */
void
test_deallocate_twice()
{
	int pass;

	for (pass = 0; pass < 2; pass++) {
		deallocate();
	}
}
1986
1987/* Deallocated and reallocated memory is zero-filled. Deallocated
1988 * memory is inaccessible since it can be reallocated. */
void
test_write_pattern_deallocate_reallocate_zero_filled()
{
	mach_vm_address_t address = get_vm_address();
	mach_vm_size_t size = get_vm_size();

	/* Dirty the page boundaries so stale data would be detected if
	 * the kernel returned the pages without scrubbing them. */
	write_pattern(page_ends, FALSE, address, size, "page ends");
	logv("Deallocating, then Allocating 0x%jx (%ju) byte%s at address 0x%jx...", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s",
	    (uintmax_t)address);
	deallocate();
	/* VM_FLAGS_FIXED: reallocate at exactly the same address. */
	assert_allocate_success(&address, size, VM_FLAGS_FIXED);
	logv("Memory allocated.");
	/* Freshly allocated memory must read back as all zeroes. */
	verify_pattern(empty, FALSE, address, size, "zero-filled");
	deallocate();
}
2004
2005/********************************/
2006/* Deallocation edge case tests */
2007/********************************/
2008
2009/* Zero size deallocation always succeeds. */
2010void
2011test_deallocate_zero_size_ranges()
2012{
2013 int i;
2014 kern_return_t kr;
2015 vm_map_t this_task = mach_task_self();
2016 mach_vm_address_t addresses[] = {0x0,
2017 0x1,
2018 vm_page_size - 1,
2019 vm_page_size,
2020 vm_page_size + 1,
2021 (mach_vm_address_t)UINT_MAX - vm_page_size + 1,
2022 (mach_vm_address_t)UINT_MAX,
2023 (mach_vm_address_t)UINTMAX_MAX - vm_page_size + 1,
2024 (mach_vm_address_t)UINTMAX_MAX};
2025 int numofaddresses = sizeof(addresses) / sizeof(addresses[0]);
2026
2027 logv("Deallocating 0x0 (0) bytes at various addresses...");
2028 for (i = 0; i < numofaddresses; i++) {
2029 kr = mach_vm_deallocate(this_task, addresses[i], 0);
2030 T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "mach_vm_deallocate() at "
2031 "address 0x%jx unexpectedly failed: %s.",
2032 (uintmax_t)addresses[i], mach_error_string(kr));
2033 }
2034 logv("Deallocations successful.");
2035}
2036
2037/* Deallocation succeeds if the end of the range rounds to 0x0. */
void
test_deallocate_rounded_zero_end_ranges()
{
	int i;
	kern_return_t kr;
	vm_map_t this_task = mach_task_self();
	/* For each range below, address + size rounded up to a page
	 * boundary wraps (mod 2^64) to exactly 0x0 — see the comment
	 * above: such ranges are accepted. */
	struct {
		mach_vm_address_t address;
		mach_vm_size_t size;
	} ranges[] = {
		{0x0, (mach_vm_size_t)UINTMAX_MAX},
		{0x0, (mach_vm_size_t)UINTMAX_MAX - vm_page_size + 2},
		{0x1, (mach_vm_size_t)UINTMAX_MAX - 1},
		{0x1, (mach_vm_size_t)UINTMAX_MAX - vm_page_size + 1},
		{0x2, (mach_vm_size_t)UINTMAX_MAX - 2},
		{0x2, (mach_vm_size_t)UINTMAX_MAX - vm_page_size},
		{(mach_vm_address_t)UINTMAX_MAX - vm_page_size + 1, vm_page_size - 1},
		{(mach_vm_address_t)UINTMAX_MAX - vm_page_size + 1, 1},
		{(mach_vm_address_t)UINTMAX_MAX - 1, 1},
	};
	int numofranges = sizeof(ranges) / sizeof(ranges[0]);

	logv(
	    "Deallocating various memory ranges whose end rounds to "
	    "0x0...");
	for (i = 0; i < numofranges; i++) {
		kr = mach_vm_deallocate(this_task, ranges[i].address, ranges[i].size);
		T_QUIET; T_ASSERT_MACH_SUCCESS(kr,
		    "mach_vm_deallocate() with address 0x%jx and size "
		    "0x%jx (%ju) unexpectedly returned: %s.\n"
		    "Should have succeeded.",
		    (uintmax_t)ranges[i].address, (uintmax_t)ranges[i].size, (uintmax_t)ranges[i].size, mach_error_string(kr));
	}
	logv("Deallocations successful.");
}
2073
2074/* Deallocating a range wrapped around the address space fails. */
2075void
2076test_deallocate_wrapped_around_ranges()
2077{
2078 int i;
2079 kern_return_t kr;
2080 vm_map_t this_task = mach_task_self();
2081 struct {
2082 mach_vm_address_t address;
2083 mach_vm_size_t size;
2084 } ranges[] = {
2085 {0x1, (mach_vm_size_t)UINTMAX_MAX},
2086 {vm_page_size, (mach_vm_size_t)UINTMAX_MAX - vm_page_size + 1},
2087 {(mach_vm_address_t)UINTMAX_MAX - vm_page_size + 1, vm_page_size},
2088 {(mach_vm_address_t)UINTMAX_MAX, 1},
2089 };
2090 int numofranges = sizeof(ranges) / sizeof(ranges[0]);
2091
2092 logv(
2093 "Deallocating various memory ranges wrapping around the "
2094 "address space...");
2095 for (i = 0; i < numofranges; i++) {
2096 kr = mach_vm_deallocate(this_task, ranges[i].address, ranges[i].size);
2097 T_QUIET; T_ASSERT_EQ(kr, KERN_INVALID_ARGUMENT,
2098 "mach_vm_deallocate() with address 0x%jx and size "
2099 "0x%jx (%ju) unexpectedly returned: %s.\n"
2100 "Should have returned: %s.",
2101 (uintmax_t)ranges[i].address, (uintmax_t)ranges[i].size, (uintmax_t)ranges[i].size, mach_error_string(kr),
2102 mach_error_string(KERN_INVALID_ARGUMENT));
2103 }
2104 logv("Returned expected error on each range: %s.", mach_error_string(KERN_INVALID_ARGUMENT));
2105}
2106
2107/* Deallocating in VM_MAP_NULL fails. */
void
test_deallocate_in_null_map()
{
	mach_vm_address_t address = get_vm_address();
	mach_vm_size_t size = get_vm_size();
	int flag = get_address_flag();

	/* The three logv() calls below build up one message — assumes
	 * logv() does not terminate the line; confirm against its
	 * definition. */
	logv("Deallocating 0x%jx (%ju) byte%s", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s");
	/* With VM_FLAGS_ANYWHERE fixtures the address is not
	 * meaningful, so it is omitted from the log. */
	if (!(flag & VM_FLAGS_ANYWHERE)) {
		logv(" at address 0x%jx", (uintmax_t)address);
	}
	logv(" in NULL VM map...");
	/* Sending to the null task port fails with
	 * MACH_SEND_INVALID_DEST before the VM layer is ever reached. */
	assert_mach_return(mach_vm_deallocate(VM_MAP_NULL, address, size), MACH_SEND_INVALID_DEST, "mach_vm_deallocate()");
	logv("Returned expected error: %s.", mach_error_string(MACH_SEND_INVALID_DEST));
}
2123
2124/*****************************/
2125/* mach_vm_read() main tests */
2126/*****************************/
2127
2128/* Read memory of size less than a page has aligned starting
2129 * address. Otherwise, the destination buffer's starting address has
2130 * the same boundary offset as the source region's. */
void
test_read_address_offset()
{
	mach_vm_address_t address = get_vm_address();
	mach_vm_size_t size = get_vm_size();

	/* NOTE(review): the "offset of 1" branch assumes the fixture
	 * created the source region with a boundary offset of exactly
	 * 1 — confirm against the suite's set-up functions. */
	if (size < vm_page_size * 2 || get_address_alignment()) {
		/* Small (< 2 pages) or aligned sources are read into a
		 * page-aligned destination buffer. */
		assert_aligned_address(address);
		logv("Buffer address 0x%jx is aligned as expected.", (uintmax_t)address);
	} else {
		/* (address - 1) page-aligned <=> address has offset 1. */
		T_QUIET; T_ASSERT_EQ(((address - 1) & (vm_page_size - 1)), 0,
		    "Buffer "
		    "address 0x%jx does not have the expected boundary "
		    "offset of 1.",
		    (uintmax_t)address);
		logv(
		    "Buffer address 0x%jx has the expected boundary "
		    "offset of 1.",
		    (uintmax_t)address);
	}
}
2152
2153/* Reading from VM_MAP_NULL fails. */
2154void
2155test_read_null_map()
2156{
2157 mach_vm_address_t address = get_vm_address();
2158 mach_vm_size_t size = get_vm_size();
2159 vm_offset_t read_address;
2160 mach_msg_type_number_t read_size;
2161
2162 logv(
2163 "Reading 0x%jx (%ju) byte%s at address 0x%jx in NULL VM "
2164 "map...",
2165 (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s", (uintmax_t)address);
2166 assert_mach_return(mach_vm_read(VM_MAP_NULL, address, size, &read_address, &read_size), MACH_SEND_INVALID_DEST,
2167 "mach_vm_read()");
2168 logv("Returned expected error: %s.", mach_error_string(MACH_SEND_INVALID_DEST));
2169}
2170
2171/* Reading partially deallocated memory fails. */
2172void
2173test_read_partially_deallocated_range()
2174{
2175 mach_vm_address_t address = get_vm_address();
2176 mach_vm_size_t size = get_vm_size();
2177 mach_vm_address_t mid_point = mach_vm_trunc_page(address + size / 2);
2178 vm_offset_t read_address;
2179 mach_msg_type_number_t read_size;
2180
2181 logv("Deallocating a mid-range page at address 0x%jx...", (uintmax_t)mid_point);
2182 assert_deallocate_success(mid_point, vm_page_size);
2183 logv("Page deallocated.");
2184
2185 logv("Reading 0x%jx (%ju) byte%s at address 0x%jx...", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s",
2186 (uintmax_t)address);
2187 assert_read_return(address, size, &read_address, &read_size, KERN_INVALID_ADDRESS);
2188 logv("Returned expected error: %s.", mach_error_string(KERN_INVALID_ADDRESS));
2189}
2190
2191/* Reading partially read-protected memory fails. */
void
test_read_partially_unreadable_range()
{
	mach_vm_address_t address = get_vm_address();
	mach_vm_size_t size = get_vm_size();
	mach_vm_address_t mid_point = mach_vm_trunc_page(address + size / 2);
	vm_offset_t read_address;
	mach_msg_type_number_t read_size;

	/* For sizes < msg_ool_size_small, vm_map_copyin_common() uses
	 * vm_map_copyin_kernel_buffer() to read in the memory,
	 * returning different errors, see 8182239. */
	kern_return_t kr_expected = (size < vm_page_size * 2) ? KERN_INVALID_ADDRESS : KERN_PROTECTION_FAILURE;

	logv("Read-protecting a mid-range page at address 0x%jx...", (uintmax_t)mid_point);
	/* Setting protection to VM_PROT_WRITE only strips read
	 * permission from the page. */
	T_QUIET; T_ASSERT_MACH_SUCCESS(mach_vm_protect(mach_task_self(), mid_point, vm_page_size, FALSE, VM_PROT_WRITE), "mach_vm_protect()");
	logv("Page read-protected.");

	logv("Reading 0x%jx (%ju) byte%s at address 0x%jx...", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s",
	    (uintmax_t)address);
	assert_read_return(address, size, &read_address, &read_size, kr_expected);
	logv("Returned expected error: %s.", mach_error_string(kr_expected));
}
2215
2216/**********************************/
2217/* mach_vm_read() edge case tests */
2218/**********************************/
2219
2220void
2221read_edge_size(mach_vm_size_t size, kern_return_t expected_kr)
2222{
2223 int i;
2224 kern_return_t kr;
2225 vm_map_t this_task = mach_task_self();
2226 mach_vm_address_t addresses[] = {vm_page_size - 1,
2227 vm_page_size,
2228 vm_page_size + 1,
2229 (mach_vm_address_t)UINT_MAX - vm_page_size + 1,
2230 (mach_vm_address_t)UINT_MAX,
2231 (mach_vm_address_t)UINTMAX_MAX - vm_page_size + 1,
2232 (mach_vm_address_t)UINTMAX_MAX};
2233 int numofaddresses = sizeof(addresses) / sizeof(addresses[0]);
2234 vm_offset_t read_address;
2235 mach_msg_type_number_t read_size;
2236
2237 logv("Reading 0x%jx (%ju) bytes at various addresses...", (uintmax_t)size, (uintmax_t)size);
2238 for (i = 0; i < numofaddresses; i++) {
2239 kr = mach_vm_read(this_task, addresses[i], size, &read_address, &read_size);
2240 T_QUIET; T_ASSERT_EQ(kr, expected_kr,
2241 "mach_vm_read() at "
2242 "address 0x%jx unexpectedly returned: %s.\n"
2243 "Should have returned: %s.",
2244 (uintmax_t)addresses[i], mach_error_string(kr), mach_error_string(expected_kr));
2245 }
2246 logv(
2247 "mach_vm_read() returned expected value in each case: "
2248 "%s.",
2249 mach_error_string(expected_kr));
2250}
2251
2252/* Reading 0 bytes always succeeds. */
2253void
2254test_read_zero_size()
2255{
2256 read_edge_size(0, KERN_SUCCESS);
2257}
2258
2259/* Reading 4GB or higher always fails. */
2260void
2261test_read_invalid_large_size()
2262{
2263 read_edge_size((mach_vm_size_t)UINT_MAX + 1, KERN_INVALID_ARGUMENT);
2264}
2265
2266/* Reading a range wrapped around the address space fails. */
void
test_read_wrapped_around_ranges()
{
	int i;
	kern_return_t kr;
	vm_map_t this_task = mach_task_self();
	/* For each range below, address + size wraps (mod 2^64) to 0x0,
	 * i.e. the range crosses the top of the address space. */
	struct {
		mach_vm_address_t address;
		mach_vm_size_t size;
	} ranges[] = {
		{(mach_vm_address_t)(UINTMAX_MAX - UINT_MAX + 1), (mach_vm_size_t)UINT_MAX},
		{(mach_vm_address_t)(UINTMAX_MAX - UINT_MAX + vm_page_size), (mach_vm_size_t)(UINT_MAX - vm_page_size + 1)},
		{(mach_vm_address_t)UINTMAX_MAX - vm_page_size + 1, vm_page_size},
		{(mach_vm_address_t)UINTMAX_MAX, 1},
	};
	int numofranges = sizeof(ranges) / sizeof(ranges[0]);
	vm_offset_t read_address;
	mach_msg_type_number_t read_size;

	logv(
	    "Reading various memory ranges wrapping around the "
	    "address space...");
	for (i = 0; i < numofranges; i++) {
		kr = mach_vm_read(this_task, ranges[i].address, ranges[i].size, &read_address, &read_size);
		T_QUIET; T_ASSERT_EQ(kr, KERN_INVALID_ADDRESS,
		    "mach_vm_read() at address 0x%jx with size "
		    "0x%jx (%ju) unexpectedly returned: %s.\n"
		    "Should have returned: %s.",
		    (uintmax_t)ranges[i].address, (uintmax_t)ranges[i].size, (uintmax_t)ranges[i].size, mach_error_string(kr),
		    mach_error_string(KERN_INVALID_ADDRESS));
	}
	logv("Returned expected error on each range: %s.", mach_error_string(KERN_INVALID_ADDRESS));
}
2300
2301/********************************/
2302/* mach_vm_read() pattern tests */
2303/********************************/
2304
2305/* Write a pattern on pre-allocated memory, read into a buffer and
2306 * verify the pattern on the buffer. */
void
write_read_verify_pattern(address_filter_t filter, boolean_t reversed, const char * pattern_name)
{
	mach_vm_address_t address = get_vm_address();

	write_pattern(filter, reversed, address, get_vm_size(), pattern_name);
	/* read_deallocate() presumably reads the region into a fresh
	 * buffer, deallocates the original, and repoints the tracked
	 * address/size at the buffer — confirm against the helper. */
	read_deallocate();
	/* Getting the address and size of the read buffer. */
	mach_vm_address_t read_address = get_vm_address();
	mach_vm_size_t read_size = get_vm_size();
	logv(
	    "Verifying %s pattern on buffer of "
	    "address 0x%jx and size 0x%jx (%ju)...",
	    pattern_name, (uintmax_t)read_address, (uintmax_t)read_size, (uintmax_t)read_size);
	/* The original base `address` is passed so expected values can
	 * be computed relative to the source region rather than the
	 * buffer — confirm against filter_addresses_do_else(). */
	filter_addresses_do_else(filter, reversed, read_address, read_size, verify_address, read_zero, address);
	logv("Pattern verified on destination buffer.");
}
2324
2325void
2326test_read_address_filled()
2327{
2328 write_read_verify_pattern(empty, TRUE, "address-filled");
2329}
2330
2331void
2332test_read_checkerboard()
2333{
2334 write_read_verify_pattern(checkerboard, FALSE, "checkerboard");
2335}
2336
2337void
2338test_read_reverse_checkerboard()
2339{
2340 write_read_verify_pattern(checkerboard, TRUE, "reverse checkerboard");
2341}
2342
2343/***********************************/
2344/* mach_vm_write() edge case tests */
2345/***********************************/
2346
2347/* Writing in VM_MAP_NULL fails. */
2348void
2349test_write_null_map()
2350{
2351 mach_vm_address_t address = get_vm_address();
2352 vm_offset_t data = (vm_offset_t)get_buffer_address();
2353 mach_msg_type_number_t buffer_size = (mach_msg_type_number_t)get_buffer_size();
2354
2355 logv(
2356 "Writing buffer of address 0x%jx and size 0x%jx (%ju), on "
2357 "memory at address 0x%jx in NULL VM MAP...",
2358 (uintmax_t)data, (uintmax_t)buffer_size, (uintmax_t)buffer_size, (uintmax_t)address);
2359 assert_mach_return(mach_vm_write(VM_MAP_NULL, address, data, buffer_size), MACH_SEND_INVALID_DEST, "mach_vm_write()");
2360 logv("Returned expected error: %s.", mach_error_string(MACH_SEND_INVALID_DEST));
2361}
2362
2363/* Writing 0 bytes always succeeds. */
void
test_write_zero_size()
{
	/* Shrink the tracked buffer to zero length, then perform the
	 * (empty) write, which must succeed. */
	set_buffer_size(0);
	write_buffer();
}
2370
2371/*****************************************/
2372/* mach_vm_write() inaccessibility tests */
2373/*****************************************/
2374
2375/* Writing a partially deallocated buffer fails. */
2376void
2377test_write_partially_deallocated_buffer()
2378{
2379 mach_vm_address_t address = get_vm_address();
2380 vm_offset_t data = (vm_offset_t)get_buffer_address();
2381 mach_msg_type_number_t buffer_size = (mach_msg_type_number_t)get_buffer_size();
2382 mach_vm_address_t buffer_mid_point = (mach_vm_address_t)mach_vm_trunc_page(data + buffer_size / 2);
2383
2384 logv(
2385 "Deallocating a mid-range buffer page at address "
2386 "0x%jx...",
2387 (uintmax_t)buffer_mid_point);
2388 assert_deallocate_success(buffer_mid_point, vm_page_size);
2389 logv("Page deallocated.");
2390
2391 logv(
2392 "Writing buffer of address 0x%jx and size 0x%jx (%ju), on "
2393 "memory at address 0x%jx...",
2394 (uintmax_t)data, (uintmax_t)buffer_size, (uintmax_t)buffer_size, (uintmax_t)address);
2395 assert_write_return(address, data, buffer_size, MACH_SEND_INVALID_MEMORY);
2396 logv("Returned expected error: %s.", mach_error_string(MACH_SEND_INVALID_MEMORY));
2397}
2398
2399/* Writing a partially read-protected buffer fails. */
void
test_write_partially_unreadable_buffer()
{
	mach_vm_address_t address = get_vm_address();
	vm_offset_t data = (vm_offset_t)get_buffer_address();
	mach_msg_type_number_t buffer_size = (mach_msg_type_number_t)get_buffer_size();
	mach_vm_address_t buffer_mid_point = (mach_vm_address_t)mach_vm_trunc_page(data + buffer_size / 2);

	logv(
	    "Read-protecting a mid-range buffer page at address "
	    "0x%jx...",
	    (uintmax_t)buffer_mid_point);
	/* Setting protection to VM_PROT_WRITE only strips read
	 * permission from the source buffer page. */
	T_QUIET; T_ASSERT_MACH_SUCCESS(mach_vm_protect(mach_task_self(), buffer_mid_point, vm_page_size, FALSE, VM_PROT_WRITE),
	    "mach_vm_protect()");
	logv("Page read-protected.");

	/* The kernel cannot copy in the unreadable source page, so the
	 * send fails with MACH_SEND_INVALID_MEMORY. */
	logv(
	    "Writing buffer of address 0x%jx and size 0x%jx (%ju), on "
	    "memory at address 0x%jx...",
	    (uintmax_t)data, (uintmax_t)buffer_size, (uintmax_t)buffer_size, (uintmax_t)address);
	assert_write_return(address, data, buffer_size, MACH_SEND_INVALID_MEMORY);
	logv("Returned expected error: %s.", mach_error_string(MACH_SEND_INVALID_MEMORY));
}
2423
2424/* Writing on partially deallocated memory fails. */
2425void
2426test_write_on_partially_deallocated_range()
2427{
2428 mach_vm_address_t address = get_vm_address();
2429 mach_vm_address_t start = mach_vm_trunc_page(address);
2430 vm_offset_t data = (vm_offset_t)get_buffer_address();
2431 mach_msg_type_number_t buffer_size = (mach_msg_type_number_t)get_buffer_size();
2432
2433 logv(
2434 "Deallocating the first destination page at address "
2435 "0x%jx...",
2436 (uintmax_t)start);
2437 assert_deallocate_success(start, vm_page_size);
2438 logv("Page deallocated.");
2439
2440 logv(
2441 "Writing buffer of address 0x%jx and size 0x%jx (%ju), on "
2442 "memory at address 0x%jx...",
2443 (uintmax_t)data, (uintmax_t)buffer_size, (uintmax_t)buffer_size, (uintmax_t)address);
2444 assert_write_return(address, data, buffer_size, KERN_INVALID_ADDRESS);
2445 logv("Returned expected error: %s.", mach_error_string(KERN_INVALID_ADDRESS));
2446}
2447
2448/* Writing on partially unwritable memory fails. */
void
test_write_on_partially_unwritable_range()
{
	mach_vm_address_t address = get_vm_address();
	mach_vm_address_t start = mach_vm_trunc_page(address);
	vm_offset_t data = (vm_offset_t)get_buffer_address();
	mach_msg_type_number_t buffer_size = (mach_msg_type_number_t)get_buffer_size();

	/* For sizes < msg_ool_size_small,
	 * vm_map_copy_overwrite_nested() uses
	 * vm_map_copyout_kernel_buffer() to read in the memory,
	 * returning different errors, see 8217123. */
	kern_return_t kr_expected = (buffer_size < vm_page_size * 2) ? KERN_INVALID_ADDRESS : KERN_PROTECTION_FAILURE;

	logv(
	    "Write-protecting the first destination page at address "
	    "0x%jx...",
	    (uintmax_t)start);
	/* Setting protection to VM_PROT_READ only strips write
	 * permission from the destination page. */
	T_QUIET; T_ASSERT_MACH_SUCCESS(mach_vm_protect(mach_task_self(), start, vm_page_size, FALSE, VM_PROT_READ), "mach_vm_protect()");
	logv("Page write-protected.");

	logv(
	    "Writing buffer of address 0x%jx and size 0x%jx (%ju), on "
	    "memory at address 0x%jx...",
	    (uintmax_t)data, (uintmax_t)buffer_size, (uintmax_t)buffer_size, (uintmax_t)address);
	assert_write_return(address, data, buffer_size, kr_expected);
	logv("Returned expected error: %s.", mach_error_string(kr_expected));
}
2477
2478/*********************************/
2479/* mach_vm_write() pattern tests */
2480/*********************************/
2481
2482/* Verify that a zero-filled buffer and destination memory are still
2483 * zero-filled after writing. */
void
test_zero_filled_write()
{
	/* Truncate the start and round the size up so the check covers
	 * every whole page backing the (possibly unaligned) region and
	 * buffer. */
	verify_pattern(empty, FALSE, mach_vm_trunc_page(get_vm_address()), round_page_kernel(get_vm_size() + 1), "zero-filled");
	verify_pattern(empty, FALSE, mach_vm_trunc_page(get_buffer_address()),
	    round_page_kernel(get_buffer_size() + get_buffer_offset()), "zero-filled");
}
2491
2492/* Write a pattern on a buffer, write the buffer into some destination
2493 * memory, and verify the pattern on both buffer and destination. */
2494void
2495pattern_write(address_filter_t filter, boolean_t reversed, const char * pattern_name)
2496{
2497 mach_vm_address_t address = get_vm_address();
2498 mach_vm_size_t size = get_vm_size();
2499 mach_vm_address_t buffer_address = get_buffer_address();
2500 mach_vm_size_t buffer_size = get_buffer_size();
2501
2502 write_pattern(filter, reversed, buffer_address, buffer_size, pattern_name);
2503 write_buffer();
2504 verify_pattern(filter, reversed, buffer_address, buffer_size, pattern_name);
2505 logv(
2506 "Verifying %s pattern on destination of "
2507 "address 0x%jx and size 0x%jx (%ju)...",
2508 pattern_name, (uintmax_t)address, (uintmax_t)buffer_size, (uintmax_t)size);
2509 filter_addresses_do_else(filter, reversed, address, buffer_size, verify_address, read_zero, buffer_address);
2510 logv("Pattern verified on destination.");
2511}
2512
2513void
2514test_address_filled_write()
2515{
2516 pattern_write(empty, TRUE, "address-filled");
2517}
2518
2519void
2520test_checkerboard_write()
2521{
2522 pattern_write(checkerboard, FALSE, "checkerboard");
2523}
2524
2525void
2526test_reverse_checkerboard_write()
2527{
2528 pattern_write(checkerboard, TRUE, "reverse checkerboard");
2529}
2530
2531/**********************************/
2532/* mach_vm_copy() edge case tests */
2533/**********************************/
2534
2535/* Copying in VM_MAP_NULL fails. */
2536void
2537test_copy_null_map()
2538{
2539 mach_vm_address_t source = get_vm_address();
2540 mach_vm_address_t dest = get_buffer_address();
2541 mach_msg_type_number_t size = (mach_msg_type_number_t)get_buffer_size();
2542
2543 logv(
2544 "Copying buffer of address 0x%jx and size 0x%jx (%ju), on "
2545 "memory at address 0x%jx in NULL VM MAP...",
2546 (uintmax_t)dest, (uintmax_t)size, (uintmax_t)size, (uintmax_t)source);
2547 assert_mach_return(mach_vm_copy(VM_MAP_NULL, source, size, dest), MACH_SEND_INVALID_DEST, "mach_vm_copy()");
2548 logv("Returned expected error: %s.", mach_error_string(MACH_SEND_INVALID_DEST));
2549}
2550
2551void
2552copy_edge_size(mach_vm_size_t size, kern_return_t expected_kr)
2553{
2554 int i;
2555 kern_return_t kr;
2556 vm_map_t this_task = mach_task_self();
2557 mach_vm_address_t addresses[] = {0x0,
2558 0x1,
2559 vm_page_size - 1,
2560 vm_page_size,
2561 vm_page_size + 1,
2562 (mach_vm_address_t)UINT_MAX - vm_page_size + 1,
2563 (mach_vm_address_t)UINT_MAX,
2564 (mach_vm_address_t)UINTMAX_MAX - vm_page_size + 1,
2565 (mach_vm_address_t)UINTMAX_MAX};
2566 int numofaddresses = sizeof(addresses) / sizeof(addresses[0]);
2567 mach_vm_address_t dest = 0;
2568
2569 logv("Allocating 0x%jx (%ju) byte%s...", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s");
2570 assert_allocate_success(&dest, 4096, VM_FLAGS_ANYWHERE);
2571 logv("Copying 0x%jx (%ju) bytes at various addresses...", (uintmax_t)size, (uintmax_t)size);
2572 for (i = 0; i < numofaddresses; i++) {
2573 kr = mach_vm_copy(this_task, addresses[i], size, dest);
2574 T_QUIET; T_ASSERT_EQ(kr, expected_kr,
2575 "mach_vm_copy() at "
2576 "address 0x%jx unexpectedly returned: %s.\n"
2577 "Should have returned: %s.",
2578 (uintmax_t)addresses[i], mach_error_string(kr), mach_error_string(expected_kr));
2579 }
2580 logv(
2581 "mach_vm_copy() returned expected value in each case: "
2582 "%s.",
2583 mach_error_string(expected_kr));
2584
2585 deallocate_range(dest, 4096);
2586}
2587
2588/* Copying 0 bytes always succeeds. */
2589void
2590test_copy_zero_size()
2591{
2592 copy_edge_size(0, KERN_SUCCESS);
2593}
2594
/* Copying a huge (UINT_MAX - 1 byte) range fails: no such source
 * range is mapped, so mach_vm_copy() returns KERN_INVALID_ADDRESS. */
2596void
2597test_copy_invalid_large_size()
2598{
2599 copy_edge_size((mach_vm_size_t)UINT_MAX - 1, KERN_INVALID_ADDRESS);
2600}
2601
/* Copying a range wrapped around the address space fails. */
void
test_copy_wrapped_around_ranges()
{
	int i;
	kern_return_t kr;
	vm_map_t this_task = mach_task_self();
	/* For each range below, address + size wraps (mod 2^64) to 0x0,
	 * i.e. the range crosses the top of the address space. */
	struct {
		mach_vm_address_t address;
		mach_vm_size_t size;
	} ranges[] = {
		{(mach_vm_address_t)(UINTMAX_MAX - UINT_MAX + 1), (mach_vm_size_t)UINT_MAX},
		{(mach_vm_address_t)(UINTMAX_MAX - UINT_MAX + vm_page_size), (mach_vm_size_t)(UINT_MAX - vm_page_size + 1)},
		{(mach_vm_address_t)UINTMAX_MAX - vm_page_size + 1, vm_page_size},
		{(mach_vm_address_t)UINTMAX_MAX, 1},
	};
	int numofranges = sizeof(ranges) / sizeof(ranges[0]);
	mach_vm_address_t dest = 0;

	/* A valid scratch destination, so any failure is attributable
	 * to the source range alone. */
	logv("Allocating 0x1000 (4096) bytes...");
	assert_allocate_success(&dest, 4096, VM_FLAGS_ANYWHERE);

	logv(
	    "Copying various memory ranges wrapping around the "
	    "address space...");
	for (i = 0; i < numofranges; i++) {
		kr = mach_vm_copy(this_task, ranges[i].address, ranges[i].size, dest);
		T_QUIET; T_ASSERT_EQ(kr, KERN_INVALID_ADDRESS,
		    "mach_vm_copy() at address 0x%jx with size "
		    "0x%jx (%ju) unexpectedly returned: %s.\n"
		    "Should have returned: %s.",
		    (uintmax_t)ranges[i].address, (uintmax_t)ranges[i].size, (uintmax_t)ranges[i].size, mach_error_string(kr),
		    mach_error_string(KERN_INVALID_ADDRESS));
	}
	logv("Returned expected error on each range: %s.", mach_error_string(KERN_INVALID_ADDRESS));

	deallocate_range(dest, 4096);
}
2640
2641/********************************/
2642/* mach_vm_copy() pattern tests */
2643/********************************/
2644
2645/* Write a pattern on pre-allocated region, copy into another region
2646 * and verify the pattern in the region. */
void
write_copy_verify_pattern(address_filter_t filter, boolean_t reversed, const char * pattern_name)
{
	mach_vm_address_t source = get_vm_address();
	mach_vm_size_t src_size = get_vm_size();
	write_pattern(filter, reversed, source, src_size, pattern_name);
	/* Getting the address and size of the dest region */
	mach_vm_address_t dest = get_buffer_address();
	mach_vm_size_t dst_size = get_buffer_size();

	/* Only dst_size bytes are copied and verified; the source
	 * region may be larger than the destination. */
	logv(
	    "Copying memory region of address 0x%jx and size 0x%jx (%ju), on "
	    "memory at address 0x%jx...",
	    (uintmax_t)source, (uintmax_t)dst_size, (uintmax_t)dst_size, (uintmax_t)dest);
	assert_copy_success(source, dst_size, dest);
	logv(
	    "Verifying %s pattern in region of "
	    "address 0x%jx and size 0x%jx (%ju)...",
	    pattern_name, (uintmax_t)dest, (uintmax_t)dst_size, (uintmax_t)dst_size);
	/* The source base is passed so expected values are computed
	 * relative to it — confirm against filter_addresses_do_else(). */
	filter_addresses_do_else(filter, reversed, dest, dst_size, verify_address, read_zero, source);
	logv("Pattern verified on destination region.");
}
2669
2670void
2671test_copy_address_filled()
2672{
2673 write_copy_verify_pattern(empty, TRUE, "address-filled");
2674}
2675
2676void
2677test_copy_checkerboard()
2678{
2679 write_copy_verify_pattern(checkerboard, FALSE, "checkerboard");
2680}
2681
2682void
2683test_copy_reverse_checkerboard()
2684{
2685 write_copy_verify_pattern(checkerboard, TRUE, "reverse checkerboard");
2686}
2687
/* Verify that a zero-filled source and destination memory are still
 * zero-filled after copying. */
void
test_zero_filled_copy_dest()
{
	/* Truncate the start and round the size up so the check covers
	 * every whole page backing the (possibly unaligned) region and
	 * buffer. */
	verify_pattern(empty, FALSE, mach_vm_trunc_page(get_vm_address()), round_page_kernel(get_vm_size() + 1), "zero-filled");
	verify_pattern(empty, FALSE, mach_vm_trunc_page(get_buffer_address()),
	    round_page_kernel(get_buffer_size() + get_buffer_offset()), "zero-filled");
}
2697
2698/****************************************/
2699/* mach_vm_copy() inaccessibility tests */
2700/****************************************/
2701
2702/* Copying partially deallocated memory fails. */
2703void
2704test_copy_partially_deallocated_range()
2705{
2706 mach_vm_address_t source = get_vm_address();
2707 mach_vm_size_t size = get_vm_size();
2708 mach_vm_address_t mid_point = mach_vm_trunc_page(source + size / 2);
2709 mach_vm_address_t dest = 0;
2710
2711 logv("Deallocating a mid-range page at address 0x%jx...", (uintmax_t)mid_point);
2712 assert_deallocate_success(mid_point, vm_page_size);
2713 logv("Page deallocated.");
2714
2715 logv("Copying 0x%jx (%ju) byte%s at address 0x%jx...", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s",
2716 (uintmax_t)source);
2717
2718 assert_allocate_copy_return(source, size, &dest, KERN_INVALID_ADDRESS);
2719
2720 logv("Returned expected error: %s.", mach_error_string(KERN_INVALID_ADDRESS));
2721
2722 deallocate_range(dest, size);
2723}
2724
2725/* Copy partially read-protected memory fails. */
void
test_copy_partially_unreadable_range()
{
	mach_vm_address_t source = get_vm_address();
	mach_vm_size_t size = get_vm_size();
	mach_vm_address_t mid_point = mach_vm_trunc_page(source + size / 2);
	mach_vm_address_t dest = 0;

	/* For sizes < 1 page, vm_map_copyin_common() uses
	 * vm_map_copyin_kernel_buffer() to read in the memory,
	 * returning different errors, see 8182239.
	 * (Note this threshold is one page, whereas the analogous read
	 * test uses two pages.) */
	kern_return_t kr_expected = (size < vm_page_size) ? KERN_INVALID_ADDRESS : KERN_PROTECTION_FAILURE;

	logv("Read-protecting a mid-range page at address 0x%jx...", (uintmax_t)mid_point);
	/* Setting protection to VM_PROT_WRITE only strips read
	 * permission from the page. */
	T_QUIET; T_ASSERT_MACH_SUCCESS(mach_vm_protect(mach_task_self(), mid_point, vm_page_size, FALSE, VM_PROT_WRITE), "mach_vm_protect()");
	logv("Page read-protected.");

	logv("Copying 0x%jx (%ju) byte%s at address 0x%jx...", (uintmax_t)size, (uintmax_t)size, (size == 1) ? "" : "s",
	    (uintmax_t)source);
	assert_allocate_copy_return(source, size, &dest, kr_expected);
	logv("Returned expected error: %s.", mach_error_string(kr_expected));

	deallocate_range(dest, size);
}
2750
2751/* Copying to a partially deallocated region fails. */
void
test_copy_dest_partially_deallocated_region()
{
	mach_vm_address_t dest = get_vm_address();
	mach_vm_address_t source = get_buffer_address();
	mach_msg_type_number_t size = (mach_msg_type_number_t)get_buffer_size();
	/* Despite its name, this is a page in the middle of the
	 * *destination* region: it is computed from `dest`. */
	mach_vm_address_t source_mid_point = (mach_vm_address_t)mach_vm_trunc_page(dest + size / 2);
	/* NOTE(review): "__MAC_OX_X_VERSION_MIN_REQUIRED" appears to be
	 * a typo for "__MAC_OS_X_VERSION_MIN_REQUIRED"; the misspelled
	 * macro is undefined, evaluates to 0 in the #if, and the bypass
	 * branch below is therefore always taken. Confirm whether the
	 * permanent bypass is intended (rdar://problem/12190999) before
	 * correcting the spelling. */
#if __MAC_OX_X_VERSION_MIN_REQUIRED > 1080
	logv(
	    "Deallocating a mid-range source page at address "
	    "0x%jx...",
	    (uintmax_t)source_mid_point);
	assert_deallocate_success(source_mid_point, vm_page_size);
	logv("Page deallocated.");

	logv(
	    "Copying region of address 0x%jx and size 0x%jx (%ju), on "
	    "memory at address 0x%jx...",
	    (uintmax_t)source, (uintmax_t)size, (uintmax_t)size, (uintmax_t)dest);
	assert_copy_return(source, size, dest, KERN_INVALID_ADDRESS);
	logv("Returned expected error: %s.", mach_error_string(KERN_INVALID_ADDRESS));
#else
	logv(
	    "Bypassing partially deallocated region test "
	    "(See <rdar://problem/12190999>)");
#endif /* __MAC_OX_X_VERSION_MIN_REQUIRED > 1080 */
}
2779
/* Copying from a partially deallocated region fails.
 * A page in the middle of the source range is deallocated first, so
 * mach_vm_copy() must report KERN_INVALID_ADDRESS. */
void
test_copy_source_partially_deallocated_region()
{
	/* Fixture-provided source region and destination buffer. */
	mach_vm_address_t source = get_vm_address();
	mach_vm_address_t dest = get_buffer_address();
	mach_msg_type_number_t size = (mach_msg_type_number_t)get_buffer_size();
	/* Page-aligned address roughly in the middle of the source range. */
	mach_vm_address_t source_mid_point = (mach_vm_address_t)mach_vm_trunc_page(source + size / 2);

	logv(
	    "Deallocating a mid-range source page at address "
	    "0x%jx...",
	    (uintmax_t)source_mid_point);
	assert_deallocate_success(source_mid_point, vm_page_size);
	logv("Page deallocated.");

	logv(
	    "Copying region of address 0x%jx and size 0x%jx (%ju), on "
	    "memory at address 0x%jx...",
	    (uintmax_t)source, (uintmax_t)size, (uintmax_t)size, (uintmax_t)dest);
	assert_copy_return(source, size, dest, KERN_INVALID_ADDRESS);
	logv("Returned expected error: %s.", mach_error_string(KERN_INVALID_ADDRESS));
}
2803
/* Copying from a partially read-protected region fails. */
void
test_copy_source_partially_unreadable_region()
{
	mach_vm_address_t source = get_vm_address();
	mach_vm_address_t dest = get_buffer_address();
	mach_msg_type_number_t size = (mach_msg_type_number_t)get_buffer_size();
	/* Page-aligned address roughly in the middle of the source range. */
	mach_vm_address_t mid_point = (mach_vm_address_t)mach_vm_trunc_page(source + size / 2);
	/* Small copies are serviced through a kernel buffer and fault
	 * with KERN_INVALID_ADDRESS rather than KERN_PROTECTION_FAILURE
	 * — presumably the same msg_ool_size_small behavior documented
	 * in the partially-unwritable-range tests below (see 8217123);
	 * TODO confirm the 2-page threshold matches msg_ool_size_small. */
	kern_return_t kr = (size < vm_page_size * 2) ? KERN_INVALID_ADDRESS : KERN_PROTECTION_FAILURE;

	logv(
	    "Read-protecting a mid-range buffer page at address "
	    "0x%jx...",
	    (uintmax_t)mid_point);
	/* VM_PROT_WRITE only: read permission is removed from the page. */
	T_QUIET; T_ASSERT_MACH_SUCCESS(mach_vm_protect(mach_task_self(), mid_point, vm_page_size, FALSE, VM_PROT_WRITE), "mach_vm_protect()");
	logv("Page read-protected.");

	logv(
	    "Copying region at address 0x%jx and size 0x%jx (%ju), on "
	    "memory at address 0x%jx...",
	    (uintmax_t)source, (uintmax_t)size, (uintmax_t)size, (uintmax_t)dest);

	assert_copy_return(source, size, dest, kr);
	logv("Returned expected error: %s.", mach_error_string(kr));
}
2829
2830/* Copying to a partially write-protected region fails. */
2831void
2832test_copy_dest_partially_unwriteable_region()
2833{
2834 kern_return_t kr;
2835 mach_vm_address_t dest = get_vm_address();
2836 mach_vm_address_t source = get_buffer_address();
2837 mach_msg_type_number_t size = (mach_msg_type_number_t)get_buffer_size();
2838 mach_vm_address_t mid_point = (mach_vm_address_t)mach_vm_trunc_page(dest + size / 2);
2839
2840#if __MAC_OX_X_VERSION_MIN_REQUIRED > 1080
2841 logv(
2842 "Read-protecting a mid-range buffer page at address "
2843 "0x%jx...",
2844 (uintmax_t)mid_point);
2845 T_QUIET; T_ASSERT_MACH_SUCCESS(mach_vm_protect(mach_task_self(), mid_point, vm_page_size, FALSE, VM_PROT_READ), "mach_vm_protect()");
2846 logv("Page read-protected.");
2847 logv(
2848 "Copying region at address 0x%jx and size 0x%jx (%ju), on "
2849 "memory at address 0x%jx...",
2850 (uintmax_t)source, (uintmax_t)size, (uintmax_t)size, (uintmax_t)dest);
2851 if (size >= vm_page_size) {
2852 kr = KERN_PROTECTION_FAILURE;
2853 } else {
2854 kr = KERN_INVALID_ADDRESS;
2855 }
2856 assert_copy_return(source, size, dest, kr);
2857 logv("Returned expected error: %s.", mach_error_string(kr));
2858#else
2859 logv(
2860 "Bypassing partially unwriteable region test "
2861 "(See <rdar://problem/12190999>)");
2862#endif /* __MAC_OX_X_VERSION_MIN_REQUIRED > 1080 */
2863}
2864
/* Copying on partially deallocated memory fails.
 * Here the FIRST page of the source range is deallocated. */
void
test_copy_source_on_partially_deallocated_range()
{
	mach_vm_address_t source = get_vm_address();
	mach_vm_address_t dest = get_buffer_address();
	/* First page boundary at or below the source address. */
	mach_vm_address_t start = mach_vm_trunc_page(source);
	mach_msg_type_number_t size = (mach_msg_type_number_t)get_buffer_size();

	logv(
	    "Deallocating the first source page at address "
	    "0x%jx...",
	    (uintmax_t)start);
	assert_deallocate_success(start, vm_page_size);
	logv("Page deallocated.");

	logv(
	    "Writing buffer of address 0x%jx and size 0x%jx (%ju), on "
	    "memory at address 0x%jx...",
	    (uintmax_t)dest, (uintmax_t)size, (uintmax_t)size, (uintmax_t)source);
	assert_copy_return(source, size, dest, KERN_INVALID_ADDRESS);
	logv("Returned expected error: %s.", mach_error_string(KERN_INVALID_ADDRESS));
}
2888
/* Copying on partially deallocated memory fails.
 * Here the FIRST page of the destination range is deallocated. */
void
test_copy_dest_on_partially_deallocated_range()
{
	mach_vm_address_t source = get_vm_address();
	mach_vm_address_t dest = get_buffer_address();
	/* First page boundary at or below the destination address. */
	mach_vm_address_t start = mach_vm_trunc_page(dest);
	mach_msg_type_number_t size = (mach_msg_type_number_t)get_buffer_size();

	logv(
	    "Deallocating the first destination page at address "
	    "0x%jx...",
	    (uintmax_t)start);
	assert_deallocate_success(start, vm_page_size);
	logv("Page deallocated.");

	logv(
	    "Writing buffer of address 0x%jx and size 0x%jx (%ju), on "
	    "memory at address 0x%jx...",
	    (uintmax_t)dest, (uintmax_t)size, (uintmax_t)size, (uintmax_t)source);
	assert_copy_return(source, size, dest, KERN_INVALID_ADDRESS);
	logv("Returned expected error: %s.", mach_error_string(KERN_INVALID_ADDRESS));
}
2912
/* Copying on partially unwritable memory fails.
 * The first destination page is write-protected before the copy. */
void
test_copy_dest_on_partially_unwritable_range()
{
	mach_vm_address_t source = get_vm_address();
	mach_vm_address_t dest = get_buffer_address();
	mach_vm_address_t start = mach_vm_trunc_page(dest);
	mach_msg_type_number_t size = (mach_msg_type_number_t)get_buffer_size();

	/* For sizes < msg_ool_size_small,
	 * vm_map_copy_overwrite_nested() uses
	 * vm_map_copyout_kernel_buffer() to read in the memory,
	 * returning different errors, see 8217123. */
	kern_return_t kr_expected = (size < vm_page_size * 2) ? KERN_INVALID_ADDRESS : KERN_PROTECTION_FAILURE;

	logv(
	    "Write-protecting the first destination page at address "
	    "0x%jx...",
	    (uintmax_t)start);
	/* VM_PROT_READ only: write permission is removed from the page. */
	T_QUIET; T_ASSERT_MACH_SUCCESS(mach_vm_protect(mach_task_self(), start, vm_page_size, FALSE, VM_PROT_READ), "mach_vm_protect()");
	logv("Page write-protected.");

	logv(
	    "Writing buffer of address 0x%jx and size 0x%jx (%ju), on "
	    "memory at address 0x%jx...",
	    (uintmax_t)dest, (uintmax_t)size, (uintmax_t)size, (uintmax_t)source);
	assert_copy_return(source, size, dest, kr_expected);
	logv("Returned expected error: %s.", mach_error_string(kr_expected));
}
2942
2943/* Copying on partially unreadable memory fails. */
2944void
2945test_copy_source_on_partially_unreadable_range()
2946{
2947 mach_vm_address_t source = get_vm_address();
2948 mach_vm_address_t dest = get_buffer_address();
2949 mach_vm_address_t start = mach_vm_trunc_page(source);
2950 mach_msg_type_number_t size = (mach_msg_type_number_t)get_buffer_size();
2951
2952 /* For sizes < msg_ool_size_small,
2953 * vm_map_copy_overwrite_nested() uses
2954 * vm_map_copyout_kernel_buffer() to read in the memory,
2955 * returning different errors, see 8217123. */
2956 kern_return_t kr_expected = (size < vm_page_size * 2) ? KERN_INVALID_ADDRESS : KERN_PROTECTION_FAILURE;
2957
2958 logv(
2959 "Read-protecting the first destination page at address "
2960 "0x%jx...",
2961 (uintmax_t)start);
2962 T_QUIET; T_ASSERT_MACH_SUCCESS(mach_vm_protect(mach_task_self(), start, vm_page_size, FALSE, VM_PROT_WRITE), "mach_vm_protect()");
2963 logv("Page read-protected.");
2964
2965 logv(
2966 "Writing buffer of address 0x%jx and size 0x%jx (%ju), on "
2967 "memory at address 0x%jx...",
2968 (uintmax_t)dest, (uintmax_t)size, (uintmax_t)size, (uintmax_t)source);
2969 assert_copy_return(source, size, dest, kr_expected);
2970 logv("Returned expected error: %s.", mach_error_string(kr_expected));
2971}
2972
2973/********************************/
2974/* mach_vm_protect() main tests */
2975/********************************/
2976
/* The region is zero-filled even when the verified range is widened
 * to page boundaries: start truncated down, end rounded up past the
 * nominal size. */
void
test_zero_filled_extended()
{
	verify_pattern(empty, FALSE, mach_vm_trunc_page(get_vm_address()), round_page_kernel(get_vm_size() + 1), "zero-filled");
}
2982
/* Allocated region is still zero-filled after read-protecting it and
 * then restoring read-access. */
void
test_zero_filled_readprotect()
{
	mach_vm_address_t address = get_vm_address();
	mach_vm_size_t size = get_vm_size();

	logv("Setting read access on 0x%jx (%ju) byte%s at address 0x%jx...", (uintmax_t)size, (uintmax_t)size,
	    (size == 1) ? "" : "s", (uintmax_t)address);
	/* VM_PROT_DEFAULT restores read/write access to the region. */
	T_QUIET; T_ASSERT_MACH_SUCCESS(mach_vm_protect(mach_task_self(), address, size, FALSE, VM_PROT_DEFAULT), "mach_vm_protect()");
	logv("Region has read access.");
	/* Reuse the extended zero-fill check on the restored region. */
	test_zero_filled_extended();
}
2997
2998void
2999verify_protection(vm_prot_t protection, const char * protection_name)
3000{
3001 mach_vm_address_t address = get_vm_address();
3002 mach_vm_size_t size = get_vm_size();
3003 mach_vm_size_t original_size = size;
3004 vm_region_basic_info_data_64_t info;
3005 mach_msg_type_number_t count = VM_REGION_BASIC_INFO_COUNT_64;
3006 mach_port_t unused;
3007
3008 logv(
3009 "Verifying %s-protection on region of address 0x%jx and "
3010 "size 0x%jx (%ju) with mach_vm_region()...",
3011 protection_name, (uintmax_t)address, (uintmax_t)size, (uintmax_t)size);
3012 T_QUIET; T_ASSERT_MACH_SUCCESS(
3013 mach_vm_region(mach_task_self(), &address, &size, VM_REGION_BASIC_INFO_64, (vm_region_info_t)&info, &count, &unused),
3014 "mach_vm_region()");
3015 if (original_size) {
3016 T_QUIET; T_ASSERT_EQ((info.protection & protection), 0,
3017 "Region "
3018 "is unexpectedly %s-unprotected.",
3019 protection_name);
3020 logv("Region is %s-protected as expected.", protection_name);
3021 } else {
3022 T_QUIET; T_ASSERT_NE(info.protection & protection, 0,
3023 "Region is "
3024 "unexpectedly %s-protected.",
3025 protection_name);
3026 logv("Region is %s-unprotected as expected.", protection_name);
3027 }
3028}
3029
/* Region read access matches the fixture's expectation. */
void
test_verify_readprotection()
{
	verify_protection(VM_PROT_READ, "read");
}
3035
/* Region write access matches the fixture's expectation. */
void
test_verify_writeprotection()
{
	verify_protection(VM_PROT_WRITE, "write");
}
3041
3042/******************************/
3043/* Protection bus error tests */
3044/******************************/
3045
3046/* mach_vm_protect() affects the smallest aligned region (integral
3047 * number of pages) containing the given range. */
3048
3049/* Addresses in read-protected range are inaccessible. */
3050void
3051access_readprotected_range_address(mach_vm_address_t address, const char * position)
3052{
3053 logv("Reading from %s 0x%jx of read-protected range...", position, (uintmax_t)address);
3054 mach_vm_address_t bad_value = MACH_VM_ADDRESS_T(address);
3055 T_ASSERT_FAIL("Unexpectedly read value 0x%jx at address 0x%jx."
3056 "Should have died with signal SIGBUS.",
3057 (uintmax_t)bad_value, (uintmax_t)address);
3058}
3059
/* Start of read-protected range is inaccessible. */
void
test_access_readprotected_range_start()
{
	/* Page-aligned start of the protected range. */
	access_readprotected_range_address(mach_vm_trunc_page(get_vm_address()), "start");
}
3066
/* Middle of read-protected range is inaccessible. */
void
test_access_readprotected_range_middle()
{
	mach_vm_address_t address = get_vm_address();

	/* Halfway into the page-aligned extent of the range. */
	access_readprotected_range_address(mach_vm_trunc_page(address) + (aligned_size(address, get_vm_size()) >> 1), "middle");
}
3074
/* End of read-protected range is inaccessible. */
void
test_access_readprotected_range_end()
{
	/* Last word-sized slot before the rounded-up end of the range. */
	access_readprotected_range_address(round_page_kernel(get_vm_address() + get_vm_size()) - vm_address_size, "end");
}
3081
3082/* Addresses in write-protected range are unwritable. */
3083void
3084write_writeprotected_range_address(mach_vm_address_t address, const char * position)
3085{
3086 logv("Writing on %s 0x%jx of write-protected range...", position, (uintmax_t)address);
3087 MACH_VM_ADDRESS_T(address) = 0x0;
3088 T_ASSERT_FAIL("Unexpectedly wrote value 0x0 value at address 0x%jx."
3089 "Should have died with signal SIGBUS.",
3090 (uintmax_t)address);
3091}
3092
/* Start of write-protected range is unwritable. */
void
test_write_writeprotected_range_start()
{
	/* Page-aligned start of the protected range. */
	write_writeprotected_range_address(mach_vm_trunc_page(get_vm_address()), "start");
}
3099
/* Middle of write-protected range is unwritable. */
void
test_write_writeprotected_range_middle()
{
	mach_vm_address_t address = get_vm_address();

	/* Halfway into the page-aligned extent of the range. */
	write_writeprotected_range_address(mach_vm_trunc_page(address) + (aligned_size(address, get_vm_size()) >> 1), "middle");
}
3107
/* End of write-protected range is unwritable. */
void
test_write_writeprotected_range_end()
{
	/* Last word-sized slot before the rounded-up end of the range. */
	write_writeprotected_range_address(round_page_kernel(get_vm_address() + get_vm_size()) - vm_address_size, "end");
}
3114
3115/*************************************/
3116/* mach_vm_protect() edge case tests */
3117/*************************************/
3118
3119void
3120protect_zero_size(vm_prot_t protection, const char * protection_name)
3121{
3122 int i;
3123 kern_return_t kr;
3124 vm_map_t this_task = mach_task_self();
3125 mach_vm_address_t addresses[] = {0x0,
3126 0x1,
3127 vm_page_size - 1,
3128 vm_page_size,
3129 vm_page_size + 1,
3130 (mach_vm_address_t)UINT_MAX - vm_page_size + 1,
3131 (mach_vm_address_t)UINT_MAX,
3132 (mach_vm_address_t)UINTMAX_MAX - vm_page_size + 1,
3133 (mach_vm_address_t)UINTMAX_MAX};
3134 int numofaddresses = sizeof(addresses) / sizeof(addresses[0]);
3135
3136 logv("%s-protecting 0x0 (0) bytes at various addresses...", protection_name);
3137 for (i = 0; i < numofaddresses; i++) {
3138 kr = mach_vm_protect(this_task, addresses[i], 0, FALSE, protection);
3139 T_QUIET; T_ASSERT_MACH_SUCCESS(kr,
3140 "mach_vm_protect() at "
3141 "address 0x%jx unexpectedly failed: %s.",
3142 (uintmax_t)addresses[i], mach_error_string(kr));
3143 }
3144 logv("Protection successful.");
3145}
3146
/* Read-protecting zero bytes succeeds at any address. */
void
test_readprotect_zero_size()
{
	protect_zero_size(VM_PROT_READ, "Read");
}
3152
/* Write-protecting zero bytes succeeds at any address. */
void
test_writeprotect_zero_size()
{
	protect_zero_size(VM_PROT_WRITE, "Write");
}
3158
3159/* Protecting a range wrapped around the address space fails. */
3160void
3161protect_wrapped_around_ranges(vm_prot_t protection, const char * protection_name)
3162{
3163 int i;
3164 kern_return_t kr;
3165 vm_map_t this_task = mach_task_self();
3166 struct {
3167 mach_vm_address_t address;
3168 mach_vm_size_t size;
3169 } ranges[] = {
3170 {0x1, (mach_vm_size_t)UINTMAX_MAX},
3171 {vm_page_size, (mach_vm_size_t)UINTMAX_MAX - vm_page_size + 1},
3172 {(mach_vm_address_t)UINTMAX_MAX - vm_page_size + 1, vm_page_size},
3173 {(mach_vm_address_t)UINTMAX_MAX, 1},
3174 };
3175 int numofranges = sizeof(ranges) / sizeof(ranges[0]);
3176
3177 logv(
3178 "%s-protecting various memory ranges wrapping around the "
3179 "address space...",
3180 protection_name);
3181 for (i = 0; i < numofranges; i++) {
3182 kr = mach_vm_protect(this_task, ranges[i].address, ranges[i].size, FALSE, protection);
3183 T_QUIET; T_ASSERT_EQ(kr, KERN_INVALID_ARGUMENT,
3184 "mach_vm_protect() with address 0x%jx and size "
3185 "0x%jx (%ju) unexpectedly returned: %s.\n"
3186 "Should have returned: %s.",
3187 (uintmax_t)ranges[i].address, (uintmax_t)ranges[i].size, (uintmax_t)ranges[i].size, mach_error_string(kr),
3188 mach_error_string(KERN_INVALID_ARGUMENT));
3189 }
3190 logv("Returned expected error on each range: %s.", mach_error_string(KERN_INVALID_ARGUMENT));
3191}
3192
/* Read-protecting wrapped-around ranges is rejected. */
void
test_readprotect_wrapped_around_ranges()
{
	protect_wrapped_around_ranges(VM_PROT_READ, "Read");
}
3198
/* Write-protecting wrapped-around ranges is rejected. */
void
test_writeprotect_wrapped_around_ranges()
{
	protect_wrapped_around_ranges(VM_PROT_WRITE, "Write");
}
3204
3205/*******************/
3206/* vm_copy() tests */
3207/*******************/
3208
/* Verify the address space is being shared.
 * Queries the region's extended info and asserts its share_mode
 * equals the expected SM_* constant; currently compiled out (the
 * check is unreliable on UVM kernels, see rdar://12164664). */
void
assert_share_mode(mach_vm_address_t address, unsigned share_mode, const char * share_mode_name)
{
	mach_vm_size_t size = get_vm_size();
	vm_region_extended_info_data_t info;
	mach_msg_type_number_t count = VM_REGION_EXTENDED_INFO_COUNT;
	mach_port_t unused;

/*
 * XXX Fails on UVM kernel. See <rdar://problem/12164664>
 */
#if notyet /* __MAC_OS_X_VERSION_MIN_REQUIRED < 1090 */
	logv(
	    "Verifying %s share mode on region of address 0x%jx and "
	    "size 0x%jx (%ju)...",
	    share_mode_name, (uintmax_t)address, (uintmax_t)size, (uintmax_t)size);
	T_QUIET; T_ASSERT_MACH_SUCCESS(
		mach_vm_region(mach_task_self(), &address, &size, VM_REGION_EXTENDED_INFO, (vm_region_info_t)&info, &count, &unused),
		"mach_vm_region()");
	T_QUIET; T_ASSERT_EQ(info.share_mode, share_mode,
	    "Region's share mode "
	    " unexpectedly is not %s but %d.",
	    share_mode_name, info.share_mode);
	logv("Region has a share mode of %s as expected.", share_mode_name);
#else
	logv("Bypassing share_mode verification (See <rdar://problem/12164664>)");
#endif /* __MAC_OS_X_VERSION_MIN_REQUIRED < 1090 */
}
3238
3239/* Do the vm_copy() and verify its success. */
3240void
3241assert_vmcopy_success(vm_address_t src, vm_address_t dst, const char * source_name)
3242{
3243 kern_return_t kr;
3244 mach_vm_size_t size = get_vm_size();
3245
3246 logv("Copying (using mach_vm_copy()) from a %s source...", source_name);
3247 kr = mach_vm_copy(mach_task_self(), src, size, dst);
3248 T_QUIET; T_ASSERT_MACH_SUCCESS(kr,
3249 "mach_vm_copy() with the source address "
3250 "0x%jx, designation address 0x%jx, and size 0x%jx (%ju) unexpectly "
3251 "returned %s.\n Should have returned: %s.",
3252 (uintmax_t)src, (uintmax_t)dst, (uintmax_t)size, (uintmax_t)size, mach_error_string(kr),
3253 mach_error_string(KERN_SUCCESS));
3254 logv("Copy (mach_vm_copy()) was successful as expected.");
3255}
3256
/* Fill the fixture-sized region at `address` with the test pattern,
 * offsetting the pattern values by `start` (presumably each word gets
 * an address-derived value via write_address — see the helper earlier
 * in this file). */
void
write_region(mach_vm_address_t address, mach_vm_size_t start)
{
	mach_vm_size_t size = get_vm_size();

	filter_addresses_do_else(empty, FALSE, address, size, write_address, write_address, start);
}
3264
/* Verify that the fixture-sized region at `address` holds the test
 * pattern previously written with write_region() at the same `start`
 * offset. */
void
verify_region(mach_vm_address_t address, mach_vm_address_t start)
{
	mach_vm_size_t size = get_vm_size();

	filter_addresses_do_else(empty, FALSE, address, size, verify_address, verify_address, start);
}
3272
3273/* Perform the post vm_copy() action and verify its results. */
3274void
3275modify_one_and_verify_all_regions(vm_address_t src, vm_address_t dst, vm_address_t shared_copied, boolean_t shared)
3276{
3277 mach_vm_size_t size = get_vm_size();
3278 int action = get_vmcopy_post_action();
3279
3280 /* Do the post vm_copy() action. */
3281 switch (action) {
3282 case VMCOPY_MODIFY_SRC:
3283 logv("Modifying: source%s...", shared ? " (shared with other region)" : "");
3284 write_region(src, 1);
3285 break;
3286
3287 case VMCOPY_MODIFY_DST:
3288 logv("Modifying: destination...");
3289 write_region(dst, 1);
3290 break;
3291
3292 case VMCOPY_MODIFY_SHARED_COPIED:
3293 /* If no shared_copied then no need to verify (nothing changed). */
3294 if (!shared_copied) {
3295 return;
3296 }
3297 logv("Modifying: shared/copied%s...", shared ? " (shared with source region)" : "");
3298 write_region(shared_copied, 1);
3299 break;
3300
3301 default:
3302 T_ASSERT_FAIL("Unknown post vm_copy() action (%d)", action);
3303 }
3304 logv("Modification was successful as expected.");
3305
3306 /* Verify all the regions with what is expected. */
3307 logv("Verifying: source... ");
3308 verify_region(src, (VMCOPY_MODIFY_SRC == action || (shared && VMCOPY_MODIFY_SHARED_COPIED == action)) ? 1 : 0);
3309 logv("destination... ");
3310 verify_region(dst, (VMCOPY_MODIFY_DST == action) ? 1 : 0);
3311 if (shared_copied) {
3312 logv("shared/copied... ");
3313 verify_region(shared_copied, (VMCOPY_MODIFY_SHARED_COPIED == action || (shared && VMCOPY_MODIFY_SRC == action)) ? 1 : 0);
3314 }
3315 logv("Verification was successful as expected.");
3316}
3317
/* Test source being a simple fresh region.
 * Allocates an untouched source (share mode SM_EMPTY), copies it to
 * a fresh destination, then runs the post-copy modify/verify step.
 * There is no third shared/copied region in this scenario. */
void
test_vmcopy_fresh_source()
{
	mach_vm_size_t size = get_vm_size();
	mach_vm_address_t src, dst;

	if (get_vmcopy_post_action() == VMCOPY_MODIFY_SHARED_COPIED) {
		/* No shared/copied region to modify so just return. */
		logv("No shared/copied region as expected.");
		return;
	}

	assert_allocate_success(&src, size, TRUE);

	assert_share_mode(src, SM_EMPTY, "SM_EMPTY");

	write_region(src, 0);

	assert_allocate_success(&dst, size, TRUE);

	assert_vmcopy_success(src, dst, "freshly allocated");

	modify_one_and_verify_all_regions(src, dst, 0, FALSE);

	assert_deallocate_success(src, size);
	assert_deallocate_success(dst, size);
}
3346
3347/* Test source copied from a shared region. */
3348void
3349test_vmcopy_shared_source()
3350{
3351 mach_vm_size_t size = get_vm_size();
3352 mach_vm_address_t src, dst, shared;
3353 int action = get_vmcopy_post_action();
3354 int pid, status;
3355
3356 assert_allocate_success(&src, size, TRUE);
3357
3358 T_QUIET; T_ASSERT_MACH_SUCCESS(mach_vm_inherit(mach_task_self(), src, size, VM_INHERIT_SHARE), "mach_vm_inherit()");
3359
3360 write_region(src, 0);
3361
3362 pid = fork();
3363 if (pid == 0) {
3364 /* Verify that the child's 'src' is shared with the
3365 * parent's src */
3366 assert_share_mode(src, SM_SHARED, "SM_SHARED");
3367 assert_allocate_success(&dst, size, TRUE);
3368 assert_vmcopy_success(src, dst, "shared");
3369 if (VMCOPY_MODIFY_SHARED_COPIED == action) {
3370 logv("Modifying: shared...");
3371 write_region(src, 1);
3372 logv("Modification was successsful as expected.");
3373 logv("Verifying: source... ");
3374 verify_region(src, 1);
3375 logv("destination...");
3376 verify_region(dst, (VMCOPY_MODIFY_DST == action) ? 1 : 0);
3377 logv("Verification was successful as expected.");
3378 } else {
3379 modify_one_and_verify_all_regions(src, dst, 0, TRUE);
3380 }
3381 assert_deallocate_success(dst, size);
3382 exit(0);
3383 } else if (pid > 0) {
3384 /* In the parent the src becomes the shared */
3385 shared = src;
3386 wait(&status);
3387 if (WEXITSTATUS(status) != 0) {
3388 exit(status);
3389 }
3390 /* verify shared (shared with child's src) */
3391 logv("Verifying: shared...");
3392 verify_region(shared, (VMCOPY_MODIFY_SHARED_COPIED == action || VMCOPY_MODIFY_SRC == action) ? 1 : 0);
3393 logv("Verification was successful as expected.");
3394 } else {
3395 T_WITH_ERRNO; T_ASSERT_FAIL("fork failed");
3396 }
3397
3398 assert_deallocate_success(src, size);
3399}
3400
/* Test source copied from another mapping.
 * The source is the destination of a prior mach_vm_copy() (share
 * mode SM_COW), then itself used as the source of the tested copy. */
void
test_vmcopy_copied_from_source()
{
	mach_vm_size_t size = get_vm_size();
	mach_vm_address_t src, dst, copied;

	assert_allocate_success(&copied, size, TRUE);
	write_region(copied, 0);

	assert_allocate_success(&src, size, TRUE);

	/* src now shares COW pages with `copied`. */
	T_QUIET; T_ASSERT_MACH_SUCCESS(mach_vm_copy(mach_task_self(), copied, size, src), "mach_vm_copy()");

	assert_share_mode(src, SM_COW, "SM_COW");

	assert_allocate_success(&dst, size, TRUE);

	assert_vmcopy_success(src, dst, "copied from");

	modify_one_and_verify_all_regions(src, dst, copied, FALSE);

	assert_deallocate_success(src, size);
	assert_deallocate_success(dst, size);
	assert_deallocate_success(copied, size);
}
3427
/* Test source copied to another mapping.
 * The source is first used as the SOURCE of a prior mach_vm_copy()
 * (leaving it SM_COW), then as the source of the tested copy. */
void
test_vmcopy_copied_to_source()
{
	mach_vm_size_t size = get_vm_size();
	mach_vm_address_t src, dst, copied;

	assert_allocate_success(&src, size, TRUE);
	write_region(src, 0);

	assert_allocate_success(&copied, size, TRUE);

	/* `copied` now shares COW pages with src. */
	T_QUIET; T_ASSERT_MACH_SUCCESS(mach_vm_copy(mach_task_self(), src, size, copied), "mach_vm_copy()");

	assert_share_mode(src, SM_COW, "SM_COW");

	assert_allocate_success(&dst, size, TRUE);

	assert_vmcopy_success(src, dst, "copied to");

	modify_one_and_verify_all_regions(src, dst, copied, FALSE);

	assert_deallocate_success(src, size);
	assert_deallocate_success(dst, size);
	assert_deallocate_success(copied, size);
}
3454
/* Test a trueshared source copied.
 * The source is a mapping of a named memory entry made from the
 * `shared` region, so both mappings reference the same object
 * (share mode SM_TRUESHARED). */
void
test_vmcopy_trueshared_source()
{
	mach_vm_size_t size = get_vm_size();
	mach_vm_address_t src = 0x0, dst, shared;
	vm_prot_t cur_protect = (VM_PROT_READ | VM_PROT_WRITE);
	vm_prot_t max_protect = (VM_PROT_READ | VM_PROT_WRITE);
	mem_entry_name_port_t mem_obj;

	assert_allocate_success(&shared, size, TRUE);
	write_region(shared, 0);

	/* Create a named entry covering `shared`, then map it again at
	 * an anywhere-chosen address. */
	T_QUIET; T_ASSERT_MACH_SUCCESS(mach_make_memory_entry_64(mach_task_self(), &size, (memory_object_offset_t)shared, cur_protect, &mem_obj,
	    (mem_entry_name_port_t)NULL),
	    "mach_make_memory_entry_64()");
	T_QUIET; T_ASSERT_MACH_SUCCESS(
		mach_vm_map(mach_task_self(), &src, size, 0, TRUE, mem_obj, 0, FALSE, cur_protect, max_protect, VM_INHERIT_NONE),
		"mach_vm_map()");

	assert_share_mode(src, SM_TRUESHARED, "SM_TRUESHARED");

	assert_allocate_success(&dst, size, TRUE);

	assert_vmcopy_success(src, dst, "true shared");

	modify_one_and_verify_all_regions(src, dst, shared, TRUE);

	assert_deallocate_success(src, size);
	assert_deallocate_success(dst, size);
	assert_deallocate_success(shared, size);
}
3487
3488/* Test a private aliazed source copied. */
3489void
3490test_vmcopy_private_aliased_source()
3491{
3492 mach_vm_size_t size = get_vm_size();
3493 mach_vm_address_t src = 0x0, dst, shared;
3494 vm_prot_t cur_protect = (VM_PROT_READ | VM_PROT_WRITE);
3495 vm_prot_t max_protect = (VM_PROT_READ | VM_PROT_WRITE);
3496
3497 assert_allocate_success(&shared, size, TRUE);
3498 write_region(shared, 0);
3499
3500 T_QUIET; T_ASSERT_MACH_SUCCESS(mach_vm_remap(mach_task_self(), &src, size, 0, TRUE, mach_task_self(), shared, FALSE, &cur_protect,
3501 &max_protect, VM_INHERIT_NONE),
3502 "mach_vm_remap()");
3503
3504 assert_share_mode(src, SM_PRIVATE_ALIASED, "SM_PRIVATE_ALIASED");
3505
3506 assert_allocate_success(&dst, size, TRUE);
3507
3508 assert_vmcopy_success(src, dst, "true shared");
3509
3510 modify_one_and_verify_all_regions(src, dst, shared, TRUE);
3511
3512 assert_deallocate_success(src, size);
3513 assert_deallocate_success(dst, size);
3514 assert_deallocate_success(shared, size);
3515}
3516
3517/*************/
3518/* VM Suites */
3519/*************/
3520
/* Drive the allocation test suites: every combination of allocator,
 * VM size, address flag (anywhere/fixed) and address alignment gets
 * the argument-error and main-allocation suites, plus the
 * allocator-specific and mach_vm_map()-specific error suites. */
void
run_allocate_test_suites()
{
	/* <rdar://problem/10304215> CoreOSZin 12Z30: VMUnitTest fails:
	 * error finding xnu major version number. */
	/* unsigned int xnu_version = xnu_major_version(); */

	UnitTests allocate_main_tests = {
		{"Allocated address is nonzero iff size is nonzero", test_nonzero_address_iff_nonzero_size},
		{"Allocated address is page-aligned", test_aligned_address},
		{"Allocated memory is zero-filled", test_zero_filled},
		{"Write and verify address-filled pattern", test_write_address_filled},
		{"Write and verify checkerboard pattern", test_write_checkerboard},
		{"Write and verify reverse checkerboard pattern", test_write_reverse_checkerboard},
		{"Write and verify page ends pattern", test_write_page_ends},
		{"Write and verify page interiors pattern", test_write_page_interiors},
		{"Reallocate allocated pages", test_reallocate_pages},
	};
	UnitTests allocate_address_error_tests = {
		{"Allocate at address zero", test_allocate_at_zero},
		{"Allocate at a 2 MB boundary-unaligned, page-aligned "
	    "address",
	    test_allocate_2MB_boundary_unaligned_page_aligned_address},
	};
	UnitTests allocate_argument_error_tests = {
		{"Allocate in NULL VM map", test_allocate_in_null_map}, {"Allocate with kernel flags", test_allocate_with_kernel_flags},
	};
	UnitTests allocate_fixed_size_tests = {
		{"Allocate zero size", test_allocate_zero_size},
		{"Allocate overflowing size", test_allocate_overflowing_size},
		{"Allocate a page with highest address hint", test_allocate_page_with_highest_address_hint},
		{"Allocate two pages and verify first fit strategy", test_allocate_first_fit_pages},
	};
	UnitTests allocate_invalid_large_size_test = {
		{"Allocate invalid large size", test_allocate_invalid_large_size},
	};
	UnitTests mach_vm_map_protection_inheritance_error_test = {
		{"mach_vm_map() with invalid protection/inheritance "
	    "arguments",
	    test_mach_vm_map_protection_inheritance_error},
	};
	UnitTests mach_vm_map_large_mask_overflow_error_test = {
		{"mach_vm_map() with large address mask", test_mach_vm_map_large_mask_overflow_error},
	};

	/* Run the test suites with various allocators and VM sizes, and
	 * unspecified or fixed (page-aligned or page-unaligned),
	 * addresses. */
	for (allocators_idx = 0; allocators_idx < numofallocators; allocators_idx++) {
		for (sizes_idx = 0; sizes_idx < numofsizes; sizes_idx++) {
			for (flags_idx = 0; flags_idx < numofflags; flags_idx++) {
				for (alignments_idx = 0; alignments_idx < numofalignments; alignments_idx++) {
					/* An allocated address will be page-aligned. */
					/* Only run the zero size mach_vm_map() error tests in the
					 * unspecified address case, since we won't be able to retrieve a
					 * fixed address for allocation. See 8003930. */
					if ((flags_idx == ANYWHERE && alignments_idx == UNALIGNED) ||
					    (allocators_idx != MACH_VM_ALLOCATE && sizes_idx == ZERO_BYTES && flags_idx == FIXED)) {
						continue;
					}
					run_suite(set_up_allocator_and_vm_variables, allocate_argument_error_tests, do_nothing,
					    "%s argument error tests, %s%s address, "
					    "%s size: 0x%jx (%ju)",
					    allocators[allocators_idx].description, address_flags[flags_idx].description,
					    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
					    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
					    (uintmax_t)vm_sizes[sizes_idx].size);
					/* mach_vm_map() only protection and inheritance error
					 * tests. */
					if (allocators_idx != MACH_VM_ALLOCATE) {
						run_suite(set_up_allocator_and_vm_variables, mach_vm_map_protection_inheritance_error_test, do_nothing,
						    "%s protection and inheritance "
						    "error test, %s%s address, %s size: 0x%jx "
						    "(%ju)",
						    allocators[allocators_idx].description, address_flags[flags_idx].description,
						    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
						    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
						    (uintmax_t)vm_sizes[sizes_idx].size);
					}
					/* mach_vm_map() cannot allocate 0 bytes, see 8003930. */
					if (allocators_idx == MACH_VM_ALLOCATE || sizes_idx != ZERO_BYTES) {
						run_suite(set_up_allocator_and_vm_variables_and_allocate, allocate_main_tests, deallocate,
						    "%s main "
						    "allocation tests, %s%s address, %s size: 0x%jx "
						    "(%ju)",
						    allocators[allocators_idx].description, address_flags[flags_idx].description,
						    (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
						    vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
						    (uintmax_t)vm_sizes[sizes_idx].size);
					}
				}
			}
			/* Address-error suites only vary by allocator and size. */
			run_suite(set_up_allocator_and_vm_size, allocate_address_error_tests, do_nothing,
			    "%s address "
			    "error allocation tests, %s size: 0x%jx (%ju)",
			    allocators[allocators_idx].description, vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
			    (uintmax_t)vm_sizes[sizes_idx].size);
		}
		run_suite(set_up_allocator, allocate_fixed_size_tests, do_nothing, "%s fixed size allocation tests",
		    allocators[allocators_idx].description);
		/* <rdar://problem/10304215> CoreOSZin 12Z30: VMUnitTest fails:
		 * error finding xnu major version number. */
		/* mach_vm_map() with a named entry triggers a panic with this test
		 * unless under xnu-1598 or later, see 8048580. */
		/* if (allocators_idx != MACH_VM_MAP_NAMED_ENTRY
		 *  || xnu_version >= 1598) { */
		if (allocators_idx != MACH_VM_MAP_NAMED_ENTRY) {
			run_suite(set_up_allocator, allocate_invalid_large_size_test, do_nothing, "%s invalid large size allocation test",
			    allocators[allocators_idx].description);
		}
	}
	/* mach_vm_map() only large mask overflow tests. */
	for (sizes_idx = 0; sizes_idx < numofsizes; sizes_idx++) {
		run_suite(set_up_vm_size, mach_vm_map_large_mask_overflow_error_test, do_nothing,
		    "mach_vm_map() large mask overflow "
		    "error test, size: 0x%jx (%ju)",
		    (uintmax_t)vm_sizes[sizes_idx].size, (uintmax_t)vm_sizes[sizes_idx].size);
	}
}
3640
3641void
3642run_deallocate_test_suites()
3643{
3644 UnitTests access_deallocated_memory_tests = {
3645 {"Read start of deallocated range", test_access_deallocated_range_start},
3646 {"Read middle of deallocated range", test_access_deallocated_range_middle},
3647 {"Read end of deallocated range", test_access_deallocated_range_end},
3648 };
3649 UnitTests deallocate_reallocate_tests = {
3650 {"Deallocate twice", test_deallocate_twice},
3651 {"Write pattern, deallocate, reallocate (deallocated "
3652 "memory is inaccessible), and verify memory is "
3653 "zero-filled",
3654 test_write_pattern_deallocate_reallocate_zero_filled},
3655 };
3656 UnitTests deallocate_null_map_test = {
3657 {"Deallocate in NULL VM map", test_deallocate_in_null_map},
3658 };
3659 UnitTests deallocate_edge_case_tests = {
3660 {"Deallocate zero size ranges", test_deallocate_zero_size_ranges},
3661 {"Deallocate memory ranges whose end rounds to 0x0", test_deallocate_rounded_zero_end_ranges},
3662 {"Deallocate wrapped around memory ranges", test_deallocate_wrapped_around_ranges},
3663 };
3664 UnitTests deallocate_suicide_test = {
3665 {"Deallocate whole address space", test_deallocate_suicide},
3666 };
3667
3668 /* All allocations done with mach_vm_allocate(). */
3669 set_allocator(wrapper_mach_vm_allocate);
3670
3671 /* Run the test suites with various VM sizes, and unspecified or
3672 * fixed (page-aligned or page-unaligned), addresses. */
3673 for (sizes_idx = 0; sizes_idx < numofsizes; sizes_idx++) {
3674 for (flags_idx = 0; flags_idx < numofflags; flags_idx++) {
3675 for (alignments_idx = 0; alignments_idx < numofalignments; alignments_idx++) {
3676 /* An allocated address will be page-aligned. */
3677 if (flags_idx == ANYWHERE && alignments_idx == UNALIGNED) {
3678 continue;
3679 }
3680 /* Accessing deallocated memory should cause a segmentation
3681 * fault. */
3682 /* Nothing gets deallocated if size is zero. */
3683 if (sizes_idx != ZERO_BYTES) {
3684 set_expected_signal(SIGSEGV);
3685 run_suite(set_up_vm_variables_and_allocate, access_deallocated_memory_tests, do_nothing,
3686 "Deallocated memory access tests, "
3687 "%s%s address, %s size: 0x%jx (%ju)",
3688 address_flags[flags_idx].description,
3689 (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
3690 vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
3691 (uintmax_t)vm_sizes[sizes_idx].size);
3692 set_expected_signal(0);
3693 }
3694 run_suite(set_up_vm_variables_and_allocate, deallocate_reallocate_tests, do_nothing,
3695 "Deallocation and reallocation tests, %s%s "
3696 "address, %s size: 0x%jx (%ju)",
3697 address_flags[flags_idx].description,
3698 (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
3699 vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
3700 (uintmax_t)vm_sizes[sizes_idx].size);
3701 run_suite(set_up_vm_variables, deallocate_null_map_test, do_nothing,
3702 "mach_vm_deallocate() null map test, "
3703 "%s%s address, %s size: 0x%jx (%ju)",
3704 address_flags[flags_idx].description,
3705 (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
3706 vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
3707 (uintmax_t)vm_sizes[sizes_idx].size);
3708 }
3709 }
3710 }
3711 run_suite(do_nothing, deallocate_edge_case_tests, do_nothing, "Edge case deallocation tests");
3712
3713 set_expected_signal(-1); /* SIGSEGV or SIGBUS */
3714 run_suite(do_nothing, deallocate_suicide_test, do_nothing, "Whole address space deallocation test");
3715 set_expected_signal(0);
3716}
3717
3718void
3719run_read_test_suites()
3720{
3721 UnitTests read_main_tests = {
3722 {"Read address is nonzero iff size is nonzero", test_nonzero_address_iff_nonzero_size},
3723 {"Read address has the correct boundary offset", test_read_address_offset},
3724 {"Reallocate read pages", test_reallocate_pages},
3725 {"Read and verify zero-filled memory", test_zero_filled},
3726 };
3727 UnitTests read_pattern_tests = {
3728 {"Read address-filled pattern", test_read_address_filled},
3729 {"Read checkerboard pattern", test_read_checkerboard},
3730 {"Read reverse checkerboard pattern", test_read_reverse_checkerboard},
3731 };
3732 UnitTests read_null_map_test = {
3733 {"Read from NULL VM map", test_read_null_map},
3734 };
3735 UnitTests read_edge_case_tests = {
3736 {"Read zero size", test_read_zero_size},
3737 {"Read invalid large size", test_read_invalid_large_size},
3738 {"Read wrapped around memory ranges", test_read_wrapped_around_ranges},
3739 };
3740 UnitTests read_inaccessible_tests = {
3741 {"Read partially decallocated memory", test_read_partially_deallocated_range},
3742 {"Read partially read-protected memory", test_read_partially_unreadable_range},
3743 };
3744
3745 /* All allocations done with mach_vm_allocate(). */
3746 set_allocator(wrapper_mach_vm_allocate);
3747
3748 /* Run the test suites with various VM sizes, and unspecified or
3749 * fixed (page-aligned or page-unaligned) addresses. */
3750 for (sizes_idx = 0; sizes_idx < numofsizes; sizes_idx++) {
3751 for (flags_idx = 0; flags_idx < numofflags; flags_idx++) {
3752 for (alignments_idx = 0; alignments_idx < numofalignments; alignments_idx++) {
3753 /* An allocated address will be page-aligned. */
3754 if (flags_idx == ANYWHERE && alignments_idx == UNALIGNED) {
3755 continue;
3756 }
3757 run_suite(set_up_vm_variables_allocate_read_deallocate, read_main_tests, deallocate,
3758 "mach_vm_read() "
3759 "main tests, %s%s address, %s size: 0x%jx (%ju)",
3760 address_flags[flags_idx].description,
3761 (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
3762 vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
3763 (uintmax_t)vm_sizes[sizes_idx].size);
3764 run_suite(set_up_vm_variables_and_allocate_extra_page, read_pattern_tests, deallocate,
3765 "mach_vm_read() pattern tests, %s%s address, %s "
3766 "size: 0x%jx (%ju)",
3767 address_flags[flags_idx].description,
3768 (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
3769 vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
3770 (uintmax_t)vm_sizes[sizes_idx].size);
3771 run_suite(set_up_vm_variables_and_allocate_extra_page, read_null_map_test, deallocate_extra_page,
3772 "mach_vm_read() null map test, "
3773 "%s%s address, %s size: 0x%jx (%ju)",
3774 address_flags[flags_idx].description,
3775 (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
3776 vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
3777 (uintmax_t)vm_sizes[sizes_idx].size);
3778 /* A zero size range is always accessible. */
3779 if (sizes_idx != ZERO_BYTES) {
3780 run_suite(set_up_vm_variables_and_allocate_extra_page, read_inaccessible_tests, deallocate_extra_page,
3781 "mach_vm_read() inaccessibility tests, %s%s "
3782 "address, %s size: 0x%jx (%ju)",
3783 address_flags[flags_idx].description,
3784 (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
3785 vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
3786 (uintmax_t)vm_sizes[sizes_idx].size);
3787 }
3788 }
3789 }
3790 }
3791 run_suite(do_nothing, read_edge_case_tests, do_nothing, "mach_vm_read() fixed size tests");
3792}
3793
3794void
3795run_write_test_suites()
3796{
3797 UnitTests write_main_tests = {
3798 {"Write and verify zero-filled memory", test_zero_filled_write},
3799 };
3800 UnitTests write_pattern_tests = {
3801 {"Write address-filled pattern", test_address_filled_write},
3802 {"Write checkerboard pattern", test_checkerboard_write},
3803 {"Write reverse checkerboard pattern", test_reverse_checkerboard_write},
3804 };
3805 UnitTests write_edge_case_tests = {
3806 {"Write into NULL VM map", test_write_null_map}, {"Write zero size", test_write_zero_size},
3807 };
3808 UnitTests write_inaccessible_tests = {
3809 {"Write partially decallocated buffer", test_write_partially_deallocated_buffer},
3810 {"Write partially read-protected buffer", test_write_partially_unreadable_buffer},
3811 {"Write on partially deallocated range", test_write_on_partially_deallocated_range},
3812 {"Write on partially write-protected range", test_write_on_partially_unwritable_range},
3813 };
3814
3815 /* All allocations done with mach_vm_allocate(). */
3816 set_allocator(wrapper_mach_vm_allocate);
3817
3818 /* Run the test suites with various destination sizes and
3819 * unspecified or fixed (page-aligned or page-unaligned)
3820 * addresses, and various buffer sizes and boundary offsets. */
3821 for (sizes_idx = 0; sizes_idx < numofsizes; sizes_idx++) {
3822 for (flags_idx = 0; flags_idx < numofflags; flags_idx++) {
3823 for (alignments_idx = 0; alignments_idx < numofalignments; alignments_idx++) {
3824 for (buffer_sizes_idx = 0; buffer_sizes_idx < numofsizes; buffer_sizes_idx++) {
3825 for (offsets_idx = 0; offsets_idx < numofoffsets; offsets_idx++) {
3826 /* An allocated address will be page-aligned. */
3827 if ((flags_idx == ANYWHERE && alignments_idx == UNALIGNED)) {
3828 continue;
3829 }
3830 run_suite(set_up_vm_and_buffer_variables_allocate_for_writing, write_edge_case_tests,
3831 deallocate_vm_and_buffer,
3832 "mach_vm_write() edge case tests, %s%s address, %s "
3833 "size: 0x%jx (%ju), buffer %s size: 0x%jx (%ju), "
3834 "buffer boundary offset: %d",
3835 address_flags[flags_idx].description,
3836 (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
3837 vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
3838 (uintmax_t)vm_sizes[sizes_idx].size, vm_sizes[buffer_sizes_idx].description,
3839 (uintmax_t)vm_sizes[buffer_sizes_idx].size, (uintmax_t)vm_sizes[buffer_sizes_idx].size,
3840 buffer_offsets[offsets_idx].offset);
3841 /* A zero size buffer is always accessible. */
3842 if (buffer_sizes_idx != ZERO_BYTES) {
3843 run_suite(set_up_vm_and_buffer_variables_allocate_for_writing, write_inaccessible_tests,
3844 deallocate_vm_and_buffer,
3845 "mach_vm_write() inaccessibility tests, "
3846 "%s%s address, %s size: 0x%jx (%ju), buffer "
3847 "%s size: 0x%jx (%ju), buffer boundary "
3848 "offset: %d",
3849 address_flags[flags_idx].description,
3850 (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
3851 vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
3852 (uintmax_t)vm_sizes[sizes_idx].size, vm_sizes[buffer_sizes_idx].description,
3853 (uintmax_t)vm_sizes[buffer_sizes_idx].size, (uintmax_t)vm_sizes[buffer_sizes_idx].size,
3854 buffer_offsets[offsets_idx].offset);
3855 }
3856 /* The buffer cannot be larger than the destination. */
3857 if (vm_sizes[sizes_idx].size < vm_sizes[buffer_sizes_idx].size) {
3858 continue;
3859 }
3860 run_suite(set_up_vm_and_buffer_variables_allocate_write, write_main_tests, deallocate_vm_and_buffer,
3861 "mach_vm_write() main tests, %s%s address, %s "
3862 "size: 0x%jx (%ju), buffer %s size: 0x%jx (%ju), "
3863 "buffer boundary offset: %d",
3864 address_flags[flags_idx].description,
3865 (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
3866 vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
3867 (uintmax_t)vm_sizes[sizes_idx].size, vm_sizes[buffer_sizes_idx].description,
3868 (uintmax_t)vm_sizes[buffer_sizes_idx].size, (uintmax_t)vm_sizes[buffer_sizes_idx].size,
3869 buffer_offsets[offsets_idx].offset);
3870 run_suite(set_up_vm_and_buffer_variables_allocate_for_writing, write_pattern_tests,
3871 deallocate_vm_and_buffer,
3872 "mach_vm_write() pattern tests, %s%s address, %s "
3873 "size: 0x%jx (%ju), buffer %s size: 0x%jx (%ju), "
3874 "buffer boundary offset: %d",
3875 address_flags[flags_idx].description,
3876 (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
3877 vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
3878 (uintmax_t)vm_sizes[sizes_idx].size, vm_sizes[buffer_sizes_idx].description,
3879 (uintmax_t)vm_sizes[buffer_sizes_idx].size, (uintmax_t)vm_sizes[buffer_sizes_idx].size,
3880 buffer_offsets[offsets_idx].offset);
3881 }
3882 }
3883 }
3884 }
3885 }
3886}
3887
3888void
3889run_protect_test_suites()
3890{
3891 UnitTests readprotection_main_tests = {
3892 {"Read-protect, read-allow and verify zero-filled memory", test_zero_filled_readprotect},
3893 {"Verify that region is read-protected iff size is "
3894 "nonzero",
3895 test_verify_readprotection},
3896 };
3897 UnitTests access_readprotected_memory_tests = {
3898 {"Read start of read-protected range", test_access_readprotected_range_start},
3899 {"Read middle of read-protected range", test_access_readprotected_range_middle},
3900 {"Read end of read-protected range", test_access_readprotected_range_end},
3901 };
3902 UnitTests writeprotection_main_tests = {
3903 {"Write-protect and verify zero-filled memory", test_zero_filled_extended},
3904 {"Verify that region is write-protected iff size is "
3905 "nonzero",
3906 test_verify_writeprotection},
3907 };
3908 UnitTests write_writeprotected_memory_tests = {
3909 {"Write at start of write-protected range", test_write_writeprotected_range_start},
3910 {"Write in middle of write-protected range", test_write_writeprotected_range_middle},
3911 {"Write at end of write-protected range", test_write_writeprotected_range_end},
3912 };
3913 UnitTests protect_edge_case_tests = {
3914 {"Read-protect zero size ranges", test_readprotect_zero_size},
3915 {"Write-protect zero size ranges", test_writeprotect_zero_size},
3916 {"Read-protect wrapped around memory ranges", test_readprotect_wrapped_around_ranges},
3917 {"Write-protect wrapped around memory ranges", test_writeprotect_wrapped_around_ranges},
3918 };
3919
3920 /* All allocations done with mach_vm_allocate(). */
3921 set_allocator(wrapper_mach_vm_allocate);
3922
3923 /* Run the test suites with various VM sizes, and unspecified or
3924 * fixed (page-aligned or page-unaligned), addresses. */
3925 for (sizes_idx = 0; sizes_idx < numofsizes; sizes_idx++) {
3926 for (flags_idx = 0; flags_idx < numofflags; flags_idx++) {
3927 for (alignments_idx = 0; alignments_idx < numofalignments; alignments_idx++) {
3928 /* An allocated address will be page-aligned. */
3929 if (flags_idx == ANYWHERE && alignments_idx == UNALIGNED) {
3930 continue;
3931 }
3932 run_suite(set_up_vm_variables_allocate_readprotect, readprotection_main_tests, deallocate_extra_page,
3933 "Main read-protection tests, %s%s address, %s "
3934 "size: 0x%jx (%ju)",
3935 address_flags[flags_idx].description,
3936 (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
3937 vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
3938 (uintmax_t)vm_sizes[sizes_idx].size);
3939 run_suite(set_up_vm_variables_allocate_writeprotect, writeprotection_main_tests, deallocate_extra_page,
3940 "Main write-protection tests, %s%s address, %s "
3941 "size: 0x%jx (%ju)",
3942 address_flags[flags_idx].description,
3943 (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
3944 vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
3945 (uintmax_t)vm_sizes[sizes_idx].size);
3946 /* Nothing gets protected if size is zero. */
3947 if (sizes_idx != ZERO_BYTES) {
3948 set_expected_signal(SIGBUS);
3949 /* Accessing read-protected memory should cause a bus
3950 * error. */
3951 run_suite(set_up_vm_variables_allocate_readprotect, access_readprotected_memory_tests, deallocate_extra_page,
3952 "Read-protected memory access tests, %s%s "
3953 "address, %s size: 0x%jx (%ju)",
3954 address_flags[flags_idx].description,
3955 (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
3956 vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
3957 (uintmax_t)vm_sizes[sizes_idx].size);
3958 /* Writing on write-protected memory should cause a bus
3959 * error. */
3960 run_suite(set_up_vm_variables_allocate_writeprotect, write_writeprotected_memory_tests, deallocate_extra_page,
3961 "Write-protected memory writing tests, %s%s "
3962 "address, %s size: 0x%jx (%ju)",
3963 address_flags[flags_idx].description,
3964 (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
3965 vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
3966 (uintmax_t)vm_sizes[sizes_idx].size);
3967 set_expected_signal(0);
3968 }
3969 }
3970 }
3971 }
3972 run_suite(do_nothing, protect_edge_case_tests, do_nothing, "Edge case protection tests");
3973}
3974
3975void
3976run_copy_test_suites()
3977{
3978 /* Copy tests */
3979 UnitTests copy_main_tests = {
3980 {"Copy and verify zero-filled memory", test_zero_filled_copy_dest},
3981 };
3982 UnitTests copy_pattern_tests = {
3983 {"Copy address-filled pattern", test_copy_address_filled},
3984 {"Copy checkerboard pattern", test_copy_checkerboard},
3985 {"Copy reverse checkerboard pattern", test_copy_reverse_checkerboard},
3986 };
3987 UnitTests copy_edge_case_tests = {
3988 {"Copy with NULL VM map", test_copy_null_map},
3989 {"Copy zero size", test_copy_zero_size},
3990 {"Copy invalid large size", test_copy_invalid_large_size},
3991 {"Read wrapped around memory ranges", test_copy_wrapped_around_ranges},
3992 };
3993 UnitTests copy_inaccessible_tests = {
3994 {"Copy source partially decallocated region", test_copy_source_partially_deallocated_region},
3995 /* XXX */
3996 {"Copy destination partially decallocated region", test_copy_dest_partially_deallocated_region},
3997 {"Copy source partially read-protected region", test_copy_source_partially_unreadable_region},
3998 /* XXX */
3999 {"Copy destination partially write-protected region", test_copy_dest_partially_unwriteable_region},
4000 {"Copy source on partially deallocated range", test_copy_source_on_partially_deallocated_range},
4001 {"Copy destination on partially deallocated range", test_copy_dest_on_partially_deallocated_range},
4002 {"Copy source on partially read-protected range", test_copy_source_on_partially_unreadable_range},
4003 {"Copy destination on partially write-protected range", test_copy_dest_on_partially_unwritable_range},
4004 };
4005
4006 UnitTests copy_shared_mode_tests = {
4007 {"Copy using freshly allocated source", test_vmcopy_fresh_source},
4008 {"Copy using shared source", test_vmcopy_shared_source},
4009 {"Copy using a \'copied from\' source", test_vmcopy_copied_from_source},
4010 {"Copy using a \'copied to\' source", test_vmcopy_copied_to_source},
4011 {"Copy using a true shared source", test_vmcopy_trueshared_source},
4012 {"Copy using a private aliased source", test_vmcopy_private_aliased_source},
4013 };
4014
4015 /* All allocations done with mach_vm_allocate(). */
4016 set_allocator(wrapper_mach_vm_allocate);
4017
4018 /* All the tests are done with page size regions. */
4019 set_vm_size(vm_page_size);
4020
4021 /* Run the test suites with various shared modes for source */
4022 for (vmcopy_action_idx = 0; vmcopy_action_idx < numofvmcopyactions; vmcopy_action_idx++) {
4023 run_suite(set_up_copy_shared_mode_variables, copy_shared_mode_tests, do_nothing, "Copy shared mode tests, %s",
4024 vmcopy_actions[vmcopy_action_idx].description);
4025 }
4026
4027 for (sizes_idx = 0; sizes_idx < numofsizes; sizes_idx++) {
4028 for (flags_idx = 0; flags_idx < numofflags; flags_idx++) {
4029 for (alignments_idx = 0; alignments_idx < numofalignments; alignments_idx++) {
4030 for (buffer_sizes_idx = 0; buffer_sizes_idx < numofsizes; buffer_sizes_idx++) {
4031 for (offsets_idx = 0; offsets_idx < numofoffsets; offsets_idx++) {
4032 /* An allocated address will be page-aligned. */
4033 if ((flags_idx == ANYWHERE && alignments_idx == UNALIGNED)) {
4034 continue;
4035 }
4036 run_suite(set_up_vm_and_buffer_variables_allocate_for_copying, copy_edge_case_tests,
4037 deallocate_vm_and_buffer,
4038 "mach_vm_copy() edge case tests, %s%s address, %s "
4039 "size: 0x%jx (%ju), buffer %s size: 0x%jx (%ju), "
4040 "buffer boundary offset: %d",
4041 address_flags[flags_idx].description,
4042 (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
4043 vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
4044 (uintmax_t)vm_sizes[sizes_idx].size, vm_sizes[buffer_sizes_idx].description,
4045 (uintmax_t)vm_sizes[buffer_sizes_idx].size, (uintmax_t)vm_sizes[buffer_sizes_idx].size,
4046 buffer_offsets[offsets_idx].offset);
4047 /* The buffer cannot be larger than the destination. */
4048 if (vm_sizes[sizes_idx].size < vm_sizes[buffer_sizes_idx].size) {
4049 continue;
4050 }
4051
4052 /* A zero size buffer is always accessible. */
4053 if (buffer_sizes_idx != ZERO_BYTES) {
4054 run_suite(set_up_vm_and_buffer_variables_allocate_for_copying, copy_inaccessible_tests,
4055 deallocate_vm_and_buffer,
4056 "mach_vm_copy() inaccessibility tests, "
4057 "%s%s address, %s size: 0x%jx (%ju), buffer "
4058 "%s size: 0x%jx (%ju), buffer boundary "
4059 "offset: %d",
4060 address_flags[flags_idx].description,
4061 (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
4062 vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
4063 (uintmax_t)vm_sizes[sizes_idx].size, vm_sizes[buffer_sizes_idx].description,
4064 (uintmax_t)vm_sizes[buffer_sizes_idx].size, (uintmax_t)vm_sizes[buffer_sizes_idx].size,
4065 buffer_offsets[offsets_idx].offset);
4066 }
4067 run_suite(set_up_source_and_dest_variables_allocate_copy, copy_main_tests, deallocate_vm_and_buffer,
4068 "mach_vm_copy() main tests, %s%s address, %s "
4069 "size: 0x%jx (%ju), destination %s size: 0x%jx (%ju), "
4070 "destination boundary offset: %d",
4071 address_flags[flags_idx].description,
4072 (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
4073 vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
4074 (uintmax_t)vm_sizes[sizes_idx].size, vm_sizes[buffer_sizes_idx].description,
4075 (uintmax_t)vm_sizes[buffer_sizes_idx].size, (uintmax_t)vm_sizes[buffer_sizes_idx].size,
4076 buffer_offsets[offsets_idx].offset);
4077 run_suite(set_up_source_and_dest_variables_allocate_copy, copy_pattern_tests, deallocate_vm_and_buffer,
4078 "mach_vm_copy() pattern tests, %s%s address, %s "
4079 "size: 0x%jx (%ju) destination %s size: 0x%jx (%ju), "
4080 "destination boundary offset: %d",
4081 address_flags[flags_idx].description,
4082 (flags_idx == ANYWHERE) ? "" : address_alignments[alignments_idx].description,
4083 vm_sizes[sizes_idx].description, (uintmax_t)vm_sizes[sizes_idx].size,
4084 (uintmax_t)vm_sizes[sizes_idx].size, vm_sizes[buffer_sizes_idx].description,
4085 (uintmax_t)vm_sizes[buffer_sizes_idx].size, (uintmax_t)vm_sizes[buffer_sizes_idx].size,
4086 buffer_offsets[offsets_idx].offset);
4087 }
4088 }
4089 }
4090 }
4091 }
4092}
4093
/*
 * Entry point shared by all the T_DECL test declarations below.
 *
 * Applies the given options via process_options() (which sets the
 * global flag_run_*_test flags), then runs each enabled test-suite
 * family in a fixed order and finally logs the aggregated results.
 */
void
perform_test_with_options(test_option_t options)
{
	/* Translate the option struct into the global run flags. */
	process_options(options);

	/* <rdar://problem/10304215> CoreOSZin 12Z30: VMUnitTest fails:
	 * error finding xnu major version number. */
	/* printf("xnu version is %s.\n\n", xnu_version_string()); */

	if (flag_run_allocate_test) {
		run_allocate_test_suites();
	}

	if (flag_run_deallocate_test) {
		run_deallocate_test_suites();
	}

	if (flag_run_read_test) {
		run_read_test_suites();
	}

	if (flag_run_write_test) {
		run_write_test_suites();
	}

	if (flag_run_protect_test) {
		run_protect_test_suites();
	}

	if (flag_run_copy_test) {
		run_copy_test_suites();
	}

	/* Emit the [SUMMARY] section for all suites run above. */
	log_aggregated_results();
}
4129
4130T_DECL(vm_test_allocate, "Allocate VM unit test")
4131{
4132 test_options.to_flags = VM_TEST_ALLOCATE;
4133 test_options.to_vmsize = 0;
4134 test_options.to_quietness = ERROR_ONLY_QUIETNESS;
4135
4136 perform_test_with_options(test_options);
4137}
4138
4139T_DECL(vm_test_deallocate, "Deallocate VM unit test",
4140 T_META_IGNORECRASHES(".*vm_allocation.*"))
4141{
4142 test_options.to_flags = VM_TEST_DEALLOCATE;
4143 test_options.to_vmsize = 0;
4144 test_options.to_quietness = ERROR_ONLY_QUIETNESS;
4145
4146 perform_test_with_options(test_options);
4147}
4148
4149T_DECL(vm_test_read, "Read VM unit test")
4150{
4151 test_options.to_flags = VM_TEST_READ;
4152 test_options.to_vmsize = 0;
4153 test_options.to_quietness = ERROR_ONLY_QUIETNESS;
4154
4155 perform_test_with_options(test_options);
4156}
4157
4158T_DECL(vm_test_write, "Write VM unit test")
4159{
4160 test_options.to_flags = VM_TEST_WRITE;
4161 test_options.to_vmsize = 0;
4162 test_options.to_quietness = ERROR_ONLY_QUIETNESS;
4163
4164 perform_test_with_options(test_options);
4165}
4166
4167T_DECL(vm_test_protect, "Protect VM unit test",
4168 T_META_IGNORECRASHES(".*vm_allocation.*"))
4169{
4170 test_options.to_flags = VM_TEST_PROTECT;
4171 test_options.to_vmsize = 0;
4172 test_options.to_quietness = ERROR_ONLY_QUIETNESS;
4173
4174 perform_test_with_options(test_options);
4175}
4176
4177T_DECL(vm_test_copy, "Copy VM unit test")
4178{
4179 test_options.to_flags = VM_TEST_COPY;
4180 test_options.to_vmsize = 0;
4181 test_options.to_quietness = ERROR_ONLY_QUIETNESS;
4182
4183 perform_test_with_options(test_options);
4184}