/*
 * Source: apple/xnu (xnu-7195.81.3) — tests/memorystatus_zone_test.c
 * (header reconstructed from git web-viewer scrape residue)
 */
1 #include <stdio.h>
2 #include <mach/mach_vm.h>
3 #include <mach/mach_port.h>
4 #include <mach/mach_host.h>
5 #include <mach/mach_error.h>
6 #include <mach-o/dyld.h>
7 #include <sys/sysctl.h>
8 #include <sys/kdebug.h>
9 #include <sys/mman.h>
10 #include <sys/kern_memorystatus.h>
11 #include <ktrace/session.h>
12 #include <dispatch/private.h>
13
14 #ifdef T_NAMESPACE
15 #undef T_NAMESPACE
16 #endif
17 #include <darwintest.h>
18 #include <darwintest_utils.h>
19
20 T_GLOBAL_META(
21 T_META_NAMESPACE("xnu.vm"),
22 T_META_CHECK_LEAKS(false)
23 );
24
25 #define TIMEOUT_SECS 10 * 60 /* abort if test takes > 10 minutes */
26
27 #if (TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR)
28 #define ALLOCATION_SIZE_VM_REGION (16*1024) /* 16 KB */
29 #define ALLOCATION_SIZE_VM_OBJECT ALLOCATION_SIZE_VM_REGION
30 #else
31 #define ALLOCATION_SIZE_VM_REGION (1024*1024*100) /* 100 MB */
32 #define ALLOCATION_SIZE_VM_OBJECT (16*1024) /* 16 KB */
33 #endif
34 #define MAX_CHILD_PROCS 100
35
36 #define NUM_GIVE_BACK 5
37 #define NUM_GIVE_BACK_PORTS 20
38
39 /* 60% is too high on bridgeOS to achieve without vm-pageshortage jetsams. Set it to 40%. */
40 #if TARGET_OS_BRIDGE
41 #define ZONEMAP_JETSAM_LIMIT_SYSCTL "kern.zone_map_jetsam_limit=40"
42 #else
43 #define ZONEMAP_JETSAM_LIMIT_SYSCTL "kern.zone_map_jetsam_limit=60"
44 #endif
45
46 #define VME_ZONE_TEST_OPT "allocate_vm_regions"
47 #define VM_OBJECTS_ZONE_TEST_OPT "allocate_vm_objects"
48 #define GENERIC_ZONE_TEST_OPT "allocate_from_generic_zone"
49
50 #define VME_ZONE "VM map entries"
51 #define VMOBJECTS_ZONE "vm objects"
52 #define VMENTRY_TO_VMOBJECT_COMPARISON_RATIO 98
53
54 #define VM_TAG1 100
55 #define VM_TAG2 101
56
57 #define LARGE_MEM_GB 32
58 #define LARGE_MEM_JETSAM_LIMIT 40
59 #define JETSAM_LIMIT_LOWEST 10
60
61 enum {
62 VME_ZONE_TEST = 0,
63 VM_OBJECTS_ZONE_TEST,
64 GENERIC_ZONE_TEST,
65 };
66
67 typedef struct test_config_struct {
68 int test_index;
69 int num_zones;
70 const char *helper_func;
71 mach_zone_name_array_t zone_names;
72 } test_config_struct;
73
74 static test_config_struct current_test;
75 static dispatch_source_t ds_signal = NULL;
76 static dispatch_source_t ds_timer = NULL;
77 static dispatch_queue_t dq_spawn = NULL;
78 static ktrace_session_t session = NULL;
79
80 static mach_zone_info_array_t zone_info_array = NULL;
81 static mach_zone_name_t largest_zone_name;
82 static mach_zone_info_t largest_zone_info;
83
84 static pthread_mutex_t test_mtx = PTHREAD_MUTEX_INITIALIZER; /* protects the next 3 things */
85 static bool test_ending = false;
86 static int num_children = 0;
87 static pid_t child_pids[MAX_CHILD_PROCS];
88
89 static char testpath[PATH_MAX];
90 static void allocate_vm_stuff(int);
91 static void allocate_from_generic_zone(void);
92 static void begin_test_teardown(void);
93 static void cleanup_and_end_test(void);
94 static void setup_ktrace_session(void);
95 static void spawn_child_process(void);
96 static void run_test(void);
97 static bool verify_generic_jetsam_criteria(void);
98 static bool vme_zone_compares_to_vm_objects(void);
99 static int query_zone_map_size(void);
100 static void query_zone_info(void);
101 static void print_zone_info(mach_zone_name_t *zn, mach_zone_info_t *zi);
102
103 extern void mach_zone_force_gc(host_t host);
104 extern kern_return_t mach_zone_info_for_largest_zone(
105 host_priv_t host,
106 mach_zone_name_t *name,
107 mach_zone_info_t *info
108 );
109
/*
 * Returns true once more than `timeout` seconds have elapsed since `start`.
 */
static bool
check_time(time_t start, int timeout)
{
	time_t now = time(NULL);
	return now > start + timeout;
}
115
116 /*
117 * flag values for allocate_vm_stuff()
118 */
119 #define REGIONS 1
120 #define OBJECTS 2
121
122 static void
123 allocate_vm_stuff(int flags)
124 {
125 uint64_t alloc_size, i;
126 time_t start = time(NULL);
127 mach_vm_address_t give_back[NUM_GIVE_BACK];
128 char *msg;
129
130 if (flags == REGIONS) {
131 alloc_size = ALLOCATION_SIZE_VM_REGION;
132 msg = "";
133 } else {
134 alloc_size = ALLOCATION_SIZE_VM_OBJECT;
135 msg = " each region backed by a VM object";
136 }
137
138 printf("[%d] Allocating VM regions, each of size %lld KB%s\n", getpid(), (alloc_size >> 10), msg);
139
140 for (i = 0;; i++) {
141 mach_vm_address_t addr = (mach_vm_address_t)NULL;
142
143 /* Alternate VM tags between consecutive regions to prevent coalescing */
144 int vmflags = VM_MAKE_TAG((i % 2)? VM_TAG1: VM_TAG2) | VM_FLAGS_ANYWHERE;
145
146 if ((mach_vm_allocate(mach_task_self(), &addr, (mach_vm_size_t)alloc_size, vmflags)) != KERN_SUCCESS) {
147 break;
148 }
149
150 /*
151 * If interested in objects, touch the region so the VM object is created,
152 * then free this page. Keeps us from holding a lot of dirty pages.
153 */
154 if (flags == OBJECTS) {
155 *((int *)addr) = 0;
156 madvise((void *)addr, (size_t)alloc_size, MADV_FREE);
157 }
158
159 if (check_time(start, TIMEOUT_SECS)) {
160 printf("[%d] child timeout during allocations\n", getpid());
161 exit(0);
162 }
163
164 if (i < NUM_GIVE_BACK) {
165 give_back[i] = addr;
166 }
167 }
168
169 /* return some of the resource to avoid O-O-M problems */
170 for (uint64_t j = 0; j < NUM_GIVE_BACK && j < i; ++j) {
171 mach_vm_deallocate(mach_task_self(), give_back[j], (mach_vm_size_t)alloc_size);
172 }
173
174 printf("[%d] Number of allocations: %lld\n", getpid(), i);
175
176 /* Signal to the parent that we're done allocating */
177 kill(getppid(), SIGUSR1);
178
179 while (1) {
180 sleep(2);
181 /* Exit if parent has exited. Ensures child processes don't linger around after the test exits */
182 if (getppid() == 1) {
183 exit(0);
184 }
185
186 if (check_time(start, TIMEOUT_SECS)) {
187 printf("[%d] child timeout while waiting\n", getpid());
188 exit(0);
189 }
190 }
191 }
192
193
194 static void
195 allocate_from_generic_zone(void)
196 {
197 uint64_t i = 0;
198 time_t start = time(NULL);
199 mach_port_t give_back[NUM_GIVE_BACK_PORTS];
200
201 printf("[%d] Allocating mach_ports\n", getpid());
202 for (i = 0;; i++) {
203 mach_port_t port;
204
205 if ((mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &port)) != KERN_SUCCESS) {
206 break;
207 }
208
209 if (check_time(start, TIMEOUT_SECS)) {
210 printf("[%d] child timeout during allocations\n", getpid());
211 exit(0);
212 }
213
214 if (i < NUM_GIVE_BACK_PORTS) {
215 give_back[i] = port;
216 }
217 }
218
219 /* return some of the resource to avoid O-O-M problems */
220 for (uint64_t j = 0; j < NUM_GIVE_BACK_PORTS && j < i; ++j) {
221 int ret;
222 ret = mach_port_mod_refs(mach_task_self(), give_back[j], MACH_PORT_RIGHT_RECEIVE, -1);
223 T_ASSERT_MACH_SUCCESS(ret, "mach_port_mod_refs(RECV_RIGHT, -1)");
224 }
225 printf("[%d] Number of allocations: %lld\n", getpid(), i);
226
227 /* Signal to the parent that we're done allocating */
228 kill(getppid(), SIGUSR1);
229
230 while (1) {
231 sleep(2);
232 /* Exit if parent has exited. Ensures child processes don't linger around after the test exits */
233 if (getppid() == 1) {
234 exit(0);
235 }
236
237 if (check_time(start, TIMEOUT_SECS)) {
238 printf("[%d] child timeout while waiting\n", getpid());
239 exit(0);
240 }
241 }
242 }
243
244 static void
245 print_zone_info(mach_zone_name_t *zn, mach_zone_info_t *zi)
246 {
247 T_LOG("ZONE NAME: %-35sSIZE: %-25lluELEMENTS: %llu",
248 zn->mzn_name, zi->mzi_cur_size, zi->mzi_count);
249 }
250
251 static time_t main_start;
252
/*
 * Timer callback (fires once a second from ds_timer): refresh the stats for
 * every zone the current test cares about into zone_info_array, and the
 * system-wide largest zone into largest_zone_name/largest_zone_info.
 * Prints the per-zone stats on every 5th invocation. Fails the test if the
 * global TIMEOUT_SECS deadline (measured from main_start) has expired.
 */
static void
query_zone_info(void)
{
	int i;
	kern_return_t kr;
	static uint64_t num_calls = 0; /* persists across timer fires; drives the print-every-5th cadence */

	if (check_time(main_start, TIMEOUT_SECS)) {
		T_ASSERT_FAIL("Global timeout expired");
	}
	/* Refresh info for each zone relevant to the current test. */
	for (i = 0; i < current_test.num_zones; i++) {
		kr = mach_zone_info_for_zone(mach_host_self(), current_test.zone_names[i], &(zone_info_array[i]));
		T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "mach_zone_info_for_zone(%s) returned %d [%s]", current_test.zone_names[i].mzn_name, kr, mach_error_string(kr));
	}
	/* Also track the largest zone, consumed later by verify_generic_jetsam_criteria(). */
	kr = mach_zone_info_for_largest_zone(mach_host_self(), &largest_zone_name, &largest_zone_info);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "mach_zone_info_for_largest_zone returned %d [%s]", kr, mach_error_string(kr));

	/* NOTE(review): mach_host_self() is called repeatedly here and its port
	 * right is never deallocated; presumably harmless for a test — confirm. */
	num_calls++;
	if (num_calls % 5 != 0) {
		return;
	}

	/* Print out size and element count for zones relevant to the test */
	for (i = 0; i < current_test.num_zones; i++) {
		print_zone_info(&(current_test.zone_names[i]), &(zone_info_array[i]));
	}
}
280
281 static bool
282 vme_zone_compares_to_vm_objects(void)
283 {
284 int i;
285 uint64_t vm_object_element_count = 0, vm_map_entry_element_count = 0;
286
287 T_LOG("Comparing element counts of \"VM map entries\" and \"vm objects\" zones");
288 for (i = 0; i < current_test.num_zones; i++) {
289 if (!strcmp(current_test.zone_names[i].mzn_name, VME_ZONE)) {
290 vm_map_entry_element_count = zone_info_array[i].mzi_count;
291 } else if (!strcmp(current_test.zone_names[i].mzn_name, VMOBJECTS_ZONE)) {
292 vm_object_element_count = zone_info_array[i].mzi_count;
293 }
294 print_zone_info(&(current_test.zone_names[i]), &(zone_info_array[i]));
295 }
296
297 T_LOG("# VM map entries as percentage of # vm objects = %llu", (vm_map_entry_element_count * 100) / vm_object_element_count);
298 if (vm_map_entry_element_count >= ((vm_object_element_count * VMENTRY_TO_VMOBJECT_COMPARISON_RATIO) / 100)) {
299 T_LOG("Number of VM map entries is comparable to vm objects\n\n");
300 return true;
301 }
302 T_LOG("Number of VM map entries is NOT comparable to vm objects\n\n");
303 return false;
304 }
305
306 static bool
307 verify_generic_jetsam_criteria(void)
308 {
309 T_LOG("Largest zone info");
310 print_zone_info(&largest_zone_name, &largest_zone_info);
311
312 /* If VM map entries is not the largest zone */
313 if (strcmp(largest_zone_name.mzn_name, VME_ZONE)) {
314 /* If vm objects is the largest zone and the VM map entries zone had comparable # of elements, return false */
315 if (!strcmp(largest_zone_name.mzn_name, VMOBJECTS_ZONE) && vme_zone_compares_to_vm_objects()) {
316 return false;
317 }
318 return true;
319 }
320 return false;
321 }
322
/*
 * First-stage teardown, invoked from the ktrace event handler as soon as a
 * jetsam of interest is observed: raise the jetsam limit back up so no
 * further jetsams fire, end the ktrace session, and cancel the dispatch
 * sources that spawn children and poll zone info. Full cleanup (reaping
 * children, zone GC) happens later in cleanup_and_end_test() via T_ATEND.
 */
static void
begin_test_teardown(void)
{
	int ret, old_limit = 95;

	/*
	 * Restore kern.zone_map_jetsam_limit to the default high value, to prevent further jetsams.
	 * We should change the value of old_limit if ZONE_MAP_JETSAM_LIMIT_DEFAULT changes in the kernel.
	 * We don't have a way to capture what the original value was before the test, because the
	 * T_META_SYSCTL_INT macro will have changed the value before the test starts running.
	 */
	ret = sysctlbyname("kern.zone_map_jetsam_limit", NULL, NULL, &old_limit, sizeof(old_limit));
	T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "sysctl kern.zone_map_jetsam_limit failed");
	T_LOG("kern.zone_map_jetsam_limit set to %d%%", old_limit);


	/* End ktrace session */
	if (session != NULL) {
		T_LOG("Ending ktrace session...");
		ktrace_end(session, 1);
	}

	/* Run synchronously on the spawn queue so no child can be spawned after this returns. */
	dispatch_sync(dq_spawn, ^{
		T_LOG("Cancelling dispatch sources...");

		/* Disable the timer that queries and prints zone info periodically */
		if (ds_timer != NULL) {
			dispatch_source_cancel(ds_timer);
		}

		/* Disable signal handler that spawns child processes */
		if (ds_signal != NULL) {
			/*
			 * No need for a dispatch_source_cancel_and_wait here.
			 * We're queueing this on the spawn queue, so no further
			 * processes will be spawned after the source is cancelled.
			 */
			dispatch_source_cancel(ds_signal);
		}
	});
}
364
/*
 * Final teardown, registered via T_ATEND in run_test(): cancels the dispatch
 * sources, kills and reaps every spawned child, forces a zone GC so the next
 * test (or the system) starts from a clean zone map, ends the ktrace
 * session, and logs the final zone stats. Idempotent: the test_ending flag
 * (under test_mtx) makes only the first caller do the work.
 */
static void
cleanup_and_end_test(void)
{
	int i;

	/*
	 * The atend handler executes on a different dispatch queue.
	 * We want to do the cleanup only once.
	 */
	pthread_mutex_lock(&test_mtx);
	if (test_ending) {
		pthread_mutex_unlock(&test_mtx);
		return;
	}
	test_ending = TRUE;
	pthread_mutex_unlock(&test_mtx);

	dispatch_async(dq_spawn, ^{
		/*
		 * If the test succeeds, we will call dispatch_source_cancel twice, which is fine since
		 * the operation is idempotent. Just make sure to not drop all references to the dispatch sources
		 * (in this case we're not, we have globals holding references to them), or we can end up with
		 * use-after-frees which would be a problem.
		 */
		/* Disable the timer that queries and prints zone info periodically */
		if (ds_timer != NULL) {
			dispatch_source_cancel(ds_timer);
		}

		/* Disable signal handler that spawns child processes */
		if (ds_signal != NULL) {
			dispatch_source_cancel(ds_signal);
		}
	});

	pthread_mutex_lock(&test_mtx);
	T_LOG("Number of processes spawned: %d", num_children);
	T_LOG("Killing child processes...");

	/* Kill all the child processes that were spawned */
	for (i = 0; i < num_children; i++) {
		pid_t pid = child_pids[i];
		int status = 0;

		/*
		 * Kill and wait for each child to exit
		 * Without this we were seeing hw_lock_bit timeouts in BATS.
		 */
		kill(pid, SIGKILL);
		/*
		 * Drop the lock while blocked in waitpid so other queues aren't
		 * stalled; test_ending is already set, so num_children cannot grow.
		 */
		pthread_mutex_unlock(&test_mtx);
		if (waitpid(pid, &status, 0) < 0) {
			T_LOG("waitpid returned status %d", status);
		}
		pthread_mutex_lock(&test_mtx);
	}
	/* NOTE(review): test_mtx is re-acquired by the loop above but never
	 * released past this point; the test is exiting, so presumably benign —
	 * confirm no later code paths need the lock. */
	sleep(1);

	/* Force zone_gc before starting test for another zone or exiting */
	mach_zone_force_gc(mach_host_self());

	/* End ktrace session */
	if (session != NULL) {
		ktrace_end(session, 1);
	}

	if (current_test.num_zones > 0) {
		T_LOG("Relevant zone info at the end of the test:");
		for (i = 0; i < current_test.num_zones; i++) {
			print_zone_info(&(current_test.zone_names[i]), &(zone_info_array[i]));
		}
	}
}
437
/*
 * Configure and start a ktrace session (callbacks run on the main queue)
 * listening for memorystatus_do_kill (jetsam) kernel trace events:
 *  - a zone-map-exhaustion kill begins teardown and, for the VME/vm-objects
 *    tests, asserts that either one of our children was targeted or the
 *    generic-jetsam criteria hold;
 *  - any other jetsam of one of our children ends the test early with
 *    T_PASS, since the parent would otherwise wait forever for that
 *    child's SIGUSR1.
 * The completion handler destroys the session and calls T_END.
 */
static void
setup_ktrace_session(void)
{
	int ret = 0;

	T_LOG("Setting up ktrace session...");
	session = ktrace_session_create();
	T_QUIET; T_ASSERT_NOTNULL(session, "ktrace_session_create");

	ktrace_set_interactive(session);

	ktrace_set_dropped_events_handler(session, ^{
		T_FAIL("Dropped ktrace events; might have missed an expected jetsam event. Terminating early.");
	});

	ktrace_set_completion_handler(session, ^{
		ktrace_session_destroy(session);
		T_END;
	});

	/* Listen for memorystatus_do_kill trace events */
	ret = ktrace_events_single(session, (BSDDBG_CODE(DBG_BSD_MEMSTAT, BSD_MEMSTAT_DO_KILL)), ^(ktrace_event_t event) {
		int i;
		bool received_jetsam_event = false;

		/*
		 * libktrace does not support DBG_FUNC_START/END in the event filter. It simply ignores it.
		 * So we need to explicitly check for the end event (a successful jetsam kill) here,
		 * instead of passing in ((BSDDBG_CODE(DBG_BSD_MEMSTAT, BSD_MEMSTAT_DO_KILL)) | DBG_FUNC_START).
		 */
		if (!(event->debugid & DBG_FUNC_START)) {
			return;
		}

		/* Check for zone-map-exhaustion jetsam. arg1 = victim pid, arg2 = kill reason. */
		if (event->arg2 == kMemorystatusKilledZoneMapExhaustion) {
			begin_test_teardown();
			T_LOG("[memorystatus_do_kill] jetsam reason: zone-map-exhaustion, pid: %d\n\n", (int)event->arg1);
			if (current_test.test_index == VME_ZONE_TEST || current_test.test_index == VM_OBJECTS_ZONE_TEST) {
				/*
				 * For the VM map entries zone we try to kill the leaking process.
				 * Verify that we jetsammed one of the processes we spawned.
				 *
				 * For the vm objects zone we pick the leaking process via the VM map entries
				 * zone, if the number of vm objects and VM map entries are comparable.
				 * The test simulates this scenario, we should see a targeted jetsam for the
				 * vm objects zone too.
				 */
				pthread_mutex_lock(&test_mtx);
				for (i = 0; i < num_children; i++) {
					if (child_pids[i] == (pid_t)event->arg1) {
						received_jetsam_event = true;
						T_LOG("Received jetsam event for a child");
						break;
					}
				}
				pthread_mutex_unlock(&test_mtx);
				/*
				 * If we didn't see a targeted jetsam, verify that the largest zone actually
				 * fulfilled the criteria for generic jetsams.
				 */
				if (!received_jetsam_event && verify_generic_jetsam_criteria()) {
					received_jetsam_event = true;
					T_LOG("Did not receive jetsam event for a child, but generic jetsam criteria holds");
				}
			} else {
				/* GENERIC_ZONE_TEST: any zone-map-exhaustion kill is acceptable. */
				received_jetsam_event = true;
				T_LOG("Received generic jetsam event");
			}

			T_QUIET; T_ASSERT_TRUE(received_jetsam_event, "Jetsam event not as expected");
		} else {
			/*
			 * The test relies on the children being able to send a signal to the parent, to continue spawning new processes
			 * that leak more zone memory. If a child is jetsammed for some other reason, the parent can get stuck waiting for
			 * a signal from the child, never being able to make progress (We spawn only a single process at a time to rate-limit
			 * the zone memory bloat.). If this happens, the test eventually times out. So if a child is jetsammed for some
			 * reason other than zone-map-exhaustion, end the test early.
			 *
			 * This typically happens when we end up triggering vm-pageshortage jetsams before zone-map-exhaustion jetsams.
			 * Lowering the zone_map_jetsam_limit if the zone map size was initially low should help with this too.
			 * See sysctlbyname("kern.zone_map_jetsam_limit"...) in run_test() below.
			 */
			pthread_mutex_lock(&test_mtx);
			for (i = 0; i < num_children; i++) {
				if (child_pids[i] == (pid_t)event->arg1) {
					begin_test_teardown();
					T_PASS("Child pid %d was jetsammed due to reason %d. Terminating early.",
					    (int)event->arg1, (int)event->arg2);
				}
			}
			pthread_mutex_unlock(&test_mtx);
		}
	});
	T_QUIET; T_ASSERT_POSIX_ZERO(ret, "ktrace_events_single");

	ret = ktrace_start(session, dispatch_get_main_queue());
	T_QUIET; T_ASSERT_POSIX_ZERO(ret, "ktrace_start");
}
537
538 static int
539 query_zone_map_size(void)
540 {
541 int ret;
542 uint64_t zstats[2];
543 size_t zstats_size = sizeof(zstats);
544
545 ret = sysctlbyname("kern.zone_map_size_and_capacity", &zstats, &zstats_size, NULL, 0);
546 T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "sysctl kern.zone_map_size_and_capacity failed");
547
548 T_LOG("Zone map capacity: %-30lldZone map size: %lld [%lld%% full]", zstats[1], zstats[0], (zstats[0] * 100) / zstats[1]);
549
550 #if (TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR)
551 int memstat_level;
552 size_t memstat_level_size = sizeof(memstat_level);
553 ret = sysctlbyname("kern.memorystatus_level", &memstat_level, &memstat_level_size, NULL, 0);
554 T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "sysctl kern.memorystatus_level failed");
555
556 T_LOG("kern.memorystatus_level = %d%%", memstat_level);
557 #endif
558 return (int)(zstats[0] * 100 / zstats[1]);
559 }
560
/*
 * Spawn one more copy of this test binary running the current test's helper
 * ("-n <helper_func>") via dt_launch_tool, recording its pid in child_pids.
 * Guarded by test_mtx and serialized via dq_spawn (the SIGUSR1 source), so
 * only one child is launched at a time; does nothing once test_ending is
 * set. Fails the test if MAX_CHILD_PROCS children have been spawned.
 */
static void
spawn_child_process(void)
{
	pid_t pid = -1;
	char helper_func[50];
	char *launch_tool_args[4];

	pthread_mutex_lock(&test_mtx);
	if (!test_ending) {
		if (num_children == MAX_CHILD_PROCS) {
			/* Unlock before the failing assert so teardown doesn't deadlock on test_mtx. */
			pthread_mutex_unlock(&test_mtx);
			T_ASSERT_FAIL("Spawned too many children. Aborting test");
			/* not reached */
		}

		strlcpy(helper_func, current_test.helper_func, sizeof(helper_func));
		launch_tool_args[0] = testpath;
		launch_tool_args[1] = "-n";
		launch_tool_args[2] = helper_func;
		launch_tool_args[3] = NULL;

		/* Spawn the child process */
		int rc = dt_launch_tool(&pid, launch_tool_args, false, NULL, NULL);
		if (rc != 0) {
			T_LOG("dt_launch tool returned %d with error code %d", rc, errno);
		}
		T_QUIET; T_ASSERT_POSIX_SUCCESS(pid, "dt_launch_tool");

		child_pids[num_children++] = pid;
	}
	pthread_mutex_unlock(&test_mtx);
}
593
594 static void
595 run_test(void)
596 {
597 uint64_t mem;
598 uint32_t testpath_buf_size, pages;
599 int ret, dev, pgsz, initial_zone_occupancy, old_limit, new_limit = 0;
600 size_t sysctl_size;
601
602 T_ATEND(cleanup_and_end_test);
603 T_SETUPBEGIN;
604
605 main_start = time(NULL);
606 dev = 0;
607 sysctl_size = sizeof(dev);
608 ret = sysctlbyname("kern.development", &dev, &sysctl_size, NULL, 0);
609 T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "sysctl kern.development failed");
610 if (dev == 0) {
611 T_SKIP("Skipping test on release kernel");
612 }
613
614 testpath_buf_size = sizeof(testpath);
615 ret = _NSGetExecutablePath(testpath, &testpath_buf_size);
616 T_QUIET; T_ASSERT_POSIX_ZERO(ret, "_NSGetExecutablePath");
617 T_LOG("Executable path: %s", testpath);
618
619 sysctl_size = sizeof(mem);
620 ret = sysctlbyname("hw.memsize", &mem, &sysctl_size, NULL, 0);
621 T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "sysctl hw.memsize failed");
622 T_LOG("hw.memsize: %llu", mem);
623
624 sysctl_size = sizeof(pgsz);
625 ret = sysctlbyname("vm.pagesize", &pgsz, &sysctl_size, NULL, 0);
626 T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "sysctl vm.pagesize failed");
627 T_LOG("vm.pagesize: %d", pgsz);
628
629 sysctl_size = sizeof(pages);
630 ret = sysctlbyname("vm.pages", &pages, &sysctl_size, NULL, 0);
631 T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "sysctl vm.pages failed");
632 T_LOG("vm.pages: %d", pages);
633
634 sysctl_size = sizeof(old_limit);
635 ret = sysctlbyname("kern.zone_map_jetsam_limit", &old_limit, &sysctl_size, NULL, 0);
636 T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "sysctl kern.zone_map_jetsam_limit failed");
637 T_LOG("kern.zone_map_jetsam_limit: %d", old_limit);
638
639 initial_zone_occupancy = query_zone_map_size();
640
641 /* On large memory systems, set the zone maps jetsam limit lower so we can hit it without timing out. */
642 if (mem > (uint64_t)LARGE_MEM_GB * 1024 * 1024 * 1024) {
643 new_limit = LARGE_MEM_JETSAM_LIMIT;
644 }
645
646 /*
647 * If we start out with the zone map < 5% full, aim for 10% as the limit, so we don't time out.
648 * For anything else aim for 2x the initial size, capped by whatever value was set by T_META_SYSCTL_INT,
649 * or LARGE_MEM_JETSAM_LIMIT for large memory systems.
650 */
651 if (initial_zone_occupancy < 5) {
652 new_limit = JETSAM_LIMIT_LOWEST;
653 } else {
654 new_limit = initial_zone_occupancy * 2;
655 }
656
657 if (new_limit > 0 && new_limit < old_limit) {
658 /*
659 * We should be fine messing with the zone_map_jetsam_limit here, i.e. outside of T_META_SYSCTL_INT.
660 * When the test ends, T_META_SYSCTL_INT will restore the zone_map_jetsam_limit to what it was
661 * before the test anyway.
662 */
663 ret = sysctlbyname("kern.zone_map_jetsam_limit", NULL, NULL, &new_limit, sizeof(new_limit));
664 T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "sysctl kern.zone_map_jetsam_limit failed");
665 T_LOG("kern.zone_map_jetsam_limit set to %d%%", new_limit);
666 }
667
668 zone_info_array = (mach_zone_info_array_t) calloc((unsigned long)current_test.num_zones, sizeof *zone_info_array);
669
670 /*
671 * If the timeout specified by T_META_TIMEOUT is hit, the atend handler does not get called.
672 * So we're queueing a dispatch block to fire after TIMEOUT_SECS seconds, so we can exit cleanly.
673 */
674 dispatch_after(dispatch_time(DISPATCH_TIME_NOW, TIMEOUT_SECS * NSEC_PER_SEC), dispatch_get_main_queue(), ^{
675 T_ASSERT_FAIL("Timed out after %d seconds", TIMEOUT_SECS);
676 });
677
678 /*
679 * Create a dispatch source for the signal SIGUSR1. When a child is done allocating zone memory, it
680 * sends SIGUSR1 to the parent. Only then does the parent spawn another child. This prevents us from
681 * spawning many children at once and creating a lot of memory pressure.
682 */
683 signal(SIGUSR1, SIG_IGN);
684 dq_spawn = dispatch_queue_create("spawn_queue", DISPATCH_QUEUE_SERIAL);
685 ds_signal = dispatch_source_create(DISPATCH_SOURCE_TYPE_SIGNAL, SIGUSR1, 0, dq_spawn);
686 T_QUIET; T_ASSERT_NOTNULL(ds_signal, "dispatch_source_create: signal");
687
688 dispatch_source_set_event_handler(ds_signal, ^{
689 (void)query_zone_map_size();
690
691 /* Wait a few seconds before spawning another child. Keeps us from allocating too aggressively */
692 sleep(5);
693 spawn_child_process();
694 });
695 dispatch_activate(ds_signal);
696
697 /* Timer to query jetsam-relevant zone info every second. Print it every 5 seconds. */
698 ds_timer = dispatch_source_create(DISPATCH_SOURCE_TYPE_TIMER, 0, 0, dispatch_queue_create("timer_queue", NULL));
699 T_QUIET; T_ASSERT_NOTNULL(ds_timer, "dispatch_source_create: timer");
700 dispatch_source_set_timer(ds_timer, dispatch_time(DISPATCH_TIME_NOW, NSEC_PER_SEC), NSEC_PER_SEC, 0);
701
702 dispatch_source_set_event_handler(ds_timer, ^{
703 query_zone_info();
704 });
705 dispatch_activate(ds_timer);
706
707 /* Set up a ktrace session to listen for jetsam events */
708 setup_ktrace_session();
709
710 T_SETUPEND;
711
712 /* Spawn the first child process */
713 T_LOG("Spawning child processes to allocate zone memory...\n\n");
714 spawn_child_process();
715
716 dispatch_main();
717 }
718
719 static void
720 move_to_idle_band(void)
721 {
722 memorystatus_priority_properties_t props;
723
724 /*
725 * We want to move the processes we spawn into the idle band, so that jetsam can target them first.
726 * This prevents other important BATS tasks from getting killed, specially in LTE where we have very few
727 * processes running.
728 *
729 * This is only needed for tests which (are likely to) lead us down the generic jetsam path.
730 */
731 props.priority = JETSAM_PRIORITY_IDLE;
732 props.user_data = 0;
733
734 if (memorystatus_control(MEMORYSTATUS_CMD_SET_PRIORITY_PROPERTIES, getpid(), 0, &props, sizeof(props))) {
735 printf("memorystatus call to change jetsam priority failed\n");
736 exit(-1);
737 }
738 }
739
/* Child entry point (launched with "-n allocate_vm_regions"): move to the
 * idle jetsam band, then leak large VM regions to bloat "VM map entries". */
T_HELPER_DECL(allocate_vm_regions, "allocates VM regions")
{
	move_to_idle_band();
	allocate_vm_stuff(REGIONS);
}
745
/* Child entry point (launched with "-n allocate_vm_objects"): move to the
 * idle jetsam band, then leak small touched regions to bloat both the
 * "vm objects" and "VM map entries" zones. */
T_HELPER_DECL(allocate_vm_objects, "allocates VM objects and VM regions")
{
	move_to_idle_band();
	allocate_vm_stuff(OBJECTS);
}
751
/* Child entry point (launched with "-n allocate_from_generic_zone"): move to
 * the idle jetsam band, then leak mach ports to bloat a generic zone. */
T_HELPER_DECL(allocate_from_generic_zone, "allocates from a generic zone")
{
	move_to_idle_band();
	allocate_from_generic_zone();
}
757
758 /*
759 * T_META_SYSCTL_INT(ZONEMAP_JETSAM_LIMIT_SYSCTL) changes the zone_map_jetsam_limit to a
760 * lower value, so that the test can complete faster.
761 * The test allocates zone memory pretty aggressively which can cause the system to panic
762 * if the jetsam limit is quite high; a lower value keeps us from panicking.
763 */
T_DECL( memorystatus_vme_zone_test,
    "allocates elements from the VM map entries zone, verifies zone-map-exhaustion jetsams",
    T_META_ASROOT(true),
    T_META_TIMEOUT(1800),
    /* T_META_LTEPHASE(LTE_POSTINIT),
     */
    T_META_SYSCTL_INT(ZONEMAP_JETSAM_LIMIT_SYSCTL))
{
	/* Track a single zone ("VM map entries"); children run allocate_vm_regions,
	 * and the kernel is expected to take the targeted-jetsam path. */
	current_test = (test_config_struct) {
		.test_index = VME_ZONE_TEST,
		.helper_func = VME_ZONE_TEST_OPT,
		.num_zones = 1,
		.zone_names = (mach_zone_name_t[]){
			{ .mzn_name = VME_ZONE }
		}
	};
	run_test();
}
782
T_DECL( memorystatus_vm_objects_zone_test,
    "allocates elements from the VM objects and the VM map entries zones, verifies zone-map-exhaustion jetsams",
    T_META_ASROOT(true),
    T_META_TIMEOUT(1800),
    /* T_META_LTEPHASE(LTE_POSTINIT),
     */
    T_META_SYSCTL_INT(ZONEMAP_JETSAM_LIMIT_SYSCTL))
{
	/* Track both zones; children run allocate_vm_objects, and the kernel is
	 * expected to pick the leaker via the comparable VM map entry count. */
	current_test = (test_config_struct) {
		.test_index = VM_OBJECTS_ZONE_TEST,
		.helper_func = VM_OBJECTS_ZONE_TEST_OPT,
		.num_zones = 2,
		.zone_names = (mach_zone_name_t[]){
			{ .mzn_name = VME_ZONE },
			{ .mzn_name = VMOBJECTS_ZONE}
		}
	};
	run_test();
}
802
T_DECL( memorystatus_generic_zone_test,
    "allocates elements from a zone that doesn't have an optimized jetsam path, verifies zone-map-exhaustion jetsams",
    T_META_ASROOT(true),
    T_META_TIMEOUT(1800),
    /* T_META_LTEPHASE(LTE_POSTINIT),
     */
    T_META_SYSCTL_INT(ZONEMAP_JETSAM_LIMIT_SYSCTL))
{
	/* No zones to track; children run allocate_from_generic_zone, and any
	 * zone-map-exhaustion kill (generic path) passes the test. */
	current_test = (test_config_struct) {
		.test_index = GENERIC_ZONE_TEST,
		.helper_func = GENERIC_ZONE_TEST_OPT,
		.num_zones = 0,
		.zone_names = NULL
	};
	run_test();
}