#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <signal.h>
#include <unistd.h>
#include <limits.h>
#include <time.h>
#include <pthread.h>
#include <TargetConditionals.h>
#include <mach/mach_vm.h>
#include <mach/mach_port.h>
#include <mach/mach_host.h>
#include <mach/mach_error.h>
#include <mach-o/dyld.h>
#include <sys/sysctl.h>
#include <sys/kdebug.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <sys/kern_memorystatus.h>
#include <ktrace/session.h>
#include <dispatch/private.h>

#include <darwintest.h>
#include <darwintest_utils.h>
T_GLOBAL_META(
    T_META_NAMESPACE("xnu.vm"),
    T_META_CHECK_LEAKS(false)
    );
#define TIMEOUT_SECS (10 * 60) /* abort if test takes > 10 minutes */
#if (TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR)
#define ALLOCATION_SIZE_VM_REGION (16*1024)       /* 16 KB */
#define ALLOCATION_SIZE_VM_OBJECT ALLOCATION_SIZE_VM_REGION
#else
#define ALLOCATION_SIZE_VM_REGION (1024*1024*100) /* 100 MB */
#define ALLOCATION_SIZE_VM_OBJECT (16*1024)       /* 16 KB */
#endif
#define MAX_CHILD_PROCS 100

#define NUM_GIVE_BACK 5
#define NUM_GIVE_BACK_PORTS 20
/* 60% is too high on bridgeOS to achieve without vm-pageshortage jetsams. Set it to 40%. */
#if TARGET_OS_BRIDGE
#define ZONEMAP_JETSAM_LIMIT_SYSCTL "kern.zone_map_jetsam_limit=40"
#else
#define ZONEMAP_JETSAM_LIMIT_SYSCTL "kern.zone_map_jetsam_limit=60"
#endif
#define VME_ZONE_TEST_OPT "allocate_vm_regions"
#define VM_OBJECTS_ZONE_TEST_OPT "allocate_vm_objects"
#define GENERIC_ZONE_TEST_OPT "allocate_from_generic_zone"

#define VME_ZONE "VM map entries"
#define VMOBJECTS_ZONE "vm objects"
#define VMENTRY_TO_VMOBJECT_COMPARISON_RATIO 98

#define LARGE_MEM_GB 32
#define LARGE_MEM_JETSAM_LIMIT 40
#define JETSAM_LIMIT_LOWEST 10
enum {
    VME_ZONE_TEST = 0,
    VM_OBJECTS_ZONE_TEST,
    GENERIC_ZONE_TEST
};

typedef struct test_config_struct {
    int test_index;
    int num_zones;
    const char *helper_func;
    mach_zone_name_array_t zone_names;
} test_config_struct;

static test_config_struct current_test;
static dispatch_source_t ds_signal = NULL;
static dispatch_source_t ds_timer = NULL;
static dispatch_queue_t dq_spawn = NULL;
static ktrace_session_t session = NULL;

static mach_zone_info_array_t zone_info_array = NULL;
static mach_zone_name_t largest_zone_name;
static mach_zone_info_t largest_zone_info;

static pthread_mutex_t test_mtx = PTHREAD_MUTEX_INITIALIZER; /* protects the next 3 things */
static bool test_ending = false;
static int num_children = 0;
static pid_t child_pids[MAX_CHILD_PROCS];

static char testpath[PATH_MAX];
static void allocate_vm_stuff(int);
static void allocate_from_generic_zone(void);
static void begin_test_teardown(void);
static void cleanup_and_end_test(void);
static void setup_ktrace_session(void);
static void spawn_child_process(void);
static void run_test(void);
static bool verify_generic_jetsam_criteria(void);
static bool vme_zone_compares_to_vm_objects(void);
static int query_zone_map_size(void);
static void query_zone_info(void);
static void print_zone_info(mach_zone_name_t *zn, mach_zone_info_t *zi);
extern void mach_zone_force_gc(host_t host);
extern kern_return_t mach_zone_info_for_largest_zone(
	host_t host,
	mach_zone_name_t *name,
	mach_zone_info_t *info
	);
static bool
check_time(time_t start, int timeout)
{
    return start + timeout < time(NULL);
}
/*
 * flag values for allocate_vm_stuff()
 */
#define REGIONS 1
#define OBJECTS 2
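/*
 * Child helper: bloat the "VM map entries" zone (and, with OBJECTS, the vm
 * objects zone too) by allocating VM regions until allocation fails, then
 * signal the parent and idle until the parent or jetsam kills us.
 */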
static void
allocate_vm_stuff(int flags)
{
    uint64_t alloc_size, i;
    time_t start = time(NULL);
    mach_vm_address_t give_back[NUM_GIVE_BACK];
    char *msg = "";

    if (flags == REGIONS) {
        alloc_size = ALLOCATION_SIZE_VM_REGION;
    } else {
        alloc_size = ALLOCATION_SIZE_VM_OBJECT;
        msg = " each region backed by a VM object";
    }

    printf("[%d] Allocating VM regions, each of size %lld KB%s\n", getpid(), (alloc_size >> 10), msg);

    for (i = 0;; i++) {
        mach_vm_address_t addr = (mach_vm_address_t)NULL;

        /* Alternate VM tags between consecutive regions to prevent coalescing */
        int vmflags = VM_MAKE_TAG((i % 2) ? VM_TAG1 : VM_TAG2) | VM_FLAGS_ANYWHERE;

        if ((mach_vm_allocate(mach_task_self(), &addr, (mach_vm_size_t)alloc_size, vmflags)) != KERN_SUCCESS) {
            break;
        }

        /*
         * If interested in objects, touch the region so the VM object is created,
         * then free this page. Keeps us from holding a lot of dirty pages.
         */
        if (flags == OBJECTS) {
            *((int *)addr) = 0;
            madvise((void *)addr, (size_t)alloc_size, MADV_FREE);
        }

        if (check_time(start, TIMEOUT_SECS)) {
            printf("[%d] child timeout during allocations\n", getpid());
            exit(0);
        }

        if (i < NUM_GIVE_BACK) {
            give_back[i] = addr;
        }
    }

    /* return some of the resource to avoid O-O-M problems */
    for (uint64_t j = 0; j < NUM_GIVE_BACK && j < i; ++j) {
        mach_vm_deallocate(mach_task_self(), give_back[j], (mach_vm_size_t)alloc_size);
    }
    printf("[%d] Number of allocations: %lld\n", getpid(), i);

    /* Signal to the parent that we're done allocating */
    kill(getppid(), SIGUSR1);

    while (1) {
        sleep(2);
        /* Exit if parent has exited. Ensures child processes don't linger around after the test exits */
        if (getppid() == 1) {
            exit(0);
        }

        if (check_time(start, TIMEOUT_SECS)) {
            printf("[%d] child timeout while waiting\n", getpid());
            exit(0);
        }
    }
}
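/*
 * Child helper: bloat a generic zone by allocating mach port receive rights
 * until allocation fails, then signal the parent and idle until the parent
 * or jetsam kills us.
 */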
static void
allocate_from_generic_zone(void)
{
    uint64_t i;
    kern_return_t ret;
    time_t start = time(NULL);
    mach_port_t give_back[NUM_GIVE_BACK_PORTS];

    printf("[%d] Allocating mach_ports\n", getpid());

    for (i = 0;; i++) {
        mach_port_t port;

        if ((mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &port)) != KERN_SUCCESS) {
            break;
        }

        if (check_time(start, TIMEOUT_SECS)) {
            printf("[%d] child timeout during allocations\n", getpid());
            exit(0);
        }

        if (i < NUM_GIVE_BACK_PORTS) {
            give_back[i] = port;
        }
    }

    /* return some of the resource to avoid O-O-M problems */
    for (uint64_t j = 0; j < NUM_GIVE_BACK_PORTS && j < i; ++j) {
        ret = mach_port_mod_refs(mach_task_self(), give_back[j], MACH_PORT_RIGHT_RECEIVE, -1);
        T_ASSERT_MACH_SUCCESS(ret, "mach_port_mod_refs(RECV_RIGHT, -1)");
    }
    printf("[%d] Number of allocations: %lld\n", getpid(), i);

    /* Signal to the parent that we're done allocating */
    kill(getppid(), SIGUSR1);

    while (1) {
        sleep(2);
        /* Exit if parent has exited. Ensures child processes don't linger around after the test exits */
        if (getppid() == 1) {
            exit(0);
        }

        if (check_time(start, TIMEOUT_SECS)) {
            printf("[%d] child timeout while waiting\n", getpid());
            exit(0);
        }
    }
}
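/* Log the current size and element count of a single zone. */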
static void
print_zone_info(mach_zone_name_t *zn, mach_zone_info_t *zi)
{
    T_LOG("ZONE NAME: %-35sSIZE: %-25lluELEMENTS: %llu",
        zn->mzn_name, zi->mzi_cur_size, zi->mzi_count);
}
static time_t main_start;
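/*
 * Fetch zone info for the zones relevant to the current test, plus the
 * largest zone in the system, and log it on every fifth call.
 */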
static void
query_zone_info(void)
{
    int i;
    kern_return_t kr;
    static uint64_t num_calls = 0;
    num_calls++;

    if (check_time(main_start, TIMEOUT_SECS)) {
        T_ASSERT_FAIL("Global timeout expired");
    }
    for (i = 0; i < current_test.num_zones; i++) {
        kr = mach_zone_info_for_zone(mach_host_self(), current_test.zone_names[i], &(zone_info_array[i]));
        T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "mach_zone_info_for_zone(%s) returned %d [%s]", current_test.zone_names[i].mzn_name, kr, mach_error_string(kr));
    }
    kr = mach_zone_info_for_largest_zone(mach_host_self(), &largest_zone_name, &largest_zone_info);
    T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "mach_zone_info_for_largest_zone returned %d [%s]", kr, mach_error_string(kr));

    if (num_calls % 5 != 0) {
        return;
    }

    /* Print out size and element count for zones relevant to the test */
    for (i = 0; i < current_test.num_zones; i++) {
        print_zone_info(&(current_test.zone_names[i]), &(zone_info_array[i]));
    }
}
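/*
 * Returns true if the "VM map entries" zone holds at least
 * VMENTRY_TO_VMOBJECT_COMPARISON_RATIO percent as many elements as the
 * "vm objects" zone.
 */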
static bool
vme_zone_compares_to_vm_objects(void)
{
    int i;
    uint64_t vm_object_element_count = 0, vm_map_entry_element_count = 0;

    T_LOG("Comparing element counts of \"VM map entries\" and \"vm objects\" zones");
    for (i = 0; i < current_test.num_zones; i++) {
        if (!strcmp(current_test.zone_names[i].mzn_name, VME_ZONE)) {
            vm_map_entry_element_count = zone_info_array[i].mzi_count;
        } else if (!strcmp(current_test.zone_names[i].mzn_name, VMOBJECTS_ZONE)) {
            vm_object_element_count = zone_info_array[i].mzi_count;
        }
        print_zone_info(&(current_test.zone_names[i]), &(zone_info_array[i]));
    }

    T_LOG("# VM map entries as percentage of # vm objects = %llu", (vm_map_entry_element_count * 100) / vm_object_element_count);
    if (vm_map_entry_element_count >= ((vm_object_element_count * VMENTRY_TO_VMOBJECT_COMPARISON_RATIO) / 100)) {
        T_LOG("Number of VM map entries is comparable to vm objects\n\n");
        return true;
    }
    T_LOG("Number of VM map entries is NOT comparable to vm objects\n\n");
    return false;
}
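/*
 * Returns true if the largest zone justifies a generic (non-targeted)
 * zone-map-exhaustion jetsam.
 */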
static bool
verify_generic_jetsam_criteria(void)
{
    T_LOG("Largest zone info");
    print_zone_info(&largest_zone_name, &largest_zone_info);

    /* If VM map entries is not the largest zone */
    if (strcmp(largest_zone_name.mzn_name, VME_ZONE)) {
        /* If vm objects is the largest zone and the VM map entries zone had comparable # of elements, return false */
        if (!strcmp(largest_zone_name.mzn_name, VMOBJECTS_ZONE) && vme_zone_compares_to_vm_objects()) {
            return false;
        }
        return true;
    }
    return false;
}
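/*
 * Stop applying zone pressure: raise the jetsam limit back up, end the
 * ktrace session and cancel the sources that spawn children and poll zone info.
 */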
static void
begin_test_teardown(void)
{
    int ret, old_limit = 95;

    /*
     * Restore kern.zone_map_jetsam_limit to the default high value, to prevent further jetsams.
     * We should change the value of old_limit if ZONE_MAP_JETSAM_LIMIT_DEFAULT changes in the kernel.
     * We don't have a way to capture what the original value was before the test, because the
     * T_META_SYSCTL_INT macro will have changed the value before the test starts running.
     */
    ret = sysctlbyname("kern.zone_map_jetsam_limit", NULL, NULL, &old_limit, sizeof(old_limit));
    T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "sysctl kern.zone_map_jetsam_limit failed");
    T_LOG("kern.zone_map_jetsam_limit set to %d%%", old_limit);

    /* End ktrace session */
    if (session != NULL) {
        T_LOG("Ending ktrace session...");
        ktrace_end(session, 1);
    }

    dispatch_sync(dq_spawn, ^{
        T_LOG("Cancelling dispatch sources...");

        /* Disable the timer that queries and prints zone info periodically */
        if (ds_timer != NULL) {
            dispatch_source_cancel(ds_timer);
        }

        /* Disable signal handler that spawns child processes */
        if (ds_signal != NULL) {
            /*
             * No need for a dispatch_source_cancel_and_wait here.
             * We're queueing this on the spawn queue, so no further
             * processes will be spawned after the source is cancelled.
             */
            dispatch_source_cancel(ds_signal);
        }
    });
}
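/*
 * T_ATEND handler: cancel the dispatch sources, kill and reap every child
 * we spawned, force a zone GC and log the final zone info.
 */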
static void
cleanup_and_end_test(void)
{
    int i;

    /*
     * The atend handler executes on a different dispatch queue.
     * We want to do the cleanup only once.
     */
    pthread_mutex_lock(&test_mtx);
    if (test_ending) {
        pthread_mutex_unlock(&test_mtx);
        return;
    }
    test_ending = true;
    pthread_mutex_unlock(&test_mtx);

    dispatch_async(dq_spawn, ^{
        /*
         * If the test succeeds, we will call dispatch_source_cancel twice, which is fine since
         * the operation is idempotent. Just make sure to not drop all references to the dispatch sources
         * (in this case we're not, we have globals holding references to them), or we can end up with
         * use-after-frees which would be a problem.
         */
        /* Disable the timer that queries and prints zone info periodically */
        if (ds_timer != NULL) {
            dispatch_source_cancel(ds_timer);
        }

        /* Disable signal handler that spawns child processes */
        if (ds_signal != NULL) {
            dispatch_source_cancel(ds_signal);
        }
    });

    pthread_mutex_lock(&test_mtx);
    T_LOG("Number of processes spawned: %d", num_children);
    T_LOG("Killing child processes...");

    /* Kill all the child processes that were spawned */
    for (i = 0; i < num_children; i++) {
        pid_t pid = child_pids[i];
        int status = 0;

        /*
         * Kill and wait for each child to exit
         * Without this we were seeing hw_lock_bit timeouts in BATS.
         */
        kill(pid, SIGKILL);
        pthread_mutex_unlock(&test_mtx);
        if (waitpid(pid, &status, 0) < 0) {
            T_LOG("waitpid returned status %d", status);
        }
        pthread_mutex_lock(&test_mtx);
    }
    pthread_mutex_unlock(&test_mtx);

    /* Force zone_gc before starting test for another zone or exiting */
    mach_zone_force_gc(mach_host_self());

    /* End ktrace session */
    if (session != NULL) {
        ktrace_end(session, 1);
    }

    if (current_test.num_zones > 0) {
        T_LOG("Relevant zone info at the end of the test:");
        for (i = 0; i < current_test.num_zones; i++) {
            print_zone_info(&(current_test.zone_names[i]), &(zone_info_array[i]));
        }
    }
}
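/*
 * Listen for memorystatus_do_kill events. A zone-map-exhaustion kill must
 * target one of our children (or satisfy the generic jetsam criteria); any
 * other jetsam of a child ends the test early.
 */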
static void
setup_ktrace_session(void)
{
    int ret = 0;

    T_LOG("Setting up ktrace session...");
    session = ktrace_session_create();
    T_QUIET; T_ASSERT_NOTNULL(session, "ktrace_session_create");

    ktrace_set_interactive(session);

    ktrace_set_dropped_events_handler(session, ^{
        T_FAIL("Dropped ktrace events; might have missed an expected jetsam event. Terminating early.");
    });

    ktrace_set_completion_handler(session, ^{
        ktrace_session_destroy(session);
    });

    /* Listen for memorystatus_do_kill trace events */
    ret = ktrace_events_single(session, (BSDDBG_CODE(DBG_BSD_MEMSTAT, BSD_MEMSTAT_DO_KILL)), ^(ktrace_event_t event) {
        int i;
        bool received_jetsam_event = false;

        /*
         * libktrace does not support DBG_FUNC_START/END in the event filter. It simply ignores it.
         * So we need to explicitly check for the end event (a successful jetsam kill) here,
         * instead of passing in ((BSDDBG_CODE(DBG_BSD_MEMSTAT, BSD_MEMSTAT_DO_KILL)) | DBG_FUNC_START).
         */
        if (!(event->debugid & DBG_FUNC_START)) {
            return;
        }

        /* Check for zone-map-exhaustion jetsam. */
        if (event->arg2 == kMemorystatusKilledZoneMapExhaustion) {
            begin_test_teardown();
            T_LOG("[memorystatus_do_kill] jetsam reason: zone-map-exhaustion, pid: %d\n\n", (int)event->arg1);
            if (current_test.test_index == VME_ZONE_TEST || current_test.test_index == VM_OBJECTS_ZONE_TEST) {
                /*
                 * For the VM map entries zone we try to kill the leaking process.
                 * Verify that we jetsammed one of the processes we spawned.
                 *
                 * For the vm objects zone we pick the leaking process via the VM map entries
                 * zone, if the number of vm objects and VM map entries are comparable.
                 * The test simulates this scenario, we should see a targeted jetsam for the
                 * vm objects zone too.
                 */
                pthread_mutex_lock(&test_mtx);
                for (i = 0; i < num_children; i++) {
                    if (child_pids[i] == (pid_t)event->arg1) {
                        received_jetsam_event = true;
                        T_LOG("Received jetsam event for a child");
                        break;
                    }
                }
                pthread_mutex_unlock(&test_mtx);

                /*
                 * If we didn't see a targeted jetsam, verify that the largest zone actually
                 * fulfilled the criteria for generic jetsams.
                 */
                if (!received_jetsam_event && verify_generic_jetsam_criteria()) {
                    received_jetsam_event = true;
                    T_LOG("Did not receive jetsam event for a child, but generic jetsam criteria holds");
                }
            } else {
                received_jetsam_event = true;
                T_LOG("Received generic jetsam event");
            }

            T_QUIET; T_ASSERT_TRUE(received_jetsam_event, "Jetsam event not as expected");
            return;
        }

        /*
         * The test relies on the children being able to send a signal to the parent, to continue spawning new processes
         * that leak more zone memory. If a child is jetsammed for some other reason, the parent can get stuck waiting for
         * a signal from the child, never being able to make progress (We spawn only a single process at a time to rate-limit
         * the zone memory bloat.). If this happens, the test eventually times out. So if a child is jetsammed for some
         * reason other than zone-map-exhaustion, end the test early.
         *
         * This typically happens when we end up triggering vm-pageshortage jetsams before zone-map-exhaustion jetsams.
         * Lowering the zone_map_jetsam_limit if the zone map size was initially low should help with this too.
         * See sysctlbyname("kern.zone_map_jetsam_limit"...) in run_test() below.
         */
        pthread_mutex_lock(&test_mtx);
        for (i = 0; i < num_children; i++) {
            if (child_pids[i] == (pid_t)event->arg1) {
                begin_test_teardown();
                T_PASS("Child pid %d was jetsammed due to reason %d. Terminating early.",
                    (int)event->arg1, (int)event->arg2);
                T_END;
            }
        }
        pthread_mutex_unlock(&test_mtx);
    });
    T_QUIET; T_ASSERT_POSIX_ZERO(ret, "ktrace_events_single");

    ret = ktrace_start(session, dispatch_get_main_queue());
    T_QUIET; T_ASSERT_POSIX_ZERO(ret, "ktrace_start");
}
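/*
 * Log the zone map size and capacity (plus the memorystatus level on
 * embedded devices) and return the zone map occupancy as a percentage.
 */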
static int
query_zone_map_size(void)
{
    int ret;
    uint64_t zstats[2];
    size_t zstats_size = sizeof(zstats);

    ret = sysctlbyname("kern.zone_map_size_and_capacity", &zstats, &zstats_size, NULL, 0);
    T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "sysctl kern.zone_map_size_and_capacity failed");

    T_LOG("Zone map capacity: %-30lldZone map size: %lld [%lld%% full]", zstats[1], zstats[0], (zstats[0] * 100) / zstats[1]);

#if (TARGET_OS_IPHONE && !TARGET_OS_SIMULATOR)
    int memstat_level;
    size_t memstat_level_size = sizeof(memstat_level);
    ret = sysctlbyname("kern.memorystatus_level", &memstat_level, &memstat_level_size, NULL, 0);
    T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "sysctl kern.memorystatus_level failed");

    T_LOG("kern.memorystatus_level = %d%%", memstat_level);
#endif

    return (int)(zstats[0] * 100 / zstats[1]);
}
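/*
 * Launch another copy of this test binary running the current test's helper
 * function, and record its pid so teardown can kill it.
 */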
static void
spawn_child_process(void)
{
    pid_t pid = -1;
    char helper_func[50];
    char *launch_tool_args[4];

    pthread_mutex_lock(&test_mtx);
    if (!test_ending) {
        if (num_children == MAX_CHILD_PROCS) {
            pthread_mutex_unlock(&test_mtx);
            T_ASSERT_FAIL("Spawned too many children. Aborting test");
        }

        strlcpy(helper_func, current_test.helper_func, sizeof(helper_func));
        launch_tool_args[0] = testpath;
        launch_tool_args[1] = "-n";
        launch_tool_args[2] = helper_func;
        launch_tool_args[3] = NULL;

        /* Spawn the child process */
        int rc = dt_launch_tool(&pid, launch_tool_args, false, NULL, NULL);
        if (rc != 0) {
            T_LOG("dt_launch tool returned %d with error code %d", rc, errno);
        }
        T_QUIET; T_ASSERT_POSIX_SUCCESS(pid, "dt_launch_tool");

        child_pids[num_children++] = pid;
    }
    pthread_mutex_unlock(&test_mtx);
}
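/*
 * Common test driver: skip on release kernels, tune kern.zone_map_jetsam_limit,
 * arm the SIGUSR1 spawner and the zone-info timer, start the ktrace session,
 * spawn the first child, then run the main dispatch loop until a jetsam event
 * (or the timeout) ends the test.
 */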
static void
run_test(void)
{
    uint64_t mem;
    uint32_t testpath_buf_size, pages;
    int ret, dev, pgsz, initial_zone_occupancy, old_limit, new_limit = 0;
    size_t sysctl_size;

    T_ATEND(cleanup_and_end_test);

    main_start = time(NULL);

    sysctl_size = sizeof(dev);
    ret = sysctlbyname("kern.development", &dev, &sysctl_size, NULL, 0);
    T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "sysctl kern.development failed");
    if (!dev) {
        T_SKIP("Skipping test on release kernel");
    }

    testpath_buf_size = sizeof(testpath);
    ret = _NSGetExecutablePath(testpath, &testpath_buf_size);
    T_QUIET; T_ASSERT_POSIX_ZERO(ret, "_NSGetExecutablePath");
    T_LOG("Executable path: %s", testpath);

    sysctl_size = sizeof(mem);
    ret = sysctlbyname("hw.memsize", &mem, &sysctl_size, NULL, 0);
    T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "sysctl hw.memsize failed");
    T_LOG("hw.memsize: %llu", mem);

    sysctl_size = sizeof(pgsz);
    ret = sysctlbyname("vm.pagesize", &pgsz, &sysctl_size, NULL, 0);
    T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "sysctl vm.pagesize failed");
    T_LOG("vm.pagesize: %d", pgsz);

    sysctl_size = sizeof(pages);
    ret = sysctlbyname("vm.pages", &pages, &sysctl_size, NULL, 0);
    T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "sysctl vm.pages failed");
    T_LOG("vm.pages: %d", pages);

    sysctl_size = sizeof(old_limit);
    ret = sysctlbyname("kern.zone_map_jetsam_limit", &old_limit, &sysctl_size, NULL, 0);
    T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "sysctl kern.zone_map_jetsam_limit failed");
    T_LOG("kern.zone_map_jetsam_limit: %d", old_limit);

    initial_zone_occupancy = query_zone_map_size();

    /* On large memory systems, set the zone map jetsam limit lower so we can hit it without timing out. */
    if (mem > (uint64_t)LARGE_MEM_GB * 1024 * 1024 * 1024) {
        new_limit = LARGE_MEM_JETSAM_LIMIT;
    }

    /*
     * If we start out with the zone map < 5% full, aim for 10% as the limit, so we don't time out.
     * For anything else aim for 2x the initial size, capped by whatever value was set by T_META_SYSCTL_INT,
     * or LARGE_MEM_JETSAM_LIMIT for large memory systems.
     */
    if (initial_zone_occupancy < 5) {
        new_limit = JETSAM_LIMIT_LOWEST;
    } else {
        new_limit = initial_zone_occupancy * 2;
    }

    if (new_limit > 0 && new_limit < old_limit) {
        /*
         * We should be fine messing with the zone_map_jetsam_limit here, i.e. outside of T_META_SYSCTL_INT.
         * When the test ends, T_META_SYSCTL_INT will restore the zone_map_jetsam_limit to what it was
         * before the test anyway.
         */
        ret = sysctlbyname("kern.zone_map_jetsam_limit", NULL, NULL, &new_limit, sizeof(new_limit));
        T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "sysctl kern.zone_map_jetsam_limit failed");
        T_LOG("kern.zone_map_jetsam_limit set to %d%%", new_limit);
    }

    zone_info_array = (mach_zone_info_array_t) calloc((unsigned long)current_test.num_zones, sizeof *zone_info_array);

    /*
     * If the timeout specified by T_META_TIMEOUT is hit, the atend handler does not get called.
     * So we're queueing a dispatch block to fire after TIMEOUT_SECS seconds, so we can exit cleanly.
     */
    dispatch_after(dispatch_time(DISPATCH_TIME_NOW, TIMEOUT_SECS * NSEC_PER_SEC), dispatch_get_main_queue(), ^{
        T_ASSERT_FAIL("Timed out after %d seconds", TIMEOUT_SECS);
    });

    /*
     * Create a dispatch source for the signal SIGUSR1. When a child is done allocating zone memory, it
     * sends SIGUSR1 to the parent. Only then does the parent spawn another child. This prevents us from
     * spawning many children at once and creating a lot of memory pressure.
     */
    signal(SIGUSR1, SIG_IGN);
    dq_spawn = dispatch_queue_create("spawn_queue", DISPATCH_QUEUE_SERIAL);
    ds_signal = dispatch_source_create(DISPATCH_SOURCE_TYPE_SIGNAL, SIGUSR1, 0, dq_spawn);
    T_QUIET; T_ASSERT_NOTNULL(ds_signal, "dispatch_source_create: signal");

    dispatch_source_set_event_handler(ds_signal, ^{
        (void)query_zone_map_size();

        /* Wait a few seconds before spawning another child. Keeps us from allocating too aggressively */
        sleep(5);
        spawn_child_process();
    });
    dispatch_activate(ds_signal);

    /* Timer to query jetsam-relevant zone info every second. Print it every 5 seconds. */
    ds_timer = dispatch_source_create(DISPATCH_SOURCE_TYPE_TIMER, 0, 0, dispatch_queue_create("timer_queue", NULL));
    T_QUIET; T_ASSERT_NOTNULL(ds_timer, "dispatch_source_create: timer");
    dispatch_source_set_timer(ds_timer, dispatch_time(DISPATCH_TIME_NOW, NSEC_PER_SEC), NSEC_PER_SEC, 0);

    dispatch_source_set_event_handler(ds_timer, ^{
        query_zone_info();
    });
    dispatch_activate(ds_timer);

    /* Set up a ktrace session to listen for jetsam events */
    setup_ktrace_session();

    /* Spawn the first child process */
    T_LOG("Spawning child processes to allocate zone memory...\n\n");
    spawn_child_process();

    dispatch_main();
}
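/* Move the calling (child) process into the idle jetsam band. */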
static void
move_to_idle_band(void)
{
    memorystatus_priority_properties_t props;

    /*
     * We want to move the processes we spawn into the idle band, so that jetsam can target them first.
     * This prevents other important BATS tasks from getting killed, especially in LTE where we have very few
     * processes running.
     *
     * This is only needed for tests which (are likely to) lead us down the generic jetsam path.
     */
    props.priority = JETSAM_PRIORITY_IDLE;
    props.user_data = 0;

    if (memorystatus_control(MEMORYSTATUS_CMD_SET_PRIORITY_PROPERTIES, getpid(), 0, &props, sizeof(props))) {
        printf("memorystatus call to change jetsam priority failed\n");
    }
}
T_HELPER_DECL(allocate_vm_regions, "allocates VM regions")
{
    move_to_idle_band();
    allocate_vm_stuff(REGIONS);
}

T_HELPER_DECL(allocate_vm_objects, "allocates VM objects and VM regions")
{
    move_to_idle_band();
    allocate_vm_stuff(OBJECTS);
}

T_HELPER_DECL(allocate_from_generic_zone, "allocates from a generic zone")
{
    move_to_idle_band();
    allocate_from_generic_zone();
}
/*
 * T_META_SYSCTL_INT(ZONEMAP_JETSAM_LIMIT_SYSCTL) changes the zone_map_jetsam_limit to a
 * lower value, so that the test can complete faster.
 * The test allocates zone memory pretty aggressively which can cause the system to panic
 * if the jetsam limit is quite high; a lower value keeps us from panicking.
 */
T_DECL( memorystatus_vme_zone_test,
    "allocates elements from the VM map entries zone, verifies zone-map-exhaustion jetsams",
    T_META_TIMEOUT(1800),
    /* T_META_LTEPHASE(LTE_POSTINIT), */
    T_META_SYSCTL_INT(ZONEMAP_JETSAM_LIMIT_SYSCTL))
{
    current_test = (test_config_struct) {
        .test_index = VME_ZONE_TEST,
        .helper_func = VME_ZONE_TEST_OPT,
        .num_zones = 1,
        .zone_names = (mach_zone_name_t []){
            { .mzn_name = VME_ZONE }
        }
    };
    run_test();
}

T_DECL( memorystatus_vm_objects_zone_test,
    "allocates elements from the VM objects and the VM map entries zones, verifies zone-map-exhaustion jetsams",
    T_META_TIMEOUT(1800),
    /* T_META_LTEPHASE(LTE_POSTINIT), */
    T_META_SYSCTL_INT(ZONEMAP_JETSAM_LIMIT_SYSCTL))
{
    current_test = (test_config_struct) {
        .test_index = VM_OBJECTS_ZONE_TEST,
        .helper_func = VM_OBJECTS_ZONE_TEST_OPT,
        .num_zones = 2,
        .zone_names = (mach_zone_name_t []){
            { .mzn_name = VME_ZONE },
            { .mzn_name = VMOBJECTS_ZONE }
        }
    };
    run_test();
}

T_DECL( memorystatus_generic_zone_test,
    "allocates elements from a zone that doesn't have an optimized jetsam path, verifies zone-map-exhaustion jetsams",
    T_META_TIMEOUT(1800),
    /* T_META_LTEPHASE(LTE_POSTINIT), */
    T_META_SYSCTL_INT(ZONEMAP_JETSAM_LIMIT_SYSCTL))
{
    current_test = (test_config_struct) {
        .test_index = GENERIC_ZONE_TEST,
        .helper_func = GENERIC_ZONE_TEST_OPT,
        .num_zones = 0,
        .zone_names = NULL
    };
    run_test();
}