]> git.saurik.com Git - apple/xnu.git/blame - tests/stackshot_tests.m
xnu-6153.141.1.tar.gz
[apple/xnu.git] / tests / stackshot_tests.m
CommitLineData
d9a64523
A
1#include <darwintest.h>
2#include <darwintest_utils.h>
3#include <kern/debug.h>
4#include <kern/kern_cdata.h>
ea3f0419 5#include <kern/block_hint.h>
d9a64523
A
6#include <kdd.h>
7#include <libproc.h>
8#include <mach-o/dyld.h>
cb323159 9#include <mach-o/dyld_images.h>
d9a64523
A
10#include <mach-o/dyld_priv.h>
11#include <sys/syscall.h>
12#include <sys/stackshot.h>
13
d9a64523
A
14T_GLOBAL_META(
15 T_META_NAMESPACE("xnu.stackshot"),
16 T_META_CHECK_LEAKS(false),
17 T_META_ASROOT(true)
18 );
19
20static const char *current_process_name(void);
21static void verify_stackshot_sharedcache_layout(struct dyld_uuid_info_64 *uuids, uint32_t uuid_count);
22static void parse_stackshot(uint64_t stackshot_parsing_flags, void *ssbuf, size_t sslen, int child_pid);
23static void parse_thread_group_stackshot(void **sbuf, size_t sslen);
24static uint64_t stackshot_timestamp(void *ssbuf, size_t sslen);
25static void initialize_thread(void);
26
27#define DEFAULT_STACKSHOT_BUFFER_SIZE (1024 * 1024)
28#define MAX_STACKSHOT_BUFFER_SIZE (6 * 1024 * 1024)
29
30/* bit flags for parse_stackshot */
cb323159
A
31#define PARSE_STACKSHOT_DELTA 0x01
32#define PARSE_STACKSHOT_ZOMBIE 0x02
33#define PARSE_STACKSHOT_SHAREDCACHE_LAYOUT 0x04
34#define PARSE_STACKSHOT_DISPATCH_QUEUE_LABEL 0x08
35#define PARSE_STACKSHOT_TURNSTILEINFO 0x10
ea3f0419
A
36#define PARSE_STACKSHOT_WAITINFO_CSEG 0x40
37
38static uint64_t cseg_expected_threadid = 0;
cb323159
A
39
40#define TEST_STACKSHOT_QUEUE_LABEL "houston.we.had.a.problem"
41#define TEST_STACKSHOT_QUEUE_LABEL_LENGTH sizeof(TEST_STACKSHOT_QUEUE_LABEL)
d9a64523
A
42
43T_DECL(microstackshots, "test the microstackshot syscall")
44{
45 void *buf = NULL;
46 unsigned int size = DEFAULT_STACKSHOT_BUFFER_SIZE;
47
48 while (1) {
49 buf = malloc(size);
50 T_QUIET; T_ASSERT_NOTNULL(buf, "allocated stackshot buffer");
51
52#pragma clang diagnostic push
53#pragma clang diagnostic ignored "-Wdeprecated-declarations"
54 int len = syscall(SYS_microstackshot, buf, size,
55 STACKSHOT_GET_MICROSTACKSHOT);
56#pragma clang diagnostic pop
57 if (len == ENOSYS) {
58 T_SKIP("microstackshot syscall failed, likely not compiled with CONFIG_TELEMETRY");
59 }
60 if (len == -1 && errno == ENOSPC) {
61 /* syscall failed because buffer wasn't large enough, try again */
62 free(buf);
63 buf = NULL;
64 size *= 2;
65 T_ASSERT_LE(size, (unsigned int)MAX_STACKSHOT_BUFFER_SIZE,
66 "growing stackshot buffer to sane size");
67 continue;
68 }
69 T_ASSERT_POSIX_SUCCESS(len, "called microstackshot syscall");
70 break;
71 }
72
73 T_EXPECT_EQ(*(uint32_t *)buf,
74 (uint32_t)STACKSHOT_MICRO_SNAPSHOT_MAGIC,
75 "magic value for microstackshot matches");
76
77 free(buf);
78}
79
80struct scenario {
81 const char *name;
82 uint32_t flags;
cb323159 83 bool quiet;
d9a64523
A
84 bool should_fail;
85 bool maybe_unsupported;
86 pid_t target_pid;
87 uint64_t since_timestamp;
88 uint32_t size_hint;
89 dt_stat_time_t timer;
90};
91
92static void
93quiet(struct scenario *scenario)
94{
cb323159 95 if (scenario->timer || scenario->quiet) {
d9a64523
A
96 T_QUIET;
97 }
98}
99
100static void
101take_stackshot(struct scenario *scenario, void (^cb)(void *buf, size_t size))
102{
103 initialize_thread();
104
105 void *config = stackshot_config_create();
106 quiet(scenario);
107 T_ASSERT_NOTNULL(config, "created stackshot config");
108
109 int ret = stackshot_config_set_flags(config, scenario->flags);
110 quiet(scenario);
111 T_ASSERT_POSIX_ZERO(ret, "set flags %#x on stackshot config", scenario->flags);
112
113 if (scenario->size_hint > 0) {
114 ret = stackshot_config_set_size_hint(config, scenario->size_hint);
115 quiet(scenario);
116 T_ASSERT_POSIX_ZERO(ret, "set size hint %" PRIu32 " on stackshot config",
117 scenario->size_hint);
118 }
119
120 if (scenario->target_pid > 0) {
121 ret = stackshot_config_set_pid(config, scenario->target_pid);
122 quiet(scenario);
123 T_ASSERT_POSIX_ZERO(ret, "set target pid %d on stackshot config",
124 scenario->target_pid);
125 }
126
127 if (scenario->since_timestamp > 0) {
128 ret = stackshot_config_set_delta_timestamp(config, scenario->since_timestamp);
129 quiet(scenario);
130 T_ASSERT_POSIX_ZERO(ret, "set since timestamp %" PRIu64 " on stackshot config",
131 scenario->since_timestamp);
132 }
133
134 int retries_remaining = 5;
135
136retry: ;
137 uint64_t start_time = mach_absolute_time();
138 ret = stackshot_capture_with_config(config);
139 uint64_t end_time = mach_absolute_time();
140
141 if (scenario->should_fail) {
142 T_EXPECTFAIL;
143 T_ASSERT_POSIX_ZERO(ret, "called stackshot_capture_with_config");
144 return;
145 }
146
147 if (ret == EBUSY || ret == ETIMEDOUT) {
148 if (retries_remaining > 0) {
149 if (!scenario->timer) {
150 T_LOG("stackshot_capture_with_config failed with %s (%d), retrying",
151 strerror(ret), ret);
152 }
153
154 retries_remaining--;
155 goto retry;
156 } else {
157 T_ASSERT_POSIX_ZERO(ret,
158 "called stackshot_capture_with_config (no retries remaining)");
159 }
160 } else if ((ret == ENOTSUP) && scenario->maybe_unsupported) {
161 T_SKIP("kernel indicated this stackshot configuration is not supported");
162 } else {
163 quiet(scenario);
164 T_ASSERT_POSIX_ZERO(ret, "called stackshot_capture_with_config");
165 }
166
167 if (scenario->timer) {
168 dt_stat_mach_time_add(scenario->timer, end_time - start_time);
169 }
170 void *buf = stackshot_config_get_stackshot_buffer(config);
171 size_t size = stackshot_config_get_stackshot_size(config);
172 if (scenario->name) {
173 char sspath[MAXPATHLEN];
174 strlcpy(sspath, scenario->name, sizeof(sspath));
175 strlcat(sspath, ".kcdata", sizeof(sspath));
176 T_QUIET; T_ASSERT_POSIX_ZERO(dt_resultfile(sspath, sizeof(sspath)),
177 "create result file path");
178
cb323159
A
179 if (!scenario->quiet) {
180 T_LOG("writing stackshot to %s", sspath);
181 }
d9a64523
A
182
183 FILE *f = fopen(sspath, "w");
184 T_WITH_ERRNO; T_QUIET; T_ASSERT_NOTNULL(f,
185 "open stackshot output file");
186
187 size_t written = fwrite(buf, size, 1, f);
188 T_QUIET; T_ASSERT_POSIX_SUCCESS(written, "wrote stackshot to file");
189
190 fclose(f);
191 }
192 cb(buf, size);
193
194 ret = stackshot_config_dealloc(config);
195 T_QUIET; T_EXPECT_POSIX_ZERO(ret, "deallocated stackshot config");
196}
197
198T_DECL(kcdata, "test that kcdata stackshots can be taken and parsed")
199{
200 struct scenario scenario = {
201 .name = "kcdata",
202 .flags = (STACKSHOT_SAVE_LOADINFO | STACKSHOT_GET_GLOBAL_MEM_STATS |
203 STACKSHOT_SAVE_IMP_DONATION_PIDS | STACKSHOT_KCDATA_FORMAT),
204 };
205
206 T_LOG("taking kcdata stackshot");
207 take_stackshot(&scenario, ^(void *ssbuf, size_t sslen) {
208 parse_stackshot(0, ssbuf, sslen, -1);
209 });
210}
211
212T_DECL(kcdata_faulting, "test that kcdata stackshots while faulting can be taken and parsed")
213{
214 struct scenario scenario = {
215 .name = "faulting",
216 .flags = (STACKSHOT_SAVE_LOADINFO | STACKSHOT_GET_GLOBAL_MEM_STATS
217 | STACKSHOT_SAVE_IMP_DONATION_PIDS | STACKSHOT_KCDATA_FORMAT
218 | STACKSHOT_ENABLE_BT_FAULTING | STACKSHOT_ENABLE_UUID_FAULTING),
219 };
220
221 T_LOG("taking faulting stackshot");
222 take_stackshot(&scenario, ^(void *ssbuf, size_t sslen) {
223 parse_stackshot(0, ssbuf, sslen, -1);
224 });
225}
226
227T_DECL(bad_flags, "test a poorly-formed stackshot syscall")
228{
229 struct scenario scenario = {
230 .flags = STACKSHOT_SAVE_IN_KERNEL_BUFFER /* not allowed from user space */,
231 .should_fail = true,
232 };
233
234 T_LOG("attempting to take stackshot with kernel-only flag");
235 take_stackshot(&scenario, ^(__unused void *ssbuf, __unused size_t sslen) {
236 T_ASSERT_FAIL("stackshot data callback called");
237 });
238}
239
240T_DECL(delta, "test delta stackshots")
241{
242 struct scenario scenario = {
243 .name = "delta",
244 .flags = (STACKSHOT_SAVE_LOADINFO | STACKSHOT_GET_GLOBAL_MEM_STATS
245 | STACKSHOT_SAVE_IMP_DONATION_PIDS | STACKSHOT_KCDATA_FORMAT),
246 };
247
248 T_LOG("taking full stackshot");
249 take_stackshot(&scenario, ^(void *ssbuf, size_t sslen) {
250 uint64_t stackshot_time = stackshot_timestamp(ssbuf, sslen);
251
252 T_LOG("taking delta stackshot since time %" PRIu64, stackshot_time);
253
254 parse_stackshot(0, ssbuf, sslen, -1);
255
256 struct scenario delta_scenario = {
257 .flags = (STACKSHOT_SAVE_LOADINFO | STACKSHOT_GET_GLOBAL_MEM_STATS
258 | STACKSHOT_SAVE_IMP_DONATION_PIDS | STACKSHOT_KCDATA_FORMAT
259 | STACKSHOT_COLLECT_DELTA_SNAPSHOT),
260 .since_timestamp = stackshot_time
261 };
262
263 take_stackshot(&delta_scenario, ^(void *dssbuf, size_t dsslen) {
264 parse_stackshot(PARSE_STACKSHOT_DELTA, dssbuf, dsslen, -1);
265 });
266 });
267}
268
269T_DECL(shared_cache_layout, "test stackshot inclusion of shared cache layout")
270{
271 struct scenario scenario = {
272 .name = "shared_cache_layout",
273 .flags = (STACKSHOT_SAVE_LOADINFO | STACKSHOT_GET_GLOBAL_MEM_STATS
274 | STACKSHOT_SAVE_IMP_DONATION_PIDS | STACKSHOT_KCDATA_FORMAT |
275 STACKSHOT_COLLECT_SHAREDCACHE_LAYOUT),
276 };
277
cb323159
A
278 size_t shared_cache_length;
279 const void *cache_header = _dyld_get_shared_cache_range(&shared_cache_length);
280 if (cache_header == NULL) {
281 T_SKIP("Device not running with shared cache, skipping test...");
282 }
283
284 if (shared_cache_length == 0) {
285 T_SKIP("dyld reports that currently running shared cache has zero length");
286 }
287
d9a64523
A
288 T_LOG("taking stackshot with STACKSHOT_COLLECT_SHAREDCACHE_LAYOUT set");
289 take_stackshot(&scenario, ^(void *ssbuf, size_t sslen) {
290 parse_stackshot(PARSE_STACKSHOT_SHAREDCACHE_LAYOUT, ssbuf, sslen, -1);
291 });
292}
293
cb323159
A
294T_DECL(stress, "test that taking stackshots for 60 seconds doesn't crash the system")
295{
296 uint64_t max_diff_time = 60ULL /* seconds */ * 1000000000ULL;
297 uint64_t start_time;
298
299 struct scenario scenario = {
300 .name = "stress",
301 .quiet = true,
302 .flags = (STACKSHOT_KCDATA_FORMAT |
303 STACKSHOT_THREAD_WAITINFO |
304 STACKSHOT_SAVE_LOADINFO |
305 STACKSHOT_SAVE_KEXT_LOADINFO |
306 STACKSHOT_GET_GLOBAL_MEM_STATS |
307 // STACKSHOT_GET_BOOT_PROFILE |
308 STACKSHOT_SAVE_IMP_DONATION_PIDS |
309 STACKSHOT_COLLECT_SHAREDCACHE_LAYOUT |
310 STACKSHOT_THREAD_GROUP |
311 STACKSHOT_SAVE_JETSAM_COALITIONS |
312 STACKSHOT_ASID |
313 // STACKSHOT_PAGE_TABLES |
314 0),
315 };
316
317 start_time = clock_gettime_nsec_np(CLOCK_MONOTONIC);
318 while (clock_gettime_nsec_np(CLOCK_MONOTONIC) - start_time < max_diff_time) {
319 take_stackshot(&scenario, ^(void *ssbuf, size_t sslen) {
320 printf(".");
321 fflush(stdout);
322 });
323
324 /* Leave some time for the testing infrastructure to catch up */
325 usleep(10000);
326
327 }
328 printf("\n");
329}
330
331T_DECL(dispatch_queue_label, "test that kcdata stackshots contain libdispatch queue labels")
332{
333 struct scenario scenario = {
334 .name = "kcdata",
335 .flags = (STACKSHOT_GET_DQ | STACKSHOT_KCDATA_FORMAT),
336 };
337 dispatch_semaphore_t child_ready_sem, parent_done_sem;
338 dispatch_queue_t dq;
339
340#if TARGET_OS_WATCH
341 T_SKIP("This test is flaky on watches: 51663346");
342#endif
343
344 child_ready_sem = dispatch_semaphore_create(0);
345 T_QUIET; T_ASSERT_NOTNULL(child_ready_sem, "dqlabel child semaphore");
346
347 parent_done_sem = dispatch_semaphore_create(0);
348 T_QUIET; T_ASSERT_NOTNULL(parent_done_sem, "dqlabel parent semaphore");
349
350 dq = dispatch_queue_create(TEST_STACKSHOT_QUEUE_LABEL, NULL);
351 T_QUIET; T_ASSERT_NOTNULL(dq, "dispatch queue");
352
353 /* start the helper thread */
354 dispatch_async(dq, ^{
355 dispatch_semaphore_signal(child_ready_sem);
356
357 dispatch_semaphore_wait(parent_done_sem, DISPATCH_TIME_FOREVER);
358 });
359
360 /* block behind the child starting up */
361 dispatch_semaphore_wait(child_ready_sem, DISPATCH_TIME_FOREVER);
362
363 T_LOG("taking kcdata stackshot with libdispatch queue labels");
364 take_stackshot(&scenario, ^(void *ssbuf, size_t sslen) {
365 parse_stackshot(PARSE_STACKSHOT_DISPATCH_QUEUE_LABEL, ssbuf, sslen, -1);
366 });
367
368 dispatch_semaphore_signal(parent_done_sem);
369}
370
d9a64523
A
371static void *stuck_sysctl_thread(void *arg) {
372 int val = 1;
373 dispatch_semaphore_t child_thread_started = *(dispatch_semaphore_t *)arg;
374
375 dispatch_semaphore_signal(child_thread_started);
376 T_ASSERT_POSIX_SUCCESS(sysctlbyname("kern.wedge_thread", NULL, NULL, &val, sizeof(val)), "wedge child thread");
377
378 return NULL;
379}
380
381T_HELPER_DECL(zombie_child, "child process to sample as a zombie")
382{
383 pthread_t pthread;
384 dispatch_semaphore_t child_thread_started = dispatch_semaphore_create(0);
385 T_QUIET; T_ASSERT_NOTNULL(child_thread_started, "zombie child thread semaphore");
386
387 /* spawn another thread to get stuck in the kernel, then call exit() to become a zombie */
388 T_QUIET; T_ASSERT_POSIX_SUCCESS(pthread_create(&pthread, NULL, stuck_sysctl_thread, &child_thread_started), "pthread_create");
389
390 dispatch_semaphore_wait(child_thread_started, DISPATCH_TIME_FOREVER);
391
392 /* sleep for a bit in the hope of ensuring that the other thread has called the sysctl before we signal the parent */
393 usleep(100);
394 T_ASSERT_POSIX_SUCCESS(kill(getppid(), SIGUSR1), "signaled parent to take stackshot");
395
396 exit(0);
397}
398
399T_DECL(zombie, "tests a stackshot of a zombie task with a thread stuck in the kernel")
400{
401 char path[PATH_MAX];
402 uint32_t path_size = sizeof(path);
403 T_ASSERT_POSIX_ZERO(_NSGetExecutablePath(path, &path_size), "_NSGetExecutablePath");
404 char *args[] = { path, "-n", "zombie_child", NULL };
405
406 dispatch_source_t child_sig_src;
407 dispatch_semaphore_t child_ready_sem = dispatch_semaphore_create(0);
408 T_QUIET; T_ASSERT_NOTNULL(child_ready_sem, "zombie child semaphore");
409
410 dispatch_queue_t signal_processing_q = dispatch_queue_create("signal processing queue", NULL);
cb323159 411 T_QUIET; T_ASSERT_NOTNULL(signal_processing_q, "signal processing queue");
d9a64523
A
412
413 pid_t pid;
414
415 T_LOG("spawning a child");
416
417 signal(SIGUSR1, SIG_IGN);
418 child_sig_src = dispatch_source_create(DISPATCH_SOURCE_TYPE_SIGNAL, SIGUSR1, 0, signal_processing_q);
419 T_QUIET; T_ASSERT_NOTNULL(child_sig_src, "dispatch_source_create (child_sig_src)");
420
421 dispatch_source_set_event_handler(child_sig_src, ^{ dispatch_semaphore_signal(child_ready_sem); });
422 dispatch_activate(child_sig_src);
423
424 int sp_ret = posix_spawn(&pid, args[0], NULL, NULL, args, NULL);
425 T_QUIET; T_ASSERT_POSIX_ZERO(sp_ret, "spawned process '%s' with PID %d", args[0], pid);
426
427 dispatch_semaphore_wait(child_ready_sem, DISPATCH_TIME_FOREVER);
428
429 T_LOG("received signal from child, capturing stackshot");
430
431 struct proc_bsdshortinfo bsdshortinfo;
432 int retval, iterations_to_wait = 10;
433
434 while (iterations_to_wait > 0) {
435 retval = proc_pidinfo(pid, PROC_PIDT_SHORTBSDINFO, 0, &bsdshortinfo, sizeof(bsdshortinfo));
436 if ((retval == 0) && errno == ESRCH) {
437 T_LOG("unable to find child using proc_pidinfo, assuming zombie");
438 break;
439 }
440
441 T_QUIET; T_WITH_ERRNO; T_ASSERT_GT(retval, 0, "proc_pidinfo(PROC_PIDT_SHORTBSDINFO) returned a value > 0");
442 T_QUIET; T_ASSERT_EQ(retval, (int)sizeof(bsdshortinfo), "proc_pidinfo call for PROC_PIDT_SHORTBSDINFO returned expected size");
443
444 if (bsdshortinfo.pbsi_flags & PROC_FLAG_INEXIT) {
445 T_LOG("child proc info marked as in exit");
446 break;
447 }
448
449 iterations_to_wait--;
450 if (iterations_to_wait == 0) {
451 /*
452 * This will mark the test as failed but let it continue so we
453 * don't leave a process stuck in the kernel.
454 */
455 T_FAIL("unable to discover that child is marked as exiting");
456 }
457
458 /* Give the child a few more seconds to make it to exit */
459 sleep(5);
460 }
461
462 /* Give the child some more time to make it through exit */
463 sleep(10);
464
465 struct scenario scenario = {
466 .name = "zombie",
467 .flags = (STACKSHOT_SAVE_LOADINFO | STACKSHOT_GET_GLOBAL_MEM_STATS
468 | STACKSHOT_SAVE_IMP_DONATION_PIDS | STACKSHOT_KCDATA_FORMAT),
469 };
470
471 take_stackshot(&scenario, ^( void *ssbuf, size_t sslen) {
472 /* First unwedge the child so we can reap it */
473 int val = 1, status;
474 T_ASSERT_POSIX_SUCCESS(sysctlbyname("kern.unwedge_thread", NULL, NULL, &val, sizeof(val)), "unwedge child");
475
476 T_QUIET; T_ASSERT_POSIX_SUCCESS(waitpid(pid, &status, 0), "waitpid on zombie child");
477
478 parse_stackshot(PARSE_STACKSHOT_ZOMBIE, ssbuf, sslen, pid);
479 });
480}
481
cb323159
A
482static uint32_t
483get_user_promotion_basepri(void)
484{
485 mach_msg_type_number_t count = THREAD_POLICY_STATE_COUNT;
486 struct thread_policy_state thread_policy;
487 boolean_t get_default = FALSE;
488 mach_port_t thread_port = pthread_mach_thread_np(pthread_self());
489
490 kern_return_t kr = thread_policy_get(thread_port, THREAD_POLICY_STATE,
491 (thread_policy_t)&thread_policy, &count, &get_default);
492 T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "thread_policy_get");
493 return thread_policy.thps_user_promotion_basepri;
494}
495
496static int
497get_pri(thread_t thread_port)
498{
499 kern_return_t kr;
500
501 thread_extended_info_data_t extended_info;
502 mach_msg_type_number_t count = THREAD_EXTENDED_INFO_COUNT;
503 kr = thread_info(thread_port, THREAD_EXTENDED_INFO,
504 (thread_info_t)&extended_info, &count);
505
506 T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "thread_info");
507
508 return extended_info.pth_curpri;
509}
510
511
512T_DECL(turnstile_singlehop, "turnstile single hop test")
513{
514 dispatch_queue_t dq1, dq2;
515 dispatch_semaphore_t sema_x;
516 dispatch_queue_attr_t dq1_attr, dq2_attr;
517 qos_class_t main_qos = 0;
518 int main_relpri = 0, main_relpri2 = 0, main_afterpri = 0;
519 struct scenario scenario = {
520 .name = "turnstile_singlehop",
521 .flags = (STACKSHOT_THREAD_WAITINFO | STACKSHOT_KCDATA_FORMAT),
522 };
523 dq1_attr = dispatch_queue_attr_make_with_qos_class(DISPATCH_QUEUE_SERIAL, QOS_CLASS_UTILITY, 0);
524 dq2_attr = dispatch_queue_attr_make_with_qos_class(DISPATCH_QUEUE_SERIAL, QOS_CLASS_USER_INITIATED, 0);
525 pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER;
526 pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER;
527
528 pthread_mutex_t *lockap = &lock_a, *lockbp = &lock_b;
529
530 dq1 = dispatch_queue_create("q1", dq1_attr);
531 dq2 = dispatch_queue_create("q2", dq2_attr);
532 sema_x = dispatch_semaphore_create(0);
533
534 pthread_mutex_lock(lockap);
535 dispatch_async(dq1, ^{
536 pthread_mutex_lock(lockbp);
537 T_ASSERT_POSIX_SUCCESS(pthread_get_qos_class_np(pthread_self(), &main_qos, &main_relpri), "get qos class");
538 T_LOG("The priority of q1 is %d\n", get_pri(mach_thread_self()));
539 dispatch_semaphore_signal(sema_x);
540 pthread_mutex_lock(lockap);
541 });
542 dispatch_semaphore_wait(sema_x, DISPATCH_TIME_FOREVER);
543
544 T_LOG("Async1 completed");
545
546 pthread_set_qos_class_self_np(QOS_CLASS_UTILITY, 0);
547 T_ASSERT_POSIX_SUCCESS(pthread_get_qos_class_np(pthread_self(), &main_qos, &main_relpri), "get qos class");
548 T_LOG("The priority of main is %d\n", get_pri(mach_thread_self()));
549 main_relpri = get_pri(mach_thread_self());
550
551 dispatch_async(dq2, ^{
552 T_ASSERT_POSIX_SUCCESS(pthread_get_qos_class_np(pthread_self(), &main_qos, &main_relpri2), "get qos class");
553 T_LOG("The priority of q2 is %d\n", get_pri(mach_thread_self()));
554 dispatch_semaphore_signal(sema_x);
555 pthread_mutex_lock(lockbp);
556 });
557 dispatch_semaphore_wait(sema_x, DISPATCH_TIME_FOREVER);
558
559 T_LOG("Async2 completed");
560
561 while (1) {
562 main_afterpri = get_user_promotion_basepri();
563 if (main_relpri != main_afterpri) {
564 T_LOG("Success with promotion pri is %d", main_afterpri);
565 break;
566 }
567
568 usleep(100);
569 }
570
571 take_stackshot(&scenario, ^( void *ssbuf, size_t sslen) {
572 parse_stackshot(PARSE_STACKSHOT_TURNSTILEINFO, ssbuf, sslen, -1);
573 });
574}
575
576
d9a64523
A
577static void
578expect_instrs_cycles_in_stackshot(void *ssbuf, size_t sslen)
579{
580 kcdata_iter_t iter = kcdata_iter(ssbuf, sslen);
581
582 bool in_task = false;
583 bool in_thread = false;
584 bool saw_instrs_cycles = false;
585 iter = kcdata_iter_next(iter);
586
587 KCDATA_ITER_FOREACH(iter) {
588 switch (kcdata_iter_type(iter)) {
589 case KCDATA_TYPE_CONTAINER_BEGIN:
590 switch (kcdata_iter_container_type(iter)) {
591 case STACKSHOT_KCCONTAINER_TASK:
592 in_task = true;
593 saw_instrs_cycles = false;
594 break;
595
596 case STACKSHOT_KCCONTAINER_THREAD:
597 in_thread = true;
598 saw_instrs_cycles = false;
599 break;
600
601 default:
602 break;
603 }
604 break;
605
606 case STACKSHOT_KCTYPE_INSTRS_CYCLES:
607 saw_instrs_cycles = true;
608 break;
609
610 case KCDATA_TYPE_CONTAINER_END:
611 if (in_thread) {
612 T_QUIET; T_EXPECT_TRUE(saw_instrs_cycles,
613 "saw instructions and cycles in thread");
614 in_thread = false;
615 } else if (in_task) {
616 T_QUIET; T_EXPECT_TRUE(saw_instrs_cycles,
617 "saw instructions and cycles in task");
618 in_task = false;
619 }
620
621 default:
622 break;
623 }
624 }
625}
626
627static void
628skip_if_monotonic_unsupported(void)
629{
630 int supported = 0;
631 size_t supported_size = sizeof(supported);
632 int ret = sysctlbyname("kern.monotonic.supported", &supported,
633 &supported_size, 0, 0);
634 if (ret < 0 || !supported) {
635 T_SKIP("monotonic is unsupported");
636 }
637}
638
639T_DECL(instrs_cycles, "test a getting instructions and cycles in stackshot")
640{
641 skip_if_monotonic_unsupported();
642
643 struct scenario scenario = {
644 .name = "instrs-cycles",
645 .flags = (STACKSHOT_SAVE_LOADINFO | STACKSHOT_INSTRS_CYCLES
646 | STACKSHOT_KCDATA_FORMAT),
647 };
648
649 T_LOG("attempting to take stackshot with instructions and cycles");
650 take_stackshot(&scenario, ^(void *ssbuf, size_t sslen) {
651 parse_stackshot(0, ssbuf, sslen, -1);
652 expect_instrs_cycles_in_stackshot(ssbuf, sslen);
653 });
654}
655
656T_DECL(delta_instrs_cycles,
657 "test delta stackshots with instructions and cycles")
658{
659 skip_if_monotonic_unsupported();
660
661 struct scenario scenario = {
662 .name = "delta-instrs-cycles",
663 .flags = (STACKSHOT_SAVE_LOADINFO | STACKSHOT_INSTRS_CYCLES
664 | STACKSHOT_KCDATA_FORMAT),
665 };
666
667 T_LOG("taking full stackshot");
668 take_stackshot(&scenario, ^(void *ssbuf, size_t sslen) {
669 uint64_t stackshot_time = stackshot_timestamp(ssbuf, sslen);
670
671 T_LOG("taking delta stackshot since time %" PRIu64, stackshot_time);
672
673 parse_stackshot(0, ssbuf, sslen, -1);
674 expect_instrs_cycles_in_stackshot(ssbuf, sslen);
675
676 struct scenario delta_scenario = {
677 .name = "delta-instrs-cycles-next",
678 .flags = (STACKSHOT_SAVE_LOADINFO | STACKSHOT_INSTRS_CYCLES
679 | STACKSHOT_KCDATA_FORMAT
680 | STACKSHOT_COLLECT_DELTA_SNAPSHOT),
681 .since_timestamp = stackshot_time,
682 };
683
684 take_stackshot(&delta_scenario, ^(void *dssbuf, size_t dsslen) {
685 parse_stackshot(PARSE_STACKSHOT_DELTA, dssbuf, dsslen, -1);
686 expect_instrs_cycles_in_stackshot(dssbuf, dsslen);
687 });
688 });
689}
690
691static void
692check_thread_groups_supported()
693{
694 int err;
695 int supported = 0;
696 size_t supported_size = sizeof(supported);
697 err = sysctlbyname("kern.thread_groups_supported", &supported, &supported_size, NULL, 0);
698
699 if (err || !supported)
700 T_SKIP("thread groups not supported on this system");
701}
702
703T_DECL(thread_groups, "test getting thread groups in stackshot")
704{
705 check_thread_groups_supported();
706
707 struct scenario scenario = {
708 .name = "thread-groups",
709 .flags = (STACKSHOT_SAVE_LOADINFO | STACKSHOT_THREAD_GROUP
710 | STACKSHOT_KCDATA_FORMAT),
711 };
712
713 T_LOG("attempting to take stackshot with thread group flag");
714 take_stackshot(&scenario, ^(void *ssbuf, size_t sslen) {
715 parse_thread_group_stackshot(ssbuf, sslen);
716 });
717}
718
719static void
720parse_page_table_asid_stackshot(void **ssbuf, size_t sslen)
721{
722 bool seen_asid = false;
723 bool seen_page_table_snapshot = false;
724 kcdata_iter_t iter = kcdata_iter(ssbuf, sslen);
725 T_ASSERT_EQ(kcdata_iter_type(iter), KCDATA_BUFFER_BEGIN_STACKSHOT,
726 "buffer provided is a stackshot");
727
728 iter = kcdata_iter_next(iter);
729 KCDATA_ITER_FOREACH(iter) {
730 switch (kcdata_iter_type(iter)) {
731 case KCDATA_TYPE_ARRAY: {
732 T_QUIET;
733 T_ASSERT_TRUE(kcdata_iter_array_valid(iter),
734 "checked that array is valid");
735
736 if (kcdata_iter_array_elem_type(iter) != STACKSHOT_KCTYPE_PAGE_TABLES) {
737 continue;
738 }
739
740 T_ASSERT_FALSE(seen_page_table_snapshot, "check that we haven't yet seen a page table snapshot");
741 seen_page_table_snapshot = true;
742
743 T_ASSERT_EQ((size_t) kcdata_iter_array_elem_size(iter), sizeof(uint64_t),
744 "check that each element of the pagetable dump is the expected size");
745
746 uint64_t *pt_array = kcdata_iter_payload(iter);
747 uint32_t elem_count = kcdata_iter_array_elem_count(iter);
748 uint32_t j;
749 bool nonzero_tte = false;
750 for (j = 0; j < elem_count;) {
751 T_QUIET; T_ASSERT_LE(j + 4, elem_count, "check for valid page table segment header");
752 uint64_t pa = pt_array[j];
753 uint64_t num_entries = pt_array[j + 1];
754 uint64_t start_va = pt_array[j + 2];
755 uint64_t end_va = pt_array[j + 3];
756
757 T_QUIET; T_ASSERT_NE(pa, (uint64_t) 0, "check that the pagetable physical address is non-zero");
758 T_QUIET; T_ASSERT_EQ(pa % (num_entries * sizeof(uint64_t)), (uint64_t) 0, "check that the pagetable physical address is correctly aligned");
759 T_QUIET; T_ASSERT_NE(num_entries, (uint64_t) 0, "check that a pagetable region has more than 0 entries");
760 T_QUIET; T_ASSERT_LE(j + 4 + num_entries, (uint64_t) elem_count, "check for sufficient space in page table array");
761 T_QUIET; T_ASSERT_GT(end_va, start_va, "check for valid VA bounds in page table segment header");
762
763 for (uint32_t k = j + 4; k < (j + 4 + num_entries); ++k) {
764 if (pt_array[k] != 0) {
765 nonzero_tte = true;
766 T_QUIET; T_ASSERT_EQ((pt_array[k] >> 48) & 0xf, (uint64_t) 0, "check that bits[48:51] of arm64 TTE are clear");
767 // L0-L2 table and non-compressed L3 block entries should always have bit 1 set; assumes L0-L2 blocks will not be used outside the kernel
768 bool table = ((pt_array[k] & 0x2) != 0);
769 if (table) {
770 T_QUIET; T_ASSERT_NE(pt_array[k] & ((1ULL << 48) - 1) & ~((1ULL << 12) - 1), (uint64_t) 0, "check that arm64 TTE physical address is non-zero");
771 } else { // should be a compressed PTE
772 T_QUIET; T_ASSERT_NE(pt_array[k] & 0xC000000000000000ULL, (uint64_t) 0, "check that compressed PTE has at least one of bits [63:62] set");
773 T_QUIET; T_ASSERT_EQ(pt_array[k] & ~0xC000000000000000ULL, (uint64_t) 0, "check that compressed PTE has no other bits besides [63:62] set");
774 }
775 }
776 }
777
778 j += (4 + num_entries);
779 }
780 T_ASSERT_TRUE(nonzero_tte, "check that we saw at least one non-empty TTE");
781 T_ASSERT_EQ(j, elem_count, "check that page table dump size matches extent of last header");
782 break;
783 }
784 case STACKSHOT_KCTYPE_ASID: {
785 T_ASSERT_FALSE(seen_asid, "check that we haven't yet seen an ASID");
786 seen_asid = true;
787 }
788 }
789 }
790 T_ASSERT_TRUE(seen_page_table_snapshot, "check that we have seen a page table snapshot");
791 T_ASSERT_TRUE(seen_asid, "check that we have seen an ASID");
792}
793
794T_DECL(dump_page_tables, "test stackshot page table dumping support")
795{
796 struct scenario scenario = {
797 .name = "asid-page-tables",
798 .flags = (STACKSHOT_KCDATA_FORMAT | STACKSHOT_ASID | STACKSHOT_PAGE_TABLES),
799 .size_hint = (1ULL << 23), // 8 MB
800 .target_pid = getpid(),
801 .maybe_unsupported = true,
802 };
803
804 T_LOG("attempting to take stackshot with ASID and page table flags");
805 take_stackshot(&scenario, ^(void *ssbuf, size_t sslen) {
806 parse_page_table_asid_stackshot(ssbuf, sslen);
807 });
808}
809
cb323159
A
810static void stackshot_verify_current_proc_uuid_info(void **ssbuf, size_t sslen, uint64_t expected_offset, const struct proc_uniqidentifierinfo *proc_info_data)
811{
812 const uuid_t *current_uuid = (const uuid_t *)(&proc_info_data->p_uuid);
813
814 kcdata_iter_t iter = kcdata_iter(ssbuf, sslen);
815 T_ASSERT_EQ(kcdata_iter_type(iter), KCDATA_BUFFER_BEGIN_STACKSHOT, "buffer provided is a stackshot");
816
817 iter = kcdata_iter_next(iter);
818
819 KCDATA_ITER_FOREACH(iter) {
820 switch (kcdata_iter_type(iter)) {
821 case KCDATA_TYPE_ARRAY: {
822 T_QUIET; T_ASSERT_TRUE(kcdata_iter_array_valid(iter), "checked that array is valid");
823 if (kcdata_iter_array_elem_type(iter) == KCDATA_TYPE_LIBRARY_LOADINFO64) {
824 struct user64_dyld_uuid_info *info = (struct user64_dyld_uuid_info *) kcdata_iter_payload(iter);
825 if (uuid_compare(*current_uuid, info->imageUUID) == 0) {
826 T_ASSERT_EQ(expected_offset, info->imageLoadAddress, "found matching UUID with matching binary offset");
827 return;
828 }
829 } else if (kcdata_iter_array_elem_type(iter) == KCDATA_TYPE_LIBRARY_LOADINFO) {
830 struct user32_dyld_uuid_info *info = (struct user32_dyld_uuid_info *) kcdata_iter_payload(iter);
831 if (uuid_compare(*current_uuid, info->imageUUID) == 0) {
832 T_ASSERT_EQ(expected_offset, ((uint64_t) info->imageLoadAddress), "found matching UUID with matching binary offset");
833 return;
834 }
835 }
836 break;
837 }
838 default:
839 break;
840 }
841 }
842
843 T_FAIL("failed to find matching UUID in stackshot data");
844}
845
846T_DECL(proc_uuid_info, "tests that the main binary UUID for a proc is always populated")
847{
848 struct proc_uniqidentifierinfo proc_info_data = { };
849 mach_msg_type_number_t count;
850 kern_return_t kernel_status;
851 task_dyld_info_data_t task_dyld_info;
852 struct dyld_all_image_infos *target_infos;
853 int retval;
854 bool found_image_in_image_infos = false;
855 uint64_t expected_mach_header_offset = 0;
856
857 /* Find the UUID of our main binary */
858 retval = proc_pidinfo(getpid(), PROC_PIDUNIQIDENTIFIERINFO, 0, &proc_info_data, sizeof(proc_info_data));
859 T_QUIET; T_EXPECT_POSIX_SUCCESS(retval, "proc_pidinfo PROC_PIDUNIQIDENTIFIERINFO");
860 T_QUIET; T_ASSERT_EQ_INT(retval, (int) sizeof(proc_info_data), "proc_pidinfo PROC_PIDUNIQIDENTIFIERINFO returned data");
861
862 uuid_string_t str = {};
863 uuid_unparse(*(uuid_t*)&proc_info_data.p_uuid, str);
864 T_LOG("Found current UUID is %s", str);
865
866 /* Find the location of the dyld image info metadata */
867 count = TASK_DYLD_INFO_COUNT;
868 kernel_status = task_info(mach_task_self(), TASK_DYLD_INFO, (task_info_t)&task_dyld_info, &count);
869 T_QUIET; T_ASSERT_EQ(kernel_status, KERN_SUCCESS, "retrieve task_info for TASK_DYLD_INFO");
870
871 target_infos = (struct dyld_all_image_infos *)task_dyld_info.all_image_info_addr;
872
873 /* Find our binary in the dyld image info array */
874 for (int i = 0; i < (int) target_infos->uuidArrayCount; i++) {
875 if (uuid_compare(target_infos->uuidArray[i].imageUUID, *(uuid_t*)&proc_info_data.p_uuid) == 0) {
876 expected_mach_header_offset = (uint64_t) target_infos->uuidArray[i].imageLoadAddress;
877 found_image_in_image_infos = true;
878 }
879 }
880
881 T_ASSERT_TRUE(found_image_in_image_infos, "found binary image in dyld image info list");
882
883 /* Overwrite the dyld image info data so the kernel has to fallback to the UUID stored in the proc structure */
884 target_infos->uuidArrayCount = 0;
885
886 struct scenario scenario = {
887 .name = "proc_uuid_info",
888 .flags = (STACKSHOT_SAVE_LOADINFO | STACKSHOT_KCDATA_FORMAT),
889 .target_pid = getpid(),
890 };
891
892 T_LOG("attempting to take stackshot for current PID");
893 take_stackshot(&scenario, ^(void *ssbuf, size_t sslen) {
894 stackshot_verify_current_proc_uuid_info(ssbuf, sslen, expected_mach_header_offset, &proc_info_data);
895 });
896}
897
ea3f0419
A
T_DECL(cseg_waitinfo, "test that threads stuck in the compressor report correct waitinfo")
{
	/*
	 * Wedge a worker thread inside the compressor via the kern.cseg_wedge_thread
	 * sysctl, take a stackshot with waitinfo enabled, then let parse_stackshot()
	 * confirm the wedged thread shows up with compressor waitinfo.
	 */
	int enable = 1;
	dispatch_queue_t queue = dispatch_queue_create("com.apple.stackshot.cseg_waitinfo", NULL);
	dispatch_semaphore_t ready = dispatch_semaphore_create(0);

	struct scenario scenario = {
		.name = "cseg_waitinfo",
		.quiet = false,
		.flags = (STACKSHOT_THREAD_WAITINFO | STACKSHOT_KCDATA_FORMAT),
	};

	dispatch_async(queue, ^{
		/* Publish this thread's 64-bit id so the parser can match the waitinfo owner. */
		pthread_threadid_np(NULL, &cseg_expected_threadid);
		dispatch_semaphore_signal(ready);
		/* This call blocks in the kernel until the unwedge sysctl below runs. */
		T_ASSERT_POSIX_SUCCESS(sysctlbyname("kern.cseg_wedge_thread", NULL, NULL, &enable, sizeof(enable)), "wedge child thread");
	});

	/* Wait until the worker has recorded its thread id, then give it a moment to block. */
	dispatch_semaphore_wait(ready, DISPATCH_TIME_FOREVER);
	sleep(1);

	T_LOG("taking stackshot");
	take_stackshot(&scenario, ^(void *ssbuf, size_t sslen) {
		T_ASSERT_POSIX_SUCCESS(sysctlbyname("kern.cseg_unwedge_thread", NULL, NULL, &enable, sizeof(enable)), "unwedge child thread");
		parse_stackshot(PARSE_STACKSHOT_WAITINFO_CSEG, ssbuf, sslen, -1);
	});
}
925
d9a64523
A
926#pragma mark performance tests
927
928#define SHOULD_REUSE_SIZE_HINT 0x01
929#define SHOULD_USE_DELTA 0x02
930#define SHOULD_TARGET_SELF 0x04
931
/*
 * Shared driver for the perf_* tests below.
 *
 * Repeatedly takes stackshots, feeding the resulting buffer size and elapsed
 * time into dt_stat until both statistics converge.  `options` is a mask of
 * SHOULD_REUSE_SIZE_HINT / SHOULD_USE_DELTA / SHOULD_TARGET_SELF and controls
 * whether successive shots reuse the previous size as a hint, collect deltas
 * relative to the previous timestamp, or target only the current process.
 */
static void
stackshot_perf(unsigned int options)
{
	dt_stat_t size_stat = dt_stat_create("bytes", "size");
	dt_stat_time_t time_stat = dt_stat_time_create("duration");

	struct scenario scenario = {
		.flags = (STACKSHOT_SAVE_LOADINFO | STACKSHOT_GET_GLOBAL_MEM_STATS
				| STACKSHOT_SAVE_IMP_DONATION_PIDS | STACKSHOT_KCDATA_FORMAT),
	};
	scenario.timer = time_stat;

	if (options & SHOULD_TARGET_SELF) {
		scenario.target_pid = getpid();
	}

	/* Keep sampling until both the duration and size statistics stabilize. */
	while (!dt_stat_stable(time_stat) || !dt_stat_stable(size_stat)) {
		__block uint64_t prev_timestamp = 0;
		__block uint32_t prev_size = 0;
		take_stackshot(&scenario, ^(void *ssbuf, size_t sslen) {
			dt_stat_add(size_stat, (double)sslen);
			prev_timestamp = stackshot_timestamp(ssbuf, sslen);
			prev_size = (uint32_t)sslen;
		});
		if (options & SHOULD_USE_DELTA) {
			/* Subsequent shots only capture changes since the last one. */
			scenario.since_timestamp = prev_timestamp;
			scenario.flags |= STACKSHOT_COLLECT_DELTA_SNAPSHOT;
		}
		if (options & SHOULD_REUSE_SIZE_HINT) {
			scenario.size_hint = prev_size;
		}
	}

	dt_stat_finalize(time_stat);
	dt_stat_finalize(size_stat);
}
968
/* Baseline: every shot lets the kernel size its own buffer from scratch. */
T_DECL(perf_no_size_hint, "test stackshot performance with no size hint",
		T_META_TAG_PERF)
{
	stackshot_perf(0);
}
974
/* Reuse the previous shot's size as the buffer hint for the next one. */
T_DECL(perf_size_hint, "test stackshot performance with size hint",
		T_META_TAG_PERF)
{
	stackshot_perf(SHOULD_REUSE_SIZE_HINT);
}
980
/* Size-hinted shots targeted at just the current process. */
T_DECL(perf_process, "test stackshot performance targeted at process",
		T_META_TAG_PERF)
{
	stackshot_perf(SHOULD_REUSE_SIZE_HINT | SHOULD_TARGET_SELF);
}
986
/* Size-hinted delta shots: capture only changes since the previous shot. */
T_DECL(perf_delta, "test delta stackshot performance",
		T_META_TAG_PERF)
{
	stackshot_perf(SHOULD_REUSE_SIZE_HINT | SHOULD_USE_DELTA);
}
992
/* Size-hinted delta shots restricted to the current process. */
T_DECL(perf_delta_process, "test delta stackshot performance targeted at a process",
		T_META_TAG_PERF)
{
	stackshot_perf(SHOULD_REUSE_SIZE_HINT | SHOULD_USE_DELTA | SHOULD_TARGET_SELF);
}
998
/*
 * Extract the mach absolute timestamp recorded in a (delta) stackshot buffer.
 * Aborts the test if the buffer is not a recognized stackshot kcdata stream
 * or contains no KCDATA_TYPE_MACH_ABSOLUTE_TIME entry.
 */
static uint64_t
stackshot_timestamp(void *ssbuf, size_t sslen)
{
	kcdata_iter_t iter = kcdata_iter(ssbuf, sslen);
	uint32_t buffer_type = kcdata_iter_type(iter);

	if (buffer_type != KCDATA_BUFFER_BEGIN_STACKSHOT &&
	    buffer_type != KCDATA_BUFFER_BEGIN_DELTA_STACKSHOT) {
		T_ASSERT_FAIL("invalid kcdata type %u", kcdata_iter_type(iter));
	}

	iter = kcdata_iter_find_type(iter, KCDATA_TYPE_MACH_ABSOLUTE_TIME);
	T_QUIET;
	T_ASSERT_TRUE(kcdata_iter_valid(iter), "timestamp found in stackshot");

	return *(uint64_t *)kcdata_iter_payload(iter);
}
1015
1016#define TEST_THREAD_NAME "stackshot_test_thread"
1017
/*
 * Verify thread-group data in a stackshot: collect every thread group id
 * reported in STACKSHOT_KCTYPE_THREAD_GROUP_SNAPSHOT arrays, then check that
 * each thread container references a group from that set.
 *
 * NOTE(review): the parameter is declared `void **ssbuf` but is handed
 * straight to kcdata_iter(), which takes `void *` — presumably callers pass
 * the raw buffer; confirm the extra level of indirection is intentional.
 */
static void
parse_thread_group_stackshot(void **ssbuf, size_t sslen)
{
	bool seen_thread_group_snapshot = false;
	kcdata_iter_t iter = kcdata_iter(ssbuf, sslen);
	T_ASSERT_EQ(kcdata_iter_type(iter), KCDATA_BUFFER_BEGIN_STACKSHOT,
			"buffer provided is a stackshot");

	/* Set of every thread-group id the kernel reported. */
	NSMutableSet *thread_groups = [[NSMutableSet alloc] init];

	iter = kcdata_iter_next(iter);
	/* Pass 1: harvest thread-group ids from the group-snapshot array. */
	KCDATA_ITER_FOREACH(iter) {
		switch (kcdata_iter_type(iter)) {
		case KCDATA_TYPE_ARRAY: {
			T_QUIET;
			T_ASSERT_TRUE(kcdata_iter_array_valid(iter),
					"checked that array is valid");

			if (kcdata_iter_array_elem_type(iter) != STACKSHOT_KCTYPE_THREAD_GROUP_SNAPSHOT) {
				continue;
			}

			seen_thread_group_snapshot = true;

			/* Element size distinguishes the v2 layout from the original one. */
			if (kcdata_iter_array_elem_size(iter) >= sizeof(struct thread_group_snapshot_v2)) {
				struct thread_group_snapshot_v2 *tgs_array = kcdata_iter_payload(iter);
				for (uint32_t j = 0; j < kcdata_iter_array_elem_count(iter); j++) {
					struct thread_group_snapshot_v2 *tgs = tgs_array + j;
					[thread_groups addObject:@(tgs->tgs_id)];
				}

			}
			else {
				struct thread_group_snapshot *tgs_array = kcdata_iter_payload(iter);
				for (uint32_t j = 0; j < kcdata_iter_array_elem_count(iter); j++) {
					struct thread_group_snapshot *tgs = tgs_array + j;
					[thread_groups addObject:@(tgs->tgs_id)];
				}
			}
			break;
		}
		}
	}
	/*
	 * Pass 2: check each thread's group membership.
	 * NOTE(review): this reuses `iter` after the loop above has already walked
	 * it to the end of the buffer; unless KCDATA_ITER_FOREACH rewinds, this
	 * second loop may never execute — confirm against the macro's definition.
	 */
	KCDATA_ITER_FOREACH(iter) {
		NSError *error = nil;

		switch (kcdata_iter_type(iter)) {

		case KCDATA_TYPE_CONTAINER_BEGIN: {
			T_QUIET;
			T_ASSERT_TRUE(kcdata_iter_container_valid(iter),
					"checked that container is valid");

			if (kcdata_iter_container_type(iter) != STACKSHOT_KCCONTAINER_THREAD) {
				break;
			}

			NSDictionary *container = parseKCDataContainer(&iter, &error);
			T_QUIET; T_ASSERT_NOTNULL(container, "parsed container from stackshot");
			T_QUIET; T_ASSERT_NULL(error, "error unset after parsing container");

			int tg = [container[@"thread_snapshots"][@"thread_group"] intValue];

			T_ASSERT_TRUE([thread_groups containsObject:@(tg)], "check that the thread group the thread is in exists");

			break;
		};

		}
	}
	T_ASSERT_TRUE(seen_thread_group_snapshot, "check that we have seen a thread group snapshot");
}
1090
/*
 * Cross-check the kernel-reported shared-cache layout against dyld.
 * Iterates the live shared cache's text segments and asserts each dylib's
 * UUID and unslid load address match the corresponding kernel entry, and
 * that both sides agree on the total library count.
 */
static void
verify_stackshot_sharedcache_layout(struct dyld_uuid_info_64 *uuids, uint32_t uuid_count)
{
	uuid_t current_cache_uuid;
	__block uint32_t idx = 0, seen = 0;

	_dyld_get_shared_cache_uuid(current_cache_uuid);
	int result = dyld_shared_cache_iterate_text(current_cache_uuid, ^(const dyld_shared_cache_dylib_text_info* info) {
		T_QUIET; T_ASSERT_LT(idx, uuid_count, "dyld_shared_cache_iterate_text exceeded number of libraries returned by kernel");

		seen++;
		struct dyld_uuid_info_64 *entry = &uuids[idx];
		T_QUIET; T_ASSERT_EQ(memcmp(info->dylibUuid, entry->imageUUID, sizeof(info->dylibUuid)), 0,
				"dyld returned UUID doesn't match kernel returned UUID");
		T_QUIET; T_ASSERT_EQ(info->loadAddressUnslid, entry->imageLoadAddress,
				"dyld returned load address doesn't match kernel returned load address");
		idx++;
	});

	T_ASSERT_EQ(result, 0, "iterate shared cache layout");
	T_ASSERT_EQ(seen, uuid_count, "dyld iterator returned same number of libraries as kernel");

	T_LOG("verified %d libraries from dyld shared cache", seen);
}
1115
/*
 * Validate a kcdata stackshot buffer.
 *
 * stackshot_parsing_flags is a mask of PARSE_STACKSHOT_* bits selecting which
 * optional records must be present (delta format, zombie child, shared-cache
 * layout, dispatch queue label, turnstile info, compressor waitinfo).
 * child_pid is only consulted when PARSE_STACKSHOT_ZOMBIE is set and names
 * the exited child to look for; pass -1 otherwise.
 *
 * Fix: thread identifiers (ths_thread_id, waitinfo "owner",
 * "turnstile_context") and ts_unique_pid are 64-bit values; they were being
 * read with -intValue, which truncates to 32 bits and can make comparisons
 * against pthread_threadid_np()-derived ids spuriously fail.  They are now
 * read with -unsignedLongLongValue / -longLongValue.
 */
static void
parse_stackshot(uint64_t stackshot_parsing_flags, void *ssbuf, size_t sslen, int child_pid)
{
	bool delta = (stackshot_parsing_flags & PARSE_STACKSHOT_DELTA);
	bool expect_zombie_child = (stackshot_parsing_flags & PARSE_STACKSHOT_ZOMBIE);
	bool expect_cseg_waitinfo = (stackshot_parsing_flags & PARSE_STACKSHOT_WAITINFO_CSEG);
	bool expect_shared_cache_layout = false;
	bool expect_shared_cache_uuid = !delta;
	bool expect_dispatch_queue_label = (stackshot_parsing_flags & PARSE_STACKSHOT_DISPATCH_QUEUE_LABEL);
	bool expect_turnstile_lock = (stackshot_parsing_flags & PARSE_STACKSHOT_TURNSTILEINFO);
	bool found_zombie_child = false, found_shared_cache_layout = false, found_shared_cache_uuid = false;
	bool found_dispatch_queue_label = false, found_turnstile_lock = false;
	bool found_cseg_waitinfo = false;

	/* Only expect a shared-cache UUID if this process actually runs with one. */
	if (expect_shared_cache_uuid) {
		uuid_t shared_cache_uuid;
		if (!_dyld_get_shared_cache_uuid(shared_cache_uuid)) {
			T_LOG("Skipping verifying shared cache UUID in stackshot data because not running with a shared cache");
			expect_shared_cache_uuid = false;
		}
	}

	if (stackshot_parsing_flags & PARSE_STACKSHOT_SHAREDCACHE_LAYOUT) {
		size_t shared_cache_length = 0;
		const void *cache_header = _dyld_get_shared_cache_range(&shared_cache_length);
		T_QUIET; T_ASSERT_NOTNULL(cache_header, "current process running with shared cache");
		T_QUIET; T_ASSERT_GT(shared_cache_length, sizeof(struct _dyld_cache_header), "valid shared cache length populated by _dyld_get_shared_cache_range");

		/* The kernel only emits the full layout for locally built caches. */
		if (_dyld_shared_cache_is_locally_built()) {
			T_LOG("device running with locally built shared cache, expect shared cache layout");
			expect_shared_cache_layout = true;
		} else {
			T_LOG("device running with B&I built shared-cache, no shared cache layout expected");
		}
	}

	if (expect_zombie_child) {
		T_QUIET; T_ASSERT_GT(child_pid, 0, "child pid greater than zero");
	}

	kcdata_iter_t iter = kcdata_iter(ssbuf, sslen);
	if (delta) {
		T_ASSERT_EQ(kcdata_iter_type(iter), KCDATA_BUFFER_BEGIN_DELTA_STACKSHOT,
				"buffer provided is a delta stackshot");
	} else {
		T_ASSERT_EQ(kcdata_iter_type(iter), KCDATA_BUFFER_BEGIN_STACKSHOT,
				"buffer provided is a stackshot");
	}

	iter = kcdata_iter_next(iter);
	KCDATA_ITER_FOREACH(iter) {
		NSError *error = nil;

		switch (kcdata_iter_type(iter)) {
		case KCDATA_TYPE_ARRAY: {
			T_QUIET;
			T_ASSERT_TRUE(kcdata_iter_array_valid(iter),
					"checked that array is valid");

			NSMutableDictionary *array = parseKCDataArray(iter, &error);
			T_QUIET; T_ASSERT_NOTNULL(array, "parsed array from stackshot");
			T_QUIET; T_ASSERT_NULL(error, "error unset after parsing array");

			if (kcdata_iter_array_elem_type(iter) == STACKSHOT_KCTYPE_SYS_SHAREDCACHE_LAYOUT) {
				struct dyld_uuid_info_64 *shared_cache_uuids = kcdata_iter_payload(iter);
				uint32_t uuid_count = kcdata_iter_array_elem_count(iter);
				T_ASSERT_NOTNULL(shared_cache_uuids, "parsed shared cache layout array");
				T_ASSERT_GT(uuid_count, 0, "returned valid number of UUIDs from shared cache");
				verify_stackshot_sharedcache_layout(shared_cache_uuids, uuid_count);
				found_shared_cache_layout = true;
			}

			break;
		}

		case KCDATA_TYPE_CONTAINER_BEGIN: {
			T_QUIET;
			T_ASSERT_TRUE(kcdata_iter_container_valid(iter),
					"checked that container is valid");

			if (kcdata_iter_container_type(iter) != STACKSHOT_KCCONTAINER_TASK) {
				break;
			}

			NSDictionary *container = parseKCDataContainer(&iter, &error);
			T_QUIET; T_ASSERT_NOTNULL(container, "parsed container from stackshot");
			T_QUIET; T_ASSERT_NULL(error, "error unset after parsing container");

			/* Look for the test's dispatch queue label in any task's threads. */
			if (expect_dispatch_queue_label && !found_dispatch_queue_label) {
				for (id thread_key in container[@"task_snapshots"][@"thread_snapshots"]) {
					NSMutableDictionary *thread = container[@"task_snapshots"][@"thread_snapshots"][thread_key];
					NSString *dql = thread[@"dispatch_queue_label"];

					if ([dql isEqualToString:@TEST_STACKSHOT_QUEUE_LABEL]) {
						found_dispatch_queue_label = true;
						break;
					}
				}
			}

			/* Match the wedged compressor thread by its 64-bit thread id. */
			if (expect_cseg_waitinfo) {
				NSArray *winfos = container[@"task_snapshots"][@"thread_waitinfo"];

				for (id i in winfos) {
					/* owner is a 64-bit thread id; -intValue would truncate it. */
					if ([i[@"wait_type"] intValue] == kThreadWaitCompressor && [i[@"owner"] unsignedLongLongValue] == cseg_expected_threadid) {
						found_cseg_waitinfo = true;
						break;
					}
				}
			}

			int pid = [container[@"task_snapshots"][@"task_snapshot"][@"ts_pid"] intValue];
			if (expect_zombie_child && (pid == child_pid)) {
				found_zombie_child = true;

				uint64_t task_flags = [container[@"task_snapshots"][@"task_snapshot"][@"ts_ss_flags"] unsignedLongLongValue];
				T_ASSERT_TRUE((task_flags & kTerminatedSnapshot) == kTerminatedSnapshot, "child zombie marked as terminated");

				continue;
			} else if (pid != getpid()) {
				/* The remaining checks only apply to the current process. */
				break;
			}

			T_EXPECT_EQ_STR(current_process_name(),
					[container[@"task_snapshots"][@"task_snapshot"][@"ts_p_comm"] UTF8String],
					"current process name matches in stackshot");

			uint64_t task_flags = [container[@"task_snapshots"][@"task_snapshot"][@"ts_ss_flags"] unsignedLongLongValue];
			T_ASSERT_FALSE((task_flags & kTerminatedSnapshot) == kTerminatedSnapshot, "current process not marked as terminated");

			/* ts_unique_pid is 64-bit; compare as long long to avoid truncation. */
			T_QUIET;
			T_EXPECT_LE((long long)pid, [container[@"task_snapshots"][@"task_snapshot"][@"ts_unique_pid"] longLongValue],
					"unique pid is greater than pid");

			bool found_main_thread = false;
			uint64_t main_thread_id = UINT64_MAX;	/* sentinel: not found yet */
			for (id thread_key in container[@"task_snapshots"][@"thread_snapshots"]) {
				NSMutableDictionary *thread = container[@"task_snapshots"][@"thread_snapshots"][thread_key];
				NSDictionary *thread_snap = thread[@"thread_snapshot"];

				/* ths_thread_id is 64-bit; read it without truncation. */
				T_QUIET; T_EXPECT_GT([thread_snap[@"ths_thread_id"] unsignedLongLongValue], 0ULL,
						"thread ID of thread in current task is valid");
				T_QUIET; T_EXPECT_GT([thread_snap[@"ths_base_priority"] intValue], 0,
						"base priority of thread in current task is valid");
				T_QUIET; T_EXPECT_GT([thread_snap[@"ths_sched_priority"] intValue], 0,
						"scheduling priority of thread in current task is valid");

				NSString *pth_name = thread[@"pth_name"];
				if (pth_name != nil && [pth_name isEqualToString:@TEST_THREAD_NAME]) {
					found_main_thread = true;
					main_thread_id = [thread_snap[@"ths_thread_id"] unsignedLongLongValue];

					T_QUIET; T_EXPECT_GT([thread_snap[@"ths_total_syscalls"] intValue], 0,
							"total syscalls of current thread is valid");

					NSDictionary *cpu_times = thread[@"cpu_times"];
					T_EXPECT_GE([cpu_times[@"runnable_time"] intValue],
							[cpu_times[@"system_time"] intValue] +
							[cpu_times[@"user_time"] intValue],
							"runnable time of current thread is valid");
				}
			}
			T_EXPECT_TRUE(found_main_thread, "found main thread for current task in stackshot");

			if (expect_turnstile_lock && !found_turnstile_lock) {
				NSArray *tsinfos = container[@"task_snapshots"][@"thread_turnstileinfo"];

				for (id i in tsinfos) {
					/* turnstile_context holds a 64-bit thread id here. */
					if ([i[@"turnstile_context"] unsignedLongLongValue] == main_thread_id) {
						found_turnstile_lock = true;
						break;
					}
				}
			}

			break;
		}
		case STACKSHOT_KCTYPE_SHAREDCACHE_LOADINFO: {
			struct dyld_uuid_info_64_v2 *shared_cache_info = kcdata_iter_payload(iter);
			uuid_t shared_cache_uuid;
			T_QUIET; T_ASSERT_TRUE(_dyld_get_shared_cache_uuid(shared_cache_uuid), "retrieve current shared cache UUID");
			T_QUIET; T_ASSERT_EQ(memcmp(shared_cache_info->imageUUID, shared_cache_uuid, sizeof(shared_cache_uuid)), 0,
					"dyld returned UUID doesn't match kernel returned UUID for system shared cache");
			found_shared_cache_uuid = true;
			break;
		}
		}
	}

	if (expect_zombie_child) {
		T_QUIET; T_ASSERT_TRUE(found_zombie_child, "found zombie child in kcdata");
	}

	if (expect_shared_cache_layout) {
		T_QUIET; T_ASSERT_TRUE(found_shared_cache_layout, "shared cache layout found in kcdata");
	}

	if (expect_shared_cache_uuid) {
		T_QUIET; T_ASSERT_TRUE(found_shared_cache_uuid, "shared cache UUID found in kcdata");
	}

	if (expect_dispatch_queue_label) {
		T_QUIET; T_ASSERT_TRUE(found_dispatch_queue_label, "dispatch queue label found in kcdata");
	}

	if (expect_turnstile_lock) {
		T_QUIET; T_ASSERT_TRUE(found_turnstile_lock, "found expected deadlock");
	}

	if (expect_cseg_waitinfo) {
		T_QUIET; T_ASSERT_TRUE(found_cseg_waitinfo, "found c_seg waitinfo");
	}

	T_ASSERT_FALSE(KCDATA_ITER_FOREACH_FAILED(iter), "successfully iterated kcdata");
}
1330
/*
 * Return the current process's name, fetched once via proc_name() and cached
 * in a static buffer for subsequent calls.
 */
static const char *
current_process_name(void)
{
	static char name[64];

	if (name[0] == '\0') {
		int rc = proc_name(getpid(), name, sizeof(name));
		T_QUIET;
		T_ASSERT_POSIX_SUCCESS(rc, "proc_name failed for current process");
	}

	return name;
}
1344
/*
 * Name the calling thread TEST_THREAD_NAME so parse_stackshot() can locate it
 * via the pth_name field in the thread snapshots.
 */
static void
initialize_thread(void)
{
	int err = pthread_setname_np(TEST_THREAD_NAME);
	T_QUIET;
	T_ASSERT_POSIX_ZERO(err, "set thread name to %s", TEST_THREAD_NAME);
}