#include <darwintest.h>
#include <darwintest_utils.h>
#include <kern/debug.h>
#include <kern/kern_cdata.h>
#include <kdd.h>
#include <libproc.h>
#include <mach-o/dyld.h>
#include <mach-o/dyld_images.h>
#include <mach-o/dyld_priv.h>
#include <sys/syscall.h>
#include <sys/stackshot.h>

T_GLOBAL_META(
	T_META_NAMESPACE("xnu.stackshot"),
	T_META_CHECK_LEAKS(false),
	T_META_ASROOT(true)
	);

static const char *current_process_name(void);
static void verify_stackshot_sharedcache_layout(struct dyld_uuid_info_64 *uuids, uint32_t uuid_count);
static void parse_stackshot(uint64_t stackshot_parsing_flags, void *ssbuf, size_t sslen, int child_pid);
static void parse_thread_group_stackshot(void **sbuf, size_t sslen);
static uint64_t stackshot_timestamp(void *ssbuf, size_t sslen);
static void initialize_thread(void);

#define DEFAULT_STACKSHOT_BUFFER_SIZE (1024 * 1024)
#define MAX_STACKSHOT_BUFFER_SIZE     (6 * 1024 * 1024)

/* bit flags for parse_stackshot */
#define PARSE_STACKSHOT_DELTA                0x01
#define PARSE_STACKSHOT_ZOMBIE               0x02
#define PARSE_STACKSHOT_SHAREDCACHE_LAYOUT   0x04
#define PARSE_STACKSHOT_DISPATCH_QUEUE_LABEL 0x08
#define PARSE_STACKSHOT_TURNSTILEINFO        0x10

#define TEST_STACKSHOT_QUEUE_LABEL        "houston.we.had.a.problem"
#define TEST_STACKSHOT_QUEUE_LABEL_LENGTH sizeof(TEST_STACKSHOT_QUEUE_LABEL)

T_DECL(microstackshots, "test the microstackshot syscall")
{
	void *buf = NULL;
	unsigned int size = DEFAULT_STACKSHOT_BUFFER_SIZE;

	while (1) {
		buf = malloc(size);
		T_QUIET; T_ASSERT_NOTNULL(buf, "allocated stackshot buffer");

#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wdeprecated-declarations"
		int len = syscall(SYS_microstackshot, buf, size,
				STACKSHOT_GET_MICROSTACKSHOT);
#pragma clang diagnostic pop
		if (len == -1 && errno == ENOSYS) {
			T_SKIP("microstackshot syscall failed, likely not compiled with CONFIG_TELEMETRY");
		}
		if (len == -1 && errno == ENOSPC) {
			/* syscall failed because buffer wasn't large enough, try again */
			free(buf);
			buf = NULL;
			size *= 2;
			T_ASSERT_LE(size, (unsigned int)MAX_STACKSHOT_BUFFER_SIZE,
					"growing stackshot buffer to sane size");
			continue;
		}
		T_ASSERT_POSIX_SUCCESS(len, "called microstackshot syscall");
		break;
	}

	T_EXPECT_EQ(*(uint32_t *)buf,
			(uint32_t)STACKSHOT_MICRO_SNAPSHOT_MAGIC,
			"magic value for microstackshot matches");

	free(buf);
}

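/*
 * Describes one stackshot configuration for take_stackshot(): which flags to
 * pass, an optional target pid, delta timestamp, and size hint, plus how the
 * result should be treated (written to a result file, expected to fail, timed).
 */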
struct scenario {
	const char *name;
	uint32_t flags;
	bool quiet;
	bool should_fail;
	bool maybe_unsupported;
	pid_t target_pid;
	uint64_t since_timestamp;
	uint32_t size_hint;
	dt_stat_time_t timer;
};

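/* Suppress per-step logging when a scenario is timed or explicitly quiet. */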
static void
quiet(struct scenario *scenario)
{
	if (scenario->timer || scenario->quiet) {
		T_QUIET;
	}
}

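/*
 * Take a stackshot described by the given scenario: build a stackshot config,
 * apply the flags/pid/timestamp/size hint, capture (retrying on EBUSY and
 * ETIMEDOUT), optionally write the kcdata buffer to a <name>.kcdata result
 * file, and hand the buffer to the callback for parsing.
 */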
static void
take_stackshot(struct scenario *scenario, void (^cb)(void *buf, size_t size))
{
	initialize_thread();

	void *config = stackshot_config_create();
	quiet(scenario);
	T_ASSERT_NOTNULL(config, "created stackshot config");

	int ret = stackshot_config_set_flags(config, scenario->flags);
	quiet(scenario);
	T_ASSERT_POSIX_ZERO(ret, "set flags %#x on stackshot config", scenario->flags);

	if (scenario->size_hint > 0) {
		ret = stackshot_config_set_size_hint(config, scenario->size_hint);
		quiet(scenario);
		T_ASSERT_POSIX_ZERO(ret, "set size hint %" PRIu32 " on stackshot config",
				scenario->size_hint);
	}

	if (scenario->target_pid > 0) {
		ret = stackshot_config_set_pid(config, scenario->target_pid);
		quiet(scenario);
		T_ASSERT_POSIX_ZERO(ret, "set target pid %d on stackshot config",
				scenario->target_pid);
	}

	if (scenario->since_timestamp > 0) {
		ret = stackshot_config_set_delta_timestamp(config, scenario->since_timestamp);
		quiet(scenario);
		T_ASSERT_POSIX_ZERO(ret, "set since timestamp %" PRIu64 " on stackshot config",
				scenario->since_timestamp);
	}

	int retries_remaining = 5;

retry: ;
	uint64_t start_time = mach_absolute_time();
	ret = stackshot_capture_with_config(config);
	uint64_t end_time = mach_absolute_time();

	if (scenario->should_fail) {
		T_EXPECTFAIL;
		T_ASSERT_POSIX_ZERO(ret, "called stackshot_capture_with_config");
		return;
	}

	if (ret == EBUSY || ret == ETIMEDOUT) {
		if (retries_remaining > 0) {
			if (!scenario->timer) {
				T_LOG("stackshot_capture_with_config failed with %s (%d), retrying",
						strerror(ret), ret);
			}

			retries_remaining--;
			goto retry;
		} else {
			T_ASSERT_POSIX_ZERO(ret,
					"called stackshot_capture_with_config (no retries remaining)");
		}
	} else if ((ret == ENOTSUP) && scenario->maybe_unsupported) {
		T_SKIP("kernel indicated this stackshot configuration is not supported");
	} else {
		quiet(scenario);
		T_ASSERT_POSIX_ZERO(ret, "called stackshot_capture_with_config");
	}

	if (scenario->timer) {
		dt_stat_mach_time_add(scenario->timer, end_time - start_time);
	}
	void *buf = stackshot_config_get_stackshot_buffer(config);
	size_t size = stackshot_config_get_stackshot_size(config);
	if (scenario->name) {
		char sspath[MAXPATHLEN];
		strlcpy(sspath, scenario->name, sizeof(sspath));
		strlcat(sspath, ".kcdata", sizeof(sspath));
		T_QUIET; T_ASSERT_POSIX_ZERO(dt_resultfile(sspath, sizeof(sspath)),
				"create result file path");

		if (!scenario->quiet) {
			T_LOG("writing stackshot to %s", sspath);
		}

		FILE *f = fopen(sspath, "w");
		T_WITH_ERRNO; T_QUIET; T_ASSERT_NOTNULL(f,
				"open stackshot output file");

		size_t written = fwrite(buf, size, 1, f);
		T_QUIET; T_ASSERT_POSIX_SUCCESS(written, "wrote stackshot to file");

		fclose(f);
	}
	cb(buf, size);

	ret = stackshot_config_dealloc(config);
	T_QUIET; T_EXPECT_POSIX_ZERO(ret, "deallocated stackshot config");
}

T_DECL(kcdata, "test that kcdata stackshots can be taken and parsed")
{
	struct scenario scenario = {
		.name = "kcdata",
		.flags = (STACKSHOT_SAVE_LOADINFO | STACKSHOT_GET_GLOBAL_MEM_STATS |
				STACKSHOT_SAVE_IMP_DONATION_PIDS | STACKSHOT_KCDATA_FORMAT),
	};

	T_LOG("taking kcdata stackshot");
	take_stackshot(&scenario, ^(void *ssbuf, size_t sslen) {
		parse_stackshot(0, ssbuf, sslen, -1);
	});
}

T_DECL(kcdata_faulting, "test that kcdata stackshots while faulting can be taken and parsed")
{
	struct scenario scenario = {
		.name = "faulting",
		.flags = (STACKSHOT_SAVE_LOADINFO | STACKSHOT_GET_GLOBAL_MEM_STATS
				| STACKSHOT_SAVE_IMP_DONATION_PIDS | STACKSHOT_KCDATA_FORMAT
				| STACKSHOT_ENABLE_BT_FAULTING | STACKSHOT_ENABLE_UUID_FAULTING),
	};

	T_LOG("taking faulting stackshot");
	take_stackshot(&scenario, ^(void *ssbuf, size_t sslen) {
		parse_stackshot(0, ssbuf, sslen, -1);
	});
}

T_DECL(bad_flags, "test a poorly-formed stackshot syscall")
{
	struct scenario scenario = {
		.flags = STACKSHOT_SAVE_IN_KERNEL_BUFFER /* not allowed from user space */,
		.should_fail = true,
	};

	T_LOG("attempting to take stackshot with kernel-only flag");
	take_stackshot(&scenario, ^(__unused void *ssbuf, __unused size_t sslen) {
		T_ASSERT_FAIL("stackshot data callback called");
	});
}

T_DECL(delta, "test delta stackshots")
{
	struct scenario scenario = {
		.name = "delta",
		.flags = (STACKSHOT_SAVE_LOADINFO | STACKSHOT_GET_GLOBAL_MEM_STATS
				| STACKSHOT_SAVE_IMP_DONATION_PIDS | STACKSHOT_KCDATA_FORMAT),
	};

	T_LOG("taking full stackshot");
	take_stackshot(&scenario, ^(void *ssbuf, size_t sslen) {
		uint64_t stackshot_time = stackshot_timestamp(ssbuf, sslen);

		T_LOG("taking delta stackshot since time %" PRIu64, stackshot_time);

		parse_stackshot(0, ssbuf, sslen, -1);

		struct scenario delta_scenario = {
			.flags = (STACKSHOT_SAVE_LOADINFO | STACKSHOT_GET_GLOBAL_MEM_STATS
					| STACKSHOT_SAVE_IMP_DONATION_PIDS | STACKSHOT_KCDATA_FORMAT
					| STACKSHOT_COLLECT_DELTA_SNAPSHOT),
			.since_timestamp = stackshot_time
		};

		take_stackshot(&delta_scenario, ^(void *dssbuf, size_t dsslen) {
			parse_stackshot(PARSE_STACKSHOT_DELTA, dssbuf, dsslen, -1);
		});
	});
}

T_DECL(shared_cache_layout, "test stackshot inclusion of shared cache layout")
{
	struct scenario scenario = {
		.name = "shared_cache_layout",
		.flags = (STACKSHOT_SAVE_LOADINFO | STACKSHOT_GET_GLOBAL_MEM_STATS
				| STACKSHOT_SAVE_IMP_DONATION_PIDS | STACKSHOT_KCDATA_FORMAT |
				STACKSHOT_COLLECT_SHAREDCACHE_LAYOUT),
	};

	size_t shared_cache_length;
	const void *cache_header = _dyld_get_shared_cache_range(&shared_cache_length);
	if (cache_header == NULL) {
		T_SKIP("Device not running with shared cache, skipping test...");
	}

	if (shared_cache_length == 0) {
		T_SKIP("dyld reports that currently running shared cache has zero length");
	}

	T_LOG("taking stackshot with STACKSHOT_COLLECT_SHAREDCACHE_LAYOUT set");
	take_stackshot(&scenario, ^(void *ssbuf, size_t sslen) {
		parse_stackshot(PARSE_STACKSHOT_SHAREDCACHE_LAYOUT, ssbuf, sslen, -1);
	});
}

T_DECL(stress, "test that taking stackshots for 60 seconds doesn't crash the system")
{
	uint64_t max_diff_time = 60ULL /* seconds */ * 1000000000ULL;
	uint64_t start_time;

	struct scenario scenario = {
		.name = "stress",
		.quiet = true,
		.flags = (STACKSHOT_KCDATA_FORMAT |
				STACKSHOT_THREAD_WAITINFO |
				STACKSHOT_SAVE_LOADINFO |
				STACKSHOT_SAVE_KEXT_LOADINFO |
				STACKSHOT_GET_GLOBAL_MEM_STATS |
				// STACKSHOT_GET_BOOT_PROFILE |
				STACKSHOT_SAVE_IMP_DONATION_PIDS |
				STACKSHOT_COLLECT_SHAREDCACHE_LAYOUT |
				STACKSHOT_THREAD_GROUP |
				STACKSHOT_SAVE_JETSAM_COALITIONS |
				STACKSHOT_ASID |
				// STACKSHOT_PAGE_TABLES |
				0),
	};

	start_time = clock_gettime_nsec_np(CLOCK_MONOTONIC);
	while (clock_gettime_nsec_np(CLOCK_MONOTONIC) - start_time < max_diff_time) {
		take_stackshot(&scenario, ^(void *ssbuf, size_t sslen) {
			printf(".");
			fflush(stdout);
		});

		/* Leave some time for the testing infrastructure to catch up */
		usleep(10000);
	}
	printf("\n");
}

T_DECL(dispatch_queue_label, "test that kcdata stackshots contain libdispatch queue labels")
{
	struct scenario scenario = {
		.name = "kcdata",
		.flags = (STACKSHOT_GET_DQ | STACKSHOT_KCDATA_FORMAT),
	};
	dispatch_semaphore_t child_ready_sem, parent_done_sem;
	dispatch_queue_t dq;

#if TARGET_OS_WATCH
	T_SKIP("This test is flaky on watches: 51663346");
#endif

	child_ready_sem = dispatch_semaphore_create(0);
	T_QUIET; T_ASSERT_NOTNULL(child_ready_sem, "dqlabel child semaphore");

	parent_done_sem = dispatch_semaphore_create(0);
	T_QUIET; T_ASSERT_NOTNULL(parent_done_sem, "dqlabel parent semaphore");

	dq = dispatch_queue_create(TEST_STACKSHOT_QUEUE_LABEL, NULL);
	T_QUIET; T_ASSERT_NOTNULL(dq, "dispatch queue");

	/* start the helper thread */
	dispatch_async(dq, ^{
		dispatch_semaphore_signal(child_ready_sem);

		dispatch_semaphore_wait(parent_done_sem, DISPATCH_TIME_FOREVER);
	});

	/* block behind the child starting up */
	dispatch_semaphore_wait(child_ready_sem, DISPATCH_TIME_FOREVER);

	T_LOG("taking kcdata stackshot with libdispatch queue labels");
	take_stackshot(&scenario, ^(void *ssbuf, size_t sslen) {
		parse_stackshot(PARSE_STACKSHOT_DISPATCH_QUEUE_LABEL, ssbuf, sslen, -1);
	});

	dispatch_semaphore_signal(parent_done_sem);
}

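/*
 * Helper thread for the zombie test: signals that it has started, then blocks
 * in the kernel via the kern.wedge_thread sysctl so the parent can observe it
 * in a stackshot after the child process has exited.
 */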
static void *stuck_sysctl_thread(void *arg) {
	int val = 1;
	dispatch_semaphore_t child_thread_started = *(dispatch_semaphore_t *)arg;

	dispatch_semaphore_signal(child_thread_started);
	T_ASSERT_POSIX_SUCCESS(sysctlbyname("kern.wedge_thread", NULL, NULL, &val, sizeof(val)), "wedge child thread");

	return NULL;
}

T_HELPER_DECL(zombie_child, "child process to sample as a zombie")
{
	pthread_t pthread;
	dispatch_semaphore_t child_thread_started = dispatch_semaphore_create(0);
	T_QUIET; T_ASSERT_NOTNULL(child_thread_started, "zombie child thread semaphore");

	/* spawn another thread to get stuck in the kernel, then call exit() to become a zombie */
	T_QUIET; T_ASSERT_POSIX_SUCCESS(pthread_create(&pthread, NULL, stuck_sysctl_thread, &child_thread_started), "pthread_create");

	dispatch_semaphore_wait(child_thread_started, DISPATCH_TIME_FOREVER);

	/* sleep for a bit in the hope of ensuring that the other thread has called the sysctl before we signal the parent */
	usleep(100);
	T_ASSERT_POSIX_SUCCESS(kill(getppid(), SIGUSR1), "signaled parent to take stackshot");

	exit(0);
}

T_DECL(zombie, "tests a stackshot of a zombie task with a thread stuck in the kernel")
{
	char path[PATH_MAX];
	uint32_t path_size = sizeof(path);
	T_ASSERT_POSIX_ZERO(_NSGetExecutablePath(path, &path_size), "_NSGetExecutablePath");
	char *args[] = { path, "-n", "zombie_child", NULL };

	dispatch_source_t child_sig_src;
	dispatch_semaphore_t child_ready_sem = dispatch_semaphore_create(0);
	T_QUIET; T_ASSERT_NOTNULL(child_ready_sem, "zombie child semaphore");

	dispatch_queue_t signal_processing_q = dispatch_queue_create("signal processing queue", NULL);
	T_QUIET; T_ASSERT_NOTNULL(signal_processing_q, "signal processing queue");

	pid_t pid;

	T_LOG("spawning a child");

	signal(SIGUSR1, SIG_IGN);
	child_sig_src = dispatch_source_create(DISPATCH_SOURCE_TYPE_SIGNAL, SIGUSR1, 0, signal_processing_q);
	T_QUIET; T_ASSERT_NOTNULL(child_sig_src, "dispatch_source_create (child_sig_src)");

	dispatch_source_set_event_handler(child_sig_src, ^{ dispatch_semaphore_signal(child_ready_sem); });
	dispatch_activate(child_sig_src);

	int sp_ret = posix_spawn(&pid, args[0], NULL, NULL, args, NULL);
	T_QUIET; T_ASSERT_POSIX_ZERO(sp_ret, "spawned process '%s' with PID %d", args[0], pid);

	dispatch_semaphore_wait(child_ready_sem, DISPATCH_TIME_FOREVER);

	T_LOG("received signal from child, capturing stackshot");

	struct proc_bsdshortinfo bsdshortinfo;
	int retval, iterations_to_wait = 10;

	while (iterations_to_wait > 0) {
		retval = proc_pidinfo(pid, PROC_PIDT_SHORTBSDINFO, 0, &bsdshortinfo, sizeof(bsdshortinfo));
		if ((retval == 0) && errno == ESRCH) {
			T_LOG("unable to find child using proc_pidinfo, assuming zombie");
			break;
		}

		T_QUIET; T_WITH_ERRNO; T_ASSERT_GT(retval, 0, "proc_pidinfo(PROC_PIDT_SHORTBSDINFO) returned a value > 0");
		T_QUIET; T_ASSERT_EQ(retval, (int)sizeof(bsdshortinfo), "proc_pidinfo call for PROC_PIDT_SHORTBSDINFO returned expected size");

		if (bsdshortinfo.pbsi_flags & PROC_FLAG_INEXIT) {
			T_LOG("child proc info marked as in exit");
			break;
		}

		iterations_to_wait--;
		if (iterations_to_wait == 0) {
			/*
			 * This will mark the test as failed but let it continue so we
			 * don't leave a process stuck in the kernel.
			 */
			T_FAIL("unable to discover that child is marked as exiting");
		}

		/* Give the child a few more seconds to make it to exit */
		sleep(5);
	}

	/* Give the child some more time to make it through exit */
	sleep(10);

	struct scenario scenario = {
		.name = "zombie",
		.flags = (STACKSHOT_SAVE_LOADINFO | STACKSHOT_GET_GLOBAL_MEM_STATS
				| STACKSHOT_SAVE_IMP_DONATION_PIDS | STACKSHOT_KCDATA_FORMAT),
	};

	take_stackshot(&scenario, ^( void *ssbuf, size_t sslen) {
		/* First unwedge the child so we can reap it */
		int val = 1, status;
		T_ASSERT_POSIX_SUCCESS(sysctlbyname("kern.unwedge_thread", NULL, NULL, &val, sizeof(val)), "unwedge child");

		T_QUIET; T_ASSERT_POSIX_SUCCESS(waitpid(pid, &status, 0), "waitpid on zombie child");

		parse_stackshot(PARSE_STACKSHOT_ZOMBIE, ssbuf, sslen, pid);
	});
}

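/*
 * Read the calling thread's user promotion base priority via THREAD_POLICY_STATE;
 * the turnstile test below polls this to detect that the main thread has been promoted.
 */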
static uint32_t
get_user_promotion_basepri(void)
{
	mach_msg_type_number_t count = THREAD_POLICY_STATE_COUNT;
	struct thread_policy_state thread_policy;
	boolean_t get_default = FALSE;
	mach_port_t thread_port = pthread_mach_thread_np(pthread_self());

	kern_return_t kr = thread_policy_get(thread_port, THREAD_POLICY_STATE,
			(thread_policy_t)&thread_policy, &count, &get_default);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "thread_policy_get");
	return thread_policy.thps_user_promotion_basepri;
}

static int
get_pri(thread_t thread_port)
{
	kern_return_t kr;

	thread_extended_info_data_t extended_info;
	mach_msg_type_number_t count = THREAD_EXTENDED_INFO_COUNT;
	kr = thread_info(thread_port, THREAD_EXTENDED_INFO,
			(thread_info_t)&extended_info, &count);

	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "thread_info");

	return extended_info.pth_curpri;
}

T_DECL(turnstile_singlehop, "turnstile single hop test")
{
	dispatch_queue_t dq1, dq2;
	dispatch_semaphore_t sema_x;
	dispatch_queue_attr_t dq1_attr, dq2_attr;
	qos_class_t main_qos = 0;
	int main_relpri = 0, main_relpri2 = 0, main_afterpri = 0;
	struct scenario scenario = {
		.name = "turnstile_singlehop",
		.flags = (STACKSHOT_THREAD_WAITINFO | STACKSHOT_KCDATA_FORMAT),
	};
	dq1_attr = dispatch_queue_attr_make_with_qos_class(DISPATCH_QUEUE_SERIAL, QOS_CLASS_UTILITY, 0);
	dq2_attr = dispatch_queue_attr_make_with_qos_class(DISPATCH_QUEUE_SERIAL, QOS_CLASS_USER_INITIATED, 0);
	pthread_mutex_t lock_a = PTHREAD_MUTEX_INITIALIZER;
	pthread_mutex_t lock_b = PTHREAD_MUTEX_INITIALIZER;

	pthread_mutex_t *lockap = &lock_a, *lockbp = &lock_b;

	dq1 = dispatch_queue_create("q1", dq1_attr);
	dq2 = dispatch_queue_create("q2", dq2_attr);
	sema_x = dispatch_semaphore_create(0);

	pthread_mutex_lock(lockap);
	dispatch_async(dq1, ^{
		pthread_mutex_lock(lockbp);
		T_ASSERT_POSIX_SUCCESS(pthread_get_qos_class_np(pthread_self(), &main_qos, &main_relpri), "get qos class");
		T_LOG("The priority of q1 is %d\n", get_pri(mach_thread_self()));
		dispatch_semaphore_signal(sema_x);
		pthread_mutex_lock(lockap);
	});
	dispatch_semaphore_wait(sema_x, DISPATCH_TIME_FOREVER);

	T_LOG("Async1 completed");

	pthread_set_qos_class_self_np(QOS_CLASS_UTILITY, 0);
	T_ASSERT_POSIX_SUCCESS(pthread_get_qos_class_np(pthread_self(), &main_qos, &main_relpri), "get qos class");
	T_LOG("The priority of main is %d\n", get_pri(mach_thread_self()));
	main_relpri = get_pri(mach_thread_self());

	dispatch_async(dq2, ^{
		T_ASSERT_POSIX_SUCCESS(pthread_get_qos_class_np(pthread_self(), &main_qos, &main_relpri2), "get qos class");
		T_LOG("The priority of q2 is %d\n", get_pri(mach_thread_self()));
		dispatch_semaphore_signal(sema_x);
		pthread_mutex_lock(lockbp);
	});
	dispatch_semaphore_wait(sema_x, DISPATCH_TIME_FOREVER);

	T_LOG("Async2 completed");

	while (1) {
		main_afterpri = get_user_promotion_basepri();
		if (main_relpri != main_afterpri) {
			T_LOG("Success with promotion pri is %d", main_afterpri);
			break;
		}

		usleep(100);
	}

	take_stackshot(&scenario, ^( void *ssbuf, size_t sslen) {
		parse_stackshot(PARSE_STACKSHOT_TURNSTILEINFO, ssbuf, sslen, -1);
	});
}

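/*
 * Walk a kcdata stackshot and expect every task and thread container to carry
 * a STACKSHOT_KCTYPE_INSTRS_CYCLES record when STACKSHOT_INSTRS_CYCLES is set.
 */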
static void
expect_instrs_cycles_in_stackshot(void *ssbuf, size_t sslen)
{
	kcdata_iter_t iter = kcdata_iter(ssbuf, sslen);

	bool in_task = false;
	bool in_thread = false;
	bool saw_instrs_cycles = false;
	iter = kcdata_iter_next(iter);

	KCDATA_ITER_FOREACH(iter) {
		switch (kcdata_iter_type(iter)) {
		case KCDATA_TYPE_CONTAINER_BEGIN:
			switch (kcdata_iter_container_type(iter)) {
			case STACKSHOT_KCCONTAINER_TASK:
				in_task = true;
				saw_instrs_cycles = false;
				break;

			case STACKSHOT_KCCONTAINER_THREAD:
				in_thread = true;
				saw_instrs_cycles = false;
				break;

			default:
				break;
			}
			break;

		case STACKSHOT_KCTYPE_INSTRS_CYCLES:
			saw_instrs_cycles = true;
			break;

		case KCDATA_TYPE_CONTAINER_END:
			if (in_thread) {
				T_QUIET; T_EXPECT_TRUE(saw_instrs_cycles,
						"saw instructions and cycles in thread");
				in_thread = false;
			} else if (in_task) {
				T_QUIET; T_EXPECT_TRUE(saw_instrs_cycles,
						"saw instructions and cycles in task");
				in_task = false;
			}

		default:
			break;
		}
	}
}

static void
skip_if_monotonic_unsupported(void)
{
	int supported = 0;
	size_t supported_size = sizeof(supported);
	int ret = sysctlbyname("kern.monotonic.supported", &supported,
			&supported_size, 0, 0);
	if (ret < 0 || !supported) {
		T_SKIP("monotonic is unsupported");
	}
}

T_DECL(instrs_cycles, "test getting instructions and cycles in stackshot")
{
	skip_if_monotonic_unsupported();

	struct scenario scenario = {
		.name = "instrs-cycles",
		.flags = (STACKSHOT_SAVE_LOADINFO | STACKSHOT_INSTRS_CYCLES
				| STACKSHOT_KCDATA_FORMAT),
	};

	T_LOG("attempting to take stackshot with instructions and cycles");
	take_stackshot(&scenario, ^(void *ssbuf, size_t sslen) {
		parse_stackshot(0, ssbuf, sslen, -1);
		expect_instrs_cycles_in_stackshot(ssbuf, sslen);
	});
}

T_DECL(delta_instrs_cycles,
		"test delta stackshots with instructions and cycles")
{
	skip_if_monotonic_unsupported();

	struct scenario scenario = {
		.name = "delta-instrs-cycles",
		.flags = (STACKSHOT_SAVE_LOADINFO | STACKSHOT_INSTRS_CYCLES
				| STACKSHOT_KCDATA_FORMAT),
	};

	T_LOG("taking full stackshot");
	take_stackshot(&scenario, ^(void *ssbuf, size_t sslen) {
		uint64_t stackshot_time = stackshot_timestamp(ssbuf, sslen);

		T_LOG("taking delta stackshot since time %" PRIu64, stackshot_time);

		parse_stackshot(0, ssbuf, sslen, -1);
		expect_instrs_cycles_in_stackshot(ssbuf, sslen);

		struct scenario delta_scenario = {
			.name = "delta-instrs-cycles-next",
			.flags = (STACKSHOT_SAVE_LOADINFO | STACKSHOT_INSTRS_CYCLES
					| STACKSHOT_KCDATA_FORMAT
					| STACKSHOT_COLLECT_DELTA_SNAPSHOT),
			.since_timestamp = stackshot_time,
		};

		take_stackshot(&delta_scenario, ^(void *dssbuf, size_t dsslen) {
			parse_stackshot(PARSE_STACKSHOT_DELTA, dssbuf, dsslen, -1);
			expect_instrs_cycles_in_stackshot(dssbuf, dsslen);
		});
	});
}

static void
check_thread_groups_supported()
{
	int err;
	int supported = 0;
	size_t supported_size = sizeof(supported);
	err = sysctlbyname("kern.thread_groups_supported", &supported, &supported_size, NULL, 0);

	if (err || !supported)
		T_SKIP("thread groups not supported on this system");
}

T_DECL(thread_groups, "test getting thread groups in stackshot")
{
	check_thread_groups_supported();

	struct scenario scenario = {
		.name = "thread-groups",
		.flags = (STACKSHOT_SAVE_LOADINFO | STACKSHOT_THREAD_GROUP
				| STACKSHOT_KCDATA_FORMAT),
	};

	T_LOG("attempting to take stackshot with thread group flag");
	take_stackshot(&scenario, ^(void *ssbuf, size_t sslen) {
		parse_thread_group_stackshot(ssbuf, sslen);
	});
}

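/*
 * Validate the data emitted when STACKSHOT_ASID and STACKSHOT_PAGE_TABLES are
 * set: each page table segment header and its entries are sanity-checked, and
 * exactly one ASID and one page table dump must appear in the stackshot.
 */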
static void
parse_page_table_asid_stackshot(void **ssbuf, size_t sslen)
{
	bool seen_asid = false;
	bool seen_page_table_snapshot = false;
	kcdata_iter_t iter = kcdata_iter(ssbuf, sslen);
	T_ASSERT_EQ(kcdata_iter_type(iter), KCDATA_BUFFER_BEGIN_STACKSHOT,
			"buffer provided is a stackshot");

	iter = kcdata_iter_next(iter);
	KCDATA_ITER_FOREACH(iter) {
		switch (kcdata_iter_type(iter)) {
		case KCDATA_TYPE_ARRAY: {
			T_QUIET;
			T_ASSERT_TRUE(kcdata_iter_array_valid(iter),
					"checked that array is valid");

			if (kcdata_iter_array_elem_type(iter) != STACKSHOT_KCTYPE_PAGE_TABLES) {
				continue;
			}

			T_ASSERT_FALSE(seen_page_table_snapshot, "check that we haven't yet seen a page table snapshot");
			seen_page_table_snapshot = true;

			T_ASSERT_EQ((size_t) kcdata_iter_array_elem_size(iter), sizeof(uint64_t),
					"check that each element of the pagetable dump is the expected size");

			uint64_t *pt_array = kcdata_iter_payload(iter);
			uint32_t elem_count = kcdata_iter_array_elem_count(iter);
			uint32_t j;
			bool nonzero_tte = false;
			for (j = 0; j < elem_count;) {
				T_QUIET; T_ASSERT_LE(j + 4, elem_count, "check for valid page table segment header");
				uint64_t pa = pt_array[j];
				uint64_t num_entries = pt_array[j + 1];
				uint64_t start_va = pt_array[j + 2];
				uint64_t end_va = pt_array[j + 3];

				T_QUIET; T_ASSERT_NE(pa, (uint64_t) 0, "check that the pagetable physical address is non-zero");
				T_QUIET; T_ASSERT_EQ(pa % (num_entries * sizeof(uint64_t)), (uint64_t) 0, "check that the pagetable physical address is correctly aligned");
				T_QUIET; T_ASSERT_NE(num_entries, (uint64_t) 0, "check that a pagetable region has more than 0 entries");
				T_QUIET; T_ASSERT_LE(j + 4 + num_entries, (uint64_t) elem_count, "check for sufficient space in page table array");
				T_QUIET; T_ASSERT_GT(end_va, start_va, "check for valid VA bounds in page table segment header");

				for (uint32_t k = j + 4; k < (j + 4 + num_entries); ++k) {
					if (pt_array[k] != 0) {
						nonzero_tte = true;
						T_QUIET; T_ASSERT_EQ((pt_array[k] >> 48) & 0xf, (uint64_t) 0, "check that bits[48:51] of arm64 TTE are clear");
						// L0-L2 table and non-compressed L3 block entries should always have bit 1 set; assumes L0-L2 blocks will not be used outside the kernel
						bool table = ((pt_array[k] & 0x2) != 0);
						if (table) {
							T_QUIET; T_ASSERT_NE(pt_array[k] & ((1ULL << 48) - 1) & ~((1ULL << 12) - 1), (uint64_t) 0, "check that arm64 TTE physical address is non-zero");
						} else { // should be a compressed PTE
							T_QUIET; T_ASSERT_NE(pt_array[k] & 0xC000000000000000ULL, (uint64_t) 0, "check that compressed PTE has at least one of bits [63:62] set");
							T_QUIET; T_ASSERT_EQ(pt_array[k] & ~0xC000000000000000ULL, (uint64_t) 0, "check that compressed PTE has no other bits besides [63:62] set");
						}
					}
				}

				j += (4 + num_entries);
			}
			T_ASSERT_TRUE(nonzero_tte, "check that we saw at least one non-empty TTE");
			T_ASSERT_EQ(j, elem_count, "check that page table dump size matches extent of last header");
			break;
		}
		case STACKSHOT_KCTYPE_ASID: {
			T_ASSERT_FALSE(seen_asid, "check that we haven't yet seen an ASID");
			seen_asid = true;
		}
		}
	}
	T_ASSERT_TRUE(seen_page_table_snapshot, "check that we have seen a page table snapshot");
	T_ASSERT_TRUE(seen_asid, "check that we have seen an ASID");
}

T_DECL(dump_page_tables, "test stackshot page table dumping support")
{
	struct scenario scenario = {
		.name = "asid-page-tables",
		.flags = (STACKSHOT_KCDATA_FORMAT | STACKSHOT_ASID | STACKSHOT_PAGE_TABLES),
		.size_hint = (1ULL << 23), // 8 MB
		.target_pid = getpid(),
		.maybe_unsupported = true,
	};

	T_LOG("attempting to take stackshot with ASID and page table flags");
	take_stackshot(&scenario, ^(void *ssbuf, size_t sslen) {
		parse_page_table_asid_stackshot(ssbuf, sslen);
	});
}

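/*
 * Search the stackshot's library load-info arrays for the current process's
 * main binary UUID and check that it was recorded at the expected load address.
 */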
static void stackshot_verify_current_proc_uuid_info(void **ssbuf, size_t sslen, uint64_t expected_offset, const struct proc_uniqidentifierinfo *proc_info_data)
{
	const uuid_t *current_uuid = (const uuid_t *)(&proc_info_data->p_uuid);

	kcdata_iter_t iter = kcdata_iter(ssbuf, sslen);
	T_ASSERT_EQ(kcdata_iter_type(iter), KCDATA_BUFFER_BEGIN_STACKSHOT, "buffer provided is a stackshot");

	iter = kcdata_iter_next(iter);

	KCDATA_ITER_FOREACH(iter) {
		switch (kcdata_iter_type(iter)) {
		case KCDATA_TYPE_ARRAY: {
			T_QUIET; T_ASSERT_TRUE(kcdata_iter_array_valid(iter), "checked that array is valid");
			if (kcdata_iter_array_elem_type(iter) == KCDATA_TYPE_LIBRARY_LOADINFO64) {
				struct user64_dyld_uuid_info *info = (struct user64_dyld_uuid_info *) kcdata_iter_payload(iter);
				if (uuid_compare(*current_uuid, info->imageUUID) == 0) {
					T_ASSERT_EQ(expected_offset, info->imageLoadAddress, "found matching UUID with matching binary offset");
					return;
				}
			} else if (kcdata_iter_array_elem_type(iter) == KCDATA_TYPE_LIBRARY_LOADINFO) {
				struct user32_dyld_uuid_info *info = (struct user32_dyld_uuid_info *) kcdata_iter_payload(iter);
				if (uuid_compare(*current_uuid, info->imageUUID) == 0) {
					T_ASSERT_EQ(expected_offset, ((uint64_t) info->imageLoadAddress), "found matching UUID with matching binary offset");
					return;
				}
			}
			break;
		}
		default:
			break;
		}
	}

	T_FAIL("failed to find matching UUID in stackshot data");
}

T_DECL(proc_uuid_info, "tests that the main binary UUID for a proc is always populated")
{
	struct proc_uniqidentifierinfo proc_info_data = { };
	mach_msg_type_number_t count;
	kern_return_t kernel_status;
	task_dyld_info_data_t task_dyld_info;
	struct dyld_all_image_infos *target_infos;
	int retval;
	bool found_image_in_image_infos = false;
	uint64_t expected_mach_header_offset = 0;

	/* Find the UUID of our main binary */
	retval = proc_pidinfo(getpid(), PROC_PIDUNIQIDENTIFIERINFO, 0, &proc_info_data, sizeof(proc_info_data));
	T_QUIET; T_EXPECT_POSIX_SUCCESS(retval, "proc_pidinfo PROC_PIDUNIQIDENTIFIERINFO");
	T_QUIET; T_ASSERT_EQ_INT(retval, (int) sizeof(proc_info_data), "proc_pidinfo PROC_PIDUNIQIDENTIFIERINFO returned data");

	uuid_string_t str = {};
	uuid_unparse(*(uuid_t*)&proc_info_data.p_uuid, str);
	T_LOG("Found current UUID is %s", str);

	/* Find the location of the dyld image info metadata */
	count = TASK_DYLD_INFO_COUNT;
	kernel_status = task_info(mach_task_self(), TASK_DYLD_INFO, (task_info_t)&task_dyld_info, &count);
	T_QUIET; T_ASSERT_EQ(kernel_status, KERN_SUCCESS, "retrieve task_info for TASK_DYLD_INFO");

	target_infos = (struct dyld_all_image_infos *)task_dyld_info.all_image_info_addr;

	/* Find our binary in the dyld image info array */
	for (int i = 0; i < (int) target_infos->uuidArrayCount; i++) {
		if (uuid_compare(target_infos->uuidArray[i].imageUUID, *(uuid_t*)&proc_info_data.p_uuid) == 0) {
			expected_mach_header_offset = (uint64_t) target_infos->uuidArray[i].imageLoadAddress;
			found_image_in_image_infos = true;
		}
	}

	T_ASSERT_TRUE(found_image_in_image_infos, "found binary image in dyld image info list");

	/* Overwrite the dyld image info data so the kernel has to fallback to the UUID stored in the proc structure */
	target_infos->uuidArrayCount = 0;

	struct scenario scenario = {
		.name = "proc_uuid_info",
		.flags = (STACKSHOT_SAVE_LOADINFO | STACKSHOT_KCDATA_FORMAT),
		.target_pid = getpid(),
	};

	T_LOG("attempting to take stackshot for current PID");
	take_stackshot(&scenario, ^(void *ssbuf, size_t sslen) {
		stackshot_verify_current_proc_uuid_info(ssbuf, sslen, expected_mach_header_offset, &proc_info_data);
	});
}

#pragma mark performance tests

#define SHOULD_REUSE_SIZE_HINT 0x01
#define SHOULD_USE_DELTA       0x02
#define SHOULD_TARGET_SELF     0x04

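/*
 * Shared body for the perf_* tests: repeatedly take stackshots until both the
 * duration and size statistics stabilize, optionally reusing the previous size
 * as a hint, taking delta snapshots, or targeting only the current process.
 */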
static void
stackshot_perf(unsigned int options)
{
	struct scenario scenario = {
		.flags = (STACKSHOT_SAVE_LOADINFO | STACKSHOT_GET_GLOBAL_MEM_STATS
				| STACKSHOT_SAVE_IMP_DONATION_PIDS | STACKSHOT_KCDATA_FORMAT),
	};

	dt_stat_t size = dt_stat_create("bytes", "size");
	dt_stat_time_t duration = dt_stat_time_create("duration");
	scenario.timer = duration;

	if (options & SHOULD_TARGET_SELF) {
		scenario.target_pid = getpid();
	}

	while (!dt_stat_stable(duration) || !dt_stat_stable(size)) {
		__block uint64_t last_time = 0;
		__block uint32_t size_hint = 0;
		take_stackshot(&scenario, ^(void *ssbuf, size_t sslen) {
			dt_stat_add(size, (double)sslen);
			last_time = stackshot_timestamp(ssbuf, sslen);
			size_hint = (uint32_t)sslen;
		});
		if (options & SHOULD_USE_DELTA) {
			scenario.since_timestamp = last_time;
			scenario.flags |= STACKSHOT_COLLECT_DELTA_SNAPSHOT;
		}
		if (options & SHOULD_REUSE_SIZE_HINT) {
			scenario.size_hint = size_hint;
		}
	}

	dt_stat_finalize(duration);
	dt_stat_finalize(size);
}

T_DECL(perf_no_size_hint, "test stackshot performance with no size hint",
		T_META_TAG_PERF)
{
	stackshot_perf(0);
}

T_DECL(perf_size_hint, "test stackshot performance with size hint",
		T_META_TAG_PERF)
{
	stackshot_perf(SHOULD_REUSE_SIZE_HINT);
}

T_DECL(perf_process, "test stackshot performance targeted at process",
		T_META_TAG_PERF)
{
	stackshot_perf(SHOULD_REUSE_SIZE_HINT | SHOULD_TARGET_SELF);
}

T_DECL(perf_delta, "test delta stackshot performance",
		T_META_TAG_PERF)
{
	stackshot_perf(SHOULD_REUSE_SIZE_HINT | SHOULD_USE_DELTA);
}

T_DECL(perf_delta_process, "test delta stackshot performance targeted at a process",
		T_META_TAG_PERF)
{
	stackshot_perf(SHOULD_REUSE_SIZE_HINT | SHOULD_USE_DELTA | SHOULD_TARGET_SELF);
}

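/* Return the KCDATA_TYPE_MACH_ABSOLUTE_TIME timestamp recorded in a stackshot. */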
static uint64_t
stackshot_timestamp(void *ssbuf, size_t sslen)
{
	kcdata_iter_t iter = kcdata_iter(ssbuf, sslen);

	uint32_t type = kcdata_iter_type(iter);
	if (type != KCDATA_BUFFER_BEGIN_STACKSHOT && type != KCDATA_BUFFER_BEGIN_DELTA_STACKSHOT) {
		T_ASSERT_FAIL("invalid kcdata type %u", kcdata_iter_type(iter));
	}

	iter = kcdata_iter_find_type(iter, KCDATA_TYPE_MACH_ABSOLUTE_TIME);
	T_QUIET;
	T_ASSERT_TRUE(kcdata_iter_valid(iter), "timestamp found in stackshot");

	return *(uint64_t *)kcdata_iter_payload(iter);
}

#define TEST_THREAD_NAME "stackshot_test_thread"

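/*
 * Collect every thread group ID reported in the thread group snapshot array,
 * then check that each thread container references a group from that set.
 */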
static void
parse_thread_group_stackshot(void **ssbuf, size_t sslen)
{
	bool seen_thread_group_snapshot = false;
	kcdata_iter_t iter = kcdata_iter(ssbuf, sslen);
	T_ASSERT_EQ(kcdata_iter_type(iter), KCDATA_BUFFER_BEGIN_STACKSHOT,
			"buffer provided is a stackshot");

	NSMutableSet *thread_groups = [[NSMutableSet alloc] init];

	iter = kcdata_iter_next(iter);
	KCDATA_ITER_FOREACH(iter) {
		switch (kcdata_iter_type(iter)) {
		case KCDATA_TYPE_ARRAY: {
			T_QUIET;
			T_ASSERT_TRUE(kcdata_iter_array_valid(iter),
					"checked that array is valid");

			if (kcdata_iter_array_elem_type(iter) != STACKSHOT_KCTYPE_THREAD_GROUP_SNAPSHOT) {
				continue;
			}

			seen_thread_group_snapshot = true;

			if (kcdata_iter_array_elem_size(iter) >= sizeof(struct thread_group_snapshot_v2)) {
				struct thread_group_snapshot_v2 *tgs_array = kcdata_iter_payload(iter);
				for (uint32_t j = 0; j < kcdata_iter_array_elem_count(iter); j++) {
					struct thread_group_snapshot_v2 *tgs = tgs_array + j;
					[thread_groups addObject:@(tgs->tgs_id)];
				}
			} else {
				struct thread_group_snapshot *tgs_array = kcdata_iter_payload(iter);
				for (uint32_t j = 0; j < kcdata_iter_array_elem_count(iter); j++) {
					struct thread_group_snapshot *tgs = tgs_array + j;
					[thread_groups addObject:@(tgs->tgs_id)];
				}
			}
			break;
		}
		}
	}
	KCDATA_ITER_FOREACH(iter) {
		NSError *error = nil;

		switch (kcdata_iter_type(iter)) {
		case KCDATA_TYPE_CONTAINER_BEGIN: {
			T_QUIET;
			T_ASSERT_TRUE(kcdata_iter_container_valid(iter),
					"checked that container is valid");

			if (kcdata_iter_container_type(iter) != STACKSHOT_KCCONTAINER_THREAD) {
				break;
			}

			NSDictionary *container = parseKCDataContainer(&iter, &error);
			T_QUIET; T_ASSERT_NOTNULL(container, "parsed container from stackshot");
			T_QUIET; T_ASSERT_NULL(error, "error unset after parsing container");

			int tg = [container[@"thread_snapshots"][@"thread_group"] intValue];

			T_ASSERT_TRUE([thread_groups containsObject:@(tg)], "check that the thread group the thread is in exists");

			break;
		};
		}
	}
	T_ASSERT_TRUE(seen_thread_group_snapshot, "check that we have seen a thread group snapshot");
}

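/*
 * Compare the shared cache layout reported by the kernel against what dyld
 * reports for the current shared cache: same library count, and matching
 * UUIDs and unslid load addresses in the same order.
 */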
static void
verify_stackshot_sharedcache_layout(struct dyld_uuid_info_64 *uuids, uint32_t uuid_count)
{
	uuid_t cur_shared_cache_uuid;
	__block uint32_t lib_index = 0, libs_found = 0;

	_dyld_get_shared_cache_uuid(cur_shared_cache_uuid);
	int result = dyld_shared_cache_iterate_text(cur_shared_cache_uuid, ^(const dyld_shared_cache_dylib_text_info* info) {
		T_QUIET; T_ASSERT_LT(lib_index, uuid_count, "dyld_shared_cache_iterate_text exceeded number of libraries returned by kernel");

		libs_found++;
		struct dyld_uuid_info_64 *cur_stackshot_uuid_entry = &uuids[lib_index];
		T_QUIET; T_ASSERT_EQ(memcmp(info->dylibUuid, cur_stackshot_uuid_entry->imageUUID, sizeof(info->dylibUuid)), 0,
				"dyld returned UUID doesn't match kernel returned UUID");
		T_QUIET; T_ASSERT_EQ(info->loadAddressUnslid, cur_stackshot_uuid_entry->imageLoadAddress,
				"dyld returned load address doesn't match kernel returned load address");
		lib_index++;
	});

	T_ASSERT_EQ(result, 0, "iterate shared cache layout");
	T_ASSERT_EQ(libs_found, uuid_count, "dyld iterator returned same number of libraries as kernel");

	T_LOG("verified %d libraries from dyld shared cache", libs_found);
}

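/*
 * Generic stackshot verifier driven by the PARSE_STACKSHOT_* flags: checks the
 * buffer type (full vs. delta), validates the current task's snapshot and its
 * main thread, and looks for the optional zombie child, shared cache layout
 * and UUID, dispatch queue label, and turnstile information when requested.
 */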
static void
parse_stackshot(uint64_t stackshot_parsing_flags, void *ssbuf, size_t sslen, int child_pid)
{
	bool delta = (stackshot_parsing_flags & PARSE_STACKSHOT_DELTA);
	bool expect_zombie_child = (stackshot_parsing_flags & PARSE_STACKSHOT_ZOMBIE);
	bool expect_shared_cache_layout = false;
	bool expect_shared_cache_uuid = !delta;
	bool expect_dispatch_queue_label = (stackshot_parsing_flags & PARSE_STACKSHOT_DISPATCH_QUEUE_LABEL);
	bool expect_turnstile_lock = (stackshot_parsing_flags & PARSE_STACKSHOT_TURNSTILEINFO);
	bool found_zombie_child = false, found_shared_cache_layout = false, found_shared_cache_uuid = false;
	bool found_dispatch_queue_label = false, found_turnstile_lock = false;

	if (expect_shared_cache_uuid) {
		uuid_t shared_cache_uuid;
		if (!_dyld_get_shared_cache_uuid(shared_cache_uuid)) {
			T_LOG("Skipping verifying shared cache UUID in stackshot data because not running with a shared cache");
			expect_shared_cache_uuid = false;
		}
	}

	if (stackshot_parsing_flags & PARSE_STACKSHOT_SHAREDCACHE_LAYOUT) {
		size_t shared_cache_length = 0;
		const void *cache_header = _dyld_get_shared_cache_range(&shared_cache_length);
		T_QUIET; T_ASSERT_NOTNULL(cache_header, "current process running with shared cache");
		T_QUIET; T_ASSERT_GT(shared_cache_length, sizeof(struct _dyld_cache_header), "valid shared cache length populated by _dyld_get_shared_cache_range");

		if (_dyld_shared_cache_is_locally_built()) {
			T_LOG("device running with locally built shared cache, expect shared cache layout");
			expect_shared_cache_layout = true;
		} else {
			T_LOG("device running with B&I built shared-cache, no shared cache layout expected");
		}
	}

	if (expect_zombie_child) {
		T_QUIET; T_ASSERT_GT(child_pid, 0, "child pid greater than zero");
	}

	kcdata_iter_t iter = kcdata_iter(ssbuf, sslen);
	if (delta) {
		T_ASSERT_EQ(kcdata_iter_type(iter), KCDATA_BUFFER_BEGIN_DELTA_STACKSHOT,
				"buffer provided is a delta stackshot");
	} else {
		T_ASSERT_EQ(kcdata_iter_type(iter), KCDATA_BUFFER_BEGIN_STACKSHOT,
				"buffer provided is a stackshot");
	}

	iter = kcdata_iter_next(iter);
	KCDATA_ITER_FOREACH(iter) {
		NSError *error = nil;

		switch (kcdata_iter_type(iter)) {
		case KCDATA_TYPE_ARRAY: {
			T_QUIET;
			T_ASSERT_TRUE(kcdata_iter_array_valid(iter),
					"checked that array is valid");

			NSMutableDictionary *array = parseKCDataArray(iter, &error);
			T_QUIET; T_ASSERT_NOTNULL(array, "parsed array from stackshot");
			T_QUIET; T_ASSERT_NULL(error, "error unset after parsing array");

			if (kcdata_iter_array_elem_type(iter) == STACKSHOT_KCTYPE_SYS_SHAREDCACHE_LAYOUT) {
				struct dyld_uuid_info_64 *shared_cache_uuids = kcdata_iter_payload(iter);
				uint32_t uuid_count = kcdata_iter_array_elem_count(iter);
				T_ASSERT_NOTNULL(shared_cache_uuids, "parsed shared cache layout array");
				T_ASSERT_GT(uuid_count, 0, "returned valid number of UUIDs from shared cache");
				verify_stackshot_sharedcache_layout(shared_cache_uuids, uuid_count);
				found_shared_cache_layout = true;
			}

			break;
		}

		case KCDATA_TYPE_CONTAINER_BEGIN: {
			T_QUIET;
			T_ASSERT_TRUE(kcdata_iter_container_valid(iter),
					"checked that container is valid");

			if (kcdata_iter_container_type(iter) != STACKSHOT_KCCONTAINER_TASK) {
				break;
			}

			NSDictionary *container = parseKCDataContainer(&iter, &error);
			T_QUIET; T_ASSERT_NOTNULL(container, "parsed container from stackshot");
			T_QUIET; T_ASSERT_NULL(error, "error unset after parsing container");

			if (expect_dispatch_queue_label && !found_dispatch_queue_label) {
				for (id thread_key in container[@"task_snapshots"][@"thread_snapshots"]) {
					NSMutableDictionary *thread = container[@"task_snapshots"][@"thread_snapshots"][thread_key];
					NSString *dql = thread[@"dispatch_queue_label"];

					if ([dql isEqualToString:@TEST_STACKSHOT_QUEUE_LABEL]) {
						found_dispatch_queue_label = true;
						break;
					}
				}
			}

			int pid = [container[@"task_snapshots"][@"task_snapshot"][@"ts_pid"] intValue];
			if (expect_zombie_child && (pid == child_pid)) {
				found_zombie_child = true;

				uint64_t task_flags = [container[@"task_snapshots"][@"task_snapshot"][@"ts_ss_flags"] unsignedLongLongValue];
				T_ASSERT_TRUE((task_flags & kTerminatedSnapshot) == kTerminatedSnapshot, "child zombie marked as terminated");

				continue;
			} else if (pid != getpid()) {
				break;
			}

			T_EXPECT_EQ_STR(current_process_name(),
					[container[@"task_snapshots"][@"task_snapshot"][@"ts_p_comm"] UTF8String],
					"current process name matches in stackshot");

			uint64_t task_flags = [container[@"task_snapshots"][@"task_snapshot"][@"ts_ss_flags"] unsignedLongLongValue];
			T_ASSERT_FALSE((task_flags & kTerminatedSnapshot) == kTerminatedSnapshot, "current process not marked as terminated");

			T_QUIET;
			T_EXPECT_LE(pid, [container[@"task_snapshots"][@"task_snapshot"][@"ts_unique_pid"] intValue],
					"unique pid is greater than pid");

			bool found_main_thread = false;
			uint64_t main_thread_id = -1;
			for (id thread_key in container[@"task_snapshots"][@"thread_snapshots"]) {
				NSMutableDictionary *thread = container[@"task_snapshots"][@"thread_snapshots"][thread_key];
				NSDictionary *thread_snap = thread[@"thread_snapshot"];

				T_QUIET; T_EXPECT_GT([thread_snap[@"ths_thread_id"] intValue], 0,
						"thread ID of thread in current task is valid");
				T_QUIET; T_EXPECT_GT([thread_snap[@"ths_base_priority"] intValue], 0,
						"base priority of thread in current task is valid");
				T_QUIET; T_EXPECT_GT([thread_snap[@"ths_sched_priority"] intValue], 0,
						"scheduling priority of thread in current task is valid");

				NSString *pth_name = thread[@"pth_name"];
				if (pth_name != nil && [pth_name isEqualToString:@TEST_THREAD_NAME]) {
					found_main_thread = true;
					main_thread_id = [thread_snap[@"ths_thread_id"] intValue];

					T_QUIET; T_EXPECT_GT([thread_snap[@"ths_total_syscalls"] intValue], 0,
							"total syscalls of current thread is valid");

					NSDictionary *cpu_times = thread[@"cpu_times"];
					T_EXPECT_GE([cpu_times[@"runnable_time"] intValue],
							[cpu_times[@"system_time"] intValue] +
							[cpu_times[@"user_time"] intValue],
							"runnable time of current thread is valid");
				}
			}
			T_EXPECT_TRUE(found_main_thread, "found main thread for current task in stackshot");

			if (expect_turnstile_lock && !found_turnstile_lock) {
				NSArray *tsinfos = container[@"task_snapshots"][@"thread_turnstileinfo"];

				for (id i in tsinfos) {
					if ([i[@"turnstile_context"] intValue] == main_thread_id) {
						found_turnstile_lock = true;
						break;
					}
				}
			}
			break;
		}
		case STACKSHOT_KCTYPE_SHAREDCACHE_LOADINFO: {
			struct dyld_uuid_info_64_v2 *shared_cache_info = kcdata_iter_payload(iter);
			uuid_t shared_cache_uuid;
			T_QUIET; T_ASSERT_TRUE(_dyld_get_shared_cache_uuid(shared_cache_uuid), "retrieve current shared cache UUID");
			T_QUIET; T_ASSERT_EQ(memcmp(shared_cache_info->imageUUID, shared_cache_uuid, sizeof(shared_cache_uuid)), 0,
					"dyld returned UUID doesn't match kernel returned UUID for system shared cache");
			found_shared_cache_uuid = true;
			break;
		}
		}
	}

	if (expect_zombie_child) {
		T_QUIET; T_ASSERT_TRUE(found_zombie_child, "found zombie child in kcdata");
	}

	if (expect_shared_cache_layout) {
		T_QUIET; T_ASSERT_TRUE(found_shared_cache_layout, "shared cache layout found in kcdata");
	}

	if (expect_shared_cache_uuid) {
		T_QUIET; T_ASSERT_TRUE(found_shared_cache_uuid, "shared cache UUID found in kcdata");
	}

	if (expect_dispatch_queue_label) {
		T_QUIET; T_ASSERT_TRUE(found_dispatch_queue_label, "dispatch queue label found in kcdata");
	}

	if (expect_turnstile_lock) {
		T_QUIET; T_ASSERT_TRUE(found_turnstile_lock, "found expected deadlock");
	}

	T_ASSERT_FALSE(KCDATA_ITER_FOREACH_FAILED(iter), "successfully iterated kcdata");
}

static const char *
current_process_name(void)
{
	static char name[64];

	if (!name[0]) {
		int ret = proc_name(getpid(), name, sizeof(name));
		T_QUIET;
		T_ASSERT_POSIX_SUCCESS(ret, "proc_name failed for current process");
	}

	return name;
}

static void
initialize_thread(void)
{
	int ret = pthread_setname_np(TEST_THREAD_NAME);
	T_QUIET;
	T_ASSERT_POSIX_ZERO(ret, "set thread name to %s", TEST_THREAD_NAME);
}