#ifdef T_NAMESPACE
#undef T_NAMESPACE
#endif
#include <darwintest.h>

#include <kdd.h>
#include <kern/kcdata.h>
#include <kern/debug.h>
#include <kern/block_hint.h>
#include <mach/mach.h>
#include <mach/mach_init.h>
#include <mach/mach_traps.h>
#include <mach/message.h>
#include <mach/port.h>
#include <mach/semaphore.h>
#include <mach/task.h>
#include <os/lock.h>
#include <pthread.h>
#include <signal.h>
#include <sys/sysctl.h>
#include <sys/stackshot.h>
#include <sys/types.h>
#include <stdlib.h>
#include <unistd.h>
#include <TargetConditionals.h>

#if !TARGET_OS_EMBEDDED
#include <pcre.h>
#endif

T_GLOBAL_META(
    T_META_NAMESPACE("xnu.scheduler"),
    T_META_ASROOT(true)
);

#include <Foundation/Foundation.h>

#define SENDS_TO_BLOCK 6
#define NUMRETRIES 5
#define KRWLCK_STORES_EXCL_OWNER 0

#define KMUTEX_SYSCTL_CHECK_EXISTS 0
#define KMUTEX_SYSCTL_ACQUIRE_WAIT 1
#define KMUTEX_SYSCTL_ACQUIRE_NOWAIT 2
#define KMUTEX_SYSCTL_SIGNAL 3
#define KMUTEX_SYSCTL_TEARDOWN 4

#define KRWLCK_SYSCTL_CHECK_EXISTS 0
#define KRWLCK_SYSCTL_RACQUIRE_NOWAIT 1
#define KRWLCK_SYSCTL_RACQUIRE_WAIT 2
#define KRWLCK_SYSCTL_WACQUIRE_NOWAIT 3
#define KRWLCK_SYSCTL_WACQUIRE_WAIT 4
#define KRWLCK_SYSCTL_SIGNAL 5
#define KRWLCK_SYSCTL_TEARDOWN 6

static const char kmutex_ctl[] = "debug.test_MutexOwnerCtl";
static const char krwlck_ctl[] = "debug.test_RWLockOwnerCtl";

static mach_port_t send = MACH_PORT_NULL;
static mach_port_t recv = MACH_PORT_NULL;

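/*
 * Takes a stackshot of the current process and returns the stackshot config handle,
 * from which the caller can pull the raw buffer. extra_flags are OR'd into the default
 * flag set; a nonzero since_timestamp requests a delta snapshot relative to that time.
 * The capture is retried up to NUMRETRIES times if the kernel returns EBUSY or ETIMEDOUT.
 */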
static void *
take_stackshot(uint32_t extra_flags, uint64_t since_timestamp)
{
    void * stackshot = NULL;
    int ret = 0;
    uint32_t stackshot_flags = STACKSHOT_SAVE_LOADINFO |
            STACKSHOT_GET_GLOBAL_MEM_STATS |
            STACKSHOT_SAVE_IMP_DONATION_PIDS |
            STACKSHOT_KCDATA_FORMAT;

    if (since_timestamp != 0)
        stackshot_flags |= STACKSHOT_COLLECT_DELTA_SNAPSHOT;

    stackshot_flags |= extra_flags;

    stackshot = stackshot_config_create();
    T_QUIET; T_ASSERT_NOTNULL(stackshot, "Allocating stackshot config");

    ret = stackshot_config_set_flags(stackshot, stackshot_flags);
    T_ASSERT_POSIX_ZERO(ret, "Setting flags on stackshot config");

    ret = stackshot_config_set_pid(stackshot, getpid());
    T_QUIET; T_ASSERT_POSIX_ZERO(ret, "Setting target pid on stackshot config");

    if (since_timestamp != 0) {
        ret = stackshot_config_set_delta_timestamp(stackshot, since_timestamp);
        T_QUIET; T_ASSERT_POSIX_ZERO(ret, "Setting prev snapshot time on stackshot config");
    }

    for (int retries = NUMRETRIES; retries > 0; retries--) {
        ret = stackshot_capture_with_config(stackshot);
        T_QUIET; T_ASSERT_TRUE(ret == 0 || ret == EBUSY || ret == ETIMEDOUT,
            "Attempting to take stackshot (error %d)...", ret);
        if (retries == 1 && (ret == EBUSY || ret == ETIMEDOUT))
            T_ASSERT_FAIL("Failed to take stackshot after %d retries: got %d (%s)", NUMRETRIES, ret, strerror(ret));
        if (ret == 0)
            break;
    }
    return stackshot;
}

static void
save_stackshot(void *stackshot, const char *filename)
{
    void *buf = stackshot_config_get_stackshot_buffer(stackshot);
    T_QUIET; T_ASSERT_NOTNULL(buf, "buf");
    size_t size = stackshot_config_get_stackshot_size(stackshot);
    FILE *f = fopen(filename, "w");
    T_QUIET; T_ASSERT_NOTNULL(f, "f");
    fwrite(buf, size, 1, f);
    fclose(f);
}

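/*
 * Saves the stackshot to /tmp/ss and, on non-embedded targets, runs the kcdata tool on it
 * and expects at least one line of the pretty-printed output to match the PCRE pattern
 * built printf-style from fmt and the variadic arguments.
 */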
static
void check_python(void *stackshot, const char *fmt, ...)
{
    save_stackshot(stackshot, "/tmp/ss");

#if !TARGET_OS_EMBEDDED
    va_list args;
    va_start(args, fmt);
    char *re_string = NULL;
    vasprintf(&re_string, fmt, args);
    va_end(args);
    T_QUIET; T_ASSERT_NOTNULL(re_string, "vasprintf");

    const char *pcreErrorStr;
    int pcreErrorOffset;
    pcre *re = pcre_compile(re_string, 0, &pcreErrorStr, &pcreErrorOffset, NULL);
    T_QUIET; T_ASSERT_NOTNULL(re, "pcre_compile");

    bool found = false;
    FILE *p = popen("/usr/local/bin/kcdata --pretty /tmp/ss", "r");
    T_QUIET; T_ASSERT_NOTNULL(p, "popen");
    while (1) {
        char *line = NULL;
        size_t linecap = 0;
        ssize_t linesize = getline(&line, &linecap, p);
        if (linesize < 0) {
            if (line)
                free(line);
            break;
        }
        int pcre_ret = pcre_exec(re, NULL, line, strlen(line), 0, 0, NULL, 0);
        if (pcre_ret == 0) {
            T_LOG("line: %s", line);
            found = true;
        }
        free(line);
    }
    T_EXPECT_TRUE(found, "found the waitinfo in kcdata.py output");
    pclose(p);
    pcre_free(re);
    free(re_string);
#endif
}


// waitinfo can be NULL, but len must be non-null and point to the length of the waitinfo array.
// when the function returns, len will be set to the number of waitinfo structs found in the stackshot.
static void
find_blocking_info(void * stackshot, struct stackshot_thread_waitinfo *waitinfo, int *len)
{
    void *buf = NULL;
    uint32_t t = 0;
    uint32_t buflen = 0;
    NSError *error = nil;
    NSMutableDictionary *parsed_container = nil;
    NSArray *parsed_waitinfo = nil;

    T_QUIET; T_ASSERT_NOTNULL(len, "Length pointer shouldn't be NULL");
    int oldlen = *len;
    *len = 0;

    buf = stackshot_config_get_stackshot_buffer(stackshot);
    T_QUIET; T_ASSERT_NOTNULL(buf, "Getting stackshot buffer");
    buflen = stackshot_config_get_stackshot_size(stackshot);

    kcdata_iter_t iter = kcdata_iter(buf, buflen);

    T_QUIET; T_ASSERT_TRUE(kcdata_iter_type(iter) == KCDATA_BUFFER_BEGIN_STACKSHOT ||
            kcdata_iter_type(iter) == KCDATA_BUFFER_BEGIN_DELTA_STACKSHOT,
            "Checking start of stackshot buffer");

    iter = kcdata_iter_next(iter);
    KCDATA_ITER_FOREACH(iter)
    {
        t = kcdata_iter_type(iter);

        if (t != KCDATA_TYPE_CONTAINER_BEGIN) {
            continue;
        }

        if (kcdata_iter_container_type(iter) != STACKSHOT_KCCONTAINER_TASK) {
            continue;
        }

        parsed_container = parseKCDataContainer(&iter, &error);
        T_QUIET; T_ASSERT_TRUE(!error, "Error while parsing container: %d (%s)",
                (int)error.code, [error.domain UTF8String]);
        T_QUIET; T_ASSERT_TRUE(parsed_container && !error, "Parsing container");

        parsed_waitinfo = parsed_container[@"task_snapshots"][@"thread_waitinfo"];
        for (id elem in parsed_waitinfo) {
            /* check to see that tid matches expected idle status */
            uint8_t type = [elem[@"wait_type"] unsignedCharValue];
            if (type != kThreadWaitNone) {
                if (waitinfo && *len < oldlen) {
                    struct stackshot_thread_waitinfo *curr = &waitinfo[*len];
                    curr->wait_type = type;
                    curr->owner = [elem[@"owner"] unsignedLongLongValue];
                    curr->waiter = [elem[@"waiter"] unsignedLongLongValue];
                    curr->context = [elem[@"context"] unsignedLongLongValue];
                }
                (*len)++;
            }
        }
        [parsed_container release];
    }
}

/* perform various actions with a mutex in kernel memory. note that, since we aren't allowed
 * to go to user space while still holding a mutex, the lock-acquiring actions in this kernel
 * sysctl will either lock and immediately release the lock, or lock and wait until a semaphore
 * is signalled, then unlock. if called with CHECK_EXISTS, returns whether or not the sysctl
 * exists in the kernel (to determine if we're running with CONFIG_XNUPOST defined). Else,
 * returns 1. */
static int kmutex_action(int action)
{
    int ret = 0;
    if (action == KMUTEX_SYSCTL_CHECK_EXISTS) {
        ret = sysctlbyname(kmutex_ctl, NULL, NULL, NULL, 0);
        return !(ret == -1);
    }

    char * action_name = "";
    switch(action) {
        case KMUTEX_SYSCTL_ACQUIRE_WAIT:
            action_name = "lock (and wait)";
            break;
        case KMUTEX_SYSCTL_ACQUIRE_NOWAIT:
            action_name = "lock";
            break;
        case KMUTEX_SYSCTL_SIGNAL:
            action_name = "signal to holder of";
            break;
        case KMUTEX_SYSCTL_TEARDOWN:
            action_name = "tear down";
            break;
        default:
            T_ASSERT_FAIL("Somebody passed the wrong argument to kmutex_action: %d", action);
            break;
    }

    ret = sysctlbyname(kmutex_ctl, NULL, NULL, &action, sizeof(int));
    T_ASSERT_POSIX_SUCCESS(ret, "sysctl: %s kernel mutex", action_name);
    return 1;
}

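/*
 * Asks the kmutex sysctl for the unslid kernel address of the test mutex and checks that
 * it matches the 'context' value reported in the stackshot waitinfo.
 */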
static void
sysctl_kmutex_test_match(uint64_t context)
{
    int ret = 0;
    unsigned long long unslid_kmutex_address = 0;
    size_t addrsize = sizeof(unslid_kmutex_address);

    ret = sysctlbyname(kmutex_ctl, &unslid_kmutex_address, &addrsize, NULL, 0);
    T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "Getting unslid location of kernel mutex. Size is %llu",
            (unsigned long long)addrsize);
    T_EXPECT_EQ(context, unslid_kmutex_address,
            "Context should match unslid location of mutex in kernel memory");
}

/* We don't really care what goes into these messages, we're just sending something to a port. */
static void
msg_send_helper(mach_port_t remote_port)
{
    int ret;
    mach_msg_header_t * msg = NULL;

    ret = vm_allocate(mach_task_self(),
            (vm_address_t *)&msg,
            PAGE_SIZE,
            VM_MAKE_TAG(VM_MEMORY_MACH_MSG) | TRUE);

    T_QUIET; T_ASSERT_MACH_SUCCESS(ret, "Allocating vm page %p", (void*)msg);
    msg->msgh_bits = MACH_MSGH_BITS_SET(MACH_MSG_TYPE_COPY_SEND, 0, 0, 0);
    msg->msgh_size = PAGE_SIZE;
    msg->msgh_remote_port = remote_port;
    msg->msgh_local_port = MACH_PORT_NULL;
    msg->msgh_voucher_port = MACH_PORT_NULL;
    ret = mach_msg(msg,
            MACH_SEND_MSG | MACH_MSG_OPTION_NONE,
            PAGE_SIZE,
            0,
            MACH_PORT_NULL,
            MACH_MSG_TIMEOUT_NONE,
            MACH_PORT_NULL);
    T_QUIET; T_ASSERT_MACH_SUCCESS(ret, "Sending message to port %d", remote_port);

    ret = vm_deallocate(mach_task_self(), (vm_address_t)msg, PAGE_SIZE);
    T_QUIET; T_ASSERT_MACH_SUCCESS(ret, "Deallocating vm page %p", (void*)msg);
}

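/* Blocks in mach_msg() until a message arrives on local_port, then frees the receive buffer. */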
static void
msg_recv_helper(mach_port_t local_port)
{
    int ret = 0;
    mach_msg_size_t size = 2*PAGE_SIZE;
    mach_msg_header_t * msg = NULL;
    ret = vm_allocate(mach_task_self(),
            (vm_address_t *)&msg,
            size,
            VM_MAKE_TAG(VM_MEMORY_MACH_MSG) | TRUE );
    T_QUIET; T_ASSERT_MACH_SUCCESS(ret, "Allocating page %p for message", (void*)msg);

    ret = mach_msg(msg,
            MACH_RCV_MSG,
            0,
            size,
            local_port,
            MACH_MSG_TIMEOUT_NONE,
            MACH_PORT_NULL);
    T_QUIET; T_ASSERT_MACH_SUCCESS(ret, "Received message on port %d", local_port);
    ret = vm_deallocate(mach_task_self(), (vm_address_t)msg, size);
    T_QUIET; T_ASSERT_MACH_SUCCESS(ret, "Deallocating page %p", (void*)msg);
}

/* perform various actions with a rwlock in kernel memory. note that, since we aren't allowed
 * to go to user space while still holding a rwlock, the lock-acquiring actions in this kernel
 * sysctl will either lock and immediately release the lock, or lock and wait until a semaphore
 * is signalled, then unlock. if called with CHECK_EXISTS, returns whether or not the sysctl
 * exists in the kernel (to determine if we're running with CONFIG_XNUPOST defined). Else,
 * returns 1. */
static int
krwlck_action(int action)
{
    int ret = 0;
    if (action == KRWLCK_SYSCTL_CHECK_EXISTS) {
        ret = sysctlbyname(krwlck_ctl, NULL, NULL, NULL, 0);
        return !(ret == -1);
    }

    char * action_name = "";
    switch(action) {
        case KRWLCK_SYSCTL_RACQUIRE_NOWAIT:
            action_name = "shared lock";
            break;
        case KRWLCK_SYSCTL_RACQUIRE_WAIT:
            action_name = "shared lock (and wait)";
            break;
        case KRWLCK_SYSCTL_WACQUIRE_NOWAIT:
            action_name = "exclusive lock";
            break;
        case KRWLCK_SYSCTL_WACQUIRE_WAIT:
            action_name = "exclusive lock (and wait)";
            break;
        case KRWLCK_SYSCTL_SIGNAL:
            action_name = "signal to holder of";
            break;
        case KRWLCK_SYSCTL_TEARDOWN:
            action_name = "tear down";
            break;
        default:
            T_ASSERT_FAIL("Somebody passed the wrong argument to krwlck_action: %d", action);
            break;
    }

    ret = sysctlbyname(krwlck_ctl, NULL, NULL, &action, sizeof(int));
    T_ASSERT_POSIX_SUCCESS(ret, "sysctl: %s kernel rwlock", action_name);
    return 1;
}

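/*
 * Asks the krwlck sysctl for the unslid kernel address of the test rwlock and checks that
 * it matches the 'context' value reported in the stackshot waitinfo.
 */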
static void
sysctl_krwlck_test_match(uint64_t context)
{
    int ret = 0;
    unsigned long long unslid_krwlck_address = 0;
    size_t addrsize = sizeof(unslid_krwlck_address);

    ret = sysctlbyname(krwlck_ctl, &unslid_krwlck_address, &addrsize, NULL, 0);
    T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "Getting unslid location of kernel rwlock");
    T_EXPECT_EQ(context, unslid_krwlck_address, "Context should match unslid location of rwlock in kernel memory");
}

/* "Grabbing" threads: only purpose is to grab a sync primitive and hang. */

static void *
kmutex_grabbing_thread(void * arg)
{
    (void)arg;
    kmutex_action(KMUTEX_SYSCTL_ACQUIRE_NOWAIT);
    return NULL;
}

static void *
kmutex_grab_and_wait_thread(void * arg)
{
    (void)arg;
    kmutex_action(KMUTEX_SYSCTL_ACQUIRE_WAIT);
    return NULL;
}

static void *
sem_grabbing_thread(void * arg)
{
    semaphore_t *sem = (semaphore_t *)arg;
    semaphore_wait(*sem);
    return NULL;
}

static void *
msg_blocking_thread(void * arg)
{
    (void)arg;
    msg_recv_helper(send);

    for (int i = 0; i < SENDS_TO_BLOCK; i++)
        msg_send_helper(recv); // will block on send until message is received
    return NULL;
}

static void *
ulock_blocking_thread(void * arg)
{
    os_unfair_lock_t oul = (os_unfair_lock_t)arg;
    os_unfair_lock_lock(oul);
    os_unfair_lock_unlock(oul);
    return NULL;
}

// acquires a kernel rwlock for writing, and then waits on a kernel semaphore.
static void *
krwlck_write_waiting_thread(void * arg)
{
    (void)arg;
    krwlck_action(KRWLCK_SYSCTL_WACQUIRE_WAIT);
    return NULL;
}

// attempts to acquire a kernel rwlock for reading, and doesn't wait on a semaphore afterwards.
static void *
krwlck_read_grabbing_thread(void * arg)
{
    (void)arg;
    krwlck_action(KRWLCK_SYSCTL_RACQUIRE_NOWAIT);
    return NULL;
}

static void *
pthread_mutex_blocking_thread(void * arg)
{
    pthread_mutex_t *mtx = (pthread_mutex_t *)arg;
    pthread_mutex_lock(mtx);
    pthread_mutex_unlock(mtx);
    return NULL;
}

static void *
pthread_rwlck_blocking_thread(void * arg)
{
    pthread_rwlock_t *rwlck = (pthread_rwlock_t *)arg;
    pthread_rwlock_rdlock(rwlck);
    pthread_rwlock_unlock(rwlck);
    return NULL;
}

static void *
pthread_cond_blocking_thread(void * arg)
{
    pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
    pthread_cond_t *cond = (pthread_cond_t *)arg;
    pthread_cond_wait(cond, &mtx);
    pthread_mutex_unlock(&mtx);
    return NULL;
}

static void *
waitpid_blocking_thread(void * arg)
{
    pid_t pid = (pid_t)arg;

    int ret = waitpid(pid, NULL, 0);
    T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "Reaping child.");
    return NULL;
}

/*
 * Uses a debug sysctl to initialize a kernel mutex.
 *
 * The 'waiting' thread grabs this kernel mutex, and immediately waits on a kernel semaphore.
 * The 'grabbing' thread just attempts to lock the kernel mutex.
 * When the semaphore is signalled, the 'waiting' thread will unlock the kernel mutex,
 * giving the opportunity for the 'grabbing' thread to lock it and then immediately unlock it.
 * This allows us to create a situation in the kernel where we know a thread to be blocked
 * on a kernel mutex.
 */
static void
test_kmutex_blocking(void)
{
    int ret = 0;
    int len = 2;
    struct stackshot_thread_waitinfo waitinfo[2] = { { 0 }, { 0 } };
    uint64_t thread_id = 0;
    pthread_t grabbing, waiting;

    T_LOG("Starting %s", __FUNCTION__);
    ret = pthread_create(&waiting, NULL, kmutex_grab_and_wait_thread, NULL); // thread will block until we signal it
    T_QUIET; T_ASSERT_POSIX_ZERO(ret, "Spawning grab and wait thread");
    sleep(1); // give time for thread to block
    ret = pthread_create(&grabbing, NULL, kmutex_grabbing_thread, NULL); // thread should immediately block
    T_QUIET; T_ASSERT_POSIX_ZERO(ret, "Spawning grabbing thread");
    sleep(3); // give (lots of) time for thread to give up spinning on lock

    void * stackshot = take_stackshot(STACKSHOT_THREAD_WAITINFO, 0);

    ret = pthread_threadid_np(waiting, &thread_id); // this is the thread that currently holds the kernel mutex
    T_QUIET; T_ASSERT_POSIX_ZERO(ret, "Getting integer value of thread id");

    check_python(stackshot, "thread \\d+: semaphore port \\w+ with unknown owner");

    find_blocking_info(stackshot, (struct stackshot_thread_waitinfo *)&waitinfo, &len);

    T_EXPECT_EQ(len, 2, "There should only be two blocking threads");
    for (int i = 0; i < len; i++) {
        struct stackshot_thread_waitinfo *curr = &waitinfo[i];
        if (curr->wait_type == kThreadWaitSemaphore)
            continue;
        T_EXPECT_EQ(curr->wait_type, kThreadWaitKernelMutex, "Wait type should match expected KernelMutex value");
        T_EXPECT_EQ(curr->owner, thread_id, "Thread ID of blocking thread should match 'owner' field in stackshot");
        sysctl_kmutex_test_match(curr->context);

        check_python(stackshot, "thread \\d+: kernel mutex %llx owned by thread %lld", curr->context, thread_id);
    }

    kmutex_action(KMUTEX_SYSCTL_SIGNAL); // waiting thread should now unblock.
    ret = pthread_join(waiting, NULL);
    T_QUIET; T_ASSERT_POSIX_ZERO(ret, "Joining on waiting thread");
    ret = pthread_join(grabbing, NULL);
    T_QUIET; T_ASSERT_POSIX_ZERO(ret, "Joining on grabber thread");
    kmutex_action(KMUTEX_SYSCTL_TEARDOWN);
    stackshot_config_dealloc(stackshot);
}

/* Initialize a userspace semaphore, and spawn a thread to block on it. */
static void
test_semaphore_blocking(void)
{
    int ret = 0;
    semaphore_t sem;
    struct stackshot_thread_waitinfo waitinfo = { 0 };
    int len = 1;
    uint64_t pid = 0;

    T_LOG("Starting %s", __FUNCTION__);
    ret = semaphore_create(mach_task_self(), &sem, SYNC_POLICY_FIFO, 0);
    T_QUIET; T_ASSERT_MACH_SUCCESS(ret, "Creating semaphore");
    pthread_t tid;
    ret = pthread_create(&tid, NULL, sem_grabbing_thread, (void*)&sem); // thread should immediately block
    T_QUIET; T_ASSERT_POSIX_ZERO(ret, "Creating semaphore grabbing thread");

    sleep(1); // give time for thread to block

    void * stackshot = take_stackshot(STACKSHOT_THREAD_WAITINFO, 0);
    find_blocking_info(stackshot, (struct stackshot_thread_waitinfo *)&waitinfo, &len);
    T_EXPECT_EQ(len, 1, "Only one blocking thread should exist");
    T_EXPECT_EQ(waitinfo.wait_type, kThreadWaitSemaphore, "Wait type should match expected Semaphore value");

    pid = (uint64_t)getpid();
    T_EXPECT_EQ(waitinfo.owner, pid, "Owner value should match process ID");

    check_python(stackshot, "thread \\d+: semaphore port \\w+ owned by pid %d", (int)pid);

    ret = semaphore_signal(sem);
    T_QUIET; T_ASSERT_MACH_SUCCESS(ret, "Signalling semaphore");
    ret = pthread_join(tid, NULL);
    T_QUIET; T_ASSERT_POSIX_ZERO(ret, "Joining on grabber thread");
    ret = semaphore_destroy(mach_task_self(), sem);
    T_QUIET; T_ASSERT_MACH_SUCCESS(ret, "Destroying semaphore");
    stackshot_config_dealloc(stackshot);
}

/* Spawn a thread to exchange messages with, and block while both sending and receiving in different contexts. */
static void
test_mach_msg_blocking(void)
{
    int ret = 0;
    pthread_t tid;
    void *stackshot = NULL;
    struct stackshot_thread_waitinfo waitinfo = { 0 };
    int len = 1;

    T_LOG("Starting %s", __FUNCTION__);
    ret = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &send);
    T_QUIET; T_ASSERT_MACH_SUCCESS(ret, "Allocating send port");
    ret = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &recv);
    T_QUIET; T_ASSERT_MACH_SUCCESS(ret, "Allocating recv port");
    ret = mach_port_insert_right(mach_task_self(), send, send, MACH_MSG_TYPE_MAKE_SEND);
    T_QUIET; T_ASSERT_MACH_SUCCESS(ret, "Getting send right to send port");
    ret = mach_port_insert_right(mach_task_self(), recv, recv, MACH_MSG_TYPE_MAKE_SEND);
    T_QUIET; T_ASSERT_MACH_SUCCESS(ret, "Getting send right to recv port");

    ret = pthread_create(&tid, NULL, msg_blocking_thread, (void*)&send); // thread should block on recv soon
    T_QUIET; T_ASSERT_POSIX_ZERO(ret, "Creating message blocking thread");

    sleep(1); // give time for thread to block
    stackshot = take_stackshot(STACKSHOT_THREAD_WAITINFO, 0);
    find_blocking_info(stackshot, (struct stackshot_thread_waitinfo *)&waitinfo, &len);

    T_EXPECT_EQ(len, 1, "Only one blocking thread should exist");
    T_EXPECT_EQ(waitinfo.wait_type, kThreadWaitPortReceive, "Wait type should match expected PortReceive value");

    check_python(stackshot, "thread \\d+: mach_msg receive on port \\w+ name %llx", (long long)send);

    stackshot_config_dealloc(stackshot);

    msg_send_helper(send); // ping! msg_blocking_thread will now try to send us stuff, and block until we receive.

    sleep(1); // give time for thread to block
    stackshot = take_stackshot(STACKSHOT_THREAD_WAITINFO, 0);
    find_blocking_info(stackshot, (struct stackshot_thread_waitinfo *)&waitinfo, &len);
    T_EXPECT_EQ(len, 1, "Only one blocking thread should exist");
    T_EXPECT_EQ(waitinfo.wait_type, kThreadWaitPortSend, "Wait type should match expected PortSend value");

    check_python(stackshot, "thread \\d+: mach_msg send on port \\w+ owned by pid %d", (int)getpid());

    stackshot_config_dealloc(stackshot);

    msg_recv_helper(recv); // thread should block until we receive one of its messages
    ret = pthread_join(tid, NULL);
    T_QUIET; T_ASSERT_POSIX_ZERO(ret, "Joining on blocking thread");
}

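/*
 * Holds an os_unfair_lock while a spawned thread contends on it and blocks in the kernel.
 * The stackshot should report a UserLock wait whose owner is this (the main) thread's id.
 */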
static void
test_ulock_blocking(void)
{
    int ret = 0;
    void *stackshot = NULL;
    uint64_t thread_id = 0;
    pthread_t tid;
    struct os_unfair_lock_s ouls = OS_UNFAIR_LOCK_INIT;
    os_unfair_lock_t oul = &ouls;
    struct stackshot_thread_waitinfo waitinfo = { 0 };
    int len = 1;

    T_LOG("Starting %s", __FUNCTION__);
    os_unfair_lock_lock(oul);
    ret = pthread_create(&tid, NULL, ulock_blocking_thread, (void*)oul);
    T_QUIET; T_ASSERT_POSIX_ZERO(ret, "Creating ulock blocking thread");
    sleep(3); // give time for thread to spawn, fall back to kernel for contention, and block

    stackshot = take_stackshot(STACKSHOT_THREAD_WAITINFO, 0);

    find_blocking_info(stackshot, (struct stackshot_thread_waitinfo *)&waitinfo, &len);
    T_EXPECT_EQ(len, 1, "Only one blocking thread should exist");
    T_EXPECT_EQ(waitinfo.wait_type, kThreadWaitUserLock, "Wait type should match expected UserLock value");

    os_unfair_lock_unlock(oul);
    ret = pthread_join(tid, NULL); // wait for thread to unblock and exit
    T_QUIET; T_ASSERT_POSIX_ZERO(ret, "Joining on blocking thread");

    ret = pthread_threadid_np(NULL, &thread_id); // this thread is the "owner" of the ulock
    T_QUIET; T_ASSERT_POSIX_ZERO(ret, "Getting integer value of thread id");
    T_EXPECT_EQ(waitinfo.owner, thread_id, "Thread ID of blocking thread should match 'owner' field in stackshot");

    check_python(stackshot, "thread \\d+: unfair lock \\w+ owned by thread %lld", thread_id);
    stackshot_config_dealloc(stackshot);
    return;
}

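/*
 * One thread takes the kernel test rwlock for writing and then waits on a kernel semaphore;
 * a second thread blocks trying to take the same rwlock for reading. The stackshot should
 * report a semaphore wait plus a KernelRWLockRead wait whose context is the rwlock's
 * unslid kernel address.
 */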
static void
test_krwlock_blocking(void)
{
    int ret = 0;
    void *stackshot = NULL;
    uint64_t thread_id = 0;
    pthread_t waiting, grabbing;
    int len = 2;
    struct stackshot_thread_waitinfo waitinfo[2] = { { 0 }, { 0 } };

    T_LOG("Starting %s", __FUNCTION__);
    // this thread should spawn, acquire a kernel rwlock for write, and then wait on a semaphore
    ret = pthread_create(&waiting, NULL, krwlck_write_waiting_thread, NULL);
    T_QUIET; T_ASSERT_POSIX_ZERO(ret, "Creating krwlck write waiting thread");
    sleep(1); // give time for thread to block
    // this thread should spawn and try to acquire the same kernel rwlock for read, but block
    ret = pthread_create(&grabbing, NULL, krwlck_read_grabbing_thread, NULL);
    T_QUIET; T_ASSERT_POSIX_ZERO(ret, "Creating krwlck read grabbing thread");
    sleep(1); // give time for thread to block

    stackshot = take_stackshot(STACKSHOT_THREAD_WAITINFO, 0);

    check_python(stackshot, "thread \\d+: semaphore port \\w+ with unknown owner");

    find_blocking_info(stackshot, (struct stackshot_thread_waitinfo *)&waitinfo, &len);

    T_EXPECT_EQ(len, 2, "There should only be two blocking threads");
    for (int i = 0; i < len; i++) {
        struct stackshot_thread_waitinfo *curr = &waitinfo[i];
        if (curr->wait_type == kThreadWaitSemaphore)
            continue;
        T_EXPECT_EQ(curr->wait_type, kThreadWaitKernelRWLockRead, "Wait type should match expected KRWLockRead value");
        sysctl_krwlck_test_match(curr->context);

        check_python(stackshot, "thread \\d+: krwlock %llx for reading", curr->context);

#if KRWLCK_STORES_EXCL_OWNER /* A future planned enhancement */
        ret = pthread_threadid_np(waiting, &thread_id); // this is the thread that currently holds the kernel rwlock
        T_QUIET; T_ASSERT_POSIX_ZERO(ret, "Getting integer value of thread id");
        T_EXPECT_EQ(curr->owner, thread_id, "Thread ID of blocking thread should match 'owner' field in stackshot");
#else
        (void)thread_id; // suppress compiler warning about unused variable
#endif /* KRWLCK_STORES_EXCL_OWNER */
    }

    krwlck_action(KRWLCK_SYSCTL_SIGNAL); // pthread should now unblock & finish
    ret = pthread_join(waiting, NULL);
    T_QUIET; T_ASSERT_POSIX_ZERO(ret, "Joining on waiting thread");
    ret = pthread_join(grabbing, NULL);
    T_QUIET; T_ASSERT_POSIX_ZERO(ret, "Joining on grabbing thread");
    krwlck_action(KRWLCK_SYSCTL_TEARDOWN);
    stackshot_config_dealloc(stackshot);
}

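/*
 * Holds a pthread mutex while a spawned thread blocks trying to lock it. The stackshot
 * should report a PThreadMutex wait whose owner is this thread's id and whose context is
 * the userspace address of the mutex.
 */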
static void
test_pthread_mutex_blocking(void)
{
    int ret = 0;
    void *stackshot = NULL;
    uint64_t thread_id = 0;
    pthread_t tid;
    struct stackshot_thread_waitinfo waitinfo = { 0 };
    pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
    int len = 1;

    T_LOG("Starting %s", __FUNCTION__);

    ret = pthread_threadid_np(NULL, &thread_id); // this thread is the "owner" of the mutex
    T_QUIET; T_ASSERT_POSIX_ZERO(ret, "Getting integer value of thread id");

    pthread_mutex_lock(&mtx);
    ret = pthread_create(&tid, NULL, pthread_mutex_blocking_thread, (void*)&mtx);
    T_QUIET; T_ASSERT_POSIX_ZERO(ret, "Creating pthread mutex blocking thread");
    sleep(2); // give time for thread to block

    stackshot = take_stackshot(STACKSHOT_THREAD_WAITINFO, 0);

    check_python(stackshot, "thread \\d+: pthread mutex %llx owned by thread %lld", (long long)&mtx, thread_id);

    find_blocking_info(stackshot, (struct stackshot_thread_waitinfo *)&waitinfo, &len);
    T_EXPECT_EQ(len, 1, "Only one blocking thread should exist");
    T_EXPECT_EQ(waitinfo.wait_type, kThreadWaitPThreadMutex,
            "Wait type should match expected PThreadMutex value");
    stackshot_config_dealloc(stackshot);

    pthread_mutex_unlock(&mtx);
    ret = pthread_join(tid, NULL); // wait for thread to unblock and exit

    T_EXPECT_EQ(waitinfo.owner, thread_id,
            "Thread ID of blocking thread should match 'owner' field in stackshot");
    T_EXPECT_EQ(waitinfo.context, (uint64_t)&mtx,
            "Userspace address of mutex should match 'context' field in stackshot");
}

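/*
 * Holds a pthread rwlock for writing while a spawned thread blocks trying to take it for
 * reading. The stackshot should report a PThreadRWLockRead wait whose context is the
 * userspace address of the rwlock.
 */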
static void
test_pthread_rwlck_blocking(void)
{
    int ret = 0;
    void *stackshot = NULL;
    pthread_t tid;
    struct stackshot_thread_waitinfo waitinfo = { 0 };
    pthread_rwlock_t rwlck = PTHREAD_RWLOCK_INITIALIZER;
    int len = 1;

    T_LOG("Starting %s", __FUNCTION__);
    pthread_rwlock_wrlock(&rwlck);
    ret = pthread_create(&tid, NULL, pthread_rwlck_blocking_thread, (void*)&rwlck);
    T_QUIET; T_ASSERT_POSIX_ZERO(ret, "Creating pthread rwlck blocking thread");
    sleep(2);

    stackshot = take_stackshot(STACKSHOT_THREAD_WAITINFO, 0);

    check_python(stackshot, "thread \\d+: pthread rwlock %llx for reading", (long long)&rwlck);

    find_blocking_info(stackshot, (struct stackshot_thread_waitinfo *)&waitinfo, &len);
    T_EXPECT_EQ(len, 1, "Only one blocking thread should exist");
    T_EXPECT_EQ(waitinfo.wait_type, kThreadWaitPThreadRWLockRead,
            "Wait type should match expected PThreadRWLockRead value");
    stackshot_config_dealloc(stackshot);

    pthread_rwlock_unlock(&rwlck);
    ret = pthread_join(tid, NULL); // wait for thread to unblock and exit
    T_EXPECT_EQ(waitinfo.context, (uint64_t)&rwlck,
            "Userspace address of rwlck should match 'context' field in stackshot");
}

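/*
 * Spawns a thread that waits on a pthread condition variable. The stackshot should report
 * a PThreadCondVar wait whose context is the userspace address of the condvar.
 */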
static void
test_pthread_cond_blocking(void)
{
    int ret = 0;
    void *stackshot = NULL;
    pthread_t tid;
    pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
    struct stackshot_thread_waitinfo waitinfo = { 0 };
    int len = 1;

    T_LOG("Starting %s", __FUNCTION__);
    ret = pthread_create(&tid, NULL, pthread_cond_blocking_thread, (void*)&cond);
    T_QUIET; T_ASSERT_POSIX_ZERO(ret, "Creating pthread condvar blocking thread");
    sleep(2);

    stackshot = take_stackshot(STACKSHOT_THREAD_WAITINFO, 0);

    check_python(stackshot, "thread \\d+: pthread condvar %llx", (long long)&cond);

    find_blocking_info(stackshot, (struct stackshot_thread_waitinfo *)&waitinfo, &len);
    T_EXPECT_EQ(len, 1, "Only one blocking thread should exist");
    T_EXPECT_EQ(waitinfo.wait_type, kThreadWaitPThreadCondVar,
            "Wait type should match expected PThreadCondVar value");
    stackshot_config_dealloc(stackshot);

    pthread_cond_signal(&cond);
    ret = pthread_join(tid, NULL); // wait for thread to unblock and exit
    T_EXPECT_EQ(waitinfo.context, (uint64_t)&cond,
            "Userspace address of condvar should match 'context' field in stackshot");
    pthread_cond_destroy(&cond);
}

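/*
 * Forks a child that pauses, then spawns a thread that blocks in waitpid() on it.
 * The stackshot should report a WaitOnProcess wait whose owner is the child's pid.
 */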
static void
test_waitpid_blocking(void)
{
    int ret = 0;
    pid_t pid = 0;
    void *stackshot = NULL;
    struct stackshot_thread_waitinfo waitinfo = { 0 };
    int len = 1;
    pthread_t tid;

    T_LOG("Starting %s", __FUNCTION__);
    if ((pid = fork()) == 0) {
        pause();
    } else {
        T_ASSERT_POSIX_SUCCESS(ret, "Running in parent. Child pid is %d", pid);

        sleep(1); // allow enough time for child to run & sleep
        ret = pthread_create(&tid, NULL, waitpid_blocking_thread, (void*)pid);
        T_QUIET; T_ASSERT_POSIX_ZERO(ret, "Creating waitpid blocking thread");

        sleep(1); // allow enough time for reaping thread to waitpid & block
        stackshot = take_stackshot(STACKSHOT_THREAD_WAITINFO, 0);
        find_blocking_info(stackshot, (struct stackshot_thread_waitinfo *)&waitinfo, &len);
        T_EXPECT_EQ(len, 1, "Only one blocking thread should exist");
        T_EXPECT_EQ(waitinfo.wait_type, kThreadWaitOnProcess,
                "Wait type should match expected WaitOnProcess value");

        check_python(stackshot, "thread \\d+: waitpid, for pid %d", (int)pid);

        stackshot_config_dealloc(stackshot);
        T_EXPECT_EQ(waitinfo.owner, pid,
                "Process ID of blocking process should match 'owner' field in stackshot");

        ret = kill(pid, SIGUSR1); // wake up child so waitpid thread can reap it & exit
        T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "Send SIGUSR1 to child process");
        ret = pthread_join(tid, NULL);
        T_QUIET; T_ASSERT_POSIX_SUCCESS(ret, "Join on waitpid thread");
    }
}

/*
 *
 * Test declarations
 *
 */

T_DECL(stackshot_block_owner_klocks, "tests stackshot block owner for kernel locks") {
    /* check to see if kmutex sysctl exists before running kmutex test */
    if (kmutex_action(KMUTEX_SYSCTL_CHECK_EXISTS))
        test_kmutex_blocking();
    /* check to see if krwlck sysctl exists before running krwlck test */
    if (krwlck_action(KRWLCK_SYSCTL_CHECK_EXISTS))
        test_krwlock_blocking();
    test_ulock_blocking();
}

T_DECL(stackshot_block_owner_pthread_mutex, "tests stackshot block owner: pthread mutex") {
    test_pthread_mutex_blocking();
}

T_DECL(stackshot_block_owner_pthread_rwlck, "tests stackshot block owner: pthread rw locks") {
    test_pthread_rwlck_blocking();
}

T_DECL(stackshot_block_owner_pthread_condvar, "tests stackshot block owner: pthread condvar") {
    test_pthread_cond_blocking();
}

T_DECL(stackshot_block_owner_semaphore, "tests stackshot block owner: semaphore") {
    test_semaphore_blocking();
}

T_DECL(stackshot_block_owner_mach_msg, "tests stackshot block owner: mach messaging") {
    test_mach_msg_blocking();
}

T_DECL(stackshot_block_owner_waitpid, "tests stackshot block owner: waitpid") {
    test_waitpid_blocking();
}