xnu-4570.41.2.tar.gz
[apple/xnu.git] / tools / tests / darwintests / stackshot_block_owner_14362384.m
1 #ifdef T_NAMESPACE
2 #undef T_NAMESPACE
3 #endif
4 #include <darwintest.h>
5
6 #include <kdd.h>
7 #include <kern/kcdata.h>
8 #include <kern/debug.h>
9 #include <kern/block_hint.h>
10 #include <mach/mach.h>
11 #include <mach/mach_init.h>
12 #include <mach/mach_traps.h>
13 #include <mach/message.h>
14 #include <mach/port.h>
15 #include <mach/semaphore.h>
16 #include <mach/task.h>
17 #include <os/lock.h>
18 #include <pthread.h>
19 #include <sys/sysctl.h>
20 #include <sys/stackshot.h>
21 #include <sys/types.h>
22 #include <stdlib.h>
23 #include <unistd.h>
24
25 T_GLOBAL_META(
26 T_META_NAMESPACE("xnu.scheduler"),
27 T_META_ASROOT(true)
28 );
29
30 #include <Foundation/Foundation.h>
31
32 #define SENDS_TO_BLOCK 6
33 #define NUMRETRIES 5
34 #define KRWLCK_STORES_EXCL_OWNER 0
35
36 #define KMUTEX_SYSCTL_CHECK_EXISTS 0
37 #define KMUTEX_SYSCTL_ACQUIRE_WAIT 1
38 #define KMUTEX_SYSCTL_ACQUIRE_NOWAIT 2
39 #define KMUTEX_SYSCTL_SIGNAL 3
40 #define KMUTEX_SYSCTL_TEARDOWN 4
41
42 #define KRWLCK_SYSCTL_CHECK_EXISTS 0
43 #define KRWLCK_SYSCTL_RACQUIRE_NOWAIT 1
44 #define KRWLCK_SYSCTL_RACQUIRE_WAIT 2
45 #define KRWLCK_SYSCTL_WACQUIRE_NOWAIT 3
46 #define KRWLCK_SYSCTL_WACQUIRE_WAIT 4
47 #define KRWLCK_SYSCTL_SIGNAL 5
48 #define KRWLCK_SYSCTL_TEARDOWN 6
49
50 static const char kmutex_ctl[] = "debug.test_MutexOwnerCtl";
51 static const char krwlck_ctl[] = "debug.test_RWLockOwnerCtl";
52
53 static mach_port_t send = MACH_PORT_NULL;
54 static mach_port_t recv = MACH_PORT_NULL;
55
/*
 * Takes a stackshot of the current process (kcdata format), retrying on
 * EBUSY/ETIMEDOUT up to NUMRETRIES times.
 *
 * extra_flags:     additional STACKSHOT_* flags OR'd into the default set.
 * since_timestamp: if non-zero, take a delta snapshot relative to this time.
 *
 * Returns the stackshot config (caller frees with stackshot_config_dealloc);
 * the captured buffer is retrieved via stackshot_config_get_stackshot_buffer().
 */
static void *
take_stackshot(uint32_t extra_flags, uint64_t since_timestamp)
{
	void * stackshot;
	int ret, retries;
	uint32_t stackshot_flags = STACKSHOT_SAVE_LOADINFO |
				   STACKSHOT_GET_GLOBAL_MEM_STATS |
				   STACKSHOT_SAVE_IMP_DONATION_PIDS |
				   STACKSHOT_KCDATA_FORMAT;

	if (since_timestamp != 0)
		stackshot_flags |= STACKSHOT_COLLECT_DELTA_SNAPSHOT;

	stackshot_flags |= extra_flags;

	stackshot = stackshot_config_create();
	T_QUIET; T_ASSERT_NOTNULL(stackshot, "Allocating stackshot config");

	ret = stackshot_config_set_flags(stackshot, stackshot_flags);
	T_ASSERT_POSIX_ZERO(ret, "Setting flags on stackshot config");

	ret = stackshot_config_set_pid(stackshot, getpid());
	T_QUIET; T_ASSERT_POSIX_ZERO(ret, "Setting target pid on stackshot config");

	if (since_timestamp != 0) {
		ret = stackshot_config_set_delta_timestamp(stackshot, since_timestamp);
		T_QUIET; T_ASSERT_POSIX_ZERO(ret, "Setting prev snapshot time on stackshot config");
	}

	for (retries = NUMRETRIES; retries > 0; retries--) {
		ret = stackshot_capture_with_config(stackshot);
		T_QUIET; T_ASSERT_TRUE(ret == 0 || ret == EBUSY || ret == ETIMEDOUT,
				"Attempting to take stackshot (error %d)...", ret);
		if (ret == 0)
			break;
		/* Bug fix: the old check compared retries against 0, but the loop
		 * condition is retries > 0, so the failure path was unreachable.
		 * Fail on the final (retries == 1) unsuccessful attempt instead. */
		if (retries == 1 && (ret == EBUSY || ret == ETIMEDOUT))
			T_ASSERT_FAIL("Failed to take stackshot after %d retries: got %d (%s)", NUMRETRIES, ret, strerror(ret));
	}
	return stackshot;
}
96
// Walks a captured stackshot's kcdata buffer and extracts thread_waitinfo
// records for every non-idle (blocked) thread in this process's task.
// waitinfo can be NULL, but len must be non-null and point to the length of the waitinfo array.
// when the function returns, len will be set to the number of waitinfo structs found in the stackshot.
// (If more blocked threads are found than fit in the array, *len still reports
// the total count; only the first oldlen entries are stored.)
static void
find_blocking_info(void * stackshot, struct stackshot_thread_waitinfo *waitinfo, int *len)
{
	void *buf;
	uint32_t t, buflen;
	NSError *error = nil;
	NSMutableDictionary *parsed_container;
	NSArray *parsed_waitinfo;

	T_QUIET; T_ASSERT_NOTNULL(len, "Length pointer shouldn't be NULL");
	// remember the caller-supplied capacity; *len becomes the output count
	int oldlen = *len;
	*len = 0;

	buf = stackshot_config_get_stackshot_buffer(stackshot);
	T_QUIET; T_ASSERT_NOTNULL(buf, "Getting stackshot buffer");
	buflen = stackshot_config_get_stackshot_size(stackshot);

	kcdata_iter_t iter = kcdata_iter(buf, buflen);

	// a valid buffer starts with either a full or a delta stackshot marker
	T_QUIET; T_ASSERT_TRUE(kcdata_iter_type(iter) == KCDATA_BUFFER_BEGIN_STACKSHOT ||
			kcdata_iter_type(iter) == KCDATA_BUFFER_BEGIN_DELTA_STACKSHOT,
			"Checking start of stackshot buffer");

	iter = kcdata_iter_next(iter);
	KCDATA_ITER_FOREACH(iter)
	{
		t = kcdata_iter_type(iter);

		// skip everything except task containers; parseKCDataContainer
		// advances the iterator past the container it consumes
		if (t != KCDATA_TYPE_CONTAINER_BEGIN) {
			continue;
		}

		if (kcdata_iter_container_type(iter) != STACKSHOT_KCCONTAINER_TASK) {
			continue;
		}

		parsed_container = parseKCDataContainer(&iter, &error);
		T_QUIET; T_ASSERT_TRUE(!error, "Error while parsing container: %d (%s)",
				(int)error.code, [error.domain UTF8String]);
		T_QUIET; T_ASSERT_TRUE(parsed_container && !error, "Parsing container");

		parsed_waitinfo = parsed_container[@"task_snapshots"][@"thread_waitinfo"];
		for (id elem in parsed_waitinfo) {
			/* check to see that tid matches expected idle status */
			uint8_t type = [elem[@"wait_type"] unsignedCharValue];
			if (type != kThreadWaitNone) {
				// only store into the array while there is room; always count
				if (waitinfo && *len < oldlen) {
					struct stackshot_thread_waitinfo *curr = &waitinfo[*len];
					curr->wait_type = type;
					curr->owner = [elem[@"owner"] unsignedLongLongValue];
					curr->waiter = [elem[@"waiter"] unsignedLongLongValue];
					curr->context = [elem[@"context"] unsignedLongLongValue];
				}
				(*len)++;
			}
		}
		// NOTE(review): manual -release implies this file builds without ARC — confirm build flags
		[parsed_container release];
	}
}
158
/* perform various actions with a mutex in kernel memory. note that, since we aren't allowed
 * to go to user space while still holding a mutex, the lock-acquiring actions in this kernel
 * sysctl will either lock and immediately release the lock, or lock and wait until a semaphore
 * is signalled, then unlock. if called with CHECK_EXISTS, returns whether or not the sysctl
 * exist in the kernel (to determine if we're running with CONFIG_XNUPOST defined). Else,
 * returns 1. */
static int kmutex_action(int action)
{
	int ret;
	if (action == KMUTEX_SYSCTL_CHECK_EXISTS) {
		/* Bug fix: this previously probed krwlck_ctl, so the kmutex test was
		 * gated on the presence of the *rwlock* sysctl rather than the mutex one. */
		ret = sysctlbyname(kmutex_ctl, NULL, NULL, NULL, 0);
		return !(ret == -1);
	}

	/* human-readable description for the assertion message below */
	char * action_name = "";
	switch(action) {
	case KMUTEX_SYSCTL_ACQUIRE_WAIT:
		action_name = "lock (and wait)";
		break;
	case KMUTEX_SYSCTL_ACQUIRE_NOWAIT:
		action_name = "lock";
		break;
	case KMUTEX_SYSCTL_SIGNAL:
		action_name = "signal to holder of";
		break;
	case KMUTEX_SYSCTL_TEARDOWN:
		action_name = "tear down";
		break;
	default:
		T_ASSERT_FAIL("Somebody passed the wrong argument to kmutex_action: %d", action);
		break;
	}

	/* the action code is passed as the sysctl's new value */
	ret = sysctlbyname(kmutex_ctl, NULL, NULL, &action, sizeof(int));
	T_ASSERT_POSIX_SUCCESS(ret, "sysctl: %s kernel mutex", action_name);
	return 1;
}
196
/* Reads the debug sysctl (no new value) to obtain the unslid kernel address of
 * the test mutex and checks that it equals the 'context' reported by stackshot. */
static void
sysctl_kmutex_test_match(uint64_t context)
{
	unsigned long long kmutex_addr = 0;
	size_t kmutex_addr_size = sizeof(kmutex_addr);
	int rc;

	rc = sysctlbyname(kmutex_ctl, &kmutex_addr, &kmutex_addr_size, NULL, 0);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(rc, "Getting unslid location of kernel mutex. Size is %llu",
			(unsigned long long)kmutex_addr_size);
	T_EXPECT_EQ(context, kmutex_addr,
			"Context should match unslid location of mutex in kernel memory");
}
210
/* We don't really care what goes into these messages, we're just sending something to a port. */
static void
msg_send_helper(mach_port_t remote_port)
{
	int ret;
	mach_msg_header_t * msg = NULL;

	/* VM_FLAGS_ANYWHERE (== 1, previously spelled as TRUE) lets the kernel pick the address */
	ret = vm_allocate(mach_task_self(),
			(vm_address_t *)&msg,
			PAGE_SIZE,
			VM_MAKE_TAG(VM_MEMORY_MACH_MSG) | VM_FLAGS_ANYWHERE);

	T_QUIET; T_ASSERT_MACH_SUCCESS(ret, "Allocating vm page %p", (void*)msg);
	msg->msgh_bits = MACH_MSGH_BITS_SET(MACH_MSG_TYPE_COPY_SEND, 0, 0, 0);
	msg->msgh_size = PAGE_SIZE;
	msg->msgh_remote_port = remote_port;
	msg->msgh_local_port = MACH_PORT_NULL;
	msg->msgh_voucher_port = MACH_PORT_NULL;
	ret = mach_msg(msg,
			MACH_SEND_MSG | MACH_MSG_OPTION_NONE,
			PAGE_SIZE,
			0,
			MACH_PORT_NULL,
			MACH_MSG_TIMEOUT_NONE,
			MACH_PORT_NULL);
	T_QUIET; T_ASSERT_MACH_SUCCESS(ret, "Sending message to port %d", remote_port);

	/* Bug fix: the vm_deallocate result was not captured, so the assert below
	 * re-checked the stale mach_msg return value instead. */
	ret = vm_deallocate(mach_task_self(), (vm_address_t)msg, PAGE_SIZE);
	T_QUIET; T_ASSERT_MACH_SUCCESS(ret, "Deallocating vm page %p", (void*)msg);
}
241
/* Receives (and discards) one message on local_port, blocking until it arrives.
 * The receive buffer is 2 pages to leave room for any trailer. */
static void
msg_recv_helper(mach_port_t local_port)
{
	int ret;
	mach_msg_size_t size = 2*PAGE_SIZE;
	mach_msg_header_t * msg = NULL;
	ret = vm_allocate(mach_task_self(),
			(vm_address_t *)&msg,
			size,
			VM_MAKE_TAG(VM_MEMORY_MACH_MSG) | TRUE );
	T_QUIET; T_ASSERT_MACH_SUCCESS(ret, "Allocating page %p for message", (void*)msg);

	ret = mach_msg(msg,
			MACH_RCV_MSG,
			0,
			size,
			local_port,
			MACH_MSG_TIMEOUT_NONE,
			MACH_PORT_NULL);
	T_QUIET; T_ASSERT_MACH_SUCCESS(ret, "Received message on port %d", local_port);
	/* Bug fix: the buffer is `size` (2 pages) but only PAGE_SIZE was deallocated,
	 * leaking one page per receive. */
	ret = vm_deallocate(mach_task_self(), (vm_address_t)msg, size);
	T_QUIET; T_ASSERT_MACH_SUCCESS(ret, "Deallocating page %p", (void*)msg);
}
265
/* perform various actions with a rwlock in kernel memory. note that, since we aren't allowed
 * to go to user space while still holding a rwlock, the lock-acquiring actions in this kernel
 * sysctl will either lock and immediately release the lock, or lock and wait until a semaphore
 * is signalled, then unlock. if called with CHECK_EXISTS, returns whether or not the sysctl
 * exist in the kernel (to determine if we're running with CONFIG_XNUPOST defined). Else,
 * returns 1. */
static int
krwlck_action(int action)
{
	int rc;

	/* Probe only: a read with no buffers tells us whether the sysctl exists. */
	if (action == KRWLCK_SYSCTL_CHECK_EXISTS) {
		rc = sysctlbyname(krwlck_ctl, NULL, NULL, NULL, 0);
		return (rc != -1);
	}

	/* human-readable description for the assertion message below */
	const char *desc;
	switch (action) {
	case KRWLCK_SYSCTL_RACQUIRE_NOWAIT:
		desc = "shared lock";
		break;
	case KRWLCK_SYSCTL_RACQUIRE_WAIT:
		desc = "shared lock (and wait)";
		break;
	case KRWLCK_SYSCTL_WACQUIRE_NOWAIT:
		desc = "exclusive lock";
		break;
	case KRWLCK_SYSCTL_WACQUIRE_WAIT:
		desc = "exclusive lock (and wait)";
		break;
	case KRWLCK_SYSCTL_SIGNAL:
		desc = "signal to holder of";
		break;
	case KRWLCK_SYSCTL_TEARDOWN:
		desc = "tear down";
		break;
	default:
		desc = "";
		T_ASSERT_FAIL("Somebody passed the wrong argument to krwlck_action: %d", action);
		break;
	}

	/* the action code travels as the sysctl's new value */
	rc = sysctlbyname(krwlck_ctl, NULL, NULL, &action, sizeof(int));
	T_ASSERT_POSIX_SUCCESS(rc, "sysctl: %s kernel rwlock", desc);
	return 1;
}
310
/* Reads the debug sysctl to obtain the unslid kernel address of the test rwlock
 * and checks that it equals the 'context' reported by stackshot. */
static void
sysctl_krwlck_test_match(uint64_t context)
{
	unsigned long long krwlck_addr = 0;
	size_t krwlck_addr_size = sizeof(krwlck_addr);
	int rc;

	rc = sysctlbyname(krwlck_ctl, &krwlck_addr, &krwlck_addr_size, NULL, 0);
	T_QUIET; T_ASSERT_POSIX_SUCCESS(rc, "Getting unslid location of kernel rwlock");
	T_EXPECT_EQ(context, krwlck_addr, "Context should match unslid location of rwlock in kernel memory");
}
322
/* "Grabbing" threads: only purpose is to grab a sync primitive and hang. */

/* Locks and immediately releases the kernel test mutex (no semaphore wait). */
static void *
kmutex_grabbing_thread(void * unused)
{
	(void)unused;
	kmutex_action(KMUTEX_SYSCTL_ACQUIRE_NOWAIT);
	return NULL;
}
332
/* Locks the kernel test mutex, then blocks in-kernel on a semaphore until
 * KMUTEX_SYSCTL_SIGNAL is issued, at which point the mutex is released. */
static void *
kmutex_grab_and_wait_thread(void * unused)
{
	(void)unused;
	kmutex_action(KMUTEX_SYSCTL_ACQUIRE_WAIT);
	return NULL;
}
340
/* Blocks on the caller-supplied mach semaphore until it is signalled. */
static void *
sem_grabbing_thread(void * arg)
{
	semaphore_t sem = *(semaphore_t *)arg;
	semaphore_wait(sem);
	return NULL;
}
348
/* First blocks in mach_msg receive on the global 'send' port until the main
 * thread pings it, then fills the 'recv' port's queue until a send blocks. */
static void *
msg_blocking_thread(void * unused)
{
	int i;

	(void)unused;
	msg_recv_helper(send);

	for (i = 0; i < SENDS_TO_BLOCK; i++) {
		msg_send_helper(recv); // will block on send until message is received
	}
	return NULL;
}
359
/* Contends on the unfair lock held by the main thread; once acquired, drops it
 * right away and exits. */
static void *
ulock_blocking_thread(void * arg)
{
	os_unfair_lock_t lock = (os_unfair_lock_t)arg;

	os_unfair_lock_lock(lock);
	os_unfair_lock_unlock(lock);
	return NULL;
}
368
// acquires a kernel rwlock for writing, and then waits on a kernel semaphore.
static void *
krwlck_write_waiting_thread(void * unused)
{
	(void)unused;
	krwlck_action(KRWLCK_SYSCTL_WACQUIRE_WAIT);
	return NULL;
}
377
// attempts to acquire a kernel rwlock for reading, and doesn't wait on a semaphore afterwards.
static void *
krwlck_read_grabbing_thread(void * unused)
{
	(void)unused;
	krwlck_action(KRWLCK_SYSCTL_RACQUIRE_NOWAIT);
	return NULL;
}
386
/* Blocks until the main thread releases the pthread mutex passed in arg, then
 * releases it again and exits. */
static void *
pthread_mutex_blocking_thread(void * arg)
{
	pthread_mutex_t *lock = (pthread_mutex_t *)arg;

	pthread_mutex_lock(lock);
	pthread_mutex_unlock(lock);
	return NULL;
}
395
/* Takes the pthread rwlock passed in arg for read (blocking while the main
 * thread holds it for write), then releases it and exits. */
static void *
pthread_rwlck_blocking_thread(void * arg)
{
	pthread_rwlock_t *lock = (pthread_rwlock_t *)arg;

	pthread_rwlock_rdlock(lock);
	pthread_rwlock_unlock(lock);
	return NULL;
}
404
/* Blocks on the pthread condvar passed in arg until the main thread signals it.
 * The mutex is thread-local; only the condvar itself is shared with the test. */
static void *
pthread_cond_blocking_thread(void * arg)
{
	pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
	pthread_cond_t *cond = (pthread_cond_t *)arg;
	/* Bug fix: POSIX requires the mutex to be held when calling
	 * pthread_cond_wait(); waiting on an unlocked mutex is undefined behavior. */
	pthread_mutex_lock(&mtx);
	pthread_cond_wait(cond, &mtx);
	pthread_mutex_unlock(&mtx);
	return NULL;
}
414
/*
 * Uses a debug sysctl to initialize a kernel mutex.
 *
 * The 'waiting' thread grabs this kernel mutex, and immediately waits on a kernel semaphore.
 * The 'grabbing' thread just attempts to lock the kernel mutex.
 * When the semaphore is signalled, the 'waiting' thread will unlock the kernel mutex,
 * giving the opportunity for the 'grabbing' thread to lock it and then immediately unlock it.
 * This allows us to create a situation in the kernel where we know a thread to be blocked
 * on a kernel mutex.
 */
static void
test_kmutex_blocking(void)
{
	int ret;
	int len = 2;
	struct stackshot_thread_waitinfo waitinfo[len];
	uint64_t thread_id;
	pthread_t grabbing, waiting;

	T_LOG("Starting %s", __FUNCTION__);
	ret = pthread_create(&waiting, NULL, kmutex_grab_and_wait_thread, NULL); // thread will block until we signal it
	T_QUIET; T_ASSERT_POSIX_ZERO(ret, "Spawning grab and wait thread");
	sleep(1); // give time for thread to block
	ret = pthread_create(&grabbing, NULL, kmutex_grabbing_thread, NULL); // thread should immediately block
	T_QUIET; T_ASSERT_POSIX_ZERO(ret, "Spawning waiting thread");
	sleep(3); // give (lots of) time for thread to give up spinning on lock

	void * stackshot = take_stackshot(STACKSHOT_THREAD_WAITINFO, 0);
	find_blocking_info(stackshot, (struct stackshot_thread_waitinfo *)&waitinfo, &len);

	/* expect exactly two blocked threads: one on the kernel semaphore (the
	 * holder) and one on the kernel mutex (the grabber) */
	T_EXPECT_EQ(len, 2, "There should only be two blocking threads");
	for (int i = 0; i < len; i++) {
		struct stackshot_thread_waitinfo *curr = &waitinfo[i];
		/* the semaphore entry belongs to the holder thread; skip it */
		if (curr->wait_type == kThreadWaitSemaphore)
			continue;
		T_EXPECT_EQ(curr->wait_type, kThreadWaitKernelMutex, "Wait type should match expected KernelMutex value");
		ret = pthread_threadid_np(waiting, &thread_id); // this is the thread that currently holds the kernel mutex
		T_QUIET; T_ASSERT_POSIX_ZERO(ret, "Getting integer value of thread id");
		T_EXPECT_EQ(curr->owner, thread_id, "Thread ID of blocking thread should match 'owner' field in stackshot");
		/* 'context' should be the unslid kernel address of the mutex */
		sysctl_kmutex_test_match(curr->context);
	}

	kmutex_action(KMUTEX_SYSCTL_SIGNAL); // waiting thread should now unblock.
	ret = pthread_join(waiting, NULL);
	T_QUIET; T_ASSERT_POSIX_ZERO(ret, "Joining on waiting thread");
	ret = pthread_join(grabbing, NULL);
	T_QUIET; T_ASSERT_POSIX_ZERO(ret, "Joining on grabber thread");
	kmutex_action(KMUTEX_SYSCTL_TEARDOWN);
	stackshot_config_dealloc(stackshot);
}
465
/* Initialize a userspace semaphore, and spawn a thread to block on it. */
static void
test_semaphore_blocking(void)
{
	int ret;
	semaphore_t sem;
	struct stackshot_thread_waitinfo waitinfo;
	int len = 1;
	uint64_t pid;

	T_LOG("Starting %s", __FUNCTION__);
	ret = semaphore_create(mach_task_self(), &sem, SYNC_POLICY_FIFO, 0);
	T_QUIET; T_ASSERT_MACH_SUCCESS(ret, "Creating semaphore");
	pthread_t tid;
	ret = pthread_create(&tid, NULL, sem_grabbing_thread, (void*)&sem); // thread should immediately block
	T_QUIET; T_ASSERT_POSIX_ZERO(ret, "Creating semaphore grabbing thread");

	sleep(1); // give time for thread to block

	void * stackshot = take_stackshot(STACKSHOT_THREAD_WAITINFO, 0);
	find_blocking_info(stackshot, (struct stackshot_thread_waitinfo *)&waitinfo, &len);
	T_EXPECT_EQ(len, 1, "Only one blocking thread should exist");
	T_EXPECT_EQ(waitinfo.wait_type, kThreadWaitSemaphore, "Wait type should match expected Semaphore value");

	/* for a semaphore wait, the test expects the owning task's pid in 'owner' */
	pid = (uint64_t)getpid();
	T_EXPECT_EQ(waitinfo.owner, pid, "Owner value should match process ID");

	/* unblock the waiter, then tear everything down */
	ret = semaphore_signal(sem);
	T_QUIET; T_ASSERT_MACH_SUCCESS(ret, "Signalling semaphore");
	ret = pthread_join(tid, NULL);
	T_QUIET; T_ASSERT_POSIX_ZERO(ret, "Joining on grabber thread");
	ret = semaphore_destroy(mach_task_self(), sem);
	T_QUIET; T_ASSERT_MACH_SUCCESS(ret, "Destroying semaphore");
	stackshot_config_dealloc(stackshot);
}
501
/* Spawn a process to send a message to, and block while both sending and receiving in different contexts. */
static void
test_mach_msg_blocking(void)
{
	int ret;
	pthread_t tid;
	void *stackshot;
	struct stackshot_thread_waitinfo waitinfo;
	int len = 1;

	T_LOG("Starting %s", __FUNCTION__);
	/* set up the two global ports used by msg_blocking_thread */
	ret = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &send);
	T_QUIET; T_ASSERT_MACH_SUCCESS(ret, "Allocating send port");
	ret = mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &recv);
	T_QUIET; T_ASSERT_MACH_SUCCESS(ret, "Allocating recv port");
	ret = mach_port_insert_right(mach_task_self(), send, send, MACH_MSG_TYPE_MAKE_SEND);
	T_QUIET; T_ASSERT_MACH_SUCCESS(ret, "Getting send right to send port");
	ret = mach_port_insert_right(mach_task_self(), recv, recv, MACH_MSG_TYPE_MAKE_SEND);
	T_QUIET; T_ASSERT_MACH_SUCCESS(ret, "Getting send right to recv port");

	ret = pthread_create(&tid, NULL, msg_blocking_thread, (void*)&send); // thread should block on recv soon
	T_QUIET; T_ASSERT_POSIX_ZERO(ret, "Creating message blocking thread");

	/* phase 1: the thread is blocked receiving on 'send' */
	sleep(1); // give time for thread to block
	stackshot = take_stackshot(STACKSHOT_THREAD_WAITINFO, 0);
	find_blocking_info(stackshot, (struct stackshot_thread_waitinfo *)&waitinfo, &len);

	T_EXPECT_EQ(len, 1, "Only one blocking thread should exist");
	T_EXPECT_EQ(waitinfo.wait_type, kThreadWaitPortReceive, "Wait type should match expected PortReceive value");
	stackshot_config_dealloc(stackshot);

	msg_send_helper(send); // ping! msg_blocking_thread will now try to send us stuff, and block until we receive.

	/* phase 2: the thread has filled recv's queue and is blocked sending */
	sleep(1); // give time for thread to block
	stackshot = take_stackshot(STACKSHOT_THREAD_WAITINFO, 0);
	find_blocking_info(stackshot, (struct stackshot_thread_waitinfo *)&waitinfo, &len);
	T_EXPECT_EQ(len, 1, "Only one blocking thread should exist");
	T_EXPECT_EQ(waitinfo.wait_type, kThreadWaitPortSend, "Wait type should match expected PortSend value");
	stackshot_config_dealloc(stackshot);

	msg_recv_helper(recv); // thread should block until we receive one of its messages
	ret = pthread_join(tid, NULL);
	T_QUIET; T_ASSERT_POSIX_ZERO(ret, "Joining on blocking thread");
}
546
/* Hold an os_unfair_lock on the main thread, spawn a contender, and verify the
 * stackshot reports a UserLock wait whose owner is this (main) thread. */
static void
test_ulock_blocking(void)
{
	int ret;
	void *stackshot;
	uint64_t thread_id;
	pthread_t tid;
	struct os_unfair_lock_s ouls = OS_UNFAIR_LOCK_INIT;
	os_unfair_lock_t oul = &ouls;
	struct stackshot_thread_waitinfo waitinfo;
	int len = 1;

	T_LOG("Starting %s", __FUNCTION__);
	os_unfair_lock_lock(oul);
	ret = pthread_create(&tid, NULL, ulock_blocking_thread, (void*)oul);
	T_QUIET; T_ASSERT_POSIX_ZERO(ret, "Creating ulock blocking thread");
	sleep(3); // give time for thread to spawn, fall back to kernel for contention, and block

	stackshot = take_stackshot(STACKSHOT_THREAD_WAITINFO, 0);
	find_blocking_info(stackshot, (struct stackshot_thread_waitinfo *)&waitinfo, &len);
	T_EXPECT_EQ(len, 1, "Only one blocking thread should exist");
	T_EXPECT_EQ(waitinfo.wait_type, kThreadWaitUserLock, "Wait type should match expected UserLock value");
	stackshot_config_dealloc(stackshot);

	os_unfair_lock_unlock(oul);
	ret = pthread_join(tid, NULL); // wait for thread to unblock and exit

	/* the owner check uses waitinfo captured while we still held the lock, so
	 * it is safe to perform it after the join */
	ret = pthread_threadid_np(NULL, &thread_id); // this thread is the "owner" of the ulock
	T_QUIET; T_ASSERT_POSIX_ZERO(ret, "Getting integer value of thread id");
	T_EXPECT_EQ(waitinfo.owner, thread_id, "Thread ID of blocking thread should match 'owner' field in stackshot");
	return;
}
580
/* One thread takes the kernel rwlock exclusively and waits on an in-kernel
 * semaphore; a second blocks acquiring the same rwlock shared. The stackshot
 * must report the reader blocked with the rwlock's unslid address as context. */
static void
test_krwlock_blocking(void)
{
	int ret;
	void *stackshot;
#if KRWLCK_STORES_EXCL_OWNER
	/* Bug fix: only declared when used, to avoid an unused-variable warning
	 * while the owner-tracking enhancement is disabled. */
	uint64_t thread_id;
#endif
	pthread_t waiting, grabbing;
	int len = 2;
	struct stackshot_thread_waitinfo waitinfo[len];

	T_LOG("Starting %s", __FUNCTION__);
	// this thread should spawn, acquire a kernel rwlock for write, and then wait on a semaphore
	ret = pthread_create(&waiting, NULL, krwlck_write_waiting_thread, NULL);
	T_QUIET; T_ASSERT_POSIX_ZERO(ret, "Creating krwlck write waiting thread");
	sleep(1); // give time for thread to block
	// this thread should spawn and try to acquire the same kernel rwlock for read, but block
	ret = pthread_create(&grabbing, NULL, krwlck_read_grabbing_thread, NULL);
	T_QUIET; T_ASSERT_POSIX_ZERO(ret, "Creating krwlck read grabbing thread");
	sleep(1); // give time for thread to block

	stackshot = take_stackshot(STACKSHOT_THREAD_WAITINFO, 0);
	find_blocking_info(stackshot, (struct stackshot_thread_waitinfo *)&waitinfo, &len);

	T_EXPECT_EQ(len, 2, "There should only be two blocking threads");
	for (int i = 0; i < len; i++) {
		struct stackshot_thread_waitinfo *curr = &waitinfo[i];
		/* the semaphore entry belongs to the writer thread; skip it */
		if (curr->wait_type == kThreadWaitSemaphore)
			continue;
		T_EXPECT_EQ(curr->wait_type, kThreadWaitKernelRWLockRead, "Wait type should match expected KRWLockRead value");
		sysctl_krwlck_test_match(curr->context);

#if KRWLCK_STORES_EXCL_OWNER /* A future planned enhancement */
		ret = pthread_threadid_np(waiting, &thread_id); // this is the thread that currently holds the kernel rwlock
		T_QUIET; T_ASSERT_POSIX_ZERO(ret, "Getting integer value of thread id");
		T_EXPECT_EQ(curr->owner, thread_id, "Thread ID of blocking thread should match 'owner' field in stackshot");
#endif /* KRWLCK_STORES_EXCL_OWNER */ /* bug fix: comment previously named the wrong macro */
	}

	krwlck_action(KRWLCK_SYSCTL_SIGNAL); // pthread should now unblock & finish
	ret = pthread_join(waiting, NULL);
	T_QUIET; T_ASSERT_POSIX_ZERO(ret, "Joining on waiting thread");
	ret = pthread_join(grabbing, NULL);
	T_QUIET; T_ASSERT_POSIX_ZERO(ret, "Joining on grabbing thread");
	krwlck_action(KRWLCK_SYSCTL_TEARDOWN);
	stackshot_config_dealloc(stackshot);
}
627
/* Hold a pthread mutex on the main thread, spawn a contender, and verify the
 * stackshot reports a PThreadMutex wait with this thread as owner and the
 * mutex's userspace address as context. */
static void
test_pthread_mutex_blocking(void)
{
	int ret;
	void *stackshot;
	uint64_t thread_id;
	pthread_t tid;
	struct stackshot_thread_waitinfo waitinfo;
	pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
	int len = 1;

	T_LOG("Starting %s", __FUNCTION__);

	pthread_mutex_lock(&mtx);
	ret = pthread_create(&tid, NULL, pthread_mutex_blocking_thread, (void*)&mtx);
	T_QUIET; T_ASSERT_POSIX_ZERO(ret, "Creating pthread mutex blocking thread");
	sleep(2); // give time for thread to block

	stackshot = take_stackshot(STACKSHOT_THREAD_WAITINFO, 0);
	find_blocking_info(stackshot, (struct stackshot_thread_waitinfo *)&waitinfo, &len);
	T_EXPECT_EQ(len, 1, "Only one blocking thread should exist");
	T_EXPECT_EQ(waitinfo.wait_type, kThreadWaitPThreadMutex,
			"Wait type should match expected PThreadMutex value");
	stackshot_config_dealloc(stackshot);

	pthread_mutex_unlock(&mtx);
	ret = pthread_join(tid, NULL); // wait for thread to unblock and exit
	/* Bug fix: the join result was assigned but never checked; assert it like
	 * every other test in this file does. */
	T_QUIET; T_ASSERT_POSIX_ZERO(ret, "Joining on blocking thread");

	/* owner/context checks use waitinfo captured while the lock was held */
	ret = pthread_threadid_np(NULL, &thread_id); // this thread is the "owner" of the mutex
	T_QUIET; T_ASSERT_POSIX_ZERO(ret, "Getting integer value of thread id");
	T_EXPECT_EQ(waitinfo.owner, thread_id,
			"Thread ID of blocking thread should match 'owner' field in stackshot");
	T_EXPECT_EQ(waitinfo.context, (uint64_t)&mtx,
			"Userspace address of mutex should match 'context' field in stackshot");
}
663
/* Hold a pthread rwlock for write on the main thread, spawn a reader, and
 * verify the stackshot reports a PThreadRWLockRead wait with the rwlock's
 * userspace address as context. */
static void
test_pthread_rwlck_blocking(void)
{
	int ret;
	void *stackshot;
	pthread_t tid;
	struct stackshot_thread_waitinfo waitinfo;
	pthread_rwlock_t rwlck = PTHREAD_RWLOCK_INITIALIZER;
	int len = 1;

	T_LOG("Starting %s", __FUNCTION__);
	pthread_rwlock_wrlock(&rwlck);
	ret = pthread_create(&tid, NULL, pthread_rwlck_blocking_thread, (void*)&rwlck);
	T_QUIET; T_ASSERT_POSIX_ZERO(ret, "Creating pthread rwlck blocking thread");
	sleep(2); // give time for thread to block

	stackshot = take_stackshot(STACKSHOT_THREAD_WAITINFO, 0);
	find_blocking_info(stackshot, (struct stackshot_thread_waitinfo *)&waitinfo, &len);
	T_EXPECT_EQ(len, 1, "Only one blocking thread should exist");
	T_EXPECT_EQ(waitinfo.wait_type, kThreadWaitPThreadRWLockRead,
			"Wait type should match expected PThreadRWLockRead value");
	stackshot_config_dealloc(stackshot);

	pthread_rwlock_unlock(&rwlck);
	ret = pthread_join(tid, NULL); // wait for thread to unblock and exit
	/* Bug fix: the join result was assigned but never checked; assert it like
	 * every other test in this file does. */
	T_QUIET; T_ASSERT_POSIX_ZERO(ret, "Joining on blocking thread");
	T_EXPECT_EQ(waitinfo.context, (uint64_t)&rwlck,
			"Userspace address of rwlck should match 'context' field in stackshot");
}
692
/* Spawn a thread that waits on a pthread condvar, and verify the stackshot
 * reports a PThreadCondVar wait with the condvar's userspace address as context. */
static void
test_pthread_cond_blocking(void)
{
	int ret;
	void *stackshot;
	pthread_t tid;
	pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
	struct stackshot_thread_waitinfo waitinfo;
	int len = 1;

	T_LOG("Starting %s", __FUNCTION__);
	ret = pthread_create(&tid, NULL, pthread_cond_blocking_thread, (void*)&cond);
	T_QUIET; T_ASSERT_POSIX_ZERO(ret, "Creating pthread condvar blocking thread");
	sleep(2); // give time for thread to block

	stackshot = take_stackshot(STACKSHOT_THREAD_WAITINFO, 0);
	find_blocking_info(stackshot, (struct stackshot_thread_waitinfo *)&waitinfo, &len);
	T_EXPECT_EQ(len, 1, "Only one blocking thread should exist");
	T_EXPECT_EQ(waitinfo.wait_type, kThreadWaitPThreadCondVar,
			"Wait type should match expected PThreadCondVar value");
	stackshot_config_dealloc(stackshot);

	pthread_cond_signal(&cond);
	ret = pthread_join(tid, NULL); // wait for thread to unblock and exit
	/* Bug fix: the join result was assigned but never checked; assert it like
	 * every other test in this file does. */
	T_QUIET; T_ASSERT_POSIX_ZERO(ret, "Joining on blocking thread");
	T_EXPECT_EQ(waitinfo.context, (uint64_t)&cond,
			"Userspace address of condvar should match 'context' field in stackshot");
	pthread_cond_destroy(&cond);
}
721
722 /*
723 *
724 * Test declarations
725 *
726 */
727
/* Entry point: kernel-lock block-owner tests (kmutex, krwlck, ulock). The
 * kernel-lock sysctls only exist on CONFIG_XNUPOST kernels, so probe first. */
T_DECL(stackshot_block_owner_klocks, "tests stackshot block owner for kernel locks") {
	/* check to see if kmutex sysctl exists before running kmutex test */
	if (kmutex_action(KMUTEX_SYSCTL_CHECK_EXISTS))
		test_kmutex_blocking();
	/* check to see if krwlck sysctl exists before running krwlck test */
	if (krwlck_action(KRWLCK_SYSCTL_CHECK_EXISTS))
		test_krwlock_blocking();
	test_ulock_blocking();
}
737
/* Entry point: pthread mutex block-owner test. */
T_DECL(stackshot_block_owner_pthread_mutex, "tests stackshot block owner: pthread mutex") {
	test_pthread_mutex_blocking();
}
741
/* Entry point: pthread rwlock block-owner test. */
T_DECL(stackshot_block_owner_pthread_rwlck, "tests stackshot block owner: pthread rw locks") {
	test_pthread_rwlck_blocking();
}
745
/* Entry point: pthread condition-variable block-owner test. */
T_DECL(stackshot_block_owner_pthread_condvar, "tests stackshot block owner: pthread condvar") {
	test_pthread_cond_blocking();
}
749
/* Entry point: mach semaphore block-owner test. */
T_DECL(stackshot_block_owner_semaphore, "tests stackshot block owner: semaphore") {
	test_semaphore_blocking();
}
753
/* Entry point: mach message send/receive block-owner test. */
T_DECL(stackshot_block_owner_mach_msg, "tests stackshot block owner: mach messaging") {
	test_mach_msg_blocking();
}