/*
 * Source: apple/xnu — tests/turnstile_multihop.c
 * (git web-viewer header lines removed)
 */
1 /*
2 * turnstile_multihop: Tests turnstile and multi hop priority propagation.
3 */
4
5 #ifdef T_NAMESPACE
6 #undef T_NAMESPACE
7 #endif
8
9 #include <darwintest.h>
10 #include <darwintest_multiprocess.h>
11
12 #include <dispatch/dispatch.h>
13 #include <pthread.h>
14 #include <launch.h>
15 #include <mach/mach.h>
16 #include <mach/message.h>
17 #include <mach/mach_voucher.h>
18 #include <pthread/workqueue_private.h>
19 #include <voucher/ipc_pthread_priority_types.h>
20 #include <servers/bootstrap.h>
21 #include <stdlib.h>
22 #include <sys/event.h>
23 #include <unistd.h>
24 #include <crt_externs.h>
25 #include <signal.h>
26 #include <sys/types.h>
27 #include <sys/sysctl.h>
28 #include <libkern/OSAtomic.h>
29 #include <sys/wait.h>
30
31 #include "turnstile_multihop_helper.h"
32
33 T_GLOBAL_META(T_META_NAMESPACE("xnu.turnstile_multihop"));
34
35 #define HELPER_TIMEOUT_SECS (3000)
36
/*
 * Wire format for the test's Mach messages: a complex message carrying a
 * single port descriptor (the peer connection port, or none).
 */
struct test_msg {
	mach_msg_header_t header;
	mach_msg_body_t body;
	mach_msg_port_descriptor_t port_descriptor;
};
42
/* When true, the "spin" test variant starts CPU load threads (see thread_at_sixty). */
static boolean_t spin_for_ever = false;

/* When true, sends are tagged MACH_SEND_NOIMPORTANCE (no-importance variant). */
static boolean_t test_noimportance = false;

/* msgh_id stamped on every test message the client sends. */
#define EXPECTED_MESSAGE_ID 0x100
48
/* Forward declarations. */
static void
thread_create_at_qos(qos_class_t qos, void * (*function)(void *));
static uint64_t
nanoseconds_to_absolutetime(uint64_t nanoseconds);
static int
sched_create_load_at_qos(qos_class_t qos, void **load_token);
static int
sched_terminate_load(void *load_token) __unused;
static void do_work(int num);
static void
dispatch_sync_cancel(mach_port_t owner_thread, qos_class_t promote_qos);

static void *sched_load_thread(void *);
62
/*
 * Bookkeeping for a set of CPU-load threads created by
 * sched_create_load_at_qos() and torn down by sched_terminate_load().
 */
struct load_token_context {
	volatile int threads_should_exit; /* set to 1 to ask load threads to exit */
	int thread_count;                 /* number of entries in threads[] */
	qos_class_t qos;                  /* QoS class the load threads run at */
	pthread_t *threads;               /* array of load-thread handles */
};
69
/* Cached Mach timebase, initialized exactly once via pthread_once. */
static struct mach_timebase_info sched_mti;
static pthread_once_t sched_mti_once_control = PTHREAD_ONCE_INIT;
72
73 static void
74 sched_mti_init(void)
75 {
76 mach_timebase_info(&sched_mti);
77 }
78 uint64_t
79 nanoseconds_to_absolutetime(uint64_t nanoseconds)
80 {
81 pthread_once(&sched_mti_once_control, sched_mti_init);
82
83 return (uint64_t)(nanoseconds * (((double)sched_mti.denom) / ((double)sched_mti.numer)));
84 }
85
86 static int
87 sched_create_load_at_qos(qos_class_t qos, void **load_token)
88 {
89 struct load_token_context *context = NULL;
90 int ret;
91 int ncpu;
92 size_t ncpu_size = sizeof(ncpu);
93 int nthreads;
94 int i;
95 pthread_attr_t attr;
96
97 ret = sysctlbyname("hw.ncpu", &ncpu, &ncpu_size, NULL, 0);
98 if (ret == -1) {
99 T_LOG("sysctlbyname(hw.ncpu)");
100 return errno;
101 }
102
103 T_QUIET; T_LOG("%s: Detected %d CPUs\n", __FUNCTION__, ncpu);
104
105 nthreads = ncpu;
106 T_QUIET; T_LOG("%s: Will create %d threads\n", __FUNCTION__, nthreads);
107
108 ret = pthread_attr_init(&attr);
109 T_QUIET; T_ASSERT_MACH_SUCCESS(ret, "pthread_attr_init");
110
111 if (&pthread_attr_set_qos_class_np) {
112 ret = pthread_attr_set_qos_class_np(&attr, qos, 0);
113 T_QUIET; T_ASSERT_MACH_SUCCESS(ret, "pthread_attr_set_qos_class_np");
114 }
115
116 context = calloc(1, sizeof(*context));
117 if (context == NULL) {
118 T_QUIET; T_LOG("calloc returned error"); return ENOMEM;
119 }
120
121 context->threads_should_exit = 0;
122 context->thread_count = nthreads;
123 context->qos = qos;
124 context->threads = calloc((unsigned int)nthreads, sizeof(pthread_t));
125
126 OSMemoryBarrier();
127
128 for (i = 0; i < nthreads; i++) {
129 ret = pthread_create(&context->threads[i], &attr, sched_load_thread, context);
130 T_QUIET; T_ASSERT_MACH_SUCCESS(ret, "pthread_create");
131 T_QUIET; T_LOG("%s: Created thread %d (%p)\n", __FUNCTION__, i, (void *)context->threads[i]);
132 }
133
134 ret = pthread_attr_destroy(&attr);
135 T_QUIET; T_ASSERT_MACH_SUCCESS(ret, "pthread_attr_destroy");
136
137 *load_token = context;
138
139 return 0;
140 }
141
142 static void *
143 sched_load_thread(void *arg)
144 {
145 struct load_token_context *context = (struct load_token_context *)arg;
146
147 T_QUIET; T_LOG("%s: Thread started %p\n", __FUNCTION__, (void *)pthread_self());
148
149 while (!context->threads_should_exit) {
150 uint64_t start = mach_absolute_time();
151 uint64_t end = start + nanoseconds_to_absolutetime(900ULL * NSEC_PER_MSEC);
152
153 while ((mach_absolute_time() < end) && !context->threads_should_exit) {
154 ;
155 }
156 }
157
158 T_QUIET; T_LOG("%s: Thread terminating %p\n", __FUNCTION__, (void *)pthread_self());
159
160 return NULL;
161 }
162
163 static int
164 sched_terminate_load(void *load_token)
165 {
166 int ret;
167 int i;
168 struct load_token_context *context = (struct load_token_context *)load_token;
169
170 context->threads_should_exit = 1;
171 OSMemoryBarrier();
172
173 for (i = 0; i < context->thread_count; i++) {
174 T_QUIET; T_LOG("%s: Joining thread %d (%p)\n", __FUNCTION__, i, (void *)context->threads[i]);
175 ret = pthread_join(context->threads[i], NULL);
176 T_QUIET; T_ASSERT_MACH_SUCCESS(ret, "pthread_join");
177 }
178
179 free(context->threads);
180 free(context);
181
182 return 0;
183 }
184
185
// Find the first num primes, simply as a means of doing work
static void
do_work(int num)
{
	/* Trial-division primality; volatile defeats compiler elision.
	 * 2 is counted as the first prime before the loop starts. */
	volatile int candidate = 3, found, divisor;

	found = 2;
	while (found <= num) {
		for (divisor = 2; divisor <= candidate; divisor++) {
			if (candidate % divisor == 0) {
				break;
			}
		}
		if (divisor == candidate) {
			found++;
		}
		candidate++;
	}
}
204
205 #pragma mark pthread callbacks
206
/*
 * Workqueue worker callback. This test routes everything through the
 * workloop handler, so a plain worker thread must never be created.
 */
static void
worker_cb(pthread_priority_t __unused priority)
{
	T_FAIL("a worker thread was created");
}
212
/*
 * Legacy kevent callback. Must never fire: all events are expected to be
 * delivered to the workloop handler instead.
 */
static void
event_cb(void ** __unused events, int * __unused nevents)
{
	T_FAIL("a kevent routine was called instead of workloop");
}
218
219 static uint32_t
220 get_user_promotion_basepri(void)
221 {
222 mach_msg_type_number_t count = THREAD_POLICY_STATE_COUNT;
223 struct thread_policy_state thread_policy;
224 boolean_t get_default = FALSE;
225 mach_port_t thread_port = pthread_mach_thread_np(pthread_self());
226
227 kern_return_t kr = thread_policy_get(thread_port, THREAD_POLICY_STATE,
228 (thread_policy_t)&thread_policy, &count, &get_default);
229 T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "thread_policy_get");
230 return thread_policy.thps_user_promotion_basepri;
231 }
232
233 static uint32_t
234 get_thread_base_priority(void)
235 {
236 kern_return_t kr;
237 mach_port_t thread_port = pthread_mach_thread_np(pthread_self());
238
239 policy_timeshare_info_data_t timeshare_info;
240 mach_msg_type_number_t count = POLICY_TIMESHARE_INFO_COUNT;
241
242 kr = thread_info(thread_port, THREAD_SCHED_TIMESHARE_INFO,
243 (thread_info_t)&timeshare_info, &count);
244 T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "thread_info");
245
246 return (uint32_t)timeshare_info.base_priority;
247 }
248
249
/* Workloop identifiers: one for the listener port, one for the peer connection. */
#define LISTENER_WLID 0x100
#define CONN_WLID 0x200
252
253 static uint32_t
254 register_port_options(void)
255 {
256 return MACH_RCV_MSG | MACH_RCV_LARGE | MACH_RCV_LARGE_IDENTITY |
257 MACH_RCV_TRAILER_ELEMENTS(MACH_RCV_TRAILER_CTX) |
258 MACH_RCV_TRAILER_TYPE(MACH_MSG_TRAILER_FORMAT_0) |
259 MACH_RCV_VOUCHER;
260 }
261
262 static void
263 register_port(uint64_t wlid, mach_port_t port)
264 {
265 int r;
266
267 struct kevent_qos_s kev = {
268 .ident = port,
269 .filter = EVFILT_MACHPORT,
270 .flags = EV_ADD | EV_UDATA_SPECIFIC | EV_DISPATCH | EV_VANISHED,
271 .fflags = register_port_options(),
272 .data = 1,
273 .qos = (int32_t)_pthread_qos_class_encode(QOS_CLASS_MAINTENANCE, 0, 0)
274 };
275
276 struct kevent_qos_s kev_err = { 0 };
277
278 /* Setup workloop for mach msg rcv */
279 r = kevent_id(wlid, &kev, 1, &kev_err, 1, NULL,
280 NULL, KEVENT_FLAG_WORKLOOP | KEVENT_FLAG_ERROR_EVENTS);
281
282 T_QUIET; T_ASSERT_POSIX_SUCCESS(r, "kevent_id");
283 T_QUIET; T_ASSERT_EQ(r, 0, "no errors returned from kevent_id");
284 }
285
/*
 * Workloop handler for both the listener and the per-connection workloop.
 *
 * Verifies that although the servicer thread is requested at MAINTENANCE
 * QoS, the sync-IPC push from the blocked priority-60 client boosts its
 * effective QoS to USER_INITIATED with a base/override priority of 60
 * while the message (and reply port) is held, and that the boost drops
 * once the message is destroyed.
 */
static void
workloop_cb_test_intransit(uint64_t *workloop_id, void **eventslist, int *events)
{
	/* True once the listener workloop has seen the checkin message. */
	static bool got_peer;

	struct kevent_qos_s *kev = eventslist[0];
	mach_msg_header_t *hdr;
	struct test_msg *tmsg;

	T_LOG("Workloop handler %s called. Received message on 0x%llx",
	    __func__, *workloop_id);

	/* Skip the test if we can't check Qos */
	if (geteuid() != 0) {
		T_SKIP("kevent_qos test requires root privileges to run.");
	}

	T_QUIET; T_ASSERT_EQ(*events, 1, "should have one event");

	T_EXPECT_REQUESTED_QOS_EQ(QOS_CLASS_MAINTENANCE, "message handler should have MT requested QoS");

	/* With direct delivery, ext[0] points at the received message. */
	hdr = (mach_msg_header_t *)kev->ext[0];
	T_ASSERT_NOTNULL(hdr, "has a message");
	T_ASSERT_EQ(hdr->msgh_size, (uint32_t)sizeof(struct test_msg), "of the right size");
	tmsg = (struct test_msg *)hdr;

	switch (*workloop_id) {
	case LISTENER_WLID:
		T_LOG("Registering peer connection");
		T_QUIET; T_ASSERT_FALSE(got_peer, "Should not have seen peer yet");
		got_peer = true;
		break;

	case CONN_WLID:
		T_LOG("Received message on peer");
		break;

	default:
		T_FAIL("???");
	}

	/* Give the push time to apply, then burn CPU so the scheduler's view
	 * of this thread is current before we sample priorities. */
	sleep(5);
	T_LOG("Do some CPU work.");
	do_work(5000);

	/* Check if the override now is IN + 60 boost */
	T_EXPECT_EFFECTIVE_QOS_EQ(QOS_CLASS_USER_INITIATED,
	    "dispatch_source event handler QoS should be QOS_CLASS_USER_INITIATED");
	T_EXPECT_EQ(get_user_promotion_basepri(), 60u,
	    "dispatch_source event handler should be overridden at 60");

	T_EXPECT_EQ(get_thread_base_priority(), 60u,
	    "dispatch_source event handler should have base pri at 60");

	if (*workloop_id == LISTENER_WLID) {
		/* Move the in-transit connection port to its own workloop and
		 * re-arm this knote for the next delivery. */
		register_port(CONN_WLID, tmsg->port_descriptor.name);

		kev->flags = EV_ADD | EV_ENABLE | EV_UDATA_SPECIFIC | EV_DISPATCH | EV_VANISHED;
		kev->fflags = register_port_options();
		kev->ext[0] = kev->ext[1] = kev->ext[2] = kev->ext[3] = 0;
		*events = 1;
	} else {
		/* this will unblock the waiter */
		mach_msg_destroy(hdr);
		*events = 0;

		/* now that the message is destroyed, the priority should be gone */
		T_EXPECT_EFFECTIVE_QOS_EQ(QOS_CLASS_MAINTENANCE,
		    "dispatch_source event handler QoS should be QOS_CLASS_MAINTENANCE after destroying message");
		T_EXPECT_LE(get_user_promotion_basepri(), 0u,
		    "dispatch_source event handler should not be overridden after destroying message");
		T_EXPECT_LE(get_thread_base_priority(), 4u,
		    "dispatch_source event handler should have base pri at 4 or less after destroying message");
	}
}
365
366 static void
367 run_client_server(const char *server_name, const char *client_name)
368 {
369 dt_helper_t helpers[] = {
370 dt_launchd_helper_domain("com.apple.xnu.test.turnstile_multihop.plist",
371 server_name, NULL, LAUNCH_SYSTEM_DOMAIN),
372 dt_fork_helper(client_name)
373 };
374 dt_run_helpers(helpers, 2, HELPER_TIMEOUT_SECS);
375 }
376
377 #pragma mark Mach receive
378
379 #define TURNSTILE_MULTIHOP_SERVICE_NAME "com.apple.xnu.test.turnstile_multihop"
380
381 static mach_port_t
382 get_server_port(void)
383 {
384 mach_port_t port;
385 kern_return_t kr = bootstrap_check_in(bootstrap_port,
386 TURNSTILE_MULTIHOP_SERVICE_NAME, &port);
387 T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "server bootstrap_check_in");
388 return port;
389 }
390
391 static mach_voucher_t
392 create_pthpriority_voucher(mach_msg_priority_t qos)
393 {
394 char voucher_buf[sizeof(mach_voucher_attr_recipe_data_t) + sizeof(ipc_pthread_priority_value_t)];
395
396 mach_voucher_t voucher = MACH_PORT_NULL;
397 kern_return_t ret;
398 ipc_pthread_priority_value_t ipc_pthread_priority_value =
399 (ipc_pthread_priority_value_t)qos;
400
401 mach_voucher_attr_raw_recipe_array_t recipes;
402 mach_voucher_attr_raw_recipe_size_t recipe_size = 0;
403 mach_voucher_attr_recipe_t recipe =
404 (mach_voucher_attr_recipe_t)&voucher_buf[recipe_size];
405
406 recipe->key = MACH_VOUCHER_ATTR_KEY_PTHPRIORITY;
407 recipe->command = MACH_VOUCHER_ATTR_PTHPRIORITY_CREATE;
408 recipe->previous_voucher = MACH_VOUCHER_NULL;
409 memcpy((char *)&recipe->content[0], &ipc_pthread_priority_value, sizeof(ipc_pthread_priority_value));
410 recipe->content_size = sizeof(ipc_pthread_priority_value_t);
411 recipe_size += sizeof(mach_voucher_attr_recipe_data_t) + recipe->content_size;
412
413 recipes = (mach_voucher_attr_raw_recipe_array_t)&voucher_buf[0];
414
415 ret = host_create_mach_voucher(mach_host_self(),
416 recipes,
417 recipe_size,
418 &voucher);
419
420 T_QUIET; T_ASSERT_MACH_SUCCESS(ret, "client host_create_mach_voucher");
421 return voucher;
422 }
423
/*
 * Send a test message to send_port.
 *
 * reply_port: if non-NULL, a send-once right to it rides in the message
 *             and the send is tagged MACH_SEND_SYNC_OVERRIDE.
 * msg_port:   if non-NULL, its receive right is moved into the message
 *             via a port descriptor; otherwise the descriptor is dropped.
 * qos:        encoded pthread priority for the optional voucher (only
 *             attached when options include MACH_SEND_SYNC_USE_THRPRI).
 * options:    extra mach_msg send options OR'd in.
 *
 * The send uses a 10s timeout and asserts success.
 */
static void
send(
	mach_port_t send_port,
	mach_port_t reply_port,
	mach_port_t msg_port,
	mach_msg_priority_t qos,
	mach_msg_option_t options)
{
	kern_return_t ret = 0;

	struct test_msg send_msg = {
		.header = {
			.msgh_remote_port = send_port,
			.msgh_local_port = reply_port,
			.msgh_bits = MACH_MSGH_BITS_SET(MACH_MSG_TYPE_COPY_SEND,
	    reply_port ? MACH_MSG_TYPE_MAKE_SEND_ONCE : 0,
	    MACH_MSG_TYPE_MOVE_SEND,
	    MACH_MSGH_BITS_COMPLEX),
			.msgh_id = EXPECTED_MESSAGE_ID,
			.msgh_size = sizeof(send_msg),
		},
		.body = {
			.msgh_descriptor_count = 1,
		},
		.port_descriptor = {
			.name = msg_port,
			.disposition = MACH_MSG_TYPE_MOVE_RECEIVE,
			.type = MACH_MSG_PORT_DESCRIPTOR,
		},
	};

	if (options & MACH_SEND_SYNC_USE_THRPRI) {
		send_msg.header.msgh_voucher_port = create_pthpriority_voucher(qos);
	}

	if (msg_port == MACH_PORT_NULL) {
		send_msg.body.msgh_descriptor_count = 0;
	}

	ret = mach_msg(&(send_msg.header),
	    MACH_SEND_MSG |
	    MACH_SEND_TIMEOUT |
	    MACH_SEND_OVERRIDE |
	    (test_noimportance ? MACH_SEND_NOIMPORTANCE : 0) |
	    ((reply_port ? MACH_SEND_SYNC_OVERRIDE : 0) | options),
	    send_msg.header.msgh_size,
	    0,
	    MACH_PORT_NULL,
	    10000,
	    0);

	T_QUIET; T_ASSERT_MACH_SUCCESS(ret, "client mach_msg");
}
477
/*
 * Block in a sync receive on rcv_port.
 *
 * MACH_RCV_SYNC_WAIT with notify_port tells the kernel which service this
 * thread is sync-waiting on, linking this thread's turnstile to the
 * server for sync-IPC priority propagation.
 *
 * Returns the msgh_id of the received message (the caller asserts it is
 * MACH_NOTIFY_SEND_ONCE when the server destroys the reply right).
 */
static mach_msg_id_t
receive(
	mach_port_t rcv_port,
	mach_port_t notify_port)
{
	struct {
		mach_msg_header_t header;
		mach_msg_body_t body;
		mach_msg_port_descriptor_t port_descriptor;
	} rcv_msg = {
		.header =
		{
			.msgh_remote_port = MACH_PORT_NULL,
			.msgh_local_port = rcv_port,
			.msgh_size = sizeof(rcv_msg),
		},
	};

	T_LOG("Client: Starting sync receive\n");

	kern_return_t kr;
	kr = mach_msg(&(rcv_msg.header),
	    MACH_RCV_MSG |
	    MACH_RCV_SYNC_WAIT,
	    0,
	    rcv_msg.header.msgh_size,
	    rcv_port,
	    0,
	    notify_port);

	T_ASSERT_MACH_SUCCESS(kr, "mach_msg rcv");

	return rcv_msg.header.msgh_id;
}
512
/* ulocks forming the priority-inversion chain (see thread_at_* below). */
static lock_t lock_DEF;
static lock_t lock_IN;
static lock_t lock_UI;

/* Mach thread ports of the chain participants, published by each thread
 * as it starts. */
static mach_port_t main_thread_port;
static mach_port_t def_thread_port;
static mach_port_t in_thread_port;
static mach_port_t ui_thread_port;
static mach_port_t sixty_thread_port;

/* Thread acting as the fake dispatch-sync workloop owner
 * (set in dispatch_sync_wait/dispatch_sync_cancel). */
static uint64_t dispatch_sync_owner;
524
525 static int
526 get_pri(thread_t thread_port)
527 {
528 kern_return_t kr;
529
530 thread_extended_info_data_t extended_info;
531 mach_msg_type_number_t count = THREAD_EXTENDED_INFO_COUNT;
532 kr = thread_info(thread_port, THREAD_EXTENDED_INFO,
533 (thread_info_t)&extended_info, &count);
534
535 T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "thread_info");
536
537 return extended_info.pth_curpri;
538 }
539
540 static void
541 set_thread_name(const char *fn_name)
542 {
543 char name[50] = "";
544
545 thread_t thread_port = pthread_mach_thread_np(pthread_self());
546
547 int pri = get_pri(thread_port);
548
549 snprintf(name, sizeof(name), "%s at pri %2d", fn_name, pri);
550 pthread_setname_np(name);
551 }
552
553 static void
554 thread_wait_to_block(mach_port_t thread_port)
555 {
556 thread_extended_info_data_t extended_info;
557 kern_return_t kr;
558
559 while (1) {
560 mach_msg_type_number_t count = THREAD_EXTENDED_INFO_COUNT;
561 kr = thread_info(thread_port, THREAD_EXTENDED_INFO,
562 (thread_info_t)&extended_info, &count);
563
564 T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "thread_info");
565
566 if (extended_info.pth_run_state == TH_STATE_WAITING) {
567 T_LOG("Target thread blocked\n");
568 break;
569 }
570 thread_switch(thread_port, SWITCH_OPTION_DEPRESS, 0);
571 }
572 }
573
574 static void
575 thread_wait_to_boost(mach_port_t thread_port, mach_port_t yield_thread, int priority)
576 {
577 thread_extended_info_data_t extended_info;
578 kern_return_t kr;
579
580 while (1) {
581 mach_msg_type_number_t count = THREAD_EXTENDED_INFO_COUNT;
582 kr = thread_info(thread_port, THREAD_EXTENDED_INFO,
583 (thread_info_t)&extended_info, &count);
584
585 T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "thread_info");
586
587 if (extended_info.pth_priority >= priority) {
588 T_LOG("Target thread boosted\n");
589 break;
590 }
591 thread_switch(yield_thread, SWITCH_OPTION_DEPRESS, 0);
592 }
593 }
594
/*
 * Fake a dispatch_sync() wait on a workloop owned by owner_thread.
 *
 * Posts an EVFILT_WORKLOOP NOTE_WL_SYNC_WAIT knote; the kernel discovers
 * the owner from dispatch_sync_owner through the EV_EXTIDX_WL_ADDR/
 * VALUE/MASK triple and pushes promote_qos onto it. Blocks until
 * dispatch_sync_cancel() posts the matching SYNC_WAKE.
 */
static void
dispatch_sync_wait(mach_port_t owner_thread, qos_class_t promote_qos)
{
	struct kevent_qos_s kev_err[] = {{ 0 }};
	uint32_t fflags = 0;
	uint64_t mask = 0;
	uint16_t action = 0;
	int r;

	action = EV_ADD | EV_DISABLE;
	fflags = NOTE_WL_SYNC_WAIT | NOTE_WL_DISCOVER_OWNER;

	dispatch_sync_owner = owner_thread;

	struct kevent_qos_s kev[] = {{
		.ident = mach_thread_self(),
		.filter = EVFILT_WORKLOOP,
		.flags = action,
		.fflags = fflags,
		.udata = (uintptr_t) &def_thread_port,
		.qos = (int32_t)_pthread_qos_class_encode(promote_qos, 0, 0),
		.ext[EV_EXTIDX_WL_MASK] = mask,
		.ext[EV_EXTIDX_WL_VALUE] = dispatch_sync_owner,
		.ext[EV_EXTIDX_WL_ADDR] = (uint64_t)&dispatch_sync_owner,
	}};

	/* Setup workloop to fake dispatch sync wait on a workloop */
	r = kevent_id(30, kev, 1, kev_err, 1, NULL,
	    NULL, KEVENT_FLAG_WORKLOOP | KEVENT_FLAG_ERROR_EVENTS);
	T_QUIET; T_LOG("dispatch_sync_wait returned\n");
}
626
/*
 * Counterpart of dispatch_sync_wait(): wake the thread sync-waiting on
 * workloop 30 (identified by its thread port, def_thread_port) and end
 * owner_thread's simulated ownership of the workloop.
 */
static void
dispatch_sync_cancel(mach_port_t owner_thread, qos_class_t promote_qos)
{
	struct kevent_qos_s kev_err[] = {{ 0 }};
	uint32_t fflags = 0;
	uint64_t mask = 0;
	uint16_t action = 0;
	int r;

	action = EV_DELETE | EV_ENABLE;
	fflags = NOTE_WL_SYNC_WAKE | NOTE_WL_END_OWNERSHIP;

	dispatch_sync_owner = owner_thread;

	struct kevent_qos_s kev[] = {{
		.ident = def_thread_port,
		.filter = EVFILT_WORKLOOP,
		.flags = action,
		.fflags = fflags,
		.udata = (uintptr_t) &def_thread_port,
		.qos = (int32_t)_pthread_qos_class_encode(promote_qos, 0, 0),
		.ext[EV_EXTIDX_WL_MASK] = mask,
		.ext[EV_EXTIDX_WL_VALUE] = dispatch_sync_owner,
		.ext[EV_EXTIDX_WL_ADDR] = (uint64_t)&dispatch_sync_owner,
	}};

	/* Setup workloop to fake dispatch sync wake on a workloop */
	r = kevent_id(30, kev, 1, kev_err, 1, NULL,
	    NULL, KEVENT_FLAG_WORKLOOP | KEVENT_FLAG_ERROR_EVENTS);
	T_QUIET; T_LOG("dispatch_sync_cancel returned\n");
}
658
/*
 * Final hop of the chain: a fixed-priority 60 thread that blocks on the
 * UI ulock, so its priority propagates down the whole ulock/dispatch/IPC
 * chain. Ends the test (T_END) once it acquires the lock.
 */
static void *
thread_at_sixty(void *arg __unused)
{
	int policy;
	struct sched_param param;
	int ret;
	void *load_token;
	uint64_t before_lock_time, after_lock_time;

	sixty_thread_port = mach_thread_self();

	set_thread_name(__FUNCTION__);

	/* Change our priority to 60 */
	ret = pthread_getschedparam(pthread_self(), &policy, &param);
	T_QUIET; T_ASSERT_MACH_SUCCESS(ret, "pthread_getschedparam");

	param.sched_priority = 60;

	ret = pthread_setschedparam(pthread_self(), policy, &param);
	T_QUIET; T_ASSERT_MACH_SUCCESS(ret, "pthread_setschedparam");

	ret = pthread_getschedparam(pthread_self(), &policy, &param);
	T_QUIET; T_ASSERT_MACH_SUCCESS(ret, "pthread_getschedparam");

	T_LOG("My priority is %d", param.sched_priority);

	/* Wait for the UI thread to be pushed to at least priority 46. */
	thread_wait_to_boost(in_thread_port, ui_thread_port, 46);

	if (spin_for_ever) {
		/* Schedule load at Default */
		/* NOTE(review): load_token is only set on this path and is
		 * never passed to sched_terminate_load(); the load threads
		 * run until the test process exits. */
		sched_create_load_at_qos(QOS_CLASS_DEFAULT, &load_token);
	}

	T_LOG("Thread at priority 60 trying to acquire UI lock");

	before_lock_time = mach_absolute_time();
	ull_lock(&lock_UI, 3, UL_UNFAIR_LOCK, 0);
	after_lock_time = mach_absolute_time();

	T_QUIET; T_LOG("The time for priority 60 thread to acquire lock was %llu \n",
	    (after_lock_time - before_lock_time));
	/* T_END terminates the test; this function does not return. */
	T_END;
}
703
/*
 * Third hop: UI-QoS thread. Holds lock_UI (which thread_at_sixty will
 * block on), then blocks on lock_IN so the push flows 60 -> UI -> IN.
 */
static void *
thread_at_ui(void *arg __unused)
{
	ui_thread_port = mach_thread_self();

	set_thread_name(__FUNCTION__);

	/* Grab the first ulock */
	ull_lock(&lock_UI, 2, UL_UNFAIR_LOCK, 0);

	/* Wait for the DEF thread to be boosted, then launch the 60 thread. */
	thread_wait_to_boost(def_thread_port, in_thread_port, 37);
	thread_create_at_qos(QOS_CLASS_USER_INTERACTIVE, thread_at_sixty);

	T_LOG("Thread at UI priority trying to acquire IN lock");
	ull_lock(&lock_IN, 2, UL_UNFAIR_LOCK, 0);
	ull_unlock(&lock_UI, 2, UL_UNFAIR_LOCK, 0);
	return NULL;
}
722
/*
 * Second hop: IN-QoS thread. Holds lock_IN (which thread_at_ui will
 * block on), then blocks on lock_DEF so the push flows IN -> DEF.
 */
static void *
thread_at_in(void *arg __unused)
{
	in_thread_port = mach_thread_self();

	set_thread_name(__FUNCTION__);

	/* Grab the first ulock */
	ull_lock(&lock_IN, 2, UL_UNFAIR_LOCK, 0);

	T_LOG("Thread at IN priority got first lock ");

	/* Wait for the maintenance thread to be boosted, then start UI. */
	thread_wait_to_boost(main_thread_port, def_thread_port, 31);

	/* Create a new thread at QOS_CLASS_USER_INTERACTIVE qos */
	thread_create_at_qos(QOS_CLASS_USER_INTERACTIVE, thread_at_ui);

	T_LOG("Thread at IN priority trying to acquire default lock");
	ull_lock(&lock_DEF, 1, UL_UNFAIR_LOCK, 0);
	ull_unlock(&lock_IN, 2, UL_UNFAIR_LOCK, 0);
	return NULL;
}
745
/*
 * First intermediate hop: DEFAULT-QoS thread. Holds lock_DEF (which
 * thread_at_in will block on), then fakes a dispatch_sync wait on the
 * maintenance thread, extending the chain from ulocks into a workloop.
 */
static void *
thread_at_default(void *arg __unused)
{
	def_thread_port = mach_thread_self();

	set_thread_name(__FUNCTION__);

	/* Grab the first ulock */
	ull_lock(&lock_DEF, 1, UL_UNFAIR_LOCK, 0);

	T_LOG("Thread at DEFAULT priority got first lock ");

	/* Wait for the maintenance thread to block in its sync receive. */
	thread_wait_to_block(main_thread_port);

	/* Create a new thread at QOS_CLASS_USER_INITIATED qos */
	thread_create_at_qos(QOS_CLASS_USER_INITIATED, thread_at_in);

	T_LOG("Thread at Default priority trying to wait on dispatch sync for maintenance thread");
	dispatch_sync_wait(main_thread_port, QOS_CLASS_DEFAULT);
	ull_unlock(&lock_DEF, 1, UL_UNFAIR_LOCK, 0);
	return NULL;
}
768
/*
 * Chain root: maintenance-QoS thread acting as the IPC client.
 *
 * Sends a sync message to the (in-transit) connection port and an async
 * checkin message carrying that port's receive right to the server, then
 * blocks in a sync receive on its special reply port. The server holds
 * the resulting boost until it destroys the message, at which point this
 * thread receives a send-once notification instead of a reply.
 */
static void *
thread_at_maintenance(void *arg __unused)
{
	mach_port_t service_port;
	mach_port_t conn_port;
	mach_port_t special_reply_port;
	mach_port_options_t opts = {
		.flags = MPO_INSERT_SEND_RIGHT,
	};

	main_thread_port = mach_thread_self();

	set_thread_name(__FUNCTION__);

	kern_return_t kr = bootstrap_look_up(bootstrap_port,
	    TURNSTILE_MULTIHOP_SERVICE_NAME, &service_port);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "client bootstrap_look_up");

	kr = mach_port_construct(mach_task_self(), &opts, 0ull, &conn_port);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "mach_port_construct");

	special_reply_port = thread_get_special_reply_port();
	T_QUIET; T_ASSERT_TRUE(MACH_PORT_VALID(special_reply_port), "get_thread_special_reply_port");

	/* Become the dispatch sync owner, dispatch_sync_owner will be set in dispatch_sync_wait function */

	/* Send a sync message */
	send(conn_port, special_reply_port, MACH_PORT_NULL,
	    (uint32_t)_pthread_qos_class_encode(QOS_CLASS_MAINTENANCE, 0, 0), 0);

	/* Send an async checkin message */
	send(service_port, MACH_PORT_NULL, conn_port,
	    (uint32_t)_pthread_qos_class_encode(QOS_CLASS_MAINTENANCE, 0, 0), 0);

	/* Create a new thread at QOS_CLASS_DEFAULT qos */
	thread_create_at_qos(QOS_CLASS_DEFAULT, thread_at_default);

	/* Block on Sync IPC */
	mach_msg_id_t message_id = receive(special_reply_port, service_port);

	T_ASSERT_EQ(message_id, MACH_NOTIFY_SEND_ONCE, "got the expected send-once notification");

	T_LOG("received reply");

	/* Unblock the DEFAULT thread's fake dispatch_sync wait. */
	dispatch_sync_cancel(def_thread_port, QOS_CLASS_DEFAULT);
	return NULL;
}
816
T_HELPER_DECL(three_ulock_sync_ipc_hop,
    "Create chain of 4 threads with 3 ulocks and 1 sync IPC at different qos")
{
	/* The maintenance thread bootstraps the whole chain; park main here
	 * until the test ends (T_END in thread_at_sixty). */
	thread_create_at_qos(QOS_CLASS_MAINTENANCE, thread_at_maintenance);
	sigsuspend(0);
}
823
T_HELPER_DECL(three_ulock_sync_ipc_hop_noimportance,
    "Create chain of 4 threads with 3 ulocks and 1 no-importance sync IPC at different qos")
{
	/* Same as three_ulock_sync_ipc_hop, but all sends carry
	 * MACH_SEND_NOIMPORTANCE. */
	test_noimportance = true;
	thread_create_at_qos(QOS_CLASS_MAINTENANCE, thread_at_maintenance);
	sigsuspend(0);
}
831
832
833 static void
834 thread_create_at_qos(qos_class_t qos, void * (*function)(void *))
835 {
836 qos_class_t qos_thread;
837 pthread_t thread;
838 pthread_attr_t attr;
839 int ret;
840
841 ret = setpriority(PRIO_DARWIN_ROLE, 0, PRIO_DARWIN_ROLE_UI_FOCAL);
842 if (ret != 0) {
843 T_LOG("set priority failed\n");
844 }
845
846 pthread_attr_init(&attr);
847 pthread_attr_set_qos_class_np(&attr, qos, 0);
848 pthread_create(&thread, &attr, function, NULL);
849
850 T_LOG("pthread created\n");
851 pthread_get_qos_class_np(thread, &qos_thread, NULL);
852 }
853
854 #pragma mark Mach receive - kevent_qos
855
T_HELPER_DECL(server_kevent_id,
    "Reply with the QoS that a dispatch source event handler ran with")
{
	/* Route all event delivery through the workloop handler; the plain
	 * worker/kevent callbacks fail the test if ever invoked. */
	T_QUIET; T_ASSERT_POSIX_ZERO(_pthread_workqueue_init_with_workloop(
		worker_cb, event_cb,
		(pthread_workqueue_function_workloop_t)workloop_cb_test_intransit, 0, 0), NULL);

	register_port(LISTENER_WLID, get_server_port());
	sigsuspend(0);
	T_ASSERT_FAIL("should receive a message");
}
867
/*
 * Declare a client/server multihop test: runs `client_name` against the
 * `server_name` launchd helper. Requires root (QoS checks).
 */
#define TEST_MULTIHOP(server_name, client_name, name) \
	T_DECL(server_kevent_id_##name, \
	    "Event delivery using a kevent_id", \
	    T_META_ASROOT(YES)) \
	{ \
		run_client_server(server_name, client_name); \
	}

/*
 * Same as TEST_MULTIHOP, but with default-QoS spinner load running for
 * the duration of the test. Currently disabled (T_META_ENABLED(FALSE)).
 */
#define TEST_MULTIHOP_SPIN(server_name, client_name, name) \
	T_DECL(server_kevent_id_##name, \
	    "Event delivery using a kevent_id", \
	    T_META_ASROOT(YES), T_META_ENABLED(FALSE)) \
	{ \
		spin_for_ever = true; \
		run_client_server(server_name, client_name); \
		spin_for_ever = false; \
	}
885
/*
 * Test 1: Test multihop priority boosting with ulocks, dispatch sync and sync IPC.
 *
 * Create threads at different QoS that each acquire a ulock and block on the next
 * ulock/dispatch sync, creating a sync chain. The last hop of the chain is blocked
 * on Sync IPC.
 */
TEST_MULTIHOP("server_kevent_id", "three_ulock_sync_ipc_hop", three_ulock_sync_ipc_hop)

TEST_MULTIHOP("server_kevent_id", "three_ulock_sync_ipc_hop_noimportance", three_ulock_sync_ipc_hop_noimportance)

/*
 * Test 2: Test multihop priority boosting with ulocks, dispatch sync and sync IPC.
 *
 * Same chain as Test 1, but before the priority 60 thread blocks on the ulock it
 * also starts per-CPU spinner threads at default QoS, adding scheduler pressure.
 */
TEST_MULTIHOP_SPIN("server_kevent_id", "three_ulock_sync_ipc_hop", three_ulock_sync_ipc_hop_spin)