]> git.saurik.com Git - apple/xnu.git/blame - tests/turnstile_multihop.c
xnu-4903.270.47.tar.gz
[apple/xnu.git] / tests / turnstile_multihop.c
CommitLineData
d9a64523
A
1/*
2 * turnstile_multihop: Tests turnstile and multi hop priority propagation.
3 */
4
5#ifdef T_NAMESPACE
6#undef T_NAMESPACE
7#endif
8
9#include <darwintest.h>
10#include <darwintest_multiprocess.h>
11
12#include <dispatch/dispatch.h>
13#include <pthread.h>
14#include <launch.h>
15#include <mach/mach.h>
16#include <mach/message.h>
17#include <mach/mach_voucher.h>
18#include <pthread/workqueue_private.h>
19#include <voucher/ipc_pthread_priority_types.h>
20#include <servers/bootstrap.h>
21#include <stdlib.h>
22#include <sys/event.h>
23#include <unistd.h>
24#include <crt_externs.h>
25#include <signal.h>
26#include <sys/types.h>
27#include <sys/sysctl.h>
28#include <libkern/OSAtomic.h>
29#include <sys/wait.h>
30
31#include "turnstile_multihop_helper.h"
32
T_GLOBAL_META(T_META_NAMESPACE("xnu.turnstile_multihop"));

/* Generous helper timeout: the stat-measurement loop forks many children. */
#define HELPER_TIMEOUT_SECS (3000)

/* When true, thread_at_sixty() also spawns CPU-spinner load (see TEST_MULTIHOP_SPIN). */
static boolean_t spin_for_ever = false;
38
/* Forward declarations for the helper routines defined below. */
static void
thread_create_at_qos(qos_class_t qos, void * (*function)(void *));
static uint64_t
nanoseconds_to_absolutetime(uint64_t nanoseconds);
static int
sched_create_load_at_qos(qos_class_t qos, void **load_token);
static int
sched_terminate_load(void *load_token) __unused;
static void do_work(int num);
static void
dispatch_sync_cancel(mach_port_t owner_thread, qos_class_t promote_qos);

static void *sched_load_thread(void *);

/*
 * Shared state for a group of CPU-spinner threads created by
 * sched_create_load_at_qos() and torn down by sched_terminate_load().
 */
struct load_token_context {
	volatile int threads_should_exit;  /* set to 1 to ask all spinners to exit */
	int thread_count;                  /* number of entries in threads[] */
	qos_class_t qos;                   /* QoS class the spinners run at */
	pthread_t *threads;                /* the spinner threads themselves */
};

/* Mach timebase scale factors, initialized once via pthread_once below. */
static struct mach_timebase_info sched_mti;
static pthread_once_t sched_mti_once_control = PTHREAD_ONCE_INIT;
62
0a7de745
A
/* pthread_once initializer: cache the mach timebase numer/denom. */
static void
sched_mti_init(void)
{
	mach_timebase_info(&sched_mti);
}
68uint64_t
69nanoseconds_to_absolutetime(uint64_t nanoseconds)
70{
71 pthread_once(&sched_mti_once_control, sched_mti_init);
72
73 return (uint64_t)(nanoseconds * (((double)sched_mti.denom) / ((double)sched_mti.numer)));
74}
75
76static int
77sched_create_load_at_qos(qos_class_t qos, void **load_token)
78{
79 struct load_token_context *context = NULL;
80 int ret;
81 int ncpu;
82 size_t ncpu_size = sizeof(ncpu);
83 int nthreads;
84 int i;
85 pthread_attr_t attr;
86
87 ret = sysctlbyname("hw.ncpu", &ncpu, &ncpu_size, NULL, 0);
88 if (ret == -1) {
89 T_LOG("sysctlbyname(hw.ncpu)");
90 return errno;
91 }
92
93 T_QUIET; T_LOG("%s: Detected %d CPUs\n", __FUNCTION__, ncpu);
94
95 nthreads = ncpu;
96 T_QUIET; T_LOG("%s: Will create %d threads\n", __FUNCTION__, nthreads);
97
98 ret = pthread_attr_init(&attr);
99 T_QUIET; T_ASSERT_MACH_SUCCESS(ret, "pthread_attr_init");
100
101 if (&pthread_attr_set_qos_class_np) {
102 ret = pthread_attr_set_qos_class_np(&attr, qos, 0);
103 T_QUIET; T_ASSERT_MACH_SUCCESS(ret, "pthread_attr_set_qos_class_np");
104 }
105
106 context = calloc(1, sizeof(*context));
0a7de745
A
107 if (context == NULL) {
108 T_QUIET; T_LOG("calloc returned error"); return ENOMEM;
109 }
d9a64523
A
110
111 context->threads_should_exit = 0;
112 context->thread_count = nthreads;
113 context->qos = qos;
114 context->threads = calloc((unsigned int)nthreads, sizeof(pthread_t));
115
116 OSMemoryBarrier();
117
0a7de745 118 for (i = 0; i < nthreads; i++) {
d9a64523
A
119 ret = pthread_create(&context->threads[i], &attr, sched_load_thread, context);
120 T_QUIET; T_ASSERT_MACH_SUCCESS(ret, "pthread_create");
121 T_QUIET; T_LOG("%s: Created thread %d (%p)\n", __FUNCTION__, i, (void *)context->threads[i]);
122 }
123
124 ret = pthread_attr_destroy(&attr);
125 T_QUIET; T_ASSERT_MACH_SUCCESS(ret, "pthread_attr_destroy");
126
127 *load_token = context;
128
129 return 0;
130}
131
132static void *
133sched_load_thread(void *arg)
134{
135 struct load_token_context *context = (struct load_token_context *)arg;
136
137 T_QUIET; T_LOG("%s: Thread started %p\n", __FUNCTION__, (void *)pthread_self());
138
139 while (!context->threads_should_exit) {
140 uint64_t start = mach_absolute_time();
141 uint64_t end = start + nanoseconds_to_absolutetime(900ULL * NSEC_PER_MSEC);
142
0a7de745
A
143 while ((mach_absolute_time() < end) && !context->threads_should_exit) {
144 ;
145 }
d9a64523
A
146 }
147
148 T_QUIET; T_LOG("%s: Thread terminating %p\n", __FUNCTION__, (void *)pthread_self());
149
150 return NULL;
151}
152
153static int
154sched_terminate_load(void *load_token)
155{
156 int ret;
157 int i;
158 struct load_token_context *context = (struct load_token_context *)load_token;
159
160 context->threads_should_exit = 1;
161 OSMemoryBarrier();
162
0a7de745 163 for (i = 0; i < context->thread_count; i++) {
d9a64523
A
164 T_QUIET; T_LOG("%s: Joining thread %d (%p)\n", __FUNCTION__, i, (void *)context->threads[i]);
165 ret = pthread_join(context->threads[i], NULL);
166 T_QUIET; T_ASSERT_MACH_SUCCESS(ret, "pthread_join");
167 }
168
169 free(context->threads);
170 free(context);
171
172 return 0;
173}
174
175
/*
 * Count up through the first `num` primes purely as a way to burn CPU.
 * The variables stay volatile so the compiler cannot optimize the work away.
 */
static void
do_work(int num)
{
	volatile int candidate = 3, primes_found, divisor;

	for (primes_found = 2; primes_found <= num;) {
		for (divisor = 2; divisor <= candidate; divisor++) {
			if (candidate % divisor == 0) {
				break;
			}
		}
		if (divisor == candidate) {
			primes_found++;
		}
		candidate++;
	}
}
194
195#pragma mark pthread callbacks
196
/* Workqueue "create worker thread" callback: this test never expects one. */
static void
worker_cb(pthread_priority_t __unused priority)
{
	T_FAIL("a worker thread was created");
}
202
/* Legacy kevent callback: events must arrive on the workloop path instead. */
static void
event_cb(void ** __unused events, int * __unused nevents)
{
	T_FAIL("a kevent routine was called instead of workloop");
}
208
/*
 * Return the calling thread's user promotion base priority, as reported by
 * the THREAD_POLICY_STATE policy (thps_user_promotion_basepri field).
 */
static uint32_t
get_user_promotion_basepri(void)
{
	mach_msg_type_number_t count = THREAD_POLICY_STATE_COUNT;
	struct thread_policy_state thread_policy;
	boolean_t get_default = FALSE;
	mach_port_t thread_port = pthread_mach_thread_np(pthread_self());

	kern_return_t kr = thread_policy_get(thread_port, THREAD_POLICY_STATE,
	    (thread_policy_t)&thread_policy, &count, &get_default);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "thread_policy_get");
	return thread_policy.thps_user_promotion_basepri;
}
222
223static int messages_received = 0;
224/*
225 * Basic WL handler callback, it checks the
226 * effective Qos of the servicer thread.
227 */
228static void
229workloop_cb_test_intransit(uint64_t *workloop_id __unused, void **eventslist __unused, int *events)
230{
231 messages_received++;
232 T_LOG("Workloop handler workloop_cb_test_intransit called. Received message no %d",
0a7de745 233 messages_received);
d9a64523
A
234
235
236 /* Skip the test if we can't check Qos */
237 if (geteuid() != 0) {
238 T_SKIP("kevent_qos test requires root privileges to run.");
239 }
240
241 if (messages_received == 1) {
d9a64523
A
242 sleep(5);
243 T_LOG("Do some CPU work.");
244 do_work(5000);
245
246 /* Check if the override now is IN + 60 boost */
247 T_EXPECT_EFFECTIVE_QOS_EQ(QOS_CLASS_USER_INITIATED,
0a7de745 248 "dispatch_source event handler QoS should be QOS_CLASS_USER_INITIATED");
d9a64523 249 T_EXPECT_EQ(get_user_promotion_basepri(), 60u,
0a7de745 250 "dispatch_source event handler should be overridden at 60");
d9a64523
A
251
252 /* Enable the knote to get 2nd message */
253 struct kevent_qos_s *kev = *eventslist;
254 kev->flags = EV_ADD | EV_ENABLE | EV_UDATA_SPECIFIC | EV_DISPATCH | EV_VANISHED;
255 kev->fflags = (MACH_RCV_MSG | MACH_RCV_LARGE | MACH_RCV_LARGE_IDENTITY |
0a7de745
A
256 MACH_RCV_TRAILER_ELEMENTS(MACH_RCV_TRAILER_CTX) |
257 MACH_RCV_TRAILER_TYPE(MACH_MSG_TRAILER_FORMAT_0) |
258 MACH_RCV_VOUCHER);
d9a64523
A
259 *events = 1;
260 } else {
261 *events = 0;
262 exit(0);
263 }
264}
265
/*
 * Launch the server helper via launchd in the system domain and fork the
 * client helper, then wait for both with the (generous) helper timeout.
 */
static void
run_client_server(const char *server_name, const char *client_name)
{
	dt_helper_t helpers[] = {
		dt_launchd_helper_domain("com.apple.xnu.test.turnstile_multihop.plist",
		    server_name, NULL, LAUNCH_SYSTEM_DOMAIN),
		dt_fork_helper(client_name)
	};
	dt_run_helpers(helpers, 2, HELPER_TIMEOUT_SECS);
}
276
277#pragma mark Mach receive
278
#define TURNSTILE_MULTIHOP_SERVICE_NAME "com.apple.xnu.test.turnstile_multihop"

/* Check in with launchd to obtain the test service's receive right. */
static mach_port_t
get_server_port(void)
{
	mach_port_t port;
	kern_return_t kr = bootstrap_check_in(bootstrap_port,
	    TURNSTILE_MULTIHOP_SERVICE_NAME, &port);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "server bootstrap_check_in");
	return port;
}
290
/*
 * Build a voucher carrying an IPC pthread-priority attribute that encodes
 * `qos`, used to tag sync IPC messages with the sender's priority.
 * The caller owns the returned voucher reference.
 *
 * NOTE(review): mach_host_self() returns a port reference on each call that
 * is never deallocated here — looks like one host-port ref leaks per
 * voucher created; confirm whether that matters for this test's lifetime.
 */
static mach_voucher_t
create_pthpriority_voucher(mach_msg_priority_t qos)
{
	/* Buffer sized for exactly one recipe plus its priority payload. */
	char voucher_buf[sizeof(mach_voucher_attr_recipe_data_t) + sizeof(ipc_pthread_priority_value_t)];

	mach_voucher_t voucher = MACH_PORT_NULL;
	kern_return_t ret;
	ipc_pthread_priority_value_t ipc_pthread_priority_value =
	    (ipc_pthread_priority_value_t)qos;

	mach_voucher_attr_raw_recipe_array_t recipes;
	mach_voucher_attr_raw_recipe_size_t recipe_size = 0;
	mach_voucher_attr_recipe_t recipe =
	    (mach_voucher_attr_recipe_t)&voucher_buf[recipe_size];

	recipe->key = MACH_VOUCHER_ATTR_KEY_PTHPRIORITY;
	recipe->command = MACH_VOUCHER_ATTR_PTHPRIORITY_CREATE;
	recipe->previous_voucher = MACH_VOUCHER_NULL;
	memcpy((char *)&recipe->content[0], &ipc_pthread_priority_value, sizeof(ipc_pthread_priority_value));
	recipe->content_size = sizeof(ipc_pthread_priority_value_t);
	recipe_size += sizeof(mach_voucher_attr_recipe_data_t) + recipe->content_size;

	recipes = (mach_voucher_attr_raw_recipe_array_t)&voucher_buf[0];

	ret = host_create_mach_voucher(mach_host_self(),
	    recipes,
	    recipe_size,
	    &voucher);

	T_QUIET; T_ASSERT_MACH_SUCCESS(ret, "client host_create_mach_voucher");
	return voucher;
}
323
/*
 * Send a mach message to send_port, asserting success.
 *
 *   reply_port - if non-null, a send-once right is made from it and
 *                MACH_SEND_SYNC_OVERRIDE is added (sync-IPC push semantics).
 *   msg_port   - if non-null, its receive right is moved in a port
 *                descriptor; otherwise the descriptor count is zeroed.
 *   qos        - pthread-priority value; packed into a voucher only when
 *                MACH_SEND_SYNC_USE_THRPRI is present in options.
 *   options    - extra mach_msg() send options OR'd into the call.
 *
 * The send uses a 10-second timeout (10000 ms argument below).
 */
static void
send(
	mach_port_t send_port,
	mach_port_t reply_port,
	mach_port_t msg_port,
	mach_msg_priority_t qos,
	mach_msg_option_t options)
{
	kern_return_t ret = 0;

	struct {
		mach_msg_header_t header;
		mach_msg_body_t body;
		mach_msg_port_descriptor_t port_descriptor;
	} send_msg = {
		.header = {
			.msgh_remote_port = send_port,
			.msgh_local_port = reply_port,
			.msgh_bits = MACH_MSGH_BITS_SET(MACH_MSG_TYPE_COPY_SEND,
	    reply_port ? MACH_MSG_TYPE_MAKE_SEND_ONCE : 0,
	    MACH_MSG_TYPE_MOVE_SEND,
	    MACH_MSGH_BITS_COMPLEX),
			.msgh_id = 0x100,
			.msgh_size = sizeof(send_msg),
		},
		.body = {
			.msgh_descriptor_count = 1,
		},
		.port_descriptor = {
			.name = msg_port,
			.disposition = MACH_MSG_TYPE_MOVE_RECEIVE,
			.type = MACH_MSG_PORT_DESCRIPTOR,
		},
	};

	if (options & MACH_SEND_SYNC_USE_THRPRI) {
		send_msg.header.msgh_voucher_port = create_pthpriority_voucher(qos);
	}

	if (msg_port == MACH_PORT_NULL) {
		send_msg.body.msgh_descriptor_count = 0;
	}

	ret = mach_msg(&(send_msg.header),
	    MACH_SEND_MSG |
	    MACH_SEND_TIMEOUT |
	    MACH_SEND_OVERRIDE |
	    ((reply_port ? MACH_SEND_SYNC_OVERRIDE : 0) | options),
	    send_msg.header.msgh_size,
	    0,
	    MACH_PORT_NULL,
	    10000,
	    0);

	T_QUIET; T_ASSERT_MACH_SUCCESS(ret, "client mach_msg");
}
380
381static void
382receive(
383 mach_port_t rcv_port,
384 mach_port_t notify_port)
385{
386 kern_return_t ret = 0;
387
388 struct {
389 mach_msg_header_t header;
390 mach_msg_body_t body;
391 mach_msg_port_descriptor_t port_descriptor;
392 } rcv_msg = {
0a7de745 393 .header =
d9a64523 394 {
0a7de745
A
395 .msgh_remote_port = MACH_PORT_NULL,
396 .msgh_local_port = rcv_port,
397 .msgh_size = sizeof(rcv_msg),
d9a64523
A
398 },
399 };
400
401 T_LOG("Client: Starting sync receive\n");
402
403 ret = mach_msg(&(rcv_msg.header),
0a7de745
A
404 MACH_RCV_MSG |
405 MACH_RCV_SYNC_WAIT,
406 0,
407 rcv_msg.header.msgh_size,
408 rcv_port,
409 0,
410 notify_port);
d9a64523
A
411}
412
/* The three ulocks forming the first hops of the priority-inheritance chain. */
static lock_t lock_DEF;
static lock_t lock_IN;
static lock_t lock_UI;

/* Mach thread ports for each participant, published as each thread starts. */
static mach_port_t main_thread_port;
static mach_port_t def_thread_port;
static mach_port_t in_thread_port;
static mach_port_t ui_thread_port;
static mach_port_t sixty_thread_port;

/* Thread port of the simulated dispatch-sync owner (set in dispatch_sync_wait). */
static uint64_t dispatch_sync_owner;
424
0a7de745
A
/* Return a thread's current (possibly promoted) scheduling priority. */
static int
get_pri(thread_t thread_port)
{
	kern_return_t kr;

	thread_extended_info_data_t extended_info;
	mach_msg_type_number_t count = THREAD_EXTENDED_INFO_COUNT;
	kr = thread_info(thread_port, THREAD_EXTENDED_INFO,
	    (thread_info_t)&extended_info, &count);

	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "thread_info");

	return extended_info.pth_curpri;
}
439
/*
 * Name the calling thread "<fn_name> at pri NN" so its scheduling priority
 * at creation time is visible in spindumps and the debugger.
 */
static void
set_thread_name(const char *fn_name)
{
	char label[50] = "";
	int cur_pri = get_pri(pthread_mach_thread_np(pthread_self()));

	snprintf(label, sizeof(label), "%s at pri %2d", fn_name, cur_pri);
	pthread_setname_np(label);
}
452
/*
 * Spin until the target thread reports TH_STATE_WAITING (i.e. it has
 * blocked), yielding to it with a depressed-priority switch each iteration
 * so the target can make progress.
 */
static void
thread_wait_to_block(mach_port_t thread_port)
{
	thread_extended_info_data_t extended_info;
	kern_return_t kr;

	while (1) {
		mach_msg_type_number_t count = THREAD_EXTENDED_INFO_COUNT;
		kr = thread_info(thread_port, THREAD_EXTENDED_INFO,
		    (thread_info_t)&extended_info, &count);

		T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "thread_info");

		if (extended_info.pth_run_state == TH_STATE_WAITING) {
			T_LOG("Target thread blocked\n");
			break;
		}
		thread_switch(thread_port, SWITCH_OPTION_DEPRESS, 0);
	}
}
473
/*
 * Spin until thread_port's base priority reaches `priority` (evidence the
 * turnstile boost propagated), yielding to `yield_thread` with a
 * depressed-priority switch each iteration.
 */
static void
thread_wait_to_boost(mach_port_t thread_port, mach_port_t yield_thread, int priority)
{
	thread_extended_info_data_t extended_info;
	kern_return_t kr;

	while (1) {
		mach_msg_type_number_t count = THREAD_EXTENDED_INFO_COUNT;
		kr = thread_info(thread_port, THREAD_EXTENDED_INFO,
		    (thread_info_t)&extended_info, &count);

		T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "thread_info");

		if (extended_info.pth_priority >= priority) {
			T_LOG("Target thread boosted\n");
			break;
		}
		thread_switch(yield_thread, SWITCH_OPTION_DEPRESS, 0);
	}
}
494
/*
 * Fake a dispatch_sync() wait on workloop id 30: register a disabled
 * NOTE_WL_SYNC_WAIT knote with owner discovery, so the kernel treats
 * owner_thread as the workloop owner and pushes promote_qos onto it until
 * dispatch_sync_cancel() wakes this waiter. Blocks inside kevent_id().
 */
static void
dispatch_sync_wait(mach_port_t owner_thread, qos_class_t promote_qos)
{
	struct kevent_qos_s kev_err[] = {{ 0 }};
	uint32_t fflags = 0;
	uint64_t mask = 0;
	uint16_t action = 0;
	int r;

	action = EV_ADD | EV_DISABLE;
	fflags = NOTE_WL_SYNC_WAIT | NOTE_WL_DISCOVER_OWNER;

	dispatch_sync_owner = owner_thread;

	struct kevent_qos_s kev[] = {{
		.ident = mach_thread_self(),
		.filter = EVFILT_WORKLOOP,
		.flags = action,
		.fflags = fflags,
		.udata = (uintptr_t) &def_thread_port,
		.qos = (int32_t)_pthread_qos_class_encode(promote_qos, 0, 0),
		.ext[EV_EXTIDX_WL_MASK] = mask,
		.ext[EV_EXTIDX_WL_VALUE] = dispatch_sync_owner,
		.ext[EV_EXTIDX_WL_ADDR] = (uint64_t)&dispatch_sync_owner,
	}};

	/* Setup workloop to fake dispatch sync wait on a workloop */
	/* NOTE(review): r is assigned but never checked; errors would surface
	 * in kev_err — confirm intentional best-effort. */
	r = kevent_id(30, kev, 1, kev_err, 1, NULL,
	    NULL, KEVENT_FLAG_WORKLOOP | KEVENT_FLAG_ERROR_EVENTS);
	T_QUIET; T_LOG("dispatch_sync_wait returned\n");
}
526
/*
 * Counterpart of dispatch_sync_wait(): delete the sync-wait knote on
 * workloop id 30 with NOTE_WL_SYNC_WAKE | NOTE_WL_END_OWNERSHIP, waking the
 * fake dispatch_sync waiter (identified by def_thread_port) and dropping
 * the ownership push on owner_thread.
 */
static void
dispatch_sync_cancel(mach_port_t owner_thread, qos_class_t promote_qos)
{
	struct kevent_qos_s kev_err[] = {{ 0 }};
	uint32_t fflags = 0;
	uint64_t mask = 0;
	uint16_t action = 0;
	int r;

	action = EV_DELETE | EV_ENABLE;
	fflags = NOTE_WL_SYNC_WAKE | NOTE_WL_END_OWNERSHIP;

	dispatch_sync_owner = owner_thread;

	struct kevent_qos_s kev[] = {{
		.ident = def_thread_port,
		.filter = EVFILT_WORKLOOP,
		.flags = action,
		.fflags = fflags,
		.udata = (uintptr_t) &def_thread_port,
		.qos = (int32_t)_pthread_qos_class_encode(promote_qos, 0, 0),
		.ext[EV_EXTIDX_WL_MASK] = mask,
		.ext[EV_EXTIDX_WL_VALUE] = dispatch_sync_owner,
		.ext[EV_EXTIDX_WL_ADDR] = (uint64_t)&dispatch_sync_owner,
	}};

	/* Setup workloop to fake dispatch sync wake on a workloop */
	/* NOTE(review): r is assigned but never checked, same as in
	 * dispatch_sync_wait() — confirm intentional. */
	r = kevent_id(30, kev, 1, kev_err, 1, NULL,
	    NULL, KEVENT_FLAG_WORKLOOP | KEVENT_FLAG_ERROR_EVENTS);
	T_QUIET; T_LOG("dispatch_sync_cancel returned\n");
}
558
/*
 * Final hop of the chain: raise this thread to fixed priority 60, wait for
 * the IN thread to be boosted to 46 (proof the multi-hop push propagated),
 * optionally start spinner load, then time how long acquiring the UI lock
 * takes and exit the whole client process on success.
 */
static void *
thread_at_sixty(void *arg __unused)
{
	int policy;
	struct sched_param param;
	int ret;
	/* Only initialized/used when spin_for_ever is set below. */
	void *load_token;
	uint64_t before_lock_time, after_lock_time;

	sixty_thread_port = mach_thread_self();

	set_thread_name(__FUNCTION__);

	/* Change our priority to 60 */
	ret = pthread_getschedparam(pthread_self(), &policy, &param);
	T_QUIET; T_ASSERT_MACH_SUCCESS(ret, "pthread_getschedparam");

	param.sched_priority = 60;

	ret = pthread_setschedparam(pthread_self(), policy, &param);
	T_QUIET; T_ASSERT_MACH_SUCCESS(ret, "pthread_setschedparam");

	ret = pthread_getschedparam(pthread_self(), &policy, &param);
	T_QUIET; T_ASSERT_MACH_SUCCESS(ret, "pthread_getschedparam");

	T_LOG("My priority is %d", param.sched_priority);

	thread_wait_to_boost(in_thread_port, ui_thread_port, 46);

	if (spin_for_ever) {
		/* Schedule load at Default */
		sched_create_load_at_qos(QOS_CLASS_DEFAULT, &load_token);
	}

	T_LOG("Thread at priority 60 trying to acquire UI lock");

	before_lock_time = mach_absolute_time();
	ull_lock(&lock_UI, 3, UL_UNFAIR_LOCK, 0);
	after_lock_time = mach_absolute_time();

	T_QUIET; T_LOG("The time for priority 60 thread to acquire lock was %llu \n",
	    (after_lock_time - before_lock_time));
	/* Success: acquiring the lock ends the test for this client. */
	exit(0);
}
603
/*
 * UI hop: take the UI lock, wait for the DEF thread to be boosted to 37,
 * spawn the priority-60 thread, then block on the IN lock (extending the
 * chain downward) and release the UI lock once it is acquired.
 */
static void *
thread_at_ui(void *arg __unused)
{
	ui_thread_port = mach_thread_self();

	set_thread_name(__FUNCTION__);

	/* Grab the first ulock */
	ull_lock(&lock_UI, 2, UL_UNFAIR_LOCK, 0);

	thread_wait_to_boost(def_thread_port, in_thread_port, 37);
	thread_create_at_qos(QOS_CLASS_USER_INTERACTIVE, thread_at_sixty);

	T_LOG("Thread at UI priority trying to acquire IN lock");
	ull_lock(&lock_IN, 2, UL_UNFAIR_LOCK, 0);
	ull_unlock(&lock_UI, 2, UL_UNFAIR_LOCK, 0);
	return NULL;
}
622
/*
 * IN hop: take the IN lock, wait for the main (maintenance) thread to be
 * boosted to 31, spawn the UI thread, then block on the DEF lock and
 * release the IN lock once it is acquired.
 */
static void *
thread_at_in(void *arg __unused)
{
	in_thread_port = mach_thread_self();

	set_thread_name(__FUNCTION__);

	/* Grab the first ulock */
	ull_lock(&lock_IN, 2, UL_UNFAIR_LOCK, 0);

	T_LOG("Thread at IN priority got first lock ");

	thread_wait_to_boost(main_thread_port, def_thread_port, 31);

	/* Create a new thread at QOS_CLASS_USER_INTERACTIVE qos */
	thread_create_at_qos(QOS_CLASS_USER_INTERACTIVE, thread_at_ui);

	T_LOG("Thread at IN priority trying to acquire default lock");
	ull_lock(&lock_DEF, 1, UL_UNFAIR_LOCK, 0);
	ull_unlock(&lock_IN, 2, UL_UNFAIR_LOCK, 0);
	return NULL;
}
645
/*
 * DEFAULT hop: take the DEF lock, wait for the main thread to block in its
 * sync receive, spawn the IN thread, then park in a fake dispatch-sync wait
 * on the main thread (linking ulock and workloop hops) before unlocking.
 */
static void *
thread_at_default(void *arg __unused)
{
	def_thread_port = mach_thread_self();

	set_thread_name(__FUNCTION__);

	/* Grab the first ulock */
	ull_lock(&lock_DEF, 1, UL_UNFAIR_LOCK, 0);

	T_LOG("Thread at DEFAULT priority got first lock ");

	thread_wait_to_block(main_thread_port);

	/* Create a new thread at QOS_CLASS_USER_INITIATED qos */
	thread_create_at_qos(QOS_CLASS_USER_INITIATED, thread_at_in);

	T_LOG("Thread at Default priority trying to wait on dispatch sync for maintenance thread");
	dispatch_sync_wait(main_thread_port, QOS_CLASS_DEFAULT);
	ull_unlock(&lock_DEF, 1, UL_UNFAIR_LOCK, 0);
	return NULL;
}
668
/*
 * First hop (client root thread at MAINTENANCE QoS): look up the server
 * port, send one async and one sync message, spawn the DEFAULT thread, then
 * block in a sync receive so the kernel can push on this thread through the
 * special reply port; afterwards wake the DEF thread's fake dispatch wait.
 */
static void *
thread_at_maintenance(void *arg __unused)
{
	mach_port_t qos_send_port;
	mach_port_t special_reply_port;

	main_thread_port = mach_thread_self();

	set_thread_name(__FUNCTION__);

	kern_return_t kr = bootstrap_look_up(bootstrap_port,
	    TURNSTILE_MULTIHOP_SERVICE_NAME, &qos_send_port);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "client bootstrap_look_up");

	special_reply_port = thread_get_special_reply_port();
	T_QUIET; T_ASSERT_TRUE(MACH_PORT_VALID(special_reply_port), "get_thread_special_reply_port");

	/* Become the dispatch sync owner, dispatch_sync_owner will be set in dispatch_sync_wait function */

	/* Send an async message */
	send(qos_send_port, MACH_PORT_NULL, MACH_PORT_NULL,
	    (uint32_t)_pthread_qos_class_encode(QOS_CLASS_MAINTENANCE, 0, 0), 0);

	/* Send a sync message */
	send(qos_send_port, special_reply_port, MACH_PORT_NULL,
	    (uint32_t)_pthread_qos_class_encode(QOS_CLASS_MAINTENANCE, 0, 0), 0);

	/* Create a new thread at QOS_CLASS_DEFAULT qos */
	thread_create_at_qos(QOS_CLASS_DEFAULT, thread_at_default);

	/* Block on Sync IPC */
	receive(special_reply_port, qos_send_port);

	/* Wake the DEF thread out of its fake dispatch-sync wait. */
	dispatch_sync_cancel(def_thread_port, QOS_CLASS_DEFAULT);
	return NULL;
}
705
/*
 * Client helper: repeatedly fork a child that builds the whole 4-thread
 * chain; the child exits from thread_at_sixty() once the priority-60 thread
 * acquires the UI lock, and the round-trip time is accumulated as a stat.
 */
T_HELPER_DECL(three_ulock_sync_ipc_hop,
    "Create chain of 4 threads with 3 ulocks and 1 sync IPC at different qos")
{
	dt_stat_time_t roundtrip_stat = dt_stat_time_create("multihop_lock_acquire");

	T_STAT_MEASURE_LOOP(roundtrip_stat) {
		if (fork() == 0) {
			/* Child: start the chain and park; thread_at_sixty() calls exit(0). */
			thread_create_at_qos(QOS_CLASS_MAINTENANCE, thread_at_maintenance);
			sigsuspend(0);
			exit(0);
		}
		wait(NULL);
	}

	dt_stat_finalize(roundtrip_stat);
	T_END;
}
723
724static void
725thread_create_at_qos(qos_class_t qos, void * (*function)(void *))
726{
727 qos_class_t qos_thread;
728 pthread_t thread;
0a7de745 729 pthread_attr_t attr;
d9a64523
A
730 int ret;
731
732 ret = setpriority(PRIO_DARWIN_ROLE, 0, PRIO_DARWIN_ROLE_UI_FOCAL);
733 if (ret != 0) {
734 T_LOG("set priority failed\n");
735 }
736
0a7de745
A
737 pthread_attr_init(&attr);
738 pthread_attr_set_qos_class_np(&attr, qos, 0);
739 pthread_create(&thread, &attr, function, NULL);
d9a64523
A
740
741 T_LOG("pthread created\n");
742 pthread_get_qos_class_np(thread, &qos_thread, NULL);
743}
744
745#pragma mark Mach receive - kevent_qos
746
/*
 * Arm the server side: route workqueue callbacks to the workloop handler,
 * then register `port` with workloop kqueue id 25 so incoming mach messages
 * are delivered to workloop_cb_test_intransit at MAINTENANCE QoS.
 */
static void
expect_kevent_id_recv(mach_port_t port)
{
	int r;

	T_QUIET; T_ASSERT_POSIX_ZERO(_pthread_workqueue_init_with_workloop(
		    worker_cb, event_cb,
		    (pthread_workqueue_function_workloop_t)workloop_cb_test_intransit, 0, 0), NULL);

	struct kevent_qos_s kev[] = {{
		.ident = port,
		.filter = EVFILT_MACHPORT,
		.flags = EV_ADD | EV_UDATA_SPECIFIC | EV_DISPATCH | EV_VANISHED,
		.fflags = (MACH_RCV_MSG | MACH_RCV_LARGE | MACH_RCV_LARGE_IDENTITY |
	    MACH_RCV_TRAILER_ELEMENTS(MACH_RCV_TRAILER_CTX) |
	    MACH_RCV_TRAILER_TYPE(MACH_MSG_TRAILER_FORMAT_0) |
	    MACH_RCV_VOUCHER),
		.data = 1,
		.qos = (int32_t)_pthread_qos_class_encode(QOS_CLASS_MAINTENANCE, 0, 0)
	}};

	struct kevent_qos_s kev_err[] = {{ 0 }};

	/* Setup workloop for mach msg rcv */
	r = kevent_id(25, kev, 1, kev_err, 1, NULL,
	    NULL, KEVENT_FLAG_WORKLOOP | KEVENT_FLAG_ERROR_EVENTS);

	T_QUIET; T_ASSERT_POSIX_SUCCESS(r, "kevent_id");
	T_QUIET; T_ASSERT_EQ(r, 0, "no errors returned from kevent_id");
}
777
/*
 * Server helper: check in with launchd, arm the workloop receive, then
 * park; the workloop handler exits the process after the second message,
 * so reaching the final assert means no message was ever delivered.
 *
 * NOTE(review): sigsuspend(0) passes a NULL mask pointer — confirm this is
 * intended as a "park" idiom here rather than a real signal wait.
 */
T_HELPER_DECL(server_kevent_id,
    "Reply with the QoS that a dispatch source event handler ran with")
{
	expect_kevent_id_recv(get_server_port());
	sigsuspend(0);
	T_ASSERT_FAIL("should receive a message");
}
785
/*
 * Declare a darwintest (as root) that runs `server_name` via launchd and
 * forks `client_name`, waiting for both.
 */
#define TEST_MULTIHOP(server_name, client_name, name) \
	T_DECL(server_kevent_id_##name, \
	    "Event delivery using a kevent_id", \
	    T_META_ASROOT(YES)) \
	{ \
	    run_client_server(server_name, client_name); \
	}

/*
 * Same as TEST_MULTIHOP but with spin_for_ever set, so the priority-60
 * thread also creates spinner load; disabled by default (T_META_ENABLED(FALSE)).
 */
#define TEST_MULTIHOP_SPIN(server_name, client_name, name) \
	T_DECL(server_kevent_id_##name, \
	    "Event delivery using a kevent_id", \
	    T_META_ASROOT(YES), T_META_ENABLED(FALSE)) \
	{ \
	    spin_for_ever = true; \
	    run_client_server(server_name, client_name); \
	    spin_for_ever = false; \
	}

/*
 * Test 1: Test multihop priority boosting with ulocks, dispatch sync and sync IPC.
 *
 * Create thread's at different Qos and acquire a ulock and block on next ulock/dispatch sync
 * creating a sync chain. The last hop the chain is blocked on Sync IPC.
 */
TEST_MULTIHOP("server_kevent_id", "three_ulock_sync_ipc_hop", three_ulock_sync_ipc_hop)

/*
 * Test 2: Test multihop priority boosting with ulocks, dispatch sync and sync IPC.
 *
 * Create thread's at different Qos and acquire a ulock and block on next ulock/dispatch sync
 * creating a sync chain. The last hop the chain is blocked on Sync IPC.
 * Before the last priority 60 thread blocks on ulock, it also starts spinforeverd at priority 31.
 */
TEST_MULTIHOP_SPIN("server_kevent_id", "three_ulock_sync_ipc_hop", three_ulock_sync_ipc_hop_spin)