]> git.saurik.com Git - apple/xnu.git/blame - tests/turnstile_multihop.c
xnu-6153.81.5.tar.gz
[apple/xnu.git] / tests / turnstile_multihop.c
CommitLineData
d9a64523
A
1/*
2 * turnstile_multihop: Tests turnstile and multi hop priority propagation.
3 */
4
5#ifdef T_NAMESPACE
6#undef T_NAMESPACE
7#endif
8
9#include <darwintest.h>
10#include <darwintest_multiprocess.h>
11
12#include <dispatch/dispatch.h>
13#include <pthread.h>
14#include <launch.h>
15#include <mach/mach.h>
16#include <mach/message.h>
17#include <mach/mach_voucher.h>
18#include <pthread/workqueue_private.h>
19#include <voucher/ipc_pthread_priority_types.h>
20#include <servers/bootstrap.h>
21#include <stdlib.h>
22#include <sys/event.h>
23#include <unistd.h>
24#include <crt_externs.h>
25#include <signal.h>
26#include <sys/types.h>
27#include <sys/sysctl.h>
28#include <libkern/OSAtomic.h>
29#include <sys/wait.h>
30
31#include "turnstile_multihop_helper.h"
32
33T_GLOBAL_META(T_META_NAMESPACE("xnu.turnstile_multihop"));
34
35#define HELPER_TIMEOUT_SECS (3000)
36
/*
 * Wire format shared by client and server: a complex Mach message that
 * carries exactly one port descriptor (see send()/receive() below).
 */
struct test_msg {
	mach_msg_header_t header;
	mach_msg_body_t body;
	mach_msg_port_descriptor_t port_descriptor;
};
42
/* Set by the TEST_MULTIHOP_SPIN variant to add CPU load while the chain resolves. */
static boolean_t spin_for_ever = false;

/* Forward declarations. */
static void
thread_create_at_qos(qos_class_t qos, void * (*function)(void *));
static uint64_t
nanoseconds_to_absolutetime(uint64_t nanoseconds);
static int
sched_create_load_at_qos(qos_class_t qos, void **load_token);
static int
sched_terminate_load(void *load_token) __unused;
static void do_work(int num);
static void
dispatch_sync_cancel(mach_port_t owner_thread, qos_class_t promote_qos);

static void *sched_load_thread(void *);

/* Shared state handed to each CPU-load spinner thread. */
struct load_token_context {
	volatile int threads_should_exit; /* set to 1 to make spinners exit */
	int thread_count;                 /* number of entries in threads[] */
	qos_class_t qos;                  /* QoS class the spinners run at */
	pthread_t *threads;               /* joinable spinner thread handles */
};

/* Cached Mach timebase; initialized once through sched_mti_once_control. */
static struct mach_timebase_info sched_mti;
static pthread_once_t sched_mti_once_control = PTHREAD_ONCE_INIT;
68
0a7de745
A
69static void
70sched_mti_init(void)
d9a64523
A
71{
72 mach_timebase_info(&sched_mti);
73}
74uint64_t
75nanoseconds_to_absolutetime(uint64_t nanoseconds)
76{
77 pthread_once(&sched_mti_once_control, sched_mti_init);
78
79 return (uint64_t)(nanoseconds * (((double)sched_mti.denom) / ((double)sched_mti.numer)));
80}
81
/*
 * Spin up one CPU-spinner thread per core at the given QoS class.
 * On success returns 0 and stores an opaque token in *load_token, which
 * must later be passed to sched_terminate_load() to stop and join the
 * spinners. Returns an errno value on failure.
 */
static int
sched_create_load_at_qos(qos_class_t qos, void **load_token)
{
	struct load_token_context *context = NULL;
	int ret;
	int ncpu;
	size_t ncpu_size = sizeof(ncpu);
	int nthreads;
	int i;
	pthread_attr_t attr;

	ret = sysctlbyname("hw.ncpu", &ncpu, &ncpu_size, NULL, 0);
	if (ret == -1) {
		T_LOG("sysctlbyname(hw.ncpu)");
		return errno;
	}

	T_QUIET; T_LOG("%s: Detected %d CPUs\n", __FUNCTION__, ncpu);

	nthreads = ncpu;
	T_QUIET; T_LOG("%s: Will create %d threads\n", __FUNCTION__, nthreads);

	ret = pthread_attr_init(&attr);
	T_QUIET; T_ASSERT_MACH_SUCCESS(ret, "pthread_attr_init");

	/* pthread_attr_set_qos_class_np is weak-linked; skip when unavailable. */
	if (&pthread_attr_set_qos_class_np) {
		ret = pthread_attr_set_qos_class_np(&attr, qos, 0);
		T_QUIET; T_ASSERT_MACH_SUCCESS(ret, "pthread_attr_set_qos_class_np");
	}

	context = calloc(1, sizeof(*context));
	if (context == NULL) {
		T_QUIET; T_LOG("calloc returned error"); return ENOMEM;
	}

	context->threads_should_exit = 0;
	context->thread_count = nthreads;
	context->qos = qos;
	context->threads = calloc((unsigned int)nthreads, sizeof(pthread_t));
	/*
	 * Fix: the original dereferenced context->threads without checking the
	 * allocation result; bail out (without leaking context) instead.
	 */
	if (context->threads == NULL) {
		free(context);
		T_QUIET; T_LOG("calloc returned error");
		return ENOMEM;
	}

	OSMemoryBarrier();

	for (i = 0; i < nthreads; i++) {
		ret = pthread_create(&context->threads[i], &attr, sched_load_thread, context);
		T_QUIET; T_ASSERT_MACH_SUCCESS(ret, "pthread_create");
		T_QUIET; T_LOG("%s: Created thread %d (%p)\n", __FUNCTION__, i, (void *)context->threads[i]);
	}

	ret = pthread_attr_destroy(&attr);
	T_QUIET; T_ASSERT_MACH_SUCCESS(ret, "pthread_attr_destroy");

	*load_token = context;

	return 0;
}
137
138static void *
139sched_load_thread(void *arg)
140{
141 struct load_token_context *context = (struct load_token_context *)arg;
142
143 T_QUIET; T_LOG("%s: Thread started %p\n", __FUNCTION__, (void *)pthread_self());
144
145 while (!context->threads_should_exit) {
146 uint64_t start = mach_absolute_time();
147 uint64_t end = start + nanoseconds_to_absolutetime(900ULL * NSEC_PER_MSEC);
148
0a7de745
A
149 while ((mach_absolute_time() < end) && !context->threads_should_exit) {
150 ;
151 }
d9a64523
A
152 }
153
154 T_QUIET; T_LOG("%s: Thread terminating %p\n", __FUNCTION__, (void *)pthread_self());
155
156 return NULL;
157}
158
159static int
160sched_terminate_load(void *load_token)
161{
162 int ret;
163 int i;
164 struct load_token_context *context = (struct load_token_context *)load_token;
165
166 context->threads_should_exit = 1;
167 OSMemoryBarrier();
168
0a7de745 169 for (i = 0; i < context->thread_count; i++) {
d9a64523
A
170 T_QUIET; T_LOG("%s: Joining thread %d (%p)\n", __FUNCTION__, i, (void *)context->threads[i]);
171 ret = pthread_join(context->threads[i], NULL);
172 T_QUIET; T_ASSERT_MACH_SUCCESS(ret, "pthread_join");
173 }
174
175 free(context->threads);
176 free(context);
177
178 return 0;
179}
180
/* Burn CPU by finding the first `num` primes via naive trial division. */
static void
do_work(int num)
{
	/* volatile keeps the optimizer from deleting the busy-work loop */
	volatile int candidate = 3, found, divisor;

	for (found = 2; found <= num;) {
		for (divisor = 2; divisor <= candidate; divisor++) {
			if (candidate % divisor == 0) {
				break;
			}
		}
		if (divisor == candidate) {
			found++;
		}
		candidate++;
	}
}
200
201#pragma mark pthread callbacks
202
/* Workqueue worker entry point: this test must never get plain worker threads. */
static void
worker_cb(pthread_priority_t __unused priority)
{
	T_FAIL("a worker thread was created");
}
208
/* kevent entry point: all events must be delivered via the workloop callback. */
static void
event_cb(void ** __unused events, int * __unused nevents)
{
	T_FAIL("a kevent routine was called instead of workloop");
}
214
215static uint32_t
216get_user_promotion_basepri(void)
217{
218 mach_msg_type_number_t count = THREAD_POLICY_STATE_COUNT;
219 struct thread_policy_state thread_policy;
220 boolean_t get_default = FALSE;
221 mach_port_t thread_port = pthread_mach_thread_np(pthread_self());
222
223 kern_return_t kr = thread_policy_get(thread_port, THREAD_POLICY_STATE,
0a7de745 224 (thread_policy_t)&thread_policy, &count, &get_default);
d9a64523
A
225 T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "thread_policy_get");
226 return thread_policy.thps_user_promotion_basepri;
227}
228
/* Workloop identifiers: one for the bootstrap listener, one per connection. */
#define LISTENER_WLID 0x100
#define CONN_WLID 0x200

/* Receive options shared by every EVFILT_MACHPORT registration below. */
static uint32_t
register_port_options(void)
{
	return MACH_RCV_MSG | MACH_RCV_LARGE | MACH_RCV_LARGE_IDENTITY |
	       MACH_RCV_TRAILER_ELEMENTS(MACH_RCV_TRAILER_CTX) |
	       MACH_RCV_TRAILER_TYPE(MACH_MSG_TRAILER_FORMAT_0) |
	       MACH_RCV_VOUCHER;
}
240
/*
 * Attach `port` to the workloop identified by `wlid` as an
 * EVFILT_MACHPORT knote registered at MAINTENANCE QoS.
 */
static void
register_port(uint64_t wlid, mach_port_t port)
{
	struct kevent_qos_s changes = {
		.ident = port,
		.filter = EVFILT_MACHPORT,
		.flags = EV_ADD | EV_UDATA_SPECIFIC | EV_DISPATCH | EV_VANISHED,
		.fflags = register_port_options(),
		.data = 1,
		.qos = (int32_t)_pthread_qos_class_encode(QOS_CLASS_MAINTENANCE, 0, 0)
	};
	struct kevent_qos_s errors = { 0 };
	int rc;

	/* Register the knote; errors come back through `errors`. */
	rc = kevent_id(wlid, &changes, 1, &errors, 1, NULL,
	    NULL, KEVENT_FLAG_WORKLOOP | KEVENT_FLAG_ERROR_EVENTS);

	T_QUIET; T_ASSERT_POSIX_SUCCESS(rc, "kevent_id");
	T_QUIET; T_ASSERT_EQ(rc, 0, "no errors returned from kevent_id");
}
264
/*
 * Basic WL handler callback, it checks the
 * effective Qos of the servicer thread.
 *
 * Called for both the listener workloop (first, with the checkin message
 * carrying the connection receive right) and the connection workloop
 * (second, with the sync message that holds the waiter).
 */
static void
workloop_cb_test_intransit(uint64_t *workloop_id, void **eventslist, int *events)
{
	static bool got_peer;

	struct kevent_qos_s *kev = eventslist[0];
	mach_msg_header_t *hdr;
	struct test_msg *tmsg;

	T_LOG("Workloop handler %s called. Received message on 0x%llx",
	    __func__, *workloop_id);

	/* Skip the test if we can't check Qos */
	if (geteuid() != 0) {
		T_SKIP("kevent_qos test requires root privileges to run.");
	}

	T_QUIET; T_ASSERT_EQ(*events, 1, "should have one event");

	/* ext[0] holds the kernel-provided pointer to the received message. */
	hdr = (mach_msg_header_t *)kev->ext[0];
	T_ASSERT_NOTNULL(hdr, "has a message");
	T_ASSERT_EQ(hdr->msgh_size, (uint32_t)sizeof(struct test_msg), "of the right size");
	tmsg = (struct test_msg *)hdr;

	switch (*workloop_id) {
	case LISTENER_WLID:
		T_LOG("Registering peer connection");
		T_QUIET; T_ASSERT_FALSE(got_peer, "Should not have seen peer yet");
		got_peer = true;
		break;

	case CONN_WLID:
		T_LOG("Received message on peer");
		break;

	default:
		T_FAIL("???");
	}

	/* NOTE(review): the sleep appears intended to let the client-side chain
	 * build up and push on this servicer before QoS is sampled — confirm. */
	sleep(5);
	T_LOG("Do some CPU work.");
	do_work(5000);

	/* Check if the override now is IN + 60 boost */
	T_EXPECT_EFFECTIVE_QOS_EQ(QOS_CLASS_USER_INITIATED,
	    "dispatch_source event handler QoS should be QOS_CLASS_USER_INITIATED");
	T_EXPECT_EQ(get_user_promotion_basepri(), 60u,
	    "dispatch_source event handler should be overridden at 60");

	if (*workloop_id == LISTENER_WLID) {
		/* The checkin message moved the connection receive right to us:
		 * register it with the connection workloop, then re-arm this knote. */
		register_port(CONN_WLID, tmsg->port_descriptor.name);

		kev->flags = EV_ADD | EV_ENABLE | EV_UDATA_SPECIFIC | EV_DISPATCH | EV_VANISHED;
		kev->fflags = register_port_options();
		kev->ext[0] = kev->ext[1] = kev->ext[2] = kev->ext[3] = 0;
		*events = 1;
	} else {
		/* this will unblock the waiter */
		mach_msg_destroy(hdr);
		*events = 0;
	}
}
331
332static void
333run_client_server(const char *server_name, const char *client_name)
334{
335 dt_helper_t helpers[] = {
336 dt_launchd_helper_domain("com.apple.xnu.test.turnstile_multihop.plist",
0a7de745 337 server_name, NULL, LAUNCH_SYSTEM_DOMAIN),
d9a64523
A
338 dt_fork_helper(client_name)
339 };
340 dt_run_helpers(helpers, 2, HELPER_TIMEOUT_SECS);
341}
342
343#pragma mark Mach receive
344
345#define TURNSTILE_MULTIHOP_SERVICE_NAME "com.apple.xnu.test.turnstile_multihop"
346
347static mach_port_t
348get_server_port(void)
349{
350 mach_port_t port;
351 kern_return_t kr = bootstrap_check_in(bootstrap_port,
0a7de745 352 TURNSTILE_MULTIHOP_SERVICE_NAME, &port);
d9a64523
A
353 T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "server bootstrap_check_in");
354 return port;
355}
356
357static mach_voucher_t
358create_pthpriority_voucher(mach_msg_priority_t qos)
359{
360 char voucher_buf[sizeof(mach_voucher_attr_recipe_data_t) + sizeof(ipc_pthread_priority_value_t)];
361
362 mach_voucher_t voucher = MACH_PORT_NULL;
363 kern_return_t ret;
364 ipc_pthread_priority_value_t ipc_pthread_priority_value =
0a7de745 365 (ipc_pthread_priority_value_t)qos;
d9a64523
A
366
367 mach_voucher_attr_raw_recipe_array_t recipes;
368 mach_voucher_attr_raw_recipe_size_t recipe_size = 0;
369 mach_voucher_attr_recipe_t recipe =
0a7de745 370 (mach_voucher_attr_recipe_t)&voucher_buf[recipe_size];
d9a64523
A
371
372 recipe->key = MACH_VOUCHER_ATTR_KEY_PTHPRIORITY;
373 recipe->command = MACH_VOUCHER_ATTR_PTHPRIORITY_CREATE;
374 recipe->previous_voucher = MACH_VOUCHER_NULL;
375 memcpy((char *)&recipe->content[0], &ipc_pthread_priority_value, sizeof(ipc_pthread_priority_value));
376 recipe->content_size = sizeof(ipc_pthread_priority_value_t);
377 recipe_size += sizeof(mach_voucher_attr_recipe_data_t) + recipe->content_size;
378
379 recipes = (mach_voucher_attr_raw_recipe_array_t)&voucher_buf[0];
380
381 ret = host_create_mach_voucher(mach_host_self(),
0a7de745
A
382 recipes,
383 recipe_size,
384 &voucher);
d9a64523
A
385
386 T_QUIET; T_ASSERT_MACH_SUCCESS(ret, "client host_create_mach_voucher");
387 return voucher;
388}
389
/*
 * Send one test_msg to `send_port`.
 *  - reply_port: when non-NULL, attached as a make-send-once reply right and
 *    the send is flagged MACH_SEND_SYNC_OVERRIDE (sync-IPC push).
 *  - msg_port: when non-NULL, its receive right is moved in the descriptor;
 *    when MACH_PORT_NULL the descriptor count is zeroed.
 *  - qos: encoded pthread priority; packed into a voucher only when the
 *    caller passes MACH_SEND_SYNC_USE_THRPRI in `options`.
 * Sends with a 10 second timeout and asserts success.
 */
static void
send(
	mach_port_t send_port,
	mach_port_t reply_port,
	mach_port_t msg_port,
	mach_msg_priority_t qos,
	mach_msg_option_t options)
{
	kern_return_t ret = 0;

	struct test_msg send_msg = {
		.header = {
			.msgh_remote_port = send_port,
			.msgh_local_port = reply_port,
			.msgh_bits = MACH_MSGH_BITS_SET(MACH_MSG_TYPE_COPY_SEND,
	    reply_port ? MACH_MSG_TYPE_MAKE_SEND_ONCE : 0,
	    MACH_MSG_TYPE_MOVE_SEND,
	    MACH_MSGH_BITS_COMPLEX),
			.msgh_id = 0x100,
			.msgh_size = sizeof(send_msg),
		},
		.body = {
			.msgh_descriptor_count = 1,
		},
		.port_descriptor = {
			.name = msg_port,
			.disposition = MACH_MSG_TYPE_MOVE_RECEIVE,
			.type = MACH_MSG_PORT_DESCRIPTOR,
		},
	};

	if (options & MACH_SEND_SYNC_USE_THRPRI) {
		send_msg.header.msgh_voucher_port = create_pthpriority_voucher(qos);
	}

	if (msg_port == MACH_PORT_NULL) {
		send_msg.body.msgh_descriptor_count = 0;
	}

	ret = mach_msg(&(send_msg.header),
	    MACH_SEND_MSG |
	    MACH_SEND_TIMEOUT |
	    MACH_SEND_OVERRIDE |
	    ((reply_port ? MACH_SEND_SYNC_OVERRIDE : 0) | options),
	    send_msg.header.msgh_size,
	    0,
	    MACH_PORT_NULL,
	    10000,
	    0);

	T_QUIET; T_ASSERT_MACH_SUCCESS(ret, "client mach_msg");
}
442
443static void
444receive(
445 mach_port_t rcv_port,
446 mach_port_t notify_port)
447{
448 kern_return_t ret = 0;
449
450 struct {
451 mach_msg_header_t header;
452 mach_msg_body_t body;
453 mach_msg_port_descriptor_t port_descriptor;
454 } rcv_msg = {
0a7de745 455 .header =
d9a64523 456 {
0a7de745
A
457 .msgh_remote_port = MACH_PORT_NULL,
458 .msgh_local_port = rcv_port,
459 .msgh_size = sizeof(rcv_msg),
d9a64523
A
460 },
461 };
462
463 T_LOG("Client: Starting sync receive\n");
464
465 ret = mach_msg(&(rcv_msg.header),
0a7de745
A
466 MACH_RCV_MSG |
467 MACH_RCV_SYNC_WAIT,
468 0,
469 rcv_msg.header.msgh_size,
470 rcv_port,
471 0,
472 notify_port);
d9a64523
A
473}
474
/* ulocks forming the hops of the priority chain (see thread_at_* below). */
static lock_t lock_DEF;
static lock_t lock_IN;
static lock_t lock_UI;

/* Thread ports published by each helper thread as it starts. */
static mach_port_t main_thread_port;
static mach_port_t def_thread_port;
static mach_port_t in_thread_port;
static mach_port_t ui_thread_port;
static mach_port_t sixty_thread_port;

/* Current owner for the fake dispatch-sync workloop (read by the kernel
 * through EV_EXTIDX_WL_ADDR in dispatch_sync_wait/cancel). */
static uint64_t dispatch_sync_owner;
486
0a7de745
A
487static int
488get_pri(thread_t thread_port)
489{
d9a64523
A
490 kern_return_t kr;
491
492 thread_extended_info_data_t extended_info;
493 mach_msg_type_number_t count = THREAD_EXTENDED_INFO_COUNT;
494 kr = thread_info(thread_port, THREAD_EXTENDED_INFO,
0a7de745 495 (thread_info_t)&extended_info, &count);
d9a64523
A
496
497 T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "thread_info");
498
499 return extended_info.pth_curpri;
500}
501
502static void
503set_thread_name(const char *fn_name)
504{
505 char name[50] = "";
506
507 thread_t thread_port = pthread_mach_thread_np(pthread_self());
508
509 int pri = get_pri(thread_port);
510
511 snprintf(name, sizeof(name), "%s at pri %2d", fn_name, pri);
512 pthread_setname_np(name);
513}
514
515static void
516thread_wait_to_block(mach_port_t thread_port)
517{
518 thread_extended_info_data_t extended_info;
519 kern_return_t kr;
520
521 while (1) {
522 mach_msg_type_number_t count = THREAD_EXTENDED_INFO_COUNT;
523 kr = thread_info(thread_port, THREAD_EXTENDED_INFO,
0a7de745 524 (thread_info_t)&extended_info, &count);
d9a64523
A
525
526 T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "thread_info");
527
528 if (extended_info.pth_run_state == TH_STATE_WAITING) {
529 T_LOG("Target thread blocked\n");
530 break;
531 }
532 thread_switch(thread_port, SWITCH_OPTION_DEPRESS, 0);
533 }
534}
535
536static void
537thread_wait_to_boost(mach_port_t thread_port, mach_port_t yield_thread, int priority)
538{
539 thread_extended_info_data_t extended_info;
540 kern_return_t kr;
541
542 while (1) {
543 mach_msg_type_number_t count = THREAD_EXTENDED_INFO_COUNT;
544 kr = thread_info(thread_port, THREAD_EXTENDED_INFO,
0a7de745 545 (thread_info_t)&extended_info, &count);
d9a64523
A
546
547 T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "thread_info");
548
549 if (extended_info.pth_priority >= priority) {
550 T_LOG("Target thread boosted\n");
551 break;
552 }
553 thread_switch(yield_thread, SWITCH_OPTION_DEPRESS, 0);
554 }
555}
556
/*
 * Fake a dispatch_sync() wait on a workloop: park this thread with
 * NOTE_WL_SYNC_WAIT and let the kernel discover `owner_thread` as owner
 * (via the dispatch_sync_owner address), pushing `promote_qos` onto it.
 * Returns only once dispatch_sync_cancel() wakes the waiter.
 */
static void
dispatch_sync_wait(mach_port_t owner_thread, qos_class_t promote_qos)
{
	struct kevent_qos_s kev_err[] = {{ 0 }};
	uint32_t fflags = 0;
	uint64_t mask = 0;
	uint16_t action = 0;
	int r;

	action = EV_ADD | EV_DISABLE;
	fflags = NOTE_WL_SYNC_WAIT | NOTE_WL_DISCOVER_OWNER;

	dispatch_sync_owner = owner_thread;

	struct kevent_qos_s kev[] = {{
		.ident = mach_thread_self(),
		.filter = EVFILT_WORKLOOP,
		.flags = action,
		.fflags = fflags,
		.udata = (uintptr_t) &def_thread_port,
		.qos = (int32_t)_pthread_qos_class_encode(promote_qos, 0, 0),
		.ext[EV_EXTIDX_WL_MASK] = mask,
		.ext[EV_EXTIDX_WL_VALUE] = dispatch_sync_owner,
		.ext[EV_EXTIDX_WL_ADDR] = (uint64_t)&dispatch_sync_owner,
	}};

	/* Setup workloop to fake dispatch sync wait on a workloop */
	r = kevent_id(30, kev, 1, kev_err, 1, NULL,
	    NULL, KEVENT_FLAG_WORKLOOP | KEVENT_FLAG_ERROR_EVENTS);
	/* Fix: surface the kevent_id result instead of silently discarding it. */
	T_QUIET; T_LOG("dispatch_sync_wait returned %d\n", r);
}
588
/*
 * Counterpart to dispatch_sync_wait(): wake the parked waiter with
 * NOTE_WL_SYNC_WAKE and end `owner_thread`'s ownership of the workloop.
 */
static void
dispatch_sync_cancel(mach_port_t owner_thread, qos_class_t promote_qos)
{
	struct kevent_qos_s kev_err[] = {{ 0 }};
	uint32_t fflags = 0;
	uint64_t mask = 0;
	uint16_t action = 0;
	int r;

	action = EV_DELETE | EV_ENABLE;
	fflags = NOTE_WL_SYNC_WAKE | NOTE_WL_END_OWNERSHIP;

	dispatch_sync_owner = owner_thread;

	struct kevent_qos_s kev[] = {{
		.ident = def_thread_port,
		.filter = EVFILT_WORKLOOP,
		.flags = action,
		.fflags = fflags,
		.udata = (uintptr_t) &def_thread_port,
		.qos = (int32_t)_pthread_qos_class_encode(promote_qos, 0, 0),
		.ext[EV_EXTIDX_WL_MASK] = mask,
		.ext[EV_EXTIDX_WL_VALUE] = dispatch_sync_owner,
		.ext[EV_EXTIDX_WL_ADDR] = (uint64_t)&dispatch_sync_owner,
	}};

	/* Setup workloop to fake dispatch sync wake on a workloop */
	r = kevent_id(30, kev, 1, kev_err, 1, NULL,
	    NULL, KEVENT_FLAG_WORKLOOP | KEVENT_FLAG_ERROR_EVENTS);
	/* Fix: surface the kevent_id result instead of silently discarding it. */
	T_QUIET; T_LOG("dispatch_sync_cancel returned %d\n", r);
}
620
/*
 * Top hop: a kernel-priority-60 thread that blocks on the UI lock,
 * completing the inversion chain. Ends the test (T_END) once the lock
 * is finally acquired.
 */
static void *
thread_at_sixty(void *arg __unused)
{
	int policy;
	struct sched_param param;
	int ret;
	void *load_token;
	uint64_t before_lock_time, after_lock_time;

	sixty_thread_port = mach_thread_self();

	set_thread_name(__FUNCTION__);

	/* Change our priority to 60 */
	ret = pthread_getschedparam(pthread_self(), &policy, &param);
	T_QUIET; T_ASSERT_MACH_SUCCESS(ret, "pthread_getschedparam");

	param.sched_priority = 60;

	ret = pthread_setschedparam(pthread_self(), policy, &param);
	T_QUIET; T_ASSERT_MACH_SUCCESS(ret, "pthread_setschedparam");

	ret = pthread_getschedparam(pthread_self(), &policy, &param);
	T_QUIET; T_ASSERT_MACH_SUCCESS(ret, "pthread_getschedparam");

	T_LOG("My priority is %d", param.sched_priority);

	/* Wait until the IN thread has been pushed to 46 by the chain below. */
	thread_wait_to_boost(in_thread_port, ui_thread_port, 46);

	if (spin_for_ever) {
		/* Schedule load at Default */
		sched_create_load_at_qos(QOS_CLASS_DEFAULT, &load_token);
	}

	T_LOG("Thread at priority 60 trying to acquire UI lock");

	before_lock_time = mach_absolute_time();
	ull_lock(&lock_UI, 3, UL_UNFAIR_LOCK, 0);
	after_lock_time = mach_absolute_time();

	T_QUIET; T_LOG("The time for priority 60 thread to acquire lock was %llu \n",
	    (after_lock_time - before_lock_time));
	T_END;
}
665
/*
 * UI hop: holds lock_UI, spawns the priority-60 thread once the DEFAULT
 * thread has been boosted to 37, then blocks on lock_IN.
 */
static void *
thread_at_ui(void *arg __unused)
{
	ui_thread_port = mach_thread_self();

	set_thread_name(__FUNCTION__);

	/* Grab the first ulock */
	ull_lock(&lock_UI, 2, UL_UNFAIR_LOCK, 0);

	thread_wait_to_boost(def_thread_port, in_thread_port, 37);
	thread_create_at_qos(QOS_CLASS_USER_INTERACTIVE, thread_at_sixty);

	T_LOG("Thread at UI priority trying to acquire IN lock");
	ull_lock(&lock_IN, 2, UL_UNFAIR_LOCK, 0);
	ull_unlock(&lock_UI, 2, UL_UNFAIR_LOCK, 0);
	return NULL;
}
684
/*
 * IN hop: holds lock_IN, spawns the UI thread once the maintenance thread
 * has been boosted to 31, then blocks on lock_DEF.
 */
static void *
thread_at_in(void *arg __unused)
{
	in_thread_port = mach_thread_self();

	set_thread_name(__FUNCTION__);

	/* Grab the first ulock */
	ull_lock(&lock_IN, 2, UL_UNFAIR_LOCK, 0);

	T_LOG("Thread at IN priority got first lock ");

	thread_wait_to_boost(main_thread_port, def_thread_port, 31);

	/* Create a new thread at QOS_CLASS_USER_INTERACTIVE qos */
	thread_create_at_qos(QOS_CLASS_USER_INTERACTIVE, thread_at_ui);

	T_LOG("Thread at IN priority trying to acquire default lock");
	ull_lock(&lock_DEF, 1, UL_UNFAIR_LOCK, 0);
	ull_unlock(&lock_IN, 2, UL_UNFAIR_LOCK, 0);
	return NULL;
}
707
/*
 * DEFAULT hop: holds lock_DEF, spawns the IN thread once the maintenance
 * thread has blocked, then parks in the fake dispatch-sync wait with the
 * maintenance thread recorded as owner.
 */
static void *
thread_at_default(void *arg __unused)
{
	def_thread_port = mach_thread_self();

	set_thread_name(__FUNCTION__);

	/* Grab the first ulock */
	ull_lock(&lock_DEF, 1, UL_UNFAIR_LOCK, 0);

	T_LOG("Thread at DEFAULT priority got first lock ");

	thread_wait_to_block(main_thread_port);

	/* Create a new thread at QOS_CLASS_USER_INITIATED qos */
	thread_create_at_qos(QOS_CLASS_USER_INITIATED, thread_at_in);

	T_LOG("Thread at Default priority trying to wait on dispatch sync for maintenance thread");
	dispatch_sync_wait(main_thread_port, QOS_CLASS_DEFAULT);
	ull_unlock(&lock_DEF, 1, UL_UNFAIR_LOCK, 0);
	return NULL;
}
730
/*
 * Bottom hop: a MAINTENANCE thread that sends a sync message plus an async
 * checkin (moving the connection receive right to the server), spawns the
 * DEFAULT thread, and blocks in sync-IPC receive on its special reply port.
 */
static void *
thread_at_maintenance(void *arg __unused)
{
	mach_port_t service_port;
	mach_port_t conn_port;
	mach_port_t special_reply_port;
	mach_port_options_t opts = {
		.flags = MPO_INSERT_SEND_RIGHT,
	};

	main_thread_port = mach_thread_self();

	set_thread_name(__FUNCTION__);

	kern_return_t kr = bootstrap_look_up(bootstrap_port,
	    TURNSTILE_MULTIHOP_SERVICE_NAME, &service_port);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "client bootstrap_look_up");

	kr = mach_port_construct(mach_task_self(), &opts, 0ull, &conn_port);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "mach_port_construct");

	special_reply_port = thread_get_special_reply_port();
	T_QUIET; T_ASSERT_TRUE(MACH_PORT_VALID(special_reply_port), "get_thread_special_reply_port");

	/* Become the dispatch sync owner, dispatch_sync_owner will be set in dispatch_sync_wait function */

	/* Send a sync message */
	send(conn_port, special_reply_port, MACH_PORT_NULL,
	    (uint32_t)_pthread_qos_class_encode(QOS_CLASS_MAINTENANCE, 0, 0), 0);

	/* Send an async checkin message */
	send(service_port, MACH_PORT_NULL, conn_port,
	    (uint32_t)_pthread_qos_class_encode(QOS_CLASS_MAINTENANCE, 0, 0), 0);

	/* Create a new thread at QOS_CLASS_DEFAULT qos */
	thread_create_at_qos(QOS_CLASS_DEFAULT, thread_at_default);

	/* Block on Sync IPC */
	receive(special_reply_port, service_port);

	T_LOG("received reply");

	dispatch_sync_cancel(def_thread_port, QOS_CLASS_DEFAULT);
	return NULL;
}
776
/*
 * Client helper: start the chain from a MAINTENANCE thread, then park the
 * main thread; the harness tears the helper down when the test ends.
 */
T_HELPER_DECL(three_ulock_sync_ipc_hop,
    "Create chain of 4 threads with 3 ulocks and 1 sync IPC at different qos")
{
	thread_create_at_qos(QOS_CLASS_MAINTENANCE, thread_at_maintenance);
	/* NOTE(review): sigsuspend(0) passes a null sigset pointer — appears to
	 * rely on the call never handing control back; confirm intent. */
	sigsuspend(0);
}
783
784static void
785thread_create_at_qos(qos_class_t qos, void * (*function)(void *))
786{
787 qos_class_t qos_thread;
788 pthread_t thread;
0a7de745 789 pthread_attr_t attr;
d9a64523
A
790 int ret;
791
792 ret = setpriority(PRIO_DARWIN_ROLE, 0, PRIO_DARWIN_ROLE_UI_FOCAL);
793 if (ret != 0) {
794 T_LOG("set priority failed\n");
795 }
796
0a7de745
A
797 pthread_attr_init(&attr);
798 pthread_attr_set_qos_class_np(&attr, qos, 0);
799 pthread_create(&thread, &attr, function, NULL);
d9a64523
A
800
801 T_LOG("pthread created\n");
802 pthread_get_qos_class_np(thread, &qos_thread, NULL);
803}
804
805#pragma mark Mach receive - kevent_qos
806
cb323159
A
/*
 * Server helper: install the workloop callbacks, attach the bootstrap
 * service port to the listener workloop, and wait for messages forever.
 */
T_HELPER_DECL(server_kevent_id,
    "Reply with the QoS that a dispatch source event handler ran with")
{
	T_QUIET; T_ASSERT_POSIX_ZERO(_pthread_workqueue_init_with_workloop(
		    worker_cb, event_cb,
		    (pthread_workqueue_function_workloop_t)workloop_cb_test_intransit, 0, 0), NULL);

	register_port(LISTENER_WLID, get_server_port());
	/* Park the main thread; all messages arrive on workloop threads. */
	sigsuspend(0);
	T_ASSERT_FAIL("should receive a message");
}
818
/* Declare a test that runs `client_name` against the launchd `server_name`. */
#define TEST_MULTIHOP(server_name, client_name, name) \
	T_DECL(server_kevent_id_##name, \
	    "Event delivery using a kevent_id", \
	    T_META_ASROOT(YES)) \
	{ \
	        run_client_server(server_name, client_name); \
	}

/* Variant that also spins CPU load during the run (currently disabled
 * via T_META_ENABLED(FALSE)). */
#define TEST_MULTIHOP_SPIN(server_name, client_name, name) \
	T_DECL(server_kevent_id_##name, \
	    "Event delivery using a kevent_id", \
	    T_META_ASROOT(YES), T_META_ENABLED(FALSE)) \
	{ \
	        spin_for_ever = true; \
	        run_client_server(server_name, client_name); \
	        spin_for_ever = false; \
	}
836
/*
 * Test 1: Test multihop priority boosting with ulocks, dispatch sync and sync IPC.
 *
 * Create threads at different QoS classes, each acquiring a ulock and then blocking
 * on the next ulock/dispatch sync, forming a sync chain. The last hop in the chain
 * is blocked on sync IPC.
 */
843TEST_MULTIHOP("server_kevent_id", "three_ulock_sync_ipc_hop", three_ulock_sync_ipc_hop)
844
/*
 * Test 2: Test multihop priority boosting with ulocks, dispatch sync and sync IPC.
 *
 * Create threads at different QoS classes, each acquiring a ulock and then blocking
 * on the next ulock/dispatch sync, forming a sync chain. The last hop in the chain
 * is blocked on sync IPC.
 * Before the last priority 60 thread blocks on the ulock, it also starts
 * spinforeverd at priority 31.
 */
852TEST_MULTIHOP_SPIN("server_kevent_id", "three_ulock_sync_ipc_hop", three_ulock_sync_ipc_hop_spin)