/*
 * turnstile_multihop: Tests turnstile and multi-hop priority propagation.
 */

#ifdef T_NAMESPACE
#undef T_NAMESPACE
#endif

#include <darwintest.h>
#include <darwintest_multiprocess.h>

#include <dispatch/dispatch.h>
#include <pthread.h>
#include <launch.h>
#include <mach/mach.h>
#include <mach/message.h>
#include <mach/mach_voucher.h>
#include <pthread/workqueue_private.h>
#include <voucher/ipc_pthread_priority_types.h>
#include <servers/bootstrap.h>
#include <stdlib.h>
#include <sys/event.h>
#include <unistd.h>
#include <crt_externs.h>
#include <signal.h>
#include <sys/types.h>
#include <sys/sysctl.h>
#include <libkern/OSAtomic.h>
#include <sys/wait.h>

#include "turnstile_multihop_helper.h"

T_GLOBAL_META(T_META_NAMESPACE("xnu.turnstile_multihop"));

#define HELPER_TIMEOUT_SECS (3000)

static boolean_t spin_for_ever = false;

static void
thread_create_at_qos(qos_class_t qos, void * (*function)(void *));
static uint64_t
nanoseconds_to_absolutetime(uint64_t nanoseconds);
static int
sched_create_load_at_qos(qos_class_t qos, void **load_token);
static int
sched_terminate_load(void *load_token) __unused;
static void do_work(int num);
static void
dispatch_sync_cancel(mach_port_t owner_thread, qos_class_t promote_qos);

static void *sched_load_thread(void *);

struct load_token_context {
	volatile int threads_should_exit;
	int thread_count;
	qos_class_t qos;
	pthread_t *threads;
};

static struct mach_timebase_info sched_mti;
static pthread_once_t sched_mti_once_control = PTHREAD_ONCE_INIT;

static void sched_mti_init(void)
{
	mach_timebase_info(&sched_mti);
}

uint64_t
nanoseconds_to_absolutetime(uint64_t nanoseconds)
{
	pthread_once(&sched_mti_once_control, sched_mti_init);

	return (uint64_t)(nanoseconds * (((double)sched_mti.denom) / ((double)sched_mti.numer)));
}

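/*
 * Create one spinner thread per CPU at the requested QoS to generate
 * scheduler load; sched_terminate_load() joins and frees them again.
 */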
static int
sched_create_load_at_qos(qos_class_t qos, void **load_token)
{
	struct load_token_context *context = NULL;
	int ret;
	int ncpu;
	size_t ncpu_size = sizeof(ncpu);
	int nthreads;
	int i;
	pthread_attr_t attr;

	ret = sysctlbyname("hw.ncpu", &ncpu, &ncpu_size, NULL, 0);
	if (ret == -1) {
		T_LOG("sysctlbyname(hw.ncpu)");
		return errno;
	}

	T_QUIET; T_LOG("%s: Detected %d CPUs\n", __FUNCTION__, ncpu);

	nthreads = ncpu;
	T_QUIET; T_LOG("%s: Will create %d threads\n", __FUNCTION__, nthreads);

	ret = pthread_attr_init(&attr);
	T_QUIET; T_ASSERT_MACH_SUCCESS(ret, "pthread_attr_init");

	if (&pthread_attr_set_qos_class_np) {
		ret = pthread_attr_set_qos_class_np(&attr, qos, 0);
		T_QUIET; T_ASSERT_MACH_SUCCESS(ret, "pthread_attr_set_qos_class_np");
	}

	context = calloc(1, sizeof(*context));
	if (context == NULL) { T_QUIET; T_LOG("calloc returned error"); return ENOMEM; }

	context->threads_should_exit = 0;
	context->thread_count = nthreads;
	context->qos = qos;
	context->threads = calloc((unsigned int)nthreads, sizeof(pthread_t));

	OSMemoryBarrier();

	for (i = 0; i < nthreads; i++) {
		ret = pthread_create(&context->threads[i], &attr, sched_load_thread, context);
		T_QUIET; T_ASSERT_MACH_SUCCESS(ret, "pthread_create");
		T_QUIET; T_LOG("%s: Created thread %d (%p)\n", __FUNCTION__, i, (void *)context->threads[i]);
	}

	ret = pthread_attr_destroy(&attr);
	T_QUIET; T_ASSERT_MACH_SUCCESS(ret, "pthread_attr_destroy");

	*load_token = context;

	return 0;
}

static void *
sched_load_thread(void *arg)
{
	struct load_token_context *context = (struct load_token_context *)arg;

	T_QUIET; T_LOG("%s: Thread started %p\n", __FUNCTION__, (void *)pthread_self());

	while (!context->threads_should_exit) {
		uint64_t start = mach_absolute_time();
		uint64_t end = start + nanoseconds_to_absolutetime(900ULL * NSEC_PER_MSEC);

		while ((mach_absolute_time() < end) && !context->threads_should_exit) {
			;
		}
	}

	T_QUIET; T_LOG("%s: Thread terminating %p\n", __FUNCTION__, (void *)pthread_self());

	return NULL;
}

static int
sched_terminate_load(void *load_token)
{
	int ret;
	int i;
	struct load_token_context *context = (struct load_token_context *)load_token;

	context->threads_should_exit = 1;
	OSMemoryBarrier();

	for (i = 0; i < context->thread_count; i++) {
		T_QUIET; T_LOG("%s: Joining thread %d (%p)\n", __FUNCTION__, i, (void *)context->threads[i]);
		ret = pthread_join(context->threads[i], NULL);
		T_QUIET; T_ASSERT_MACH_SUCCESS(ret, "pthread_join");
	}

	free(context->threads);
	free(context);

	return 0;
}

// Find the first num primes, simply as a means of doing work
static void do_work(int num)
{
	volatile int i = 3, count, c;

	for (count = 2; count <= num;) {
		for (c = 2; c <= i; c++) {
			if (i % c == 0) {
				break;
			}
		}
		if (c == i) {
			count++;
		}
		i++;
	}
}

#pragma mark pthread callbacks

static void
worker_cb(pthread_priority_t __unused priority)
{
	T_FAIL("a worker thread was created");
}

static void
event_cb(void ** __unused events, int * __unused nevents)
{
	T_FAIL("a kevent routine was called instead of workloop");
}

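/*
 * Query THREAD_POLICY_STATE for the calling thread and return
 * thps_user_promotion_basepri, i.e. the base priority the thread has been
 * promoted to from user space; the workloop handler below checks this
 * against the expected boost of 60.
 */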
static uint32_t
get_user_promotion_basepri(void)
{
	mach_msg_type_number_t count = THREAD_POLICY_STATE_COUNT;
	struct thread_policy_state thread_policy;
	boolean_t get_default = FALSE;
	mach_port_t thread_port = pthread_mach_thread_np(pthread_self());

	kern_return_t kr = thread_policy_get(thread_port, THREAD_POLICY_STATE,
	    (thread_policy_t)&thread_policy, &count, &get_default);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "thread_policy_get");
	return thread_policy.thps_user_promotion_basepri;
}

static int messages_received = 0;
/*
 * Basic workloop handler callback; it checks the
 * effective QoS of the servicer thread.
 */
static void
workloop_cb_test_intransit(uint64_t *workloop_id __unused, void **eventslist, int *events)
{
	messages_received++;
	T_LOG("Workloop handler workloop_cb_test_intransit called. Received message no. %d",
	    messages_received);

	/* Skip the test if we can't check QoS */
	if (geteuid() != 0) {
		T_SKIP("kevent_qos test requires root privileges to run.");
	}

	if (messages_received == 1) {
		sleep(5);
		T_LOG("Do some CPU work.");
		do_work(5000);

		/* Check if the override now is IN + 60 boost */
		T_EXPECT_EFFECTIVE_QOS_EQ(QOS_CLASS_USER_INITIATED,
		    "dispatch_source event handler QoS should be QOS_CLASS_USER_INITIATED");
		T_EXPECT_EQ(get_user_promotion_basepri(), 60u,
		    "dispatch_source event handler should be overridden at 60");

		/* Enable the knote to get the 2nd message */
		struct kevent_qos_s *kev = *eventslist;
		kev->flags = EV_ADD | EV_ENABLE | EV_UDATA_SPECIFIC | EV_DISPATCH | EV_VANISHED;
		kev->fflags = (MACH_RCV_MSG | MACH_RCV_LARGE | MACH_RCV_LARGE_IDENTITY |
		    MACH_RCV_TRAILER_ELEMENTS(MACH_RCV_TRAILER_CTX) |
		    MACH_RCV_TRAILER_TYPE(MACH_MSG_TRAILER_FORMAT_0) |
		    MACH_RCV_VOUCHER);
		*events = 1;
	} else {
		*events = 0;
		exit(0);
	}
}

static void
run_client_server(const char *server_name, const char *client_name)
{
	dt_helper_t helpers[] = {
		dt_launchd_helper_domain("com.apple.xnu.test.turnstile_multihop.plist",
		    server_name, NULL, LAUNCH_SYSTEM_DOMAIN),
		dt_fork_helper(client_name)
	};
	dt_run_helpers(helpers, 2, HELPER_TIMEOUT_SECS);
}

#pragma mark Mach receive

#define TURNSTILE_MULTIHOP_SERVICE_NAME "com.apple.xnu.test.turnstile_multihop"

static mach_port_t
get_server_port(void)
{
	mach_port_t port;
	kern_return_t kr = bootstrap_check_in(bootstrap_port,
	    TURNSTILE_MULTIHOP_SERVICE_NAME, &port);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "server bootstrap_check_in");
	return port;
}

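/*
 * Build a voucher carrying a single MACH_VOUCHER_ATTR_KEY_PTHPRIORITY
 * recipe that encodes the given pthread priority, so a message sent with
 * it advertises the sender's QoS.
 */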
static mach_voucher_t
create_pthpriority_voucher(mach_msg_priority_t qos)
{
	char voucher_buf[sizeof(mach_voucher_attr_recipe_data_t) + sizeof(ipc_pthread_priority_value_t)];

	mach_voucher_t voucher = MACH_PORT_NULL;
	kern_return_t ret;
	ipc_pthread_priority_value_t ipc_pthread_priority_value =
	    (ipc_pthread_priority_value_t)qos;

	mach_voucher_attr_raw_recipe_array_t recipes;
	mach_voucher_attr_raw_recipe_size_t recipe_size = 0;
	mach_voucher_attr_recipe_t recipe =
	    (mach_voucher_attr_recipe_t)&voucher_buf[recipe_size];

	recipe->key = MACH_VOUCHER_ATTR_KEY_PTHPRIORITY;
	recipe->command = MACH_VOUCHER_ATTR_PTHPRIORITY_CREATE;
	recipe->previous_voucher = MACH_VOUCHER_NULL;
	memcpy((char *)&recipe->content[0], &ipc_pthread_priority_value, sizeof(ipc_pthread_priority_value));
	recipe->content_size = sizeof(ipc_pthread_priority_value_t);
	recipe_size += sizeof(mach_voucher_attr_recipe_data_t) + recipe->content_size;

	recipes = (mach_voucher_attr_raw_recipe_array_t)&voucher_buf[0];

	ret = host_create_mach_voucher(mach_host_self(),
	    recipes,
	    recipe_size,
	    &voucher);

	T_QUIET; T_ASSERT_MACH_SUCCESS(ret, "client host_create_mach_voucher");
	return voucher;
}

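/*
 * Send a message to send_port.  Passing a reply_port makes this a sync send
 * (MACH_SEND_SYNC_OVERRIDE is added), msg_port is optionally transferred as
 * a receive right, and MACH_SEND_SYNC_USE_THRPRI attaches a pthread-priority
 * voucher encoding qos.
 */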
static void
send(
	mach_port_t send_port,
	mach_port_t reply_port,
	mach_port_t msg_port,
	mach_msg_priority_t qos,
	mach_msg_option_t options)
{
	kern_return_t ret = 0;

	struct {
		mach_msg_header_t header;
		mach_msg_body_t body;
		mach_msg_port_descriptor_t port_descriptor;
	} send_msg = {
		.header = {
			.msgh_remote_port = send_port,
			.msgh_local_port = reply_port,
			.msgh_bits = MACH_MSGH_BITS_SET(MACH_MSG_TYPE_COPY_SEND,
			    reply_port ? MACH_MSG_TYPE_MAKE_SEND_ONCE : 0,
			    MACH_MSG_TYPE_MOVE_SEND,
			    MACH_MSGH_BITS_COMPLEX),
			.msgh_id = 0x100,
			.msgh_size = sizeof(send_msg),
		},
		.body = {
			.msgh_descriptor_count = 1,
		},
		.port_descriptor = {
			.name = msg_port,
			.disposition = MACH_MSG_TYPE_MOVE_RECEIVE,
			.type = MACH_MSG_PORT_DESCRIPTOR,
		},
	};

	if (options & MACH_SEND_SYNC_USE_THRPRI) {
		send_msg.header.msgh_voucher_port = create_pthpriority_voucher(qos);
	}

	if (msg_port == MACH_PORT_NULL) {
		send_msg.body.msgh_descriptor_count = 0;
	}

	ret = mach_msg(&(send_msg.header),
	    MACH_SEND_MSG |
	    MACH_SEND_TIMEOUT |
	    MACH_SEND_OVERRIDE |
	    ((reply_port ? MACH_SEND_SYNC_OVERRIDE : 0) | options),
	    send_msg.header.msgh_size,
	    0,
	    MACH_PORT_NULL,
	    10000,
	    0);

	T_QUIET; T_ASSERT_MACH_SUCCESS(ret, "client mach_msg");
}

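/*
 * Block in a sync-wait receive on rcv_port (the thread's special reply
 * port); notify_port names the port the reply is expected from, which lets
 * the kernel link the wait for priority propagation.
 */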
static void
receive(
	mach_port_t rcv_port,
	mach_port_t notify_port)
{
	kern_return_t ret = 0;

	struct {
		mach_msg_header_t header;
		mach_msg_body_t body;
		mach_msg_port_descriptor_t port_descriptor;
	} rcv_msg = {
		.header = {
			.msgh_remote_port = MACH_PORT_NULL,
			.msgh_local_port = rcv_port,
			.msgh_size = sizeof(rcv_msg),
		},
	};

	T_LOG("Client: Starting sync receive\n");

	ret = mach_msg(&(rcv_msg.header),
	    MACH_RCV_MSG |
	    MACH_RCV_SYNC_WAIT,
	    0,
	    rcv_msg.header.msgh_size,
	    rcv_port,
	    0,
	    notify_port);
}

static lock_t lock_DEF;
static lock_t lock_IN;
static lock_t lock_UI;

static mach_port_t main_thread_port;
static mach_port_t def_thread_port;
static mach_port_t in_thread_port;
static mach_port_t ui_thread_port;
static mach_port_t sixty_thread_port;

static uint64_t dispatch_sync_owner;

static int
get_pri(thread_t thread_port)
{
	kern_return_t kr;

	thread_extended_info_data_t extended_info;
	mach_msg_type_number_t count = THREAD_EXTENDED_INFO_COUNT;
	kr = thread_info(thread_port, THREAD_EXTENDED_INFO,
	    (thread_info_t)&extended_info, &count);

	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "thread_info");

	return extended_info.pth_curpri;
}

static void
set_thread_name(const char *fn_name)
{
	char name[50] = "";

	thread_t thread_port = pthread_mach_thread_np(pthread_self());

	int pri = get_pri(thread_port);

	snprintf(name, sizeof(name), "%s at pri %2d", fn_name, pri);
	pthread_setname_np(name);
}

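/*
 * Spin, yielding to the target, until thread_port reports TH_STATE_WAITING,
 * i.e. the target thread has blocked.
 */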
static void
thread_wait_to_block(mach_port_t thread_port)
{
	thread_extended_info_data_t extended_info;
	kern_return_t kr;

	while (1) {
		mach_msg_type_number_t count = THREAD_EXTENDED_INFO_COUNT;
		kr = thread_info(thread_port, THREAD_EXTENDED_INFO,
		    (thread_info_t)&extended_info, &count);

		T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "thread_info");

		if (extended_info.pth_run_state == TH_STATE_WAITING) {
			T_LOG("Target thread blocked\n");
			break;
		}
		thread_switch(thread_port, SWITCH_OPTION_DEPRESS, 0);
	}
}

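/*
 * Spin, yielding to yield_thread, until thread_port's scheduler priority
 * has been boosted to at least the given value.
 */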
static void
thread_wait_to_boost(mach_port_t thread_port, mach_port_t yield_thread, int priority)
{
	thread_extended_info_data_t extended_info;
	kern_return_t kr;

	while (1) {
		mach_msg_type_number_t count = THREAD_EXTENDED_INFO_COUNT;
		kr = thread_info(thread_port, THREAD_EXTENDED_INFO,
		    (thread_info_t)&extended_info, &count);

		T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "thread_info");

		if (extended_info.pth_priority >= priority) {
			T_LOG("Target thread boosted\n");
			break;
		}
		thread_switch(yield_thread, SWITCH_OPTION_DEPRESS, 0);
	}
}

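/*
 * Fake a dispatch_sync() wait: register a disabled EVFILT_WORKLOOP knote
 * with NOTE_WL_SYNC_WAIT so the calling thread blocks on a workloop whose
 * owner is owner_thread (published through dispatch_sync_owner);
 * dispatch_sync_cancel() below wakes it with NOTE_WL_SYNC_WAKE and ends
 * the ownership.
 */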
static void
dispatch_sync_wait(mach_port_t owner_thread, qos_class_t promote_qos)
{
	struct kevent_qos_s kev_err[] = {{ 0 }};
	uint32_t fflags = 0;
	uint64_t mask = 0;
	uint16_t action = 0;
	int r;

	action = EV_ADD | EV_DISABLE;
	fflags = NOTE_WL_SYNC_WAIT | NOTE_WL_DISCOVER_OWNER;

	dispatch_sync_owner = owner_thread;

	struct kevent_qos_s kev[] = {{
		.ident = mach_thread_self(),
		.filter = EVFILT_WORKLOOP,
		.flags = action,
		.fflags = fflags,
		.udata = (uintptr_t)&def_thread_port,
		.qos = (int32_t)_pthread_qos_class_encode(promote_qos, 0, 0),
		.ext[EV_EXTIDX_WL_MASK] = mask,
		.ext[EV_EXTIDX_WL_VALUE] = dispatch_sync_owner,
		.ext[EV_EXTIDX_WL_ADDR] = (uint64_t)&dispatch_sync_owner,
	}};

	/* Setup workloop to fake dispatch sync wait on a workloop */
	r = kevent_id(30, kev, 1, kev_err, 1, NULL,
	    NULL, KEVENT_FLAG_WORKLOOP | KEVENT_FLAG_ERROR_EVENTS);
	T_QUIET; T_LOG("dispatch_sync_wait returned\n");
}

static void
dispatch_sync_cancel(mach_port_t owner_thread, qos_class_t promote_qos)
{
	struct kevent_qos_s kev_err[] = {{ 0 }};
	uint32_t fflags = 0;
	uint64_t mask = 0;
	uint16_t action = 0;
	int r;

	action = EV_DELETE | EV_ENABLE;
	fflags = NOTE_WL_SYNC_WAKE | NOTE_WL_END_OWNERSHIP;

	dispatch_sync_owner = owner_thread;

	struct kevent_qos_s kev[] = {{
		.ident = def_thread_port,
		.filter = EVFILT_WORKLOOP,
		.flags = action,
		.fflags = fflags,
		.udata = (uintptr_t)&def_thread_port,
		.qos = (int32_t)_pthread_qos_class_encode(promote_qos, 0, 0),
		.ext[EV_EXTIDX_WL_MASK] = mask,
		.ext[EV_EXTIDX_WL_VALUE] = dispatch_sync_owner,
		.ext[EV_EXTIDX_WL_ADDR] = (uint64_t)&dispatch_sync_owner,
	}};

	/* Setup workloop to fake dispatch sync wake on a workloop */
	r = kevent_id(30, kev, 1, kev_err, 1, NULL,
	    NULL, KEVENT_FLAG_WORKLOOP | KEVENT_FLAG_ERROR_EVENTS);
	T_QUIET; T_LOG("dispatch_sync_cancel returned\n");
}

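/*
 * Client-side blocking chain, highest hop first:
 *
 *   thread_at_sixty (pri 60)       blocks on lock_UI, held by
 *   thread_at_ui (UI QoS)          blocks on lock_IN, held by
 *   thread_at_in (IN QoS)          blocks on lock_DEF, held by
 *   thread_at_default (DEF QoS)    blocks in a fake dispatch sync wait on
 *   thread_at_maintenance (MT QoS), which is blocked in sync IPC on the
 *   server; the boost should propagate across every hop to the servicer.
 */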
static void *
thread_at_sixty(void *arg __unused)
{
	int policy;
	struct sched_param param;
	int ret;
	void *load_token;
	uint64_t before_lock_time, after_lock_time;

	sixty_thread_port = mach_thread_self();

	set_thread_name(__FUNCTION__);

	/* Change our priority to 60 */
	ret = pthread_getschedparam(pthread_self(), &policy, &param);
	T_QUIET; T_ASSERT_MACH_SUCCESS(ret, "pthread_getschedparam");

	param.sched_priority = 60;

	ret = pthread_setschedparam(pthread_self(), policy, &param);
	T_QUIET; T_ASSERT_MACH_SUCCESS(ret, "pthread_setschedparam");

	ret = pthread_getschedparam(pthread_self(), &policy, &param);
	T_QUIET; T_ASSERT_MACH_SUCCESS(ret, "pthread_getschedparam");

	T_LOG("My priority is %d", param.sched_priority);

	thread_wait_to_boost(in_thread_port, ui_thread_port, 46);

	if (spin_for_ever) {
		/* Schedule load at Default */
		sched_create_load_at_qos(QOS_CLASS_DEFAULT, &load_token);
	}

	T_LOG("Thread at priority 60 trying to acquire UI lock");

	before_lock_time = mach_absolute_time();
	ull_lock(&lock_UI, 3, UL_UNFAIR_LOCK, 0);
	after_lock_time = mach_absolute_time();

	T_QUIET; T_LOG("The time for priority 60 thread to acquire lock was %llu\n",
	    (after_lock_time - before_lock_time));
	exit(0);
}

static void *
thread_at_ui(void *arg __unused)
{
	ui_thread_port = mach_thread_self();

	set_thread_name(__FUNCTION__);

	/* Grab the first ulock */
	ull_lock(&lock_UI, 2, UL_UNFAIR_LOCK, 0);

	thread_wait_to_boost(def_thread_port, in_thread_port, 37);
	thread_create_at_qos(QOS_CLASS_USER_INTERACTIVE, thread_at_sixty);

	T_LOG("Thread at UI priority trying to acquire IN lock");
	ull_lock(&lock_IN, 2, UL_UNFAIR_LOCK, 0);
	ull_unlock(&lock_UI, 2, UL_UNFAIR_LOCK, 0);
	return NULL;
}

static void *
thread_at_in(void *arg __unused)
{
	in_thread_port = mach_thread_self();

	set_thread_name(__FUNCTION__);

	/* Grab the first ulock */
	ull_lock(&lock_IN, 2, UL_UNFAIR_LOCK, 0);

	T_LOG("Thread at IN priority got first lock");

	thread_wait_to_boost(main_thread_port, def_thread_port, 31);

	/* Create a new thread at QOS_CLASS_USER_INTERACTIVE qos */
	thread_create_at_qos(QOS_CLASS_USER_INTERACTIVE, thread_at_ui);

	T_LOG("Thread at IN priority trying to acquire default lock");
	ull_lock(&lock_DEF, 1, UL_UNFAIR_LOCK, 0);
	ull_unlock(&lock_IN, 2, UL_UNFAIR_LOCK, 0);
	return NULL;
}

static void *
thread_at_default(void *arg __unused)
{
	def_thread_port = mach_thread_self();

	set_thread_name(__FUNCTION__);

	/* Grab the first ulock */
	ull_lock(&lock_DEF, 1, UL_UNFAIR_LOCK, 0);

	T_LOG("Thread at DEFAULT priority got first lock");

	thread_wait_to_block(main_thread_port);

	/* Create a new thread at QOS_CLASS_USER_INITIATED qos */
	thread_create_at_qos(QOS_CLASS_USER_INITIATED, thread_at_in);

	T_LOG("Thread at Default priority trying to wait on dispatch sync for maintenance thread");
	dispatch_sync_wait(main_thread_port, QOS_CLASS_DEFAULT);
	ull_unlock(&lock_DEF, 1, UL_UNFAIR_LOCK, 0);
	return NULL;
}

static void *
thread_at_maintenance(void *arg __unused)
{
	mach_port_t qos_send_port;
	mach_port_t special_reply_port;

	main_thread_port = mach_thread_self();

	set_thread_name(__FUNCTION__);

	kern_return_t kr = bootstrap_look_up(bootstrap_port,
	    TURNSTILE_MULTIHOP_SERVICE_NAME, &qos_send_port);
	T_QUIET; T_ASSERT_MACH_SUCCESS(kr, "client bootstrap_look_up");

	special_reply_port = thread_get_special_reply_port();
	T_QUIET; T_ASSERT_TRUE(MACH_PORT_VALID(special_reply_port), "get_thread_special_reply_port");

	/* Become the dispatch sync owner; dispatch_sync_owner will be set in the dispatch_sync_wait function */

	/* Send an async message */
	send(qos_send_port, MACH_PORT_NULL, MACH_PORT_NULL,
	    (uint32_t)_pthread_qos_class_encode(QOS_CLASS_MAINTENANCE, 0, 0), 0);

	/* Send a sync message */
	send(qos_send_port, special_reply_port, MACH_PORT_NULL,
	    (uint32_t)_pthread_qos_class_encode(QOS_CLASS_MAINTENANCE, 0, 0), 0);

	/* Create a new thread at QOS_CLASS_DEFAULT qos */
	thread_create_at_qos(QOS_CLASS_DEFAULT, thread_at_default);

	/* Block on sync IPC */
	receive(special_reply_port, qos_send_port);

	dispatch_sync_cancel(def_thread_port, QOS_CLASS_DEFAULT);
	return NULL;
}

T_HELPER_DECL(three_ulock_sync_ipc_hop,
    "Create chain of 4 threads with 3 ulocks and 1 sync IPC at different qos")
{
	dt_stat_time_t roundtrip_stat = dt_stat_time_create("multihop_lock_acquire");

	T_STAT_MEASURE_LOOP(roundtrip_stat) {
		if (fork() == 0) {
			thread_create_at_qos(QOS_CLASS_MAINTENANCE, thread_at_maintenance);
			sigsuspend(0);
			exit(0);
		}
		wait(NULL);
	}

	dt_stat_finalize(roundtrip_stat);
	T_END;
}

static void
thread_create_at_qos(qos_class_t qos, void * (*function)(void *))
{
	qos_class_t qos_thread;
	pthread_t thread;
	pthread_attr_t attr;
	int ret;

	ret = setpriority(PRIO_DARWIN_ROLE, 0, PRIO_DARWIN_ROLE_UI_FOCAL);
	if (ret != 0) {
		T_LOG("set priority failed\n");
	}

	pthread_attr_init(&attr);
	pthread_attr_set_qos_class_np(&attr, qos, 0);
	pthread_create(&thread, &attr, function, NULL);

	T_LOG("pthread created\n");
	pthread_get_qos_class_np(thread, &qos_thread, NULL);
}

#pragma mark Mach receive - kevent_qos

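/*
 * Register a workloop kevent for the server port so that incoming messages
 * are delivered to workloop_cb_test_intransit rather than to a plain kevent
 * or workqueue thread.
 */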
static void
expect_kevent_id_recv(mach_port_t port)
{
	int r;

	T_QUIET; T_ASSERT_POSIX_ZERO(_pthread_workqueue_init_with_workloop(
		worker_cb, event_cb,
		(pthread_workqueue_function_workloop_t)workloop_cb_test_intransit, 0, 0), NULL);

	struct kevent_qos_s kev[] = {{
		.ident = port,
		.filter = EVFILT_MACHPORT,
		.flags = EV_ADD | EV_UDATA_SPECIFIC | EV_DISPATCH | EV_VANISHED,
		.fflags = (MACH_RCV_MSG | MACH_RCV_LARGE | MACH_RCV_LARGE_IDENTITY |
		    MACH_RCV_TRAILER_ELEMENTS(MACH_RCV_TRAILER_CTX) |
		    MACH_RCV_TRAILER_TYPE(MACH_MSG_TRAILER_FORMAT_0) |
		    MACH_RCV_VOUCHER),
		.data = 1,
		.qos = (int32_t)_pthread_qos_class_encode(QOS_CLASS_MAINTENANCE, 0, 0)
	}};

	struct kevent_qos_s kev_err[] = {{ 0 }};

	/* Setup workloop for mach msg rcv */
	r = kevent_id(25, kev, 1, kev_err, 1, NULL,
	    NULL, KEVENT_FLAG_WORKLOOP | KEVENT_FLAG_ERROR_EVENTS);

	T_QUIET; T_ASSERT_POSIX_SUCCESS(r, "kevent_id");
	T_QUIET; T_ASSERT_EQ(r, 0, "no errors returned from kevent_id");
}

T_HELPER_DECL(server_kevent_id,
    "Reply with the QoS that a dispatch source event handler ran with")
{
	expect_kevent_id_recv(get_server_port());
	sigsuspend(0);
	T_ASSERT_FAIL("should receive a message");
}

#define TEST_MULTIHOP(server_name, client_name, name) \
	T_DECL(server_kevent_id_##name, \
	    "Event delivery using a kevent_id", \
	    T_META_ASROOT(YES)) \
	{ \
		run_client_server(server_name, client_name); \
	}

#define TEST_MULTIHOP_SPIN(server_name, client_name, name) \
	T_DECL(server_kevent_id_##name, \
	    "Event delivery using a kevent_id", \
	    T_META_ASROOT(YES), T_META_ENABLED(FALSE)) \
	{ \
		spin_for_ever = true; \
		run_client_server(server_name, client_name); \
		spin_for_ever = false; \
	}

/*
 * Test 1: Test multihop priority boosting with ulocks, dispatch sync and sync IPC.
 *
 * Create threads at different QoS that each acquire a ulock and then block on
 * the next ulock/dispatch sync, creating a sync chain. The last hop of the
 * chain is blocked on sync IPC.
 */
TEST_MULTIHOP("server_kevent_id", "three_ulock_sync_ipc_hop", three_ulock_sync_ipc_hop)

/*
 * Test 2: Test multihop priority boosting with ulocks, dispatch sync and sync IPC.
 *
 * Create threads at different QoS that each acquire a ulock and then block on
 * the next ulock/dispatch sync, creating a sync chain. The last hop of the
 * chain is blocked on sync IPC.
 * Before the priority 60 thread blocks on its ulock, it also starts spinner
 * threads at priority 31 (QOS_CLASS_DEFAULT) to keep the CPUs busy.
 */
TEST_MULTIHOP_SPIN("server_kevent_id", "three_ulock_sync_ipc_hop", three_ulock_sync_ipc_hop_spin)