/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */

#include <mach/mach_types.h>
#include <mach/mach_traps.h>
#include <mach/kern_return.h>
#include <mach/sync_policy.h>
#include <mach/task.h>

#include <kern/misc_protos.h>
#include <kern/spl.h>
#include <kern/ipc_sync.h>
#include <kern/ipc_tt.h>
#include <kern/thread.h>
#include <kern/clock.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>
#include <ipc/ipc_eventlink.h>
#include <kern/host.h>
#include <kern/waitq.h>
#include <kern/zalloc.h>
#include <kern/mach_param.h>
#include <mach/mach_eventlink_server.h>

#include <libkern/OSAtomic.h>

static ZONE_DECLARE(ipc_eventlink_zone, "ipc_eventlink",
    sizeof(struct ipc_eventlink_base), ZC_NONE);

os_refgrp_decl(static, ipc_eventlink_refgrp, "eventlink", NULL);

#if DEVELOPMENT || DEBUG
static queue_head_t ipc_eventlink_list = QUEUE_HEAD_INITIALIZER(ipc_eventlink_list);
static LCK_GRP_DECLARE(ipc_eventlink_dev_lock_grp, "ipc_eventlink_dev_lock");
static LCK_SPIN_DECLARE(global_ipc_eventlink_lock, &ipc_eventlink_dev_lock_grp);

#define global_ipc_eventlink_lock() \
    lck_spin_lock_grp(&global_ipc_eventlink_lock, &ipc_eventlink_dev_lock_grp)
#define global_ipc_eventlink_lock_try() \
    lck_spin_try_lock_grp(&global_ipc_eventlink_lock, &ipc_eventlink_dev_lock_grp)
#define global_ipc_eventlink_unlock() \
    lck_spin_unlock(&global_ipc_eventlink_lock)

#endif /* DEVELOPMENT || DEBUG */

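/*
 * The DEVELOPMENT || DEBUG machinery above exists so kernel debugging tools
 * can enumerate every live eventlink. A minimal sketch of such a walker is
 * below; ipc_eventlink_debug_count() is a hypothetical helper, not part of
 * this file, and assumes the same build flags.
 */
#if 0 /* illustrative sketch only, not compiled */
static uint32_t
ipc_eventlink_debug_count(void)
{
    uint32_t count = 0;
    struct ipc_eventlink_base *elb;

    /* The debug list may only be walked under the global spinlock */
    global_ipc_eventlink_lock();
    qe_foreach_element(elb, &ipc_eventlink_list, elb_global_elm) {
        count++;
    }
    global_ipc_eventlink_unlock();
    return count;
}
#endif
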
/* Forward declarations */
static struct ipc_eventlink_base *
ipc_eventlink_alloc(void);

static void
ipc_eventlink_initialize(
    struct ipc_eventlink_base *ipc_eventlink_base);

static kern_return_t
ipc_eventlink_destroy_internal(
    struct ipc_eventlink *ipc_eventlink);

static kern_return_t
ipc_eventlink_signal(
    struct ipc_eventlink *ipc_eventlink);

static uint64_t
ipc_eventlink_signal_wait_until_trap_internal(
    mach_port_name_t wait_port,
    mach_port_name_t signal_port,
    uint64_t count,
    mach_eventlink_signal_wait_option_t el_option,
    kern_clock_id_t clock_id,
    uint64_t deadline);

static kern_return_t
ipc_eventlink_signal_wait_internal(
    struct ipc_eventlink *wait_eventlink,
    struct ipc_eventlink *signal_eventlink,
    uint64_t deadline,
    uint64_t *count,
    ipc_eventlink_option_t eventlink_option);

static kern_return_t
ipc_eventlink_convert_wait_result(int wait_result);

static kern_return_t
ipc_eventlink_signal_internal_locked(
    struct ipc_eventlink *signal_eventlink,
    ipc_eventlink_option_t eventlink_option);

static kern_return_t
convert_port_to_eventlink_locked(
    ipc_port_t port,
    struct ipc_eventlink **ipc_eventlink_ptr);

static kern_return_t
port_name_to_eventlink(
    mach_port_name_t name,
    struct ipc_eventlink **ipc_eventlink_ptr);

/*
 * Name: ipc_eventlink_alloc
 *
 * Description: Allocates an ipc_eventlink struct and initializes it.
 *
 * Args: None.
 *
 * Returns:
 * ipc_eventlink_base on Success.
 */
static struct ipc_eventlink_base *
ipc_eventlink_alloc(void)
{
    struct ipc_eventlink_base *ipc_eventlink_base = IPC_EVENTLINK_BASE_NULL;
    ipc_eventlink_base = zalloc(ipc_eventlink_zone);

    ipc_eventlink_initialize(ipc_eventlink_base);

#if DEVELOPMENT || DEBUG
    /* Add ipc_eventlink to global list */
    global_ipc_eventlink_lock();
    queue_enter(&ipc_eventlink_list, ipc_eventlink_base,
        struct ipc_eventlink_base *, elb_global_elm);
    global_ipc_eventlink_unlock();
#endif
    return ipc_eventlink_base;
}

/*
 * Name: ipc_eventlink_initialize
 *
 * Description: Initializes an ipc eventlink struct and allocates
 * a kobject port for each end.
 *
 * Args: ipc eventlink base.
 *
 * Returns: None.
 */
static void
ipc_eventlink_initialize(
    struct ipc_eventlink_base *ipc_eventlink_base)
{
    int i;
    kern_return_t kr;

    kr = waitq_init(&ipc_eventlink_base->elb_waitq, SYNC_POLICY_DISABLE_IRQ);
    assert(kr == KERN_SUCCESS);

    /* Initialize the count to 2, one ref for each ipc eventlink port */
    os_ref_init_count(&ipc_eventlink_base->elb_ref_count, &ipc_eventlink_refgrp, 2);
    ipc_eventlink_base->elb_active = TRUE;
    ipc_eventlink_base->elb_type = IPC_EVENTLINK_TYPE_NO_COPYIN;

    for (i = 0; i < 2; i++) {
        struct ipc_eventlink *ipc_eventlink = &(ipc_eventlink_base->elb_eventlink[i]);

        ipc_eventlink->el_port = ipc_kobject_alloc_port((ipc_kobject_t)ipc_eventlink,
            IKOT_EVENTLINK, IPC_KOBJECT_ALLOC_MAKE_SEND | IPC_KOBJECT_ALLOC_NSREQUEST);
        /* ipc_kobject_alloc_port never fails */
        ipc_eventlink->el_thread = THREAD_NULL;
        ipc_eventlink->el_sync_counter = 0;
        ipc_eventlink->el_wait_counter = UINT64_MAX;
        ipc_eventlink->el_base = ipc_eventlink_base;
    }
}

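/*
 * Resulting object graph (a sketch of the code above, not new behavior):
 * one ipc_eventlink_base carries a shared waitq, a refcount that starts
 * at 2, and two halves elb_eventlink[0..1]. Each half points back to the
 * base via el_base and owns one IKOT_EVENTLINK kobject port; each port
 * holds one of the two initial references, which is why the destroy path
 * below drops exactly two references, one per port.
 */
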
/*
 * Name: mach_eventlink_create
 *
 * Description: Allocates an ipc_eventlink struct and initializes it.
 *
 * Args:
 * task: task port of the process
 * mach_eventlink_create_option_t: option
 * eventlink_port_pair: eventlink port array
 *
 * Returns:
 * KERN_SUCCESS on Success.
 */
kern_return_t
mach_eventlink_create(
    task_t task,
    mach_eventlink_create_option_t elc_option,
    eventlink_port_pair_t eventlink_port_pair)
{
    int i;
    struct ipc_eventlink_base *ipc_eventlink_base;

    if (task == TASK_NULL || task != current_task() ||
        elc_option != MELC_OPTION_NO_COPYIN) {
        return KERN_INVALID_ARGUMENT;
    }

    ipc_eventlink_base = ipc_eventlink_alloc();

    for (i = 0; i < 2; i++) {
        eventlink_port_pair[i] = ipc_eventlink_base->elb_eventlink[i].el_port;
    }

    return KERN_SUCCESS;
}

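/*
 * A sketch of how a user-space client might drive this routine through the
 * MIG-generated wrapper (assuming the user-side signature mirrors the
 * kernel one; illustrative, not the authoritative API):
 *
 *     mach_port_t ports[2];
 *     kern_return_t kr;
 *
 *     kr = mach_eventlink_create(mach_task_self(), MELC_OPTION_NO_COPYIN,
 *         ports);
 *     // On success, ports[0] and ports[1] each name one end of the
 *     // eventlink; typically one end is handed to another thread or
 *     // process, and each side signals the opposite end.
 */
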
/*
 * Name: mach_eventlink_destroy
 *
 * Description: Destroy an ipc_eventlink and wake up all waiting threads.
 *
 * Args:
 * eventlink: eventlink
 *
 * Returns:
 * KERN_SUCCESS on Success.
 */
kern_return_t
mach_eventlink_destroy(
    struct ipc_eventlink *ipc_eventlink)
{
    ipc_eventlink_destroy_internal(ipc_eventlink);

    /* mach_eventlink_destroy should succeed for a terminated eventlink */
    return KERN_SUCCESS;
}

/*
 * Name: ipc_eventlink_destroy_internal
 *
 * Description: Destroy an ipc_eventlink and wake up all waiting threads.
 *
 * Args:
 * eventlink: eventlink
 *
 * Returns:
 * KERN_SUCCESS on Success.
 */
static kern_return_t
ipc_eventlink_destroy_internal(
    struct ipc_eventlink *ipc_eventlink)
{
    spl_t s;
    int i;
    struct ipc_eventlink_base *ipc_eventlink_base;
    thread_t associated_thread[2] = {};
    ipc_port_t ipc_eventlink_port = IPC_PORT_NULL;
    ipc_port_t ipc_eventlink_port_remote = IPC_PORT_NULL;

    if (ipc_eventlink == IPC_EVENTLINK_NULL) {
        return KERN_TERMINATED;
    }

    s = splsched();
    ipc_eventlink_lock(ipc_eventlink);

    ipc_eventlink_base = ipc_eventlink->el_base;

    /* Check if the eventlink is active */
    if (!ipc_eventlink_active(ipc_eventlink)) {
        ipc_eventlink_unlock(ipc_eventlink);
        splx(s);
        return KERN_TERMINATED;
    }

    for (i = 0; i < 2; i++) {
        struct ipc_eventlink *temp_ipc_eventlink = &ipc_eventlink_base->elb_eventlink[i];

        /* Wake up threads sleeping on the eventlink */
        if (temp_ipc_eventlink->el_thread) {
            associated_thread[i] = temp_ipc_eventlink->el_thread;
            temp_ipc_eventlink->el_thread = THREAD_NULL;

            ipc_eventlink_signal_internal_locked(temp_ipc_eventlink,
                IPC_EVENTLINK_FORCE_WAKEUP);
        }

        /* Only destroy the port on which destroy was called */
        if (temp_ipc_eventlink == ipc_eventlink) {
            ipc_eventlink_port = temp_ipc_eventlink->el_port;
            assert(ipc_eventlink_port != IPC_PORT_NULL);
        } else {
            /* Do not destroy the remote port, else eventlink_destroy will fail */
            ipc_eventlink_port_remote = temp_ipc_eventlink->el_port;
            assert(ipc_eventlink_port_remote != IPC_PORT_NULL);
            /*
             * Take a reference on the remote port, since it could go
             * away after the eventlink lock is dropped.
             */
            ip_reference(ipc_eventlink_port_remote);
        }
        assert(temp_ipc_eventlink->el_port != IPC_PORT_NULL);
        temp_ipc_eventlink->el_port = IPC_PORT_NULL;
    }

    /* Mark the eventlink as inactive */
    ipc_eventlink_base->elb_active = FALSE;

    ipc_eventlink_unlock(ipc_eventlink);
    splx(s);

    /* Destroy the local eventlink port */
    ipc_port_dealloc_kernel(ipc_eventlink_port);
    /* Drops port reference */

    /* Clear the remote eventlink port without destroying it */
    ip_lock(ipc_eventlink_port_remote);
    if (ip_active(ipc_eventlink_port_remote)) {
        ipc_kobject_set_atomically(ipc_eventlink_port_remote, IKO_NULL, IKOT_EVENTLINK);
    }
    ip_unlock(ipc_eventlink_port_remote);
    ip_release(ipc_eventlink_port_remote);

    for (i = 0; i < 2; i++) {
        if (associated_thread[i] != THREAD_NULL &&
            associated_thread[i] != THREAD_ASSOCIATE_WILD) {
            thread_deallocate(associated_thread[i]);
        }

        /* Drop the eventlink reference given to the port */
        ipc_eventlink_deallocate(ipc_eventlink);
    }
    return KERN_SUCCESS;
}

/*
 * Name: mach_eventlink_associate
 *
 * Description: Associate a thread with an eventlink.
 *
 * Args:
 * eventlink: eventlink
 * thread: thread to be associated
 * copyin_addr_wait: copyin addr for wait
 * copyin_mask_wait: copyin mask for wait
 * copyin_addr_signal: copyin addr for signal
 * copyin_mask_signal: copyin mask for signal
 * mach_eventlink_associate_option_t: option for eventlink associate
 *
 * Returns:
 * KERN_SUCCESS on Success.
 */
kern_return_t
mach_eventlink_associate(
    struct ipc_eventlink *ipc_eventlink,
    thread_t thread,
    mach_vm_address_t copyin_addr_wait,
    uint64_t copyin_mask_wait,
    mach_vm_address_t copyin_addr_signal,
    uint64_t copyin_mask_signal,
    mach_eventlink_associate_option_t ela_option)
{
    spl_t s;

    if (ipc_eventlink == IPC_EVENTLINK_NULL) {
        return KERN_TERMINATED;
    }

    if (copyin_addr_wait != 0 || copyin_mask_wait != 0 ||
        copyin_addr_signal != 0 || copyin_mask_signal != 0) {
        return KERN_INVALID_ARGUMENT;
    }

    if ((thread == NULL && ela_option == MELA_OPTION_NONE) ||
        (thread != NULL && ela_option == MELA_OPTION_ASSOCIATE_ON_WAIT)) {
        return KERN_INVALID_ARGUMENT;
    }

    s = splsched();
    ipc_eventlink_lock(ipc_eventlink);

    /* Check if the eventlink is terminated */
    if (!ipc_eventlink_active(ipc_eventlink)) {
        ipc_eventlink_unlock(ipc_eventlink);
        splx(s);
        return KERN_TERMINATED;
    }

    if (ipc_eventlink->el_thread != NULL) {
        ipc_eventlink_unlock(ipc_eventlink);
        splx(s);
        return KERN_NAME_EXISTS;
    }

    if (ela_option == MELA_OPTION_ASSOCIATE_ON_WAIT) {
        ipc_eventlink->el_thread = THREAD_ASSOCIATE_WILD;
    } else {
        thread_reference(thread);
        ipc_eventlink->el_thread = thread;
    }

    ipc_eventlink_unlock(ipc_eventlink);
    splx(s);
    return KERN_SUCCESS;
}

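/*
 * A sketch of the expected client sequence (illustrative; the user-side
 * MIG wrappers and port variables are assumptions, and the copyin
 * parameters must currently be zero since only no-copyin eventlinks
 * exist):
 *
 *     // Pin one end of the eventlink to a specific thread...
 *     kr = mach_eventlink_associate(ports[0], thread_port,
 *         0, 0, 0, 0, MELA_OPTION_NONE);
 *
 *     // ...or let whichever thread waits first become the owner:
 *     kr = mach_eventlink_associate(ports[1], MACH_PORT_NULL,
 *         0, 0, 0, 0, MELA_OPTION_ASSOCIATE_ON_WAIT);
 *
 * Only the associated thread (or, for associate-on-wait, one waiter at a
 * time) may block in the wait traps below.
 */
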
/*
 * Name: mach_eventlink_disassociate
 *
 * Description: Disassociate a thread from an eventlink.
 * Wake up the associated thread if it is blocked on the eventlink.
 *
 * Args:
 * eventlink: eventlink
 * mach_eventlink_disassociate_option_t: option for eventlink disassociate
 *
 * Returns:
 * KERN_SUCCESS on Success.
 */
kern_return_t
mach_eventlink_disassociate(
    struct ipc_eventlink *ipc_eventlink,
    mach_eventlink_disassociate_option_t eld_option)
{
    spl_t s;
    thread_t thread;

    if (ipc_eventlink == IPC_EVENTLINK_NULL) {
        return KERN_TERMINATED;
    }

    if (eld_option != MELD_OPTION_NONE) {
        return KERN_INVALID_ARGUMENT;
    }

    s = splsched();
    ipc_eventlink_lock(ipc_eventlink);

    /* Check if the eventlink is terminated */
    if (!ipc_eventlink_active(ipc_eventlink)) {
        ipc_eventlink_unlock(ipc_eventlink);
        splx(s);
        return KERN_TERMINATED;
    }

    if (ipc_eventlink->el_thread == NULL) {
        ipc_eventlink_unlock(ipc_eventlink);
        splx(s);
        return KERN_INVALID_ARGUMENT;
    }

    thread = ipc_eventlink->el_thread;
    ipc_eventlink->el_thread = NULL;

    /* Wake up the thread if blocked */
    ipc_eventlink_signal_internal_locked(ipc_eventlink,
        IPC_EVENTLINK_FORCE_WAKEUP);

    ipc_eventlink_unlock(ipc_eventlink);
    splx(s);

    if (thread != THREAD_ASSOCIATE_WILD) {
        thread_deallocate(thread);
    }
    return KERN_SUCCESS;
}

/*
 * Name: mach_eventlink_signal_trap
 *
 * Description: Increment the sync count of the eventlink and
 * wake up the waiting thread if the sync counter is greater
 * than the wait counter.
 *
 * Args:
 * eventlink: eventlink
 *
 * Returns:
 * uint64_t: Contains count and error codes.
 */
uint64_t
mach_eventlink_signal_trap(
    mach_port_name_t port,
    uint64_t signal_count __unused)
{
    struct ipc_eventlink *ipc_eventlink;
    kern_return_t kr;
    uint64_t retval = 0;

    kr = port_name_to_eventlink(port, &ipc_eventlink);
    if (kr == KERN_SUCCESS) {
        /* Signal the remote side of the eventlink */
        kr = ipc_eventlink_signal(eventlink_remote_side(ipc_eventlink));

        /* Deallocate ref returned by port_name_to_eventlink */
        ipc_eventlink_deallocate(ipc_eventlink);
    }

    retval = encode_eventlink_count_and_error(0, kr);
    return retval;
}

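/*
 * Every trap in this file returns a single uint64_t that packs both a
 * kern_return_t and a counter value via encode_eventlink_count_and_error()
 * (defined in the eventlink headers). A caller-side sketch, using
 * hypothetical decode_eventlink_error()/decode_eventlink_count() helpers
 * that mirror whatever unpacking the header's encoding implies:
 *
 *     uint64_t rv = mach_eventlink_signal_trap(port, 0);
 *     kern_return_t kr = decode_eventlink_error(rv);   // hypothetical
 *     uint64_t count = decode_eventlink_count(rv);     // hypothetical
 *     if (kr != KERN_SUCCESS) {
 *         // e.g. KERN_TERMINATED: other end destroyed;
 *         // KERN_INVALID_NAME: bad port name.
 *     }
 */
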
/*
 * Name: ipc_eventlink_signal
 *
 * Description: Increment the sync count of the eventlink and
 * wake up the waiting thread if the sync counter is greater
 * than the wait counter.
 *
 * Args:
 * eventlink: eventlink
 *
 * Returns:
 * KERN_SUCCESS on Success.
 */
static kern_return_t
ipc_eventlink_signal(
    struct ipc_eventlink *ipc_eventlink)
{
    kern_return_t kr;
    spl_t s;

    if (ipc_eventlink == IPC_EVENTLINK_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    s = splsched();
    ipc_eventlink_lock(ipc_eventlink);

    /* Check if the eventlink is terminated */
    if (!ipc_eventlink_active(ipc_eventlink)) {
        ipc_eventlink_unlock(ipc_eventlink);
        splx(s);
        return KERN_TERMINATED;
    }

    kr = ipc_eventlink_signal_internal_locked(ipc_eventlink,
        IPC_EVENTLINK_NONE);

    ipc_eventlink_unlock(ipc_eventlink);
    splx(s);

    if (kr == KERN_NOT_WAITING) {
        kr = KERN_SUCCESS;
    }

    return kr;
}

/*
 * Name: mach_eventlink_wait_until_trap
 *
 * Description: Wait until the local signal count exceeds the
 * specified count or the deadline passes.
 *
 * Args:
 * eventlink_port: eventlink port for wait
 * wait_count: signal count to wait on
 * option: eventlink option
 * clock_id: clock id
 * deadline: deadline in mach_absolute_time
 *
 * Returns:
 * uint64_t: contains count and error codes
 */
uint64_t
mach_eventlink_wait_until_trap(
    mach_port_name_t eventlink_port,
    uint64_t wait_count,
    mach_eventlink_signal_wait_option_t option,
    kern_clock_id_t clock_id,
    uint64_t deadline)
{
    return ipc_eventlink_signal_wait_until_trap_internal(
        eventlink_port,
        MACH_PORT_NULL,
        wait_count,
        option,
        clock_id,
        deadline);
}

/*
 * Name: mach_eventlink_signal_wait_until_trap
 *
 * Description: Signal the opposite side of the
 * eventlink and wait until the local signal count exceeds the
 * specified count or the deadline passes.
 *
 * Args:
 * eventlink_port: eventlink port for wait
 * wait_count: signal count to wait on
 * option: eventlink option
 * clock_id: clock id
 * deadline: deadline in mach_absolute_time
 *
 * Returns:
 * uint64_t: contains count and error codes
 */
uint64_t
mach_eventlink_signal_wait_until_trap(
    mach_port_name_t eventlink_port,
    uint64_t wait_count,
    uint64_t signal_count __unused,
    mach_eventlink_signal_wait_option_t option,
    kern_clock_id_t clock_id,
    uint64_t deadline)
{
    return ipc_eventlink_signal_wait_until_trap_internal(
        eventlink_port,
        eventlink_port,
        wait_count,
        option,
        clock_id,
        deadline);
}

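/*
 * Together the two traps above support a classic ping-pong pattern: each
 * side tracks the count it last observed and does a combined
 * signal-and-wait, handing the CPU directly to its partner when possible.
 * An illustrative user-side sketch (my_port, do_work() and the
 * decode_eventlink_count() helper are assumptions, not real API):
 *
 *     uint64_t count = 0;
 *     for (;;) {
 *         // Signal the peer, then wait for our own sync count to pass
 *         // 'count'.
 *         uint64_t rv = mach_eventlink_signal_wait_until_trap(my_port,
 *             count, 0, MELSW_OPTION_NONE,
 *             KERN_CLOCK_MACH_ABSOLUTE_TIME, 0);
 *         count = decode_eventlink_count(rv);  // hypothetical decoder
 *         do_work();                           // hypothetical
 *     }
 */
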
/*
 * Name: ipc_eventlink_signal_wait_until_trap_internal
 *
 * Description: Signal the opposite side of the
 * eventlink and wait until the local signal count exceeds the
 * specified count or the deadline passes.
 *
 * Args:
 * wait_port: eventlink port for wait
 * signal_port: eventlink port for signal
 * count: signal count to wait on
 * el_option: eventlink option
 * clock_id: clock id
 * deadline: deadline in mach_absolute_time
 *
 * Returns:
 * uint64_t: contains signal count and error codes
 */
static uint64_t
ipc_eventlink_signal_wait_until_trap_internal(
    mach_port_name_t wait_port,
    mach_port_name_t signal_port,
    uint64_t count,
    mach_eventlink_signal_wait_option_t el_option,
    kern_clock_id_t clock_id,
    uint64_t deadline)
{
    struct ipc_eventlink *wait_ipc_eventlink = IPC_EVENTLINK_NULL;
    struct ipc_eventlink *signal_ipc_eventlink = IPC_EVENTLINK_NULL;
    kern_return_t kr;
    ipc_eventlink_option_t ipc_eventlink_option = IPC_EVENTLINK_NONE;

    if (clock_id != KERN_CLOCK_MACH_ABSOLUTE_TIME) {
        return encode_eventlink_count_and_error(count, KERN_INVALID_ARGUMENT);
    }

    kr = port_name_to_eventlink(wait_port, &wait_ipc_eventlink);
    if (kr == KERN_SUCCESS) {
        assert(wait_ipc_eventlink != IPC_EVENTLINK_NULL);

        /* Get the remote side of the eventlink for signal */
        if (signal_port != MACH_PORT_NULL) {
            signal_ipc_eventlink = eventlink_remote_side(wait_ipc_eventlink);
        }

        if (el_option & MELSW_OPTION_NO_WAIT) {
            ipc_eventlink_option |= IPC_EVENTLINK_NO_WAIT;
        }

        kr = ipc_eventlink_signal_wait_internal(wait_ipc_eventlink,
            signal_ipc_eventlink, deadline,
            &count, ipc_eventlink_option);

        /* Release ref returned by port_name_to_eventlink */
        ipc_eventlink_deallocate(wait_ipc_eventlink);
    }
    return encode_eventlink_count_and_error(count, kr);
}

/*
 * Name: ipc_eventlink_signal_wait_internal
 *
 * Description: Signal the opposite side of the
 * eventlink and wait until the local signal count exceeds the
 * specified count or the deadline passes.
 *
 * Args:
 * wait_eventlink: eventlink for wait
 * signal_eventlink: eventlink for signal
 * deadline: deadline in mach_absolute_time
 * count: signal count to wait on
 * eventlink_option: eventlink option
 *
 * Returns:
 * KERN_SUCCESS on Success.
 * The signal count is returned implicitly in the count arg.
 */
static kern_return_t
ipc_eventlink_signal_wait_internal(
    struct ipc_eventlink *wait_eventlink,
    struct ipc_eventlink *signal_eventlink,
    uint64_t deadline,
    uint64_t *count,
    ipc_eventlink_option_t eventlink_option)
{
    spl_t s;
    kern_return_t kr = KERN_ALREADY_WAITING;
    thread_t self = current_thread();
    struct ipc_eventlink_base *ipc_eventlink_base = wait_eventlink->el_base;
    thread_t handoff_thread = THREAD_NULL;
    thread_handoff_option_t handoff_option = THREAD_HANDOFF_NONE;
    uint64_t old_signal_count;
    wait_result_t wr;

    s = splsched();
    ipc_eventlink_lock(wait_eventlink);

    /* Check if the eventlink is terminated */
    if (!ipc_eventlink_active(wait_eventlink)) {
        kr = KERN_TERMINATED;
        goto unlock;
    }

    /* Check if the waiting thread is associated with the eventlink */
    if (wait_eventlink->el_thread != THREAD_ASSOCIATE_WILD &&
        wait_eventlink->el_thread != self) {
        kr = KERN_INVALID_ARGUMENT;
        goto unlock;
    }

    /* Check if a thread is already waiting, for the associate-on-wait case */
    if (wait_eventlink->el_thread == THREAD_ASSOCIATE_WILD &&
        wait_eventlink->el_wait_counter != UINT64_MAX) {
        kr = KERN_INVALID_ARGUMENT;
        goto unlock;
    }

    /* Check if the signal count already exceeds the count provided */
    if (*count < wait_eventlink->el_sync_counter) {
        *count = wait_eventlink->el_sync_counter;
        kr = KERN_SUCCESS;
    } else if (eventlink_option & IPC_EVENTLINK_NO_WAIT) {
        /* Check if the no-wait option was passed */
        *count = wait_eventlink->el_sync_counter;
        kr = KERN_OPERATION_TIMED_OUT;
    } else {
        /* Update the wait counter and add the thread to the waitq */
        wait_eventlink->el_wait_counter = *count;
        old_signal_count = wait_eventlink->el_sync_counter;

        thread_set_pending_block_hint(self, kThreadWaitEventlink);
        (void)waitq_assert_wait64_locked(
            &ipc_eventlink_base->elb_waitq,
            CAST_EVENT64_T(wait_eventlink),
            THREAD_ABORTSAFE,
            TIMEOUT_URGENCY_USER_NORMAL,
            deadline, TIMEOUT_NO_LEEWAY,
            self);

        eventlink_option |= IPC_EVENTLINK_HANDOFF;
    }

    /* Check if we need to signal the other side of the eventlink */
    if (signal_eventlink != IPC_EVENTLINK_NULL) {
        kern_return_t signal_kr;
        signal_kr = ipc_eventlink_signal_internal_locked(signal_eventlink,
            eventlink_option);

        if (signal_kr == KERN_NOT_WAITING) {
            assert(self->handoff_thread == THREAD_NULL);
        }
    }

    if (kr != KERN_ALREADY_WAITING) {
        goto unlock;
    }

    if (self->handoff_thread) {
        handoff_thread = self->handoff_thread;
        self->handoff_thread = THREAD_NULL;
        handoff_option = THREAD_HANDOFF_SETRUN_NEEDED;
    }

    ipc_eventlink_unlock(wait_eventlink);
    splx(s);

    wr = thread_handoff_deallocate(handoff_thread, handoff_option);
    kr = ipc_eventlink_convert_wait_result(wr);

    assert(self->handoff_thread == THREAD_NULL);

    /* Increment the count value if eventlink_signal was called */
    if (kr == KERN_SUCCESS) {
        *count += 1;
    } else {
        *count = old_signal_count;
    }

    return kr;

unlock:
    ipc_eventlink_unlock(wait_eventlink);
    splx(s);
    assert(self->handoff_thread == THREAD_NULL);

    return kr;
}

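/*
 * Note on the handoff above (a reading of the code, not new behavior):
 * when the caller both waits and signals, the signal is issued with
 * IPC_EVENTLINK_HANDOFF, so waking the other side can stash the woken
 * thread in self->handoff_thread. thread_handoff_deallocate() then
 * switches directly to that thread instead of going through the run
 * queue, which is what makes the combined signal-wait trap cheap for
 * ping-pong workloads. If nobody was waiting, handoff_thread stays
 * THREAD_NULL and the call degrades to a plain block.
 */
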
/*
 * Name: ipc_eventlink_convert_wait_result
 *
 * Description: Convert a wait result to a return value
 * for the wait trap.
 *
 * Args:
 * wait_result: result from thread handoff
 *
 * Returns:
 * KERN_SUCCESS on Success.
 */
static kern_return_t
ipc_eventlink_convert_wait_result(int wait_result)
{
    switch (wait_result) {
    case THREAD_AWAKENED:
        return KERN_SUCCESS;

    case THREAD_TIMED_OUT:
        return KERN_OPERATION_TIMED_OUT;

    case THREAD_INTERRUPTED:
        return KERN_ABORTED;

    case THREAD_RESTART:
        return KERN_TERMINATED;

    default:
        panic("ipc_eventlink_wait_block\n");
        return KERN_FAILURE;
    }
}

/*
 * Name: ipc_eventlink_signal_internal_locked
 *
 * Description: Increment the sync count of the eventlink and
 * wake up the waiting thread if the sync counter is greater
 * than the wait counter.
 *
 * Args:
 * signal_eventlink: eventlink
 * eventlink_option: options
 *
 * Returns:
 * KERN_SUCCESS on Success.
 */
static kern_return_t
ipc_eventlink_signal_internal_locked(
    struct ipc_eventlink *signal_eventlink,
    ipc_eventlink_option_t eventlink_option)
{
    kern_return_t kr = KERN_NOT_WAITING;
    struct ipc_eventlink_base *ipc_eventlink_base = signal_eventlink->el_base;

    if (eventlink_option & IPC_EVENTLINK_FORCE_WAKEUP) {
        /* Adjust the wait counter */
        signal_eventlink->el_wait_counter = UINT64_MAX;

        kr = waitq_wakeup64_all_locked(
            &ipc_eventlink_base->elb_waitq,
            CAST_EVENT64_T(signal_eventlink),
            THREAD_RESTART, NULL,
            WAITQ_ALL_PRIORITIES,
            WAITQ_KEEP_LOCKED);
        return kr;
    }

    /* Increment the eventlink sync count */
    signal_eventlink->el_sync_counter++;

    /* Check if a thread needs to be woken up */
    if (signal_eventlink->el_sync_counter > signal_eventlink->el_wait_counter) {
        waitq_options_t wq_option = (eventlink_option & IPC_EVENTLINK_HANDOFF) ?
            WQ_OPTION_HANDOFF : WQ_OPTION_NONE;

        /* Adjust the wait counter */
        signal_eventlink->el_wait_counter = UINT64_MAX;

        kr = waitq_wakeup64_one_locked(
            &ipc_eventlink_base->elb_waitq,
            CAST_EVENT64_T(signal_eventlink),
            THREAD_AWAKENED, NULL,
            WAITQ_ALL_PRIORITIES,
            WAITQ_KEEP_LOCKED,
            wq_option);
    }

    return kr;
}

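/*
 * Worked example of the counter arithmetic above (illustrative numbers):
 * suppose el_sync_counter == 5 and a waiter calls the wait trap with
 * count == 5. Since 5 < 5 is false, the waiter records
 * el_wait_counter = 5 and blocks. A signal then bumps el_sync_counter to
 * 6; 6 > 5, so the waiter is woken and el_wait_counter is reset to
 * UINT64_MAX (the "nobody waiting" sentinel). Had the waiter instead
 * passed count == 4, the 4 < 5 check would have succeeded immediately and
 * it would return without blocking, with *count updated to 5.
 */
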
/*
 * Name: ipc_eventlink_reference
 *
 * Description: Increment the ref on the ipc eventlink struct
 *
 * Args:
 * eventlink: eventlink
 *
 * Returns: None
 */
void
ipc_eventlink_reference(
    struct ipc_eventlink *ipc_eventlink)
{
    os_ref_retain(&ipc_eventlink->el_base->elb_ref_count);
}

/*
 * Name: ipc_eventlink_deallocate
 *
 * Description: Decrement the ref on the ipc eventlink struct
 *
 * Args:
 * eventlink: eventlink
 *
 * Returns: None
 */
void
ipc_eventlink_deallocate(
    struct ipc_eventlink *ipc_eventlink)
{
    if (ipc_eventlink == IPC_EVENTLINK_NULL) {
        return;
    }

    struct ipc_eventlink_base *ipc_eventlink_base = ipc_eventlink->el_base;

    if (os_ref_release(&ipc_eventlink_base->elb_ref_count) > 0) {
        return;
    }

    assert(!ipc_eventlink_active(ipc_eventlink));

#if DEVELOPMENT || DEBUG
    /* Remove ipc_eventlink from the global list */
    global_ipc_eventlink_lock();
    queue_remove(&ipc_eventlink_list, ipc_eventlink_base,
        struct ipc_eventlink_base *, elb_global_elm);
    global_ipc_eventlink_unlock();
#endif
    zfree(ipc_eventlink_zone, ipc_eventlink_base);
}

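/*
 * Reference-count lifecycle, as this file uses it (a summary, not new
 * policy): the base object starts at 2 in ipc_eventlink_initialize(), one
 * reference per kobject port. Lookups (port_name_to_eventlink and the
 * convert_port_to_eventlink* routines below) take transient references
 * that their callers drop. ipc_eventlink_destroy_internal() drops the two
 * port references, so the final ipc_eventlink_deallocate() frees the base
 * back to ipc_eventlink_zone only after every outstanding lookup ref has
 * also been released.
 */
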
/*
 * Name: convert_port_to_eventlink
 *
 * Description: Convert from a port to an ipc eventlink.
 * Produces an ipc eventlink ref, which may be null.
 *
 * Args:
 * port: eventlink port
 *
 * Returns:
 * ipc_eventlink on Success.
 */
struct ipc_eventlink *
convert_port_to_eventlink(
    mach_port_t port)
{
    struct ipc_eventlink *ipc_eventlink = IPC_EVENTLINK_NULL;

    if (IP_VALID(port)) {
        ip_lock(port);
        convert_port_to_eventlink_locked(port, &ipc_eventlink);
        ip_unlock(port);
    }

    return ipc_eventlink;
}

/*
 * Name: convert_port_to_eventlink_locked
 *
 * Description: Convert from a locked port to an ipc eventlink.
 * Produces an ipc eventlink ref, which may be null.
 *
 * Args:
 * port: locked eventlink port
 * ipc_eventlink_ptr: pointer to return the ipc_eventlink.
 *
 * Returns:
 * KERN_SUCCESS on Success.
 * KERN_TERMINATED on inactive eventlink.
 */
static kern_return_t
convert_port_to_eventlink_locked(
    ipc_port_t port,
    struct ipc_eventlink **ipc_eventlink_ptr)
{
    kern_return_t kr = KERN_INVALID_CAPABILITY;
    struct ipc_eventlink *ipc_eventlink = IPC_EVENTLINK_NULL;

    if (ip_active(port) &&
        ip_kotype(port) == IKOT_EVENTLINK) {
        ipc_eventlink = (struct ipc_eventlink *)port->ip_kobject;

        if (ipc_eventlink) {
            ipc_eventlink_reference(ipc_eventlink);
            kr = KERN_SUCCESS;
        } else {
            kr = KERN_TERMINATED;
        }
    }

    *ipc_eventlink_ptr = ipc_eventlink;
    return kr;
}

/*
 * Name: port_name_to_eventlink
 *
 * Description: Convert from a port name in the current
 * space to an ipc eventlink. Produces an ipc eventlink ref,
 * which may be null.
 *
 * Args:
 * name: eventlink port name
 * ipc_eventlink_ptr: ptr to return the eventlink struct
 *
 * Returns:
 * KERN_SUCCESS on Success.
 */
static kern_return_t
port_name_to_eventlink(
    mach_port_name_t name,
    struct ipc_eventlink **ipc_eventlink_ptr)
{
    ipc_port_t kern_port;
    kern_return_t kr;

    if (!MACH_PORT_VALID(name)) {
        *ipc_eventlink_ptr = IPC_EVENTLINK_NULL;
        return KERN_INVALID_NAME;
    }

    kr = ipc_port_translate_send(current_space(), name, &kern_port);
    if (kr != KERN_SUCCESS) {
        *ipc_eventlink_ptr = IPC_EVENTLINK_NULL;
        return kr;
    }
    /* We have the port locked */
    assert(IP_VALID(kern_port));

    kr = convert_port_to_eventlink_locked(kern_port, ipc_eventlink_ptr);
    ip_unlock(kern_port);

    return kr;
}

/*
 * Name: ipc_eventlink_notify
 *
 * Description: No-senders notification handler for eventlink ports;
 * destroys the ipc_eventlink and wakes up all waiting threads.
 *
 * Args:
 * msg: msg containing the eventlink port
 *
 * Returns:
 * None.
 */
void
ipc_eventlink_notify(
    mach_msg_header_t *msg)
{
    kern_return_t kr;
    mach_no_senders_notification_t *notification = (void *)msg;
    ipc_port_t port = notification->not_header.msgh_remote_port;
    struct ipc_eventlink *ipc_eventlink;

    if (!ip_active(port)) {
        return;
    }

    /* Get an ipc_eventlink reference */
    ip_lock(port);

    /* Make sure the port is still active */
    if (!ip_active(port)) {
        ip_unlock(port);
        return;
    }

    convert_port_to_eventlink_locked(port, &ipc_eventlink);
    ip_unlock(port);

    kr = ipc_eventlink_destroy_internal(ipc_eventlink);
    if (kr == KERN_TERMINATED) {
        /* The eventlink is already inactive, destroy the port */
        ipc_port_dealloc_kernel(port);
    }

    /* Drop the reference returned by convert_port_to_eventlink_locked */
    ipc_eventlink_deallocate(ipc_eventlink);
}

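/*
 * How this handler gets invoked (a summary of the surrounding machinery):
 * ipc_eventlink_initialize() allocates each port with
 * IPC_KOBJECT_ALLOC_NSREQUEST, so when the last send right to either end
 * dies, the kernel delivers a no-senders notification that is routed here
 * for IKOT_EVENTLINK ports. Dropping all user references to an eventlink
 * port therefore tears the eventlink down even if mach_eventlink_destroy()
 * was never called.
 */
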
#define WAITQ_TO_EVENTLINK(wq) ((struct ipc_eventlink_base *) ((uintptr_t)(wq) - offsetof(struct ipc_eventlink_base, elb_waitq)))
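/*
 * WAITQ_TO_EVENTLINK is the usual container_of pattern: elb_waitq is
 * embedded in struct ipc_eventlink_base, so subtracting its offset from
 * the waitq pointer recovers the enclosing base object.
 */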

/*
 * Name: kdp_eventlink_find_owner
 *
 * Description: Find who will signal the waiting thread.
 *
 * Args:
 * waitq: eventlink waitq
 * event: eventlink wait event
 * waitinfo: waitinfo struct
 *
 * Returns:
 * None.
 */
void
kdp_eventlink_find_owner(
    struct waitq *waitq,
    event64_t event,
    thread_waitinfo_t *waitinfo)
{
    assert(waitinfo->wait_type == kThreadWaitEventlink);
    waitinfo->owner = 0;
    waitinfo->context = 0;

    if (waitq_held(waitq)) {
        return;
    }

    struct ipc_eventlink_base *ipc_eventlink_base = WAITQ_TO_EVENTLINK(waitq);

    if (event == CAST_EVENT64_T(&ipc_eventlink_base->elb_eventlink[0])) {
        /* Use the other end of the eventlink for the signal thread */
        if (ipc_eventlink_base->elb_eventlink[1].el_thread != THREAD_ASSOCIATE_WILD) {
            waitinfo->owner = thread_tid(ipc_eventlink_base->elb_eventlink[1].el_thread);
        } else {
            waitinfo->owner = 0;
        }
    } else if (event == CAST_EVENT64_T(&ipc_eventlink_base->elb_eventlink[1])) {
        /* Use the other end of the eventlink for the signal thread */
        if (ipc_eventlink_base->elb_eventlink[0].el_thread != THREAD_ASSOCIATE_WILD) {
            waitinfo->owner = thread_tid(ipc_eventlink_base->elb_eventlink[0].el_thread);
        } else {
            waitinfo->owner = 0;
        }
    }

    return;
}