/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#include <mach/mach_types.h>
#include <mach/mach_traps.h>
#include <mach/kern_return.h>
#include <mach/sync_policy.h>
#include <mach/task.h>

#include <kern/misc_protos.h>

#include <kern/ipc_sync.h>
#include <kern/ipc_tt.h>
#include <kern/thread.h>
#include <kern/clock.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_space.h>
#include <ipc/ipc_eventlink.h>
#include <kern/host.h>
#include <kern/waitq.h>
#include <kern/zalloc.h>
#include <kern/mach_param.h>
#include <mach/mach_eventlink_server.h>

#include <libkern/OSAtomic.h>
static ZONE_DECLARE(ipc_eventlink_zone, "ipc_eventlink",
    sizeof(struct ipc_eventlink_base), ZC_NONE);

os_refgrp_decl(static, ipc_eventlink_refgrp, "eventlink", NULL);
#if DEVELOPMENT || DEBUG
static queue_head_t ipc_eventlink_list = QUEUE_HEAD_INITIALIZER(ipc_eventlink_list);
static LCK_GRP_DECLARE(ipc_eventlink_dev_lock_grp, "ipc_eventlink_dev_lock");
static LCK_SPIN_DECLARE(global_ipc_eventlink_lock, &ipc_eventlink_dev_lock_grp);

#define global_ipc_eventlink_lock() \
    lck_spin_lock_grp(&global_ipc_eventlink_lock, &ipc_eventlink_dev_lock_grp)
#define global_ipc_eventlink_lock_try() \
    lck_spin_try_lock_grp(&global_ipc_eventlink_lock, &ipc_eventlink_dev_lock_grp)
#define global_ipc_eventlink_unlock() \
    lck_spin_unlock(&global_ipc_eventlink_lock)

#endif /* DEVELOPMENT || DEBUG */
/* Forward declarations */
static struct ipc_eventlink_base *
ipc_eventlink_alloc(void);

static void
ipc_eventlink_initialize(
    struct ipc_eventlink_base *ipc_eventlink_base);

static kern_return_t
ipc_eventlink_destroy_internal(
    struct ipc_eventlink *ipc_eventlink);

static kern_return_t
ipc_eventlink_signal(
    struct ipc_eventlink *ipc_eventlink);

static uint64_t
ipc_eventlink_signal_wait_until_trap_internal(
    mach_port_name_t wait_port,
    mach_port_name_t signal_port,
    uint64_t count,
    mach_eventlink_signal_wait_option_t el_option,
    kern_clock_id_t clock_id,
    uint64_t deadline);

static kern_return_t
ipc_eventlink_signal_wait_internal(
    struct ipc_eventlink *wait_eventlink,
    struct ipc_eventlink *signal_eventlink,
    uint64_t deadline,
    uint64_t *count,
    ipc_eventlink_option_t eventlink_option);

static kern_return_t
ipc_eventlink_convert_wait_result(int wait_result);

static kern_return_t
ipc_eventlink_signal_internal_locked(
    struct ipc_eventlink *signal_eventlink,
    ipc_eventlink_option_t eventlink_option);

static kern_return_t
convert_port_to_eventlink_locked(
    ipc_port_t port,
    struct ipc_eventlink **ipc_eventlink_ptr);

static kern_return_t
port_name_to_eventlink(
    mach_port_name_t name,
    struct ipc_eventlink **ipc_eventlink_ptr);
/*
 * Name: ipc_eventlink_alloc
 *
 * Description: Allocates an ipc_eventlink struct and initializes it.
 *
 * Args: None.
 *
 * Returns:
 *    ipc_eventlink_base on Success.
 */
static struct ipc_eventlink_base *
ipc_eventlink_alloc(void)
{
    struct ipc_eventlink_base *ipc_eventlink_base = IPC_EVENTLINK_BASE_NULL;
    ipc_eventlink_base = zalloc(ipc_eventlink_zone);

    ipc_eventlink_initialize(ipc_eventlink_base);

#if DEVELOPMENT || DEBUG
    /* Add ipc_eventlink to global list */
    global_ipc_eventlink_lock();
    queue_enter(&ipc_eventlink_list, ipc_eventlink_base,
        struct ipc_eventlink_base *, elb_global_elm);
    global_ipc_eventlink_unlock();
#endif /* DEVELOPMENT || DEBUG */

    return ipc_eventlink_base;
}
/*
 * Name: ipc_eventlink_initialize
 *
 * Description: Initializes ipc eventlink struct.
 *
 * Args: ipc eventlink base.
 *
 * Returns: None.
 */
static void
ipc_eventlink_initialize(
    struct ipc_eventlink_base *ipc_eventlink_base)
{
    int i;
    kern_return_t kr;

    kr = waitq_init(&ipc_eventlink_base->elb_waitq, SYNC_POLICY_DISABLE_IRQ);
    assert(kr == KERN_SUCCESS);

    /* Initialize the count to 2, refs for each ipc eventlink port */
    os_ref_init_count(&ipc_eventlink_base->elb_ref_count,
        &ipc_eventlink_refgrp, 2);
    ipc_eventlink_base->elb_active = TRUE;
    ipc_eventlink_base->elb_type = IPC_EVENTLINK_TYPE_NO_COPYIN;

    for (i = 0; i < 2; i++) {
        struct ipc_eventlink *ipc_eventlink = &(ipc_eventlink_base->elb_eventlink[i]);

        ipc_eventlink->el_port = ipc_kobject_alloc_port((ipc_kobject_t)ipc_eventlink,
            IKOT_EVENTLINK, IPC_KOBJECT_ALLOC_MAKE_SEND | IPC_KOBJECT_ALLOC_NSREQUEST);
        /* ipc_kobject_alloc_port never fails */
        ipc_eventlink->el_thread = THREAD_NULL;
        ipc_eventlink->el_sync_counter = 0;
        ipc_eventlink->el_wait_counter = UINT64_MAX;
        ipc_eventlink->el_base = ipc_eventlink_base;
    }
}
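
/*
 * Worked example of the state set up above (illustration only): a freshly
 * initialized eventlink side has el_sync_counter == 0 and
 * el_wait_counter == UINT64_MAX, where UINT64_MAX serves as the
 * "no thread waiting" sentinel. A signal that arrives before any wait
 * therefore only bumps el_sync_counter and wakes nobody; a later wait on a
 * count below the sync counter returns immediately.
 */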
/*
 * Name: mach_eventlink_create
 *
 * Description: Allocates an ipc_eventlink struct and initializes it.
 *
 * Args:
 *    task: task port of the process
 *    mach_eventlink_create_option_t: option
 *    eventlink_port_pair: eventlink port array
 *
 * Returns:
 *    KERN_SUCCESS on Success.
 */
kern_return_t
mach_eventlink_create(
    task_t task,
    mach_eventlink_create_option_t elc_option,
    eventlink_port_pair_t eventlink_port_pair)
{
    int i;
    struct ipc_eventlink_base *ipc_eventlink_base;

    if (task == TASK_NULL || task != current_task() ||
        elc_option != MELC_OPTION_NO_COPYIN) {
        return KERN_INVALID_ARGUMENT;
    }

    ipc_eventlink_base = ipc_eventlink_alloc();

    for (i = 0; i < 2; i++) {
        eventlink_port_pair[i] =
            ipc_eventlink_base->elb_eventlink[i].el_port;
    }

    return KERN_SUCCESS;
}
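
/*
 * Illustrative usage sketch, not part of this file: how a client might call
 * the routine above. The wrapper name matches this routine; error handling
 * and port disposition are assumptions.
 *
 *    mach_port_t pair[2];
 *    kern_return_t kr;
 *
 *    kr = mach_eventlink_create(mach_task_self(), MELC_OPTION_NO_COPYIN, pair);
 *    if (kr == KERN_SUCCESS) {
 *        // keep pair[0] for this side, send pair[1] to the peer
 *    }
 */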
/*
 * Name: mach_eventlink_destroy
 *
 * Description: Destroy an ipc_eventlink, wakeup all threads.
 *
 * Args:
 *    eventlink: eventlink
 *
 * Returns:
 *    KERN_SUCCESS on Success.
 */
kern_return_t
mach_eventlink_destroy(
    struct ipc_eventlink *ipc_eventlink)
{
    ipc_eventlink_destroy_internal(ipc_eventlink);

    /* mach_eventlink_destroy should succeed for terminated eventlink */
    return KERN_SUCCESS;
}
/*
 * Name: ipc_eventlink_destroy_internal
 *
 * Description: Destroy an ipc_eventlink, wakeup all threads.
 *
 * Args:
 *    eventlink: eventlink
 *
 * Returns:
 *    KERN_SUCCESS on Success.
 */
static kern_return_t
ipc_eventlink_destroy_internal(
    struct ipc_eventlink *ipc_eventlink)
{
    int i;
    struct ipc_eventlink_base *ipc_eventlink_base;
    thread_t associated_thread[2] = {};
    ipc_port_t ipc_eventlink_port = IPC_PORT_NULL;
    ipc_port_t ipc_eventlink_port_remote = IPC_PORT_NULL;

    if (ipc_eventlink == IPC_EVENTLINK_NULL) {
        return KERN_TERMINATED;
    }

    ipc_eventlink_lock(ipc_eventlink);

    ipc_eventlink_base = ipc_eventlink->el_base;

    /* Check if the eventlink is active */
    if (!ipc_eventlink_active(ipc_eventlink)) {
        ipc_eventlink_unlock(ipc_eventlink);
        return KERN_TERMINATED;
    }

    for (i = 0; i < 2; i++) {
        struct ipc_eventlink *temp_ipc_eventlink = &ipc_eventlink_base->elb_eventlink[i];

        /* Wakeup threads sleeping on eventlink */
        if (temp_ipc_eventlink->el_thread) {
            associated_thread[i] = temp_ipc_eventlink->el_thread;
            temp_ipc_eventlink->el_thread = THREAD_NULL;

            ipc_eventlink_signal_internal_locked(temp_ipc_eventlink,
                IPC_EVENTLINK_FORCE_WAKEUP);
        }

        /* Only destroy the port on which destroy was called */
        if (temp_ipc_eventlink == ipc_eventlink) {
            ipc_eventlink_port = temp_ipc_eventlink->el_port;
            assert(ipc_eventlink_port != IPC_PORT_NULL);
        } else {
            /* Do not destroy the remote port, else eventlink_destroy will fail */
            ipc_eventlink_port_remote = temp_ipc_eventlink->el_port;
            assert(ipc_eventlink_port_remote != IPC_PORT_NULL);
            /*
             * Take a reference on the remote port, since it could go
             * away after eventlink lock is dropped.
             */
            ip_reference(ipc_eventlink_port_remote);
        }

        assert(temp_ipc_eventlink->el_port != IPC_PORT_NULL);
        temp_ipc_eventlink->el_port = IPC_PORT_NULL;
    }

    /* Mark the eventlink as inactive */
    ipc_eventlink_base->elb_active = FALSE;

    ipc_eventlink_unlock(ipc_eventlink);

    /* Destroy the local eventlink port */
    ipc_port_dealloc_kernel(ipc_eventlink_port);
    /* Drops port reference */

    /* Clear the remote eventlink port without destroying it */
    ip_lock(ipc_eventlink_port_remote);
    if (ip_active(ipc_eventlink_port_remote)) {
        ipc_kobject_set_atomically(ipc_eventlink_port_remote, IKO_NULL, IKOT_EVENTLINK);
    }
    ip_unlock(ipc_eventlink_port_remote);
    ip_release(ipc_eventlink_port_remote);

    for (i = 0; i < 2; i++) {
        if (associated_thread[i] != THREAD_NULL &&
            associated_thread[i] != THREAD_ASSOCIATE_WILD) {
            thread_deallocate(associated_thread[i]);
        }
    }

    /* Drop the eventlink reference given to port */
    ipc_eventlink_deallocate(ipc_eventlink);

    return KERN_SUCCESS;
}
/*
 * Name: mach_eventlink_associate
 *
 * Description: Associate a thread to eventlink.
 *
 * Args:
 *    eventlink: eventlink
 *    thread: thread needs to be associated
 *    copyin_addr_wait: copyin addr for wait
 *    copyin_mask_wait: copyin mask for wait
 *    copyin_addr_signal: copyin addr for signal
 *    copyin_mask_signal: copyin mask for signal
 *    mach_eventlink_associate_option_t: option for eventlink associate
 *
 * Returns:
 *    KERN_SUCCESS on Success.
 */
kern_return_t
mach_eventlink_associate(
    struct ipc_eventlink *ipc_eventlink,
    thread_t thread,
    mach_vm_address_t copyin_addr_wait,
    uint64_t copyin_mask_wait,
    mach_vm_address_t copyin_addr_signal,
    uint64_t copyin_mask_signal,
    mach_eventlink_associate_option_t ela_option)
{
    if (ipc_eventlink == IPC_EVENTLINK_NULL) {
        return KERN_TERMINATED;
    }

    if (copyin_addr_wait != 0 || copyin_mask_wait != 0 ||
        copyin_addr_signal != 0 || copyin_mask_signal != 0) {
        return KERN_INVALID_ARGUMENT;
    }

    if ((thread == NULL && ela_option == MELA_OPTION_NONE) ||
        (thread != NULL && ela_option == MELA_OPTION_ASSOCIATE_ON_WAIT)) {
        return KERN_INVALID_ARGUMENT;
    }

    ipc_eventlink_lock(ipc_eventlink);

    /* Check if eventlink is terminated */
    if (!ipc_eventlink_active(ipc_eventlink)) {
        ipc_eventlink_unlock(ipc_eventlink);
        return KERN_TERMINATED;
    }

    if (ipc_eventlink->el_thread != NULL) {
        ipc_eventlink_unlock(ipc_eventlink);
        return KERN_NAME_EXISTS;
    }

    if (ela_option == MELA_OPTION_ASSOCIATE_ON_WAIT) {
        ipc_eventlink->el_thread = THREAD_ASSOCIATE_WILD;
    } else {
        thread_reference(thread);
        ipc_eventlink->el_thread = thread;
    }

    ipc_eventlink_unlock(ipc_eventlink);
    return KERN_SUCCESS;
}
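
/*
 * Illustrative sketch of the two association modes validated above
 * (hypothetical calls, for illustration):
 *
 *    // bind a specific thread up front
 *    kr = mach_eventlink_associate(eventlink, thread, 0, 0, 0, 0,
 *        MELA_OPTION_NONE);
 *
 *    // or bind no thread now; the first thread to wait gets associated
 *    kr = mach_eventlink_associate(eventlink, THREAD_NULL, 0, 0, 0, 0,
 *        MELA_OPTION_ASSOCIATE_ON_WAIT);
 *
 * The copyin address/mask arguments must be zero, since only
 * MELC_OPTION_NO_COPYIN eventlinks are currently supported.
 */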
/*
 * Name: mach_eventlink_disassociate
 *
 * Description: Disassociate a thread from eventlink.
 * Wake up the associated thread if blocked on eventlink.
 *
 * Args:
 *    eventlink: eventlink
 *    mach_eventlink_option_t: option for eventlink disassociate
 *
 * Returns:
 *    KERN_SUCCESS on Success.
 */
kern_return_t
mach_eventlink_disassociate(
    struct ipc_eventlink *ipc_eventlink,
    mach_eventlink_disassociate_option_t eld_option)
{
    thread_t thread;

    if (ipc_eventlink == IPC_EVENTLINK_NULL) {
        return KERN_TERMINATED;
    }

    if (eld_option != MELD_OPTION_NONE) {
        return KERN_INVALID_ARGUMENT;
    }

    ipc_eventlink_lock(ipc_eventlink);

    /* Check if eventlink is terminated */
    if (!ipc_eventlink_active(ipc_eventlink)) {
        ipc_eventlink_unlock(ipc_eventlink);
        return KERN_TERMINATED;
    }

    if (ipc_eventlink->el_thread == NULL) {
        ipc_eventlink_unlock(ipc_eventlink);
        return KERN_INVALID_ARGUMENT;
    }

    thread = ipc_eventlink->el_thread;
    ipc_eventlink->el_thread = NULL;

    /* wake up the thread if blocked */
    ipc_eventlink_signal_internal_locked(ipc_eventlink,
        IPC_EVENTLINK_FORCE_WAKEUP);

    ipc_eventlink_unlock(ipc_eventlink);

    if (thread != THREAD_ASSOCIATE_WILD) {
        thread_deallocate(thread);
    }

    return KERN_SUCCESS;
}
/*
 * Name: mach_eventlink_signal_trap
 *
 * Description: Increment the sync count of eventlink and
 * wake up the thread waiting if sync counter is greater
 * than the wait counter.
 *
 * Args:
 *    eventlink: eventlink
 *
 * Returns:
 *    uint64_t: Contains count and error codes.
 */
uint64_t
mach_eventlink_signal_trap(
    mach_port_name_t port,
    uint64_t signal_count __unused)
{
    struct ipc_eventlink *ipc_eventlink;
    kern_return_t kr;
    uint64_t retval = 0;

    kr = port_name_to_eventlink(port, &ipc_eventlink);
    if (kr == KERN_SUCCESS) {
        /* Signal the remote side of the eventlink */
        kr = ipc_eventlink_signal(eventlink_remote_side(ipc_eventlink));

        /* Deallocate ref returned by port_name_to_eventlink */
        ipc_eventlink_deallocate(ipc_eventlink);
    }

    retval = encode_eventlink_count_and_error(0, kr);
    return retval;
}
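
/*
 * Illustrative sketch (hypothetical decoder names): the trap's uint64_t
 * return value packs a count and a kern_return_t via
 * encode_eventlink_count_and_error(), defined elsewhere, so a caller is
 * expected to unpack it rather than read the raw bits.
 *
 *    uint64_t rv = mach_eventlink_signal_trap(port_name, 0);
 *    kern_return_t err = decode_eventlink_error(rv);  // hypothetical helper
 *    uint64_t count = decode_eventlink_count(rv);     // hypothetical helper
 */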
/*
 * Name: ipc_eventlink_signal
 *
 * Description: Increment the sync count of eventlink and
 * wake up the thread waiting if sync counter is greater
 * than the wait counter.
 *
 * Args:
 *    eventlink: eventlink
 *
 * Returns:
 *    KERN_SUCCESS on Success.
 */
static kern_return_t
ipc_eventlink_signal(
    struct ipc_eventlink *ipc_eventlink)
{
    kern_return_t kr;

    if (ipc_eventlink == IPC_EVENTLINK_NULL) {
        return KERN_INVALID_ARGUMENT;
    }

    ipc_eventlink_lock(ipc_eventlink);

    /* Check if eventlink is terminated */
    if (!ipc_eventlink_active(ipc_eventlink)) {
        ipc_eventlink_unlock(ipc_eventlink);
        return KERN_TERMINATED;
    }

    kr = ipc_eventlink_signal_internal_locked(ipc_eventlink,
        IPC_EVENTLINK_NONE);

    ipc_eventlink_unlock(ipc_eventlink);

    if (kr == KERN_NOT_WAITING) {
        kr = KERN_SUCCESS;
    }

    return kr;
}
/*
 * Name: mach_eventlink_wait_until_trap
 *
 * Description: Wait until local signal count exceeds the
 * specified count or deadline passes.
 *
 * Args:
 *    wait_port: eventlink port for wait
 *    count_ptr: signal count to wait on
 *    el_option: eventlink option
 *    clock_id: clock id for deadline
 *    deadline: deadline in mach_absolute_time
 *
 * Returns:
 *    uint64_t: contains count and error codes
 */
uint64_t
mach_eventlink_wait_until_trap(
    mach_port_name_t eventlink_port,
    uint64_t wait_count,
    mach_eventlink_signal_wait_option_t option,
    kern_clock_id_t clock_id,
    uint64_t deadline)
{
    return ipc_eventlink_signal_wait_until_trap_internal(
        eventlink_port,
        MACH_PORT_NULL,
        wait_count,
        option,
        clock_id,
        deadline);
}
/*
 * Name: mach_eventlink_signal_wait_until_trap
 *
 * Description: Signal the opposite side of the
 * eventlink and wait until local signal count exceeds the
 * specified count or deadline passes.
 *
 * Args:
 *    wait_port: eventlink port for wait
 *    count_ptr: signal count to wait on
 *    el_option: eventlink option
 *    clock_id: clock id for deadline
 *    deadline: deadline in mach_absolute_time
 *
 * Returns:
 *    uint64_t: contains count and error codes
 */
uint64_t
mach_eventlink_signal_wait_until_trap(
    mach_port_name_t eventlink_port,
    uint64_t wait_count,
    uint64_t signal_count __unused,
    mach_eventlink_signal_wait_option_t option,
    kern_clock_id_t clock_id,
    uint64_t deadline)
{
    return ipc_eventlink_signal_wait_until_trap_internal(
        eventlink_port,
        eventlink_port,
        wait_count,
        option,
        clock_id,
        deadline);
}
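
/*
 * Illustrative waiter loop over the trap above (assumed userspace wrapper
 * name and arguments, for illustration): the waiter feeds back the last
 * count it observed, so each wait blocks until the peer signals past it.
 *
 *    uint64_t count = 0;
 *    for (;;) {
 *        kr = mach_eventlink_signal_wait_until(eventlink, &count, 0,
 *            MELSW_OPTION_NONE, KERN_CLOCK_MACH_ABSOLUTE_TIME, 0);
 *        if (kr != KERN_SUCCESS) {
 *            break;
 *        }
 *        // one signal consumed; count now holds the updated sync count
 *    }
 */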
/*
 * Name: ipc_eventlink_signal_wait_until_trap_internal
 *
 * Description: Signal the opposite side of the
 * eventlink and wait until local signal count exceeds the
 * specified count or deadline passes.
 *
 * Args:
 *    wait_port: eventlink port for wait
 *    signal_port: eventlink port for signal
 *    count: signal count to wait on
 *    el_option: eventlink option
 *    clock_id: clock id for deadline
 *    deadline: deadline in mach_absolute_time
 *
 * Returns:
 *    uint64_t: contains signal count and error codes
 */
static uint64_t
ipc_eventlink_signal_wait_until_trap_internal(
    mach_port_name_t wait_port,
    mach_port_name_t signal_port,
    uint64_t count,
    mach_eventlink_signal_wait_option_t el_option,
    kern_clock_id_t clock_id,
    uint64_t deadline)
{
    struct ipc_eventlink *wait_ipc_eventlink = IPC_EVENTLINK_NULL;
    struct ipc_eventlink *signal_ipc_eventlink = IPC_EVENTLINK_NULL;
    kern_return_t kr;
    ipc_eventlink_option_t ipc_eventlink_option = IPC_EVENTLINK_NONE;

    if (clock_id != KERN_CLOCK_MACH_ABSOLUTE_TIME) {
        return encode_eventlink_count_and_error(count, KERN_INVALID_ARGUMENT);
    }

    kr = port_name_to_eventlink(wait_port, &wait_ipc_eventlink);
    if (kr == KERN_SUCCESS) {
        assert(wait_ipc_eventlink != IPC_EVENTLINK_NULL);

        /* Get the remote side of eventlink for signal */
        if (signal_port != MACH_PORT_NULL) {
            signal_ipc_eventlink = eventlink_remote_side(wait_ipc_eventlink);
        }

        if (el_option & MELSW_OPTION_NO_WAIT) {
            ipc_eventlink_option |= IPC_EVENTLINK_NO_WAIT;
        }

        kr = ipc_eventlink_signal_wait_internal(wait_ipc_eventlink,
            signal_ipc_eventlink, deadline,
            &count, ipc_eventlink_option);

        /* release ref returned by port_name_to_eventlink */
        ipc_eventlink_deallocate(wait_ipc_eventlink);
    }

    return encode_eventlink_count_and_error(count, kr);
}
/*
 * Name: ipc_eventlink_signal_wait_internal
 *
 * Description: Signal the opposite side of the
 * eventlink and wait until local signal count exceeds the
 * specified count or deadline passes.
 *
 * Args:
 *    wait_eventlink: eventlink for wait
 *    signal_eventlink: eventlink for signal
 *    deadline: deadline in mach_absolute_time
 *    count_ptr: signal count to wait on
 *    el_option: eventlink option
 *
 * Returns:
 *    KERN_SUCCESS on Success.
 *    signal count is returned implicitly in count arg.
 */
static kern_return_t
ipc_eventlink_signal_wait_internal(
    struct ipc_eventlink *wait_eventlink,
    struct ipc_eventlink *signal_eventlink,
    uint64_t deadline,
    uint64_t *count,
    ipc_eventlink_option_t eventlink_option)
{
    kern_return_t kr = KERN_ALREADY_WAITING;
    thread_t self = current_thread();
    struct ipc_eventlink_base *ipc_eventlink_base = wait_eventlink->el_base;
    thread_t handoff_thread = THREAD_NULL;
    thread_handoff_option_t handoff_option = THREAD_HANDOFF_NONE;
    uint64_t old_signal_count;
    wait_result_t wr;

    ipc_eventlink_lock(wait_eventlink);

    /* Check if eventlink is terminated */
    if (!ipc_eventlink_active(wait_eventlink)) {
        kr = KERN_TERMINATED;
        goto unlock;
    }

    /* Check if waiting thread is associated to eventlink */
    if (wait_eventlink->el_thread != THREAD_ASSOCIATE_WILD &&
        wait_eventlink->el_thread != self) {
        kr = KERN_INVALID_ARGUMENT;
        goto unlock;
    }

    /* Check if thread already waiting for associate on wait case */
    if (wait_eventlink->el_thread == THREAD_ASSOCIATE_WILD &&
        wait_eventlink->el_wait_counter != UINT64_MAX) {
        kr = KERN_INVALID_ARGUMENT;
        goto unlock;
    }

    /* Check if the signal count exceeds the count provided */
    if (*count < wait_eventlink->el_sync_counter) {
        *count = wait_eventlink->el_sync_counter;
        kr = KERN_SUCCESS;
    } else if (eventlink_option & IPC_EVENTLINK_NO_WAIT) {
        /* Check if no block was passed */
        *count = wait_eventlink->el_sync_counter;
        kr = KERN_OPERATION_TIMED_OUT;
    } else {
        /* Update the wait counter and add thread to waitq */
        wait_eventlink->el_wait_counter = *count;
        old_signal_count = wait_eventlink->el_sync_counter;

        thread_set_pending_block_hint(self, kThreadWaitEventlink);
        (void)waitq_assert_wait64_locked(
            &ipc_eventlink_base->elb_waitq,
            CAST_EVENT64_T(wait_eventlink),
            THREAD_ABORTSAFE,
            TIMEOUT_URGENCY_USER_NORMAL,
            deadline, TIMEOUT_NO_LEEWAY,
            self);

        eventlink_option |= IPC_EVENTLINK_HANDOFF;
    }

    /* Check if we need to signal the other side of eventlink */
    if (signal_eventlink != IPC_EVENTLINK_NULL) {
        kern_return_t signal_kr;
        signal_kr = ipc_eventlink_signal_internal_locked(signal_eventlink,
            eventlink_option);

        if (signal_kr == KERN_NOT_WAITING) {
            assert(self->handoff_thread == THREAD_NULL);
        }
    }

    if (kr != KERN_ALREADY_WAITING) {
        goto unlock;
    }

    if (self->handoff_thread) {
        handoff_thread = self->handoff_thread;
        self->handoff_thread = THREAD_NULL;
        handoff_option = THREAD_HANDOFF_SETRUN_NEEDED;
    }

    ipc_eventlink_unlock(wait_eventlink);

    wr = thread_handoff_deallocate(handoff_thread, handoff_option);
    kr = ipc_eventlink_convert_wait_result(wr);

    assert(self->handoff_thread == THREAD_NULL);

    /* Increment the count value if eventlink_signal was called */
    if (kr == KERN_SUCCESS) {
        *count += 1;
    } else {
        *count = old_signal_count;
    }

    return kr;

unlock:
    ipc_eventlink_unlock(wait_eventlink);
    assert(self->handoff_thread == THREAD_NULL);

    return kr;
}
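
/*
 * Worked example of the wait path above (illustration only): with
 * el_sync_counter == 5, a caller passing *count == 4 returns immediately
 * with *count == 5 and no block. Passing *count == 5 parks the thread with
 * el_wait_counter == 5; a later signal moves el_sync_counter to 6,
 * satisfies 6 > 5, and the woken waiter returns KERN_SUCCESS with
 * *count == 6 (the passed count plus one).
 */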
/*
 * Name: ipc_eventlink_convert_wait_result
 *
 * Description: Convert wait result to return value
 *
 * Args:
 *    wait_result: result from thread handoff
 *
 * Returns:
 *    KERN_SUCCESS on Success.
 */
static kern_return_t
ipc_eventlink_convert_wait_result(int wait_result)
{
    switch (wait_result) {
    case THREAD_AWAKENED:
        return KERN_SUCCESS;

    case THREAD_TIMED_OUT:
        return KERN_OPERATION_TIMED_OUT;

    case THREAD_INTERRUPTED:
        return KERN_ABORTED;

    case THREAD_RESTART:
        return KERN_TERMINATED;

    default:
        panic("ipc_eventlink_wait_block\n");
    }
}
/*
 * Name: ipc_eventlink_signal_internal_locked
 *
 * Description: Increment the sync count of eventlink and
 * wake up the thread waiting if sync counter is greater
 * than the wait counter.
 *
 * Args:
 *    eventlink: eventlink
 *    ipc_eventlink_option_t: options
 *
 * Returns:
 *    KERN_SUCCESS on Success.
 */
static kern_return_t
ipc_eventlink_signal_internal_locked(
    struct ipc_eventlink *signal_eventlink,
    ipc_eventlink_option_t eventlink_option)
{
    kern_return_t kr = KERN_NOT_WAITING;
    struct ipc_eventlink_base *ipc_eventlink_base = signal_eventlink->el_base;

    if (eventlink_option & IPC_EVENTLINK_FORCE_WAKEUP) {
        /* Adjust the wait counter */
        signal_eventlink->el_wait_counter = UINT64_MAX;

        kr = waitq_wakeup64_all_locked(
            &ipc_eventlink_base->elb_waitq,
            CAST_EVENT64_T(signal_eventlink),
            THREAD_RESTART, NULL,
            WAITQ_ALL_PRIORITIES,
            WAITQ_KEEP_LOCKED);

        return kr;
    }

    /* Increment the eventlink sync count */
    signal_eventlink->el_sync_counter++;

    /* Check if thread needs to be woken up */
    if (signal_eventlink->el_sync_counter > signal_eventlink->el_wait_counter) {
        waitq_options_t wq_option = (eventlink_option & IPC_EVENTLINK_HANDOFF) ?
            WQ_OPTION_HANDOFF : WQ_OPTION_NONE;

        /* Adjust the wait counter */
        signal_eventlink->el_wait_counter = UINT64_MAX;

        kr = waitq_wakeup64_one_locked(
            &ipc_eventlink_base->elb_waitq,
            CAST_EVENT64_T(signal_eventlink),
            THREAD_AWAKENED, NULL,
            WAITQ_ALL_PRIORITIES,
            WAITQ_KEEP_LOCKED,
            wq_option);
    }

    return kr;
}
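
/*
 * Worked example of the signal path above (illustration only):
 * el_wait_counter == UINT64_MAX means no waiter, so a signal just bumps
 * el_sync_counter. If a waiter parked with el_wait_counter == 3, the
 * signal that moves el_sync_counter from 3 to 4 satisfies 4 > 3, resets
 * el_wait_counter to UINT64_MAX, and wakes exactly one thread, with
 * handoff when IPC_EVENTLINK_HANDOFF is set.
 */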
/*
 * Name: ipc_eventlink_reference
 *
 * Description: Increment ref on ipc eventlink struct
 *
 * Args:
 *    eventlink: eventlink
 *
 * Returns: None.
 */
void
ipc_eventlink_reference(
    struct ipc_eventlink *ipc_eventlink)
{
    os_ref_retain(&ipc_eventlink->el_base->elb_ref_count);
}
/*
 * Name: ipc_eventlink_deallocate
 *
 * Description: Decrement ref on ipc eventlink struct
 *
 * Args:
 *    eventlink: eventlink
 *
 * Returns: None.
 */
void
ipc_eventlink_deallocate(
    struct ipc_eventlink *ipc_eventlink)
{
    if (ipc_eventlink == IPC_EVENTLINK_NULL) {
        return;
    }

    struct ipc_eventlink_base *ipc_eventlink_base = ipc_eventlink->el_base;

    if (os_ref_release(&ipc_eventlink_base->elb_ref_count) > 0) {
        return;
    }

    assert(!ipc_eventlink_active(ipc_eventlink));

#if DEVELOPMENT || DEBUG
    /* Remove ipc_eventlink from global list */
    global_ipc_eventlink_lock();
    queue_remove(&ipc_eventlink_list, ipc_eventlink_base,
        struct ipc_eventlink_base *, elb_global_elm);
    global_ipc_eventlink_unlock();
#endif /* DEVELOPMENT || DEBUG */

    zfree(ipc_eventlink_zone, ipc_eventlink_base);
}
/*
 * Name: convert_port_to_eventlink
 *
 * Description: Convert from a port name in the current
 * space to an ipc eventlink. Produces an ipc eventlink ref,
 * which may be null.
 *
 * Args:
 *    mach_port_t: eventlink port
 *
 * Returns:
 *    ipc_eventlink on Success.
 */
struct ipc_eventlink *
convert_port_to_eventlink(
    mach_port_t port)
{
    struct ipc_eventlink *ipc_eventlink = IPC_EVENTLINK_NULL;

    if (IP_VALID(port)) {
        ip_lock(port);
        convert_port_to_eventlink_locked(port, &ipc_eventlink);
        ip_unlock(port);
    }

    return ipc_eventlink;
}
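
/*
 * Illustrative kernel-side usage sketch (illustration only): callers own
 * the reference produced by the converter above and must drop it.
 *
 *    struct ipc_eventlink *ipc_eventlink = convert_port_to_eventlink(port);
 *    if (ipc_eventlink != IPC_EVENTLINK_NULL) {
 *        // ... use the eventlink ...
 *        ipc_eventlink_deallocate(ipc_eventlink);
 *    }
 */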
/*
 * Name: convert_port_to_eventlink_locked
 *
 * Description: Convert from a port name in the current
 * space to an ipc eventlink. Produces an ipc eventlink ref,
 * which may be null.
 *
 * Args:
 *    mach_port_name_t: eventlink port name
 *    ipc_eventlink_ptr: pointer to return ipc_eventlink.
 *
 * Returns:
 *    KERN_SUCCESS on Success.
 *    KERN_TERMINATED on inactive eventlink.
 */
static kern_return_t
convert_port_to_eventlink_locked(
    ipc_port_t port,
    struct ipc_eventlink **ipc_eventlink_ptr)
{
    kern_return_t kr = KERN_INVALID_CAPABILITY;
    struct ipc_eventlink *ipc_eventlink = IPC_EVENTLINK_NULL;

    if (ip_active(port) &&
        ip_kotype(port) == IKOT_EVENTLINK) {
        ipc_eventlink = (struct ipc_eventlink *)port->ip_kobject;

        if (ipc_eventlink) {
            ipc_eventlink_reference(ipc_eventlink);
            kr = KERN_SUCCESS;
        } else {
            kr = KERN_TERMINATED;
        }
    }

    *ipc_eventlink_ptr = ipc_eventlink;

    return kr;
}
/*
 * Name: port_name_to_eventlink
 *
 * Description: Convert from a port name in the current
 * space to an ipc eventlink. Produces an ipc eventlink ref,
 * which may be null.
 *
 * Args:
 *    mach_port_name_t: eventlink port name
 *    ipc_eventlink_ptr: ptr to pass eventlink struct
 *
 * Returns:
 *    KERN_SUCCESS on Success.
 */
static kern_return_t
port_name_to_eventlink(
    mach_port_name_t name,
    struct ipc_eventlink **ipc_eventlink_ptr)
{
    ipc_port_t kern_port;
    kern_return_t kr;

    if (!MACH_PORT_VALID(name)) {
        *ipc_eventlink_ptr = IPC_EVENTLINK_NULL;
        return KERN_INVALID_NAME;
    }

    kr = ipc_port_translate_send(current_space(), name, &kern_port);
    if (kr != KERN_SUCCESS) {
        *ipc_eventlink_ptr = IPC_EVENTLINK_NULL;
        return kr;
    }
    /* have the port locked */
    assert(IP_VALID(kern_port));

    kr = convert_port_to_eventlink_locked(kern_port, ipc_eventlink_ptr);
    ip_unlock(kern_port);

    return kr;
}
/*
 * Name: ipc_eventlink_notify
 *
 * Description: Destroy an ipc_eventlink, wakeup all threads.
 *
 * Args:
 *    msg: msg containing eventlink port
 *
 * Returns: None.
 */
void
ipc_eventlink_notify(
    mach_msg_header_t *msg)
{
    kern_return_t kr;
    mach_no_senders_notification_t *notification = (void *)msg;
    ipc_port_t port = notification->not_header.msgh_remote_port;
    struct ipc_eventlink *ipc_eventlink;

    if (!ip_active(port)) {
        return;
    }

    /* Get ipc_eventlink reference */
    ip_lock(port);

    /* Make sure port is still active */
    if (!ip_active(port)) {
        ip_unlock(port);
        return;
    }

    convert_port_to_eventlink_locked(port, &ipc_eventlink);
    ip_unlock(port);

    kr = ipc_eventlink_destroy_internal(ipc_eventlink);
    if (kr == KERN_TERMINATED) {
        /* eventlink is already inactive, destroy the port */
        ipc_port_dealloc_kernel(port);
    }

    /* Drop the reference returned by convert_port_to_eventlink_locked */
    ipc_eventlink_deallocate(ipc_eventlink);
}
#define WAITQ_TO_EVENTLINK(wq) \
    ((struct ipc_eventlink_base *)((uintptr_t)(wq) - offsetof(struct ipc_eventlink_base, elb_waitq)))
/*
 * Name: kdp_eventlink_find_owner
 *
 * Description: Find who will signal the waiting thread.
 *
 * Args:
 *    waitq: eventlink waitq
 *    wait_event: eventlink wait event
 *    waitinfo: waitinfo struct
 *
 * Returns: None.
 */
void
kdp_eventlink_find_owner(
    struct waitq *waitq,
    event64_t event,
    thread_waitinfo_t *waitinfo)
{
    assert(waitinfo->wait_type == kThreadWaitEventlink);
    waitinfo->owner = 0;
    waitinfo->context = 0;

    if (waitq_held(waitq)) {
        return;
    }

    struct ipc_eventlink_base *ipc_eventlink_base = WAITQ_TO_EVENTLINK(waitq);

    if (event == CAST_EVENT64_T(&ipc_eventlink_base->elb_eventlink[0])) {
        /* Use the other end of eventlink for signal thread */
        if (ipc_eventlink_base->elb_eventlink[1].el_thread != THREAD_ASSOCIATE_WILD) {
            waitinfo->owner = thread_tid(ipc_eventlink_base->elb_eventlink[1].el_thread);
        } else {
            waitinfo->owner = 0;
        }
    } else if (event == CAST_EVENT64_T(&ipc_eventlink_base->elb_eventlink[1])) {
        /* Use the other end of eventlink for signal thread */
        if (ipc_eventlink_base->elb_eventlink[0].el_thread != THREAD_ASSOCIATE_WILD) {
            waitinfo->owner = thread_tid(ipc_eventlink_base->elb_eventlink[0].el_thread);
        } else {
            waitinfo->owner = 0;
        }
    }

    return;
}