/*
 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */
/*
 *  File:   ipc/ipc_port.c
 *
 *  Functions to manipulate IPC ports.
 */
#include <mach_assert.h>

#include <mach/port.h>
#include <mach/kern_return.h>
#include <kern/ipc_kobject.h>
#include <kern/thread.h>
#include <kern/misc_protos.h>
#include <kern/waitq.h>
#include <kern/policy_internal.h>
#include <kern/debug.h>
#include <kern/kcdata.h>
#include <ipc/ipc_entry.h>
#include <ipc/ipc_space.h>
#include <ipc/ipc_object.h>
#include <ipc/ipc_right.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_pset.h>
#include <ipc/ipc_kmsg.h>
#include <ipc/ipc_mqueue.h>
#include <ipc/ipc_notify.h>
#include <ipc/ipc_table.h>
#include <ipc/ipc_importance.h>
#include <machine/limits.h>
#include <kern/turnstile.h>
#include <kern/machine.h>

#include <security/mac_mach_internal.h>
static TUNABLE(bool, prioritize_launch, "prioritize_launch", true);
TUNABLE_WRITEABLE(int, ipc_portbt, "ipc_portbt", false);

LCK_SPIN_DECLARE_ATTR(ipc_port_multiple_lock_data, &ipc_lck_grp, &ipc_lck_attr);
ipc_port_timestamp_t ipc_port_timestamp_data;
#if MACH_ASSERT
void ipc_port_init_debug(
    ipc_port_t      port,
    uintptr_t       *callstack,
    unsigned int    callstack_max);

void ipc_port_callstack_init_debug(
    uintptr_t       *callstack,
    unsigned int    callstack_max);

#endif /* MACH_ASSERT */
static void ipc_port_send_turnstile_recompute_push_locked(
    ipc_port_t port);

static thread_t ipc_port_get_watchport_inheritor(
    ipc_port_t port);

void
ipc_port_release(ipc_port_t port)
{
    ip_release(port);
}

void
ipc_port_reference(ipc_port_t port)
{
    ip_reference(port);
}
/*
 *  Routine:    ipc_port_timestamp
 *  Purpose:
 *      Retrieve a timestamp value.
 */

ipc_port_timestamp_t
ipc_port_timestamp(void)
{
    return OSIncrementAtomic(&ipc_port_timestamp_data);
}
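/*
 * Editorial sketch (not from the original source): the counter above is
 * stamped into ip_timestamp when a port dies (see ipc_port_destroy), which
 * lets racing callers order an observation against the port's death; the
 * IP_TIMESTAMP_ORDER() comparison in ipc_port.h is assumed here.
 */
#if 0
    ipc_port_timestamp_t before = ipc_port_timestamp();
    /* ... capability check races with port death ... */
    if (IP_TIMESTAMP_ORDER(port->ip_timestamp, before)) {
        /* the port died before the observation was made */
    }
#endif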
/*
 *  Routine:    ipc_port_request_alloc
 *  Purpose:
 *      Try to allocate a request slot.
 *      If successful, returns the request index.
 *      Otherwise returns zero.
 *  Conditions:
 *      The port is locked and active.
 *  Returns:
 *      KERN_SUCCESS        A request index was found.
 *      KERN_NO_SPACE       No index allocated.
 */

#if IMPORTANCE_INHERITANCE
kern_return_t
ipc_port_request_alloc(
    ipc_port_t                  port,
    mach_port_name_t            name,
    ipc_port_t                  soright,
    boolean_t                   send_possible,
    boolean_t                   immediate,
    ipc_port_request_index_t    *indexp,
    boolean_t                   *importantp)
#else
kern_return_t
ipc_port_request_alloc(
    ipc_port_t                  port,
    mach_port_name_t            name,
    ipc_port_t                  soright,
    boolean_t                   send_possible,
    boolean_t                   immediate,
    ipc_port_request_index_t    *indexp)
#endif /* IMPORTANCE_INHERITANCE */
{
    ipc_port_request_t ipr, table;
    ipc_port_request_index_t index;
    uintptr_t mask = 0;

#if IMPORTANCE_INHERITANCE
    *importantp = FALSE;
#endif /* IMPORTANCE_INHERITANCE */

    require_ip_active(port);
    assert(name != MACH_PORT_NULL);
    assert(soright != IP_NULL);

    table = port->ip_requests;

    if (table == IPR_NULL) {
        return KERN_NO_SPACE;
    }

    index = table->ipr_next;
    if (index == 0) {
        return KERN_NO_SPACE;
    }

    ipr = &table[index];
    assert(ipr->ipr_name == MACH_PORT_NULL);

    table->ipr_next = ipr->ipr_next;
    ipr->ipr_name = name;

    if (send_possible) {
        mask |= IPR_SOR_SPREQ_MASK;
        if (immediate) {
            mask |= IPR_SOR_SPARM_MASK;
            if (port->ip_sprequests == 0) {
                port->ip_sprequests = 1;
#if IMPORTANCE_INHERITANCE
                /* TODO: Live importance support in send-possible */
                if (port->ip_impdonation != 0 &&
                    port->ip_spimportant == 0 &&
                    (task_is_importance_donor(current_task()))) {
                    *importantp = TRUE;
                }
#endif /* IMPORTANCE_INHERITANCE */
            }
        }
    }

    ipr->ipr_soright = IPR_SOR_MAKE(soright, mask);

    *indexp = index;

    return KERN_SUCCESS;
}
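/*
 * Editorial usage sketch: the slot allocated above ultimately backs a
 * userspace notification registration such as this dead-name request
 * (the registration API is the standard one; the snippet is illustrative
 * only):
 */
#if 0
    mach_port_t previous = MACH_PORT_NULL;
    kern_return_t kr = mach_port_request_notification(mach_task_self(),
        name, MACH_NOTIFY_DEAD_NAME, 0, notify_port,
        MACH_MSG_TYPE_MAKE_SEND_ONCE, &previous);
    /* when `name` dies, notify_port receives a dead-name notification */
#endif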
/*
 *  Routine:    ipc_port_request_grow
 *  Purpose:
 *      Grow a port's table of requests.
 *  Conditions:
 *      The port must be locked and active.
 *      Nothing else locked; will allocate memory.
 *      Upon return the port is unlocked.
 *  Returns:
 *      KERN_SUCCESS            Grew the table.
 *      KERN_SUCCESS            Somebody else grew the table.
 *      KERN_SUCCESS            The port died.
 *      KERN_RESOURCE_SHORTAGE  Couldn't allocate new table.
 *      KERN_NO_SPACE           Couldn't grow to desired size.
 */

kern_return_t
ipc_port_request_grow(
    ipc_port_t          port,
    ipc_table_elems_t   target_size)
{
    ipc_table_size_t its;
    ipc_port_request_t otable, ntable;
    require_ip_active(port);

    otable = port->ip_requests;
    if (otable == IPR_NULL) {
        its = &ipc_table_requests[0];
    } else {
        its = otable->ipr_size + 1;
    }

    if (target_size != ITS_SIZE_NONE) {
        if ((otable != IPR_NULL) &&
            (target_size <= otable->ipr_size->its_size)) {
            ip_unlock(port);
            return KERN_SUCCESS;
        }
        while ((its->its_size) && (its->its_size < target_size)) {
            its++;
        }
        if (its->its_size == 0) {
            ip_unlock(port);
            return KERN_NO_SPACE;
        }
    }

    ip_reference(port);
    ip_unlock(port);

    if ((its->its_size == 0) ||
        ((ntable = it_requests_alloc(its)) == IPR_NULL)) {
        ip_release(port);
        return KERN_RESOURCE_SHORTAGE;
    }

    ip_lock(port);

    /*
     *  Check that port is still active and that nobody else
     *  has slipped in and grown the table on us.  Note that
     *  just checking if the current table pointer == otable
     *  isn't sufficient; must check ipr_size.
     */

    if (ip_active(port) && (port->ip_requests == otable) &&
        ((otable == IPR_NULL) || (otable->ipr_size + 1 == its))) {
        ipc_table_size_t oits;
        ipc_table_elems_t osize, nsize;
        ipc_port_request_index_t free, i;

        /* copy old table to new table */

        if (otable != IPR_NULL) {
            oits = otable->ipr_size;
            osize = oits->its_size;
            free = otable->ipr_next;

            (void) memcpy((void *)(ntable + 1),
                (const void *)(otable + 1),
                (osize - 1) * sizeof(struct ipc_port_request));
        } else {
            osize = 1;
            oits = 0;
            free = 0;
        }

        nsize = its->its_size;
        assert(nsize > osize);

        /* add new elements to the new table's free list */

        for (i = osize; i < nsize; i++) {
            ipc_port_request_t ipr = &ntable[i];

            ipr->ipr_name = MACH_PORT_NULL;
            ipr->ipr_next = free;
            free = i;
        }

        ntable->ipr_next = free;
        ntable->ipr_size = its;
        port->ip_requests = ntable;
        ip_unlock(port);
        ip_release(port);

        if (otable != IPR_NULL) {
            it_requests_free(oits, otable);
        }
    } else {
        ip_unlock(port);
        ip_release(port);
        it_requests_free(its, ntable);
    }

    return KERN_SUCCESS;
}
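/*
 * Editorial sketch of the caller-side retry loop (the real one lives,
 * roughly, in ipc_right_request_alloc(); this version is illustrative
 * only). Because grow unlocks the port, the caller must relock and
 * retry the allocation.
 */
#if 0
    for (;;) {
        ip_lock(port);
        kr = ipc_port_request_alloc(port, name, soright,
            send_possible, immediate, &index, &important);
        if (kr != KERN_NO_SPACE) {
            break;          /* port still locked on success */
        }
        kr = ipc_port_request_grow(port, ITS_SIZE_NONE);
        /* port unlocked by grow; bail out or loop and retry */
        if (kr != KERN_SUCCESS) {
            return kr;
        }
    }
#endif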
/*
 *  Routine:    ipc_port_request_sparm
 *  Purpose:
 *      Arm delayed send-possible request.
 *  Conditions:
 *      The port must be locked and active.
 *
 *      Returns TRUE if the request was armed
 *      (or armed with importance in that version).
 */

boolean_t
ipc_port_request_sparm(
    ipc_port_t                      port,
    __assert_only mach_port_name_t  name,
    ipc_port_request_index_t        index,
    mach_msg_option_t               option,
    mach_msg_priority_t             priority)
{
    if (index != IE_REQ_NONE) {
        ipc_port_request_t ipr, table;

        require_ip_active(port);

        table = port->ip_requests;
        assert(table != IPR_NULL);

        ipr = &table[index];
        assert(ipr->ipr_name == name);

        /* Is there a valid destination? */
        if (IPR_SOR_SPREQ(ipr->ipr_soright)) {
            ipr->ipr_soright = IPR_SOR_MAKE(ipr->ipr_soright, IPR_SOR_SPARM_MASK);
            port->ip_sprequests = 1;

            if (option & MACH_SEND_OVERRIDE) {
                /* apply override to message queue */
                mach_msg_qos_t qos_ovr;
                if (mach_msg_priority_is_pthread_priority(priority)) {
                    qos_ovr = _pthread_priority_thread_qos(priority);
                } else {
                    qos_ovr = mach_msg_priority_overide_qos(priority);
                }
                if (qos_ovr) {
                    ipc_mqueue_override_send(&port->ip_messages, qos_ovr);
                }
            }

#if IMPORTANCE_INHERITANCE
            if (((option & MACH_SEND_NOIMPORTANCE) == 0) &&
                (port->ip_impdonation != 0) &&
                (port->ip_spimportant == 0) &&
                (((option & MACH_SEND_IMPORTANCE) != 0) ||
                (task_is_importance_donor(current_task())))) {
                return TRUE;
            }
#else
            return TRUE;
#endif /* IMPORTANCE_INHERITANCE */
        }
    }
    return FALSE;
}
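/*
 * Editorial usage sketch: user code registers a send-possible request,
 * and the kernel later arms it through the routine above when a send
 * cannot be accepted immediately. The registration API is the standard
 * one; the snippet is illustrative only.
 */
#if 0
    mach_port_t previous = MACH_PORT_NULL;
    kern_return_t kr = mach_port_request_notification(mach_task_self(),
        dest, MACH_NOTIFY_SEND_POSSIBLE, 0, notify_port,
        MACH_MSG_TYPE_MAKE_SEND_ONCE, &previous);
    /* after a timed-out send, a MACH_NOTIFY_SEND_POSSIBLE message
     * arrives on notify_port once sending can succeed again */
#endif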
/*
 *  Routine:    ipc_port_request_type
 *  Purpose:
 *      Determine the type(s) of port requests enabled for a name.
 *  Conditions:
 *      The port must be locked or inactive (to avoid table growth).
 *      The index must not be IE_REQ_NONE and must correspond to the
 *      name in question.
 */

mach_port_type_t
ipc_port_request_type(
    ipc_port_t                      port,
    __assert_only mach_port_name_t  name,
    ipc_port_request_index_t        index)
{
    ipc_port_request_t ipr, table;
    mach_port_type_t type = 0;

    table = port->ip_requests;
    assert(table != IPR_NULL);

    assert(index != IE_REQ_NONE);
    ipr = &table[index];
    assert(ipr->ipr_name == name);

    if (IP_VALID(IPR_SOR_PORT(ipr->ipr_soright))) {
        type |= MACH_PORT_TYPE_DNREQUEST;

        if (IPR_SOR_SPREQ(ipr->ipr_soright)) {
            type |= MACH_PORT_TYPE_SPREQUEST;

            if (!IPR_SOR_SPARMED(ipr->ipr_soright)) {
                type |= MACH_PORT_TYPE_SPREQUEST_DELAYED;
            }
        }
    }
    return type;
}
/*
 *  Routine:    ipc_port_request_cancel
 *  Purpose:
 *      Cancel a dead-name/send-possible request and return the send-once right.
 *  Conditions:
 *      The port must be locked and active.
 *      The index must not be IPR_REQ_NONE and must correspond with name.
 */

ipc_port_t
ipc_port_request_cancel(
    ipc_port_t                      port,
    __assert_only mach_port_name_t  name,
    ipc_port_request_index_t        index)
{
    ipc_port_request_t ipr, table;
    ipc_port_t request = IP_NULL;

    require_ip_active(port);
    table = port->ip_requests;
    assert(table != IPR_NULL);

    assert(index != IE_REQ_NONE);
    ipr = &table[index];
    assert(ipr->ipr_name == name);
    request = IPR_SOR_PORT(ipr->ipr_soright);

    /* return ipr to the free list inside the table */
    ipr->ipr_name = MACH_PORT_NULL;
    ipr->ipr_next = table->ipr_next;
    table->ipr_next = index;

    return request;
}
/*
 *  Routine:    ipc_port_pdrequest
 *  Purpose:
 *      Make a port-deleted request, returning the
 *      previously registered send-once right.
 *      Just cancels the previous request if notify is IP_NULL.
 *  Conditions:
 *      The port is locked and active.  It is unlocked.
 *      Consumes a ref for notify (if non-null), and
 *      returns previous with a ref (if non-null).
 */

void
ipc_port_pdrequest(
    ipc_port_t  port,
    ipc_port_t  notify,
    ipc_port_t  *previousp)
{
    ipc_port_t previous;
    require_ip_active(port);

    previous = port->ip_pdrequest;
    port->ip_pdrequest = notify;
    ip_unlock(port);

    *previousp = previous;
}
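/*
 * Editorial usage sketch: ip_pdrequest is the kernel side of a
 * port-destroyed notification registration (real API, sketch only):
 */
#if 0
    mach_port_t previous = MACH_PORT_NULL;
    kern_return_t kr = mach_port_request_notification(mach_task_self(),
        port_name, MACH_NOTIFY_PORT_DESTROYED, 0, backup_port,
        MACH_MSG_TYPE_MAKE_SEND_ONCE, &previous);
    /* if port_name's receive right is destroyed, it is sent to
     * backup_port in a port-destroyed notification instead */
#endif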
/*
 *  Routine:    ipc_port_nsrequest
 *  Purpose:
 *      Make a no-senders request, returning the
 *      previously registered send-once right.
 *      Just cancels the previous request if notify is IP_NULL.
 *  Conditions:
 *      The port is locked and active.  It is unlocked.
 *      Consumes a ref for notify (if non-null), and
 *      returns previous with a ref (if non-null).
 */

void
ipc_port_nsrequest(
    ipc_port_t          port,
    mach_port_mscount_t sync,
    ipc_port_t          notify,
    ipc_port_t          *previousp)
{
    ipc_port_t previous;
    mach_port_mscount_t mscount;
    require_ip_active(port);

    previous = port->ip_nsrequest;
    mscount = port->ip_mscount;

    if ((port->ip_srights == 0) && (sync <= mscount) &&
        (notify != IP_NULL)) {
        port->ip_nsrequest = IP_NULL;
        ip_unlock(port);
        ipc_notify_no_senders(notify, mscount);
    } else {
        port->ip_nsrequest = notify;
        ip_unlock(port);
    }

    *previousp = previous;
}
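/*
 * Editorial usage sketch: the classic MIG-server no-senders pattern.
 * `sync` is compared against the port's make-send count so that stale
 * notifications can be suppressed (real API, sketch only):
 */
#if 0
    mach_port_t previous = MACH_PORT_NULL;
    kern_return_t kr = mach_port_request_notification(mach_task_self(),
        service_port, MACH_NOTIFY_NO_SENDERS, 0 /* sync */, service_port,
        MACH_MSG_TYPE_MAKE_SEND_ONCE, &previous);
    /* a no-senders message arrives on service_port when the last
     * send right goes away */
#endif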
/*
 *  Routine:    ipc_port_clear_receiver
 *  Purpose:
 *      Prepares a receive right for transmission/destruction,
 *      optionally performs mqueue destruction (with port lock held)
 *
 *  Conditions:
 *      The port is locked and active.
 *  Returns:
 *      If should_destroy is TRUE, then the return value indicates
 *      whether the caller needs to reap kmsg structures that should
 *      be destroyed (by calling ipc_kmsg_reap_delayed)
 *
 *      If should_destroy is FALSE, this always returns FALSE
 */

boolean_t
ipc_port_clear_receiver(
    ipc_port_t  port,
    boolean_t   should_destroy)
{
    ipc_mqueue_t    mqueue = &port->ip_messages;
    boolean_t       reap_messages = FALSE;

    /*
     * Pull ourselves out of any sets to which we belong.
     * We hold the port locked, so even though this acquires and releases
     * the mqueue lock, we know we won't be added to any other sets.
     */
    if (port->ip_in_pset != 0) {
        ipc_pset_remove_from_all(port);
        assert(port->ip_in_pset == 0);
    }

    /*
     * Send anyone waiting on the port's queue directly away.
     * Also clear the mscount, seqno, guard bits
     */
    imq_lock(mqueue);
    if (port->ip_receiver_name) {
        ipc_mqueue_changed(port->ip_receiver, mqueue);
    } else {
        ipc_mqueue_changed(NULL, mqueue);
    }
    port->ip_mscount = 0;
    mqueue->imq_seqno = 0;
    port->ip_context = port->ip_guarded = port->ip_strict_guard = 0;
    /*
     * clear the immovable bit so the port can move back to anyone listening
     * for the port destroy notification
     */
    port->ip_immovable_receive = 0;

    if (should_destroy) {
        /*
         * Mark the port and mqueue invalid, preventing further send/receive
         * operations from succeeding. It's important for this to be
         * done under the same lock hold as the ipc_mqueue_changed
         * call to avoid additional threads blocking on an mqueue
         * that's being destroyed.
         *
         * The port active bit needs to be guarded under mqueue lock for
         * turnstiles
         */
        port->ip_object.io_bits &= ~IO_BITS_ACTIVE;
        port->ip_timestamp = ipc_port_timestamp();
        reap_messages = ipc_mqueue_destroy_locked(mqueue);
    } else {
        /* make port be in limbo */
        port->ip_receiver_name = MACH_PORT_NULL;
        port->ip_destination = IP_NULL;
    }

    imq_unlock(&port->ip_messages);

    return reap_messages;
}
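/*
 * Editorial sketch: the destroy path pairs this routine with delayed
 * kmsg reaping, roughly as ipc_port_destroy() does below:
 */
#if 0
    boolean_t reap = ipc_port_clear_receiver(port, TRUE);
    /* ... drop the port lock ... */
    if (reap) {
        ipc_kmsg_reap_delayed();
    }
#endif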
/*
 *  Routine:    ipc_port_init
 *  Purpose:
 *      Initializes a newly-allocated port.
 *      Doesn't touch the ip_object fields.
 *
 *      The memory is expected to be zero initialized (allocated with Z_ZERO).
 */

void
ipc_port_init(
    ipc_port_t              port,
    ipc_space_t             space,
    ipc_port_init_flags_t   flags,
    mach_port_name_t        name)
{
    /* port->ip_kobject doesn't have to be initialized */

    port->ip_receiver = space;
    port->ip_receiver_name = name;

    if (flags & IPC_PORT_INIT_MAKE_SEND_RIGHT) {
        port->ip_srights = 1;
        port->ip_mscount = 1;
    }

    if (flags & IPC_PORT_INIT_FILTER_MESSAGE) {
        port->ip_object.io_bits |= IP_BIT_FILTER_MSG;
    }

    port->ip_tg_block_tracking = (flags & IPC_PORT_INIT_TG_BLOCK_TRACKING) != 0;

    if (flags & IPC_PORT_INIT_SPECIAL_REPLY) {
        port->ip_specialreply = true;
        port->ip_immovable_receive = true;
    }

    port->ip_sync_link_state = PORT_SYNC_LINK_ANY;

    ipc_mqueue_kind_t kind = IPC_MQUEUE_KIND_NONE;
    if (flags & IPC_PORT_INIT_MESSAGE_QUEUE) {
        kind = IPC_MQUEUE_KIND_PORT;
    }
    ipc_mqueue_init(&port->ip_messages, kind);
}
/*
 *  Routine:    ipc_port_alloc
 *  Purpose:
 *      Allocate a port.
 *  Conditions:
 *      Nothing locked.  If successful, the port is returned
 *      locked.  (The caller doesn't have a reference.)
 *  Returns:
 *      KERN_SUCCESS            The port is allocated.
 *      KERN_INVALID_TASK       The space is dead.
 *      KERN_NO_SPACE           No room for an entry in the space.
 *      KERN_RESOURCE_SHORTAGE  Couldn't allocate memory.
 */

kern_return_t
ipc_port_alloc(
    ipc_space_t             space,
    ipc_port_init_flags_t   flags,
    mach_port_name_t        *namep,
    ipc_port_t              *portp)
{
    ipc_port_t port;
    mach_port_name_t name;
    kern_return_t kr;
    mach_port_type_t type = MACH_PORT_TYPE_RECEIVE;
    mach_port_urefs_t urefs = 0;

#if MACH_ASSERT
    uintptr_t buf[IP_CALLSTACK_MAX];
    ipc_port_callstack_init_debug(&buf[0], IP_CALLSTACK_MAX);
#endif /* MACH_ASSERT */

    if (flags & IPC_PORT_INIT_MAKE_SEND_RIGHT) {
        type |= MACH_PORT_TYPE_SEND;
        urefs = 1;
    }
    kr = ipc_object_alloc(space, IOT_PORT, type, urefs,
        &name, (ipc_object_t *) &port);
    if (kr != KERN_SUCCESS) {
        return kr;
    }

    /* port and space are locked */
    ipc_port_init(port, space, flags, name);

#if MACH_ASSERT
    ipc_port_init_debug(port, &buf[0], IP_CALLSTACK_MAX);
#endif /* MACH_ASSERT */

    /* unlock space after init */
    is_write_unlock(space);

    *namep = name;
    *portp = port;

    return KERN_SUCCESS;
}
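/*
 * Editorial sketch: this routine is the kernel half of a userspace
 * mach_port_allocate() call, e.g.:
 */
#if 0
    mach_port_t rcv = MACH_PORT_NULL;
    kern_return_t kr = mach_port_allocate(mach_task_self(),
        MACH_PORT_RIGHT_RECEIVE, &rcv);
    /* on KERN_SUCCESS, rcv names a new receive right in this task */
#endif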
/*
 *  Routine:    ipc_port_alloc_name
 *  Purpose:
 *      Allocate a port, with a specific name.
 *  Conditions:
 *      Nothing locked.  If successful, the port is returned
 *      locked.  (The caller doesn't have a reference.)
 *  Returns:
 *      KERN_SUCCESS            The port is allocated.
 *      KERN_INVALID_TASK       The space is dead.
 *      KERN_NAME_EXISTS        The name already denotes a right.
 *      KERN_RESOURCE_SHORTAGE  Couldn't allocate memory.
 */

kern_return_t
ipc_port_alloc_name(
    ipc_space_t             space,
    ipc_port_init_flags_t   flags,
    mach_port_name_t        name,
    ipc_port_t              *portp)
{
    ipc_port_t port;
    kern_return_t kr;
    mach_port_type_t type = MACH_PORT_TYPE_RECEIVE;
    mach_port_urefs_t urefs = 0;

#if MACH_ASSERT
    uintptr_t buf[IP_CALLSTACK_MAX];
    ipc_port_callstack_init_debug(&buf[0], IP_CALLSTACK_MAX);
#endif /* MACH_ASSERT */

    if (flags & IPC_PORT_INIT_MAKE_SEND_RIGHT) {
        type |= MACH_PORT_TYPE_SEND;
        urefs = 1;
    }
    kr = ipc_object_alloc_name(space, IOT_PORT, type, urefs,
        name, (ipc_object_t *) &port);
    if (kr != KERN_SUCCESS) {
        return kr;
    }

    /* port is locked */

    ipc_port_init(port, space, flags, name);

#if MACH_ASSERT
    ipc_port_init_debug(port, &buf[0], IP_CALLSTACK_MAX);
#endif /* MACH_ASSERT */

    *portp = port;

    return KERN_SUCCESS;
}
/*
 *  Routine:    ipc_port_spnotify
 *  Purpose:
 *      Generate send-possible port notifications.
 *  Conditions:
 *      Nothing locked, reference held on port.
 */
void
ipc_port_spnotify(
    ipc_port_t  port)
{
    ipc_port_request_index_t index = 0;
    ipc_table_elems_t size = 0;

    /*
     * If the port has no send-possible request
     * armed, don't bother to lock the port.
     */
    if (port->ip_sprequests == 0) {
        return;
    }

    ip_lock(port);

#if IMPORTANCE_INHERITANCE
    if (port->ip_spimportant != 0) {
        port->ip_spimportant = 0;
        if (ipc_port_importance_delta(port, IPID_OPTION_NORMAL, -1) == TRUE) {
            ip_lock(port);
        }
    }
#endif /* IMPORTANCE_INHERITANCE */

    if (port->ip_sprequests == 0) {
        ip_unlock(port);
        return;
    }
    port->ip_sprequests = 0;

revalidate:
    if (ip_active(port)) {
        ipc_port_request_t requests;

        /* table may change each time port unlocked (reload) */
        requests = port->ip_requests;
        assert(requests != IPR_NULL);

        /*
         * no need to go beyond table size when first
         * we entered - those are future notifications.
         */
        if (size == 0) {
            size = requests->ipr_size->its_size;
        }

        /* no need to backtrack either */
        while (++index < size) {
            ipc_port_request_t ipr = &requests[index];
            mach_port_name_t name = ipr->ipr_name;
            ipc_port_t soright = IPR_SOR_PORT(ipr->ipr_soright);
            boolean_t armed = IPR_SOR_SPARMED(ipr->ipr_soright);

            if (MACH_PORT_VALID(name) && armed && IP_VALID(soright)) {
                /* claim send-once right - slot still inuse */
                ipr->ipr_soright = IP_NULL;
                ip_unlock(port);

                ipc_notify_send_possible(soright, name);

                ip_lock(port);
                goto revalidate;
            }
        }
    }
    ip_unlock(port);
}
/*
 *  Routine:    ipc_port_dnnotify
 *  Purpose:
 *      Generate dead name notifications for
 *      all outstanding dead-name and send-
 *      possible requests.
 *  Conditions:
 *      Nothing locked.
 *      Port must be inactive.
 *      Reference held on port.
 */
void
ipc_port_dnnotify(
    ipc_port_t  port)
{
    ipc_port_request_t requests = port->ip_requests;

    assert(!ip_active(port));
    if (requests != IPR_NULL) {
        ipc_table_size_t its = requests->ipr_size;
        ipc_table_elems_t size = its->its_size;
        ipc_port_request_index_t index;
        for (index = 1; index < size; index++) {
            ipc_port_request_t ipr = &requests[index];
            mach_port_name_t name = ipr->ipr_name;
            ipc_port_t soright = IPR_SOR_PORT(ipr->ipr_soright);

            if (MACH_PORT_VALID(name) && IP_VALID(soright)) {
                ipc_notify_dead_name(soright, name);
            }
        }
    }
}
/*
 *  Routine:    ipc_port_destroy
 *  Purpose:
 *      Destroys a port.  Cleans up queued messages.
 *
 *      If the port has a backup, it doesn't get destroyed,
 *      but is sent in a port-destroyed notification to the backup.
 *  Conditions:
 *      The port is locked and alive; nothing else locked.
 *      The caller has a reference, which is consumed.
 *      Afterwards, the port is unlocked and dead.
 */

void
ipc_port_destroy(ipc_port_t port)
{
    ipc_port_t pdrequest, nsrequest;
    ipc_mqueue_t mqueue;
    ipc_kmsg_t kmsg;
    boolean_t special_reply = port->ip_specialreply;
    struct task_watchport_elem *watchport_elem = NULL;

#if IMPORTANCE_INHERITANCE
    ipc_importance_task_t release_imp_task = IIT_NULL;
    thread_t self = current_thread();
    boolean_t top = (self->ith_assertions == 0);
    natural_t assertcnt = 0;
#endif /* IMPORTANCE_INHERITANCE */

    require_ip_active(port);
    /* port->ip_receiver_name is garbage */
    /* port->ip_receiver/port->ip_destination is garbage */

    /* clear any reply-port context */
    port->ip_reply_context = 0;

    /* check for a backup port */
    pdrequest = port->ip_pdrequest;

    /*
     * Panic if a special reply has ip_pdrequest or ip_tempowner
     * set, as this causes a type confusion while accessing the
     * kdata union.
     */
    if (special_reply && (pdrequest || port->ip_tempowner)) {
        panic("ipc_port_destroy: invalid state");
    }

#if IMPORTANCE_INHERITANCE
    /* determine how many assertions to drop and from whom */
    if (port->ip_tempowner != 0) {
        assert(top);
        release_imp_task = port->ip_imp_task;
        if (IIT_NULL != release_imp_task) {
            port->ip_imp_task = IIT_NULL;
            assertcnt = port->ip_impcount;
        }
        /* Otherwise, nothing to drop */
    } else {
        assertcnt = port->ip_impcount;
        if (pdrequest != IP_NULL) {
            /* mark in limbo for the journey */
            port->ip_tempowner = 1;
        }
    }

    if (top) {
        self->ith_assertions = assertcnt;
    }
#endif /* IMPORTANCE_INHERITANCE */

    if (pdrequest != IP_NULL) {
        /* clear receiver, don't destroy the port */
        (void)ipc_port_clear_receiver(port, FALSE);
        assert(port->ip_in_pset == 0);
        assert(port->ip_mscount == 0);

        /* we assume the ref for pdrequest */
        port->ip_pdrequest = IP_NULL;

        imq_lock(&port->ip_messages);
        watchport_elem = ipc_port_clear_watchport_elem_internal(port);
        ipc_port_send_turnstile_recompute_push_locked(port);
        /* mqueue and port unlocked */

        if (special_reply) {
            ipc_port_adjust_special_reply_port(port,
                IPC_PORT_ADJUST_SR_ALLOW_SYNC_LINKAGE);
        }

        if (watchport_elem) {
            task_watchport_elem_deallocate(watchport_elem);
            watchport_elem = NULL;
        }
        /* consumes our refs for port and pdrequest */
        ipc_notify_port_destroyed(pdrequest, port);

        goto drop_assertions;
    }

    /*
     * The mach_msg_* paths don't hold a port lock, they only hold a
     * reference to the port object. If a thread raced us and is now
     * blocked waiting for message reception on this mqueue (or waiting
     * for ipc_mqueue_full), it will never be woken up. We call
     * ipc_port_clear_receiver() here, _after_ the port has been marked
     * inactive, to wakeup any threads which may be blocked and ensure
     * that no other thread can get lost waiting for a wake up on a
     * port/mqueue that's been destroyed.
     */
    boolean_t reap_msgs = FALSE;
    reap_msgs = ipc_port_clear_receiver(port, TRUE); /* marks port and mqueue inactive */
    assert(port->ip_in_pset == 0);
    assert(port->ip_mscount == 0);

    imq_lock(&port->ip_messages);
    watchport_elem = ipc_port_clear_watchport_elem_internal(port);
    imq_unlock(&port->ip_messages);
    nsrequest = port->ip_nsrequest;

    /*
     * If the port has a preallocated message buffer and that buffer
     * is not inuse, free it.  If it has an inuse one, then the kmsg
     * free will detect that we freed the association and it can free it
     * like a normal buffer.
     *
     * Once the port is marked inactive we don't need to keep it locked.
     */
    if (IP_PREALLOC(port)) {
        ipc_port_t inuse_port;

        kmsg = port->ip_premsg;
        assert(kmsg != IKM_NULL);
        inuse_port = ikm_prealloc_inuse_port(kmsg);
        ipc_kmsg_clear_prealloc(kmsg, port);

        imq_lock(&port->ip_messages);
        ipc_port_send_turnstile_recompute_push_locked(port);
        /* mqueue and port unlocked */

        if (inuse_port != IP_NULL) {
            assert(inuse_port == port);
        } else {
            ipc_kmsg_free(kmsg);
        }
    } else {
        imq_lock(&port->ip_messages);
        ipc_port_send_turnstile_recompute_push_locked(port);
        /* mqueue and port unlocked */
    }

    /* Deallocate the watchport element */
    if (watchport_elem) {
        task_watchport_elem_deallocate(watchport_elem);
        watchport_elem = NULL;
    }

    /* unlink the kmsg from special reply port */
    if (special_reply) {
        ipc_port_adjust_special_reply_port(port,
            IPC_PORT_ADJUST_SR_ALLOW_SYNC_LINKAGE);
    }

    /* throw away no-senders request */
    if (nsrequest != IP_NULL) {
        ipc_notify_send_once(nsrequest); /* consumes ref */
    }

    /*
     * Reap any kmsg objects waiting to be destroyed.
     * This must be done after we've released the port lock.
     */
    if (reap_msgs) {
        ipc_kmsg_reap_delayed();
    }

    mqueue = &port->ip_messages;

    /* cleanup waitq related resources */
    ipc_mqueue_deinit(mqueue);

    /* generate dead-name notifications */
    ipc_port_dnnotify(port);

    ipc_kobject_destroy(port);

    ip_release(port); /* consume caller's ref */

drop_assertions:
#if IMPORTANCE_INHERITANCE
    if (release_imp_task != IIT_NULL) {
        if (assertcnt > 0) {
            assert(top);
            self->ith_assertions = 0;
            assert(ipc_importance_task_is_any_receiver_type(release_imp_task));
            ipc_importance_task_drop_internal_assertion(release_imp_task, assertcnt);
        }
        ipc_importance_task_release(release_imp_task);
    } else if (assertcnt > 0) {
        if (top) {
            self->ith_assertions = 0;
            release_imp_task = current_task()->task_imp_base;
            if (ipc_importance_task_is_any_receiver_type(release_imp_task)) {
                ipc_importance_task_drop_internal_assertion(release_imp_task, assertcnt);
            }
        }
    }
#endif /* IMPORTANCE_INHERITANCE */
}
/*
 *  Routine:    ipc_port_destination_chain_lock
 *  Purpose:
 *      Search for the end of the chain (a port not in transit),
 *      acquiring locks along the way, and return it in `base`.
 *
 *      Returns true if a reference was taken on `base`
 *
 *  Conditions:
 *      No ports locked.
 *      ipc_port_multiple_lock held.
 */
boolean_t
ipc_port_destination_chain_lock(
    ipc_port_t port,
    ipc_port_t *base)
{
    for (;;) {
        ip_lock(port);

        if (!ip_active(port)) {
            /*
             * Active ports that are ip_lock()ed cannot go away.
             *
             * But inactive ports at the end of walking
             * an ip_destination chain are only protected
             * from space termination cleanup while the entire
             * chain of ports leading to them is held.
             *
             * Callers of this code tend to unlock the chain
             * in the same order as this walk, which doesn't
             * protect `base` properly when it's inactive.
             *
             * In that case, take a reference that the caller
             * is responsible for releasing.
             */
            ip_reference(port);
            *base = port;
            return true;
        }
        if ((port->ip_receiver_name != MACH_PORT_NULL) ||
            (port->ip_destination == IP_NULL)) {
            *base = port;
            return false;
        }

        port = port->ip_destination;
    }
}
/*
 *  Routine:    ipc_port_check_circularity
 *  Purpose:
 *      Check if queueing "port" in a message for "dest"
 *      would create a circular group of ports and messages.
 *
 *      If no circularity (FALSE returned), then "port"
 *      is changed from "in limbo" to "in transit".
 *
 *      That is, we want to set port->ip_destination == dest,
 *      but guaranteeing that this doesn't create a circle
 *      port->ip_destination->ip_destination->... == port
 *
 *  Conditions:
 *      No ports locked.  References held for "port" and "dest".
 */

boolean_t
ipc_port_check_circularity(
    ipc_port_t  port,
    ipc_port_t  dest)
{
#if IMPORTANCE_INHERITANCE
    /* adjust importance counts at the same time */
    return ipc_importance_check_circularity(port, dest);
#else
    ipc_port_t base;
    struct task_watchport_elem *watchport_elem = NULL;
    bool took_base_ref = false;

    assert(port != IP_NULL);
    assert(dest != IP_NULL);

    if (port == dest) {
        return TRUE;
    }
    base = dest;

    /* Check if destination needs a turnstile */
    ipc_port_send_turnstile_prepare(dest);

    /*
     *  First try a quick check that can run in parallel.
     *  No circularity if dest is not in transit.
     */
    ip_lock(port);
    if (ip_lock_try(dest)) {
        if (!ip_active(dest) ||
            (dest->ip_receiver_name != MACH_PORT_NULL) ||
            (dest->ip_destination == IP_NULL)) {
            goto not_circular;
        }

        /* dest is in transit; further checking necessary */

        ip_unlock(dest);
    }
    ip_unlock(port);

    ipc_port_multiple_lock(); /* massive serialization */

    /*
     *  Search for the end of the chain (a port not in transit),
     *  acquiring locks along the way.
     */

    took_base_ref = ipc_port_destination_chain_lock(dest, &base);
    /* all ports in chain from dest to base, inclusive, are locked */

    if (port == base) {
        /* circularity detected! */

        ipc_port_multiple_unlock();

        /* port (== base) is in limbo */

        require_ip_active(port);
        assert(port->ip_receiver_name == MACH_PORT_NULL);
        assert(port->ip_destination == IP_NULL);
        assert(!took_base_ref);

        base = dest;
        while (base != IP_NULL) {
            ipc_port_t next;

            /* dest is in transit or in limbo */

            require_ip_active(base);
            assert(base->ip_receiver_name == MACH_PORT_NULL);

            next = base->ip_destination;
            ip_unlock(base);
            base = next;
        }

        ipc_port_send_turnstile_complete(dest);
        return TRUE;
    }

    /*
     *  The guarantee:  lock port while the entire chain is locked.
     *  Once port is locked, we can take a reference to dest,
     *  add port to the chain, and unlock everything.
     */

    ip_lock(port);
    ipc_port_multiple_unlock();

not_circular:
    imq_lock(&port->ip_messages);

    /* port is in limbo */
    require_ip_active(port);
    assert(port->ip_receiver_name == MACH_PORT_NULL);
    assert(port->ip_destination == IP_NULL);

    /* Clear the watchport boost */
    watchport_elem = ipc_port_clear_watchport_elem_internal(port);

    /* Check if the port is being enqueued as a part of sync bootstrap checkin */
    if (dest->ip_specialreply && dest->ip_sync_bootstrap_checkin) {
        port->ip_sync_bootstrap_checkin = 1;
    }

    ip_reference(dest);
    port->ip_destination = dest;

    /* Setup linkage for source port if it has sync ipc push */
    struct turnstile *send_turnstile = TURNSTILE_NULL;
    if (port_send_turnstile(port)) {
        send_turnstile = turnstile_prepare((uintptr_t)port,
            port_send_turnstile_address(port),
            TURNSTILE_NULL, TURNSTILE_SYNC_IPC);

        /*
         * What ipc_port_adjust_port_locked would do,
         * but we need to also drop even more locks before
         * calling turnstile_update_inheritor_complete().
         */
        ipc_port_adjust_sync_link_state_locked(port, PORT_SYNC_LINK_ANY, NULL);

        turnstile_update_inheritor(send_turnstile, port_send_turnstile(dest),
            (TURNSTILE_INHERITOR_TURNSTILE | TURNSTILE_IMMEDIATE_UPDATE));

        /* update complete and turnstile complete called after dropping all locks */
    }
    imq_unlock(&port->ip_messages);

    /* now unlock chain */

    ip_unlock(port);

    while (dest != base) {
        ipc_port_t next;

        /* port is in transit */

        require_ip_active(dest);
        assert(dest->ip_receiver_name == MACH_PORT_NULL);
        assert(dest->ip_destination != IP_NULL);

        next = dest->ip_destination;
        ip_unlock(dest);
        dest = next;
    }

    /* base is not in transit */
    assert(!ip_active(base) ||
        (base->ip_receiver_name != MACH_PORT_NULL) ||
        (base->ip_destination == IP_NULL));

    ip_unlock(base);
    if (took_base_ref) {
        ip_release(base);
    }

    /* All locks dropped, call turnstile_update_inheritor_complete for source port's turnstile */
    if (send_turnstile) {
        turnstile_update_inheritor_complete(send_turnstile, TURNSTILE_INTERLOCK_NOT_HELD);

        /* Take the mq lock to call turnstile complete */
        imq_lock(&port->ip_messages);
        turnstile_complete((uintptr_t)port, port_send_turnstile_address(port), NULL, TURNSTILE_SYNC_IPC);
        send_turnstile = TURNSTILE_NULL;
        imq_unlock(&port->ip_messages);
        turnstile_cleanup();
    }

    if (watchport_elem) {
        task_watchport_elem_deallocate(watchport_elem);
    }

    return FALSE;
#endif /* !IMPORTANCE_INHERITANCE */
}
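/*
 * Editorial sketch of the scenario guarded against above: a receive right
 * already in transit must not become its own transitive destination.
 * `send_right_in_msg()` is a hypothetical helper standing in for a
 * mach_msg() send that carries a receive right:
 */
#if 0
    /* receive right for A is queued in a message sent to port B ... */
    send_right_in_msg(a_recv_right, b);
    /* ... so queueing B's receive right in a message to A would close the
     * loop; ipc_port_check_circularity() returns TRUE and the right is
     * destroyed rather than enqueued */
    send_right_in_msg(b_recv_right, a);
#endif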
/*
 *  Routine:    ipc_port_watchport_elem
 *  Purpose:
 *      Get the port's watchport elem field
 *
 *  Conditions:
 *      mqueue locked
 */
static struct task_watchport_elem *
ipc_port_watchport_elem(ipc_port_t port)
{
    return port->ip_messages.imq_wait_queue.waitq_tspriv;
}
/*
 *  Routine:    ipc_port_update_watchport_elem
 *  Purpose:
 *      Set the port's watchport elem field
 *
 *  Conditions:
 *      mqueue locked
 */
static inline struct task_watchport_elem *
ipc_port_update_watchport_elem(ipc_port_t port, struct task_watchport_elem *we)
{
    assert(!port->ip_specialreply);
    struct task_watchport_elem *old_we = ipc_port_watchport_elem(port);
    port->ip_messages.imq_wait_queue.waitq_tspriv = we;
    return old_we;
}
/*
 *  Routine:    ipc_special_reply_stash_pid_locked
 *  Purpose:
 *      Set the pid of the process that copied out the send-once right
 *      to the special reply port.
 *
 *  Conditions:
 *      port locked
 */
static inline void
ipc_special_reply_stash_pid_locked(ipc_port_t port, int pid)
{
    assert(port->ip_specialreply);
    port->ip_messages.imq_wait_queue.waitq_priv_pid = pid;
}
/*
 *  Routine:    ipc_special_reply_get_pid_locked
 *  Purpose:
 *      Get the pid of the process that copied out the send-once right
 *      to the special reply port.
 *
 *  Conditions:
 *      port locked
 */
int
ipc_special_reply_get_pid_locked(ipc_port_t port)
{
    assert(port->ip_specialreply);
    return port->ip_messages.imq_wait_queue.waitq_priv_pid;
}
/*
 * Update the recv turnstile inheritor for a port.
 *
 * Sync IPC through the port receive turnstile only happens for the special
 * reply port case. It has three sub-cases:
 *
 * 1. a send-once right is in transit, and pushes on the send turnstile of its
 *    destination mqueue.
 *
 * 2. a send-once right has been stashed on a knote it was copied out "through",
 *    as the first such copied out port.
 *
 * 3. a send-once right has been stashed on a knote it was copied out "through",
 *    as the second or more copied out port.
 */
void
ipc_port_recv_update_inheritor(
    ipc_port_t port,
    struct turnstile *rcv_turnstile,
    turnstile_update_flags_t flags)
{
    struct turnstile *inheritor = TURNSTILE_NULL;
    struct knote *kn;

    if (ip_active(port) && port->ip_specialreply) {
        imq_held(&port->ip_messages);

        switch (port->ip_sync_link_state) {
        case PORT_SYNC_LINK_PORT:
            if (port->ip_sync_inheritor_port != NULL) {
                inheritor = port_send_turnstile(port->ip_sync_inheritor_port);
            }
            break;

        case PORT_SYNC_LINK_WORKLOOP_KNOTE:
            kn = port->ip_sync_inheritor_knote;
            inheritor = filt_ipc_kqueue_turnstile(kn);
            break;

        case PORT_SYNC_LINK_WORKLOOP_STASH:
            inheritor = port->ip_sync_inheritor_ts;
            break;
        }
    }

    turnstile_update_inheritor(rcv_turnstile, inheritor,
        flags | TURNSTILE_INHERITOR_TURNSTILE);
}
/*
 * Update the send turnstile inheritor for a port.
 *
 * Sync IPC through the port send turnstile has 7 possible reasons to be linked:
 *
 * 1. a special reply port is part of sync ipc for bootstrap checkin and needs
 *    to push on thread doing the sync ipc.
 *
 * 2. a receive right is in transit, and pushes on the send turnstile of its
 *    destination mqueue.
 *
 * 3. port was passed as an exec watchport and port is pushing on main thread
 *    of the task.
 *
 * 4. a receive right has been stashed on a knote it was copied out "through",
 *    as the first such copied out port (same as PORT_SYNC_LINK_WORKLOOP_KNOTE
 *    for the special reply port)
 *
 * 5. a receive right has been stashed on a knote it was copied out "through",
 *    as the second or more copied out port (same as
 *    PORT_SYNC_LINK_WORKLOOP_STASH for the special reply port)
 *
 * 6. a receive right has been copied out as a part of sync bootstrap checkin
 *    and needs to push on thread doing the sync bootstrap checkin.
 *
 * 7. the receive right is monitored by a knote, and pushes on any that is
 *    registered on a workloop. filt_machport makes sure that if such a knote
 *    exists, it is kept as the first item in the knote list, so we never need
 *    to walk.
 */
void
ipc_port_send_update_inheritor(
    ipc_port_t port,
    struct turnstile *send_turnstile,
    turnstile_update_flags_t flags)
{
    ipc_mqueue_t mqueue = &port->ip_messages;
    turnstile_inheritor_t inheritor = TURNSTILE_INHERITOR_NULL;
    struct knote *kn;
    turnstile_update_flags_t inheritor_flags = TURNSTILE_INHERITOR_TURNSTILE;

    imq_held(mqueue);

    if (!ip_active(port)) {
        /* this port is no longer active, it should not push anywhere */
    } else if (port->ip_specialreply) {
        /* Case 1. */
        if (port->ip_sync_bootstrap_checkin && prioritize_launch) {
            inheritor = port->ip_messages.imq_srp_owner_thread;
            inheritor_flags = TURNSTILE_INHERITOR_THREAD;
        }
    } else if (port->ip_receiver_name == MACH_PORT_NULL &&
        port->ip_destination != NULL) {
        /* Case 2. */
        inheritor = port_send_turnstile(port->ip_destination);
    } else if (ipc_port_watchport_elem(port) != NULL) {
        /* Case 3. */
        if (prioritize_launch) {
            assert(port->ip_sync_link_state == PORT_SYNC_LINK_ANY);
            inheritor = ipc_port_get_watchport_inheritor(port);
            inheritor_flags = TURNSTILE_INHERITOR_THREAD;
        }
    } else if (port->ip_sync_link_state == PORT_SYNC_LINK_WORKLOOP_KNOTE) {
        /* Case 4. */
        inheritor = filt_ipc_kqueue_turnstile(mqueue->imq_inheritor_knote);
    } else if (port->ip_sync_link_state == PORT_SYNC_LINK_WORKLOOP_STASH) {
        /* Case 5. */
        inheritor = mqueue->imq_inheritor_turnstile;
    } else if (port->ip_sync_link_state == PORT_SYNC_LINK_RCV_THREAD) {
        /* Case 6. */
        if (prioritize_launch) {
            inheritor = port->ip_messages.imq_inheritor_thread_ref;
            inheritor_flags = TURNSTILE_INHERITOR_THREAD;
        }
    } else if ((kn = SLIST_FIRST(&mqueue->imq_klist))) {
        /* Case 7. Push on a workloop that is interested */
        if (filt_machport_kqueue_has_turnstile(kn)) {
            assert(port->ip_sync_link_state == PORT_SYNC_LINK_ANY);
            inheritor = filt_ipc_kqueue_turnstile(kn);
        }
    }

    turnstile_update_inheritor(send_turnstile, inheritor,
        flags | inheritor_flags);
}
/*
 *  Routine:    ipc_port_send_turnstile_prepare
 *  Purpose:
 *      Get a reference on port's send turnstile, if
 *      port does not have a send turnstile then allocate one.
 *
 *  Conditions:
 *      Nothing is locked.
 */
void
ipc_port_send_turnstile_prepare(ipc_port_t port)
{
    struct turnstile *turnstile = TURNSTILE_NULL;
    struct turnstile *send_turnstile = TURNSTILE_NULL;

retry_alloc:
    imq_lock(&port->ip_messages);

    if (port_send_turnstile(port) == NULL ||
        port_send_turnstile(port)->ts_port_ref == 0) {
        if (turnstile == TURNSTILE_NULL) {
            imq_unlock(&port->ip_messages);
            turnstile = turnstile_alloc();
            goto retry_alloc;
        }

        send_turnstile = turnstile_prepare((uintptr_t)port,
            port_send_turnstile_address(port),
            turnstile, TURNSTILE_SYNC_IPC);
        turnstile = TURNSTILE_NULL;

        ipc_port_send_update_inheritor(port, send_turnstile,
            TURNSTILE_IMMEDIATE_UPDATE);

        /* turnstile complete will be called in ipc_port_send_turnstile_complete */
    }

    /* Increment turnstile counter */
    port_send_turnstile(port)->ts_port_ref++;
    imq_unlock(&port->ip_messages);

    if (send_turnstile) {
        turnstile_update_inheritor_complete(send_turnstile,
            TURNSTILE_INTERLOCK_NOT_HELD);
    }
    if (turnstile != TURNSTILE_NULL) {
        turnstile_deallocate(turnstile);
    }
}
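/*
 * Editorial sketch: every successful prepare must be balanced by
 * ipc_port_send_turnstile_complete() once the sender no longer pushes on
 * the port (see ipc_port_check_circularity() above for a real pairing):
 */
#if 0
    ipc_port_send_turnstile_prepare(dest);
    /* ... block or enqueue with a push on dest's send turnstile ... */
    ipc_port_send_turnstile_complete(dest);
#endif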
/*
 *  Routine:    ipc_port_send_turnstile_complete
 *  Purpose:
 *      Drop a ref on the port's send turnstile, if the
 *      ref becomes zero, deallocate the turnstile.
 *
 *  Conditions:
 *      The space might be locked, use safe deallocate.
 */
void
ipc_port_send_turnstile_complete(ipc_port_t port)
{
    struct turnstile *turnstile = TURNSTILE_NULL;

    /* Drop turnstile count on dest port */
    imq_lock(&port->ip_messages);

    port_send_turnstile(port)->ts_port_ref--;
    if (port_send_turnstile(port)->ts_port_ref == 0) {
        turnstile_complete((uintptr_t)port, port_send_turnstile_address(port),
            &turnstile, TURNSTILE_SYNC_IPC);
        assert(turnstile != TURNSTILE_NULL);
    }
    imq_unlock(&port->ip_messages);
    turnstile_cleanup();

    if (turnstile != TURNSTILE_NULL) {
        turnstile_deallocate_safe(turnstile);
        turnstile = TURNSTILE_NULL;
    }
}
/*
 *  Routine:    ipc_port_rcv_turnstile
 *  Purpose:
 *      Get the port's receive turnstile
 *
 *  Conditions:
 *      mqueue locked or thread waiting on turnstile is locked.
 */
static struct turnstile *
ipc_port_rcv_turnstile(ipc_port_t port)
{
    return *port_rcv_turnstile_address(port);
}
/*
 *  Routine:    ipc_port_link_special_reply_port
 *  Purpose:
 *      Link the special reply port with the destination port.
 *      Allocates turnstile to dest port.
 *
 *  Conditions:
 *      Nothing is locked.
 */
void
ipc_port_link_special_reply_port(
    ipc_port_t special_reply_port,
    ipc_port_t dest_port,
    boolean_t sync_bootstrap_checkin)
{
    boolean_t drop_turnstile_ref = FALSE;
    boolean_t special_reply = FALSE;

    /* Check if dest_port needs a turnstile */
    ipc_port_send_turnstile_prepare(dest_port);

    /* Lock the special reply port and establish the linkage */
    ip_lock(special_reply_port);
    imq_lock(&special_reply_port->ip_messages);

    special_reply = special_reply_port->ip_specialreply;

    if (sync_bootstrap_checkin && special_reply) {
        special_reply_port->ip_sync_bootstrap_checkin = 1;
    }

    /* Check if we need to drop the acquired turnstile ref on dest port */
    if (!special_reply ||
        special_reply_port->ip_sync_link_state != PORT_SYNC_LINK_ANY ||
        special_reply_port->ip_sync_inheritor_port != IPC_PORT_NULL) {
        drop_turnstile_ref = TRUE;
    } else {
        /* take a reference on dest_port */
        ip_reference(dest_port);
        special_reply_port->ip_sync_inheritor_port = dest_port;
        special_reply_port->ip_sync_link_state = PORT_SYNC_LINK_PORT;
    }

    imq_unlock(&special_reply_port->ip_messages);
    ip_unlock(special_reply_port);

    if (special_reply) {
        /*
         * For special reply ports, if the destination port is
         * marked with the thread group blocked tracking flag,
         * callout to the performance controller.
         */
        ipc_port_thread_group_blocked(dest_port);
    }

    if (drop_turnstile_ref) {
        ipc_port_send_turnstile_complete(dest_port);
    }
}
/*
 *  Routine:    ipc_port_thread_group_blocked
 *  Purpose:
 *      Call thread_group_blocked callout if the port
 *      has ip_tg_block_tracking bit set and the thread
 *      has not made this callout already.
 *
 *  Conditions:
 *      Nothing is locked.
 */
void
ipc_port_thread_group_blocked(ipc_port_t port __unused)
{
#if CONFIG_THREAD_GROUPS
    bool port_tg_block_tracking = false;
    thread_t self = current_thread();

    if (self->thread_group == NULL ||
        (self->options & TH_OPT_IPC_TG_BLOCKED)) {
        return;
    }

    port_tg_block_tracking = port->ip_tg_block_tracking;
    if (!port_tg_block_tracking) {
        return;
    }

    machine_thread_group_blocked(self->thread_group, NULL,
        PERFCONTROL_CALLOUT_BLOCKING_TG_RENDER_SERVER, self);

    self->options |= TH_OPT_IPC_TG_BLOCKED;
#endif /* CONFIG_THREAD_GROUPS */
}
/*
 *  Routine:    ipc_port_thread_group_unblocked
 *  Purpose:
 *      Call thread_group_unblocked callout if the
 *      thread had previously made a thread_group_blocked
 *      callout before (indicated by TH_OPT_IPC_TG_BLOCKED
 *      flag on the thread).
 *
 *  Conditions:
 *      Nothing is locked.
 */
void
ipc_port_thread_group_unblocked(void)
{
#if CONFIG_THREAD_GROUPS
    thread_t self = current_thread();

    if (!(self->options & TH_OPT_IPC_TG_BLOCKED)) {
        return;
    }

    machine_thread_group_unblocked(self->thread_group, NULL,
        PERFCONTROL_CALLOUT_BLOCKING_TG_RENDER_SERVER, self);

    self->options &= ~TH_OPT_IPC_TG_BLOCKED;
#endif /* CONFIG_THREAD_GROUPS */
}
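/*
 * Editorial sketch: the two callouts above form a strict pair keyed off
 * TH_OPT_IPC_TG_BLOCKED, bracketing a sync IPC wait:
 */
#if 0
    ipc_port_thread_group_blocked(dest_port);
    /* ... thread blocks waiting for the reply ... */
    ipc_port_thread_group_unblocked();
#endif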
#if DEVELOPMENT || DEBUG
inline void
ipc_special_reply_port_bits_reset(ipc_port_t special_reply_port)
{
    special_reply_port->ip_srp_lost_link = 0;
    special_reply_port->ip_srp_msg_sent = 0;
}

static inline void
ipc_special_reply_port_msg_sent_reset(ipc_port_t special_reply_port)
{
    if (special_reply_port->ip_specialreply == 1) {
        special_reply_port->ip_srp_msg_sent = 0;
    }
}

inline void
ipc_special_reply_port_msg_sent(ipc_port_t special_reply_port)
{
    if (special_reply_port->ip_specialreply == 1) {
        special_reply_port->ip_srp_msg_sent = 1;
    }
}

static inline void
ipc_special_reply_port_lost_link(ipc_port_t special_reply_port)
{
    if (special_reply_port->ip_specialreply == 1 && special_reply_port->ip_srp_msg_sent == 0) {
        special_reply_port->ip_srp_lost_link = 1;
    }
}

#else /* DEVELOPMENT || DEBUG */
inline void
ipc_special_reply_port_bits_reset(__unused ipc_port_t special_reply_port)
{
    return;
}

static inline void
ipc_special_reply_port_msg_sent_reset(__unused ipc_port_t special_reply_port)
{
    return;
}

inline void
ipc_special_reply_port_msg_sent(__unused ipc_port_t special_reply_port)
{
    return;
}

static inline void
ipc_special_reply_port_lost_link(__unused ipc_port_t special_reply_port)
{
    return;
}
#endif /* DEVELOPMENT || DEBUG */
/*
 *  Routine:    ipc_port_adjust_special_reply_port_locked
 *  Purpose:
 *      If the special port has a turnstile, update its inheritor.
 *  Condition:
 *      Special reply port locked on entry.
 *      Special reply port unlocked on return.
 *      The passed in port is a special reply port.
 *  Returns:
 *      None.
 */
void
ipc_port_adjust_special_reply_port_locked(
    ipc_port_t special_reply_port,
    struct knote *kn,
    uint8_t flags,
    boolean_t get_turnstile)
{
    ipc_port_t dest_port = IPC_PORT_NULL;
    int sync_link_state = PORT_SYNC_LINK_NO_LINKAGE;
    turnstile_inheritor_t inheritor = TURNSTILE_INHERITOR_NULL;
    struct turnstile *ts = TURNSTILE_NULL;

    ip_lock_held(special_reply_port); // ip_sync_link_state is touched
    imq_lock(&special_reply_port->ip_messages);

    if (!special_reply_port->ip_specialreply) {
        // only mach_msg_receive_results_complete() calls this with any port
        assert(get_turnstile);
        goto not_special;
    }

    if (flags & IPC_PORT_ADJUST_SR_RECEIVED_MSG) {
        ipc_special_reply_port_msg_sent_reset(special_reply_port);
    }

    if (flags & IPC_PORT_ADJUST_UNLINK_THREAD) {
        special_reply_port->ip_messages.imq_srp_owner_thread = NULL;
    }

    if (flags & IPC_PORT_ADJUST_RESET_BOOSTRAP_CHECKIN) {
        special_reply_port->ip_sync_bootstrap_checkin = 0;
    }

    /* Check if the special reply port is marked non-special */
    if (special_reply_port->ip_sync_link_state == PORT_SYNC_LINK_ANY) {
not_special:
        if (get_turnstile) {
            turnstile_complete((uintptr_t)special_reply_port,
                port_rcv_turnstile_address(special_reply_port), NULL, TURNSTILE_SYNC_IPC);
        }
        imq_unlock(&special_reply_port->ip_messages);
        ip_unlock(special_reply_port);
        if (get_turnstile) {
            turnstile_cleanup();
        }
        return;
    }

    if (flags & IPC_PORT_ADJUST_SR_LINK_WORKLOOP) {
        if (ITH_KNOTE_VALID(kn, MACH_MSG_TYPE_PORT_SEND_ONCE)) {
            inheritor = filt_machport_stash_port(kn, special_reply_port,
                &sync_link_state);
        }
    } else if (flags & IPC_PORT_ADJUST_SR_ALLOW_SYNC_LINKAGE) {
        sync_link_state = PORT_SYNC_LINK_ANY;
    }

    /* Check if need to break linkage */
    if (!get_turnstile && sync_link_state == PORT_SYNC_LINK_NO_LINKAGE &&
        special_reply_port->ip_sync_link_state == PORT_SYNC_LINK_NO_LINKAGE) {
        imq_unlock(&special_reply_port->ip_messages);
        ip_unlock(special_reply_port);
        return;
    }

    switch (special_reply_port->ip_sync_link_state) {
    case PORT_SYNC_LINK_PORT:
        dest_port = special_reply_port->ip_sync_inheritor_port;
        special_reply_port->ip_sync_inheritor_port = IPC_PORT_NULL;
        break;
    case PORT_SYNC_LINK_WORKLOOP_KNOTE:
        special_reply_port->ip_sync_inheritor_knote = NULL;
        break;
    case PORT_SYNC_LINK_WORKLOOP_STASH:
        special_reply_port->ip_sync_inheritor_ts = NULL;
        break;
    }

    /*
     * Stash (or unstash) the server's PID in the ip_sorights field of the
     * special reply port, so that stackshot can later retrieve who the client
     * is blocked on.
     */
    if (special_reply_port->ip_sync_link_state == PORT_SYNC_LINK_PORT &&
        sync_link_state == PORT_SYNC_LINK_NO_LINKAGE) {
        ipc_special_reply_stash_pid_locked(special_reply_port, pid_from_task(current_task()));
    } else if (special_reply_port->ip_sync_link_state == PORT_SYNC_LINK_NO_LINKAGE &&
        sync_link_state == PORT_SYNC_LINK_ANY) {
        /* If we are resetting the special reply port, remove the stashed pid. */
        ipc_special_reply_stash_pid_locked(special_reply_port, 0);
    }

    special_reply_port->ip_sync_link_state = sync_link_state;

    switch (sync_link_state) {
    case PORT_SYNC_LINK_WORKLOOP_KNOTE:
        special_reply_port->ip_sync_inheritor_knote = kn;
        break;
    case PORT_SYNC_LINK_WORKLOOP_STASH:
        special_reply_port->ip_sync_inheritor_ts = inheritor;
        break;
    case PORT_SYNC_LINK_NO_LINKAGE:
        if (flags & IPC_PORT_ADJUST_SR_ENABLE_EVENT) {
            ipc_special_reply_port_lost_link(special_reply_port);
        }
        break;
    }

    /* Get thread's turnstile donated to special reply port */
    if (get_turnstile) {
        turnstile_complete((uintptr_t)special_reply_port,
            port_rcv_turnstile_address(special_reply_port), NULL, TURNSTILE_SYNC_IPC);
    } else {
        ts = ipc_port_rcv_turnstile(special_reply_port);
        if (ts) {
            turnstile_reference(ts);
            ipc_port_recv_update_inheritor(special_reply_port, ts,
                TURNSTILE_IMMEDIATE_UPDATE);
        }
    }

    imq_unlock(&special_reply_port->ip_messages);
    ip_unlock(special_reply_port);

    if (get_turnstile) {
        turnstile_cleanup();
    } else if (ts) {
        /* Call turnstile cleanup after dropping the interlock */
        turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_NOT_HELD);
        turnstile_deallocate_safe(ts);
    }

    /* Release the ref on the dest port and its turnstile */
    if (dest_port) {
        ipc_port_send_turnstile_complete(dest_port);
        /* release the reference on the dest port */
        ip_release(dest_port);
    }
}
/*
 *  Routine:    ipc_port_adjust_special_reply_port
 *  Purpose:
 *      If the special port has a turnstile, update its inheritor.
 *  Condition:
 *      Nothing locked.
 *  Returns:
 *      None.
 */
void
ipc_port_adjust_special_reply_port(
    ipc_port_t port,
    uint8_t flags)
{
    if (port->ip_specialreply) {
        ip_lock(port);
        ipc_port_adjust_special_reply_port_locked(port, NULL, flags, FALSE);
    }
}
/*
 *  Routine:    ipc_port_adjust_sync_link_state_locked
 *  Purpose:
 *      Update the sync link state of the port and the
 *      turnstile inheritor.
 *  Condition:
 *      Port and mqueue locked on entry.
 *      Port and mqueue locked on return.
 *  Returns:
 *      None.
 */
void
ipc_port_adjust_sync_link_state_locked(
    ipc_port_t port,
    int sync_link_state,
    turnstile_inheritor_t inheritor)
{
    switch (port->ip_sync_link_state) {
    case PORT_SYNC_LINK_RCV_THREAD:
        /* deallocate the thread reference for the inheritor */
        thread_deallocate_safe(port->ip_messages.imq_inheritor_thread_ref);
        OS_FALLTHROUGH;
    default:
        klist_init(&port->ip_messages.imq_klist);
    }

    switch (sync_link_state) {
    case PORT_SYNC_LINK_WORKLOOP_KNOTE:
        port->ip_messages.imq_inheritor_knote = inheritor;
        break;
    case PORT_SYNC_LINK_WORKLOOP_STASH:
        port->ip_messages.imq_inheritor_turnstile = inheritor;
        break;
    case PORT_SYNC_LINK_RCV_THREAD:
        /* The thread could exit without clearing port state, take a thread ref */
        thread_reference((thread_t)inheritor);
        port->ip_messages.imq_inheritor_thread_ref = inheritor;
        break;
    default:
        klist_init(&port->ip_messages.imq_klist);
        sync_link_state = PORT_SYNC_LINK_ANY;
    }

    port->ip_sync_link_state = sync_link_state;
}
/*
 *  Routine:    ipc_port_adjust_port_locked
 *  Purpose:
 *      If the port has a turnstile, update its inheritor.
 *  Condition:
 *      Port locked on entry.
 *      Port unlocked on return.
 *  Returns:
 *      None.
 */
void
ipc_port_adjust_port_locked(
    ipc_port_t port,
    struct knote *kn,
    boolean_t sync_bootstrap_checkin)
{
    int sync_link_state = PORT_SYNC_LINK_ANY;
    turnstile_inheritor_t inheritor = TURNSTILE_INHERITOR_NULL;

    ip_lock_held(port); // ip_sync_link_state is touched
    imq_held(&port->ip_messages);

    assert(!port->ip_specialreply);

    if (kn) {
        inheritor = filt_machport_stash_port(kn, port, &sync_link_state);
        if (sync_link_state == PORT_SYNC_LINK_WORKLOOP_KNOTE) {
            inheritor = kn;
        }
    } else if (sync_bootstrap_checkin) {
        inheritor = current_thread();
        sync_link_state = PORT_SYNC_LINK_RCV_THREAD;
    }

    ipc_port_adjust_sync_link_state_locked(port, sync_link_state, inheritor);
    port->ip_sync_bootstrap_checkin = 0;

    ipc_port_send_turnstile_recompute_push_locked(port);
    /* port and mqueue unlocked */
}
/*
 *  Routine:    ipc_port_clear_sync_rcv_thread_boost_locked
 *  Purpose:
 *      If the port is pushing on rcv thread, clear it.
 *  Condition:
 *      Port locked on entry
 *      mqueue is not locked.
 *      Port unlocked on return.
 *  Returns:
 *      None.
 */
void
ipc_port_clear_sync_rcv_thread_boost_locked(
    ipc_port_t port)
{
    ip_lock_held(port); // ip_sync_link_state is touched

    if (port->ip_sync_link_state != PORT_SYNC_LINK_RCV_THREAD) {
        ip_unlock(port);
        return;
    }

    imq_lock(&port->ip_messages);
    ipc_port_adjust_sync_link_state_locked(port, PORT_SYNC_LINK_ANY, NULL);

    ipc_port_send_turnstile_recompute_push_locked(port);
    /* port and mqueue unlocked */
}
/*
 *  Routine:    ipc_port_add_watchport_elem_locked
 *  Purpose:
 *      Transfer the turnstile boost of watchport to task calling exec.
 *  Condition:
 *      Port locked on entry.
 *      Port unlocked on return.
 *  Returns:
 *      KERN_SUCCESS on success.
 *      KERN_FAILURE otherwise.
 */
kern_return_t
ipc_port_add_watchport_elem_locked(
    ipc_port_t                  port,
    struct task_watchport_elem  *watchport_elem,
    struct task_watchport_elem  **old_elem)
{
    ip_lock_held(port);
    imq_held(&port->ip_messages);

    /* Watchport boost only works for non-special active ports mapped in an ipc space */
    if (!ip_active(port) || port->ip_specialreply ||
        port->ip_receiver_name == MACH_PORT_NULL) {
        imq_unlock(&port->ip_messages);
        ip_unlock(port);
        return KERN_FAILURE;
    }

    if (port->ip_sync_link_state != PORT_SYNC_LINK_ANY) {
        /* Sever the linkage if the port was pushing on knote */
        ipc_port_adjust_sync_link_state_locked(port, PORT_SYNC_LINK_ANY, NULL);
    }

    *old_elem = ipc_port_update_watchport_elem(port, watchport_elem);

    ipc_port_send_turnstile_recompute_push_locked(port);
    /* port and mqueue unlocked */
    return KERN_SUCCESS;
}
/*
 *  Routine:    ipc_port_clear_watchport_elem_internal_conditional_locked
 *  Purpose:
 *      Remove the turnstile boost of watchport and recompute the push.
 *  Condition:
 *      Port locked on entry.
 *      Port unlocked on return.
 *  Returns:
 *      KERN_SUCCESS on success.
 *      KERN_FAILURE otherwise.
 */
kern_return_t
ipc_port_clear_watchport_elem_internal_conditional_locked(
    ipc_port_t                  port,
    struct task_watchport_elem  *watchport_elem)
{
    ip_lock_held(port);
    imq_held(&port->ip_messages);

    if (ipc_port_watchport_elem(port) != watchport_elem) {
        imq_unlock(&port->ip_messages);
        ip_unlock(port);
        return KERN_FAILURE;
    }

    ipc_port_clear_watchport_elem_internal(port);
    ipc_port_send_turnstile_recompute_push_locked(port);
    /* port and mqueue unlocked */
    return KERN_SUCCESS;
}
/*
 *  Routine:    ipc_port_replace_watchport_elem_conditional_locked
 *  Purpose:
 *      Replace the turnstile boost of watchport and recompute the push.
 *  Condition:
 *      Port locked on entry.
 *      Port unlocked on return.
 *  Returns:
 *      KERN_SUCCESS on success.
 *      KERN_FAILURE otherwise.
 */
kern_return_t
ipc_port_replace_watchport_elem_conditional_locked(
    ipc_port_t                  port,
    struct task_watchport_elem  *old_watchport_elem,
    struct task_watchport_elem  *new_watchport_elem)
{
    ip_lock_held(port);
    imq_held(&port->ip_messages);

    if (ipc_port_watchport_elem(port) != old_watchport_elem) {
        imq_unlock(&port->ip_messages);
        ip_unlock(port);
        return KERN_FAILURE;
    }

    ipc_port_update_watchport_elem(port, new_watchport_elem);
    ipc_port_send_turnstile_recompute_push_locked(port);
    /* port and mqueue unlocked */
    return KERN_SUCCESS;
}
/*
 *  Routine:    ipc_port_clear_watchport_elem_internal
 *  Purpose:
 *      Remove the turnstile boost of watchport.
 *  Condition:
 *      Port locked on entry.
 *      Port locked on return.
 *  Returns:
 *      Old task_watchport_elem returned.
 */
struct task_watchport_elem *
ipc_port_clear_watchport_elem_internal(
    ipc_port_t port)
{
    ip_lock_held(port);
    imq_held(&port->ip_messages);

    if (port->ip_specialreply) {
        return NULL;
    }

    return ipc_port_update_watchport_elem(port, NULL);
}
/*
 *  Routine:    ipc_port_send_turnstile_recompute_push_locked
 *  Purpose:
 *      Update send turnstile inheritor of port and recompute the push.
 *  Condition:
 *      Port locked on entry.
 *      Port unlocked on return.
 *  Returns:
 *      None.
 */
static void
ipc_port_send_turnstile_recompute_push_locked(
    ipc_port_t port)
{
    struct turnstile *send_turnstile = port_send_turnstile(port);
    if (send_turnstile) {
        turnstile_reference(send_turnstile);
        ipc_port_send_update_inheritor(port, send_turnstile,
            TURNSTILE_IMMEDIATE_UPDATE);
    }
    imq_unlock(&port->ip_messages);
    ip_unlock(port);

    if (send_turnstile) {
        turnstile_update_inheritor_complete(send_turnstile,
            TURNSTILE_INTERLOCK_NOT_HELD);
        turnstile_deallocate_safe(send_turnstile);
    }
}
/*
 *  Routine:    ipc_port_get_watchport_inheritor
 *  Purpose:
 *      Returns inheritor for watchport.
 *
 *  Conditions:
 *      mqueue locked.
 *  Returns:
 *      watchport inheritor.
 */
static thread_t
ipc_port_get_watchport_inheritor(
    ipc_port_t port)
{
    imq_held(&port->ip_messages);
    return ipc_port_watchport_elem(port)->twe_task->watchports->tw_thread;
}
/*
 *  Routine:    ipc_port_get_receiver_task
 *  Purpose:
 *      Returns receiver task pointer and its pid (if any) for port.
 *
 *  Conditions:
 *      Nothing locked.
 */
pid_t
ipc_port_get_receiver_task(ipc_port_t port, uintptr_t *task)
{
    task_t receiver = TASK_NULL;
    pid_t pid = -1;

    if (!port) {
        goto out;
    }

    ip_lock(port);
    if (ip_active(port) &&
        MACH_PORT_VALID(port->ip_receiver_name) &&
        port->ip_receiver &&
        port->ip_receiver != ipc_space_kernel &&
        port->ip_receiver != ipc_space_reply) {
        receiver = port->ip_receiver->is_task;
        pid = task_pid(receiver);
    }
    ip_unlock(port);

out:
    if (task) {
        *task = (uintptr_t)receiver;
    }
    return pid;
}
/*
 *	Routine:	ipc_port_impcount_delta
 *	Purpose:
 *		Adjust only the importance count associated with a port.
 *		If there are any adjustments to be made to receiver task,
 *		those are handled elsewhere.
 *
 *		For now, be defensive during deductions to make sure the
 *		impcount for the port doesn't underflow zero.  This will
 *		go away when the port boost addition is made atomic (see
 *		note in ipc_port_importance_delta()).
 *	Conditions:
 *		The port is referenced and locked.
 *		Nothing else is locked.
 */
mach_port_delta_t
ipc_port_impcount_delta(
	ipc_port_t        port,
	mach_port_delta_t delta,
	ipc_port_t        __unused base)
{
	mach_port_delta_t absdelta;

	if (!ip_active(port)) {
		return 0;
	}

	/* adding/doing nothing is easy */
	if (delta >= 0) {
		port->ip_impcount += delta;
		return delta;
	}

	/* subtracting needs to be handled carefully */
	absdelta = 0 - delta;
	if (port->ip_impcount >= absdelta) {
		port->ip_impcount -= absdelta;
		return delta;
	}

#if (DEVELOPMENT || DEBUG)
	if (port->ip_receiver_name != MACH_PORT_NULL) {
		task_t target_task = port->ip_receiver->is_task;
		ipc_importance_task_t target_imp = target_task->task_imp_base;
		const char *target_procname;
		int target_pid;

		if (target_imp != IIT_NULL) {
			target_procname = target_imp->iit_procname;
			target_pid = target_imp->iit_bsd_pid;
		} else {
			target_procname = "unknown";
			target_pid = -1;
		}
		printf("Over-release of importance assertions for port 0x%x receiver pid %d (%s), "
		    "dropping %d assertion(s) but port only has %d remaining.\n",
		    port->ip_receiver_name,
		    target_pid, target_procname,
		    absdelta, port->ip_impcount);
	} else if (base != IP_NULL) {
		task_t target_task = base->ip_receiver->is_task;
		ipc_importance_task_t target_imp = target_task->task_imp_base;
		const char *target_procname;
		int target_pid;

		if (target_imp != IIT_NULL) {
			target_procname = target_imp->iit_procname;
			target_pid = target_imp->iit_bsd_pid;
		} else {
			target_procname = "unknown";
			target_pid = -1;
		}
		printf("Over-release of importance assertions for port 0x%lx "
		    "enqueued on port 0x%x with receiver pid %d (%s), "
		    "dropping %d assertion(s) but port only has %d remaining.\n",
		    (unsigned long)VM_KERNEL_UNSLIDE_OR_PERM((uintptr_t)port),
		    base->ip_receiver_name,
		    target_pid, target_procname,
		    absdelta, port->ip_impcount);
	}
#endif /* (DEVELOPMENT || DEBUG) */

	delta = 0 - port->ip_impcount;
	port->ip_impcount = 0;
	return delta;
}
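/*
 *	Worked example (descriptive comment, added for clarity): if a port
 *	holds 3 assertions (ip_impcount == 3) and a caller asks to drop 5
 *	(delta == -5), the defensive path above clamps the count to 0 and
 *	returns -3, so the caller only propagates the 3 drops that were
 *	actually applied.
 */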
/*
 *	Routine:	ipc_port_importance_delta_internal
 *	Purpose:
 *		Adjust the importance count through the given port.
 *		If the port is in transit, apply the delta throughout
 *		the chain. Determine if there is a task at the
 *		base of the chain that wants/needs to be adjusted,
 *		and if so, apply the delta.
 *	Conditions:
 *		The port is referenced and locked on entry.
 *		Importance may be locked.
 *		Nothing else is locked.
 *		The lock may be dropped on exit.
 *		Returns TRUE if lock was dropped.
 */
#if IMPORTANCE_INHERITANCE

bool
ipc_port_importance_delta_internal(
	ipc_port_t              port,
	natural_t               options,
	mach_port_delta_t       *deltap,
	ipc_importance_task_t   *imp_task)
{
	ipc_port_t next, base;
	bool dropped = false;
	bool took_base_ref = false;

	*imp_task = IIT_NULL;

	if (*deltap == 0) {
		return false;
	}

	assert(options == IPID_OPTION_NORMAL || options == IPID_OPTION_SENDPOSSIBLE);

	base = port;

	/* if port is in transit, have to search for end of chain */
	if (ip_active(port) &&
	    port->ip_destination != IP_NULL &&
	    port->ip_receiver_name == MACH_PORT_NULL) {
		dropped = true;

		ip_unlock(port);
		ipc_port_multiple_lock(); /* massive serialization */

		took_base_ref = ipc_port_destination_chain_lock(port, &base);
		/* all ports in chain from port to base, inclusive, are locked */

		ipc_port_multiple_unlock();
	}

	/*
	 * If the port lock is dropped b/c the port is in transit, there is a
	 * race window where another thread can drain messages and/or fire a
	 * send possible notification before we get here.
	 *
	 * We solve this race by checking to see if our caller armed the send
	 * possible notification, whether or not it's been fired yet, and
	 * whether or not we've already set the port's ip_spimportant bit. If
	 * we don't need a send-possible boost, then we'll just apply a
	 * harmless 0-boost to the port.
	 */
	if (options & IPID_OPTION_SENDPOSSIBLE) {
		assert(*deltap == 1);
		if (port->ip_sprequests && port->ip_spimportant == 0) {
			port->ip_spimportant = 1;
		} else {
			*deltap = 0;
		}
	}

	/* unlock down to the base, adjusting boost(s) at each level */
	for (;;) {
		*deltap = ipc_port_impcount_delta(port, *deltap, base);

		if (port == base) {
			break;
		}

		/* port is in transit */
		assert(port->ip_tempowner == 0);
		next = port->ip_destination;
		ip_unlock(port);
		port = next;
	}

	/* find the task (if any) to boost according to the base */
	if (ip_active(base)) {
		if (base->ip_tempowner != 0) {
			if (IIT_NULL != base->ip_imp_task) {
				*imp_task = base->ip_imp_task;
			}
			/* otherwise don't boost */
		} else if (base->ip_receiver_name != MACH_PORT_NULL) {
			ipc_space_t space = base->ip_receiver;

			/* only spaces with boost-accepting tasks */
			if (space->is_task != TASK_NULL &&
			    ipc_importance_task_is_any_receiver_type(space->is_task->task_imp_base)) {
				*imp_task = space->is_task->task_imp_base;
			}
		}
	}

	/*
	 * Only the base is locked.  If we have to hold or drop task
	 * importance assertions, we'll have to drop that lock as well.
	 */
	if (*imp_task != IIT_NULL) {
		/* take a reference before unlocking base */
		ipc_importance_task_reference(*imp_task);
	}

	if (dropped) {
		ip_unlock(base);
		if (took_base_ref) {
			ip_release(base);
		}
	}

	return dropped;
}
#endif /* IMPORTANCE_INHERITANCE */
/*
 *	Routine:	ipc_port_importance_delta
 *	Purpose:
 *		Adjust the importance count through the given port.
 *		If the port is in transit, apply the delta throughout
 *		the chain.
 *
 *		If there is a task at the base of the chain that wants/needs
 *		to be adjusted, apply the delta.
 *	Conditions:
 *		The port is referenced and locked on entry.
 *		Nothing else is locked.
 *		The lock may be dropped on exit.
 *		Returns TRUE if lock was dropped.
 */
#if IMPORTANCE_INHERITANCE

bool
ipc_port_importance_delta(
	ipc_port_t              port,
	natural_t               options,
	mach_port_delta_t       delta)
{
	bool dropped;
	ipc_importance_task_t imp_task = IIT_NULL;

	dropped = ipc_port_importance_delta_internal(port, options, &delta, &imp_task);

	if (IIT_NULL == imp_task || delta == 0) {
		return dropped;
	}

	if (!dropped) {
		ip_unlock(port);
	}

	assert(ipc_importance_task_is_any_receiver_type(imp_task));

	if (delta > 0) {
		ipc_importance_task_hold_internal_assertion(imp_task, delta);
	} else {
		ipc_importance_task_drop_internal_assertion(imp_task, -delta);
	}

	ipc_importance_task_release(imp_task);
	return true;
}
#endif /* IMPORTANCE_INHERITANCE */
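/*
 *	Example (illustrative sketch): a caller holding a locked, referenced
 *	port applies a one-assertion boost; the return value reports whether
 *	the routine had to drop the port lock along the way.
 *
 *		ip_lock(port);
 *		if (!ipc_port_importance_delta(port, IPID_OPTION_NORMAL, 1)) {
 *			ip_unlock(port);	(lock survived; drop it ourselves)
 *		}
 */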
/*
 *	Routine:	ipc_port_make_send_locked
 *	Purpose:
 *		Make a naked send right from a receive right.
 *	Conditions:
 *		port locked and active.
 */
ipc_port_t
ipc_port_make_send_locked(
	ipc_port_t      port)
{
	require_ip_active(port);
	port->ip_mscount++;
	port->ip_srights++;
	ip_reference(port);
	return port;
}
/*
 *	Routine:	ipc_port_make_send
 *	Purpose:
 *		Make a naked send right from a receive right.
 *	Conditions:
 *		Nothing locked.
 */
ipc_port_t
ipc_port_make_send(
	ipc_port_t      port)
{
	if (!IP_VALID(port)) {
		return port;
	}

	ip_lock(port);
	if (ip_active(port)) {
		ipc_port_make_send_locked(port);
		ip_unlock(port);
		return port;
	}
	ip_unlock(port);
	return IP_DEAD;
}
/*
 *	Routine:	ipc_port_copy_send_locked
 *	Purpose:
 *		Make a naked send right from another naked send right.
 *	Conditions:
 *		port locked and active.
 */
void
ipc_port_copy_send_locked(
	ipc_port_t      port)
{
	assert(port->ip_srights > 0);
	port->ip_srights++;
	ip_reference(port);
}
/*
 *	Routine:	ipc_port_copy_send
 *	Purpose:
 *		Make a naked send right from another naked send right.
 *			IP_NULL		-> IP_NULL
 *			IP_DEAD		-> IP_DEAD
 *			dead port	-> IP_DEAD
 *			live port	-> port + ref
 *	Conditions:
 *		Nothing locked except possibly a space.
 */
ipc_port_t
ipc_port_copy_send(
	ipc_port_t      port)
{
	ipc_port_t sright;

	if (!IP_VALID(port)) {
		return port;
	}

	ip_lock(port);
	if (ip_active(port)) {
		ipc_port_copy_send_locked(port);
		sright = port;
	} else {
		sright = IP_DEAD;
	}
	ip_unlock(port);

	return sright;
}
/*
 *	Routine:	ipc_port_copyout_send
 *	Purpose:
 *		Copyout a naked send right (possibly null/dead),
 *		or if that fails, destroy the right.
 *	Conditions:
 *		Nothing locked.
 */
static mach_port_name_t
ipc_port_copyout_send_internal(
	ipc_port_t                      sright,
	ipc_space_t                     space,
	ipc_object_copyout_flags_t      flags)
{
	mach_port_name_t name;

	if (IP_VALID(sright)) {
		kern_return_t kr;

		kr = ipc_object_copyout(space, ip_to_object(sright),
		    MACH_MSG_TYPE_PORT_SEND, flags, NULL, NULL, &name);
		if (kr != KERN_SUCCESS) {
			if (kr == KERN_INVALID_CAPABILITY) {
				name = MACH_PORT_DEAD;
			} else {
				name = MACH_PORT_NULL;
			}
		}
	} else {
		name = CAST_MACH_PORT_TO_NAME(sright);
	}

	return name;
}

mach_port_name_t
ipc_port_copyout_send(
	ipc_port_t      sright,
	ipc_space_t     space)
{
	return ipc_port_copyout_send_internal(sright, space, IPC_OBJECT_COPYOUT_FLAGS_NONE);
}

mach_port_name_t
ipc_port_copyout_send_pinned(
	ipc_port_t      sright,
	ipc_space_t     space)
{
	return ipc_port_copyout_send_internal(sright, space, IPC_OBJECT_COPYOUT_FLAGS_PINNED);
}
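/*
 *	Example (illustrative sketch): publishing a kernel-held send right
 *	into a task's IPC space, yielding the name user code will see:
 *
 *		mach_port_name_t name =
 *		    ipc_port_copyout_send(sright, task->itk_space);
 *
 *	On failure the right has already been destroyed, and the returned
 *	name is MACH_PORT_NULL or MACH_PORT_DEAD rather than a valid entry.
 */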
/*
 *	Routine:	ipc_port_release_send_and_unlock
 *	Purpose:
 *		Release a naked send right.
 *		Consumes a ref for the port.
 *	Conditions:
 *		Port is valid and locked on entry.
 *		Port is unlocked on exit.
 */
void
ipc_port_release_send_and_unlock(
	ipc_port_t      port)
{
	ipc_port_t nsrequest = IP_NULL;
	mach_port_mscount_t mscount;

	assert(port->ip_srights > 0);
	if (port->ip_srights == 0) {
		panic("Over-release of port %p send right!", port);
	}

	port->ip_srights--;

	if (!ip_active(port)) {
		ip_unlock(port);
		ip_release(port);
		return;
	}

	if (port->ip_srights == 0 &&
	    port->ip_nsrequest != IP_NULL) {
		nsrequest = port->ip_nsrequest;
		port->ip_nsrequest = IP_NULL;
		mscount = port->ip_mscount;
		ip_unlock(port);
		ip_release(port);
		ipc_notify_no_senders(nsrequest, mscount);
	} else {
		ip_unlock(port);
		ip_release(port);
	}
}
/*
 *	Routine:	ipc_port_release_send
 *	Purpose:
 *		Release a naked send right.
 *		Consumes a ref for the port.
 *	Conditions:
 *		Nothing locked.
 */
void
ipc_port_release_send(
	ipc_port_t      port)
{
	if (IP_VALID(port)) {
		ip_lock(port);
		ipc_port_release_send_and_unlock(port);
	}
}
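/*
 *	Example (illustrative sketch): the usual send-right life cycle.
 *	Each make/copy takes a right and a port reference; each release
 *	drops one of each, and releasing the last send right fires any
 *	armed no-senders notification:
 *
 *		ipc_port_t sright = ipc_port_make_send(port);
 *		(hand sright around, copy it, etc.)
 *		ipc_port_release_send(sright);
 */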
/*
 *	Routine:	ipc_port_make_sonce_locked
 *	Purpose:
 *		Make a naked send-once right from a receive right.
 *	Conditions:
 *		The port is locked and active.
 */
ipc_port_t
ipc_port_make_sonce_locked(
	ipc_port_t      port)
{
	require_ip_active(port);
	port->ip_sorights++;
	ip_reference(port);
	return port;
}
/*
 *	Routine:	ipc_port_make_sonce
 *	Purpose:
 *		Make a naked send-once right from a receive right.
 *	Conditions:
 *		The port is not locked.
 */
ipc_port_t
ipc_port_make_sonce(
	ipc_port_t      port)
{
	if (!IP_VALID(port)) {
		return port;
	}

	ip_lock(port);
	if (ip_active(port)) {
		ipc_port_make_sonce_locked(port);
		ip_unlock(port);
		return port;
	}
	ip_unlock(port);
	return IP_DEAD;
}
/*
 *	Routine:	ipc_port_release_sonce
 *	Purpose:
 *		Release a naked send-once right.
 *		Consumes a ref for the port.
 *
 *		In normal situations, this is never used.
 *		Send-once rights are only consumed when
 *		a message (possibly a send-once notification)
 *		is sent to them.
 *	Conditions:
 *		Nothing locked except possibly a space.
 */
void
ipc_port_release_sonce(
	ipc_port_t      port)
{
	if (!IP_VALID(port)) {
		return;
	}

	ipc_port_adjust_special_reply_port(port, IPC_PORT_ADJUST_RESET_BOOSTRAP_CHECKIN);

	ip_lock(port);

	assert(port->ip_sorights > 0);
	if (port->ip_sorights == 0) {
		panic("Over-release of port %p send-once right!", port);
	}

	port->ip_sorights--;

	ip_unlock(port);
	ip_release(port);
}
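/*
 *	Note (descriptive comment, added for clarity): a send-once right
 *	normally dies by being used, i.e. a message or send-once
 *	notification is sent to the port, so this path only runs when such
 *	a right is destroyed without ever being exercised (e.g. a discarded
 *	reply-port right).
 */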
/*
 *	Routine:	ipc_port_release_receive
 *	Purpose:
 *		Release a naked (in limbo or in transit) receive right.
 *		Consumes a ref for the port; destroys the port.
 *	Conditions:
 *		Nothing locked.
 */
void
ipc_port_release_receive(
	ipc_port_t      port)
{
	ipc_port_t dest;

	if (!IP_VALID(port)) {
		return;
	}

	ip_lock(port);
	require_ip_active(port);
	assert(port->ip_receiver_name == MACH_PORT_NULL);
	dest = port->ip_destination;

	ipc_port_destroy(port); /* consumes ref, unlocks */

	if (dest != IP_NULL) {
		ipc_port_send_turnstile_complete(dest);
		ip_release(dest);
	}
}
/*
 *	Routine:	ipc_port_alloc_special
 *	Purpose:
 *		Allocate a port in a special space.
 *		The new port is returned with one ref.
 *		If unsuccessful, IP_NULL is returned.
 *	Conditions:
 *		Nothing locked.
 */
ipc_port_t
ipc_port_alloc_special(
	ipc_space_t             space,
	ipc_port_init_flags_t   flags)
{
	ipc_port_t port;

	port = ip_object_to_port(io_alloc(IOT_PORT, Z_WAITOK | Z_ZERO));
	if (port == IP_NULL) {
		return IP_NULL;
	}

#if     MACH_ASSERT
	uintptr_t buf[IP_CALLSTACK_MAX];
	ipc_port_callstack_init_debug(&buf[0], IP_CALLSTACK_MAX);
#endif /* MACH_ASSERT */

	io_lock_init(ip_to_object(port));
	port->ip_references = 1;
	port->ip_object.io_bits = io_makebits(TRUE, IOT_PORT, 0);

	ipc_port_init(port, space, flags, 1);

#if     MACH_ASSERT
	ipc_port_init_debug(port, &buf[0], IP_CALLSTACK_MAX);
#endif  /* MACH_ASSERT */

	return port;
}
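/*
 *	Example (illustrative sketch): kernel-internal special ports are
 *	typically allocated against the kernel space, e.g.:
 *
 *		ipc_port_t port =
 *		    ipc_port_alloc_special(ipc_space_kernel, IPC_PORT_INIT_NONE);
 *		if (port == IP_NULL) {
 *			(allocation failed)
 *		}
 *
 *	IPC_PORT_INIT_NONE requests a plain port; other init flags select
 *	additional behavior at initialization time.
 */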
/*
 *	Routine:	ipc_port_dealloc_special
 *	Purpose:
 *		Deallocate a port in a special space.
 *		Consumes one ref for the port.
 *	Conditions:
 *		Nothing locked.
 */
void
ipc_port_dealloc_special(
	ipc_port_t                      port,
	__assert_only ipc_space_t       space)
{
	ip_lock(port);
	require_ip_active(port);
//	assert(port->ip_receiver_name != MACH_PORT_NULL);
	assert(port->ip_receiver == space);

	/*
	 *	We clear ip_receiver_name and ip_receiver to simplify
	 *	the ipc_space_kernel check in ipc_mqueue_send.
	 */

	imq_lock(&port->ip_messages);
	port->ip_receiver_name = MACH_PORT_NULL;
	port->ip_receiver = IS_NULL;
	imq_unlock(&port->ip_messages);

	/* relevant part of ipc_port_clear_receiver */
	port->ip_mscount = 0;
	port->ip_messages.imq_seqno = 0;

	ipc_port_destroy(port);
}
/*
 *	Routine:	ipc_port_finalize
 *	Purpose:
 *		Called on last reference deallocate to
 *		free any remaining data associated with the
 *		port.
 *	Conditions:
 *		Nothing locked.
 */
void
ipc_port_finalize(
	ipc_port_t      port)
{
	ipc_port_request_t requests = port->ip_requests;

	assert(port_send_turnstile(port) == TURNSTILE_NULL);
	if (imq_is_turnstile_proxy(&port->ip_messages)) {
		assert(ipc_port_rcv_turnstile(port) == TURNSTILE_NULL);
	}

	if (ip_active(port)) {
		panic("Trying to free an active port. port %p", port);
	}

	if (requests != IPR_NULL) {
		ipc_table_size_t its = requests->ipr_size;
		it_requests_free(its, requests);
		port->ip_requests = IPR_NULL;
	}

	ipc_mqueue_deinit(&port->ip_messages);

#if     MACH_ASSERT
	ipc_port_track_dealloc(port);
#endif  /* MACH_ASSERT */
}
/*
 *	Routine:	kdp_mqueue_send_find_owner
 *	Purpose:
 *		Discover the owner of the ipc_mqueue that contains the input
 *		waitq object. The thread blocked on the waitq should be
 *		waiting for an IPC_MQUEUE_FULL event.
 *	Conditions:
 *		The 'waitinfo->wait_type' value should already be set to
 *		kThreadWaitPortSend.
 *	Note:
 *		If we find out that the containing port is actually in
 *		transit, we reset the wait_type field to reflect this.
 */
void
kdp_mqueue_send_find_owner(struct waitq * waitq, __assert_only event64_t event, thread_waitinfo_t * waitinfo)
{
	struct turnstile *turnstile;
	assert(waitinfo->wait_type == kThreadWaitPortSend);
	assert(event == IPC_MQUEUE_FULL);
	assert(waitq_is_turnstile_queue(waitq));

	turnstile = waitq_to_turnstile(waitq);
	ipc_port_t port = (ipc_port_t)turnstile->ts_proprietor; /* we are blocking on send */

	zone_id_require(ZONE_ID_IPC_PORT, sizeof(struct ipc_port), port);

	waitinfo->owner = 0;
	waitinfo->context = VM_KERNEL_UNSLIDE_OR_PERM(port);
	if (ip_lock_held_kdp(port)) {
		/*
		 * someone has the port locked: it may be in an
		 * inconsistent state: bail
		 */
		waitinfo->owner = STACKSHOT_WAITOWNER_PORT_LOCKED;
		return;
	}

	if (ip_active(port)) {
		if (port->ip_tempowner) {
			if (port->ip_imp_task != IIT_NULL && port->ip_imp_task->iit_task != NULL) {
				/* port is held by a tempowner */
				waitinfo->owner = pid_from_task(port->ip_imp_task->iit_task);
			} else {
				waitinfo->owner = STACKSHOT_WAITOWNER_INTRANSIT;
			}
		} else if (port->ip_receiver_name) {
			/* port in a space */
			if (port->ip_receiver == ipc_space_kernel) {
				/*
				 * The kernel pid is 0, make this
				 * distinguishable from no-owner and
				 * inconsistent port state.
				 */
				waitinfo->owner = STACKSHOT_WAITOWNER_KERNEL;
			} else {
				waitinfo->owner = pid_from_task(port->ip_receiver->is_task);
			}
		} else if (port->ip_destination != IP_NULL) {
			/* port in transit */
			waitinfo->wait_type = kThreadWaitPortSendInTransit;
			waitinfo->owner     = VM_KERNEL_UNSLIDE_OR_PERM(port->ip_destination);
		}
	}
}
/*
 *	Routine:	kdp_mqueue_recv_find_owner
 *	Purpose:
 *		Discover the "owner" of the ipc_mqueue that contains the input
 *		waitq object. The thread blocked on the waitq is trying to
 *		receive on the mqueue.
 *	Conditions:
 *		The 'waitinfo->wait_type' value should already be set to
 *		kThreadWaitPortReceive.
 *	Note:
 *		If we find that we are actually waiting on a port set, we reset
 *		the wait_type field to reflect this.
 */
void
kdp_mqueue_recv_find_owner(struct waitq * waitq, __assert_only event64_t event, thread_waitinfo_t * waitinfo)
{
	assert(waitinfo->wait_type == kThreadWaitPortReceive);
	assert(event == IPC_MQUEUE_RECEIVE);

	ipc_mqueue_t mqueue = imq_from_waitq(waitq);
	waitinfo->owner = 0;
	if (imq_is_set(mqueue)) { /* we are waiting on a port set */
		ipc_pset_t set = ips_from_mq(mqueue);

		zone_id_require(ZONE_ID_IPC_PORT_SET, sizeof(struct ipc_pset), set);

		/* Reset wait type to specify waiting on port set receive */
		waitinfo->wait_type = kThreadWaitPortSetReceive;
		waitinfo->context   = VM_KERNEL_UNSLIDE_OR_PERM(set);
		if (ips_lock_held_kdp(set)) {
			waitinfo->owner = STACKSHOT_WAITOWNER_PSET_LOCKED;
		}
		/* There is no specific owner "at the other end" of a port set, so leave unset. */
	} else {
		ipc_port_t port = ip_from_mq(mqueue);

		zone_id_require(ZONE_ID_IPC_PORT, sizeof(struct ipc_port), port);

		waitinfo->context = VM_KERNEL_UNSLIDE_OR_PERM(port);
		if (ip_lock_held_kdp(port)) {
			waitinfo->owner = STACKSHOT_WAITOWNER_PORT_LOCKED;
			return;
		}

		if (ip_active(port)) {
			if (port->ip_receiver_name != MACH_PORT_NULL) {
				waitinfo->owner = port->ip_receiver_name;
			} else {
				waitinfo->owner = STACKSHOT_WAITOWNER_INTRANSIT;
			}
		}
	}
}
#if     MACH_ASSERT
#include <kern/machine.h>

/*
 *	Keep a list of all allocated ports.
 *	Allocation is intercepted via ipc_port_init;
 *	deallocation is intercepted via io_free.
 */
#if 0
queue_head_t    port_alloc_queue = QUEUE_HEAD_INITIALIZER(port_alloc_queue);
LCK_SPIN_DECLARE(port_alloc_queue_lock, &ipc_lck_grp, &ipc_lck_attr);
#endif

unsigned long   port_count = 0;
unsigned long   port_count_warning = 20000;
unsigned long   port_timestamp = 0;

void            db_port_stack_trace(
	ipc_port_t      port);
void            db_ref(
	int             refs);
int             db_port_walk(
	unsigned int    verbose,
	unsigned int    display,
	unsigned int    ref_search,
	unsigned int    ref_target);

#ifdef MACH_BSD
extern int proc_pid(struct proc *);
#endif /* MACH_BSD */
/*
 *	Initialize all of the debugging state in a port.
 *	Insert the port into a global list of all allocated ports.
 */
void
ipc_port_init_debug(
	ipc_port_t      port,
	uintptr_t       *callstack,
	unsigned int    callstack_max)
{
	unsigned int    i;

	port->ip_thread = current_thread();
	port->ip_timetrack = port_timestamp++;
	for (i = 0; i < callstack_max; ++i) {
		port->ip_callstack[i] = callstack[i];
	}
	for (i = 0; i < IP_NSPARES; ++i) {
		port->ip_spares[i] = 0;
	}

#ifdef MACH_BSD
	task_t task = current_task();
	if (task != TASK_NULL) {
		struct proc *proc = (struct proc *) get_bsdtask_info(task);
		if (proc) {
			port->ip_spares[0] = proc_pid(proc);
		}
	}
#endif /* MACH_BSD */

#if 0
	lck_spin_lock(&port_alloc_queue_lock);
	++port_count;
	if (port_count_warning > 0 && port_count >= port_count_warning) {
		assert(port_count < port_count_warning);
	}
	queue_enter(&port_alloc_queue, port, ipc_port_t, ip_port_links);
	lck_spin_unlock(&port_alloc_queue_lock);
#endif
}
/*
 *	Routine:	ipc_port_callstack_init_debug
 *	Purpose:
 *		Calls the machine-dependent routine to
 *		fill in an array with up to IP_CALLSTACK_MAX
 *		levels of return pc information.
 *	Conditions:
 *		May block (via copyin).
 */
void
ipc_port_callstack_init_debug(
	uintptr_t       *callstack,
	unsigned int    callstack_max)
{
	unsigned int    i;

	/* guarantee the callstack is initialized */
	for (i = 0; i < callstack_max; i++) {
		callstack[i] = 0;
	}

	if (ipc_portbt) {
		machine_callstack(callstack, callstack_max);
	}
}
/*
 *	Remove a port from the queue of allocated ports.
 *	This routine should be invoked JUST prior to
 *	deallocating the actual memory occupied by the port.
 */
#if 1
void
ipc_port_track_dealloc(
	__unused ipc_port_t     port)
{
}
#else
void
ipc_port_track_dealloc(
	ipc_port_t              port)
{
	lck_spin_lock(&port_alloc_queue_lock);
	assert(port_count > 0);
	--port_count;
	queue_remove(&port_alloc_queue, port, ipc_port_t, ip_port_links);
	lck_spin_unlock(&port_alloc_queue_lock);
}
#endif

#endif  /* MACH_ASSERT */