/*
 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */
/*
 * File:	ipc/ipc_port.c
 *
 * Functions to manipulate IPC ports.
 */
#include <mach_assert.h>

#include <mach/port.h>
#include <mach/kern_return.h>
#include <kern/ipc_kobject.h>
#include <kern/thread.h>
#include <kern/misc_protos.h>
#include <kern/waitq.h>
#include <kern/policy_internal.h>
#include <kern/debug.h>
#include <kern/kcdata.h>
#include <ipc/ipc_entry.h>
#include <ipc/ipc_space.h>
#include <ipc/ipc_object.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_pset.h>
#include <ipc/ipc_kmsg.h>
#include <ipc/ipc_mqueue.h>
#include <ipc/ipc_notify.h>
#include <ipc/ipc_table.h>
#include <ipc/ipc_importance.h>
#include <machine/limits.h>
#include <kern/turnstile.h>
#include <kern/machine.h>

#include <security/mac_mach_internal.h>
static TUNABLE(bool, prioritize_launch, "prioritize_launch", true);
TUNABLE_WRITEABLE(int, ipc_portbt, "ipc_portbt", false);

LCK_SPIN_DECLARE_ATTR(ipc_port_multiple_lock_data, &ipc_lck_grp, &ipc_lck_attr);
ipc_port_timestamp_t ipc_port_timestamp_data;
#if MACH_ASSERT
void ipc_port_init_debug(
    ipc_port_t      port,
    uintptr_t       *callstack,
    unsigned int    callstack_max);

void ipc_port_callstack_init_debug(
    uintptr_t       *callstack,
    unsigned int    callstack_max);
#endif /* MACH_ASSERT */
static void ipc_port_send_turnstile_recompute_push_locked(
    ipc_port_t port);

static thread_t ipc_port_get_watchport_inheritor(
    ipc_port_t port);

void
ipc_port_release(ipc_port_t port)
{
    ip_release(port);
}

void
ipc_port_reference(ipc_port_t port)
{
    ip_reference(port);
}
/*
 *	Routine:	ipc_port_timestamp
 *	Purpose:
 *		Retrieve a timestamp value.
 */
ipc_port_timestamp_t
ipc_port_timestamp(void)
{
    return OSIncrementAtomic(&ipc_port_timestamp_data);
}
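/*
 * Illustrative note (an assumption for orientation, not part of the original
 * file): since the timestamp is a monotonically incremented global counter,
 * consumers are expected to compare two values with wraparound-safe signed
 * arithmetic rather than a plain `<`, along the lines of:
 *
 *	ipc_port_timestamp_t death = port->ip_timestamp;
 *	if ((int)(death - copyout_time) < 0) {
 *		// the port died before the right was copied out
 *	}
 */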
/*
 *	Routine:	ipc_port_request_alloc
 *	Purpose:
 *		Try to allocate a request slot.
 *		If successful, returns the request index.
 *		Otherwise returns zero.
 *	Conditions:
 *		The port is locked and active.
 *	Returns:
 *		KERN_SUCCESS		A request index was found.
 *		KERN_NO_SPACE		No index allocated.
 */

#if IMPORTANCE_INHERITANCE
kern_return_t
ipc_port_request_alloc(
    ipc_port_t                  port,
    mach_port_name_t            name,
    ipc_port_t                  soright,
    boolean_t                   send_possible,
    boolean_t                   immediate,
    ipc_port_request_index_t    *indexp,
    boolean_t                   *importantp)
#else
kern_return_t
ipc_port_request_alloc(
    ipc_port_t                  port,
    mach_port_name_t            name,
    ipc_port_t                  soright,
    boolean_t                   send_possible,
    boolean_t                   immediate,
    ipc_port_request_index_t    *indexp)
#endif /* IMPORTANCE_INHERITANCE */
{
    ipc_port_request_t ipr, table;
    ipc_port_request_index_t index;
    uintptr_t mask = 0;

#if IMPORTANCE_INHERITANCE
    *importantp = FALSE;
#endif /* IMPORTANCE_INHERITANCE */

    require_ip_active(port);
    assert(name != MACH_PORT_NULL);
    assert(soright != IP_NULL);

    table = port->ip_requests;

    if (table == IPR_NULL) {
        return KERN_NO_SPACE;
    }

    index = table->ipr_next;
    if (index == 0) {
        return KERN_NO_SPACE;
    }

    ipr = &table[index];
    assert(ipr->ipr_name == MACH_PORT_NULL);

    table->ipr_next = ipr->ipr_next;
    ipr->ipr_name = name;

    if (send_possible) {
        mask |= IPR_SOR_SPREQ_MASK;
        if (immediate) {
            mask |= IPR_SOR_SPARM_MASK;
            if (port->ip_sprequests == 0) {
                port->ip_sprequests = 1;
#if IMPORTANCE_INHERITANCE
                /* TODO: Live importance support in send-possible */
                if (port->ip_impdonation != 0 &&
                    port->ip_spimportant == 0 &&
                    (task_is_importance_donor(current_task()))) {
                    *importantp = TRUE;
                }
#endif /* IMPORTANCE_INHERITANCE */
            }
        }
    }

    ipr->ipr_soright = IPR_SOR_MAKE(soright, mask);

    *indexp = index;

    return KERN_SUCCESS;
}
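/*
 * Usage sketch (illustrative, loosely based on how callers in ipc_right.c
 * drive this interface; the exact loop and relocking shown here are
 * assumptions): a caller that gets KERN_NO_SPACE grows the table and
 * retries, remembering that ipc_port_request_grow() unlocks the port:
 *
 *	for (;;) {
 *		// port is locked and active here
 *		kr = ipc_port_request_alloc(port, name, soright,
 *		    send_possible, immediate, &index, &important);
 *		if (kr != KERN_NO_SPACE)
 *			break;				// got an index
 *		kr = ipc_port_request_grow(port, ITS_SIZE_NONE);
 *		// port is unlocked here
 *		if (kr != KERN_SUCCESS)
 *			return kr;
 *		// re-lock and re-validate the port before retrying
 *	}
 */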
/*
 *	Routine:	ipc_port_request_grow
 *	Purpose:
 *		Grow a port's table of requests.
 *	Conditions:
 *		The port must be locked and active.
 *		Nothing else locked; will allocate memory.
 *		Upon return the port is unlocked.
 *	Returns:
 *		KERN_SUCCESS		Grew the table.
 *		KERN_SUCCESS		Somebody else grew the table.
 *		KERN_SUCCESS		The port died.
 *		KERN_RESOURCE_SHORTAGE	Couldn't allocate new table.
 *		KERN_NO_SPACE		Couldn't grow to desired size
 */
kern_return_t
ipc_port_request_grow(
    ipc_port_t          port,
    ipc_table_elems_t   target_size)
{
    ipc_table_size_t its;
    ipc_port_request_t otable, ntable;
    require_ip_active(port);

    otable = port->ip_requests;
    if (otable == IPR_NULL) {
        its = &ipc_table_requests[0];
    } else {
        its = otable->ipr_size + 1;
    }

    if (target_size != ITS_SIZE_NONE) {
        if ((otable != IPR_NULL) &&
            (target_size <= otable->ipr_size->its_size)) {
            ip_unlock(port);
            return KERN_SUCCESS;
        }
        while ((its->its_size) && (its->its_size < target_size)) {
            its++;
        }
        if (its->its_size == 0) {
            ip_unlock(port);
            return KERN_NO_SPACE;
        }
    }

    ip_reference(port);
    ip_unlock(port);

    if ((its->its_size == 0) ||
        ((ntable = it_requests_alloc(its)) == IPR_NULL)) {
        ip_release(port);
        return KERN_RESOURCE_SHORTAGE;
    }

    ip_lock(port);

    /*
     * Check that port is still active and that nobody else
     * has slipped in and grown the table on us.  Note that
     * just checking if the current table pointer == otable
     * isn't sufficient; must check ipr_size.
     */
    if (ip_active(port) && (port->ip_requests == otable) &&
        ((otable == IPR_NULL) || (otable->ipr_size + 1 == its))) {
        ipc_table_size_t oits;
        ipc_table_elems_t osize, nsize;
        ipc_port_request_index_t free, i;

        /* copy old table to new table */

        if (otable != IPR_NULL) {
            oits = otable->ipr_size;
            osize = oits->its_size;
            free = otable->ipr_next;

            (void) memcpy((void *)(ntable + 1),
                (const void *)(otable + 1),
                (osize - 1) * sizeof(struct ipc_port_request));
        } else {
            osize = 1;
            oits = 0;
            free = 0;
        }

        nsize = its->its_size;
        assert(nsize > osize);

        /* add new elements to the new table's free list */

        for (i = osize; i < nsize; i++) {
            ipc_port_request_t ipr = &ntable[i];

            ipr->ipr_name = MACH_PORT_NULL;
            ipr->ipr_next = free;
            free = i;
        }

        ntable->ipr_next = free;
        ntable->ipr_size = its;
        port->ip_requests = ntable;
        ip_unlock(port);

        if (otable != IPR_NULL) {
            it_requests_free(oits, otable);
        }
    } else {
        ip_unlock(port);
        it_requests_free(its, ntable);
    }

    ip_release(port);
    return KERN_SUCCESS;
}
/*
 *	Routine:	ipc_port_request_sparm
 *	Purpose:
 *		Arm delayed send-possible request.
 *	Conditions:
 *		The port must be locked and active.
 *
 *		Returns TRUE if the request was armed
 *		(or armed with importance in that version).
 */
boolean_t
ipc_port_request_sparm(
    ipc_port_t                      port,
    __assert_only mach_port_name_t  name,
    ipc_port_request_index_t        index,
    mach_msg_option_t               option,
    mach_msg_priority_t             priority)
{
    if (index != IE_REQ_NONE) {
        ipc_port_request_t ipr, table;

        require_ip_active(port);

        table = port->ip_requests;
        assert(table != IPR_NULL);

        ipr = &table[index];
        assert(ipr->ipr_name == name);

        /* Is there a valid destination? */
        if (IPR_SOR_SPREQ(ipr->ipr_soright)) {
            ipr->ipr_soright = IPR_SOR_MAKE(ipr->ipr_soright, IPR_SOR_SPARM_MASK);
            port->ip_sprequests = 1;

            if (option & MACH_SEND_OVERRIDE) {
                /* apply override to message queue */
                mach_msg_qos_t qos_ovr;
                if (mach_msg_priority_is_pthread_priority(priority)) {
                    qos_ovr = _pthread_priority_thread_qos(priority);
                } else {
                    qos_ovr = mach_msg_priority_overide_qos(priority);
                }
                if (qos_ovr) {
                    ipc_mqueue_override_send(&port->ip_messages, qos_ovr);
                }
            }

#if IMPORTANCE_INHERITANCE
            if (((option & MACH_SEND_NOIMPORTANCE) == 0) &&
                (port->ip_impdonation != 0) &&
                (port->ip_spimportant == 0) &&
                (((option & MACH_SEND_IMPORTANCE) != 0) ||
                (task_is_importance_donor(current_task())))) {
                return TRUE;
            }
#else
            return TRUE;
#endif /* IMPORTANCE_INHERITANCE */
        }
    }
    return FALSE;
}
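/*
 * Context sketch (an assumption about the caller, for orientation only):
 * the message-send path arms this when a sender passes MACH_SEND_NOTIFY,
 * roughly:
 *
 *	if (option & MACH_SEND_NOTIFY) {
 *		boosted = ipc_port_request_sparm(port, dest_name,
 *		    entry->ie_request, option, priority);
 *	}
 *
 * after which a send-possible notification is generated later via
 * ipc_port_spnotify() once the queue can accept messages again.
 */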
/*
 *	Routine:	ipc_port_request_type
 *	Purpose:
 *		Determine the type(s) of port requests enabled for a name.
 *	Conditions:
 *		The port must be locked or inactive (to avoid table growth).
 *		The index must not be IE_REQ_NONE and must be for the name
 *		in question.
 */
mach_port_type_t
ipc_port_request_type(
    ipc_port_t                      port,
    __assert_only mach_port_name_t  name,
    ipc_port_request_index_t        index)
{
    ipc_port_request_t ipr, table;
    mach_port_type_t type = 0;

    table = port->ip_requests;
    assert(table != IPR_NULL);

    assert(index != IE_REQ_NONE);
    ipr = &table[index];
    assert(ipr->ipr_name == name);

    if (IP_VALID(IPR_SOR_PORT(ipr->ipr_soright))) {
        type |= MACH_PORT_TYPE_DNREQUEST;

        if (IPR_SOR_SPREQ(ipr->ipr_soright)) {
            type |= MACH_PORT_TYPE_SPREQUEST;

            if (!IPR_SOR_SPARMED(ipr->ipr_soright)) {
                type |= MACH_PORT_TYPE_SPREQUEST_DELAYED;
            }
        }
    }
    return type;
}
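/*
 * Interpretation sketch (illustrative): a slot holding a valid send-once
 * right always reports MACH_PORT_TYPE_DNREQUEST; a send-possible request
 * adds MACH_PORT_TYPE_SPREQUEST, plus MACH_PORT_TYPE_SPREQUEST_DELAYED
 * while it is registered but not yet armed.  For example:
 *
 *	mach_port_type_t t = ipc_port_request_type(port, name, index);
 *	if (t & MACH_PORT_TYPE_SPREQUEST_DELAYED) {
 *		// registered send-possible request that has not been armed
 *	}
 */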
/*
 *	Routine:	ipc_port_request_cancel
 *	Purpose:
 *		Cancel a dead-name/send-possible request and return the
 *		send-once right.
 *	Conditions:
 *		The port must be locked and active.
 *		The index must not be IE_REQ_NONE and must correspond
 *		with name.
 */
ipc_port_t
ipc_port_request_cancel(
    ipc_port_t                      port,
    __assert_only mach_port_name_t  name,
    ipc_port_request_index_t        index)
{
    ipc_port_request_t ipr, table;
    ipc_port_t request = IP_NULL;

    require_ip_active(port);
    table = port->ip_requests;
    assert(table != IPR_NULL);

    assert(index != IE_REQ_NONE);
    ipr = &table[index];
    assert(ipr->ipr_name == name);
    request = IPR_SOR_PORT(ipr->ipr_soright);

    /* return ipr to the free list inside the table */
    ipr->ipr_name = MACH_PORT_NULL;
    ipr->ipr_next = table->ipr_next;
    table->ipr_next = index;

    return request;
}
/*
 *	Routine:	ipc_port_pdrequest
 *	Purpose:
 *		Make a port-deleted request, returning the
 *		previously registered send-once right.
 *		Just cancels the previous request if notify is IP_NULL.
 *	Conditions:
 *		The port is locked and active.  It is unlocked.
 *		Consumes a ref for notify (if non-null), and
 *		returns previous with a ref (if non-null).
 */
void
ipc_port_pdrequest(
    ipc_port_t  port,
    ipc_port_t  notify,
    ipc_port_t  *previousp)
{
    ipc_port_t previous;
    require_ip_active(port);

    previous = port->ip_pdrequest;
    port->ip_pdrequest = notify;
    ip_unlock(port);

    *previousp = previous;
}
/*
 *	Routine:	ipc_port_nsrequest
 *	Purpose:
 *		Make a no-senders request, returning the
 *		previously registered send-once right.
 *		Just cancels the previous request if notify is IP_NULL.
 *	Conditions:
 *		The port is locked and active.  It is unlocked.
 *		Consumes a ref for notify (if non-null), and
 *		returns previous with a ref (if non-null).
 */
void
ipc_port_nsrequest(
    ipc_port_t              port,
    mach_port_mscount_t     sync,
    ipc_port_t              notify,
    ipc_port_t              *previousp)
{
    ipc_port_t previous;
    mach_port_mscount_t mscount;
    require_ip_active(port);

    previous = port->ip_nsrequest;
    mscount = port->ip_mscount;

    if ((port->ip_srights == 0) && (sync <= mscount) &&
        (notify != IP_NULL)) {
        port->ip_nsrequest = IP_NULL;
        ip_unlock(port);
        ipc_notify_no_senders(notify, mscount);
    } else {
        port->ip_nsrequest = notify;
        ip_unlock(port);
    }

    *previousp = previous;
}
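/*
 * Userspace-facing sketch (illustrative; this shows the public
 * mach_port_request_notification() interface, not code from this file):
 * a server typically registers for no-senders like so, with sync == 0
 * meaning "notify relative to the current make-send count":
 *
 *	mach_port_t previous;
 *	kern_return_t kr = mach_port_request_notification(mach_task_self(),
 *	    service_port, MACH_NOTIFY_NO_SENDERS, 0,
 *	    notify_port, MACH_MSG_TYPE_MAKE_SEND_ONCE, &previous);
 *
 * which eventually lands here with `sync` compared against ip_mscount so
 * the notification can fire immediately if all send rights are already gone.
 */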
/*
 *	Routine:	ipc_port_clear_receiver
 *	Purpose:
 *		Prepares a receive right for transmission/destruction,
 *		optionally performs mqueue destruction (with port lock held)
 *	Conditions:
 *		The port is locked and active.
 *	Returns:
 *		If should_destroy is TRUE, then the return value indicates
 *		whether the caller needs to reap kmsg structures that should
 *		be destroyed (by calling ipc_kmsg_reap_delayed)
 *
 *		If should_destroy is FALSE, this always returns FALSE
 */
boolean_t
ipc_port_clear_receiver(
    ipc_port_t  port,
    boolean_t   should_destroy)
{
    ipc_mqueue_t mqueue = &port->ip_messages;
    boolean_t reap_messages = FALSE;

    /*
     * Pull ourselves out of any sets to which we belong.
     * We hold the port locked, so even though this acquires and releases
     * the mqueue lock, we know we won't be added to any other sets.
     */
    if (port->ip_in_pset != 0) {
        ipc_pset_remove_from_all(port);
        assert(port->ip_in_pset == 0);
    }

    /*
     * Send anyone waiting on the port's queue directly away.
     * Also clear the mscount, seqno, guard bits
     */
    imq_lock(mqueue);
    if (port->ip_receiver_name) {
        ipc_mqueue_changed(port->ip_receiver, mqueue);
    } else {
        ipc_mqueue_changed(NULL, mqueue);
    }
    port->ip_mscount = 0;
    mqueue->imq_seqno = 0;
    port->ip_context = port->ip_guarded = port->ip_strict_guard = 0;
    /*
     * clear the immovable bit so the port can move back to anyone listening
     * for the port destroy notification
     */
    port->ip_immovable_receive = 0;

    if (should_destroy) {
        /*
         * Mark the port and mqueue invalid, preventing further send/receive
         * operations from succeeding. It's important for this to be
         * done under the same lock hold as the ipc_mqueue_changed
         * call to avoid additional threads blocking on an mqueue
         * that's being destroyed.
         *
         * The port active bit needs to be guarded under mqueue lock for
         * turnstiles.
         */
        port->ip_object.io_bits &= ~IO_BITS_ACTIVE;
        port->ip_timestamp = ipc_port_timestamp();
        reap_messages = ipc_mqueue_destroy_locked(mqueue);
    } else {
        /* make port be in limbo */
        port->ip_receiver_name = MACH_PORT_NULL;
        port->ip_destination = IP_NULL;
    }

    imq_unlock(&port->ip_messages);

    return reap_messages;
}
/*
 *	Routine:	ipc_port_init
 *	Purpose:
 *		Initializes a newly-allocated port.
 *		Doesn't touch the ip_object fields.
 */
void
ipc_port_init(
    ipc_port_t              port,
    ipc_space_t             space,
    ipc_port_init_flags_t   flags,
    mach_port_name_t        name)
{
    /* port->ip_kobject doesn't have to be initialized */

    port->ip_receiver = space;
    port->ip_receiver_name = name;

    port->ip_mscount = 0;
    port->ip_srights = 0;
    port->ip_sorights = 0;
    if (flags & IPC_PORT_INIT_MAKE_SEND_RIGHT) {
        port->ip_srights = 1;
        port->ip_mscount = 1;
    }

    port->ip_nsrequest = IP_NULL;
    port->ip_pdrequest = IP_NULL;
    port->ip_requests = IPR_NULL;

    port->ip_premsg = IKM_NULL;
    port->ip_context = 0;
    port->ip_reply_context = 0;

    port->ip_sprequests  = 0;
    port->ip_spimportant = 0;
    port->ip_impdonation = 0;
    port->ip_tempowner   = 0;

    port->ip_guarded      = 0;
    port->ip_strict_guard = 0;
    port->ip_immovable_receive = 0;
    port->ip_no_grant    = 0;
    port->ip_immovable_send = 0;
    port->ip_impcount    = 0;

    if (flags & IPC_PORT_INIT_FILTER_MESSAGE) {
        port->ip_object.io_bits |= IP_BIT_FILTER_MSG;
    }

    port->ip_tg_block_tracking = (flags & IPC_PORT_INIT_TG_BLOCK_TRACKING) != 0;
    port->ip_specialreply = (flags & IPC_PORT_INIT_SPECIAL_REPLY) != 0;
    port->ip_sync_link_state = PORT_SYNC_LINK_ANY;
    port->ip_sync_bootstrap_checkin = 0;

    ipc_special_reply_port_bits_reset(port);

    port->ip_send_turnstile = TURNSTILE_NULL;

    ipc_mqueue_kind_t kind = IPC_MQUEUE_KIND_NONE;
    if (flags & IPC_PORT_INIT_MESSAGE_QUEUE) {
        kind = IPC_MQUEUE_KIND_PORT;
    }
    ipc_mqueue_init(&port->ip_messages, kind);
}
/*
 *	Routine:	ipc_port_alloc
 *	Purpose:
 *		Allocate a port.
 *	Conditions:
 *		Nothing locked.  If successful, the port is returned
 *		locked.  (The caller doesn't have a reference.)
 *	Returns:
 *		KERN_SUCCESS		The port is allocated.
 *		KERN_INVALID_TASK	The space is dead.
 *		KERN_NO_SPACE		No room for an entry in the space.
 *		KERN_RESOURCE_SHORTAGE	Couldn't allocate memory.
 */
kern_return_t
ipc_port_alloc(
    ipc_space_t             space,
    ipc_port_init_flags_t   flags,
    mach_port_name_t        *namep,
    ipc_port_t              *portp)
{
    ipc_port_t port;
    mach_port_name_t name;
    kern_return_t kr;
    mach_port_type_t type = MACH_PORT_TYPE_RECEIVE;
    mach_port_urefs_t urefs = 0;

#if MACH_ASSERT
    uintptr_t buf[IP_CALLSTACK_MAX];
    ipc_port_callstack_init_debug(&buf[0], IP_CALLSTACK_MAX);
#endif /* MACH_ASSERT */

    if (flags & IPC_PORT_INIT_MAKE_SEND_RIGHT) {
        type |= MACH_PORT_TYPE_SEND;
        urefs = 1;
    }
    kr = ipc_object_alloc(space, IOT_PORT, type, urefs,
        &name, (ipc_object_t *) &port);
    if (kr != KERN_SUCCESS) {
        return kr;
    }

    /* port and space are locked */
    ipc_port_init(port, space, flags, name);

#if MACH_ASSERT
    ipc_port_init_debug(port, &buf[0], IP_CALLSTACK_MAX);
#endif /* MACH_ASSERT */

    /* unlock space after init */
    is_write_unlock(space);

    *namep = name;
    *portp = port;

    return KERN_SUCCESS;
}
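/*
 * Usage sketch (illustrative, in-kernel; the unlock discipline shown is an
 * assumption drawn from the routine's stated conditions): a typical caller
 * allocates a port with a message queue and receives it locked:
 *
 *	mach_port_name_t name;
 *	ipc_port_t port;
 *	kr = ipc_port_alloc(space, IPC_PORT_INIT_MESSAGE_QUEUE, &name, &port);
 *	if (kr == KERN_SUCCESS) {
 *		// port is locked and has no extra ref; finish setup, then:
 *		ip_unlock(port);
 *	}
 */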
/*
 *	Routine:	ipc_port_alloc_name
 *	Purpose:
 *		Allocate a port, with a specific name.
 *	Conditions:
 *		Nothing locked.  If successful, the port is returned
 *		locked.  (The caller doesn't have a reference.)
 *	Returns:
 *		KERN_SUCCESS		The port is allocated.
 *		KERN_INVALID_TASK	The space is dead.
 *		KERN_NAME_EXISTS	The name already denotes a right.
 *		KERN_RESOURCE_SHORTAGE	Couldn't allocate memory.
 */
kern_return_t
ipc_port_alloc_name(
    ipc_space_t             space,
    ipc_port_init_flags_t   flags,
    mach_port_name_t        name,
    ipc_port_t              *portp)
{
    ipc_port_t port;
    kern_return_t kr;
    mach_port_type_t type = MACH_PORT_TYPE_RECEIVE;
    mach_port_urefs_t urefs = 0;

#if MACH_ASSERT
    uintptr_t buf[IP_CALLSTACK_MAX];
    ipc_port_callstack_init_debug(&buf[0], IP_CALLSTACK_MAX);
#endif /* MACH_ASSERT */

    if (flags & IPC_PORT_INIT_MAKE_SEND_RIGHT) {
        type |= MACH_PORT_TYPE_SEND;
        urefs = 1;
    }
    kr = ipc_object_alloc_name(space, IOT_PORT, type, urefs,
        name, (ipc_object_t *) &port);
    if (kr != KERN_SUCCESS) {
        return kr;
    }

    /* port is locked */
    ipc_port_init(port, space, flags, name);

#if MACH_ASSERT
    ipc_port_init_debug(port, &buf[0], IP_CALLSTACK_MAX);
#endif /* MACH_ASSERT */

    *portp = port;

    return KERN_SUCCESS;
}
/*
 *	Routine:	ipc_port_spnotify
 *	Purpose:
 *		Generate send-possible port notifications.
 *	Conditions:
 *		Nothing locked, reference held on port.
 */
void
ipc_port_spnotify(
    ipc_port_t  port)
{
    ipc_port_request_index_t index = 0;
    ipc_table_elems_t size = 0;

    /*
     * If the port has no send-possible request
     * armed, don't bother to lock the port.
     */
    if (port->ip_sprequests == 0) {
        return;
    }

    ip_lock(port);

#if IMPORTANCE_INHERITANCE
    if (port->ip_spimportant != 0) {
        port->ip_spimportant = 0;
        if (ipc_port_importance_delta(port, IPID_OPTION_NORMAL, -1) == TRUE) {
            ip_lock(port);
        }
    }
#endif /* IMPORTANCE_INHERITANCE */

    if (port->ip_sprequests == 0) {
        ip_unlock(port);
        return;
    }
    port->ip_sprequests = 0;

revalidate:
    if (ip_active(port)) {
        ipc_port_request_t requests;

        /* table may change each time port unlocked (reload) */
        requests = port->ip_requests;
        assert(requests != IPR_NULL);

        /*
         * no need to go beyond table size when first
         * we entered - those are future notifications.
         */
        if (size == 0) {
            size = requests->ipr_size->its_size;
        }

        /* no need to backtrack either */
        while (++index < size) {
            ipc_port_request_t ipr = &requests[index];
            mach_port_name_t name = ipr->ipr_name;
            ipc_port_t soright = IPR_SOR_PORT(ipr->ipr_soright);
            boolean_t armed = IPR_SOR_SPARMED(ipr->ipr_soright);

            if (MACH_PORT_VALID(name) && armed && IP_VALID(soright)) {
                /* claim send-once right - slot still inuse */
                ipr->ipr_soright = IP_NULL;
                ip_unlock(port);

                ipc_notify_send_possible(soright, name);

                ip_lock(port);
                goto revalidate;
            }
        }
    }
    ip_unlock(port);
}
/*
 *	Routine:	ipc_port_dnnotify
 *	Purpose:
 *		Generate dead name notifications for
 *		all outstanding dead-name and send-
 *		possible requests.
 *	Conditions:
 *		Port must be inactive.
 *		Reference held on port.
 */
void
ipc_port_dnnotify(
    ipc_port_t  port)
{
    ipc_port_request_t requests = port->ip_requests;

    assert(!ip_active(port));
    if (requests != IPR_NULL) {
        ipc_table_size_t its = requests->ipr_size;
        ipc_table_elems_t size = its->its_size;
        ipc_port_request_index_t index;
        for (index = 1; index < size; index++) {
            ipc_port_request_t ipr = &requests[index];
            mach_port_name_t name = ipr->ipr_name;
            ipc_port_t soright = IPR_SOR_PORT(ipr->ipr_soright);

            if (MACH_PORT_VALID(name) && IP_VALID(soright)) {
                ipc_notify_dead_name(soright, name);
            }
        }
    }
}
/*
 *	Routine:	ipc_port_destroy
 *	Purpose:
 *		Destroys a port.  Cleans up queued messages.
 *
 *		If the port has a backup, it doesn't get destroyed,
 *		but is sent in a port-destroyed notification to the backup.
 *	Conditions:
 *		The port is locked and alive; nothing else locked.
 *		The caller has a reference, which is consumed.
 *		Afterwards, the port is unlocked and dead.
 */
void
ipc_port_destroy(ipc_port_t port)
{
    ipc_port_t pdrequest, nsrequest;
    ipc_mqueue_t mqueue;
    ipc_kmsg_t kmsg;
    boolean_t special_reply = port->ip_specialreply;
    struct task_watchport_elem *watchport_elem = NULL;

#if IMPORTANCE_INHERITANCE
    ipc_importance_task_t release_imp_task = IIT_NULL;
    thread_t self = current_thread();
    boolean_t top = (self->ith_assertions == 0);
    natural_t assertcnt = 0;
#endif /* IMPORTANCE_INHERITANCE */

    require_ip_active(port);
    /* port->ip_receiver_name is garbage */
    /* port->ip_receiver/port->ip_destination is garbage */

    /* clear any reply-port context */
    port->ip_reply_context = 0;

    /* check for a backup port */
    pdrequest = port->ip_pdrequest;

#if IMPORTANCE_INHERITANCE
    /* determine how many assertions to drop and from whom */
    if (port->ip_tempowner != 0) {
        assert(top);
        release_imp_task = port->ip_imp_task;
        if (IIT_NULL != release_imp_task) {
            port->ip_imp_task = IIT_NULL;
            assertcnt = port->ip_impcount;
        }
        /* Otherwise, nothing to drop */
    } else {
        assertcnt = port->ip_impcount;
        if (pdrequest != IP_NULL) {
            /* mark in limbo for the journey */
            port->ip_tempowner = 1;
        }
    }

    if (top) {
        self->ith_assertions = assertcnt;
    }
#endif /* IMPORTANCE_INHERITANCE */

    if (pdrequest != IP_NULL) {
        /* clear receiver, don't destroy the port */
        (void)ipc_port_clear_receiver(port, FALSE);
        assert(port->ip_in_pset == 0);
        assert(port->ip_mscount == 0);

        /* we assume the ref for pdrequest */
        port->ip_pdrequest = IP_NULL;

        imq_lock(&port->ip_messages);
        watchport_elem = ipc_port_clear_watchport_elem_internal(port);
        ipc_port_send_turnstile_recompute_push_locked(port);
        /* mqueue and port unlocked */

        if (special_reply) {
            ipc_port_adjust_special_reply_port(port,
                IPC_PORT_ADJUST_SR_ALLOW_SYNC_LINKAGE);
        }

        if (watchport_elem) {
            task_watchport_elem_deallocate(watchport_elem);
            watchport_elem = NULL;
        }
        /* consumes our refs for port and pdrequest */
        ipc_notify_port_destroyed(pdrequest, port);

        goto drop_assertions;
    }

    /*
     * The mach_msg_* paths don't hold a port lock, they only hold a
     * reference to the port object. If a thread raced us and is now
     * blocked waiting for message reception on this mqueue (or waiting
     * for ipc_mqueue_full), it will never be woken up. We call
     * ipc_port_clear_receiver() here, _after_ the port has been marked
     * inactive, to wakeup any threads which may be blocked and ensure
     * that no other thread can get lost waiting for a wake up on a
     * port/mqueue that's been destroyed.
     */
    boolean_t reap_msgs = FALSE;
    reap_msgs = ipc_port_clear_receiver(port, TRUE); /* marks port and mqueue inactive */
    assert(port->ip_in_pset == 0);
    assert(port->ip_mscount == 0);

    imq_lock(&port->ip_messages);
    watchport_elem = ipc_port_clear_watchport_elem_internal(port);
    imq_unlock(&port->ip_messages);
    nsrequest = port->ip_nsrequest;

    /*
     * If the port has a preallocated message buffer and that buffer
     * is not inuse, free it.  If it has an inuse one, then the kmsg
     * free will detect that we freed the association and it can free it
     * like a normal buffer.
     *
     * Once the port is marked inactive we don't need to keep it locked.
     */
    if (IP_PREALLOC(port)) {
        ipc_port_t inuse_port;

        kmsg = port->ip_premsg;
        assert(kmsg != IKM_NULL);
        inuse_port = ikm_prealloc_inuse_port(kmsg);
        ipc_kmsg_clear_prealloc(kmsg, port);

        imq_lock(&port->ip_messages);
        ipc_port_send_turnstile_recompute_push_locked(port);
        /* mqueue and port unlocked */

        if (inuse_port != IP_NULL) {
            assert(inuse_port == port);
        } else {
            ipc_kmsg_free(kmsg);
        }
    } else {
        imq_lock(&port->ip_messages);
        ipc_port_send_turnstile_recompute_push_locked(port);
        /* mqueue and port unlocked */
    }

    /* Deallocate the watchport element */
    if (watchport_elem) {
        task_watchport_elem_deallocate(watchport_elem);
        watchport_elem = NULL;
    }

    /* unlink the kmsg from special reply port */
    if (special_reply) {
        ipc_port_adjust_special_reply_port(port,
            IPC_PORT_ADJUST_SR_ALLOW_SYNC_LINKAGE);
    }

    /* throw away no-senders request */
    if (nsrequest != IP_NULL) {
        ipc_notify_send_once(nsrequest); /* consumes ref */
    }

    /*
     * Reap any kmsg objects waiting to be destroyed.
     * This must be done after we've released the port lock.
     */
    if (reap_msgs) {
        ipc_kmsg_reap_delayed();
    }

    mqueue = &port->ip_messages;

    /* cleanup waitq related resources */
    ipc_mqueue_deinit(mqueue);

    /* generate dead-name notifications */
    ipc_port_dnnotify(port);

    ipc_kobject_destroy(port);

    ip_release(port); /* consume caller's ref */

drop_assertions:
#if IMPORTANCE_INHERITANCE
    if (release_imp_task != IIT_NULL) {
        if (assertcnt > 0) {
            assert(top);
            self->ith_assertions = 0;
            assert(ipc_importance_task_is_any_receiver_type(release_imp_task));
            ipc_importance_task_drop_internal_assertion(release_imp_task, assertcnt);
        }
        ipc_importance_task_release(release_imp_task);
    } else if (assertcnt > 0) {
        if (top) {
            self->ith_assertions = 0;
            release_imp_task = current_task()->task_imp_base;
            if (ipc_importance_task_is_any_receiver_type(release_imp_task)) {
                ipc_importance_task_drop_internal_assertion(release_imp_task, assertcnt);
            }
        }
    }
#endif /* IMPORTANCE_INHERITANCE */
}
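/*
 * Backup-port sketch (illustrative; this is the public notification API,
 * not code from this file): registering MACH_NOTIFY_PORT_DESTROYED makes
 * `backup` the pdrequest consulted above, so destruction hands the receive
 * right over in a notification instead of tearing the port down:
 *
 *	kr = mach_port_request_notification(mach_task_self(), port,
 *	    MACH_NOTIFY_PORT_DESTROYED, 0, backup,
 *	    MACH_MSG_TYPE_MAKE_SEND_ONCE, &previous);
 */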
/*
 *	Routine:	ipc_port_destination_chain_lock
 *	Purpose:
 *		Search for the end of the chain (a port not in transit),
 *		acquiring locks along the way, and return it in `base`.
 *
 *		Returns true if a reference was taken on `base`
 *
 *	Conditions:
 *		No ports locked.
 *		ipc_port_multiple_lock held.
 */
boolean_t
ipc_port_destination_chain_lock(
    ipc_port_t port,
    ipc_port_t *base)
{
    for (;;) {
        ip_lock(port);

        if (!ip_active(port)) {
            /*
             * Active ports that are ip_lock()ed cannot go away.
             *
             * But inactive ports at the end of walking
             * an ip_destination chain are only protected
             * from space termination cleanup while the entire
             * chain of ports leading to them is held.
             *
             * Callers of this code tend to unlock the chain
             * in the same order as this walk, which doesn't
             * protect `base` properly when it's inactive.
             *
             * In that case, take a reference that the caller
             * is responsible for releasing.
             */
            ip_reference(port);
            *base = port;
            return true;
        }
        if ((port->ip_receiver_name != MACH_PORT_NULL) ||
            (port->ip_destination == IP_NULL)) {
            *base = port;
            return false;
        }

        port = port->ip_destination;
    }
}
/*
 *	Routine:	ipc_port_check_circularity
 *	Purpose:
 *		Check if queueing "port" in a message for "dest"
 *		would create a circular group of ports and messages.
 *
 *		If no circularity (FALSE returned), then "port"
 *		is changed from "in limbo" to "in transit".
 *
 *		That is, we want to set port->ip_destination == dest,
 *		but guaranteeing that this doesn't create a circle
 *		port->ip_destination->ip_destination->... == port
 *
 *	Conditions:
 *		No ports locked.  References held for "port" and "dest".
 */
boolean_t
ipc_port_check_circularity(
    ipc_port_t  port,
    ipc_port_t  dest)
{
#if IMPORTANCE_INHERITANCE
    /* adjust importance counts at the same time */
    return ipc_importance_check_circularity(port, dest);
#else
    ipc_port_t base;
    struct task_watchport_elem *watchport_elem = NULL;
    bool took_base_ref = false;

    assert(port != IP_NULL);
    assert(dest != IP_NULL);

    if (port == dest) {
        return TRUE;
    }
    base = dest;

    /* Check if destination needs a turnstile */
    ipc_port_send_turnstile_prepare(dest);

    /*
     * First try a quick check that can run in parallel.
     * No circularity if dest is not in transit.
     */
    ip_lock(port);
    if (ip_lock_try(dest)) {
        if (!ip_active(dest) ||
            (dest->ip_receiver_name != MACH_PORT_NULL) ||
            (dest->ip_destination == IP_NULL)) {
            goto not_circular;
        }

        /* dest is in transit; further checking necessary */

        ip_unlock(dest);
    }
    ip_unlock(port);

    ipc_port_multiple_lock(); /* massive serialization */

    /*
     * Search for the end of the chain (a port not in transit),
     * acquiring locks along the way.
     */

    took_base_ref = ipc_port_destination_chain_lock(dest, &base);
    /* all ports in chain from dest to base, inclusive, are locked */

    if (port == base) {
        /* circularity detected! */

        ipc_port_multiple_unlock();

        /* port (== base) is in limbo */
        require_ip_active(port);
        assert(port->ip_receiver_name == MACH_PORT_NULL);
        assert(port->ip_destination == IP_NULL);
        assert(!took_base_ref);

        base = dest;
        while (base != IP_NULL) {
            ipc_port_t next;

            /* dest is in transit or in limbo */
            require_ip_active(base);
            assert(base->ip_receiver_name == MACH_PORT_NULL);

            next = base->ip_destination;
            ip_unlock(base);
            base = next;
        }

        ipc_port_send_turnstile_complete(dest);
        return TRUE;
    }

    /*
     * The guarantee:  lock port while the entire chain is locked.
     * Once port is locked, we can take a reference to dest,
     * add port to the chain, and unlock everything.
     */

    ip_lock(port);
    ipc_port_multiple_unlock();

not_circular:
    imq_lock(&port->ip_messages);

    /* port is in limbo */
    require_ip_active(port);
    assert(port->ip_receiver_name == MACH_PORT_NULL);
    assert(port->ip_destination == IP_NULL);

    /* Clear the watchport boost */
    watchport_elem = ipc_port_clear_watchport_elem_internal(port);

    /* Check if the port is being enqueued as a part of sync bootstrap checkin */
    if (dest->ip_specialreply && dest->ip_sync_bootstrap_checkin) {
        port->ip_sync_bootstrap_checkin = 1;
    }

    ip_reference(dest);
    port->ip_destination = dest;

    /* Setup linkage for source port if it has sync ipc push */
    struct turnstile *send_turnstile = TURNSTILE_NULL;
    if (port_send_turnstile(port)) {
        send_turnstile = turnstile_prepare((uintptr_t)port,
            port_send_turnstile_address(port),
            TURNSTILE_NULL, TURNSTILE_SYNC_IPC);

        /*
         * What ipc_port_adjust_port_locked would do,
         * but we need to also drop even more locks before
         * calling turnstile_update_inheritor_complete().
         */
        ipc_port_adjust_sync_link_state_locked(port, PORT_SYNC_LINK_ANY, NULL);

        turnstile_update_inheritor(send_turnstile, port_send_turnstile(dest),
            (TURNSTILE_INHERITOR_TURNSTILE | TURNSTILE_IMMEDIATE_UPDATE));

        /* update complete and turnstile complete called after dropping all locks */
    }
    imq_unlock(&port->ip_messages);

    /* now unlock chain */

    ip_unlock(port);

    for (;;) {
        ipc_port_t next;

        if (dest == base) {
            break;
        }

        /* port is in transit */
        require_ip_active(dest);
        assert(dest->ip_receiver_name == MACH_PORT_NULL);
        assert(dest->ip_destination != IP_NULL);

        next = dest->ip_destination;
        ip_unlock(dest);
        dest = next;
    }

    /* base is not in transit */
    assert(!ip_active(base) ||
        (base->ip_receiver_name != MACH_PORT_NULL) ||
        (base->ip_destination == IP_NULL));

    ip_unlock(base);
    if (took_base_ref) {
        ip_release(base);
    }

    /* All locks dropped, call turnstile_update_inheritor_complete for source port's turnstile */
    if (send_turnstile) {
        turnstile_update_inheritor_complete(send_turnstile, TURNSTILE_INTERLOCK_NOT_HELD);

        /* Take the mq lock to call turnstile complete */
        imq_lock(&port->ip_messages);
        turnstile_complete((uintptr_t)port, port_send_turnstile_address(port), NULL, TURNSTILE_SYNC_IPC);
        send_turnstile = TURNSTILE_NULL;
        imq_unlock(&port->ip_messages);
        turnstile_cleanup();
    }

    if (watchport_elem) {
        task_watchport_elem_deallocate(watchport_elem);
    }

    return FALSE;
#endif /* !IMPORTANCE_INHERITANCE */
}
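/*
 * Worked example (illustrative): suppose port A is queued in a message
 * destined for B while B is itself in transit to A.  Walking B's
 * ip_destination chain ends back at A (port == base), so the send is
 * refused with TRUE; otherwise A simply becomes "in transit" with
 * A->ip_destination = B:
 *
 *	if (ipc_port_check_circularity(A, B)) {
 *		// the message carrying A must be destroyed, not queued on B
 *	}
 */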
/*
 *	Routine:	ipc_port_watchport_elem
 *	Purpose:
 *		Get the port's watchport elem field
 *
 *	Conditions:
 *		mqueue locked
 */
static struct task_watchport_elem *
ipc_port_watchport_elem(ipc_port_t port)
{
    return port->ip_messages.imq_wait_queue.waitq_tspriv;
}
/*
 *	Routine:	ipc_port_update_watchport_elem
 *	Purpose:
 *		Set the port's watchport elem field
 *
 *	Conditions:
 *		mqueue locked
 */
static inline struct task_watchport_elem *
ipc_port_update_watchport_elem(ipc_port_t port, struct task_watchport_elem *we)
{
    assert(!port->ip_specialreply);
    struct task_watchport_elem *old_we = ipc_port_watchport_elem(port);
    port->ip_messages.imq_wait_queue.waitq_tspriv = we;
    return old_we;
}
/*
 *	Routine:	ipc_special_reply_stash_pid_locked
 *	Purpose:
 *		Set the pid of process that copied out send once right to
 *		special reply port.
 *
 *	Conditions:
 *		port locked
 */
static inline void
ipc_special_reply_stash_pid_locked(ipc_port_t port, int pid)
{
    assert(port->ip_specialreply);
    port->ip_messages.imq_wait_queue.waitq_priv_pid = pid;
}
/*
 *	Routine:	ipc_special_reply_get_pid_locked
 *	Purpose:
 *		Get the pid of process that copied out send once right to
 *		special reply port.
 *
 *	Conditions:
 *		port locked
 */
int
ipc_special_reply_get_pid_locked(ipc_port_t port)
{
    assert(port->ip_specialreply);
    return port->ip_messages.imq_wait_queue.waitq_priv_pid;
}
/*
 * Update the recv turnstile inheritor for a port.
 *
 * Sync IPC through the port receive turnstile only happens for the special
 * reply port case. It has three sub-cases:
 *
 * 1. a send-once right is in transit, and pushes on the send turnstile of its
 *    destination mqueue.
 *
 * 2. a send-once right has been stashed on a knote it was copied out "through",
 *    as the first such copied out port.
 *
 * 3. a send-once right has been stashed on a knote it was copied out "through",
 *    as the second or more copied out port.
 */
void
ipc_port_recv_update_inheritor(
    ipc_port_t port,
    struct turnstile *rcv_turnstile,
    turnstile_update_flags_t flags)
{
    struct turnstile *inheritor = TURNSTILE_NULL;
    struct knote *kn;

    if (ip_active(port) && port->ip_specialreply) {
        imq_held(&port->ip_messages);

        switch (port->ip_sync_link_state) {
        case PORT_SYNC_LINK_PORT:
            if (port->ip_sync_inheritor_port != NULL) {
                inheritor = port_send_turnstile(port->ip_sync_inheritor_port);
            }
            break;

        case PORT_SYNC_LINK_WORKLOOP_KNOTE:
            kn = port->ip_sync_inheritor_knote;
            inheritor = filt_ipc_kqueue_turnstile(kn);
            break;

        case PORT_SYNC_LINK_WORKLOOP_STASH:
            inheritor = port->ip_sync_inheritor_ts;
            break;
        }
    }

    turnstile_update_inheritor(rcv_turnstile, inheritor,
        flags | TURNSTILE_INHERITOR_TURNSTILE);
}
/*
 * Update the send turnstile inheritor for a port.
 *
 * Sync IPC through the port send turnstile has 7 possible reasons to be linked:
 *
 * 1. a special reply port is part of sync ipc for bootstrap checkin and needs
 *    to push on thread doing the sync ipc.
 *
 * 2. a receive right is in transit, and pushes on the send turnstile of its
 *    destination mqueue.
 *
 * 3. port was passed as an exec watchport and port is pushing on main thread
 *    of the task.
 *
 * 4. a receive right has been stashed on a knote it was copied out "through",
 *    as the first such copied out port (same as PORT_SYNC_LINK_WORKLOOP_KNOTE
 *    for the special reply port)
 *
 * 5. a receive right has been stashed on a knote it was copied out "through",
 *    as the second or more copied out port (same as
 *    PORT_SYNC_LINK_WORKLOOP_STASH for the special reply port)
 *
 * 6. a receive right has been copied out as a part of sync bootstrap checkin
 *    and needs to push on thread doing the sync bootstrap checkin.
 *
 * 7. the receive right is monitored by a knote, and pushes on any that is
 *    registered on a workloop. filt_machport makes sure that if such a knote
 *    exists, it is kept as the first item in the knote list, so we never need
 *    to walk the list.
 */
void
ipc_port_send_update_inheritor(
    ipc_port_t port,
    struct turnstile *send_turnstile,
    turnstile_update_flags_t flags)
{
    ipc_mqueue_t mqueue = &port->ip_messages;
    turnstile_inheritor_t inheritor = TURNSTILE_INHERITOR_NULL;
    struct knote *kn;
    turnstile_update_flags_t inheritor_flags = TURNSTILE_INHERITOR_TURNSTILE;

    assert(imq_held(mqueue));

    if (!ip_active(port)) {
        /* this port is no longer active, it should not push anywhere */
    } else if (port->ip_specialreply) {
        /* Case 1. */
        if (port->ip_sync_bootstrap_checkin && prioritize_launch) {
            inheritor = port->ip_messages.imq_srp_owner_thread;
            inheritor_flags = TURNSTILE_INHERITOR_THREAD;
        }
    } else if (port->ip_receiver_name == MACH_PORT_NULL &&
        port->ip_destination != NULL) {
        /* Case 2. */
        inheritor = port_send_turnstile(port->ip_destination);
    } else if (ipc_port_watchport_elem(port) != NULL) {
        /* Case 3. */
        if (prioritize_launch) {
            assert(port->ip_sync_link_state == PORT_SYNC_LINK_ANY);
            inheritor = ipc_port_get_watchport_inheritor(port);
            inheritor_flags = TURNSTILE_INHERITOR_THREAD;
        }
    } else if (port->ip_sync_link_state == PORT_SYNC_LINK_WORKLOOP_KNOTE) {
        /* Case 4. */
        inheritor = filt_ipc_kqueue_turnstile(mqueue->imq_inheritor_knote);
    } else if (port->ip_sync_link_state == PORT_SYNC_LINK_WORKLOOP_STASH) {
        /* Case 5. */
        inheritor = mqueue->imq_inheritor_turnstile;
    } else if (port->ip_sync_link_state == PORT_SYNC_LINK_RCV_THREAD) {
        /* Case 6. */
        if (prioritize_launch) {
            inheritor = port->ip_messages.imq_inheritor_thread_ref;
            inheritor_flags = TURNSTILE_INHERITOR_THREAD;
        }
    } else if ((kn = SLIST_FIRST(&mqueue->imq_klist))) {
        /* Case 7. Push on a workloop that is interested */
        if (filt_machport_kqueue_has_turnstile(kn)) {
            assert(port->ip_sync_link_state == PORT_SYNC_LINK_ANY);
            inheritor = filt_ipc_kqueue_turnstile(kn);
        }
    }

    turnstile_update_inheritor(send_turnstile, inheritor,
        flags | inheritor_flags);
}
/*
 *	Routine:	ipc_port_send_turnstile_prepare
 *	Purpose:
 *		Get a reference on port's send turnstile, if
 *		port does not have a send turnstile then allocate one.
 *
 *	Conditions:
 *		Nothing is locked.
 */
void
ipc_port_send_turnstile_prepare(ipc_port_t port)
{
    struct turnstile *turnstile = TURNSTILE_NULL;
    struct turnstile *send_turnstile = TURNSTILE_NULL;

retry_alloc:
    imq_lock(&port->ip_messages);

    if (port_send_turnstile(port) == NULL ||
        port_send_turnstile(port)->ts_port_ref == 0) {
        if (turnstile == TURNSTILE_NULL) {
            imq_unlock(&port->ip_messages);
            turnstile = turnstile_alloc();
            goto retry_alloc;
        }

        send_turnstile = turnstile_prepare((uintptr_t)port,
            port_send_turnstile_address(port),
            turnstile, TURNSTILE_SYNC_IPC);
        turnstile = TURNSTILE_NULL;

        ipc_port_send_update_inheritor(port, send_turnstile,
            TURNSTILE_IMMEDIATE_UPDATE);

        /* turnstile complete will be called in ipc_port_send_turnstile_complete */
    }

    /* Increment turnstile counter */
    port_send_turnstile(port)->ts_port_ref++;
    imq_unlock(&port->ip_messages);

    if (send_turnstile) {
        turnstile_update_inheritor_complete(send_turnstile,
            TURNSTILE_INTERLOCK_NOT_HELD);
    }
    if (turnstile != TURNSTILE_NULL) {
        turnstile_deallocate(turnstile);
    }
}
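/*
 * Pairing sketch (illustrative; the exact call sites are assumptions):
 * every ipc_port_send_turnstile_prepare() must be balanced by an
 * ipc_port_send_turnstile_complete() on the same port, since the turnstile
 * is refcounted via ts_port_ref.  The special reply linkage in this file
 * follows that shape:
 *
 *	ipc_port_send_turnstile_prepare(dest_port);
 *	...			// turnstile pushes on dest while linked
 *	ipc_port_send_turnstile_complete(dest_port);
 */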
/*
 *	Routine:	ipc_port_send_turnstile_complete
 *	Purpose:
 *		Drop a ref on the port's send turnstile, if the
 *		ref becomes zero, deallocate the turnstile.
 *
 *	Conditions:
 *		The space might be locked, use safe deallocate.
 */
void
ipc_port_send_turnstile_complete(ipc_port_t port)
{
    struct turnstile *turnstile = TURNSTILE_NULL;

    /* Drop turnstile count on dest port */
    imq_lock(&port->ip_messages);

    port_send_turnstile(port)->ts_port_ref--;
    if (port_send_turnstile(port)->ts_port_ref == 0) {
        turnstile_complete((uintptr_t)port, port_send_turnstile_address(port),
            &turnstile, TURNSTILE_SYNC_IPC);
        assert(turnstile != TURNSTILE_NULL);
    }
    imq_unlock(&port->ip_messages);
    turnstile_cleanup();

    if (turnstile != TURNSTILE_NULL) {
        turnstile_deallocate_safe(turnstile);
        turnstile = TURNSTILE_NULL;
    }
}
/*
 *	Routine:	ipc_port_rcv_turnstile
 *	Purpose:
 *		Get the port's receive turnstile
 *
 *	Conditions:
 *		mqueue locked or thread waiting on turnstile is locked.
 */
static struct turnstile *
ipc_port_rcv_turnstile(ipc_port_t port)
{
    return *port_rcv_turnstile_address(port);
}
/*
 *	Routine:	ipc_port_link_special_reply_port
 *	Purpose:
 *		Link the special reply port with the destination port.
 *		Allocates turnstile to dest port.
 *
 *	Conditions:
 *		Nothing is locked.
 */
void
ipc_port_link_special_reply_port(
    ipc_port_t special_reply_port,
    ipc_port_t dest_port,
    boolean_t sync_bootstrap_checkin)
{
    boolean_t drop_turnstile_ref = FALSE;
    boolean_t special_reply = FALSE;

    /* Check if dest_port needs a turnstile */
    ipc_port_send_turnstile_prepare(dest_port);

    /* Lock the special reply port and establish the linkage */
    ip_lock(special_reply_port);
    imq_lock(&special_reply_port->ip_messages);

    special_reply = special_reply_port->ip_specialreply;

    if (sync_bootstrap_checkin && special_reply) {
        special_reply_port->ip_sync_bootstrap_checkin = 1;
    }

    /* Check if we need to drop the acquired turnstile ref on dest port */
    if (!special_reply ||
        special_reply_port->ip_sync_link_state != PORT_SYNC_LINK_ANY ||
        special_reply_port->ip_sync_inheritor_port != IPC_PORT_NULL) {
        drop_turnstile_ref = TRUE;
    } else {
        /* take a reference on dest_port */
        ip_reference(dest_port);
        special_reply_port->ip_sync_inheritor_port = dest_port;
        special_reply_port->ip_sync_link_state = PORT_SYNC_LINK_PORT;
    }

    imq_unlock(&special_reply_port->ip_messages);
    ip_unlock(special_reply_port);

    if (special_reply) {
        /*
         * For special reply ports, if the destination port is
         * marked with the thread group blocked tracking flag,
         * callout to the performance controller.
         */
        ipc_port_thread_group_blocked(dest_port);
    }

    if (drop_turnstile_ref) {
        ipc_port_send_turnstile_complete(dest_port);
    }
}
/*
 *	Routine:	ipc_port_thread_group_blocked
 *	Purpose:
 *		Call thread_group_blocked callout if the port
 *		has ip_tg_block_tracking bit set and the thread
 *		has not made this callout already.
 *
 *	Conditions:
 *		Nothing is locked.
 */
void
ipc_port_thread_group_blocked(ipc_port_t port __unused)
{
#if CONFIG_THREAD_GROUPS
    bool port_tg_block_tracking = false;
    thread_t self = current_thread();

    if (self->thread_group == NULL ||
        (self->options & TH_OPT_IPC_TG_BLOCKED)) {
        return;
    }

    port_tg_block_tracking = port->ip_tg_block_tracking;
    if (!port_tg_block_tracking) {
        return;
    }

    machine_thread_group_blocked(self->thread_group, NULL,
        PERFCONTROL_CALLOUT_BLOCKING_TG_RENDER_SERVER, self);

    self->options |= TH_OPT_IPC_TG_BLOCKED;
#endif /* CONFIG_THREAD_GROUPS */
}
/*
 *	Routine:	ipc_port_thread_group_unblocked
 *	Purpose:
 *		Call thread_group_unblocked callout if the
 *		thread had previously made a thread_group_blocked
 *		callout before (indicated by TH_OPT_IPC_TG_BLOCKED
 *		flag on the thread).
 *
 *	Conditions:
 *		Nothing is locked.
 */
void
ipc_port_thread_group_unblocked(void)
{
#if CONFIG_THREAD_GROUPS
    thread_t self = current_thread();

    if (!(self->options & TH_OPT_IPC_TG_BLOCKED)) {
        return;
    }

    machine_thread_group_unblocked(self->thread_group, NULL,
        PERFCONTROL_CALLOUT_BLOCKING_TG_RENDER_SERVER, self);

    self->options &= ~TH_OPT_IPC_TG_BLOCKED;
#endif /* CONFIG_THREAD_GROUPS */
}
#if DEVELOPMENT || DEBUG
void
ipc_special_reply_port_bits_reset(ipc_port_t special_reply_port)
{
    special_reply_port->ip_srp_lost_link = 0;
    special_reply_port->ip_srp_msg_sent = 0;
}

static void
ipc_special_reply_port_msg_sent_reset(ipc_port_t special_reply_port)
{
    if (special_reply_port->ip_specialreply == 1) {
        special_reply_port->ip_srp_msg_sent = 0;
    }
}

void
ipc_special_reply_port_msg_sent(ipc_port_t special_reply_port)
{
    if (special_reply_port->ip_specialreply == 1) {
        special_reply_port->ip_srp_msg_sent = 1;
    }
}

static void
ipc_special_reply_port_lost_link(ipc_port_t special_reply_port)
{
    if (special_reply_port->ip_specialreply == 1 && special_reply_port->ip_srp_msg_sent == 0) {
        special_reply_port->ip_srp_lost_link = 1;
    }
}

#else /* DEVELOPMENT || DEBUG */
void
ipc_special_reply_port_bits_reset(__unused ipc_port_t special_reply_port)
{
}

static void
ipc_special_reply_port_msg_sent_reset(__unused ipc_port_t special_reply_port)
{
}

void
ipc_special_reply_port_msg_sent(__unused ipc_port_t special_reply_port)
{
}

static void
ipc_special_reply_port_lost_link(__unused ipc_port_t special_reply_port)
{
}
#endif /* DEVELOPMENT || DEBUG */
/*
 *	Routine:	ipc_port_adjust_special_reply_port_locked
 *	Purpose:
 *		If the special port has a turnstile, update its inheritor.
 *	Conditions:
 *		Special reply port locked on entry.
 *		Special reply port unlocked on return.
 *		The passed in port is a special reply port.
 *	Returns:
 *		None.
 */
void
ipc_port_adjust_special_reply_port_locked(
    ipc_port_t special_reply_port,
    struct knote *kn,
    uint8_t flags,
    boolean_t get_turnstile)
{
    ipc_port_t dest_port = IPC_PORT_NULL;
    int sync_link_state = PORT_SYNC_LINK_NO_LINKAGE;
    turnstile_inheritor_t inheritor = TURNSTILE_INHERITOR_NULL;
    struct turnstile *ts = TURNSTILE_NULL;

    ip_lock_held(special_reply_port); // ip_sync_link_state is touched
    imq_lock(&special_reply_port->ip_messages);

    if (!special_reply_port->ip_specialreply) {
        // only mach_msg_receive_results_complete() calls this with any port
        assert(get_turnstile);
        goto not_special;
    }

    if (flags & IPC_PORT_ADJUST_SR_RECEIVED_MSG) {
        ipc_special_reply_port_msg_sent_reset(special_reply_port);
    }

    if (flags & IPC_PORT_ADJUST_UNLINK_THREAD) {
        special_reply_port->ip_messages.imq_srp_owner_thread = NULL;
    }

    if (flags & IPC_PORT_ADJUST_RESET_BOOSTRAP_CHECKIN) {
        special_reply_port->ip_sync_bootstrap_checkin = 0;
    }

    /* Check if the special reply port is marked non-special */
    if (special_reply_port->ip_sync_link_state == PORT_SYNC_LINK_ANY) {
not_special:
        if (get_turnstile) {
            turnstile_complete((uintptr_t)special_reply_port,
                port_rcv_turnstile_address(special_reply_port), NULL, TURNSTILE_SYNC_IPC);
        }
        imq_unlock(&special_reply_port->ip_messages);
        ip_unlock(special_reply_port);
        if (get_turnstile) {
            turnstile_cleanup();
        }
        return;
    }

    if (flags & IPC_PORT_ADJUST_SR_LINK_WORKLOOP) {
        if (ITH_KNOTE_VALID(kn, MACH_MSG_TYPE_PORT_SEND_ONCE)) {
            inheritor = filt_machport_stash_port(kn, special_reply_port,
                &sync_link_state);
        }
    } else if (flags & IPC_PORT_ADJUST_SR_ALLOW_SYNC_LINKAGE) {
        sync_link_state = PORT_SYNC_LINK_ANY;
    }

    /* Check if need to break linkage */
    if (!get_turnstile && sync_link_state == PORT_SYNC_LINK_NO_LINKAGE &&
        special_reply_port->ip_sync_link_state == PORT_SYNC_LINK_NO_LINKAGE) {
        imq_unlock(&special_reply_port->ip_messages);
        ip_unlock(special_reply_port);
        return;
    }

    switch (special_reply_port->ip_sync_link_state) {
    case PORT_SYNC_LINK_PORT:
        dest_port = special_reply_port->ip_sync_inheritor_port;
        special_reply_port->ip_sync_inheritor_port = IPC_PORT_NULL;
        break;
    case PORT_SYNC_LINK_WORKLOOP_KNOTE:
        special_reply_port->ip_sync_inheritor_knote = NULL;
        break;
    case PORT_SYNC_LINK_WORKLOOP_STASH:
        special_reply_port->ip_sync_inheritor_ts = NULL;
        break;
    }

    /*
     * Stash (or unstash) the server's PID in the ip_sorights field of the
     * special reply port, so that stackshot can later retrieve who the client
     * is blocked on.
     */
    if (special_reply_port->ip_sync_link_state == PORT_SYNC_LINK_PORT &&
        sync_link_state == PORT_SYNC_LINK_NO_LINKAGE) {
        ipc_special_reply_stash_pid_locked(special_reply_port, pid_from_task(current_task()));
    } else if (special_reply_port->ip_sync_link_state == PORT_SYNC_LINK_NO_LINKAGE &&
        sync_link_state == PORT_SYNC_LINK_ANY) {
        /* If we are resetting the special reply port, remove the stashed pid. */
        ipc_special_reply_stash_pid_locked(special_reply_port, 0);
    }

    special_reply_port->ip_sync_link_state = sync_link_state;

    switch (sync_link_state) {
    case PORT_SYNC_LINK_WORKLOOP_KNOTE:
        special_reply_port->ip_sync_inheritor_knote = kn;
        break;
    case PORT_SYNC_LINK_WORKLOOP_STASH:
        special_reply_port->ip_sync_inheritor_ts = inheritor;
        break;
    case PORT_SYNC_LINK_NO_LINKAGE:
        if (flags & IPC_PORT_ADJUST_SR_ENABLE_EVENT) {
            ipc_special_reply_port_lost_link(special_reply_port);
        }
        break;
    }

    /* Get thread's turnstile donated to special reply port */
    if (get_turnstile) {
        turnstile_complete((uintptr_t)special_reply_port,
            port_rcv_turnstile_address(special_reply_port), NULL, TURNSTILE_SYNC_IPC);
    } else {
        ts = ipc_port_rcv_turnstile(special_reply_port);
        if (ts) {
            turnstile_reference(ts);
            ipc_port_recv_update_inheritor(special_reply_port, ts,
                TURNSTILE_IMMEDIATE_UPDATE);
        }
    }

    imq_unlock(&special_reply_port->ip_messages);
    ip_unlock(special_reply_port);

    if (get_turnstile) {
        turnstile_cleanup();
    } else if (ts) {
        /* Call turnstile cleanup after dropping the interlock */
        turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_NOT_HELD);
        turnstile_deallocate_safe(ts);
    }

    /* Release the ref on the dest port and its turnstile */
    if (dest_port != IPC_PORT_NULL) {
        ipc_port_send_turnstile_complete(dest_port);
        /* release the reference on the dest port */
        ip_release(dest_port);
    }
}
/*
 *	Routine:	ipc_port_adjust_special_reply_port
 *	Purpose:
 *		If the special port has a turnstile, update its inheritor.
 *	Conditions:
 *		Nothing locked.
 *	Returns:
 *		None.
 */
void
ipc_port_adjust_special_reply_port(
    ipc_port_t port,
    uint8_t flags)
{
    if (port->ip_specialreply) {
        ip_lock(port);
        ipc_port_adjust_special_reply_port_locked(port, NULL, flags, FALSE);
    }
}
/*
 *	Routine:	ipc_port_adjust_sync_link_state_locked
 *	Purpose:
 *		Update the sync link state of the port and the
 *		turnstile inheritor.
 *	Conditions:
 *		Port and mqueue locked on entry.
 *		Port and mqueue locked on return.
 *	Returns:
 *		None.
 */
void
ipc_port_adjust_sync_link_state_locked(
    ipc_port_t port,
    int sync_link_state,
    turnstile_inheritor_t inheritor)
{
    switch (port->ip_sync_link_state) {
    case PORT_SYNC_LINK_RCV_THREAD:
        /* deallocate the thread reference for the inheritor */
        thread_deallocate_safe(port->ip_messages.imq_inheritor_thread_ref);
        /* fallthrough */
    default:
        klist_init(&port->ip_messages.imq_klist);
    }

    switch (sync_link_state) {
    case PORT_SYNC_LINK_WORKLOOP_KNOTE:
        port->ip_messages.imq_inheritor_knote = inheritor;
        break;
    case PORT_SYNC_LINK_WORKLOOP_STASH:
        port->ip_messages.imq_inheritor_turnstile = inheritor;
        break;
    case PORT_SYNC_LINK_RCV_THREAD:
        /* The thread could exit without clearing port state, take a thread ref */
        thread_reference((thread_t)inheritor);
        port->ip_messages.imq_inheritor_thread_ref = inheritor;
        break;
    default:
        klist_init(&port->ip_messages.imq_klist);
        sync_link_state = PORT_SYNC_LINK_ANY;
    }

    port->ip_sync_link_state = sync_link_state;
}
/*
 *	Routine:	ipc_port_adjust_port_locked
 *	Purpose:
 *		If the port has a turnstile, update its inheritor.
 *	Conditions:
 *		Port locked on entry.
 *		Port unlocked on return.
 *	Returns:
 *		None.
 */
void
ipc_port_adjust_port_locked(
    ipc_port_t port,
    struct knote *kn,
    boolean_t sync_bootstrap_checkin)
{
    int sync_link_state = PORT_SYNC_LINK_ANY;
    turnstile_inheritor_t inheritor = TURNSTILE_INHERITOR_NULL;

    ip_lock_held(port); // ip_sync_link_state is touched
    imq_held(&port->ip_messages);

    assert(!port->ip_specialreply);

    if (kn) {
        inheritor = filt_machport_stash_port(kn, port, &sync_link_state);
        if (sync_link_state == PORT_SYNC_LINK_WORKLOOP_KNOTE) {
            inheritor = kn;
        }
    } else if (sync_bootstrap_checkin) {
        inheritor = current_thread();
        sync_link_state = PORT_SYNC_LINK_RCV_THREAD;
    }

    ipc_port_adjust_sync_link_state_locked(port, sync_link_state, inheritor);
    port->ip_sync_bootstrap_checkin = 0;

    ipc_port_send_turnstile_recompute_push_locked(port);
    /* port and mqueue unlocked */
}
/*
 *	Routine:	ipc_port_clear_sync_rcv_thread_boost_locked
 *	Purpose:
 *		If the port is pushing on rcv thread, clear it.
 *	Conditions:
 *		Port locked on entry.
 *		mqueue is not locked.
 *		Port unlocked on return.
 *	Returns:
 *		None.
 */
void
ipc_port_clear_sync_rcv_thread_boost_locked(
    ipc_port_t port)
{
    ip_lock_held(port); // ip_sync_link_state is touched

    if (port->ip_sync_link_state != PORT_SYNC_LINK_RCV_THREAD) {
        ip_unlock(port);
        return;
    }

    imq_lock(&port->ip_messages);
    ipc_port_adjust_sync_link_state_locked(port, PORT_SYNC_LINK_ANY, NULL);

    ipc_port_send_turnstile_recompute_push_locked(port);
    /* port and mqueue unlocked */
}
/*
 *	Routine:	ipc_port_add_watchport_elem_locked
 *	Purpose:
 *		Transfer the turnstile boost of watchport to task calling exec.
 *	Conditions:
 *		Port locked on entry.
 *		Port unlocked on return.
 *	Returns:
 *		KERN_SUCCESS on success.
 *		KERN_FAILURE otherwise.
 */
kern_return_t
ipc_port_add_watchport_elem_locked(
    ipc_port_t port,
    struct task_watchport_elem *watchport_elem,
    struct task_watchport_elem **old_elem)
{
    ip_lock_held(port);
    imq_held(&port->ip_messages);

    /* Watchport boost only works for non-special active ports mapped in an ipc space */
    if (!ip_active(port) || port->ip_specialreply ||
        port->ip_receiver_name == MACH_PORT_NULL) {
        imq_unlock(&port->ip_messages);
        ip_unlock(port);
        return KERN_FAILURE;
    }

    if (port->ip_sync_link_state != PORT_SYNC_LINK_ANY) {
        /* Sever the linkage if the port was pushing on knote */
        ipc_port_adjust_sync_link_state_locked(port, PORT_SYNC_LINK_ANY, NULL);
    }

    *old_elem = ipc_port_update_watchport_elem(port, watchport_elem);

    ipc_port_send_turnstile_recompute_push_locked(port);
    /* port and mqueue unlocked */
    return KERN_SUCCESS;
}
/*
 *	Routine:	ipc_port_clear_watchport_elem_internal_conditional_locked
 *	Purpose:
 *		Remove the turnstile boost of watchport and recompute the push.
 *	Conditions:
 *		Port locked on entry.
 *		Port unlocked on return.
 *	Returns:
 *		KERN_SUCCESS on success.
 *		KERN_FAILURE otherwise.
 */
kern_return_t
ipc_port_clear_watchport_elem_internal_conditional_locked(
    ipc_port_t port,
    struct task_watchport_elem *watchport_elem)
{
    ip_lock_held(port);
    imq_held(&port->ip_messages);

    if (ipc_port_watchport_elem(port) != watchport_elem) {
        imq_unlock(&port->ip_messages);
        ip_unlock(port);
        return KERN_FAILURE;
    }

    ipc_port_clear_watchport_elem_internal(port);
    ipc_port_send_turnstile_recompute_push_locked(port);
    /* port and mqueue unlocked */
    return KERN_SUCCESS;
}
/*
 *	Routine:	ipc_port_replace_watchport_elem_conditional_locked
 *	Purpose:
 *		Replace the turnstile boost of watchport and recompute the push.
 *	Conditions:
 *		Port locked on entry.
 *		Port unlocked on return.
 *	Returns:
 *		KERN_SUCCESS on success.
 *		KERN_FAILURE otherwise.
 */
kern_return_t
ipc_port_replace_watchport_elem_conditional_locked(
    ipc_port_t port,
    struct task_watchport_elem *old_watchport_elem,
    struct task_watchport_elem *new_watchport_elem)
{
    ip_lock_held(port);
    imq_held(&port->ip_messages);

    if (ipc_port_watchport_elem(port) != old_watchport_elem) {
        imq_unlock(&port->ip_messages);
        ip_unlock(port);
        return KERN_FAILURE;
    }

    ipc_port_update_watchport_elem(port, new_watchport_elem);
    ipc_port_send_turnstile_recompute_push_locked(port);
    /* port and mqueue unlocked */
    return KERN_SUCCESS;
}
/*
 *	Routine:	ipc_port_clear_watchport_elem_internal
 *	Purpose:
 *		Remove the turnstile boost of watchport.
 *	Conditions:
 *		Port locked on entry.
 *		Port locked on return.
 *	Returns:
 *		Old task_watchport_elem returned.
 */
struct task_watchport_elem *
ipc_port_clear_watchport_elem_internal(
    ipc_port_t port)
{
    ip_lock_held(port);
    imq_held(&port->ip_messages);

    if (port->ip_specialreply) {
        return NULL;
    }

    return ipc_port_update_watchport_elem(port, NULL);
}
/*
 *	Routine:	ipc_port_send_turnstile_recompute_push_locked
 *	Purpose:
 *		Update send turnstile inheritor of port and recompute the push.
 *	Conditions:
 *		Port locked on entry.
 *		Port unlocked on return.
 *	Returns:
 *		None.
 */
static void
ipc_port_send_turnstile_recompute_push_locked(
    ipc_port_t port)
{
    struct turnstile *send_turnstile = port_send_turnstile(port);
    if (send_turnstile) {
        turnstile_reference(send_turnstile);
        ipc_port_send_update_inheritor(port, send_turnstile,
            TURNSTILE_IMMEDIATE_UPDATE);
    }
    imq_unlock(&port->ip_messages);
    ip_unlock(port);

    if (send_turnstile) {
        turnstile_update_inheritor_complete(send_turnstile,
            TURNSTILE_INTERLOCK_NOT_HELD);
        turnstile_deallocate_safe(send_turnstile);
    }
}
/*
 *	Routine:	ipc_port_get_watchport_inheritor
 *	Purpose:
 *		Returns inheritor for watchport.
 *
 *	Conditions:
 *		mqueue locked.
 *	Returns:
 *		watchport inheritor.
 */
static thread_t
ipc_port_get_watchport_inheritor(
    ipc_port_t port)
{
    imq_held(&port->ip_messages);
    return ipc_port_watchport_elem(port)->twe_task->watchports->tw_thread;
}
/*
 *	Routine:	ipc_port_impcount_delta
 *	Purpose:
 *		Adjust only the importance count associated with a port.
 *		If there are any adjustments to be made to receiver task,
 *		those are handled elsewhere.
 *
 *		For now, be defensive during deductions to make sure the
 *		impcount for the port doesn't underflow zero.  This will
 *		go away when the port boost addition is made atomic (see
 *		note in ipc_port_importance_delta()).
 *	Conditions:
 *		The port is referenced and locked.
 *		Nothing else is locked.
 */
mach_port_delta_t
ipc_port_impcount_delta(
	ipc_port_t          port,
	mach_port_delta_t   delta,
	ipc_port_t          __unused base)
{
	mach_port_delta_t absdelta;

	if (!ip_active(port)) {
		return 0;
	}

	/* adding/doing nothing is easy */
	if (delta >= 0) {
		port->ip_impcount += delta;
		return delta;
	}

	absdelta = 0 - delta;
	if (port->ip_impcount >= absdelta) {
		port->ip_impcount -= absdelta;
		return delta;
	}

#if (DEVELOPMENT || DEBUG)
	if (port->ip_receiver_name != MACH_PORT_NULL) {
		task_t target_task = port->ip_receiver->is_task;
		ipc_importance_task_t target_imp = target_task->task_imp_base;
		const char *target_procname;
		int target_pid;

		if (target_imp != IIT_NULL) {
			target_procname = target_imp->iit_procname;
			target_pid = target_imp->iit_bsd_pid;
		} else {
			target_procname = "unknown";
			target_pid = -1;
		}
		printf("Over-release of importance assertions for port 0x%x receiver pid %d (%s), "
		    "dropping %d assertion(s) but port only has %d remaining.\n",
		    port->ip_receiver_name,
		    target_pid, target_procname,
		    absdelta, port->ip_impcount);
	} else if (base != IP_NULL) {
		task_t target_task = base->ip_receiver->is_task;
		ipc_importance_task_t target_imp = target_task->task_imp_base;
		const char *target_procname;
		int target_pid;

		if (target_imp != IIT_NULL) {
			target_procname = target_imp->iit_procname;
			target_pid = target_imp->iit_bsd_pid;
		} else {
			target_procname = "unknown";
			target_pid = -1;
		}
		printf("Over-release of importance assertions for port 0x%lx "
		    "enqueued on port 0x%x with receiver pid %d (%s), "
		    "dropping %d assertion(s) but port only has %d remaining.\n",
		    (unsigned long)VM_KERNEL_UNSLIDE_OR_PERM((uintptr_t)port),
		    base->ip_receiver_name,
		    target_pid, target_procname,
		    absdelta, port->ip_impcount);
	}
#endif /* (DEVELOPMENT || DEBUG) */

	delta = 0 - port->ip_impcount;
	port->ip_impcount = 0;
	return delta;
}
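/*
 * Worked example of the clamp above: with ip_impcount == 3 and a
 * requested delta of -5, absdelta == 5 exceeds the count, so (after the
 * optional DEVELOPMENT/DEBUG printf) the routine drops only the three
 * assertions that exist: it returns -3 and leaves ip_impcount == 0,
 * instead of letting the count underflow.
 */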
/*
 *	Routine:	ipc_port_importance_delta_internal
 *	Purpose:
 *		Adjust the importance count through the given port.
 *		If the port is in transit, apply the delta throughout
 *		the chain. Determine if there is a task at the
 *		base of the chain that wants/needs to be adjusted,
 *		and if so, apply the delta.
 *	Conditions:
 *		The port is referenced and locked on entry.
 *		Importance may be locked.
 *		Nothing else is locked.
 *		The lock may be dropped on exit.
 *		Returns TRUE if lock was dropped.
 */
#if IMPORTANCE_INHERITANCE

boolean_t
ipc_port_importance_delta_internal(
	ipc_port_t              port,
	natural_t               options,
	mach_port_delta_t       *deltap,
	ipc_importance_task_t   *imp_task)
{
	ipc_port_t next, base;
	bool dropped = false;
	bool took_base_ref = false;

	*imp_task = IIT_NULL;

	if (*deltap == 0) {
		return FALSE;
	}

	assert(options == IPID_OPTION_NORMAL || options == IPID_OPTION_SENDPOSSIBLE);

	base = port;

	/* if port is in transit, have to search for end of chain */
	if (ip_active(port) &&
	    port->ip_destination != IP_NULL &&
	    port->ip_receiver_name == MACH_PORT_NULL) {
		dropped = true;

		ip_unlock(port);
		ipc_port_multiple_lock(); /* massive serialization */

		took_base_ref = ipc_port_destination_chain_lock(port, &base);
		/* all ports in chain from port to base, inclusive, are locked */

		ipc_port_multiple_unlock();
	}

	/*
	 * If the port lock is dropped b/c the port is in transit, there is a
	 * race window where another thread can drain messages and/or fire a
	 * send possible notification before we get here.
	 *
	 * We solve this race by checking to see if our caller armed the send
	 * possible notification, whether or not it's been fired yet, and
	 * whether or not we've already set the port's ip_spimportant bit. If
	 * we don't need a send-possible boost, then we'll just apply a
	 * harmless 0-boost to the port.
	 */
	if (options & IPID_OPTION_SENDPOSSIBLE) {
		assert(*deltap == 1);
		if (port->ip_sprequests && port->ip_spimportant == 0) {
			port->ip_spimportant = 1;
		} else {
			*deltap = 0;
		}
	}

	/* unlock down to the base, adjusting boost(s) at each level */
	for (;;) {
		*deltap = ipc_port_impcount_delta(port, *deltap, base);

		if (port == base) {
			break;
		}

		/* port is in transit */
		assert(port->ip_tempowner == 0);
		next = port->ip_destination;
		ip_unlock(port);
		port = next;
	}

	/* find the task (if any) to boost according to the base */
	if (ip_active(base)) {
		if (base->ip_tempowner != 0) {
			if (IIT_NULL != base->ip_imp_task) {
				*imp_task = base->ip_imp_task;
			}
			/* otherwise don't boost */
		} else if (base->ip_receiver_name != MACH_PORT_NULL) {
			ipc_space_t space = base->ip_receiver;

			/* only spaces with boost-accepting tasks */
			if (space->is_task != TASK_NULL &&
			    ipc_importance_task_is_any_receiver_type(space->is_task->task_imp_base)) {
				*imp_task = space->is_task->task_imp_base;
			}
		}
	}

	/*
	 * Only the base is locked.  If we have to hold or drop task
	 * importance assertions, we'll have to drop that lock as well.
	 */
	if (*imp_task != IIT_NULL) {
		/* take a reference before unlocking base */
		ipc_importance_task_reference(*imp_task);
	}

	if (dropped) {
		ip_unlock(base);
		if (took_base_ref) {
			ip_release(base);
		}
	}

	return dropped;
}
#endif /* IMPORTANCE_INHERITANCE */
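/*
 * Exposition: for a boosted port that is enqueued inside another
 * in-transit port, the chain walked above looks like
 *
 *	port -> port->ip_destination -> ... -> base
 *
 * where `base` is the first port that is dead, in a space, or without a
 * destination. The delta is applied to every port along the way, but
 * only the task (if any) receiving at `base` has its task-level
 * assertions adjusted, by our caller.
 */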
/*
 *	Routine:	ipc_port_importance_delta
 *	Purpose:
 *		Adjust the importance count through the given port.
 *		If the port is in transit, apply the delta throughout
 *		the chain.
 *
 *		If there is a task at the base of the chain that wants/needs
 *		to be adjusted, apply the delta.
 *	Conditions:
 *		The port is referenced and locked on entry.
 *		Nothing else is locked.
 *		The lock may be dropped on exit.
 *		Returns TRUE if lock was dropped.
 */
#if IMPORTANCE_INHERITANCE

boolean_t
ipc_port_importance_delta(
	ipc_port_t              port,
	natural_t               options,
	mach_port_delta_t       delta)
{
	ipc_importance_task_t imp_task = IIT_NULL;
	boolean_t dropped;

	dropped = ipc_port_importance_delta_internal(port, options, &delta, &imp_task);

	if (IIT_NULL == imp_task || delta == 0) {
		return dropped;
	}

	if (!dropped) {
		ip_unlock(port);
	}

	assert(ipc_importance_task_is_any_receiver_type(imp_task));

	if (delta > 0) {
		ipc_importance_task_hold_internal_assertion(imp_task, delta);
	} else {
		ipc_importance_task_drop_internal_assertion(imp_task, -delta);
	}

	ipc_importance_task_release(imp_task);
	return TRUE;
}
#endif /* IMPORTANCE_INHERITANCE */
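/*
 * Usage sketch (illustrative): a sender that queued a message carrying
 * one importance boost applies a +1 delta; the lock contract mirrors
 * the internal helper's:
 *
 *	ip_lock(port);
 *	if (!ipc_port_importance_delta(port, IPID_OPTION_NORMAL, 1)) {
 *		ip_unlock(port);	// lock survives only if no task was adjusted
 *	}
 */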
/*
 *	Routine:	ipc_port_make_send_locked
 *	Purpose:
 *		Make a naked send right from a receive right.
 *	Conditions:
 *		port locked and active.
 */
ipc_port_t
ipc_port_make_send_locked(
	ipc_port_t port)
{
	require_ip_active(port);
	port->ip_mscount++;
	port->ip_srights++;
	ip_reference(port);
	return port;
}

/*
 *	Routine:	ipc_port_make_send
 *	Purpose:
 *		Make a naked send right from a receive right.
 */
ipc_port_t
ipc_port_make_send(
	ipc_port_t port)
{
	if (!IP_VALID(port)) {
		return port;
	}

	ip_lock(port);
	if (ip_active(port)) {
		ipc_port_make_send_locked(port);
		ip_unlock(port);
		return port;
	}
	ip_unlock(port);
	return IP_DEAD;
}
/*
 *	Routine:	ipc_port_copy_send_locked
 *	Purpose:
 *		Make a naked send right from another naked send right.
 *	Conditions:
 *		port locked and active.
 */
void
ipc_port_copy_send_locked(
	ipc_port_t port)
{
	assert(port->ip_srights > 0);
	port->ip_srights++;
	ip_reference(port);
}

/*
 *	Routine:	ipc_port_copy_send
 *	Purpose:
 *		Make a naked send right from another naked send right.
 *			IP_NULL		-> IP_NULL
 *			IP_DEAD		-> IP_DEAD
 *			dead port	-> IP_DEAD
 *			live port	-> port + ref
 *	Conditions:
 *		Nothing locked except possibly a space.
 */
ipc_port_t
ipc_port_copy_send(
	ipc_port_t port)
{
	ipc_port_t sright;

	if (!IP_VALID(port)) {
		return port;
	}

	ip_lock(port);
	if (ip_active(port)) {
		ipc_port_copy_send_locked(port);
		sright = port;
	} else {
		sright = IP_DEAD;
	}
	ip_unlock(port);

	return sright;
}
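/*
 * Usage sketch (illustrative): kernel code that hands a send right to
 * another subsystem typically duplicates it first, so that each holder
 * can release its copy independently:
 *
 *	ipc_port_t sright = ipc_port_copy_send(port);
 *	if (IP_VALID(sright)) {
 *		// ... hand sright off; the holder eventually pays it
 *		// back with ipc_port_release_send(sright) ...
 *	}
 */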
/*
 *	Routine:	ipc_port_copyout_send
 *	Purpose:
 *		Copyout a naked send right (possibly null/dead),
 *		or if that fails, destroy the right.
 *	Conditions:
 *		Nothing locked.
 */
mach_port_name_t
ipc_port_copyout_send(
	ipc_port_t  sright,
	ipc_space_t space)
{
	mach_port_name_t name;

	if (IP_VALID(sright)) {
		kern_return_t kr;

		kr = ipc_object_copyout(space, ip_to_object(sright),
		    MACH_MSG_TYPE_PORT_SEND, NULL, NULL, &name);
		if (kr != KERN_SUCCESS) {
			ipc_port_release_send(sright);

			if (kr == KERN_INVALID_CAPABILITY) {
				name = MACH_PORT_DEAD;
			} else {
				name = MACH_PORT_NULL;
			}
		}
	} else {
		name = CAST_MACH_PORT_TO_NAME(sright);
	}

	return name;
}
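/*
 * Usage sketch (illustrative): after copyout the caller only ever deals
 * in names; the right itself has been consumed one way or the other:
 *
 *	mach_port_name_t name = ipc_port_copyout_send(sright, space);
 *	if (!MACH_PORT_VALID(name)) {
 *		// MACH_PORT_DEAD: the space refused the capability
 *		// MACH_PORT_NULL: the copyout failed for another reason
 *	}
 */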
/*
 *	Routine:	ipc_port_release_send
 *	Purpose:
 *		Release a naked send right.
 *		Consumes a ref for the port.
 *	Conditions:
 *		Nothing locked.
 */
void
ipc_port_release_send(
	ipc_port_t port)
{
	ipc_port_t nsrequest = IP_NULL;
	mach_port_mscount_t mscount;

	if (!IP_VALID(port)) {
		return;
	}

	ip_lock(port);

	assert(port->ip_srights > 0);
	if (port->ip_srights == 0) {
		panic("Over-release of port %p send right!", port);
	}

	port->ip_srights--;

	if (!ip_active(port)) {
		ip_unlock(port);
		ip_release(port);
		return;
	}

	if (port->ip_srights == 0 &&
	    port->ip_nsrequest != IP_NULL) {
		nsrequest = port->ip_nsrequest;
		port->ip_nsrequest = IP_NULL;
		mscount = port->ip_mscount;
		ip_unlock(port);
		ip_release(port);
		ipc_notify_no_senders(nsrequest, mscount);
	} else {
		ip_unlock(port);
		ip_release(port);
	}
}
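/*
 * Exposition: the no-senders notification fires here exactly when the
 * last send right disappears while a notification request is armed.
 * With ip_srights == 1 and ip_nsrequest != IP_NULL on entry, this
 * release takes srights to zero, detaches the request port, and posts
 * ipc_notify_no_senders() with the make-send count that was observed
 * under the port lock.
 */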
/*
 *	Routine:	ipc_port_make_sonce_locked
 *	Purpose:
 *		Make a naked send-once right from a receive right.
 *	Conditions:
 *		The port is locked and active.
 */
ipc_port_t
ipc_port_make_sonce_locked(
	ipc_port_t port)
{
	require_ip_active(port);
	port->ip_sorights++;
	ip_reference(port);
	return port;
}

/*
 *	Routine:	ipc_port_make_sonce
 *	Purpose:
 *		Make a naked send-once right from a receive right.
 *	Conditions:
 *		The port is not locked.
 */
ipc_port_t
ipc_port_make_sonce(
	ipc_port_t port)
{
	if (!IP_VALID(port)) {
		return port;
	}

	ip_lock(port);
	if (ip_active(port)) {
		ipc_port_make_sonce_locked(port);
		ip_unlock(port);
		return port;
	}
	ip_unlock(port);
	return IP_DEAD;
}
/*
 *	Routine:	ipc_port_release_sonce
 *	Purpose:
 *		Release a naked send-once right.
 *		Consumes a ref for the port.
 *
 *		In normal situations, this is never used.
 *		Send-once rights are only consumed when
 *		a message (possibly a send-once notification)
 *		is sent to them.
 *	Conditions:
 *		Nothing locked except possibly a space.
 */
void
ipc_port_release_sonce(
	ipc_port_t port)
{
	if (!IP_VALID(port)) {
		return;
	}

	ipc_port_adjust_special_reply_port(port, IPC_PORT_ADJUST_RESET_BOOSTRAP_CHECKIN);

	ip_lock(port);

	assert(port->ip_sorights > 0);
	if (port->ip_sorights == 0) {
		panic("Over-release of port %p send-once right!", port);
	}

	port->ip_sorights--;

	ip_unlock(port);
	ip_release(port);
}
/*
 *	Routine:	ipc_port_release_receive
 *	Purpose:
 *		Release a naked (in limbo or in transit) receive right.
 *		Consumes a ref for the port; destroys the port.
 *	Conditions:
 *		Nothing locked.
 */
void
ipc_port_release_receive(
	ipc_port_t port)
{
	ipc_port_t dest;

	if (!IP_VALID(port)) {
		return;
	}

	ip_lock(port);
	require_ip_active(port);
	assert(port->ip_receiver_name == MACH_PORT_NULL);
	dest = port->ip_destination;

	ipc_port_destroy(port); /* consumes ref, unlocks */

	if (dest != IP_NULL) {
		ipc_port_send_turnstile_complete(dest);
		ip_release(dest);
	}
}
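/*
 * Exposition: a "naked" receive right is one not held in any space,
 * e.g. a receive right travelling inside a message that is being
 * destroyed. Because the dying port may still be chained to a
 * destination, the send turnstile on `dest` must be completed after
 * the port itself is destroyed.
 */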
/*
 *	Routine:	ipc_port_alloc_special
 *	Purpose:
 *		Allocate a port in a special space.
 *		The new port is returned with one ref.
 *		If unsuccessful, IP_NULL is returned.
 *	Conditions:
 *		Nothing locked.
 */
ipc_port_t
ipc_port_alloc_special(
	ipc_space_t             space,
	ipc_port_init_flags_t   flags)
{
	ipc_port_t port;

	port = ip_object_to_port(io_alloc(IOT_PORT));
	if (port == IP_NULL) {
		return IP_NULL;
	}

#if MACH_ASSERT
	uintptr_t buf[IP_CALLSTACK_MAX];
	ipc_port_callstack_init_debug(&buf[0], IP_CALLSTACK_MAX);
#endif /* MACH_ASSERT */

	bzero((char *)port, sizeof(*port));
	io_lock_init(ip_to_object(port));
	port->ip_references = 1;
	port->ip_object.io_bits = io_makebits(TRUE, IOT_PORT, 0);

	ipc_port_init(port, space, flags, 1);

#if MACH_ASSERT
	ipc_port_init_debug(port, &buf[0], IP_CALLSTACK_MAX);
#endif /* MACH_ASSERT */

	return port;
}
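/*
 * Usage sketch (illustrative; the flag value is an assumption): kernel
 * internal ports are created in a special space roughly like this:
 *
 *	ipc_port_t port = ipc_port_alloc_special(ipc_space_kernel,
 *	    IPC_PORT_INIT_NONE);
 *	if (port == IP_NULL) {
 *		// allocation failed; no partial state to clean up
 *	}
 */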
/*
 *	Routine:	ipc_port_dealloc_special
 *	Purpose:
 *		Deallocate a port in a special space.
 *		Consumes one ref for the port.
 *	Conditions:
 *		Nothing locked.
 */
void
ipc_port_dealloc_special(
	ipc_port_t                  port,
	__assert_only ipc_space_t   space)
{
	ip_lock(port);
	require_ip_active(port);
//	assert(port->ip_receiver_name != MACH_PORT_NULL);
	assert(port->ip_receiver == space);

	/*
	 *	We clear ip_receiver_name and ip_receiver to simplify
	 *	the ipc_space_kernel check in ipc_mqueue_send.
	 */

	imq_lock(&port->ip_messages);
	port->ip_receiver_name = MACH_PORT_NULL;
	port->ip_receiver = IS_NULL;
	imq_unlock(&port->ip_messages);

	/* relevant part of ipc_port_clear_receiver */
	port->ip_mscount = 0;
	port->ip_messages.imq_seqno = 0;

	ipc_port_destroy(port);
}
/*
 *	Routine:	ipc_port_finalize
 *	Purpose:
 *		Called on last reference deallocate to
 *		free any remaining data associated with the
 *		port.
 *	Conditions:
 *		Nothing locked.
 */
void
ipc_port_finalize(
	ipc_port_t port)
{
	ipc_port_request_t requests = port->ip_requests;

	assert(port_send_turnstile(port) == TURNSTILE_NULL);
	if (imq_is_turnstile_proxy(&port->ip_messages)) {
		assert(ipc_port_rcv_turnstile(port) == TURNSTILE_NULL);
	}

	if (ip_active(port)) {
		panic("Trying to free an active port. port %p", port);
	}

	if (requests != IPR_NULL) {
		ipc_table_size_t its = requests->ipr_size;
		it_requests_free(its, requests);
		port->ip_requests = IPR_NULL;
	}

	ipc_mqueue_deinit(&port->ip_messages);

#if MACH_ASSERT
	ipc_port_track_dealloc(port);
#endif /* MACH_ASSERT */
}
/*
 *	Routine:	kdp_mqueue_send_find_owner
 *	Purpose:
 *		Discover the owner of the ipc_mqueue that contains the input
 *		waitq object. The thread blocked on the waitq should be
 *		waiting for an IPC_MQUEUE_FULL event.
 *	Conditions:
 *		The 'waitinfo->wait_type' value should already be set to
 *		kThreadWaitPortSend.
 *	Note:
 *		If we find out that the containing port is actually in
 *		transit, we reset the wait_type field to reflect this.
 */
void
kdp_mqueue_send_find_owner(struct waitq * waitq, __assert_only event64_t event, thread_waitinfo_t * waitinfo)
{
	struct turnstile *turnstile;
	assert(waitinfo->wait_type == kThreadWaitPortSend);
	assert(event == IPC_MQUEUE_FULL);
	assert(waitq_is_turnstile_queue(waitq));

	turnstile = waitq_to_turnstile(waitq);
	ipc_port_t port = (ipc_port_t)turnstile->ts_proprietor; /* we are blocking on send */

	zone_id_require(ZONE_ID_IPC_PORT, sizeof(struct ipc_port), port);

	waitinfo->owner = 0;
	waitinfo->context = VM_KERNEL_UNSLIDE_OR_PERM(port);
	if (ip_lock_held_kdp(port)) {
		/*
		 * someone has the port locked: it may be in an
		 * inconsistent state: bail
		 */
		waitinfo->owner = STACKSHOT_WAITOWNER_PORT_LOCKED;
		return;
	}

	if (ip_active(port)) {
		if (port->ip_tempowner) {
			if (port->ip_imp_task != IIT_NULL && port->ip_imp_task->iit_task != NULL) {
				/* port is held by a tempowner */
				waitinfo->owner = pid_from_task(port->ip_imp_task->iit_task);
			} else {
				waitinfo->owner = STACKSHOT_WAITOWNER_INTRANSIT;
			}
		} else if (port->ip_receiver_name) {
			/* port in a space */
			if (port->ip_receiver == ipc_space_kernel) {
				/*
				 * The kernel pid is 0, make this
				 * distinguishable from no-owner and
				 * inconsistent port state.
				 */
				waitinfo->owner = STACKSHOT_WAITOWNER_KERNEL;
			} else {
				waitinfo->owner = pid_from_task(port->ip_receiver->is_task);
			}
		} else if (port->ip_destination != IP_NULL) {
			/* port in transit */
			waitinfo->wait_type = kThreadWaitPortSendInTransit;
			waitinfo->owner = VM_KERNEL_UNSLIDE_OR_PERM(port->ip_destination);
		}
	}
}
/*
 *	Routine:	kdp_mqueue_recv_find_owner
 *	Purpose:
 *		Discover the "owner" of the ipc_mqueue that contains the input
 *		waitq object. The thread blocked on the waitq is trying to
 *		receive on the mqueue.
 *	Conditions:
 *		The 'waitinfo->wait_type' value should already be set to
 *		kThreadWaitPortReceive.
 *	Note:
 *		If we find that we are actually waiting on a port set, we reset
 *		the wait_type field to reflect this.
 */
void
kdp_mqueue_recv_find_owner(struct waitq * waitq, __assert_only event64_t event, thread_waitinfo_t * waitinfo)
{
	assert(waitinfo->wait_type == kThreadWaitPortReceive);
	assert(event == IPC_MQUEUE_RECEIVE);

	ipc_mqueue_t mqueue = imq_from_waitq(waitq);
	waitinfo->owner = 0;
	if (imq_is_set(mqueue)) { /* we are waiting on a port set */
		ipc_pset_t set = ips_from_mq(mqueue);

		zone_id_require(ZONE_ID_IPC_PORT_SET, sizeof(struct ipc_pset), set);

		/* Reset wait type to specify waiting on port set receive */
		waitinfo->wait_type = kThreadWaitPortSetReceive;
		waitinfo->context = VM_KERNEL_UNSLIDE_OR_PERM(set);
		if (ips_lock_held_kdp(set)) {
			waitinfo->owner = STACKSHOT_WAITOWNER_PSET_LOCKED;
		}
		/* There is no specific owner "at the other end" of a port set, so leave unset. */
	} else {
		ipc_port_t port = ip_from_mq(mqueue);

		zone_id_require(ZONE_ID_IPC_PORT, sizeof(struct ipc_port), port);

		waitinfo->context = VM_KERNEL_UNSLIDE_OR_PERM(port);
		if (ip_lock_held_kdp(port)) {
			waitinfo->owner = STACKSHOT_WAITOWNER_PORT_LOCKED;
			return;
		}

		if (ip_active(port)) {
			if (port->ip_receiver_name != MACH_PORT_NULL) {
				waitinfo->owner = port->ip_receiver_name;
			} else {
				waitinfo->owner = STACKSHOT_WAITOWNER_INTRANSIT;
			}
		}
	}
}
#if MACH_ASSERT
#include <kern/machine.h>

/*
 *	Keep a list of all allocated ports.
 *	Allocation is intercepted via ipc_port_init;
 *	deallocation is intercepted via io_free.
 */
queue_head_t port_alloc_queue = QUEUE_HEAD_INITIALIZER(port_alloc_queue);
LCK_SPIN_DECLARE(port_alloc_queue_lock, &ipc_lck_grp, &ipc_lck_attr);

unsigned long port_count = 0;
unsigned long port_count_warning = 20000;
unsigned long port_timestamp = 0;

void db_port_stack_trace(
	ipc_port_t port);
void db_ref(
	int refs);
int db_port_walk(
	unsigned int verbose,
	unsigned int display,
	unsigned int ref_search,
	unsigned int ref_target);

#ifdef MACH_BSD
extern int proc_pid(struct proc *);
#endif /* MACH_BSD */
/*
 *	Initialize all of the debugging state in a port.
 *	Insert the port into a global list of all allocated ports.
 */
void
ipc_port_init_debug(
	ipc_port_t   port,
	uintptr_t    *callstack,
	unsigned int callstack_max)
{
	unsigned int i;

	port->ip_thread = current_thread();
	port->ip_timetrack = port_timestamp++;
	for (i = 0; i < callstack_max; ++i) {
		port->ip_callstack[i] = callstack[i];
	}
	for (i = 0; i < IP_NSPARES; ++i) {
		port->ip_spares[i] = 0;
	}

#ifdef MACH_BSD
	task_t task = current_task();
	if (task != TASK_NULL) {
		struct proc *proc = (struct proc *) get_bsdtask_info(task);
		if (proc) {
			port->ip_spares[0] = proc_pid(proc);
		}
	}
#endif /* MACH_BSD */

	lck_spin_lock(&port_alloc_queue_lock);
	++port_count;
	if (port_count_warning > 0 && port_count >= port_count_warning) {
		assert(port_count < port_count_warning);
	}
	queue_enter(&port_alloc_queue, port, ipc_port_t, ip_port_links);
	lck_spin_unlock(&port_alloc_queue_lock);
}
/*
 *	Routine:	ipc_port_callstack_init_debug
 *	Purpose:
 *		Calls the machine-dependent routine to
 *		fill in an array with up to IP_CALLSTACK_MAX
 *		levels of return pc information
 *	Conditions:
 *		May block (via copyin)
 */
void
ipc_port_callstack_init_debug(
	uintptr_t    *callstack,
	unsigned int callstack_max)
{
	unsigned int i;

	/* guarantee the callstack is initialized */
	for (i = 0; i < callstack_max; i++) {
		callstack[i] = 0;
	}

	if (ipc_portbt) {
		machine_callstack(callstack, callstack_max);
	}
}
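/*
 * Note: machine_callstack() only runs when the ipc_portbt tunable
 * (boot-arg "ipc_portbt") is enabled, so the common case pays only for
 * zero-filling the array. The captured frames end up in the
 * MACH_ASSERT-only ip_callstack records filled by ipc_port_init_debug().
 */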
/*
 *	Remove a port from the queue of allocated ports.
 *	This routine should be invoked JUST prior to
 *	deallocating the actual memory occupied by the port.
 */
#if 1
void
ipc_port_track_dealloc(
	__unused ipc_port_t port)
{
}
#else
void
ipc_port_track_dealloc(
	ipc_port_t port)
{
	lck_spin_lock(&port_alloc_queue_lock);
	assert(port_count > 0);
	--port_count;
	queue_remove(&port_alloc_queue, port, ipc_port_t, ip_port_links);
	lck_spin_unlock(&port_alloc_queue_lock);
}
#endif

#endif /* MACH_ASSERT */