2 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
29 * @OSF_FREE_COPYRIGHT@
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
34 * All Rights Reserved.
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
46 * Carnegie Mellon requests users of this software to return to
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
57 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
58 * support for mandatory and extensible security protections. This notice
59 * is included in support of clause 2.2 (b) of the Apple Public License,
65 * File: ipc/ipc_port.c
69 * Functions to manipulate IPC ports.
72 #include <zone_debug.h>
73 #include <mach_assert.h>
75 #include <mach/port.h>
76 #include <mach/kern_return.h>
77 #include <kern/ipc_kobject.h>
78 #include <kern/thread.h>
79 #include <kern/misc_protos.h>
80 #include <kern/waitq.h>
81 #include <kern/policy_internal.h>
82 #include <kern/debug.h>
83 #include <kern/kcdata.h>
84 #include <ipc/ipc_entry.h>
85 #include <ipc/ipc_space.h>
86 #include <ipc/ipc_object.h>
87 #include <ipc/ipc_port.h>
88 #include <ipc/ipc_pset.h>
89 #include <ipc/ipc_kmsg.h>
90 #include <ipc/ipc_mqueue.h>
91 #include <ipc/ipc_notify.h>
92 #include <ipc/ipc_table.h>
93 #include <ipc/ipc_importance.h>
94 #include <machine/machlimits.h>
96 #include <security/mac_mach_internal.h>
100 decl_lck_spin_data(, ipc_port_multiple_lock_data
)
101 ipc_port_timestamp_t ipc_port_timestamp_data
;
#if	MACH_ASSERT
/* Forward declarations for MACH_ASSERT port-allocation callstack debugging. */
void	ipc_port_init_debug(
		ipc_port_t	port,
		uintptr_t	*callstack,
		unsigned int	callstack_max);

void	ipc_port_callstack_init_debug(
		uintptr_t	*callstack,
		unsigned int	callstack_max);

#endif	/* MACH_ASSERT */
117 ipc_port_release(ipc_port_t port
)
123 ipc_port_reference(ipc_port_t port
)
129 * Routine: ipc_port_timestamp
131 * Retrieve a timestamp value.
135 ipc_port_timestamp(void)
137 return OSIncrementAtomic(&ipc_port_timestamp_data
);
141 * Routine: ipc_port_request_alloc
143 * Try to allocate a request slot.
144 * If successful, returns the request index.
145 * Otherwise returns zero.
147 * The port is locked and active.
149 * KERN_SUCCESS A request index was found.
150 * KERN_NO_SPACE No index allocated.
153 #if IMPORTANCE_INHERITANCE
155 ipc_port_request_alloc(
157 mach_port_name_t name
,
159 boolean_t send_possible
,
161 ipc_port_request_index_t
*indexp
,
162 boolean_t
*importantp
)
165 ipc_port_request_alloc(
167 mach_port_name_t name
,
169 boolean_t send_possible
,
171 ipc_port_request_index_t
*indexp
)
172 #endif /* IMPORTANCE_INHERITANCE */
174 ipc_port_request_t ipr
, table
;
175 ipc_port_request_index_t index
;
178 #if IMPORTANCE_INHERITANCE
180 #endif /* IMPORTANCE_INHERITANCE */
182 assert(ip_active(port
));
183 assert(name
!= MACH_PORT_NULL
);
184 assert(soright
!= IP_NULL
);
186 table
= port
->ip_requests
;
188 if (table
== IPR_NULL
)
189 return KERN_NO_SPACE
;
191 index
= table
->ipr_next
;
193 return KERN_NO_SPACE
;
196 assert(ipr
->ipr_name
== MACH_PORT_NULL
);
198 table
->ipr_next
= ipr
->ipr_next
;
199 ipr
->ipr_name
= name
;
202 mask
|= IPR_SOR_SPREQ_MASK
;
204 mask
|= IPR_SOR_SPARM_MASK
;
205 if (port
->ip_sprequests
== 0) {
206 port
->ip_sprequests
= 1;
207 #if IMPORTANCE_INHERITANCE
208 /* TODO: Live importance support in send-possible */
209 if (port
->ip_impdonation
!= 0 &&
210 port
->ip_spimportant
== 0 &&
211 (task_is_importance_donor(current_task()))) {
214 #endif /* IMPORTANCE_INHERTANCE */
218 ipr
->ipr_soright
= IPR_SOR_MAKE(soright
, mask
);
226 * Routine: ipc_port_request_grow
228 * Grow a port's table of requests.
230 * The port must be locked and active.
231 * Nothing else locked; will allocate memory.
232 * Upon return the port is unlocked.
234 * KERN_SUCCESS Grew the table.
235 * KERN_SUCCESS Somebody else grew the table.
236 * KERN_SUCCESS The port died.
237 * KERN_RESOURCE_SHORTAGE Couldn't allocate new table.
238 * KERN_NO_SPACE Couldn't grow to desired size
242 ipc_port_request_grow(
244 ipc_table_elems_t target_size
)
246 ipc_table_size_t its
;
247 ipc_port_request_t otable
, ntable
;
249 assert(ip_active(port
));
251 otable
= port
->ip_requests
;
252 if (otable
== IPR_NULL
)
253 its
= &ipc_table_requests
[0];
255 its
= otable
->ipr_size
+ 1;
257 if (target_size
!= ITS_SIZE_NONE
) {
258 if ((otable
!= IPR_NULL
) &&
259 (target_size
<= otable
->ipr_size
->its_size
)) {
263 while ((its
->its_size
) && (its
->its_size
< target_size
)) {
266 if (its
->its_size
== 0) {
268 return KERN_NO_SPACE
;
275 if ((its
->its_size
== 0) ||
276 ((ntable
= it_requests_alloc(its
)) == IPR_NULL
)) {
278 return KERN_RESOURCE_SHORTAGE
;
284 * Check that port is still active and that nobody else
285 * has slipped in and grown the table on us. Note that
286 * just checking if the current table pointer == otable
287 * isn't sufficient; must check ipr_size.
290 if (ip_active(port
) && (port
->ip_requests
== otable
) &&
291 ((otable
== IPR_NULL
) || (otable
->ipr_size
+1 == its
))) {
292 ipc_table_size_t oits
;
293 ipc_table_elems_t osize
, nsize
;
294 ipc_port_request_index_t free
, i
;
296 /* copy old table to new table */
298 if (otable
!= IPR_NULL
) {
299 oits
= otable
->ipr_size
;
300 osize
= oits
->its_size
;
301 free
= otable
->ipr_next
;
303 (void) memcpy((void *)(ntable
+ 1),
304 (const void *)(otable
+ 1),
305 (osize
- 1) * sizeof(struct ipc_port_request
));
312 nsize
= its
->its_size
;
313 assert(nsize
> osize
);
315 /* add new elements to the new table's free list */
317 for (i
= osize
; i
< nsize
; i
++) {
318 ipc_port_request_t ipr
= &ntable
[i
];
320 ipr
->ipr_name
= MACH_PORT_NULL
;
321 ipr
->ipr_next
= free
;
325 ntable
->ipr_next
= free
;
326 ntable
->ipr_size
= its
;
327 port
->ip_requests
= ntable
;
331 if (otable
!= IPR_NULL
) {
332 it_requests_free(oits
, otable
);
337 it_requests_free(its
, ntable
);
344 * Routine: ipc_port_request_sparm
346 * Arm delayed send-possible request.
348 * The port must be locked and active.
350 * Returns TRUE if the request was armed
351 * (or armed with importance in that version).
355 ipc_port_request_sparm(
357 __assert_only mach_port_name_t name
,
358 ipc_port_request_index_t index
,
359 mach_msg_option_t option
,
360 mach_msg_priority_t override
)
362 if (index
!= IE_REQ_NONE
) {
363 ipc_port_request_t ipr
, table
;
365 assert(ip_active(port
));
367 table
= port
->ip_requests
;
368 assert(table
!= IPR_NULL
);
371 assert(ipr
->ipr_name
== name
);
373 /* Is there a valid destination? */
374 if (IPR_SOR_SPREQ(ipr
->ipr_soright
)) {
375 ipr
->ipr_soright
= IPR_SOR_MAKE(ipr
->ipr_soright
, IPR_SOR_SPARM_MASK
);
376 port
->ip_sprequests
= 1;
378 if (option
& MACH_SEND_OVERRIDE
) {
379 /* apply override to message queue */
380 ipc_mqueue_override_send(&port
->ip_messages
, override
);
383 #if IMPORTANCE_INHERITANCE
384 if (((option
& MACH_SEND_NOIMPORTANCE
) == 0) &&
385 (port
->ip_impdonation
!= 0) &&
386 (port
->ip_spimportant
== 0) &&
387 (((option
& MACH_SEND_IMPORTANCE
) != 0) ||
388 (task_is_importance_donor(current_task())))) {
393 #endif /* IMPORTANCE_INHERITANCE */
400 * Routine: ipc_port_request_type
402 * Determine the type(s) of port requests enabled for a name.
404 * The port must be locked or inactive (to avoid table growth).
405 * The index must not be IE_REQ_NONE and for the name in question.
408 ipc_port_request_type(
410 __assert_only mach_port_name_t name
,
411 ipc_port_request_index_t index
)
413 ipc_port_request_t ipr
, table
;
414 mach_port_type_t type
= 0;
416 table
= port
->ip_requests
;
417 assert (table
!= IPR_NULL
);
419 assert(index
!= IE_REQ_NONE
);
421 assert(ipr
->ipr_name
== name
);
423 if (IP_VALID(IPR_SOR_PORT(ipr
->ipr_soright
))) {
424 type
|= MACH_PORT_TYPE_DNREQUEST
;
426 if (IPR_SOR_SPREQ(ipr
->ipr_soright
)) {
427 type
|= MACH_PORT_TYPE_SPREQUEST
;
429 if (!IPR_SOR_SPARMED(ipr
->ipr_soright
)) {
430 type
|= MACH_PORT_TYPE_SPREQUEST_DELAYED
;
438 * Routine: ipc_port_request_cancel
440 * Cancel a dead-name/send-possible request and return the send-once right.
442 * The port must be locked and active.
443 * The index must not be IPR_REQ_NONE and must correspond with name.
447 ipc_port_request_cancel(
449 __assert_only mach_port_name_t name
,
450 ipc_port_request_index_t index
)
452 ipc_port_request_t ipr
, table
;
453 ipc_port_t request
= IP_NULL
;
455 assert(ip_active(port
));
456 table
= port
->ip_requests
;
457 assert(table
!= IPR_NULL
);
459 assert (index
!= IE_REQ_NONE
);
461 assert(ipr
->ipr_name
== name
);
462 request
= IPR_SOR_PORT(ipr
->ipr_soright
);
464 /* return ipr to the free list inside the table */
465 ipr
->ipr_name
= MACH_PORT_NULL
;
466 ipr
->ipr_next
= table
->ipr_next
;
467 table
->ipr_next
= index
;
473 * Routine: ipc_port_pdrequest
475 * Make a port-deleted request, returning the
476 * previously registered send-once right.
477 * Just cancels the previous request if notify is IP_NULL.
479 * The port is locked and active. It is unlocked.
480 * Consumes a ref for notify (if non-null), and
481 * returns previous with a ref (if non-null).
488 ipc_port_t
*previousp
)
492 assert(ip_active(port
));
494 previous
= port
->ip_pdrequest
;
495 port
->ip_pdrequest
= notify
;
498 *previousp
= previous
;
502 * Routine: ipc_port_nsrequest
504 * Make a no-senders request, returning the
505 * previously registered send-once right.
506 * Just cancels the previous request if notify is IP_NULL.
508 * The port is locked and active. It is unlocked.
509 * Consumes a ref for notify (if non-null), and
510 * returns previous with a ref (if non-null).
516 mach_port_mscount_t sync
,
518 ipc_port_t
*previousp
)
521 mach_port_mscount_t mscount
;
523 assert(ip_active(port
));
525 previous
= port
->ip_nsrequest
;
526 mscount
= port
->ip_mscount
;
528 if ((port
->ip_srights
== 0) && (sync
<= mscount
) &&
529 (notify
!= IP_NULL
)) {
530 port
->ip_nsrequest
= IP_NULL
;
532 ipc_notify_no_senders(notify
, mscount
);
534 port
->ip_nsrequest
= notify
;
538 *previousp
= previous
;
543 * Routine: ipc_port_clear_receiver
545 * Prepares a receive right for transmission/destruction,
546 * optionally performs mqueue destruction (with port lock held)
549 * The port is locked and active.
551 * If should_destroy is TRUE, then the return value indicates
552 * whether the caller needs to reap kmsg structures that should
553 * be destroyed (by calling ipc_kmsg_reap_delayed)
555 * If should_destroy is FALSE, this always returns FALSE
559 ipc_port_clear_receiver(
561 boolean_t should_destroy
)
563 ipc_mqueue_t mqueue
= &port
->ip_messages
;
564 boolean_t reap_messages
= FALSE
;
567 * Pull ourselves out of any sets to which we belong.
568 * We hold the port locked, so even though this acquires and releases
569 * the mqueue lock, we know we won't be added to any other sets.
571 if (port
->ip_in_pset
!= 0) {
572 ipc_pset_remove_from_all(port
);
573 assert(port
->ip_in_pset
== 0);
577 * Send anyone waiting on the port's queue directly away.
578 * Also clear the mscount and seqno.
581 ipc_mqueue_changed(mqueue
);
582 port
->ip_mscount
= 0;
583 mqueue
->imq_seqno
= 0;
584 port
->ip_context
= port
->ip_guarded
= port
->ip_strict_guard
= 0;
586 if (should_destroy
) {
588 * Mark the mqueue invalid, preventing further send/receive
589 * operations from succeeding. It's important for this to be
590 * done under the same lock hold as the ipc_mqueue_changed
591 * call to avoid additional threads blocking on an mqueue
592 * that's being destroyed.
594 reap_messages
= ipc_mqueue_destroy_locked(mqueue
);
597 imq_unlock(&port
->ip_messages
);
599 return reap_messages
;
603 * Routine: ipc_port_init
605 * Initializes a newly-allocated port.
606 * Doesn't touch the ip_object fields.
613 mach_port_name_t name
)
615 /* port->ip_kobject doesn't have to be initialized */
617 port
->ip_receiver
= space
;
618 port
->ip_receiver_name
= name
;
620 port
->ip_mscount
= 0;
621 port
->ip_srights
= 0;
622 port
->ip_sorights
= 0;
624 port
->ip_nsrequest
= IP_NULL
;
625 port
->ip_pdrequest
= IP_NULL
;
626 port
->ip_requests
= IPR_NULL
;
628 port
->ip_premsg
= IKM_NULL
;
629 port
->ip_context
= 0;
631 port
->ip_sprequests
= 0;
632 port
->ip_spimportant
= 0;
633 port
->ip_impdonation
= 0;
634 port
->ip_tempowner
= 0;
636 port
->ip_guarded
= 0;
637 port
->ip_strict_guard
= 0;
638 port
->ip_impcount
= 0;
640 port
->ip_specialreply
= 0;
641 port
->ip_link_sync_qos
= 0;
643 ipc_mqueue_init(&port
->ip_messages
,
644 FALSE
/* !set */, NULL
/* no reserved link */);
648 * Routine: ipc_port_alloc
652 * Nothing locked. If successful, the port is returned
653 * locked. (The caller doesn't have a reference.)
655 * KERN_SUCCESS The port is allocated.
656 * KERN_INVALID_TASK The space is dead.
657 * KERN_NO_SPACE No room for an entry in the space.
658 * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
664 mach_port_name_t
*namep
,
668 mach_port_name_t name
;
672 uintptr_t buf
[IP_CALLSTACK_MAX
];
673 ipc_port_callstack_init_debug(&buf
[0], IP_CALLSTACK_MAX
);
674 #endif /* MACH_ASSERT */
676 kr
= ipc_object_alloc(space
, IOT_PORT
,
677 MACH_PORT_TYPE_RECEIVE
, 0,
678 &name
, (ipc_object_t
*) &port
);
679 if (kr
!= KERN_SUCCESS
)
682 /* port and space are locked */
683 ipc_port_init(port
, space
, name
);
686 ipc_port_init_debug(port
, &buf
[0], IP_CALLSTACK_MAX
);
687 #endif /* MACH_ASSERT */
689 /* unlock space after init */
690 is_write_unlock(space
);
699 * Routine: ipc_port_alloc_name
701 * Allocate a port, with a specific name.
703 * Nothing locked. If successful, the port is returned
704 * locked. (The caller doesn't have a reference.)
706 * KERN_SUCCESS The port is allocated.
707 * KERN_INVALID_TASK The space is dead.
708 * KERN_NAME_EXISTS The name already denotes a right.
709 * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
715 mach_port_name_t name
,
722 uintptr_t buf
[IP_CALLSTACK_MAX
];
723 ipc_port_callstack_init_debug(&buf
[0], IP_CALLSTACK_MAX
);
724 #endif /* MACH_ASSERT */
726 kr
= ipc_object_alloc_name(space
, IOT_PORT
,
727 MACH_PORT_TYPE_RECEIVE
, 0,
728 name
, (ipc_object_t
*) &port
);
729 if (kr
!= KERN_SUCCESS
)
734 ipc_port_init(port
, space
, name
);
737 ipc_port_init_debug(port
, &buf
[0], IP_CALLSTACK_MAX
);
738 #endif /* MACH_ASSERT */
746 * Routine: ipc_port_spnotify
748 * Generate send-possible port notifications.
750 * Nothing locked, reference held on port.
756 ipc_port_request_index_t index
= 0;
757 ipc_table_elems_t size
= 0;
760 * If the port has no send-possible request
761 * armed, don't bother to lock the port.
763 if (port
->ip_sprequests
== 0)
768 #if IMPORTANCE_INHERITANCE
769 if (port
->ip_spimportant
!= 0) {
770 port
->ip_spimportant
= 0;
771 if (ipc_port_importance_delta(port
, IPID_OPTION_NORMAL
, -1) == TRUE
) {
775 #endif /* IMPORTANCE_INHERITANCE */
777 if (port
->ip_sprequests
== 0) {
781 port
->ip_sprequests
= 0;
784 if (ip_active(port
)) {
785 ipc_port_request_t requests
;
787 /* table may change each time port unlocked (reload) */
788 requests
= port
->ip_requests
;
789 assert(requests
!= IPR_NULL
);
792 * no need to go beyond table size when first
793 * we entered - those are future notifications.
796 size
= requests
->ipr_size
->its_size
;
798 /* no need to backtrack either */
799 while (++index
< size
) {
800 ipc_port_request_t ipr
= &requests
[index
];
801 mach_port_name_t name
= ipr
->ipr_name
;
802 ipc_port_t soright
= IPR_SOR_PORT(ipr
->ipr_soright
);
803 boolean_t armed
= IPR_SOR_SPARMED(ipr
->ipr_soright
);
805 if (MACH_PORT_VALID(name
) && armed
&& IP_VALID(soright
)) {
806 /* claim send-once right - slot still inuse */
807 ipr
->ipr_soright
= IP_NULL
;
810 ipc_notify_send_possible(soright
, name
);
822 * Routine: ipc_port_dnnotify
824 * Generate dead name notifications for
825 * all outstanding dead-name and send-
829 * Port must be inactive.
830 * Reference held on port.
836 ipc_port_request_t requests
= port
->ip_requests
;
838 assert(!ip_active(port
));
839 if (requests
!= IPR_NULL
) {
840 ipc_table_size_t its
= requests
->ipr_size
;
841 ipc_table_elems_t size
= its
->its_size
;
842 ipc_port_request_index_t index
;
843 for (index
= 1; index
< size
; index
++) {
844 ipc_port_request_t ipr
= &requests
[index
];
845 mach_port_name_t name
= ipr
->ipr_name
;
846 ipc_port_t soright
= IPR_SOR_PORT(ipr
->ipr_soright
);
848 if (MACH_PORT_VALID(name
) && IP_VALID(soright
)) {
849 ipc_notify_dead_name(soright
, name
);
857 * Routine: ipc_port_destroy
859 * Destroys a port. Cleans up queued messages.
861 * If the port has a backup, it doesn't get destroyed,
862 * but is sent in a port-destroyed notification to the backup.
864 * The port is locked and alive; nothing else locked.
865 * The caller has a reference, which is consumed.
866 * Afterwards, the port is unlocked and dead.
870 ipc_port_destroy(ipc_port_t port
)
872 ipc_port_t pdrequest
, nsrequest
;
875 boolean_t special_reply
= port
->ip_specialreply
;
877 #if IMPORTANCE_INHERITANCE
878 ipc_importance_task_t release_imp_task
= IIT_NULL
;
879 thread_t self
= current_thread();
880 boolean_t top
= (self
->ith_assertions
== 0);
881 natural_t assertcnt
= 0;
882 #endif /* IMPORTANCE_INHERITANCE */
884 assert(ip_active(port
));
885 /* port->ip_receiver_name is garbage */
886 /* port->ip_receiver/port->ip_destination is garbage */
888 /* check for a backup port */
889 pdrequest
= port
->ip_pdrequest
;
891 #if IMPORTANCE_INHERITANCE
892 /* determine how many assertions to drop and from whom */
893 if (port
->ip_tempowner
!= 0) {
895 release_imp_task
= port
->ip_imp_task
;
896 if (IIT_NULL
!= release_imp_task
) {
897 port
->ip_imp_task
= IIT_NULL
;
898 assertcnt
= port
->ip_impcount
;
900 /* Otherwise, nothing to drop */
902 assertcnt
= port
->ip_impcount
;
903 if (pdrequest
!= IP_NULL
)
904 /* mark in limbo for the journey */
905 port
->ip_tempowner
= 1;
909 self
->ith_assertions
= assertcnt
;
910 #endif /* IMPORTANCE_INHERITANCE */
912 if (pdrequest
!= IP_NULL
) {
913 /* clear receiver, don't destroy the port */
914 (void)ipc_port_clear_receiver(port
, FALSE
);
915 assert(port
->ip_in_pset
== 0);
916 assert(port
->ip_mscount
== 0);
918 /* we assume the ref for pdrequest */
919 port
->ip_pdrequest
= IP_NULL
;
921 /* make port be in limbo */
922 port
->ip_receiver_name
= MACH_PORT_NULL
;
923 port
->ip_destination
= IP_NULL
;
927 ipc_port_unlink_special_reply_port(port
,
928 IPC_PORT_UNLINK_SR_ALLOW_SYNC_QOS_LINKAGE
);
930 /* consumes our refs for port and pdrequest */
931 ipc_notify_port_destroyed(pdrequest
, port
);
933 goto drop_assertions
;
936 port
->ip_object
.io_bits
&= ~IO_BITS_ACTIVE
;
937 port
->ip_timestamp
= ipc_port_timestamp();
938 nsrequest
= port
->ip_nsrequest
;
941 * The mach_msg_* paths don't hold a port lock, they only hold a
942 * reference to the port object. If a thread raced us and is now
943 * blocked waiting for message reception on this mqueue (or waiting
944 * for ipc_mqueue_full), it will never be woken up. We call
945 * ipc_port_clear_receiver() here, _after_ the port has been marked
946 * inactive, to wakeup any threads which may be blocked and ensure
947 * that no other thread can get lost waiting for a wake up on a
948 * port/mqueue that's been destroyed.
950 boolean_t reap_msgs
= FALSE
;
951 reap_msgs
= ipc_port_clear_receiver(port
, TRUE
); /* marks mqueue inactive */
952 assert(port
->ip_in_pset
== 0);
953 assert(port
->ip_mscount
== 0);
956 * If the port has a preallocated message buffer and that buffer
957 * is not inuse, free it. If it has an inuse one, then the kmsg
958 * free will detect that we freed the association and it can free it
959 * like a normal buffer.
961 * Once the port is marked inactive we don't need to keep it locked.
963 if (IP_PREALLOC(port
)) {
964 ipc_port_t inuse_port
;
966 kmsg
= port
->ip_premsg
;
967 assert(kmsg
!= IKM_NULL
);
968 inuse_port
= ikm_prealloc_inuse_port(kmsg
);
969 IP_CLEAR_PREALLOC(port
, kmsg
);
971 if (inuse_port
!= IP_NULL
) {
972 assert(inuse_port
== port
);
980 /* unlink the kmsg from special reply port */
982 ipc_port_unlink_special_reply_port(port
,
983 IPC_PORT_UNLINK_SR_ALLOW_SYNC_QOS_LINKAGE
);
986 /* throw away no-senders request */
987 if (nsrequest
!= IP_NULL
)
988 ipc_notify_send_once(nsrequest
); /* consumes ref */
991 * Reap any kmsg objects waiting to be destroyed.
992 * This must be done after we've released the port lock.
995 ipc_kmsg_reap_delayed();
997 mqueue
= &port
->ip_messages
;
999 /* cleanup waitq related resources */
1000 ipc_mqueue_deinit(mqueue
);
1002 /* generate dead-name notifications */
1003 ipc_port_dnnotify(port
);
1005 ipc_kobject_destroy(port
);
1007 ip_release(port
); /* consume caller's ref */
1010 #if IMPORTANCE_INHERITANCE
1011 if (release_imp_task
!= IIT_NULL
) {
1012 if (assertcnt
> 0) {
1014 self
->ith_assertions
= 0;
1015 assert(ipc_importance_task_is_any_receiver_type(release_imp_task
));
1016 ipc_importance_task_drop_internal_assertion(release_imp_task
, assertcnt
);
1018 ipc_importance_task_release(release_imp_task
);
1020 } else if (assertcnt
> 0) {
1022 self
->ith_assertions
= 0;
1023 release_imp_task
= current_task()->task_imp_base
;
1024 if (ipc_importance_task_is_any_receiver_type(release_imp_task
)) {
1025 ipc_importance_task_drop_internal_assertion(release_imp_task
, assertcnt
);
1029 #endif /* IMPORTANCE_INHERITANCE */
1033 * Routine: ipc_port_check_circularity
1035 * Check if queueing "port" in a message for "dest"
1036 * would create a circular group of ports and messages.
1038 * If no circularity (FALSE returned), then "port"
1039 * is changed from "in limbo" to "in transit".
1041 * That is, we want to set port->ip_destination == dest,
1042 * but guaranteeing that this doesn't create a circle
1043 * port->ip_destination->ip_destination->... == port
1046 * No ports locked. References held for "port" and "dest".
1050 ipc_port_check_circularity(
1054 #if IMPORTANCE_INHERITANCE
1055 /* adjust importance counts at the same time */
1056 return ipc_importance_check_circularity(port
, dest
);
1059 sync_qos_count_t sync_qos_delta_add
[THREAD_QOS_LAST
] = {0};
1060 sync_qos_count_t sync_qos_delta_sub
[THREAD_QOS_LAST
] = {0};
1061 boolean_t update_knote
= FALSE
;
1063 assert(port
!= IP_NULL
);
1064 assert(dest
!= IP_NULL
);
1071 * First try a quick check that can run in parallel.
1072 * No circularity if dest is not in transit.
1075 if (ip_lock_try(dest
)) {
1076 if (!ip_active(dest
) ||
1077 (dest
->ip_receiver_name
!= MACH_PORT_NULL
) ||
1078 (dest
->ip_destination
== IP_NULL
))
1081 /* dest is in transit; further checking necessary */
1087 ipc_port_multiple_lock(); /* massive serialization */
1090 * Search for the end of the chain (a port not in transit),
1091 * acquiring locks along the way.
1097 if (!ip_active(base
) ||
1098 (base
->ip_receiver_name
!= MACH_PORT_NULL
) ||
1099 (base
->ip_destination
== IP_NULL
))
1102 base
= base
->ip_destination
;
1105 /* all ports in chain from dest to base, inclusive, are locked */
1108 /* circularity detected! */
1110 ipc_port_multiple_unlock();
1112 /* port (== base) is in limbo */
1114 assert(ip_active(port
));
1115 assert(port
->ip_receiver_name
== MACH_PORT_NULL
);
1116 assert(port
->ip_destination
== IP_NULL
);
1118 while (dest
!= IP_NULL
) {
1121 /* dest is in transit or in limbo */
1123 assert(ip_active(dest
));
1124 assert(dest
->ip_receiver_name
== MACH_PORT_NULL
);
1126 next
= dest
->ip_destination
;
1135 * The guarantee: lock port while the entire chain is locked.
1136 * Once port is locked, we can take a reference to dest,
1137 * add port to the chain, and unlock everything.
1141 ipc_port_multiple_unlock();
1144 imq_lock(&base
->ip_messages
);
1146 /* port is in limbo */
1148 assert(ip_active(port
));
1149 assert(port
->ip_receiver_name
== MACH_PORT_NULL
);
1150 assert(port
->ip_destination
== IP_NULL
);
1153 port
->ip_destination
= dest
;
1155 /* Capture the sync qos count delta */
1156 for (int i
= 0; i
< THREAD_QOS_LAST
; i
++) {
1157 sync_qos_delta_add
[i
] = port_sync_qos(port
, i
);
1160 /* now unlock chain */
1165 /* every port along chain tracks override behind it */
1166 update_knote
= ipc_port_sync_qos_delta(dest
, sync_qos_delta_add
, sync_qos_delta_sub
);
1170 /* port is in transit */
1172 assert(ip_active(dest
));
1173 assert(dest
->ip_receiver_name
== MACH_PORT_NULL
);
1174 assert(dest
->ip_destination
!= IP_NULL
);
1176 port
= dest
->ip_destination
;
1181 /* base is not in transit */
1182 assert(!ip_active(base
) ||
1183 (base
->ip_receiver_name
!= MACH_PORT_NULL
) ||
1184 (base
->ip_destination
== IP_NULL
));
1187 KNOTE(&base
->ip_messages
.imq_klist
, 0);
1189 imq_unlock(&base
->ip_messages
);
1194 #endif /* !IMPORTANCE_INHERITANCE */
1198 * Routine: ipc_port_link_special_reply_port_with_qos
1200 * Link the special reply port with the destination port.
1201 * Update the sync qos count of special reply port,
1205 * Nothing is locked.
1208 ipc_port_link_special_reply_port_with_qos(
1209 ipc_port_t special_reply_port
,
1210 ipc_port_t dest_port
,
1213 ipc_port_t next
, base
;
1214 sync_qos_count_t sync_qos_delta_add
[THREAD_QOS_LAST
] = {0};
1215 sync_qos_count_t sync_qos_delta_sub
[THREAD_QOS_LAST
] = {0};
1216 boolean_t update_knote
= FALSE
;
1217 boolean_t multiple_lock
= FALSE
;
1221 /* Check if dest is active */
1222 if (!ip_active(dest_port
)) {
1223 ip_unlock(dest_port
);
1224 return KERN_FAILURE
;
1227 if ((dest_port
->ip_receiver_name
== MACH_PORT_NULL
) &&
1228 (dest_port
->ip_destination
!= IP_NULL
)) {
1229 /* dest_port is in transit; need to take the serialize lock */
1230 ip_unlock(dest_port
);
1231 goto take_multiple_lock
;
1234 /* Check if the port is a special reply port */
1235 if (ip_lock_try(special_reply_port
)) {
1236 if (!special_reply_port
->ip_specialreply
||
1237 !special_reply_port
->ip_link_sync_qos
||
1238 (special_reply_port
->ip_sync_qos_override_port
!= IP_NULL
&&
1239 special_reply_port
->ip_sync_qos_override_port
!= dest_port
)) {
1241 boolean_t link_sync_qos
= special_reply_port
->ip_link_sync_qos
;
1242 ip_unlock(special_reply_port
);
1243 ip_unlock(dest_port
);
1244 /* return KERN_SUCCESS when link_sync_qos is not set */
1245 if (!link_sync_qos
) {
1246 return KERN_SUCCESS
;
1248 return KERN_FAILURE
;
1250 goto both_ports_locked
;
1254 ip_unlock(dest_port
);
1258 ipc_port_multiple_lock(); /* massive serialization */
1259 multiple_lock
= TRUE
;
1261 ip_lock(special_reply_port
);
1263 /* Check if the special reply port is marked regular */
1264 if (!special_reply_port
->ip_specialreply
||
1265 !special_reply_port
->ip_link_sync_qos
||
1266 (special_reply_port
->ip_sync_qos_override_port
!= IP_NULL
&&
1267 special_reply_port
->ip_sync_qos_override_port
!= dest_port
)) {
1269 boolean_t link_sync_qos
= special_reply_port
->ip_link_sync_qos
;
1270 ip_unlock(special_reply_port
);
1271 ipc_port_multiple_unlock();
1272 /* return KERN_SUCCESS when link_sync_qos is not set */
1273 if (!link_sync_qos
) {
1274 return KERN_SUCCESS
;
1276 return KERN_FAILURE
;
1284 /* Apply the qos to special reply port, capture the old qos */
1285 if (special_reply_port
->ip_sync_qos_override_port
!= IP_NULL
) {
1286 /* Check if qos needs to be updated */
1287 if ((sync_qos_count_t
)qos
<= port_special_qos(special_reply_port
)) {
1288 imq_lock(&dest_port
->ip_messages
);
1291 sync_qos_delta_sub
[port_special_qos(special_reply_port
)]++;
1294 set_port_special_qos(special_reply_port
, (sync_qos_count_t
)qos
);
1295 sync_qos_delta_add
[qos
]++;
1297 /* Link the special reply port to dest port */
1298 if (special_reply_port
->ip_sync_qos_override_port
== IP_NULL
) {
1299 /* take a reference on dest_port */
1300 ip_reference(dest_port
);
1301 special_reply_port
->ip_sync_qos_override_port
= dest_port
;
1304 /* Apply the sync qos delta to all in-transit ports */
1306 boolean_t port_not_in_transit
= FALSE
;
1307 if (!ip_active(next
) ||
1308 (next
->ip_receiver_name
!= MACH_PORT_NULL
) ||
1309 (next
->ip_destination
== IP_NULL
)) {
1310 /* Get the mqueue lock for destination port to update knotes */
1311 imq_lock(&next
->ip_messages
);
1312 port_not_in_transit
= TRUE
;
1314 /* Apply the sync qos delta */
1315 update_knote
= ipc_port_sync_qos_delta(next
, sync_qos_delta_add
, sync_qos_delta_sub
);
1317 if (port_not_in_transit
)
1320 next
= next
->ip_destination
;
1325 if (multiple_lock
) {
1326 ipc_port_multiple_unlock();
1329 ip_unlock(special_reply_port
);
1333 while (next
!= base
) {
1334 ipc_port_t prev
= next
;
1335 next
= next
->ip_destination
;
1341 KNOTE(&base
->ip_messages
.imq_klist
, 0);
1343 imq_unlock(&base
->ip_messages
);
1345 return KERN_SUCCESS
;
1349 * Routine: ipc_port_unlink_special_reply_port_locked
1351 * If the special port is linked to a port, adjust it's sync qos override and unlink the port.
1353 * Special reply port locked on entry.
1354 * Special reply port unlocked on return.
1359 ipc_port_unlink_special_reply_port_locked(
1360 ipc_port_t special_reply_port
,
1364 ipc_port_t dest_port
;
1365 sync_qos_count_t sync_qos
;
1366 sync_qos_count_t sync_qos_delta_add
[THREAD_QOS_LAST
] = {0};
1367 sync_qos_count_t sync_qos_delta_sub
[THREAD_QOS_LAST
] = {0};
1369 /* Return if called from copy out in pseudo receive */
1370 if (kn
== ITH_KNOTE_PSEUDO
) {
1371 ip_unlock(special_reply_port
);
1375 /* check if special port has a port linked to it */
1376 if (special_reply_port
->ip_specialreply
== 0 ||
1377 special_reply_port
->ip_sync_qos_override_port
== IP_NULL
) {
1378 set_port_special_qos(special_reply_port
, 0);
1379 if (flags
& IPC_PORT_UNLINK_SR_CLEAR_SPECIAL_REPLY
) {
1380 special_reply_port
->ip_specialreply
= 0;
1382 if (flags
& IPC_PORT_UNLINK_SR_ALLOW_SYNC_QOS_LINKAGE
) {
1383 special_reply_port
->ip_link_sync_qos
= 1;
1385 ip_unlock(special_reply_port
);
1390 * port->ip_sync_qos_override_port is not null and it is safe
1391 * to access it since ip_specialreply is set.
1393 dest_port
= special_reply_port
->ip_sync_qos_override_port
;
1394 sync_qos_delta_sub
[port_special_qos(special_reply_port
)]++;
1395 sync_qos
= port_special_qos(special_reply_port
);
1397 /* Clear qos delta for special reply port */
1398 set_port_special_qos(special_reply_port
, 0);
1399 special_reply_port
->ip_sync_qos_override_port
= IP_NULL
;
1400 if (flags
& IPC_PORT_UNLINK_SR_CLEAR_SPECIAL_REPLY
) {
1401 special_reply_port
->ip_specialreply
= 0;
1404 if (flags
& IPC_PORT_UNLINK_SR_ALLOW_SYNC_QOS_LINKAGE
) {
1405 special_reply_port
->ip_link_sync_qos
= 1;
1407 special_reply_port
->ip_link_sync_qos
= 0;
1410 ip_unlock(special_reply_port
);
1412 /* Add the sync qos on knote */
1413 if (ITH_KNOTE_VALID(kn
)) {
1414 knote_adjust_sync_qos(kn
, sync_qos
, TRUE
);
1417 /* Adjust the sync qos of destination */
1418 ipc_port_adjust_sync_qos(dest_port
, sync_qos_delta_add
, sync_qos_delta_sub
);
1419 ip_release(dest_port
);
1423 * Routine: ipc_port_unlink_special_reply_port
1425 * If the special port is linked to a port, adjust it's sync qos override and unlink the port.
1432 ipc_port_unlink_special_reply_port(
1433 ipc_port_t special_reply_port
,
1436 ip_lock(special_reply_port
);
1437 ipc_port_unlink_special_reply_port_locked(special_reply_port
, NULL
, flags
);
1438 /* special_reply_port unlocked */
1442 * Routine: ipc_port_sync_qos_delta
1444 * Adjust the sync qos count associated with a port.
1446 * For now, be defensive during deductions to make sure the
1447 * sync_qos count for the port doesn't underflow zero.
1449 * TRUE: if max sync qos of the port changes.
1452 * The port is referenced and locked.
1453 * The mqueue is locked if port is not in-transit.
1456 ipc_port_sync_qos_delta(
1458 sync_qos_count_t
*sync_qos_delta_add
,
1459 sync_qos_count_t
*sync_qos_delta_sub
)
1461 sync_qos_count_t max_sync_qos_index
;
1463 if (!ip_active(port
)) {
1467 max_sync_qos_index
= ipc_port_get_max_sync_qos_index(port
);
1469 for (int i
= 0; i
< THREAD_QOS_LAST
; i
++) {
1470 sync_qos_count_t port_sync_qos_count
= port_sync_qos(port
, i
);
1471 /* Do not let the sync qos underflow */
1472 if (sync_qos_delta_sub
[i
] > port_sync_qos_count
) {
1473 KDBG_FILTERED(IMPORTANCE_CODE(IMP_SYNC_IPC_QOS
, IMP_SYNC_IPC_QOS_UNDERFLOW
),
1474 i
, VM_KERNEL_UNSLIDE_OR_PERM(port
),
1475 port_sync_qos_count
, sync_qos_delta_sub
[i
]);
1477 set_port_sync_qos(port
, i
, 0);
1478 } else if (sync_qos_delta_sub
[i
] != 0) {
1479 KDBG_FILTERED(IMPORTANCE_CODE(IMP_SYNC_IPC_QOS
, IMP_SYNC_IPC_QOS_REMOVED
),
1480 i
, VM_KERNEL_UNSLIDE_OR_PERM(port
),
1481 port_sync_qos_count
, sync_qos_delta_sub
[i
]);
1483 set_port_sync_qos(port
, i
, (port_sync_qos_count
- sync_qos_delta_sub
[i
]));
1486 port_sync_qos_count
= port_sync_qos(port
, i
);
1487 /* Do not let the sync qos overflow */
1488 if (UCHAR_MAX
- sync_qos_delta_add
[i
] < port_sync_qos_count
) {
1489 KDBG_FILTERED(IMPORTANCE_CODE(IMP_SYNC_IPC_QOS
, IMP_SYNC_IPC_QOS_OVERFLOW
),
1490 i
, VM_KERNEL_UNSLIDE_OR_PERM(port
),
1491 port_sync_qos_count
, sync_qos_delta_add
[i
]);
1493 set_port_sync_qos(port
, i
, UCHAR_MAX
);
1494 } else if (sync_qos_delta_add
[i
] != 0) {
1495 KDBG_FILTERED(IMPORTANCE_CODE(IMP_SYNC_IPC_QOS
, IMP_SYNC_IPC_QOS_APPLIED
),
1496 i
, VM_KERNEL_UNSLIDE_OR_PERM(port
),
1497 port_sync_qos_count
, sync_qos_delta_add
[i
]);
1499 set_port_sync_qos(port
, i
, (port_sync_qos_count
+ sync_qos_delta_add
[i
]));
1502 return (ipc_port_get_max_sync_qos_index(port
) != max_sync_qos_index
);
1506 * Routine: ipc_port_get_max_sync_qos_index
1508 * Return the max sync qos of the port.
1513 ipc_port_get_max_sync_qos_index(
1517 for (i
= THREAD_QOS_LAST
- 1; i
>= 0; i
--) {
1518 if (port_sync_qos(port
, i
) != 0) {
1522 return THREAD_QOS_UNSPECIFIED
;
1526 * Routine: ipc_port_adjust_sync_qos
1528 * Adjust sync qos of the port and it's destination
1529 * port if the port is in transit.
1536 ipc_port_adjust_sync_qos(
1538 sync_qos_count_t
*sync_qos_delta_add
,
1539 sync_qos_count_t
*sync_qos_delta_sub
)
1541 boolean_t update_knote
;
1542 boolean_t multiple_lock
= FALSE
;
1543 ipc_port_t dest
, base
, next
;
1547 /* Check if the port is in transit */
1548 if (!ip_active(port
) ||
1549 (port
->ip_receiver_name
!= MACH_PORT_NULL
) ||
1550 (port
->ip_destination
== IP_NULL
)) {
1551 /* lock the mqueue since port is not in-transit */
1552 imq_lock(&port
->ip_messages
);
1553 update_knote
= ipc_port_sync_qos_delta(port
, sync_qos_delta_add
, sync_qos_delta_sub
);
1555 KNOTE(&port
->ip_messages
.imq_klist
, 0);
1557 imq_unlock(&port
->ip_messages
);
1562 dest
= port
->ip_destination
;
1563 assert(dest
!= IP_NULL
);
1565 if (ip_lock_try(dest
)) {
1566 if (!ip_active(dest
) ||
1567 (dest
->ip_receiver_name
!= MACH_PORT_NULL
) ||
1568 (dest
->ip_destination
== IP_NULL
)) {
1569 update_knote
= ipc_port_sync_qos_delta(port
, sync_qos_delta_add
, sync_qos_delta_sub
);
1572 /* lock the mqueue since dest is not in-transit */
1573 imq_lock(&dest
->ip_messages
);
1574 update_knote
= ipc_port_sync_qos_delta(dest
, sync_qos_delta_add
, sync_qos_delta_sub
);
1576 KNOTE(&dest
->ip_messages
.imq_klist
, 0);
1578 imq_unlock(&dest
->ip_messages
);
1583 /* dest is in transit; need to take the serialize lock */
1589 ipc_port_multiple_lock(); /* massive serialization */
1590 multiple_lock
= TRUE
;
1595 /* Apply the sync qos delta to all in-transit ports */
1597 boolean_t port_not_in_transit
= FALSE
;
1599 if (!ip_active(next
) ||
1600 (next
->ip_receiver_name
!= MACH_PORT_NULL
) ||
1601 (next
->ip_destination
== IP_NULL
)) {
1602 /* Get the mqueue lock for destination port to update knotes */
1603 imq_lock(&next
->ip_messages
);
1604 port_not_in_transit
= TRUE
;
1607 /* Apply the sync qos delta */
1608 update_knote
= ipc_port_sync_qos_delta(next
, sync_qos_delta_add
, sync_qos_delta_sub
);
1610 if (port_not_in_transit
)
1613 next
= next
->ip_destination
;
1617 if (multiple_lock
) {
1618 ipc_port_multiple_unlock();
1624 while (next
!= base
) {
1625 ipc_port_t prev
= next
;
1626 next
= next
->ip_destination
;
1632 KNOTE(&base
->ip_messages
.imq_klist
, 0);
1634 imq_unlock(&base
->ip_messages
);
1639 * Routine: ipc_port_impcount_delta
1641 * Adjust only the importance count associated with a port.
1642 * If there are any adjustments to be made to receiver task,
1643 * those are handled elsewhere.
1645 * For now, be defensive during deductions to make sure the
1646 * impcount for the port doesn't underflow zero. This will
1647 * go away when the port boost addition is made atomic (see
1648 * note in ipc_port_importance_delta()).
1650 * The port is referenced and locked.
1651 * Nothing else is locked.
1654 ipc_port_impcount_delta(
1656 mach_port_delta_t delta
,
1657 ipc_port_t __unused base
)
1659 mach_port_delta_t absdelta
;
1661 if (!ip_active(port
)) {
1665 /* adding/doing nothing is easy */
1667 port
->ip_impcount
+= delta
;
1671 absdelta
= 0 - delta
;
1672 if (port
->ip_impcount
>= absdelta
) {
1673 port
->ip_impcount
-= absdelta
;
1677 #if (DEVELOPMENT || DEBUG)
1678 if (port
->ip_receiver_name
!= MACH_PORT_NULL
) {
1679 task_t target_task
= port
->ip_receiver
->is_task
;
1680 ipc_importance_task_t target_imp
= target_task
->task_imp_base
;
1681 const char *target_procname
;
1684 if (target_imp
!= IIT_NULL
) {
1685 target_procname
= target_imp
->iit_procname
;
1686 target_pid
= target_imp
->iit_bsd_pid
;
1688 target_procname
= "unknown";
1691 printf("Over-release of importance assertions for port 0x%x receiver pid %d (%s), "
1692 "dropping %d assertion(s) but port only has %d remaining.\n",
1693 port
->ip_receiver_name
,
1694 target_pid
, target_procname
,
1695 absdelta
, port
->ip_impcount
);
1697 } else if (base
!= IP_NULL
) {
1698 task_t target_task
= base
->ip_receiver
->is_task
;
1699 ipc_importance_task_t target_imp
= target_task
->task_imp_base
;
1700 const char *target_procname
;
1703 if (target_imp
!= IIT_NULL
) {
1704 target_procname
= target_imp
->iit_procname
;
1705 target_pid
= target_imp
->iit_bsd_pid
;
1707 target_procname
= "unknown";
1710 printf("Over-release of importance assertions for port 0x%lx "
1711 "enqueued on port 0x%x with receiver pid %d (%s), "
1712 "dropping %d assertion(s) but port only has %d remaining.\n",
1713 (unsigned long)VM_KERNEL_UNSLIDE_OR_PERM((uintptr_t)port
),
1714 base
->ip_receiver_name
,
1715 target_pid
, target_procname
,
1716 absdelta
, port
->ip_impcount
);
1720 delta
= 0 - port
->ip_impcount
;
1721 port
->ip_impcount
= 0;
/*
 *	Routine:	ipc_port_importance_delta_internal
 *	Purpose:
 *		Adjust the importance count through the given port.
 *		If the port is in transit, apply the delta throughout
 *		the chain. Determine if the there is a task at the
 *		base of the chain that wants/needs to be adjusted,
 *		and if so, apply the delta.
 *	Conditions:
 *		The port is referenced and locked on entry.
 *		Importance may be locked.
 *		Nothing else is locked.
 *		The lock may be dropped on exit.
 *		Returns TRUE if lock was dropped.
 *
 *	NOTE(review): reconstructed from a garbled extraction.  The chain
 *	search, the unlock-down loop, and the dropped-lock bookkeeping all
 *	had missing lines restored by inference — verify against upstream.
 */
#if IMPORTANCE_INHERITANCE

boolean_t
ipc_port_importance_delta_internal(
	ipc_port_t            port,
	natural_t             options,
	mach_port_delta_t    *deltap,
	ipc_importance_task_t *imp_task)
{
	ipc_port_t next, base;
	boolean_t dropped = FALSE;

	*imp_task = IIT_NULL;

	if (*deltap == 0)
		return FALSE;

	assert(options == IPID_OPTION_NORMAL || options == IPID_OPTION_SENDPOSSIBLE);

	base = port;

	/* if port is in transit, have to search for end of chain */
	if (ip_active(port) &&
	    port->ip_destination != IP_NULL &&
	    port->ip_receiver_name == MACH_PORT_NULL) {

		dropped = TRUE;

		ip_unlock(port);
		ipc_port_multiple_lock(); /* massive serialization */
		ip_lock(base);

		while(ip_active(base) &&
		      base->ip_destination != IP_NULL &&
		      base->ip_receiver_name == MACH_PORT_NULL) {

			base = base->ip_destination;
			ip_lock(base);
		}
		ipc_port_multiple_unlock();
	}

	/*
	 * If the port lock is dropped b/c the port is in transit, there is a
	 * race window where another thread can drain messages and/or fire a
	 * send possible notification before we get here.
	 *
	 * We solve this race by checking to see if our caller armed the send
	 * possible notification, whether or not it's been fired yet, and
	 * whether or not we've already set the port's ip_spimportant bit. If
	 * we don't need a send-possible boost, then we'll just apply a
	 * harmless 0-boost to the port.
	 */
	if (options & IPID_OPTION_SENDPOSSIBLE) {
		assert(*deltap == 1);
		if (port->ip_sprequests && port->ip_spimportant == 0)
			port->ip_spimportant = 1;
		else
			*deltap = 0;
	}

	/* unlock down to the base, adjusting boost(s) at each level */
	for (;;) {
		*deltap = ipc_port_impcount_delta(port, *deltap, base);

		if (port == base)
			break;

		/* port is in transit */
		assert(port->ip_tempowner == 0);
		next = port->ip_destination;
		ip_unlock(port);
		port = next;
	}

	/* find the task (if any) to boost according to the base */
	if (ip_active(base)) {
		if (base->ip_tempowner != 0) {
			if (IIT_NULL != base->ip_imp_task)
				*imp_task = base->ip_imp_task;
			/* otherwise don't boost */

		} else if (base->ip_receiver_name != MACH_PORT_NULL) {
			ipc_space_t space = base->ip_receiver;

			/* only spaces with boost-accepting tasks */
			if (space->is_task != TASK_NULL &&
			    ipc_importance_task_is_any_receiver_type(space->is_task->task_imp_base)) {
				*imp_task = space->is_task->task_imp_base;
			}
		}
	}

	/*
	 * Only the base is locked.  If we have to hold or drop task
	 * importance assertions, we'll have to drop that lock as well.
	 */
	if (*imp_task != IIT_NULL) {
		/* take a reference before unlocking base */
		ipc_importance_task_reference(*imp_task);
	}

	if (dropped == TRUE) {
		ip_unlock(base);
	}

	return dropped;
}
#endif /* IMPORTANCE_INHERITANCE */
/*
 *	Routine:	ipc_port_importance_delta
 *	Purpose:
 *		Adjust the importance count through the given port.
 *		If the port is in transit, apply the delta throughout
 *		the chain.
 *
 *		If there is a task at the base of the chain that wants/needs
 *		to be adjusted, apply the delta.
 *	Conditions:
 *		The port is referenced and locked on entry.
 *		Nothing else is locked.
 *		The lock may be dropped on exit.
 *		Returns TRUE if lock was dropped.
 *
 *	NOTE(review): reconstructed from a garbled extraction; the unlock
 *	path when the internal call did not drop the lock was restored by
 *	inference — verify against upstream.
 */
#if IMPORTANCE_INHERITANCE

boolean_t
ipc_port_importance_delta(
	ipc_port_t        port,
	natural_t         options,
	mach_port_delta_t delta)
{
	ipc_importance_task_t imp_task = IIT_NULL;
	boolean_t dropped;

	dropped = ipc_port_importance_delta_internal(port, options, &delta, &imp_task);

	if (IIT_NULL == imp_task || delta == 0)
		return dropped;

	if (!dropped) {
		dropped = TRUE;
		ip_unlock(port);
	}

	assert(ipc_importance_task_is_any_receiver_type(imp_task));

	if (delta > 0)
		ipc_importance_task_hold_internal_assertion(imp_task, delta);
	else
		ipc_importance_task_drop_internal_assertion(imp_task, -delta);

	ipc_importance_task_release(imp_task);
	return dropped;
}
#endif /* IMPORTANCE_INHERITANCE */
1898 * Routine: ipc_port_lookup_notify
1900 * Make a send-once notify port from a receive right.
1901 * Returns IP_NULL if name doesn't denote a receive right.
1903 * The space must be locked (read or write) and active.
1904 * Being the active space, we can rely on thread server_id
1905 * context to give us the proper server level sub-order
1910 ipc_port_lookup_notify(
1912 mach_port_name_t name
)
1917 assert(is_active(space
));
1919 entry
= ipc_entry_lookup(space
, name
);
1920 if (entry
== IE_NULL
)
1922 if ((entry
->ie_bits
& MACH_PORT_TYPE_RECEIVE
) == 0)
1925 __IGNORE_WCASTALIGN(port
= (ipc_port_t
) entry
->ie_object
);
1926 assert(port
!= IP_NULL
);
1929 assert(ip_active(port
));
1930 assert(port
->ip_receiver_name
== name
);
1931 assert(port
->ip_receiver
== space
);
1934 port
->ip_sorights
++;
1941 * Routine: ipc_port_make_send_locked
1943 * Make a naked send right from a receive right.
1946 * port locked and active.
1949 ipc_port_make_send_locked(
1952 assert(ip_active(port
));
1960 * Routine: ipc_port_make_send
1962 * Make a naked send right from a receive right.
1970 if (!IP_VALID(port
))
1974 if (ip_active(port
)) {
1986 * Routine: ipc_port_copy_send
1988 * Make a naked send right from another naked send right.
1989 * IP_NULL -> IP_NULL
1990 * IP_DEAD -> IP_DEAD
1991 * dead port -> IP_DEAD
1992 * live port -> port + ref
1994 * Nothing locked except possibly a space.
2003 if (!IP_VALID(port
))
2007 if (ip_active(port
)) {
2008 assert(port
->ip_srights
> 0);
2021 * Routine: ipc_port_copyout_send
2023 * Copyout a naked send right (possibly null/dead),
2024 * or if that fails, destroy the right.
2030 ipc_port_copyout_send(
2034 mach_port_name_t name
;
2036 if (IP_VALID(sright
)) {
2039 kr
= ipc_object_copyout(space
, (ipc_object_t
) sright
,
2040 MACH_MSG_TYPE_PORT_SEND
, TRUE
, &name
);
2041 if (kr
!= KERN_SUCCESS
) {
2042 ipc_port_release_send(sright
);
2044 if (kr
== KERN_INVALID_CAPABILITY
)
2045 name
= MACH_PORT_DEAD
;
2047 name
= MACH_PORT_NULL
;
2050 name
= CAST_MACH_PORT_TO_NAME(sright
);
2056 * Routine: ipc_port_release_send
2058 * Release a naked send right.
2059 * Consumes a ref for the port.
2065 ipc_port_release_send(
2068 ipc_port_t nsrequest
= IP_NULL
;
2069 mach_port_mscount_t mscount
;
2071 if (!IP_VALID(port
))
2076 assert(port
->ip_srights
> 0);
2077 if (port
->ip_srights
== 0) {
2078 panic("Over-release of port %p send right!", port
);
2083 if (!ip_active(port
)) {
2089 if (port
->ip_srights
== 0 &&
2090 port
->ip_nsrequest
!= IP_NULL
) {
2091 nsrequest
= port
->ip_nsrequest
;
2092 port
->ip_nsrequest
= IP_NULL
;
2093 mscount
= port
->ip_mscount
;
2096 ipc_notify_no_senders(nsrequest
, mscount
);
2104 * Routine: ipc_port_make_sonce_locked
2106 * Make a naked send-once right from a receive right.
2108 * The port is locked and active.
2112 ipc_port_make_sonce_locked(
2115 assert(ip_active(port
));
2116 port
->ip_sorights
++;
2122 * Routine: ipc_port_make_sonce
2124 * Make a naked send-once right from a receive right.
2126 * The port is not locked.
2130 ipc_port_make_sonce(
2133 if (!IP_VALID(port
))
2137 if (ip_active(port
)) {
2138 port
->ip_sorights
++;
2148 * Routine: ipc_port_release_sonce
2150 * Release a naked send-once right.
2151 * Consumes a ref for the port.
2153 * In normal situations, this is never used.
2154 * Send-once rights are only consumed when
2155 * a message (possibly a send-once notification)
2158 * Nothing locked except possibly a space.
2162 ipc_port_release_sonce(
2165 if (!IP_VALID(port
))
2168 ipc_port_unlink_special_reply_port(port
, IPC_PORT_UNLINK_SR_NONE
);
2172 assert(port
->ip_sorights
> 0);
2173 if (port
->ip_sorights
== 0) {
2174 panic("Over-release of port %p send-once right!", port
);
2177 port
->ip_sorights
--;
2184 * Routine: ipc_port_release_receive
2186 * Release a naked (in limbo or in transit) receive right.
2187 * Consumes a ref for the port; destroys the port.
2193 ipc_port_release_receive(
2198 if (!IP_VALID(port
))
2202 assert(ip_active(port
));
2203 assert(port
->ip_receiver_name
== MACH_PORT_NULL
);
2204 dest
= port
->ip_destination
;
2206 ipc_port_destroy(port
); /* consumes ref, unlocks */
2208 if (dest
!= IP_NULL
)
2213 * Routine: ipc_port_alloc_special
2215 * Allocate a port in a special space.
2216 * The new port is returned with one ref.
2217 * If unsuccessful, IP_NULL is returned.
2223 ipc_port_alloc_special(
2228 __IGNORE_WCASTALIGN(port
= (ipc_port_t
) io_alloc(IOT_PORT
));
2229 if (port
== IP_NULL
)
2233 uintptr_t buf
[IP_CALLSTACK_MAX
];
2234 ipc_port_callstack_init_debug(&buf
[0], IP_CALLSTACK_MAX
);
2235 #endif /* MACH_ASSERT */
2237 bzero((char *)port
, sizeof(*port
));
2238 io_lock_init(&port
->ip_object
);
2239 port
->ip_references
= 1;
2240 port
->ip_object
.io_bits
= io_makebits(TRUE
, IOT_PORT
, 0);
2242 ipc_port_init(port
, space
, 1);
2245 ipc_port_init_debug(port
, &buf
[0], IP_CALLSTACK_MAX
);
2246 #endif /* MACH_ASSERT */
2252 * Routine: ipc_port_dealloc_special
2254 * Deallocate a port in a special space.
2255 * Consumes one ref for the port.
2261 ipc_port_dealloc_special(
2263 __assert_only ipc_space_t space
)
2266 assert(ip_active(port
));
2267 // assert(port->ip_receiver_name != MACH_PORT_NULL);
2268 assert(port
->ip_receiver
== space
);
2271 * We clear ip_receiver_name and ip_receiver to simplify
2272 * the ipc_space_kernel check in ipc_mqueue_send.
2275 port
->ip_receiver_name
= MACH_PORT_NULL
;
2276 port
->ip_receiver
= IS_NULL
;
2278 /* relevant part of ipc_port_clear_receiver */
2279 ipc_port_set_mscount(port
, 0);
2280 port
->ip_messages
.imq_seqno
= 0;
2282 ipc_port_destroy(port
);
2286 * Routine: ipc_port_finalize
2288 * Called on last reference deallocate to
2289 * free any remaining data associated with the
2298 ipc_port_request_t requests
= port
->ip_requests
;
2300 assert(!ip_active(port
));
2301 if (requests
!= IPR_NULL
) {
2302 ipc_table_size_t its
= requests
->ipr_size
;
2303 it_requests_free(its
, requests
);
2304 port
->ip_requests
= IPR_NULL
;
2307 ipc_mqueue_deinit(&port
->ip_messages
);
2310 ipc_port_track_dealloc(port
);
2311 #endif /* MACH_ASSERT */
2315 * Routine: kdp_mqueue_send_find_owner
2317 * Discover the owner of the ipc_mqueue that contains the input
2318 * waitq object. The thread blocked on the waitq should be
2319 * waiting for an IPC_MQUEUE_FULL event.
2321 * The 'waitinfo->wait_type' value should already be set to
2322 * kThreadWaitPortSend.
2324 * If we find out that the containing port is actually in
2325 * transit, we reset the wait_type field to reflect this.
2328 kdp_mqueue_send_find_owner(struct waitq
* waitq
, __assert_only event64_t event
, thread_waitinfo_t
* waitinfo
)
2330 assert(waitinfo
->wait_type
== kThreadWaitPortSend
);
2331 assert(event
== IPC_MQUEUE_FULL
);
2333 ipc_mqueue_t mqueue
= imq_from_waitq(waitq
);
2334 ipc_port_t port
= ip_from_mq(mqueue
); /* we are blocking on send */
2335 assert(kdp_is_in_zone(port
, "ipc ports"));
2337 waitinfo
->owner
= 0;
2338 waitinfo
->context
= VM_KERNEL_UNSLIDE_OR_PERM(port
);
2339 if (ip_lock_held_kdp(port
)) {
2341 * someone has the port locked: it may be in an
2342 * inconsistent state: bail
2344 waitinfo
->owner
= STACKSHOT_WAITOWNER_PORT_LOCKED
;
2348 if (ip_active(port
)) {
2349 if (port
->ip_tempowner
) {
2350 if (port
->ip_imp_task
!= IIT_NULL
&& port
->ip_imp_task
->iit_task
!= NULL
) {
2351 /* port is held by a tempowner */
2352 waitinfo
->owner
= pid_from_task(port
->ip_imp_task
->iit_task
);
2354 waitinfo
->owner
= STACKSHOT_WAITOWNER_INTRANSIT
;
2356 } else if (port
->ip_receiver_name
) {
2357 /* port in a space */
2358 if (port
->ip_receiver
== ipc_space_kernel
) {
2360 * The kernel pid is 0, make this
2361 * distinguishable from no-owner and
2362 * inconsistent port state.
2364 waitinfo
->owner
= STACKSHOT_WAITOWNER_KERNEL
;
2366 waitinfo
->owner
= pid_from_task(port
->ip_receiver
->is_task
);
2368 } else if (port
->ip_destination
!= IP_NULL
) {
2369 /* port in transit */
2370 waitinfo
->wait_type
= kThreadWaitPortSendInTransit
;
2371 waitinfo
->owner
= VM_KERNEL_UNSLIDE_OR_PERM(port
->ip_destination
);
2377 * Routine: kdp_mqueue_recv_find_owner
2379 * Discover the "owner" of the ipc_mqueue that contains the input
2380 * waitq object. The thread blocked on the waitq is trying to
2381 * receive on the mqueue.
2383 * The 'waitinfo->wait_type' value should already be set to
2384 * kThreadWaitPortReceive.
2386 * If we find that we are actualy waiting on a port set, we reset
2387 * the wait_type field to reflect this.
2390 kdp_mqueue_recv_find_owner(struct waitq
* waitq
, __assert_only event64_t event
, thread_waitinfo_t
* waitinfo
)
2392 assert(waitinfo
->wait_type
== kThreadWaitPortReceive
);
2393 assert(event
== IPC_MQUEUE_RECEIVE
);
2395 ipc_mqueue_t mqueue
= imq_from_waitq(waitq
);
2396 waitinfo
->owner
= 0;
2397 if (imq_is_set(mqueue
)) { /* we are waiting on a port set */
2398 ipc_pset_t set
= ips_from_mq(mqueue
);
2399 assert(kdp_is_in_zone(set
, "ipc port sets"));
2401 /* Reset wait type to specify waiting on port set receive */
2402 waitinfo
->wait_type
= kThreadWaitPortSetReceive
;
2403 waitinfo
->context
= VM_KERNEL_UNSLIDE_OR_PERM(set
);
2404 if (ips_lock_held_kdp(set
)) {
2405 waitinfo
->owner
= STACKSHOT_WAITOWNER_PSET_LOCKED
;
2407 /* There is no specific owner "at the other end" of a port set, so leave unset. */
2409 ipc_port_t port
= ip_from_mq(mqueue
);
2410 assert(kdp_is_in_zone(port
, "ipc ports"));
2412 waitinfo
->context
= VM_KERNEL_UNSLIDE_OR_PERM(port
);
2413 if (ip_lock_held_kdp(port
)) {
2414 waitinfo
->owner
= STACKSHOT_WAITOWNER_PORT_LOCKED
;
2418 if (ip_active(port
)) {
2419 if (port
->ip_receiver_name
!= MACH_PORT_NULL
) {
2420 waitinfo
->owner
= port
->ip_receiver_name
;
2422 waitinfo
->owner
= STACKSHOT_WAITOWNER_INTRANSIT
;
2429 #include <kern/machine.h>
2432 * Keep a list of all allocated ports.
2433 * Allocation is intercepted via ipc_port_init;
2434 * deallocation is intercepted via io_free.
2437 queue_head_t port_alloc_queue
;
2438 lck_spin_t port_alloc_queue_lock
;
2441 unsigned long port_count
= 0;
2442 unsigned long port_count_warning
= 20000;
2443 unsigned long port_timestamp
= 0;
2445 void db_port_stack_trace(
2450 unsigned int verbose
,
2451 unsigned int display
,
2452 unsigned int ref_search
,
2453 unsigned int ref_target
);
2456 * Initialize global state needed for run-time
2460 ipc_port_debug_init(void)
2463 queue_init(&port_alloc_queue
);
2464 lck_spin_init(&port_alloc_queue_lock
, &ipc_lck_grp
, &ipc_lck_attr
);
2467 if (!PE_parse_boot_argn("ipc_portbt", &ipc_portbt
, sizeof (ipc_portbt
)))
2472 extern int proc_pid(struct proc
*);
2473 #endif /* MACH_BSD */
2476 * Initialize all of the debugging state in a port.
2477 * Insert the port into a global list of all allocated ports.
2480 ipc_port_init_debug(
2482 uintptr_t *callstack
,
2483 unsigned int callstack_max
)
2487 port
->ip_thread
= current_thread();
2488 port
->ip_timetrack
= port_timestamp
++;
2489 for (i
= 0; i
< callstack_max
; ++i
)
2490 port
->ip_callstack
[i
] = callstack
[i
];
2491 for (i
= 0; i
< IP_NSPARES
; ++i
)
2492 port
->ip_spares
[i
] = 0;
2495 task_t task
= current_task();
2496 if (task
!= TASK_NULL
) {
2497 struct proc
* proc
= (struct proc
*) get_bsdtask_info(task
);
2499 port
->ip_spares
[0] = proc_pid(proc
);
2501 #endif /* MACH_BSD */
2504 lck_spin_lock(&port_alloc_queue_lock
);
2506 if (port_count_warning
> 0 && port_count
>= port_count_warning
)
2507 assert(port_count
< port_count_warning
);
2508 queue_enter(&port_alloc_queue
, port
, ipc_port_t
, ip_port_links
);
2509 lck_spin_unlock(&port_alloc_queue_lock
);
2514 * Routine: ipc_port_callstack_init_debug
2516 * Calls the machine-dependent routine to
2517 * fill in an array with up to IP_CALLSTACK_MAX
2518 * levels of return pc information
2520 * May block (via copyin)
2523 ipc_port_callstack_init_debug(
2524 uintptr_t *callstack
,
2525 unsigned int callstack_max
)
2529 /* guarantee the callstack is initialized */
2530 for (i
=0; i
< callstack_max
; i
++)
2534 machine_callstack(callstack
, callstack_max
);
2538 * Remove a port from the queue of allocated ports.
2539 * This routine should be invoked JUST prior to
2540 * deallocating the actual memory occupied by the port.
2544 ipc_port_track_dealloc(
2545 __unused ipc_port_t port
)
2550 ipc_port_track_dealloc(
2553 lck_spin_lock(&port_alloc_queue_lock
);
2554 assert(port_count
> 0);
2556 queue_remove(&port_alloc_queue
, port
, ipc_port_t
, ip_port_links
);
2557 lck_spin_unlock(&port_alloc_queue_lock
);
2562 #endif /* MACH_ASSERT */