/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */
/*
 *	File:	ipc/ipc_port.c
 *
 *	Functions to manipulate IPC ports.
 */
#include <zone_debug.h>
#include <mach_assert.h>

#include <mach/port.h>
#include <mach/kern_return.h>
#include <kern/ipc_kobject.h>
#include <kern/thread.h>
#include <kern/misc_protos.h>
#include <kern/waitq.h>
#include <kern/policy_internal.h>
#include <ipc/ipc_entry.h>
#include <ipc/ipc_space.h>
#include <ipc/ipc_object.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_pset.h>
#include <ipc/ipc_kmsg.h>
#include <ipc/ipc_mqueue.h>
#include <ipc/ipc_notify.h>
#include <ipc/ipc_table.h>
#include <ipc/ipc_importance.h>

#include <security/mac_mach_internal.h>
decl_lck_spin_data(,	ipc_port_multiple_lock_data)
ipc_port_timestamp_t	ipc_port_timestamp_data;
int			ipc_portbt;

#if	MACH_ASSERT
void	ipc_port_init_debug(
		ipc_port_t	port,
		uintptr_t	*callstack,
		unsigned int	callstack_max);

void	ipc_port_callstack_init_debug(
		uintptr_t	*callstack,
		unsigned int	callstack_max);

#endif	/* MACH_ASSERT */
void
ipc_port_release(ipc_port_t port)
{
    ip_release(port);
}

void
ipc_port_reference(ipc_port_t port)
{
    ip_reference(port);
}

/*
 *	Routine:	ipc_port_timestamp
 *	Purpose:
 *		Retrieve a timestamp value.
 */

ipc_port_timestamp_t
ipc_port_timestamp(void)
{
    return OSIncrementAtomic(&ipc_port_timestamp_data);
}
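/*
 * Illustrative sketch (not part of the original file): the timestamp is a
 * free-running counter, so "which event happened first" is decided by a
 * signed subtraction, which remains correct across counter wraparound.
 * The helper name below is an assumption for illustration only.
 */
static __unused boolean_t
example_timestamp_precedes(ipc_port_timestamp_t a, ipc_port_timestamp_t b)
{
    /* TRUE if timestamp 'a' was taken before timestamp 'b' */
    return (int)(a - b) < 0;
}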
/*
 *	Routine:	ipc_port_request_alloc
 *	Purpose:
 *		Try to allocate a request slot.
 *		If successful, returns the request index.
 *		Otherwise returns zero.
 *	Conditions:
 *		The port is locked and active.
 *	Returns:
 *		KERN_SUCCESS		A request index was found.
 *		KERN_NO_SPACE		No index allocated.
 */

#if IMPORTANCE_INHERITANCE
kern_return_t
ipc_port_request_alloc(
    ipc_port_t			port,
    mach_port_name_t		name,
    ipc_port_t			soright,
    boolean_t			send_possible,
    boolean_t			immediate,
    ipc_port_request_index_t	*indexp,
    boolean_t			*importantp)
#else
kern_return_t
ipc_port_request_alloc(
    ipc_port_t			port,
    mach_port_name_t		name,
    ipc_port_t			soright,
    boolean_t			send_possible,
    boolean_t			immediate,
    ipc_port_request_index_t	*indexp)
#endif /* IMPORTANCE_INHERITANCE */
{
    ipc_port_request_t ipr, table;
    ipc_port_request_index_t index;
    uintptr_t mask = 0;

#if IMPORTANCE_INHERITANCE
    *importantp = FALSE;
#endif /* IMPORTANCE_INHERITANCE */

    assert(ip_active(port));
    assert(name != MACH_PORT_NULL);
    assert(soright != IP_NULL);

    table = port->ip_requests;

    if (table == IPR_NULL)
        return KERN_NO_SPACE;

    index = table->ipr_next;
    if (index == 0)
        return KERN_NO_SPACE;

    ipr = &table[index];
    assert(ipr->ipr_name == MACH_PORT_NULL);

    table->ipr_next = ipr->ipr_next;
    ipr->ipr_name = name;

    if (send_possible) {
        mask |= IPR_SOR_SPREQ_MASK;
        if (immediate) {
            mask |= IPR_SOR_SPARM_MASK;
            if (port->ip_sprequests == 0) {
                port->ip_sprequests = 1;
#if IMPORTANCE_INHERITANCE
                /* TODO: Live importance support in send-possible */
                if (port->ip_impdonation != 0 &&
                    port->ip_spimportant == 0 &&
                    (task_is_importance_donor(current_task()))) {
                    *importantp = TRUE;
                }
#endif /* IMPORTANCE_INHERITANCE */
            }
        }
    }

    ipr->ipr_soright = IPR_SOR_MAKE(soright, mask);

    *indexp = index;

    return KERN_SUCCESS;
}
/*
 *	Routine:	ipc_port_request_grow
 *	Purpose:
 *		Grow a port's table of requests.
 *	Conditions:
 *		The port must be locked and active.
 *		Nothing else locked; will allocate memory.
 *		Upon return the port is unlocked.
 *	Returns:
 *		KERN_SUCCESS		Grew the table.
 *		KERN_SUCCESS		Somebody else grew the table.
 *		KERN_SUCCESS		The port died.
 *		KERN_RESOURCE_SHORTAGE	Couldn't allocate new table.
 *		KERN_NO_SPACE		Couldn't grow to desired size
 */

kern_return_t
ipc_port_request_grow(
    ipc_port_t		port,
    ipc_table_elems_t	target_size)
{
    ipc_table_size_t its;
    ipc_port_request_t otable, ntable;

    assert(ip_active(port));

    otable = port->ip_requests;
    if (otable == IPR_NULL)
        its = &ipc_table_requests[0];
    else
        its = otable->ipr_size + 1;

    if (target_size != ITS_SIZE_NONE) {
        if ((otable != IPR_NULL) &&
            (target_size <= otable->ipr_size->its_size)) {
            ip_unlock(port);
            return KERN_SUCCESS;
        }
        while ((its->its_size) && (its->its_size < target_size)) {
            its++;
        }
        if (its->its_size == 0) {
            ip_unlock(port);
            return KERN_NO_SPACE;
        }
    }

    ip_reference(port);
    ip_unlock(port);

    if ((its->its_size == 0) ||
        ((ntable = it_requests_alloc(its)) == IPR_NULL)) {
        ip_release(port);
        return KERN_RESOURCE_SHORTAGE;
    }

    ip_lock(port);

    /*
     *	Check that port is still active and that nobody else
     *	has slipped in and grown the table on us.  Note that
     *	just checking if the current table pointer == otable
     *	isn't sufficient; must check ipr_size.
     */

    if (ip_active(port) && (port->ip_requests == otable) &&
        ((otable == IPR_NULL) || (otable->ipr_size+1 == its))) {
        ipc_table_size_t oits;
        ipc_table_elems_t osize, nsize;
        ipc_port_request_index_t free, i;

        /* copy old table to new table */

        if (otable != IPR_NULL) {
            oits = otable->ipr_size;
            osize = oits->its_size;
            free = otable->ipr_next;

            (void) memcpy((void *)(ntable + 1),
                  (const void *)(otable + 1),
                  (osize - 1) * sizeof(struct ipc_port_request));
        } else {
            osize = 1;
            oits = 0;
            free = 0;
        }

        nsize = its->its_size;
        assert(nsize > osize);

        /* add new elements to the new table's free list */

        for (i = osize; i < nsize; i++) {
            ipc_port_request_t ipr = &ntable[i];

            ipr->ipr_name = MACH_PORT_NULL;
            ipr->ipr_next = free;
            free = i;
        }

        ntable->ipr_next = free;
        ntable->ipr_size = its;
        port->ip_requests = ntable;
        ip_unlock(port);
        ip_release(port);

        if (otable != IPR_NULL) {
            it_requests_free(oits, otable);
        }
    } else {
        ip_unlock(port);
        ip_release(port);
        it_requests_free(its, ntable);
    }

    return KERN_SUCCESS;
}
/*
 *	Routine:	ipc_port_request_sparm
 *	Purpose:
 *		Arm delayed send-possible request.
 *	Conditions:
 *		The port must be locked and active.
 *
 *		Returns TRUE if the request was armed
 *		(or armed with importance in that version).
 */

boolean_t
ipc_port_request_sparm(
    ipc_port_t			port,
    __assert_only mach_port_name_t name,
    ipc_port_request_index_t	index,
    mach_msg_option_t		option,
    mach_msg_priority_t		override)
{
    if (index != IE_REQ_NONE) {
        ipc_port_request_t ipr, table;

        assert(ip_active(port));

        table = port->ip_requests;
        assert(table != IPR_NULL);

        ipr = &table[index];
        assert(ipr->ipr_name == name);

        /* Is there a valid destination? */
        if (IPR_SOR_SPREQ(ipr->ipr_soright)) {
            ipr->ipr_soright = IPR_SOR_MAKE(ipr->ipr_soright, IPR_SOR_SPARM_MASK);
            port->ip_sprequests = 1;

            if (option & MACH_SEND_OVERRIDE) {
                /* apply override to message queue */
                ipc_mqueue_override_send(&port->ip_messages, override);
            }

#if IMPORTANCE_INHERITANCE
            if (((option & MACH_SEND_NOIMPORTANCE) == 0) &&
                (port->ip_impdonation != 0) &&
                (port->ip_spimportant == 0) &&
                (((option & MACH_SEND_IMPORTANCE) != 0) ||
                 (task_is_importance_donor(current_task())))) {
                return TRUE;
            }
#else
            return TRUE;
#endif /* IMPORTANCE_INHERITANCE */
        }
    }
    return FALSE;
}
/*
 *	Routine:	ipc_port_request_type
 *	Purpose:
 *		Determine the type(s) of port requests enabled for a name.
 *	Conditions:
 *		The port must be locked or inactive (to avoid table growth).
 *		The index must not be IE_REQ_NONE and for the name in question.
 */

mach_port_type_t
ipc_port_request_type(
    ipc_port_t			port,
    __assert_only mach_port_name_t name,
    ipc_port_request_index_t	index)
{
    ipc_port_request_t ipr, table;
    mach_port_type_t type = 0;

    table = port->ip_requests;
    assert(table != IPR_NULL);

    assert(index != IE_REQ_NONE);
    ipr = &table[index];
    assert(ipr->ipr_name == name);

    if (IP_VALID(IPR_SOR_PORT(ipr->ipr_soright))) {
        type |= MACH_PORT_TYPE_DNREQUEST;

        if (IPR_SOR_SPREQ(ipr->ipr_soright)) {
            type |= MACH_PORT_TYPE_SPREQUEST;

            if (!IPR_SOR_SPARMED(ipr->ipr_soright)) {
                type |= MACH_PORT_TYPE_SPREQUEST_DELAYED;
            }
        }
    }
    return type;
}
/*
 *	Routine:	ipc_port_request_cancel
 *	Purpose:
 *		Cancel a dead-name/send-possible request and return the send-once right.
 *	Conditions:
 *		The port must be locked and active.
 *		The index must not be IPR_REQ_NONE and must correspond with name.
 */

ipc_port_t
ipc_port_request_cancel(
    ipc_port_t			port,
    __assert_only mach_port_name_t name,
    ipc_port_request_index_t	index)
{
    ipc_port_request_t ipr, table;
    ipc_port_t request = IP_NULL;

    assert(ip_active(port));
    table = port->ip_requests;
    assert(table != IPR_NULL);

    assert(index != IE_REQ_NONE);
    ipr = &table[index];
    assert(ipr->ipr_name == name);
    request = IPR_SOR_PORT(ipr->ipr_soright);

    /* return ipr to the free list inside the table */
    ipr->ipr_name = MACH_PORT_NULL;
    ipr->ipr_next = table->ipr_next;
    table->ipr_next = index;

    return request;
}
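/*
 * Illustrative sketch (not part of the original file): the send-once right
 * returned by ipc_port_request_cancel() is not consumed by the cancel
 * itself; the caller must dispose of it, for example by generating a
 * send-once notification.  The function name is an assumption for
 * illustration only.
 */
static __unused void
example_cancel_request(
    ipc_port_t			port,
    mach_port_name_t		name,
    ipc_port_request_index_t	index)
{
    ipc_port_t soright;

    ip_lock(port);
    assert(ip_active(port));
    soright = ipc_port_request_cancel(port, name, index);
    ip_unlock(port);

    if (IP_VALID(soright))
        ipc_notify_send_once(soright);	/* consumes the right */
}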
/*
 *	Routine:	ipc_port_pdrequest
 *	Purpose:
 *		Make a port-deleted request, returning the
 *		previously registered send-once right.
 *		Just cancels the previous request if notify is IP_NULL.
 *	Conditions:
 *		The port is locked and active.  It is unlocked.
 *		Consumes a ref for notify (if non-null), and
 *		returns previous with a ref (if non-null).
 */

void
ipc_port_pdrequest(
    ipc_port_t	port,
    ipc_port_t	notify,
    ipc_port_t	*previousp)
{
    ipc_port_t previous;

    assert(ip_active(port));

    previous = port->ip_pdrequest;
    port->ip_pdrequest = notify;
    ip_unlock(port);

    *previousp = previous;
}
/*
 *	Routine:	ipc_port_nsrequest
 *	Purpose:
 *		Make a no-senders request, returning the
 *		previously registered send-once right.
 *		Just cancels the previous request if notify is IP_NULL.
 *	Conditions:
 *		The port is locked and active.  It is unlocked.
 *		Consumes a ref for notify (if non-null), and
 *		returns previous with a ref (if non-null).
 */

void
ipc_port_nsrequest(
    ipc_port_t		port,
    mach_port_mscount_t	sync,
    ipc_port_t		notify,
    ipc_port_t		*previousp)
{
    ipc_port_t previous;
    mach_port_mscount_t mscount;

    assert(ip_active(port));

    previous = port->ip_nsrequest;
    mscount = port->ip_mscount;

    if ((port->ip_srights == 0) && (sync <= mscount) &&
        (notify != IP_NULL)) {
        port->ip_nsrequest = IP_NULL;
        ip_unlock(port);
        ipc_notify_no_senders(notify, mscount);
    } else {
        port->ip_nsrequest = notify;
        ip_unlock(port);
    }

    *previousp = previous;
}
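/*
 * Illustrative sketch (not part of the original file): how a caller might
 * arm a no-senders notification on a port it owns.  "notify_sonce" stands
 * for a send-once right the caller already holds to its notification port;
 * the names are assumptions for illustration only.  Note that
 * ipc_port_nsrequest() unlocks the port and hands back the previously
 * registered send-once right, which the caller must dispose of.
 */
static __unused void
example_arm_no_senders(ipc_port_t port, ipc_port_t notify_sonce)
{
    ipc_port_t previous;

    ip_lock(port);
    assert(ip_active(port));
    /* sync == ip_mscount: fire only after all currently made sends die */
    ipc_port_nsrequest(port, port->ip_mscount, notify_sonce, &previous);
    /* port is now unlocked */

    if (IP_VALID(previous))
        ipc_notify_send_once(previous);	/* consume the displaced right */
}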
/*
 *	Routine:	ipc_port_clear_receiver
 *	Purpose:
 *		Prepares a receive right for transmission/destruction,
 *		optionally performs mqueue destruction (with port lock held)
 *	Conditions:
 *		The port is locked and active.
 *	Returns:
 *		If should_destroy is TRUE, then the return value indicates
 *		whether the caller needs to reap kmsg structures that should
 *		be destroyed (by calling ipc_kmsg_reap_delayed)
 *
 *		If should_destroy is FALSE, this always returns FALSE
 */

boolean_t
ipc_port_clear_receiver(
    ipc_port_t	port,
    boolean_t	should_destroy)
{
    ipc_mqueue_t	mqueue = &port->ip_messages;
    boolean_t	reap_messages = FALSE;

    /*
     * Pull ourselves out of any sets to which we belong.
     * We hold the port locked, so even though this acquires and releases
     * the mqueue lock, we know we won't be added to any other sets.
     */
    if (port->ip_in_pset != 0) {
        ipc_pset_remove_from_all(port);
        assert(port->ip_in_pset == 0);
    }

    /*
     * Send anyone waiting on the port's queue directly away.
     * Also clear the mscount and seqno.
     */
    imq_lock(mqueue);
    ipc_mqueue_changed(mqueue);
    port->ip_mscount = 0;
    mqueue->imq_seqno = 0;
    port->ip_context = port->ip_guarded = port->ip_strict_guard = 0;

    if (should_destroy) {
        /*
         * Mark the mqueue invalid, preventing further send/receive
         * operations from succeeding. It's important for this to be
         * done under the same lock hold as the ipc_mqueue_changed
         * call to avoid additional threads blocking on an mqueue
         * that's being destroyed.
         */
        reap_messages = ipc_mqueue_destroy_locked(mqueue);
    }

    imq_unlock(&port->ip_messages);

    return reap_messages;
}
/*
 *	Routine:	ipc_port_init
 *	Purpose:
 *		Initializes a newly-allocated port.
 *		Doesn't touch the ip_object fields.
 */

void
ipc_port_init(
    ipc_port_t		port,
    ipc_space_t		space,
    mach_port_name_t	name)
{
    /* port->ip_kobject doesn't have to be initialized */

    port->ip_receiver = space;
    port->ip_receiver_name = name;

    port->ip_mscount = 0;
    port->ip_srights = 0;
    port->ip_sorights = 0;

    port->ip_nsrequest = IP_NULL;
    port->ip_pdrequest = IP_NULL;
    port->ip_requests = IPR_NULL;

    port->ip_premsg = IKM_NULL;
    port->ip_context = 0;

    port->ip_sprequests  = 0;
    port->ip_spimportant = 0;
    port->ip_impdonation = 0;
    port->ip_tempowner   = 0;

    port->ip_guarded      = 0;
    port->ip_strict_guard = 0;
    port->ip_impcount     = 0;

    port->ip_reserved     = 0;

    ipc_mqueue_init(&port->ip_messages,
            FALSE /* !set */, NULL /* no reserved link */);
}
/*
 *	Routine:	ipc_port_alloc
 *	Purpose:
 *		Allocate a port.
 *	Conditions:
 *		Nothing locked.  If successful, the port is returned
 *		locked.  (The caller doesn't have a reference.)
 *	Returns:
 *		KERN_SUCCESS		The port is allocated.
 *		KERN_INVALID_TASK	The space is dead.
 *		KERN_NO_SPACE		No room for an entry in the space.
 *		KERN_RESOURCE_SHORTAGE	Couldn't allocate memory.
 */

kern_return_t
ipc_port_alloc(
    ipc_space_t		space,
    mach_port_name_t	*namep,
    ipc_port_t		*portp)
{
    ipc_port_t port;
    mach_port_name_t name;
    kern_return_t kr;

#if	MACH_ASSERT
    uintptr_t buf[IP_CALLSTACK_MAX];
    ipc_port_callstack_init_debug(&buf[0], IP_CALLSTACK_MAX);
#endif /* MACH_ASSERT */

    kr = ipc_object_alloc(space, IOT_PORT,
                  MACH_PORT_TYPE_RECEIVE, 0,
                  &name, (ipc_object_t *) &port);
    if (kr != KERN_SUCCESS)
        return kr;

    /* port and space are locked */
    ipc_port_init(port, space, name);

#if	MACH_ASSERT
    ipc_port_init_debug(port, &buf[0], IP_CALLSTACK_MAX);
#endif	/* MACH_ASSERT */

    /* unlock space after init */
    is_write_unlock(space);

    *namep = name;
    *portp = port;

    return KERN_SUCCESS;
}
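/*
 * Illustrative sketch (not part of the original file): typical caller-side
 * use of ipc_port_alloc().  The routine returns with the port locked and
 * with no extra reference held by the caller, so a caller that needs no
 * further initialization simply unlocks.  Names are assumptions for
 * illustration only.
 */
static __unused kern_return_t
example_alloc_receive_right(ipc_space_t space, mach_port_name_t *namep)
{
    ipc_port_t port;
    kern_return_t kr;

    kr = ipc_port_alloc(space, namep, &port);
    if (kr != KERN_SUCCESS)
        return kr;

    /* port is locked; the space entry already holds the receive right */
    ip_unlock(port);
    return KERN_SUCCESS;
}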
/*
 *	Routine:	ipc_port_alloc_name
 *	Purpose:
 *		Allocate a port, with a specific name.
 *	Conditions:
 *		Nothing locked.  If successful, the port is returned
 *		locked.  (The caller doesn't have a reference.)
 *	Returns:
 *		KERN_SUCCESS		The port is allocated.
 *		KERN_INVALID_TASK	The space is dead.
 *		KERN_NAME_EXISTS	The name already denotes a right.
 *		KERN_RESOURCE_SHORTAGE	Couldn't allocate memory.
 */

kern_return_t
ipc_port_alloc_name(
    ipc_space_t		space,
    mach_port_name_t	name,
    ipc_port_t		*portp)
{
    ipc_port_t port;
    kern_return_t kr;

#if	MACH_ASSERT
    uintptr_t buf[IP_CALLSTACK_MAX];
    ipc_port_callstack_init_debug(&buf[0], IP_CALLSTACK_MAX);
#endif /* MACH_ASSERT */

    kr = ipc_object_alloc_name(space, IOT_PORT,
                   MACH_PORT_TYPE_RECEIVE, 0,
                   name, (ipc_object_t *) &port);
    if (kr != KERN_SUCCESS)
        return kr;

    /* port is locked */
    ipc_port_init(port, space, name);

#if	MACH_ASSERT
    ipc_port_init_debug(port, &buf[0], IP_CALLSTACK_MAX);
#endif	/* MACH_ASSERT */

    *portp = port;

    return KERN_SUCCESS;
}
/*
 *	Routine:	ipc_port_spnotify
 *	Purpose:
 *		Generate send-possible port notifications.
 *	Conditions:
 *		Nothing locked, reference held on port.
 */

void
ipc_port_spnotify(
    ipc_port_t	port)
{
    ipc_port_request_index_t index = 0;
    ipc_table_elems_t size = 0;

    /*
     * If the port has no send-possible request
     * armed, don't bother to lock the port.
     */
    if (port->ip_sprequests == 0)
        return;

    ip_lock(port);

#if IMPORTANCE_INHERITANCE
    if (port->ip_spimportant != 0) {
        port->ip_spimportant = 0;
        if (ipc_port_importance_delta(port, IPID_OPTION_NORMAL, -1) == TRUE) {
            ip_lock(port);
        }
    }
#endif /* IMPORTANCE_INHERITANCE */

    if (port->ip_sprequests == 0) {
        ip_unlock(port);
        return;
    }
    port->ip_sprequests = 0;

revalidate:
    if (ip_active(port)) {
        ipc_port_request_t requests;

        /* table may change each time port unlocked (reload) */
        requests = port->ip_requests;
        assert(requests != IPR_NULL);

        /*
         * no need to go beyond table size when first
         * we entered - those are future notifications.
         */
        if (size == 0)
            size = requests->ipr_size->its_size;

        /* no need to backtrack either */
        while (++index < size) {
            ipc_port_request_t ipr = &requests[index];
            mach_port_name_t name = ipr->ipr_name;
            ipc_port_t soright = IPR_SOR_PORT(ipr->ipr_soright);
            boolean_t armed = IPR_SOR_SPARMED(ipr->ipr_soright);

            if (MACH_PORT_VALID(name) && armed && IP_VALID(soright)) {
                /* claim send-once right - slot still inuse */
                ipr->ipr_soright = IP_NULL;
                ip_unlock(port);

                ipc_notify_send_possible(soright, name);

                ip_lock(port);
                goto revalidate;
            }
        }
    }
    ip_unlock(port);
    return;
}
/*
 *	Routine:	ipc_port_dnnotify
 *	Purpose:
 *		Generate dead name notifications for
 *		all outstanding dead-name and send-
 *		possible requests.
 *	Conditions:
 *		Nothing locked.
 *		Port must be inactive.
 *		Reference held on port.
 */

void
ipc_port_dnnotify(
    ipc_port_t	port)
{
    ipc_port_request_t requests = port->ip_requests;

    assert(!ip_active(port));
    if (requests != IPR_NULL) {
        ipc_table_size_t its = requests->ipr_size;
        ipc_table_elems_t size = its->its_size;
        ipc_port_request_index_t index;
        for (index = 1; index < size; index++) {
            ipc_port_request_t ipr = &requests[index];
            mach_port_name_t name = ipr->ipr_name;
            ipc_port_t soright = IPR_SOR_PORT(ipr->ipr_soright);

            if (MACH_PORT_VALID(name) && IP_VALID(soright)) {
                ipc_notify_dead_name(soright, name);
            }
        }
    }
}
/*
 *	Routine:	ipc_port_destroy
 *	Purpose:
 *		Destroys a port.  Cleans up queued messages.
 *
 *		If the port has a backup, it doesn't get destroyed,
 *		but is sent in a port-destroyed notification to the backup.
 *	Conditions:
 *		The port is locked and alive; nothing else locked.
 *		The caller has a reference, which is consumed.
 *		Afterwards, the port is unlocked and dead.
 */

void
ipc_port_destroy(ipc_port_t port)
{
    ipc_port_t pdrequest, nsrequest;
    ipc_mqueue_t mqueue;
    ipc_kmsg_t kmsg;

#if IMPORTANCE_INHERITANCE
    ipc_importance_task_t release_imp_task = IIT_NULL;
    thread_t self = current_thread();
    boolean_t top = (self->ith_assertions == 0);
    natural_t assertcnt = 0;
#endif /* IMPORTANCE_INHERITANCE */

    assert(ip_active(port));
    /* port->ip_receiver_name is garbage */
    /* port->ip_receiver/port->ip_destination is garbage */

    /* check for a backup port */
    pdrequest = port->ip_pdrequest;

#if IMPORTANCE_INHERITANCE
    /* determine how many assertions to drop and from whom */
    if (port->ip_tempowner != 0) {
        assert(top);
        release_imp_task = port->ip_imp_task;
        if (IIT_NULL != release_imp_task) {
            port->ip_imp_task = IIT_NULL;
            assertcnt = port->ip_impcount;
        }
        /* Otherwise, nothing to drop */
    } else {
        assertcnt = port->ip_impcount;
        if (pdrequest != IP_NULL)
            /* mark in limbo for the journey */
            port->ip_tempowner = 1;
    }

    if (top)
        self->ith_assertions = assertcnt;
#endif /* IMPORTANCE_INHERITANCE */

    if (pdrequest != IP_NULL) {
        /* clear receiver, don't destroy the port */
        (void)ipc_port_clear_receiver(port, FALSE);
        assert(port->ip_in_pset == 0);
        assert(port->ip_mscount == 0);

        /* we assume the ref for pdrequest */
        port->ip_pdrequest = IP_NULL;

        /* make port be in limbo */
        port->ip_receiver_name = MACH_PORT_NULL;
        port->ip_destination = IP_NULL;
        ip_unlock(port);

        /* consumes our refs for port and pdrequest */
        ipc_notify_port_destroyed(pdrequest, port);

        goto drop_assertions;
    }

    port->ip_object.io_bits &= ~IO_BITS_ACTIVE;
    port->ip_timestamp = ipc_port_timestamp();
    nsrequest = port->ip_nsrequest;

    /*
     * The mach_msg_* paths don't hold a port lock, they only hold a
     * reference to the port object. If a thread raced us and is now
     * blocked waiting for message reception on this mqueue (or waiting
     * for ipc_mqueue_full), it will never be woken up. We call
     * ipc_port_clear_receiver() here, _after_ the port has been marked
     * inactive, to wakeup any threads which may be blocked and ensure
     * that no other thread can get lost waiting for a wake up on a
     * port/mqueue that's been destroyed.
     */
    boolean_t reap_msgs = FALSE;
    reap_msgs = ipc_port_clear_receiver(port, TRUE); /* marks mqueue inactive */
    assert(port->ip_in_pset == 0);
    assert(port->ip_mscount == 0);

    /*
     * If the port has a preallocated message buffer and that buffer
     * is not inuse, free it.  If it has an inuse one, then the kmsg
     * free will detect that we freed the association and it can free it
     * like a normal buffer.
     *
     * Once the port is marked inactive we don't need to keep it locked.
     */
    if (IP_PREALLOC(port)) {
        ipc_port_t inuse_port;

        kmsg = port->ip_premsg;
        assert(kmsg != IKM_NULL);
        inuse_port = ikm_prealloc_inuse_port(kmsg);
        IP_CLEAR_PREALLOC(port, kmsg);
        ip_unlock(port);
        if (inuse_port != IP_NULL) {
            assert(inuse_port == port);
        } else {
            ipc_kmsg_free(kmsg);
        }
    } else {
        ip_unlock(port);
    }

    /* throw away no-senders request */
    if (nsrequest != IP_NULL)
        ipc_notify_send_once(nsrequest); /* consumes ref */

    /*
     * Reap any kmsg objects waiting to be destroyed.
     * This must be done after we've released the port lock.
     */
    if (reap_msgs)
        ipc_kmsg_reap_delayed();

    mqueue = &port->ip_messages;

    /* cleanup waitq related resources */
    ipc_mqueue_deinit(mqueue);

    /* generate dead-name notifications */
    ipc_port_dnnotify(port);

    ipc_kobject_destroy(port);

    ip_release(port); /* consume caller's ref */

 drop_assertions:
#if IMPORTANCE_INHERITANCE
    if (release_imp_task != IIT_NULL) {
        if (assertcnt > 0) {
            assert(top);
            self->ith_assertions = 0;
            assert(ipc_importance_task_is_any_receiver_type(release_imp_task));
            ipc_importance_task_drop_internal_assertion(release_imp_task, assertcnt);
        }
        ipc_importance_task_release(release_imp_task);

    } else if (assertcnt > 0) {
        if (top) {
            self->ith_assertions = 0;
            release_imp_task = current_task()->task_imp_base;
            if (ipc_importance_task_is_any_receiver_type(release_imp_task)) {
                ipc_importance_task_drop_internal_assertion(release_imp_task, assertcnt);
            }
        }
    }
#endif /* IMPORTANCE_INHERITANCE */
}
/*
 *	Routine:	ipc_port_check_circularity
 *	Purpose:
 *		Check if queueing "port" in a message for "dest"
 *		would create a circular group of ports and messages.
 *
 *		If no circularity (FALSE returned), then "port"
 *		is changed from "in limbo" to "in transit".
 *
 *		That is, we want to set port->ip_destination == dest,
 *		but guaranteeing that this doesn't create a circle
 *		port->ip_destination->ip_destination->... == port
 *	Conditions:
 *		No ports locked.  References held for "port" and "dest".
 */

boolean_t
ipc_port_check_circularity(
    ipc_port_t	port,
    ipc_port_t	dest)
{
#if IMPORTANCE_INHERITANCE
    /* adjust importance counts at the same time */
    return ipc_importance_check_circularity(port, dest);
#else
    ipc_port_t base;

    assert(port != IP_NULL);
    assert(dest != IP_NULL);

    if (port == dest)
        return TRUE;
    base = dest;

    /*
     *	First try a quick check that can run in parallel.
     *	No circularity if dest is not in transit.
     */
    ip_lock(port);
    if (ip_lock_try(dest)) {
        if (!ip_active(dest) ||
            (dest->ip_receiver_name != MACH_PORT_NULL) ||
            (dest->ip_destination == IP_NULL))
            goto not_circular;

        /* dest is in transit; further checking necessary */

        ip_unlock(dest);
    }
    ip_unlock(port);

    ipc_port_multiple_lock(); /* massive serialization */

    /*
     *	Search for the end of the chain (a port not in transit),
     *	acquiring locks along the way.
     */

    for (;;) {
        ip_lock(base);

        if (!ip_active(base) ||
            (base->ip_receiver_name != MACH_PORT_NULL) ||
            (base->ip_destination == IP_NULL))
            break;

        base = base->ip_destination;
    }

    /* all ports in chain from dest to base, inclusive, are locked */

    if (port == base) {
        /* circularity detected! */

        ipc_port_multiple_unlock();

        /* port (== base) is in limbo */

        assert(ip_active(port));
        assert(port->ip_receiver_name == MACH_PORT_NULL);
        assert(port->ip_destination == IP_NULL);

        while (dest != IP_NULL) {
            ipc_port_t next;

            /* dest is in transit or in limbo */

            assert(ip_active(dest));
            assert(dest->ip_receiver_name == MACH_PORT_NULL);

            next = dest->ip_destination;
            ip_unlock(dest);
            dest = next;
        }

        return TRUE;
    }

    /*
     *	The guarantee:  lock port while the entire chain is locked.
     *	Once port is locked, we can take a reference to dest,
     *	add port to the chain, and unlock everything.
     */

    ip_lock(port);
    ipc_port_multiple_unlock();

not_circular:

    /* port is in limbo */

    assert(ip_active(port));
    assert(port->ip_receiver_name == MACH_PORT_NULL);
    assert(port->ip_destination == IP_NULL);

    ip_reference(dest);
    port->ip_destination = dest;

    /* now unlock chain */

    ip_unlock(port);

    for (;;) {
        if (dest == base)
            break;

        /* port is in transit */

        assert(ip_active(dest));
        assert(dest->ip_receiver_name == MACH_PORT_NULL);
        assert(dest->ip_destination != IP_NULL);

        port = dest->ip_destination;
        ip_unlock(dest);
        dest = port;
    }

    /* base is not in transit */
    assert(!ip_active(base) ||
           (base->ip_receiver_name != MACH_PORT_NULL) ||
           (base->ip_destination == IP_NULL));

    ip_unlock(base);

    return FALSE;
#endif /* !IMPORTANCE_INHERITANCE */
}
/*
 *	Routine:	ipc_port_impcount_delta
 *	Purpose:
 *		Adjust only the importance count associated with a port.
 *		If there are any adjustments to be made to receiver task,
 *		those are handled elsewhere.
 *
 *		For now, be defensive during deductions to make sure the
 *		impcount for the port doesn't underflow zero.  This will
 *		go away when the port boost addition is made atomic (see
 *		note in ipc_port_importance_delta()).
 *	Conditions:
 *		The port is referenced and locked.
 *		Nothing else is locked.
 */
mach_port_delta_t
ipc_port_impcount_delta(
    ipc_port_t		port,
    mach_port_delta_t	delta,
    ipc_port_t		__unused base)
{
    mach_port_delta_t	absdelta;

    if (!ip_active(port)) {
        return 0;
    }

    /* adding/doing nothing is easy */
    if (delta >= 0) {
        port->ip_impcount += delta;
        return delta;
    }

    absdelta = 0 - delta;
    if (port->ip_impcount >= absdelta) {
        port->ip_impcount -= absdelta;
        return delta;
    }

#if (DEVELOPMENT || DEBUG)
    if (port->ip_receiver_name != MACH_PORT_NULL) {
        task_t target_task = port->ip_receiver->is_task;
        ipc_importance_task_t target_imp = target_task->task_imp_base;
        const char *target_procname;
        int target_pid;

        if (target_imp != IIT_NULL) {
            target_procname = target_imp->iit_procname;
            target_pid = target_imp->iit_bsd_pid;
        } else {
            target_procname = "unknown";
            target_pid = -1;
        }
        printf("Over-release of importance assertions for port 0x%x receiver pid %d (%s), "
               "dropping %d assertion(s) but port only has %d remaining.\n",
               port->ip_receiver_name,
               target_pid, target_procname,
               absdelta, port->ip_impcount);

    } else if (base != IP_NULL) {
        task_t target_task = base->ip_receiver->is_task;
        ipc_importance_task_t target_imp = target_task->task_imp_base;
        const char *target_procname;
        int target_pid;

        if (target_imp != IIT_NULL) {
            target_procname = target_imp->iit_procname;
            target_pid = target_imp->iit_bsd_pid;
        } else {
            target_procname = "unknown";
            target_pid = -1;
        }
        printf("Over-release of importance assertions for port 0x%lx "
               "enqueued on port 0x%x with receiver pid %d (%s), "
               "dropping %d assertion(s) but port only has %d remaining.\n",
               (unsigned long)VM_KERNEL_UNSLIDE_OR_PERM((uintptr_t)port),
               base->ip_receiver_name,
               target_pid, target_procname,
               absdelta, port->ip_impcount);
    }
#endif

    delta = 0 - port->ip_impcount;
    port->ip_impcount = 0;
    return delta;
}
/*
 *	Routine:	ipc_port_importance_delta_internal
 *	Purpose:
 *		Adjust the importance count through the given port.
 *		If the port is in transit, apply the delta throughout
 *		the chain. Determine if there is a task at the
 *		base of the chain that wants/needs to be adjusted,
 *		and if so, apply the delta.
 *	Conditions:
 *		The port is referenced and locked on entry.
 *		Importance may be locked.
 *		Nothing else is locked.
 *		The lock may be dropped on exit.
 *		Returns TRUE if lock was dropped.
 */
#if IMPORTANCE_INHERITANCE

boolean_t
ipc_port_importance_delta_internal(
    ipc_port_t		port,
    natural_t		options,
    mach_port_delta_t	*deltap,
    ipc_importance_task_t	*imp_task)
{
    ipc_port_t next, base;
    boolean_t dropped = FALSE;

    *imp_task = IIT_NULL;

    if (*deltap == 0)
        return FALSE;

    assert(options == IPID_OPTION_NORMAL || options == IPID_OPTION_SENDPOSSIBLE);

    base = port;

    /* if port is in transit, have to search for end of chain */
    if (ip_active(port) &&
        port->ip_destination != IP_NULL &&
        port->ip_receiver_name == MACH_PORT_NULL) {

        dropped = TRUE;

        ip_unlock(port);
        ipc_port_multiple_lock(); /* massive serialization */
        ip_lock(base);

        while(ip_active(base) &&
              base->ip_destination != IP_NULL &&
              base->ip_receiver_name == MACH_PORT_NULL) {

            base = base->ip_destination;
            ip_lock(base);
        }
        ipc_port_multiple_unlock();
    }

    /*
     * If the port lock is dropped b/c the port is in transit, there is a
     * race window where another thread can drain messages and/or fire a
     * send possible notification before we get here.
     *
     * We solve this race by checking to see if our caller armed the send
     * possible notification, whether or not it's been fired yet, and
     * whether or not we've already set the port's ip_spimportant bit. If
     * we don't need a send-possible boost, then we'll just apply a
     * harmless 0-boost to the port.
     */
    if (options & IPID_OPTION_SENDPOSSIBLE) {
        assert(*deltap == 1);
        if (port->ip_sprequests && port->ip_spimportant == 0)
            port->ip_spimportant = 1;
        else
            *deltap = 0;
    }

    /* unlock down to the base, adjusting boost(s) at each level */
    for (;;) {
        *deltap = ipc_port_impcount_delta(port, *deltap, base);

        if (port == base)
            break;

        /* port is in transit */
        assert(port->ip_tempowner == 0);
        next = port->ip_destination;
        ip_unlock(port);
        port = next;
    }

    /* find the task (if any) to boost according to the base */
    if (ip_active(base)) {
        if (base->ip_tempowner != 0) {
            if (IIT_NULL != base->ip_imp_task)
                *imp_task = base->ip_imp_task;
            /* otherwise don't boost */

        } else if (base->ip_receiver_name != MACH_PORT_NULL) {
            ipc_space_t space = base->ip_receiver;

            /* only spaces with boost-accepting tasks */
            if (space->is_task != TASK_NULL &&
                ipc_importance_task_is_any_receiver_type(space->is_task->task_imp_base)) {
                *imp_task = space->is_task->task_imp_base;
            }
        }
    }

    /*
     * Only the base is locked.  If we have to hold or drop task
     * importance assertions, we'll have to drop that lock as well.
     */
    if (*imp_task != IIT_NULL) {
        /* take a reference before unlocking base */
        ipc_importance_task_reference(*imp_task);
    }

    if (dropped == TRUE) {
        ip_unlock(base);
    }

    return dropped;
}
#endif /* IMPORTANCE_INHERITANCE */
/*
 *	Routine:	ipc_port_importance_delta
 *	Purpose:
 *		Adjust the importance count through the given port.
 *		If the port is in transit, apply the delta throughout
 *		the chain.
 *
 *		If there is a task at the base of the chain that wants/needs
 *		to be adjusted, apply the delta.
 *	Conditions:
 *		The port is referenced and locked on entry.
 *		Nothing else is locked.
 *		The lock may be dropped on exit.
 *		Returns TRUE if lock was dropped.
 */
#if IMPORTANCE_INHERITANCE

boolean_t
ipc_port_importance_delta(
    ipc_port_t		port,
    natural_t		options,
    mach_port_delta_t	delta)
{
    ipc_importance_task_t imp_task = IIT_NULL;
    boolean_t dropped;

    dropped = ipc_port_importance_delta_internal(port, options, &delta, &imp_task);

    if (IIT_NULL == imp_task || delta == 0)
        return dropped;

    if (!dropped) {
        dropped = TRUE;
        ip_unlock(port);
    }

    assert(ipc_importance_task_is_any_receiver_type(imp_task));

    if (delta > 0)
        ipc_importance_task_hold_internal_assertion(imp_task, delta);
    else
        ipc_importance_task_drop_internal_assertion(imp_task, -delta);

    ipc_importance_task_release(imp_task);
    return dropped;
}
#endif /* IMPORTANCE_INHERITANCE */
/*
 *	Routine:	ipc_port_lookup_notify
 *	Purpose:
 *		Make a send-once notify port from a receive right.
 *		Returns IP_NULL if name doesn't denote a receive right.
 *	Conditions:
 *		The space must be locked (read or write) and active.
 *		Being the active space, we can rely on thread server_id
 *		context to give us the proper server level sub-order
 *		within the space.
 */

ipc_port_t
ipc_port_lookup_notify(
    ipc_space_t		space,
    mach_port_name_t	name)
{
    ipc_port_t port;
    ipc_entry_t entry;

    assert(is_active(space));

    entry = ipc_entry_lookup(space, name);
    if (entry == IE_NULL)
        return IP_NULL;

    if ((entry->ie_bits & MACH_PORT_TYPE_RECEIVE) == 0)
        return IP_NULL;

    __IGNORE_WCASTALIGN(port = (ipc_port_t) entry->ie_object);
    assert(port != IP_NULL);

    ip_lock(port);
    assert(ip_active(port));
    assert(port->ip_receiver_name == name);
    assert(port->ip_receiver == space);

    ip_reference(port);
    port->ip_sorights++;
    ip_unlock(port);

    return port;
}
/*
 *	Routine:	ipc_port_make_send_locked
 *	Purpose:
 *		Make a naked send right from a receive right.
 *	Conditions:
 *		port locked and active.
 */
ipc_port_t
ipc_port_make_send_locked(
    ipc_port_t port)
{
    assert(ip_active(port));
    port->ip_mscount++;
    port->ip_srights++;
    ip_reference(port);
    return port;
}

/*
 *	Routine:	ipc_port_make_send
 *	Purpose:
 *		Make a naked send right from a receive right.
 */

ipc_port_t
ipc_port_make_send(
    ipc_port_t port)
{
    if (!IP_VALID(port))
        return port;

    ip_lock(port);
    if (ip_active(port)) {
        port->ip_mscount++;
        port->ip_srights++;
        ip_reference(port);
        ip_unlock(port);
        return port;
    }
    ip_unlock(port);
    return IP_DEAD;
}
/*
 *	Routine:	ipc_port_copy_send
 *	Purpose:
 *		Make a naked send right from another naked send right.
 *			IP_NULL		-> IP_NULL
 *			IP_DEAD		-> IP_DEAD
 *			dead port	-> IP_DEAD
 *			live port	-> port + ref
 *	Conditions:
 *		Nothing locked except possibly a space.
 */

ipc_port_t
ipc_port_copy_send(
    ipc_port_t port)
{
    ipc_port_t sright;

    if (!IP_VALID(port))
        return port;

    ip_lock(port);
    if (ip_active(port)) {
        assert(port->ip_srights > 0);

        ip_reference(port);
        port->ip_srights++;
        sright = port;
    } else
        sright = IP_DEAD;
    ip_unlock(port);

    return sright;
}
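/*
 * Illustrative sketch (not part of the original file): the send-right
 * reference discipline implied by the helpers above.  Every right produced
 * by ipc_port_make_send()/ipc_port_copy_send() must eventually be consumed
 * by ipc_port_release_send().  The function name is an assumption for
 * illustration only.
 */
static __unused void
example_send_right_lifecycle(ipc_port_t port)
{
    ipc_port_t sright = ipc_port_make_send(port);	/* +1 ip_srights, +1 ref */
    ipc_port_t copy   = ipc_port_copy_send(sright);	/* +1 ip_srights, +1 ref */

    if (IP_VALID(copy))
        ipc_port_release_send(copy);	/* may post a no-senders notification */
    if (IP_VALID(sright))
        ipc_port_release_send(sright);
}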
/*
 *	Routine:	ipc_port_copyout_send
 *	Purpose:
 *		Copyout a naked send right (possibly null/dead),
 *		or if that fails, destroy the right.
 *	Conditions:
 *		Nothing locked.
 */

mach_port_name_t
ipc_port_copyout_send(
    ipc_port_t	sright,
    ipc_space_t	space)
{
    mach_port_name_t name;

    if (IP_VALID(sright)) {
        kern_return_t kr;

        kr = ipc_object_copyout(space, (ipc_object_t) sright,
                    MACH_MSG_TYPE_PORT_SEND, TRUE, &name);
        if (kr != KERN_SUCCESS) {
            ipc_port_release_send(sright);

            if (kr == KERN_INVALID_CAPABILITY)
                name = MACH_PORT_DEAD;
            else
                name = MACH_PORT_NULL;
        }
    } else
        name = CAST_MACH_PORT_TO_NAME(sright);

    return name;
}
/*
 *	Routine:	ipc_port_release_send
 *	Purpose:
 *		Release a naked send right.
 *		Consumes a ref for the port.
 *	Conditions:
 *		Nothing locked.
 */

void
ipc_port_release_send(
    ipc_port_t	port)
{
    ipc_port_t nsrequest = IP_NULL;
    mach_port_mscount_t mscount;

    if (!IP_VALID(port))
        return;

    ip_lock(port);

    assert(port->ip_srights > 0);
    port->ip_srights--;

    if (!ip_active(port)) {
        ip_unlock(port);
        ip_release(port);
        return;
    }

    if (port->ip_srights == 0 &&
        port->ip_nsrequest != IP_NULL) {
        nsrequest = port->ip_nsrequest;
        port->ip_nsrequest = IP_NULL;
        mscount = port->ip_mscount;
        ip_unlock(port);
        ip_release(port);
        ipc_notify_no_senders(nsrequest, mscount);
    } else {
        ip_unlock(port);
        ip_release(port);
    }
}
/*
 *	Routine:	ipc_port_make_sonce_locked
 *	Purpose:
 *		Make a naked send-once right from a receive right.
 *	Conditions:
 *		The port is locked and active.
 */

ipc_port_t
ipc_port_make_sonce_locked(
    ipc_port_t port)
{
    assert(ip_active(port));
    port->ip_sorights++;
    ip_reference(port);
    return port;
}

/*
 *	Routine:	ipc_port_make_sonce
 *	Purpose:
 *		Make a naked send-once right from a receive right.
 *	Conditions:
 *		The port is not locked.
 */

ipc_port_t
ipc_port_make_sonce(
    ipc_port_t port)
{
    if (!IP_VALID(port))
        return port;

    ip_lock(port);
    if (ip_active(port)) {
        port->ip_sorights++;
        ip_reference(port);
        ip_unlock(port);
        return port;
    }
    ip_unlock(port);
    return IP_DEAD;
}
/*
 *	Routine:	ipc_port_release_sonce
 *	Purpose:
 *		Release a naked send-once right.
 *		Consumes a ref for the port.
 *
 *		In normal situations, this is never used.
 *		Send-once rights are only consumed when
 *		a message (possibly a send-once notification)
 *		is sent to them.
 *	Conditions:
 *		Nothing locked except possibly a space.
 */

void
ipc_port_release_sonce(
    ipc_port_t	port)
{
    if (!IP_VALID(port))
        return;

    ip_lock(port);

    assert(port->ip_sorights > 0);

    port->ip_sorights--;

    ip_unlock(port);
    ip_release(port);
}
/*
 *	Routine:	ipc_port_release_receive
 *	Purpose:
 *		Release a naked (in limbo or in transit) receive right.
 *		Consumes a ref for the port; destroys the port.
 *	Conditions:
 *		Nothing locked.
 */

void
ipc_port_release_receive(
    ipc_port_t	port)
{
    ipc_port_t dest;

    if (!IP_VALID(port))
        return;

    ip_lock(port);
    assert(ip_active(port));
    assert(port->ip_receiver_name == MACH_PORT_NULL);
    dest = port->ip_destination;

    ipc_port_destroy(port); /* consumes ref, unlocks */

    if (dest != IP_NULL)
        ip_release(dest);
}
/*
 *	Routine:	ipc_port_alloc_special
 *	Purpose:
 *		Allocate a port in a special space.
 *		The new port is returned with one ref.
 *		If unsuccessful, IP_NULL is returned.
 *	Conditions:
 *		Nothing locked.
 */

ipc_port_t
ipc_port_alloc_special(
    ipc_space_t	space)
{
    ipc_port_t port;

    __IGNORE_WCASTALIGN(port = (ipc_port_t) io_alloc(IOT_PORT));
    if (port == IP_NULL)
        return IP_NULL;

#if	MACH_ASSERT
    uintptr_t buf[IP_CALLSTACK_MAX];
    ipc_port_callstack_init_debug(&buf[0], IP_CALLSTACK_MAX);
#endif /* MACH_ASSERT */

    bzero((char *)port, sizeof(*port));
    io_lock_init(&port->ip_object);
    port->ip_references = 1;
    port->ip_object.io_bits = io_makebits(TRUE, IOT_PORT, 0);

    ipc_port_init(port, space, 1);

#if	MACH_ASSERT
    ipc_port_init_debug(port, &buf[0], IP_CALLSTACK_MAX);
#endif	/* MACH_ASSERT */

    return port;
}
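/*
 * Illustrative sketch (not part of the original file): kernel subsystems
 * typically pair ipc_port_alloc_special() with ipc_kobject_set() to build
 * a kernel-object port in the kernel IPC space.  IKOT_NONE is used here
 * only as a stand-in; real callers pass their own IKOT_* type and object.
 * The function name is an assumption for illustration only.
 */
static __unused ipc_port_t
example_make_kobject_port(void *object)
{
    ipc_port_t port = ipc_port_alloc_special(ipc_space_kernel);

    if (port != IP_NULL)
        ipc_kobject_set(port, (ipc_kobject_t) object, IKOT_NONE);
    return port;
}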
/*
 *	Routine:	ipc_port_dealloc_special
 *	Purpose:
 *		Deallocate a port in a special space.
 *		Consumes one ref for the port.
 *	Conditions:
 *		Nothing locked.
 */

void
ipc_port_dealloc_special(
    ipc_port_t			port,
    __assert_only ipc_space_t	space)
{
    ip_lock(port);
    assert(ip_active(port));
//	assert(port->ip_receiver_name != MACH_PORT_NULL);
    assert(port->ip_receiver == space);

    /*
     *	We clear ip_receiver_name and ip_receiver to simplify
     *	the ipc_space_kernel check in ipc_mqueue_send.
     */

    port->ip_receiver_name = MACH_PORT_NULL;
    port->ip_receiver = IS_NULL;

    /* relevant part of ipc_port_clear_receiver */
    ipc_port_set_mscount(port, 0);
    port->ip_messages.imq_seqno = 0;

    ipc_port_destroy(port);
}
/*
 *	Routine:	ipc_port_finalize
 *	Purpose:
 *		Called on last reference deallocate to
 *		free any remaining data associated with the
 *		port.
 *	Conditions:
 *		Nothing locked.
 */
void
ipc_port_finalize(
    ipc_port_t	port)
{
    ipc_port_request_t requests = port->ip_requests;

    assert(!ip_active(port));
    if (requests != IPR_NULL) {
        ipc_table_size_t its = requests->ipr_size;
        it_requests_free(its, requests);
        port->ip_requests = IPR_NULL;
    }

    ipc_mqueue_deinit(&port->ip_messages);

#if	MACH_ASSERT
    ipc_port_track_dealloc(port);
#endif	/* MACH_ASSERT */
}
#if	MACH_ASSERT
#include <kern/machine.h>

/*
 *	Keep a list of all allocated ports.
 *	Allocation is intercepted via ipc_port_init;
 *	deallocation is intercepted via io_free.
 */
queue_head_t	port_alloc_queue;
lck_spin_t	port_alloc_queue_lock;

unsigned long	port_count = 0;
unsigned long	port_count_warning = 20000;
unsigned long	port_timestamp = 0;

void		db_port_stack_trace(
			ipc_port_t	port);
void		db_ref(
			int		refs);
int		db_port_walk(
			unsigned int	verbose,
			unsigned int	display,
			unsigned int	ref_search,
			unsigned int	ref_target);

/*
 *	Initialize global state needed for run-time
 *	port debugging.
 */
void
ipc_port_debug_init(void)
{
    queue_init(&port_alloc_queue);
    lck_spin_init(&port_alloc_queue_lock, &ipc_lck_grp, &ipc_lck_attr);

    if (!PE_parse_boot_argn("ipc_portbt", &ipc_portbt, sizeof (ipc_portbt)))
        ipc_portbt = 0;
}

#ifdef MACH_BSD
extern int proc_pid(struct proc *);
#endif /* MACH_BSD */

/*
 *	Initialize all of the debugging state in a port.
 *	Insert the port into a global list of all allocated ports.
 */
void
ipc_port_init_debug(
    ipc_port_t	port,
    uintptr_t	*callstack,
    unsigned int	callstack_max)
{
    unsigned int	i;

    port->ip_thread = current_thread();
    port->ip_timetrack = port_timestamp++;
    for (i = 0; i < callstack_max; ++i)
        port->ip_callstack[i] = callstack[i];
    for (i = 0; i < IP_NSPARES; ++i)
        port->ip_spares[i] = 0;

#ifdef MACH_BSD
    task_t task = current_task();
    if (task != TASK_NULL) {
        struct proc *proc = (struct proc *) get_bsdtask_info(task);
        if (proc)
            port->ip_spares[0] = proc_pid(proc);
    }
#endif /* MACH_BSD */

    lck_spin_lock(&port_alloc_queue_lock);
    ++port_count;
    if (port_count_warning > 0 && port_count >= port_count_warning)
        assert(port_count < port_count_warning);
    queue_enter(&port_alloc_queue, port, ipc_port_t, ip_port_links);
    lck_spin_unlock(&port_alloc_queue_lock);
}

/*
 *	Routine:	ipc_port_callstack_init_debug
 *	Purpose:
 *		Calls the machine-dependent routine to
 *		fill in an array with up to IP_CALLSTACK_MAX
 *		levels of return pc information
 *	Conditions:
 *		May block (via copyin)
 */
void
ipc_port_callstack_init_debug(
    uintptr_t	*callstack,
    unsigned int	callstack_max)
{
    unsigned int	i;

    /* guarantee the callstack is initialized */
    for (i = 0; i < callstack_max; i++)
        callstack[i] = 0;

    if (ipc_portbt)
        machine_callstack(callstack, callstack_max);
}

/*
 *	Remove a port from the queue of allocated ports.
 *	This routine should be invoked JUST prior to
 *	deallocating the actual memory occupied by the port.
 */
void
ipc_port_track_dealloc(
    __unused ipc_port_t	port)
{
}

void
ipc_port_track_dealloc(
    ipc_port_t		port)
{
    lck_spin_lock(&port_alloc_queue_lock);
    assert(port_count > 0);
    --port_count;
    queue_remove(&port_alloc_queue, port, ipc_port_t, ip_port_links);
    lck_spin_unlock(&port_alloc_queue_lock);
}

#endif	/* MACH_ASSERT */