 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 * @OSF_FREE_COPYRIGHT@
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 * Carnegie Mellon requests users of this software to return to
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * File:	ipc/ipc_port.c
 * Functions to manipulate IPC ports.
#include <zone_debug.h>
#include <mach_assert.h>

#include <mach/port.h>
#include <mach/kern_return.h>
#include <kern/ipc_kobject.h>
#include <kern/thread.h>
#include <kern/misc_protos.h>
#include <kern/waitq.h>
#include <ipc/ipc_entry.h>
#include <ipc/ipc_space.h>
#include <ipc/ipc_object.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_pset.h>
#include <ipc/ipc_kmsg.h>
#include <ipc/ipc_mqueue.h>
#include <ipc/ipc_notify.h>
#include <ipc/ipc_table.h>
#include <ipc/ipc_importance.h>

#include <security/mac_mach_internal.h>

decl_lck_spin_data(, ipc_port_multiple_lock_data)
ipc_port_timestamp_t ipc_port_timestamp_data;

void ipc_port_init_debug(
    uintptr_t *callstack,
    unsigned int callstack_max);

void ipc_port_callstack_init_debug(
    uintptr_t *callstack,
    unsigned int callstack_max);

#endif /* MACH_ASSERT */

ipc_port_release(ipc_port_t port)

ipc_port_reference(ipc_port_t port)

 * Routine:	ipc_port_timestamp
 *	Retrieve a timestamp value.

ipc_port_timestamp(void)
    return OSIncrementAtomic(&ipc_port_timestamp_data);
 * Routine:	ipc_port_request_alloc
 *	Try to allocate a request slot.
 *	If successful, returns the request index.
 *	Otherwise returns zero.
 *	The port is locked and active.
 *	KERN_SUCCESS		A request index was found.
 *	KERN_NO_SPACE		No index allocated.

#if IMPORTANCE_INHERITANCE
ipc_port_request_alloc(
    mach_port_name_t name,
    boolean_t send_possible,
    ipc_port_request_index_t *indexp,
    boolean_t *importantp)
#else
ipc_port_request_alloc(
    mach_port_name_t name,
    boolean_t send_possible,
    ipc_port_request_index_t *indexp)
#endif /* IMPORTANCE_INHERITANCE */

    ipc_port_request_t ipr, table;
    ipc_port_request_index_t index;

#if IMPORTANCE_INHERITANCE
#endif /* IMPORTANCE_INHERITANCE */

    assert(ip_active(port));
    assert(name != MACH_PORT_NULL);
    assert(soright != IP_NULL);

    table = port->ip_requests;

    if (table == IPR_NULL)
        return KERN_NO_SPACE;

    index = table->ipr_next;
    if (index == 0)
        return KERN_NO_SPACE;

    ipr = &table[index];
    assert(ipr->ipr_name == MACH_PORT_NULL);

    table->ipr_next = ipr->ipr_next;
    ipr->ipr_name = name;

    mask |= IPR_SOR_SPREQ_MASK;
    mask |= IPR_SOR_SPARM_MASK;
    if (port->ip_sprequests == 0) {
        port->ip_sprequests = 1;
#if IMPORTANCE_INHERITANCE
        /* TODO: Live importance support in send-possible */
        if (port->ip_impdonation != 0 &&
            port->ip_spimportant == 0 &&
            (task_is_importance_donor(current_task()))) {
#endif /* IMPORTANCE_INHERITANCE */

    ipr->ipr_soright = IPR_SOR_MAKE(soright, mask);
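/*
 * Illustrative sketch (not part of this file's build): the request slots
 * allocated above back user-visible notification registrations.  Assuming a
 * user-space caller holding a right named "watched", a dead-name request
 * that ends up in this table can be registered through the public
 * mach_port_request_notification() interface roughly like this (the helper
 * name is hypothetical).
 */
#if 0
#include <mach/mach.h>

static kern_return_t
example_arm_dead_name(mach_port_name_t watched, mach_port_t reply_port)
{
    mach_port_t previous = MACH_PORT_NULL;

    /* ask for a dead-name notification on "watched", delivered to reply_port */
    return mach_port_request_notification(mach_task_self(), watched,
        MACH_NOTIFY_DEAD_NAME, 0, reply_port,
        MACH_MSG_TYPE_MAKE_SEND_ONCE, &previous);
}
#endif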
 * Routine:	ipc_port_request_grow
 *	Grow a port's table of requests.
 *	The port must be locked and active.
 *	Nothing else locked; will allocate memory.
 *	Upon return the port is unlocked.
 *	KERN_SUCCESS		Grew the table.
 *	KERN_SUCCESS		Somebody else grew the table.
 *	KERN_SUCCESS		The port died.
 *	KERN_RESOURCE_SHORTAGE	Couldn't allocate new table.
 *	KERN_NO_SPACE		Couldn't grow to desired size

ipc_port_request_grow(
    ipc_table_elems_t target_size)

    ipc_table_size_t its;
    ipc_port_request_t otable, ntable;

    assert(ip_active(port));

    otable = port->ip_requests;
    if (otable == IPR_NULL)
        its = &ipc_table_requests[0];
    else
        its = otable->ipr_size + 1;

    if (target_size != ITS_SIZE_NONE) {
        if ((otable != IPR_NULL) &&
            (target_size <= otable->ipr_size->its_size)) {

    while ((its->its_size) && (its->its_size < target_size)) {

    if (its->its_size == 0) {
        return KERN_NO_SPACE;

    if ((its->its_size == 0) ||
        ((ntable = it_requests_alloc(its)) == IPR_NULL)) {
        return KERN_RESOURCE_SHORTAGE;

     * Check that port is still active and that nobody else
     * has slipped in and grown the table on us.  Note that
     * just checking if the current table pointer == otable
     * isn't sufficient; must check ipr_size.
    if (ip_active(port) && (port->ip_requests == otable) &&
        ((otable == IPR_NULL) || (otable->ipr_size + 1 == its))) {
        ipc_table_size_t oits;
        ipc_table_elems_t osize, nsize;
        ipc_port_request_index_t free, i;

        /* copy old table to new table */

        if (otable != IPR_NULL) {
            oits = otable->ipr_size;
            osize = oits->its_size;
            free = otable->ipr_next;

            (void) memcpy((void *)(ntable + 1),
                (const void *)(otable + 1),
                (osize - 1) * sizeof(struct ipc_port_request));

        nsize = its->its_size;
        assert(nsize > osize);

        /* add new elements to the new table's free list */

        for (i = osize; i < nsize; i++) {
            ipc_port_request_t ipr = &ntable[i];

            ipr->ipr_name = MACH_PORT_NULL;
            ipr->ipr_next = free;

        ntable->ipr_next = free;
        ntable->ipr_size = its;
        port->ip_requests = ntable;

        if (otable != IPR_NULL) {
            it_requests_free(oits, otable);

        it_requests_free(its, ntable);
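/*
 * Illustrative sketch (hypothetical helper, not part of this file): the grow
 * path above follows a common pattern -- pick the next table size, allocate,
 * copy the live entries, thread the freshly added slots onto the free list,
 * then retire the old table.  A minimal user-space analogue of that pattern:
 */
#if 0
#include <stdlib.h>
#include <string.h>

struct ex_slot { unsigned next; /* free-list link; 0 terminates */ };

static struct ex_slot *
example_grow(struct ex_slot *old, size_t osize, size_t nsize, unsigned *freep)
{
    struct ex_slot *new = calloc(nsize, sizeof(*new));
    if (new == NULL)
        return NULL;
    if (old != NULL)
        memcpy(new, old, osize * sizeof(*old));
    /* link the new tail slots onto the front of the free list */
    for (size_t i = osize; i < nsize; i++) {
        new[i].next = *freep;
        *freep = (unsigned)i;
    }
    free(old);
    return new;
}
#endif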
 * Routine:	ipc_port_request_sparm
 *	Arm delayed send-possible request.
 *	The port must be locked and active.
 *	Returns TRUE if the request was armed
 *	(or armed with importance in that version).

#if IMPORTANCE_INHERITANCE
ipc_port_request_sparm(
    __assert_only mach_port_name_t name,
    ipc_port_request_index_t index,
    mach_msg_option_t option)
#else
ipc_port_request_sparm(
    __assert_only mach_port_name_t name,
    ipc_port_request_index_t index)
#endif /* IMPORTANCE_INHERITANCE */

    if (index != IE_REQ_NONE) {
        ipc_port_request_t ipr, table;

        assert(ip_active(port));

        table = port->ip_requests;
        assert(table != IPR_NULL);

        ipr = &table[index];
        assert(ipr->ipr_name == name);

        if (IPR_SOR_SPREQ(ipr->ipr_soright)) {
            ipr->ipr_soright = IPR_SOR_MAKE(ipr->ipr_soright, IPR_SOR_SPARM_MASK);
            port->ip_sprequests = 1;
#if IMPORTANCE_INHERITANCE
            if (((option & MACH_SEND_NOIMPORTANCE) == 0) &&
                (port->ip_impdonation != 0) &&
                (port->ip_spimportant == 0) &&
                (((option & MACH_SEND_IMPORTANCE) != 0) ||
                 (task_is_importance_donor(current_task())))) {
#endif /* IMPORTANCE_INHERITANCE */
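/*
 * Illustrative sketch (hypothetical EX_* macros): IPR_SOR_MAKE/IPR_SOR_SPREQ/
 * IPR_SOR_SPARMED above pack the send-possible request/armed flags into the
 * low bits of the (aligned) send-once right pointer.  The general low-bit
 * tagging trick, stripped of the IPC specifics, looks like this:
 */
#if 0
#include <stdint.h>

#define EX_FLAG_MASK   0x3UL    /* low two bits of an aligned pointer */
#define EX_MAKE(p, f)  ((void *)(((uintptr_t)(p) & ~EX_FLAG_MASK) | ((f) & EX_FLAG_MASK)))
#define EX_PTR(v)      ((void *)((uintptr_t)(v) & ~EX_FLAG_MASK))
#define EX_FLAGS(v)    ((unsigned)((uintptr_t)(v) & EX_FLAG_MASK))
#endif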
 * Routine:	ipc_port_request_type
 *	Determine the type(s) of port requests enabled for a name.
 *	The port must be locked or inactive (to avoid table growth).
 *	The index must not be IE_REQ_NONE and must correspond to the name in question.

ipc_port_request_type(
    __assert_only mach_port_name_t name,
    ipc_port_request_index_t index)

    ipc_port_request_t ipr, table;
    mach_port_type_t type = 0;

    table = port->ip_requests;
    assert(table != IPR_NULL);

    assert(index != IE_REQ_NONE);
    ipr = &table[index];
    assert(ipr->ipr_name == name);

    if (IP_VALID(IPR_SOR_PORT(ipr->ipr_soright))) {
        type |= MACH_PORT_TYPE_DNREQUEST;

        if (IPR_SOR_SPREQ(ipr->ipr_soright)) {
            type |= MACH_PORT_TYPE_SPREQUEST;

            if (!IPR_SOR_SPARMED(ipr->ipr_soright)) {
                type |= MACH_PORT_TYPE_SPREQUEST_DELAYED;
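/*
 * Illustrative sketch: the request-type bits computed above surface to user
 * space through mach_port_type().  A caller can check, for example, whether a
 * dead-name request is still registered on one of its names (hypothetical
 * helper, assuming a valid name in the caller's IPC space):
 */
#if 0
#include <mach/mach.h>
#include <stdbool.h>

static bool
example_has_dnrequest(mach_port_name_t name)
{
    mach_port_type_t type = 0;

    if (mach_port_type(mach_task_self(), name, &type) != KERN_SUCCESS)
        return false;
    return (type & MACH_PORT_TYPE_DNREQUEST) != 0;
}
#endif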
 * Routine:	ipc_port_request_cancel
 *	Cancel a dead-name/send-possible request and return the send-once right.
 *	The port must be locked and active.
 *	The index must not be IE_REQ_NONE and must correspond with name.

ipc_port_request_cancel(
    __assert_only mach_port_name_t name,
    ipc_port_request_index_t index)

    ipc_port_request_t ipr, table;
    ipc_port_t request = IP_NULL;

    assert(ip_active(port));
    table = port->ip_requests;
    assert(table != IPR_NULL);

    assert(index != IE_REQ_NONE);
    ipr = &table[index];
    assert(ipr->ipr_name == name);
    request = IPR_SOR_PORT(ipr->ipr_soright);

    /* return ipr to the free list inside the table */
    ipr->ipr_name = MACH_PORT_NULL;
    ipr->ipr_next = table->ipr_next;
    table->ipr_next = index;
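/*
 * Illustrative sketch: from user space, an outstanding dead-name request is
 * cancelled by registering MACH_PORT_NULL for the same name; the previously
 * registered send-once right comes back in "previous" and should be released
 * (hypothetical helper):
 */
#if 0
#include <mach/mach.h>

static kern_return_t
example_cancel_dead_name(mach_port_name_t watched)
{
    mach_port_t previous = MACH_PORT_NULL;
    kern_return_t kr;

    kr = mach_port_request_notification(mach_task_self(), watched,
        MACH_NOTIFY_DEAD_NAME, 0, MACH_PORT_NULL,
        MACH_MSG_TYPE_MAKE_SEND_ONCE, &previous);
    if (kr == KERN_SUCCESS && MACH_PORT_VALID(previous))
        (void)mach_port_deallocate(mach_task_self(), previous);
    return kr;
}
#endif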
 * Routine:	ipc_port_pdrequest
 *	Make a port-destroyed request, returning the
 *	previously registered send-once right.
 *	Just cancels the previous request if notify is IP_NULL.
 *	The port is locked and active.  It is unlocked.
 *	Consumes a ref for notify (if non-null), and
 *	returns previous with a ref (if non-null).

    ipc_port_t *previousp)

    assert(ip_active(port));

    previous = port->ip_pdrequest;
    port->ip_pdrequest = notify;

    *previousp = previous;
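/*
 * Illustrative sketch: ipc_port_pdrequest() is the kernel side of registering
 * a port-destroyed notification.  A user-space holder of a receive right can
 * ask that, instead of being destroyed, the receive right be delivered to a
 * "backup" port when the original would otherwise die (hypothetical helper):
 */
#if 0
#include <mach/mach.h>

static kern_return_t
example_register_backup(mach_port_t guarded_port, mach_port_t backup)
{
    mach_port_t previous = MACH_PORT_NULL;

    return mach_port_request_notification(mach_task_self(), guarded_port,
        MACH_NOTIFY_PORT_DESTROYED, 0, backup,
        MACH_MSG_TYPE_MAKE_SEND_ONCE, &previous);
}
#endif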
 * Routine:	ipc_port_nsrequest
 *	Make a no-senders request, returning the
 *	previously registered send-once right.
 *	Just cancels the previous request if notify is IP_NULL.
 *	The port is locked and active.  It is unlocked.
 *	Consumes a ref for notify (if non-null), and
 *	returns previous with a ref (if non-null).

    mach_port_mscount_t sync,
    ipc_port_t *previousp)

    mach_port_mscount_t mscount;

    assert(ip_active(port));

    previous = port->ip_nsrequest;
    mscount = port->ip_mscount;

    if ((port->ip_srights == 0) && (sync <= mscount) &&
        (notify != IP_NULL)) {
        port->ip_nsrequest = IP_NULL;

        ipc_notify_no_senders(notify, mscount);

        port->ip_nsrequest = notify;

    *previousp = previous;
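/*
 * Illustrative sketch: ipc_port_nsrequest() backs no-senders notifications.
 * The "sync" argument is compared against the port's make-send count; once
 * there are no outstanding send rights newer than that count, the
 * notification fires.  A common server idiom registers the receive right
 * itself as the notify port (hypothetical helper):
 */
#if 0
#include <mach/mach.h>

static kern_return_t
example_request_no_senders(mach_port_t service_port)
{
    mach_port_t previous = MACH_PORT_NULL;

    return mach_port_request_notification(mach_task_self(), service_port,
        MACH_NOTIFY_NO_SENDERS, 0 /* sync: make-send count observed so far */,
        service_port, MACH_MSG_TYPE_MAKE_SEND_ONCE, &previous);
}
#endif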
 * Routine:	ipc_port_clear_receiver
 *	Prepares a receive right for transmission/destruction.
 *	The port is locked and active.

ipc_port_clear_receiver(

    assert(ip_active(port));

     * pull ourselves from any sets.
    if (port->ip_in_pset != 0) {
        ipc_pset_remove_from_all(port);
        assert(port->ip_in_pset == 0);

     * Send anyone waiting on the port's queue directly away.
     * Also clear the mscount and seqno.
    imq_lock(&port->ip_messages);
    ipc_mqueue_changed(&port->ip_messages);
    ipc_port_set_mscount(port, 0);
    port->ip_messages.imq_seqno = 0;
    port->ip_context = port->ip_guarded = port->ip_strict_guard = 0;
    imq_unlock(&port->ip_messages);
 * Routine:	ipc_port_init
 *	Initializes a newly-allocated port.
 *	Doesn't touch the ip_object fields.

    mach_port_name_t name)

    /* port->ip_kobject doesn't have to be initialized */

    port->ip_receiver = space;
    port->ip_receiver_name = name;

    port->ip_mscount = 0;
    port->ip_srights = 0;
    port->ip_sorights = 0;

    port->ip_nsrequest = IP_NULL;
    port->ip_pdrequest = IP_NULL;
    port->ip_requests = IPR_NULL;

    port->ip_premsg = IKM_NULL;
    port->ip_context = 0;

    port->ip_sprequests = 0;
    port->ip_spimportant = 0;
    port->ip_impdonation = 0;
    port->ip_tempowner = 0;

    port->ip_guarded = 0;
    port->ip_strict_guard = 0;
    port->ip_impcount = 0;

    port->ip_reserved = 0;

    ipc_mqueue_init(&port->ip_messages,
        FALSE /* !set */, NULL /* no reserved link */);
 * Routine:	ipc_port_alloc
 *	Nothing locked.  If successful, the port is returned
 *	locked.  (The caller doesn't have a reference.)
 *	KERN_SUCCESS		The port is allocated.
 *	KERN_INVALID_TASK	The space is dead.
 *	KERN_NO_SPACE		No room for an entry in the space.
 *	KERN_RESOURCE_SHORTAGE	Couldn't allocate memory.

    mach_port_name_t *namep,

    mach_port_name_t name;

    uintptr_t buf[IP_CALLSTACK_MAX];
    ipc_port_callstack_init_debug(&buf[0], IP_CALLSTACK_MAX);
#endif /* MACH_ASSERT */

    kr = ipc_object_alloc(space, IOT_PORT,
        MACH_PORT_TYPE_RECEIVE, 0,
        &name, (ipc_object_t *) &port);
    if (kr != KERN_SUCCESS)

    /* port and space are locked */
    ipc_port_init(port, space, name);

    ipc_port_init_debug(port, &buf[0], IP_CALLSTACK_MAX);
#endif /* MACH_ASSERT */

    /* unlock space after init */
    is_write_unlock(space);
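/*
 * Illustrative sketch: ipc_port_alloc() is what ultimately services a
 * user-space receive-right allocation (hypothetical helper):
 */
#if 0
#include <mach/mach.h>

static kern_return_t
example_alloc_receive(mach_port_t *portp)
{
    return mach_port_allocate(mach_task_self(),
        MACH_PORT_RIGHT_RECEIVE, portp);
}
#endif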
 * Routine:	ipc_port_alloc_name
 *	Allocate a port, with a specific name.
 *	Nothing locked.  If successful, the port is returned
 *	locked.  (The caller doesn't have a reference.)
 *	KERN_SUCCESS		The port is allocated.
 *	KERN_INVALID_TASK	The space is dead.
 *	KERN_NAME_EXISTS	The name already denotes a right.
 *	KERN_RESOURCE_SHORTAGE	Couldn't allocate memory.

    mach_port_name_t name,

    uintptr_t buf[IP_CALLSTACK_MAX];
    ipc_port_callstack_init_debug(&buf[0], IP_CALLSTACK_MAX);
#endif /* MACH_ASSERT */

    kr = ipc_object_alloc_name(space, IOT_PORT,
        MACH_PORT_TYPE_RECEIVE, 0,
        name, (ipc_object_t *) &port);
    if (kr != KERN_SUCCESS)

    ipc_port_init(port, space, name);

    ipc_port_init_debug(port, &buf[0], IP_CALLSTACK_MAX);
#endif /* MACH_ASSERT */
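/*
 * Illustrative sketch: the named variant corresponds to the user-visible
 * mach_port_allocate_name(), where the caller picks the name and
 * KERN_NAME_EXISTS comes back if it is already in use (hypothetical helper):
 */
#if 0
#include <mach/mach.h>

static kern_return_t
example_alloc_named_receive(mach_port_name_t name)
{
    return mach_port_allocate_name(mach_task_self(),
        MACH_PORT_RIGHT_RECEIVE, name);
}
#endif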
 * Routine:	ipc_port_spnotify
 *	Generate send-possible port notifications.
 *	Nothing locked, reference held on port.

    ipc_port_request_index_t index = 0;
    ipc_table_elems_t size = 0;
#if IMPORTANCE_INHERITANCE
    boolean_t dropassert = FALSE;
#endif /* IMPORTANCE_INHERITANCE */

     * If the port has no send-possible request
     * armed, don't bother to lock the port.
    if (port->ip_sprequests == 0)

#if IMPORTANCE_INHERITANCE
    if (port->ip_spimportant != 0) {
        port->ip_spimportant = 0;
        if (ipc_port_impcount_delta(port, -1, IP_NULL) == -1) {
#endif /* IMPORTANCE_INHERITANCE */

    if (port->ip_sprequests == 0) {
    port->ip_sprequests = 0;

    if (ip_active(port)) {
        ipc_port_request_t requests;

        /* table may change each time port unlocked (reload) */
        requests = port->ip_requests;
        assert(requests != IPR_NULL);

         * no need to go beyond the table size we saw when we
         * first entered - those are future notifications.
        size = requests->ipr_size->its_size;

        /* no need to backtrack either */
        while (++index < size) {
            ipc_port_request_t ipr = &requests[index];
            mach_port_name_t name = ipr->ipr_name;
            ipc_port_t soright = IPR_SOR_PORT(ipr->ipr_soright);
            boolean_t armed = IPR_SOR_SPARMED(ipr->ipr_soright);

            if (MACH_PORT_VALID(name) && armed && IP_VALID(soright)) {
                /* claim send-once right - slot still inuse */
                ipr->ipr_soright = IP_NULL;

                ipc_notify_send_possible(soright, name);

#if IMPORTANCE_INHERITANCE
    if (dropassert == TRUE && ipc_importance_task_is_any_receiver_type(current_task()->task_imp_base)) {
        /* drop internal assertion */
        ipc_importance_task_drop_internal_assertion(current_task()->task_imp_base, 1);
#endif /* IMPORTANCE_INHERITANCE */
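/*
 * Illustrative sketch (hypothetical helper; the exact arming behavior is
 * simplified): the notifications generated above are the kernel half of
 * MACH_NOTIFY_SEND_POSSIBLE.  A sender typically registers the notification,
 * attempts a zero-timeout send, and on MACH_SEND_TIMED_OUT waits for the
 * send-possible message on "notify" before retrying.
 */
#if 0
#include <mach/mach.h>

static kern_return_t
example_try_send(mach_msg_header_t *msg, mach_port_name_t dest, mach_port_t notify)
{
    mach_port_t previous = MACH_PORT_NULL;
    kern_return_t kr;

    /* register a (delayed) send-possible notification for "dest" */
    kr = mach_port_request_notification(mach_task_self(), dest,
        MACH_NOTIFY_SEND_POSSIBLE, 0, notify,
        MACH_MSG_TYPE_MAKE_SEND_ONCE, &previous);
    if (kr != KERN_SUCCESS)
        return kr;

    /* a zero-timeout send; if it times out, retry when the notification arrives */
    return mach_msg(msg, MACH_SEND_MSG | MACH_SEND_TIMEOUT,
        msg->msgh_size, 0, MACH_PORT_NULL, 0, MACH_PORT_NULL);
}
#endif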
 * Routine:	ipc_port_dnnotify
 *	Generate dead name notifications for
 *	all outstanding dead-name and send-possible requests.
 *	Port must be inactive.
 *	Reference held on port.

    ipc_port_request_t requests = port->ip_requests;

    assert(!ip_active(port));
    if (requests != IPR_NULL) {
        ipc_table_size_t its = requests->ipr_size;
        ipc_table_elems_t size = its->its_size;
        ipc_port_request_index_t index;
        for (index = 1; index < size; index++) {
            ipc_port_request_t ipr = &requests[index];
            mach_port_name_t name = ipr->ipr_name;
            ipc_port_t soright = IPR_SOR_PORT(ipr->ipr_soright);

            if (MACH_PORT_VALID(name) && IP_VALID(soright)) {
                ipc_notify_dead_name(soright, name);
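/*
 * Illustrative sketch: the dead-name notifications generated above arrive in
 * user space as ordinary messages with id MACH_NOTIFY_DEAD_NAME; the body
 * identifies the name that died (hypothetical handler, assuming the caller
 * has already received the message into "msg"):
 */
#if 0
#include <mach/mach.h>
#include <mach/notify.h>

static void
example_handle_notification(mach_msg_header_t *msg)
{
    if (msg->msgh_id == MACH_NOTIFY_DEAD_NAME) {
        mach_dead_name_notification_t *dn = (mach_dead_name_notification_t *)msg;
        /* dn->not_port is the (now dead) name the request was registered on */
        (void)dn->not_port;
    }
}
#endif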
 * Routine:	ipc_port_destroy
 *	Destroys a port.  Cleans up queued messages.
 *	If the port has a backup, it doesn't get destroyed,
 *	but is sent in a port-destroyed notification to the backup.
 *	The port is locked and alive; nothing else locked.
 *	The caller has a reference, which is consumed.
 *	Afterwards, the port is unlocked and dead.

    ipc_port_t pdrequest, nsrequest;

#if IMPORTANCE_INHERITANCE
    ipc_importance_task_t release_imp_task = IIT_NULL;
    thread_t self = current_thread();
    boolean_t top = (self->ith_assertions == 0);
    natural_t assertcnt = 0;
#endif /* IMPORTANCE_INHERITANCE */

    assert(ip_active(port));
    /* port->ip_receiver_name is garbage */
    /* port->ip_receiver/port->ip_destination is garbage */
    assert(port->ip_in_pset == 0);
    assert(port->ip_mscount == 0);

    /* check for a backup port */
    pdrequest = port->ip_pdrequest;

#if IMPORTANCE_INHERITANCE
    /* determine how many assertions to drop and from whom */
    if (port->ip_tempowner != 0) {
        release_imp_task = port->ip_imp_task;
        if (IIT_NULL != release_imp_task) {
            port->ip_imp_task = IIT_NULL;
            assertcnt = port->ip_impcount;
        /* Otherwise, nothing to drop */
        assertcnt = port->ip_impcount;
        if (pdrequest != IP_NULL)
            /* mark in limbo for the journey */
            port->ip_tempowner = 1;

    self->ith_assertions = assertcnt;
#endif /* IMPORTANCE_INHERITANCE */

    if (pdrequest != IP_NULL) {
        /* we assume the ref for pdrequest */
        port->ip_pdrequest = IP_NULL;

        /* make port be in limbo */
        port->ip_receiver_name = MACH_PORT_NULL;
        port->ip_destination = IP_NULL;

        /* consumes our refs for port and pdrequest */
        ipc_notify_port_destroyed(pdrequest, port);

        goto drop_assertions;

    /* once port is dead, we don't need to keep it locked */

    port->ip_object.io_bits &= ~IO_BITS_ACTIVE;
    port->ip_timestamp = ipc_port_timestamp();
    nsrequest = port->ip_nsrequest;

     * If the port has a preallocated message buffer and that buffer
     * is not inuse, free it.  If it has an inuse one, then the kmsg
     * free will detect that we freed the association and it can free it
     * like a normal buffer.
    if (IP_PREALLOC(port)) {
        ipc_port_t inuse_port;

        kmsg = port->ip_premsg;
        assert(kmsg != IKM_NULL);
        inuse_port = ikm_prealloc_inuse_port(kmsg);
        IP_CLEAR_PREALLOC(port, kmsg);

        if (inuse_port != IP_NULL) {
            assert(inuse_port == port);

    /* throw away no-senders request */
    if (nsrequest != IP_NULL)
        ipc_notify_send_once(nsrequest); /* consumes ref */

    /* destroy any queued messages */
    mqueue = &port->ip_messages;
    ipc_mqueue_destroy(mqueue);

    /* cleanup waitq related resources */
    ipc_mqueue_deinit(mqueue);

    /* generate dead-name notifications */
    ipc_port_dnnotify(port);

    ipc_kobject_destroy(port);

    ip_release(port); /* consume caller's ref */

#if IMPORTANCE_INHERITANCE
    if (release_imp_task != IIT_NULL) {
        self->ith_assertions = 0;
        assert(ipc_importance_task_is_any_receiver_type(release_imp_task));
        ipc_importance_task_drop_internal_assertion(release_imp_task, assertcnt);

        ipc_importance_task_release(release_imp_task);

    } else if (assertcnt > 0) {
        self->ith_assertions = 0;
        release_imp_task = current_task()->task_imp_base;
        if (ipc_importance_task_is_any_receiver_type(release_imp_task)) {
            ipc_importance_task_drop_internal_assertion(release_imp_task, assertcnt);
#endif /* IMPORTANCE_INHERITANCE */
 * Routine:	ipc_port_check_circularity
 *	Check if queueing "port" in a message for "dest"
 *	would create a circular group of ports and messages.
 *	If no circularity (FALSE returned), then "port"
 *	is changed from "in limbo" to "in transit".
 *	That is, we want to set port->ip_destination == dest,
 *	but guaranteeing that this doesn't create a circle
 *	port->ip_destination->ip_destination->... == port
 *	No ports locked.  References held for "port" and "dest".

ipc_port_check_circularity(
#if IMPORTANCE_INHERITANCE
    /* adjust importance counts at the same time */
    return ipc_importance_check_circularity(port, dest);

    assert(port != IP_NULL);
    assert(dest != IP_NULL);

     * First try a quick check that can run in parallel.
     * No circularity if dest is not in transit.
    if (ip_lock_try(dest)) {
        if (!ip_active(dest) ||
            (dest->ip_receiver_name != MACH_PORT_NULL) ||
            (dest->ip_destination == IP_NULL))

    /* dest is in transit; further checking necessary */

    ipc_port_multiple_lock(); /* massive serialization */

     * Search for the end of the chain (a port not in transit),
     * acquiring locks along the way.

        if (!ip_active(base) ||
            (base->ip_receiver_name != MACH_PORT_NULL) ||
            (base->ip_destination == IP_NULL))

        base = base->ip_destination;

    /* all ports in chain from dest to base, inclusive, are locked */

        /* circularity detected! */

        ipc_port_multiple_unlock();

        /* port (== base) is in limbo */

        assert(ip_active(port));
        assert(port->ip_receiver_name == MACH_PORT_NULL);
        assert(port->ip_destination == IP_NULL);

        while (dest != IP_NULL) {
            /* dest is in transit or in limbo */

            assert(ip_active(dest));
            assert(dest->ip_receiver_name == MACH_PORT_NULL);

            next = dest->ip_destination;

     * The guarantee: lock port while the entire chain is locked.
     * Once port is locked, we can take a reference to dest,
     * add port to the chain, and unlock everything.

    ipc_port_multiple_unlock();

    /* port is in limbo */

    assert(ip_active(port));
    assert(port->ip_receiver_name == MACH_PORT_NULL);
    assert(port->ip_destination == IP_NULL);

    port->ip_destination = dest;

    /* now unlock chain */

    /* port is in transit */

    assert(ip_active(dest));
    assert(dest->ip_receiver_name == MACH_PORT_NULL);
    assert(dest->ip_destination != IP_NULL);

    port = dest->ip_destination;

    /* base is not in transit */
    assert(!ip_active(base) ||
        (base->ip_receiver_name != MACH_PORT_NULL) ||
        (base->ip_destination == IP_NULL));

#endif /* !IMPORTANCE_INHERITANCE */
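/*
 * Illustrative sketch (hypothetical types): the circularity check above walks
 * the ip_destination chain from "dest" until it reaches a port that is not in
 * transit; if that walk ever arrives back at "port", queueing would create a
 * cycle.  The shape of the check, stripped of locking and importance handling:
 */
#if 0
#include <stdbool.h>
#include <stddef.h>

struct ex_port { struct ex_port *destination; /* NULL when not in transit */ };

static bool
example_would_cycle(struct ex_port *port, struct ex_port *dest)
{
    for (struct ex_port *p = dest; p != NULL; p = p->destination) {
        if (p == port)
            return true;    /* port is already upstream of dest */
    }
    return false;
}
#endif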
 * Routine:	ipc_port_impcount_delta
 *	Adjust only the importance count associated with a port.
 *	If there are any adjustments to be made to the receiver task,
 *	those are handled elsewhere.
 *	For now, be defensive during deductions to make sure the
 *	impcount for the port doesn't underflow zero.  This will
 *	go away when the port boost addition is made atomic (see
 *	note in ipc_port_importance_delta()).
 *	The port is referenced and locked.
 *	Nothing else is locked.

ipc_port_impcount_delta(
    mach_port_delta_t delta,
    ipc_port_t __unused base)

    mach_port_delta_t absdelta;

    if (!ip_active(port)) {

    /* adding/doing nothing is easy */
    port->ip_impcount += delta;

    absdelta = 0 - delta;
    if (port->ip_impcount >= absdelta) {
        port->ip_impcount -= absdelta;

#if (DEVELOPMENT || DEBUG)
    if (port->ip_receiver_name != MACH_PORT_NULL) {
        task_t target_task = port->ip_receiver->is_task;
        ipc_importance_task_t target_imp = target_task->task_imp_base;
        const char *target_procname;

        if (target_imp != IIT_NULL) {
            target_procname = target_imp->iit_procname;
            target_pid = target_imp->iit_bsd_pid;
            target_procname = "unknown";

        printf("Over-release of importance assertions for port 0x%x receiver pid %d (%s), "
            "dropping %d assertion(s) but port only has %d remaining.\n",
            port->ip_receiver_name,
            target_pid, target_procname,
            absdelta, port->ip_impcount);

    } else if (base != IP_NULL) {
        task_t target_task = base->ip_receiver->is_task;
        ipc_importance_task_t target_imp = target_task->task_imp_base;
        const char *target_procname;

        if (target_imp != IIT_NULL) {
            target_procname = target_imp->iit_procname;
            target_pid = target_imp->iit_bsd_pid;
            target_procname = "unknown";

        printf("Over-release of importance assertions for port 0x%lx "
            "enqueued on port 0x%x with receiver pid %d (%s), "
            "dropping %d assertion(s) but port only has %d remaining.\n",
            (unsigned long)VM_KERNEL_UNSLIDE_OR_PERM((uintptr_t)port),
            base->ip_receiver_name,
            target_pid, target_procname,
            absdelta, port->ip_impcount);

    delta = 0 - port->ip_impcount;
    port->ip_impcount = 0;
 * Routine:	ipc_port_importance_delta_internal
 *	Adjust the importance count through the given port.
 *	If the port is in transit, apply the delta throughout
 *	the chain.  Determine if there is a task at the
 *	base of the chain that wants/needs to be adjusted,
 *	and if so, apply the delta.
 *	The port is referenced and locked on entry.
 *	Importance may be locked.
 *	Nothing else is locked.
 *	The lock may be dropped on exit.
 *	Returns TRUE if lock was dropped.

#if IMPORTANCE_INHERITANCE

ipc_port_importance_delta_internal(
    mach_port_delta_t *deltap,
    ipc_importance_task_t *imp_task)

    ipc_port_t next, base;
    boolean_t dropped = FALSE;

    *imp_task = IIT_NULL;

    assert(options == IPID_OPTION_NORMAL || options == IPID_OPTION_SENDPOSSIBLE);

    /* if port is in transit, have to search for end of chain */
    if (ip_active(port) &&
        port->ip_destination != IP_NULL &&
        port->ip_receiver_name == MACH_PORT_NULL) {

        ipc_port_multiple_lock(); /* massive serialization */

        while (ip_active(base) &&
            base->ip_destination != IP_NULL &&
            base->ip_receiver_name == MACH_PORT_NULL) {

            base = base->ip_destination;

        ipc_port_multiple_unlock();

     * If the port lock is dropped b/c the port is in transit, there is a
     * race window where another thread can drain messages and/or fire a
     * send possible notification before we get here.
     *
     * We solve this race by checking to see if our caller armed the send
     * possible notification, whether or not it's been fired yet, and
     * whether or not we've already set the port's ip_spimportant bit. If
     * we don't need a send-possible boost, then we'll just apply a
     * harmless 0-boost to the port.
    if (options & IPID_OPTION_SENDPOSSIBLE) {
        assert(*deltap == 1);
        if (port->ip_sprequests && port->ip_spimportant == 0)
            port->ip_spimportant = 1;

    /* unlock down to the base, adjusting boost(s) at each level */

    *deltap = ipc_port_impcount_delta(port, *deltap, base);

        /* port is in transit */
        assert(port->ip_tempowner == 0);
        next = port->ip_destination;

    /* find the task (if any) to boost according to the base */
    if (ip_active(base)) {
        if (base->ip_tempowner != 0) {
            if (IIT_NULL != base->ip_imp_task)
                *imp_task = base->ip_imp_task;
            /* otherwise don't boost */

        } else if (base->ip_receiver_name != MACH_PORT_NULL) {
            ipc_space_t space = base->ip_receiver;

            /* only spaces with boost-accepting tasks */
            if (space->is_task != TASK_NULL &&
                ipc_importance_task_is_any_receiver_type(space->is_task->task_imp_base)) {
                *imp_task = space->is_task->task_imp_base;

     * Only the base is locked.  If we have to hold or drop task
     * importance assertions, we'll have to drop that lock as well.
    if (*imp_task != IIT_NULL) {
        /* take a reference before unlocking base */
        ipc_importance_task_reference(*imp_task);

    if (dropped == TRUE) {

#endif /* IMPORTANCE_INHERITANCE */
 * Routine:	ipc_port_importance_delta
 *	Adjust the importance count through the given port.
 *	If the port is in transit, apply the delta throughout the chain.
 *	If there is a task at the base of the chain that wants/needs
 *	to be adjusted, apply the delta.
 *	The port is referenced and locked on entry.
 *	Nothing else is locked.
 *	The lock may be dropped on exit.
 *	Returns TRUE if lock was dropped.

#if IMPORTANCE_INHERITANCE

ipc_port_importance_delta(
    mach_port_delta_t delta)

    ipc_importance_task_t imp_task = IIT_NULL;

    dropped = ipc_port_importance_delta_internal(port, options, &delta, &imp_task);

    if (IIT_NULL == imp_task || delta == 0)

    assert(ipc_importance_task_is_any_receiver_type(imp_task));

        ipc_importance_task_hold_internal_assertion(imp_task, delta);
        ipc_importance_task_drop_internal_assertion(imp_task, -delta);

    ipc_importance_task_release(imp_task);

#endif /* IMPORTANCE_INHERITANCE */
 * Routine:	ipc_port_lookup_notify
 *	Make a send-once notify port from a receive right.
 *	Returns IP_NULL if name doesn't denote a receive right.
 *	The space must be locked (read or write) and active.
 *	Being the active space, we can rely on thread server_id
 *	context to give us the proper server level sub-order

ipc_port_lookup_notify(
    mach_port_name_t name)

    assert(is_active(space));

    entry = ipc_entry_lookup(space, name);
    if (entry == IE_NULL)

    if ((entry->ie_bits & MACH_PORT_TYPE_RECEIVE) == 0)

    __IGNORE_WCASTALIGN(port = (ipc_port_t) entry->ie_object);
    assert(port != IP_NULL);

    assert(ip_active(port));
    assert(port->ip_receiver_name == name);
    assert(port->ip_receiver == space);

    port->ip_sorights++;
 * Routine:	ipc_port_make_send_locked
 *	Make a naked send right from a receive right.
 *	port locked and active.

ipc_port_make_send_locked(
    assert(ip_active(port));

 * Routine:	ipc_port_make_send
 *	Make a naked send right from a receive right.

    if (!IP_VALID(port))

    if (ip_active(port)) {

 * Routine:	ipc_port_copy_send
 *	Make a naked send right from another naked send right.
 *		IP_NULL		-> IP_NULL
 *		IP_DEAD		-> IP_DEAD
 *		dead port	-> IP_DEAD
 *		live port	-> port + ref
 *	Nothing locked except possibly a space.

    if (!IP_VALID(port))

    if (ip_active(port)) {
        assert(port->ip_srights > 0);

 * Routine:	ipc_port_copyout_send
 *	Copyout a naked send right (possibly null/dead),
 *	or if that fails, destroy the right.

ipc_port_copyout_send(
    mach_port_name_t name;

    if (IP_VALID(sright)) {

        kr = ipc_object_copyout(space, (ipc_object_t) sright,
            MACH_MSG_TYPE_PORT_SEND, TRUE, &name);
        if (kr != KERN_SUCCESS) {
            ipc_port_release_send(sright);

            if (kr == KERN_INVALID_CAPABILITY)
                name = MACH_PORT_DEAD;
                name = MACH_PORT_NULL;

        name = CAST_MACH_PORT_TO_NAME(sright);
 * Routine:	ipc_port_release_send
 *	Release a naked send right.
 *	Consumes a ref for the port.

ipc_port_release_send(
    ipc_port_t nsrequest = IP_NULL;
    mach_port_mscount_t mscount;

    if (!IP_VALID(port))

    assert(port->ip_srights > 0);

    if (!ip_active(port)) {

    if (port->ip_srights == 0 &&
        port->ip_nsrequest != IP_NULL) {
        nsrequest = port->ip_nsrequest;
        port->ip_nsrequest = IP_NULL;
        mscount = port->ip_mscount;

        ipc_notify_no_senders(nsrequest, mscount);

 * Routine:	ipc_port_make_sonce_locked
 *	Make a naked send-once right from a receive right.
 *	The port is locked and active.

ipc_port_make_sonce_locked(
    assert(ip_active(port));
    port->ip_sorights++;

 * Routine:	ipc_port_make_sonce
 *	Make a naked send-once right from a receive right.
 *	The port is not locked.

ipc_port_make_sonce(
    if (!IP_VALID(port))

    if (ip_active(port)) {
        port->ip_sorights++;

 * Routine:	ipc_port_release_sonce
 *	Release a naked send-once right.
 *	Consumes a ref for the port.
 *	In normal situations, this is never used.
 *	Send-once rights are only consumed when
 *	a message (possibly a send-once notification)
 *	is sent to them.
 *	Nothing locked except possibly a space.

ipc_port_release_sonce(
    if (!IP_VALID(port))

    assert(port->ip_sorights > 0);

    port->ip_sorights--;

 * Routine:	ipc_port_release_receive
 *	Release a naked (in limbo or in transit) receive right.
 *	Consumes a ref for the port; destroys the port.

ipc_port_release_receive(
    if (!IP_VALID(port))

    assert(ip_active(port));
    assert(port->ip_receiver_name == MACH_PORT_NULL);
    dest = port->ip_destination;

    ipc_port_destroy(port); /* consumes ref, unlocks */

    if (dest != IP_NULL)
 * Routine:	ipc_port_alloc_special
 *	Allocate a port in a special space.
 *	The new port is returned with one ref.
 *	If unsuccessful, IP_NULL is returned.

ipc_port_alloc_special(
    __IGNORE_WCASTALIGN(port = (ipc_port_t) io_alloc(IOT_PORT));
    if (port == IP_NULL)

    uintptr_t buf[IP_CALLSTACK_MAX];
    ipc_port_callstack_init_debug(&buf[0], IP_CALLSTACK_MAX);
#endif /* MACH_ASSERT */

    bzero((char *)port, sizeof(*port));
    io_lock_init(&port->ip_object);
    port->ip_references = 1;
    port->ip_object.io_bits = io_makebits(TRUE, IOT_PORT, 0);

    ipc_port_init(port, space, 1);

    ipc_port_init_debug(port, &buf[0], IP_CALLSTACK_MAX);
#endif /* MACH_ASSERT */

 * Routine:	ipc_port_dealloc_special
 *	Deallocate a port in a special space.
 *	Consumes one ref for the port.

ipc_port_dealloc_special(
    __assert_only ipc_space_t space)

    assert(ip_active(port));
//	assert(port->ip_receiver_name != MACH_PORT_NULL);
    assert(port->ip_receiver == space);

     * We clear ip_receiver_name and ip_receiver to simplify
     * the ipc_space_kernel check in ipc_mqueue_send.

    port->ip_receiver_name = MACH_PORT_NULL;
    port->ip_receiver = IS_NULL;

    /* relevant part of ipc_port_clear_receiver */
    ipc_port_set_mscount(port, 0);
    port->ip_messages.imq_seqno = 0;

    ipc_port_destroy(port);

 * Routine:	ipc_port_finalize
 *	Called on last reference deallocate to
 *	free any remaining data associated with the port.

    ipc_port_request_t requests = port->ip_requests;

    assert(!ip_active(port));
    if (requests != IPR_NULL) {
        ipc_table_size_t its = requests->ipr_size;
        it_requests_free(its, requests);
        port->ip_requests = IPR_NULL;

    ipc_mqueue_deinit(&port->ip_messages);

    ipc_port_track_dealloc(port);
#endif /* MACH_ASSERT */
#include <kern/machine.h>

 *	Keep a list of all allocated ports.
 *	Allocation is intercepted via ipc_port_init;
 *	deallocation is intercepted via io_free.
queue_head_t	port_alloc_queue;
lck_spin_t	port_alloc_queue_lock;

unsigned long	port_count = 0;
unsigned long	port_count_warning = 20000;
unsigned long	port_timestamp = 0;

void		db_port_stack_trace(
    unsigned int verbose,
    unsigned int display,
    unsigned int ref_search,
    unsigned int ref_target);

 *	Initialize global state needed for run-time
ipc_port_debug_init(void)
    queue_init(&port_alloc_queue);
    lck_spin_init(&port_alloc_queue_lock, &ipc_lck_grp, &ipc_lck_attr);

    if (!PE_parse_boot_argn("ipc_portbt", &ipc_portbt, sizeof (ipc_portbt)))

extern int proc_pid(struct proc *);
#endif /* MACH_BSD */

 *	Initialize all of the debugging state in a port.
 *	Insert the port into a global list of all allocated ports.
ipc_port_init_debug(
    uintptr_t *callstack,
    unsigned int callstack_max)

    port->ip_thread = current_thread();
    port->ip_timetrack = port_timestamp++;
    for (i = 0; i < callstack_max; ++i)
        port->ip_callstack[i] = callstack[i];
    for (i = 0; i < IP_NSPARES; ++i)
        port->ip_spares[i] = 0;

    task_t task = current_task();
    if (task != TASK_NULL) {
        struct proc *proc = (struct proc *) get_bsdtask_info(task);

            port->ip_spares[0] = proc_pid(proc);
#endif /* MACH_BSD */

    lck_spin_lock(&port_alloc_queue_lock);
    if (port_count_warning > 0 && port_count >= port_count_warning)
        assert(port_count < port_count_warning);
    queue_enter(&port_alloc_queue, port, ipc_port_t, ip_port_links);
    lck_spin_unlock(&port_alloc_queue_lock);

 * Routine:	ipc_port_callstack_init_debug
 *	Calls the machine-dependent routine to
 *	fill in an array with up to IP_CALLSTACK_MAX
 *	levels of return pc information
 *	May block (via copyin)
ipc_port_callstack_init_debug(
    uintptr_t *callstack,
    unsigned int callstack_max)

    /* guarantee the callstack is initialized */
    for (i = 0; i < callstack_max; i++)

    machine_callstack(callstack, callstack_max);

 *	Remove a port from the queue of allocated ports.
 *	This routine should be invoked JUST prior to
 *	deallocating the actual memory occupied by the port.
ipc_port_track_dealloc(
    __unused ipc_port_t port)

ipc_port_track_dealloc(
    lck_spin_lock(&port_alloc_queue_lock);
    assert(port_count > 0);
    queue_remove(&port_alloc_queue, port, ipc_port_t, ip_port_links);
    lck_spin_unlock(&port_alloc_queue_lock);

#endif	/* MACH_ASSERT */