1 /*
2 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_FREE_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
58 * support for mandatory and extensible security protections. This notice
59 * is included in support of clause 2.2 (b) of the Apple Public License,
60 * Version 2.0.
61 */
62 /*
63 */
64 /*
65 * File: ipc/ipc_port.c
66 * Author: Rich Draves
67 * Date: 1989
68 *
69 * Functions to manipulate IPC ports.
70 */
71
72 #include <mach_assert.h>
73
74 #include <mach/port.h>
75 #include <mach/kern_return.h>
76 #include <kern/ipc_kobject.h>
77 #include <kern/thread.h>
78 #include <kern/misc_protos.h>
79 #include <kern/waitq.h>
80 #include <kern/policy_internal.h>
81 #include <kern/debug.h>
82 #include <kern/kcdata.h>
83 #include <ipc/ipc_entry.h>
84 #include <ipc/ipc_space.h>
85 #include <ipc/ipc_object.h>
86 #include <ipc/ipc_port.h>
87 #include <ipc/ipc_pset.h>
88 #include <ipc/ipc_kmsg.h>
89 #include <ipc/ipc_mqueue.h>
90 #include <ipc/ipc_notify.h>
91 #include <ipc/ipc_table.h>
92 #include <ipc/ipc_importance.h>
93 #include <machine/limits.h>
94 #include <kern/turnstile.h>
95 #include <kern/machine.h>
96
97 #include <security/mac_mach_internal.h>
98
99 #include <string.h>
100
101 static TUNABLE(bool, prioritize_launch, "prioritize_launch", true);
102 TUNABLE_WRITEABLE(int, ipc_portbt, "ipc_portbt", false);
103
104 LCK_SPIN_DECLARE_ATTR(ipc_port_multiple_lock_data, &ipc_lck_grp, &ipc_lck_attr);
105 ipc_port_timestamp_t ipc_port_timestamp_data;
106
107 #if MACH_ASSERT
108 void ipc_port_init_debug(
109 ipc_port_t port,
110 uintptr_t *callstack,
111 unsigned int callstack_max);
112
113 void ipc_port_callstack_init_debug(
114 uintptr_t *callstack,
115 unsigned int callstack_max);
116
117 #endif /* MACH_ASSERT */
118
119 static void
120 ipc_port_send_turnstile_recompute_push_locked(
121 ipc_port_t port);
122
123 static thread_t
124 ipc_port_get_watchport_inheritor(
125 ipc_port_t port);
126
127 void
128 ipc_port_release(ipc_port_t port)
129 {
130 ip_release(port);
131 }
132
133 void
134 ipc_port_reference(ipc_port_t port)
135 {
136 ip_reference(port);
137 }
138
139 /*
140 * Routine: ipc_port_timestamp
141 * Purpose:
142 * Retrieve a timestamp value.
143 */
144
145 ipc_port_timestamp_t
146 ipc_port_timestamp(void)
147 {
148 return OSIncrementAtomic(&ipc_port_timestamp_data);
149 }
150
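/*
 * Example (illustrative sketch, not used by the kernel): timestamps taken
 * here are compared with the IP_TIMESTAMP_ORDER() macro from ipc_port.h,
 * which orders values modulo counter wraparound. A hypothetical caller can
 * use it to decide whether a port died before a given timestamp was taken:
 */
static boolean_t __unused
example_port_died_before(ipc_port_t dead_port, ipc_port_timestamp_t stamp)
{
	/* TRUE if dead_port's death was timestamped before `stamp` */
	return IP_TIMESTAMP_ORDER(dead_port->ip_timestamp, stamp);
}
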
151 /*
152 * Routine: ipc_port_request_alloc
153 * Purpose:
154 * Try to allocate a request slot.
155 * If successful, returns the request index
156 * through *indexp.
157 * Conditions:
158 * The port is locked and active.
159 * Returns:
160 * KERN_SUCCESS A request index was found.
161 * KERN_NO_SPACE No index allocated.
162 */
163
164 #if IMPORTANCE_INHERITANCE
165 kern_return_t
166 ipc_port_request_alloc(
167 ipc_port_t port,
168 mach_port_name_t name,
169 ipc_port_t soright,
170 boolean_t send_possible,
171 boolean_t immediate,
172 ipc_port_request_index_t *indexp,
173 boolean_t *importantp)
174 #else
175 kern_return_t
176 ipc_port_request_alloc(
177 ipc_port_t port,
178 mach_port_name_t name,
179 ipc_port_t soright,
180 boolean_t send_possible,
181 boolean_t immediate,
182 ipc_port_request_index_t *indexp)
183 #endif /* IMPORTANCE_INHERITANCE */
184 {
185 ipc_port_request_t ipr, table;
186 ipc_port_request_index_t index;
187 uintptr_t mask = 0;
188
189 #if IMPORTANCE_INHERITANCE
190 *importantp = FALSE;
191 #endif /* IMPORTANCE_INHERITANCE */
192
193 require_ip_active(port);
194 assert(name != MACH_PORT_NULL);
195 assert(soright != IP_NULL);
196
197 table = port->ip_requests;
198
199 if (table == IPR_NULL) {
200 return KERN_NO_SPACE;
201 }
202
203 index = table->ipr_next;
204 if (index == 0) {
205 return KERN_NO_SPACE;
206 }
207
208 ipr = &table[index];
209 assert(ipr->ipr_name == MACH_PORT_NULL);
210
211 table->ipr_next = ipr->ipr_next;
212 ipr->ipr_name = name;
213
214 if (send_possible) {
215 mask |= IPR_SOR_SPREQ_MASK;
216 if (immediate) {
217 mask |= IPR_SOR_SPARM_MASK;
218 if (port->ip_sprequests == 0) {
219 port->ip_sprequests = 1;
220 #if IMPORTANCE_INHERITANCE
221 /* TODO: Live importance support in send-possible */
222 if (port->ip_impdonation != 0 &&
223 port->ip_spimportant == 0 &&
224 (task_is_importance_donor(current_task()))) {
225 *importantp = TRUE;
226 }
227 #endif /* IMPORTANCE_INHERITANCE */
228 }
229 }
230 }
231 ipr->ipr_soright = IPR_SOR_MAKE(soright, mask);
232
233 *indexp = index;
234
235 return KERN_SUCCESS;
236 }
237
238 /*
239 * Routine: ipc_port_request_grow
240 * Purpose:
241 * Grow a port's table of requests.
242 * Conditions:
243 * The port must be locked and active.
244 * Nothing else locked; will allocate memory.
245 * Upon return the port is unlocked.
246 * Returns:
247 * KERN_SUCCESS Grew the table.
248 * KERN_SUCCESS Somebody else grew the table.
249 * KERN_SUCCESS The port died.
250 * KERN_RESOURCE_SHORTAGE Couldn't allocate new table.
251 * KERN_NO_SPACE Couldn't grow to desired size.
252 */
253
254 kern_return_t
255 ipc_port_request_grow(
256 ipc_port_t port,
257 ipc_table_elems_t target_size)
258 {
259 ipc_table_size_t its;
260 ipc_port_request_t otable, ntable;
261 require_ip_active(port);
262
263 otable = port->ip_requests;
264 if (otable == IPR_NULL) {
265 its = &ipc_table_requests[0];
266 } else {
267 its = otable->ipr_size + 1;
268 }
269
270 if (target_size != ITS_SIZE_NONE) {
271 if ((otable != IPR_NULL) &&
272 (target_size <= otable->ipr_size->its_size)) {
273 ip_unlock(port);
274 return KERN_SUCCESS;
275 }
276 while ((its->its_size) && (its->its_size < target_size)) {
277 its++;
278 }
279 if (its->its_size == 0) {
280 ip_unlock(port);
281 return KERN_NO_SPACE;
282 }
283 }
284
285 ip_reference(port);
286 ip_unlock(port);
287
288 if ((its->its_size == 0) ||
289 ((ntable = it_requests_alloc(its)) == IPR_NULL)) {
290 ip_release(port);
291 return KERN_RESOURCE_SHORTAGE;
292 }
293
294 ip_lock(port);
295
296 /*
297 * Check that port is still active and that nobody else
298 * has slipped in and grown the table on us. Note that
299 * just checking if the current table pointer == otable
300 * isn't sufficient; must check ipr_size.
301 */
302
303 if (ip_active(port) && (port->ip_requests == otable) &&
304 ((otable == IPR_NULL) || (otable->ipr_size + 1 == its))) {
305 ipc_table_size_t oits;
306 ipc_table_elems_t osize, nsize;
307 ipc_port_request_index_t free, i;
308
309 /* copy old table to new table */
310
311 if (otable != IPR_NULL) {
312 oits = otable->ipr_size;
313 osize = oits->its_size;
314 free = otable->ipr_next;
315
316 (void) memcpy((void *)(ntable + 1),
317 (const void *)(otable + 1),
318 (osize - 1) * sizeof(struct ipc_port_request));
319 } else {
320 osize = 1;
321 oits = 0;
322 free = 0;
323 }
324
325 nsize = its->its_size;
326 assert(nsize > osize);
327
328 /* add new elements to the new table's free list */
329
330 for (i = osize; i < nsize; i++) {
331 ipc_port_request_t ipr = &ntable[i];
332
333 ipr->ipr_name = MACH_PORT_NULL;
334 ipr->ipr_next = free;
335 free = i;
336 }
337
338 ntable->ipr_next = free;
339 ntable->ipr_size = its;
340 port->ip_requests = ntable;
341 ip_unlock(port);
342 ip_release(port);
343
344 if (otable != IPR_NULL) {
345 it_requests_free(oits, otable);
346 }
347 } else {
348 ip_unlock(port);
349 ip_release(port);
350 it_requests_free(its, ntable);
351 }
352
353 return KERN_SUCCESS;
354 }
355
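/*
 * Example (illustrative sketch): the retry pattern callers such as
 * mach_port_request_notification() use around the two routines above.
 * Simplified - a real caller re-translates the port name in its space
 * after ipc_port_request_grow() drops the lock, rather than re-locking
 * the same port pointer as done here.
 */
static kern_return_t __unused
example_register_request(
	ipc_port_t                port,    /* locked and active on entry */
	mach_port_name_t          name,
	ipc_port_t                soright,
	ipc_port_request_index_t *indexp)
{
	kern_return_t kr;
#if IMPORTANCE_INHERITANCE
	boolean_t needboost = FALSE;
#endif

	for (;;) {
#if IMPORTANCE_INHERITANCE
		kr = ipc_port_request_alloc(port, name, soright,
		    FALSE, FALSE, indexp, &needboost);
#else
		kr = ipc_port_request_alloc(port, name, soright,
		    FALSE, FALSE, indexp);
#endif
		if (kr != KERN_NO_SPACE) {
			return kr;              /* port still locked */
		}

		/* table is full: grow it (this unlocks the port) */
		kr = ipc_port_request_grow(port, ITS_SIZE_NONE);
		if (kr != KERN_SUCCESS) {
			return kr;              /* port unlocked */
		}

		ip_lock(port);
		if (!ip_active(port)) {
			ip_unlock(port);
			return KERN_INVALID_NAME;
		}
	}
}
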
356 /*
357 * Routine: ipc_port_request_sparm
358 * Purpose:
359 * Arm delayed send-possible request.
360 * Conditions:
361 * The port must be locked and active.
362 *
363 * Returns TRUE if the request was armed
364 * (or, on IMPORTANCE_INHERITANCE kernels, armed with an importance boost).
365 */
366
367 boolean_t
368 ipc_port_request_sparm(
369 ipc_port_t port,
370 __assert_only mach_port_name_t name,
371 ipc_port_request_index_t index,
372 mach_msg_option_t option,
373 mach_msg_priority_t priority)
374 {
375 if (index != IE_REQ_NONE) {
376 ipc_port_request_t ipr, table;
377
378 require_ip_active(port);
379
380 table = port->ip_requests;
381 assert(table != IPR_NULL);
382
383 ipr = &table[index];
384 assert(ipr->ipr_name == name);
385
386 /* Is there a valid destination? */
387 if (IPR_SOR_SPREQ(ipr->ipr_soright)) {
388 ipr->ipr_soright = IPR_SOR_MAKE(ipr->ipr_soright, IPR_SOR_SPARM_MASK);
389 port->ip_sprequests = 1;
390
391 if (option & MACH_SEND_OVERRIDE) {
392 /* apply override to message queue */
393 mach_msg_qos_t qos_ovr;
394 if (mach_msg_priority_is_pthread_priority(priority)) {
395 qos_ovr = _pthread_priority_thread_qos(priority);
396 } else {
397 qos_ovr = mach_msg_priority_overide_qos(priority);
398 }
399 if (qos_ovr) {
400 ipc_mqueue_override_send(&port->ip_messages, qos_ovr);
401 }
402 }
403
404 #if IMPORTANCE_INHERITANCE
405 if (((option & MACH_SEND_NOIMPORTANCE) == 0) &&
406 (port->ip_impdonation != 0) &&
407 (port->ip_spimportant == 0) &&
408 (((option & MACH_SEND_IMPORTANCE) != 0) ||
409 (task_is_importance_donor(current_task())))) {
410 return TRUE;
411 }
412 #else
413 return TRUE;
414 #endif /* IMPORTANCE_INHERITANCE */
415 }
416 }
417 return FALSE;
418 }
419
420 /*
421 * Routine: ipc_port_request_type
422 * Purpose:
423 * Determine the type(s) of port requests enabled for a name.
424 * Conditions:
425 * The port must be locked or inactive (to avoid table growth).
426 * The index must not be IE_REQ_NONE and must correspond to the name in question.
427 */
428 mach_port_type_t
429 ipc_port_request_type(
430 ipc_port_t port,
431 __assert_only mach_port_name_t name,
432 ipc_port_request_index_t index)
433 {
434 ipc_port_request_t ipr, table;
435 mach_port_type_t type = 0;
436
437 table = port->ip_requests;
438 assert(table != IPR_NULL);
439
440 assert(index != IE_REQ_NONE);
441 ipr = &table[index];
442 assert(ipr->ipr_name == name);
443
444 if (IP_VALID(IPR_SOR_PORT(ipr->ipr_soright))) {
445 type |= MACH_PORT_TYPE_DNREQUEST;
446
447 if (IPR_SOR_SPREQ(ipr->ipr_soright)) {
448 type |= MACH_PORT_TYPE_SPREQUEST;
449
450 if (!IPR_SOR_SPARMED(ipr->ipr_soright)) {
451 type |= MACH_PORT_TYPE_SPREQUEST_DELAYED;
452 }
453 }
454 }
455 return type;
456 }
457
458 /*
459 * Routine: ipc_port_request_cancel
460 * Purpose:
461 * Cancel a dead-name/send-possible request and return the send-once right.
462 * Conditions:
463 * The port must be locked and active.
464 * The index must not be IE_REQ_NONE and must correspond to the name.
465 */
466
467 ipc_port_t
468 ipc_port_request_cancel(
469 ipc_port_t port,
470 __assert_only mach_port_name_t name,
471 ipc_port_request_index_t index)
472 {
473 ipc_port_request_t ipr, table;
474 ipc_port_t request = IP_NULL;
475
476 require_ip_active(port);
477 table = port->ip_requests;
478 assert(table != IPR_NULL);
479
480 assert(index != IE_REQ_NONE);
481 ipr = &table[index];
482 assert(ipr->ipr_name == name);
483 request = IPR_SOR_PORT(ipr->ipr_soright);
484
485 /* return ipr to the free list inside the table */
486 ipr->ipr_name = MACH_PORT_NULL;
487 ipr->ipr_next = table->ipr_next;
488 table->ipr_next = index;
489
490 return request;
491 }
492
493 /*
494 * Routine: ipc_port_pdrequest
495 * Purpose:
496 * Make a port-deleted request, returning the
497 * previously registered send-once right.
498 * Just cancels the previous request if notify is IP_NULL.
499 * Conditions:
500 * The port is locked and active. It is unlocked.
501 * Consumes a ref for notify (if non-null), and
502 * returns previous with a ref (if non-null).
503 */
504
505 void
506 ipc_port_pdrequest(
507 ipc_port_t port,
508 ipc_port_t notify,
509 ipc_port_t *previousp)
510 {
511 ipc_port_t previous;
512 require_ip_active(port);
513
514 previous = port->ip_pdrequest;
515 port->ip_pdrequest = notify;
516 ip_unlock(port);
517
518 *previousp = previous;
519 }
520
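/*
 * Example (illustrative sketch): registering `backup`, a send-once right,
 * so that destroying `port` later delivers a port-destroyed notification
 * (carrying the receive right) to the backup instead of dying outright.
 */
static void __unused
example_set_backup(ipc_port_t port, ipc_port_t backup)
{
	ipc_port_t previous;

	ip_lock(port);
	if (!ip_active(port)) {
		ip_unlock(port);
		ipc_notify_send_once(backup);   /* consume the unused right */
		return;
	}
	ipc_port_pdrequest(port, backup, &previous);    /* unlocks port */

	if (IP_VALID(previous)) {
		ipc_notify_send_once(previous); /* consume the displaced right */
	}
}
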
521 /*
522 * Routine: ipc_port_nsrequest
523 * Purpose:
524 * Make a no-senders request, returning the
525 * previously registered send-once right.
526 * Just cancels the previous request if notify is IP_NULL.
527 * Conditions:
528 * The port is locked and active. It is unlocked.
529 * Consumes a ref for notify (if non-null), and
530 * returns previous with a ref (if non-null).
531 */
532
533 void
534 ipc_port_nsrequest(
535 ipc_port_t port,
536 mach_port_mscount_t sync,
537 ipc_port_t notify,
538 ipc_port_t *previousp)
539 {
540 ipc_port_t previous;
541 mach_port_mscount_t mscount;
542 require_ip_active(port);
543
544 previous = port->ip_nsrequest;
545 mscount = port->ip_mscount;
546
547 if ((port->ip_srights == 0) && (sync <= mscount) &&
548 (notify != IP_NULL)) {
549 port->ip_nsrequest = IP_NULL;
550 ip_unlock(port);
551 ipc_notify_no_senders(notify, mscount);
552 } else {
553 port->ip_nsrequest = notify;
554 ip_unlock(port);
555 }
556
557 *previousp = previous;
558 }
559
560
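/*
 * Example (illustrative sketch): arming a no-senders notification against
 * the current make-send count. If no send rights are outstanding, the
 * notification fires immediately from inside ipc_port_nsrequest().
 */
static void __unused
example_arm_no_senders(ipc_port_t port)
{
	ipc_port_t notify, previous;

	ip_lock(port);
	if (!ip_active(port)) {
		ip_unlock(port);
		return;
	}

	/* deliver the notification to the port itself via a send-once right */
	notify = ipc_port_make_sonce_locked(port);

	ipc_port_nsrequest(port, port->ip_mscount, notify, &previous);
	/* port is now unlocked */

	if (IP_VALID(previous)) {
		ipc_notify_send_once(previous); /* consume the displaced right */
	}
}
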
561 /*
562 * Routine: ipc_port_clear_receiver
563 * Purpose:
564 * Prepares a receive right for transmission/destruction,
565 * optionally performs mqueue destruction (with port lock held)
566 *
567 * Conditions:
568 * The port is locked and active.
569 * Returns:
570 * If should_destroy is TRUE, then the return value indicates
571 * whether the caller needs to reap kmsg structures that should
572 * be destroyed (by calling ipc_kmsg_reap_delayed)
573 *
574 * If should_destroy is FALSE, this always returns FALSE
575 */
576
577 boolean_t
578 ipc_port_clear_receiver(
579 ipc_port_t port,
580 boolean_t should_destroy)
581 {
582 ipc_mqueue_t mqueue = &port->ip_messages;
583 boolean_t reap_messages = FALSE;
584
585 /*
586 * Pull ourselves out of any sets to which we belong.
587 * We hold the port locked, so even though this acquires and releases
588 * the mqueue lock, we know we won't be added to any other sets.
589 */
590 if (port->ip_in_pset != 0) {
591 ipc_pset_remove_from_all(port);
592 assert(port->ip_in_pset == 0);
593 }
594
595 /*
596 * Send anyone waiting on the port's queue directly away.
597 * Also clear the mscount, seqno, guard bits
598 */
599 imq_lock(mqueue);
600 if (port->ip_receiver_name) {
601 ipc_mqueue_changed(port->ip_receiver, mqueue);
602 } else {
603 ipc_mqueue_changed(NULL, mqueue);
604 }
605 port->ip_mscount = 0;
606 mqueue->imq_seqno = 0;
607 port->ip_context = port->ip_guarded = port->ip_strict_guard = 0;
608 /*
609 * clear the immovable bit so the port can move back to anyone listening
610 * for the port destroy notification
611 */
612 port->ip_immovable_receive = 0;
613
614 if (should_destroy) {
615 /*
616 * Mark the port and mqueue invalid, preventing further send/receive
617 * operations from succeeding. It's important for this to be
618 * done under the same lock hold as the ipc_mqueue_changed
619 * call to avoid additional threads blocking on an mqueue
620 * that's being destroyed.
621 *
622 * The port active bit needs to be guarded under mqueue lock for
623 * turnstiles
624 */
625 port->ip_object.io_bits &= ~IO_BITS_ACTIVE;
626 port->ip_timestamp = ipc_port_timestamp();
627 reap_messages = ipc_mqueue_destroy_locked(mqueue);
628 } else {
629 /* make port be in limbo */
630 port->ip_receiver_name = MACH_PORT_NULL;
631 port->ip_destination = IP_NULL;
632 }
633
634 imq_unlock(&port->ip_messages);
635
636 return reap_messages;
637 }
638
639 /*
640 * Routine: ipc_port_init
641 * Purpose:
642 * Initializes a newly-allocated port.
643 * Doesn't touch the ip_object fields.
644 */
645
646 void
647 ipc_port_init(
648 ipc_port_t port,
649 ipc_space_t space,
650 ipc_port_init_flags_t flags,
651 mach_port_name_t name)
652 {
653 /* port->ip_kobject doesn't have to be initialized */
654
655 port->ip_receiver = space;
656 port->ip_receiver_name = name;
657
658 port->ip_mscount = 0;
659 port->ip_srights = 0;
660 port->ip_sorights = 0;
661 if (flags & IPC_PORT_INIT_MAKE_SEND_RIGHT) {
662 port->ip_srights = 1;
663 port->ip_mscount = 1;
664 }
665
666 port->ip_nsrequest = IP_NULL;
667 port->ip_pdrequest = IP_NULL;
668 port->ip_requests = IPR_NULL;
669
670 port->ip_premsg = IKM_NULL;
671 port->ip_context = 0;
672 port->ip_reply_context = 0;
673
674 port->ip_sprequests = 0;
675 port->ip_spimportant = 0;
676 port->ip_impdonation = 0;
677 port->ip_tempowner = 0;
678
679 port->ip_guarded = 0;
680 port->ip_strict_guard = 0;
681 port->ip_immovable_receive = 0;
682 port->ip_no_grant = 0;
683 port->ip_immovable_send = 0;
684 port->ip_impcount = 0;
685
686 if (flags & IPC_PORT_INIT_FILTER_MESSAGE) {
687 port->ip_object.io_bits |= IP_BIT_FILTER_MSG;
688 }
689
690 port->ip_tg_block_tracking = (flags & IPC_PORT_INIT_TG_BLOCK_TRACKING) != 0;
691 port->ip_specialreply = (flags & IPC_PORT_INIT_SPECIAL_REPLY) != 0;
692 port->ip_sync_link_state = PORT_SYNC_LINK_ANY;
693 port->ip_sync_bootstrap_checkin = 0;
694
695 ipc_special_reply_port_bits_reset(port);
696
697 port->ip_send_turnstile = TURNSTILE_NULL;
698
699 ipc_mqueue_kind_t kind = IPC_MQUEUE_KIND_NONE;
700 if (flags & IPC_PORT_INIT_MESSAGE_QUEUE) {
701 kind = IPC_MQUEUE_KIND_PORT;
702 }
703 ipc_mqueue_init(&port->ip_messages, kind);
704 }
705
706 /*
707 * Routine: ipc_port_alloc
708 * Purpose:
709 * Allocate a port.
710 * Conditions:
711 * Nothing locked. If successful, the port is returned
712 * locked. (The caller doesn't have a reference.)
713 * Returns:
714 * KERN_SUCCESS The port is allocated.
715 * KERN_INVALID_TASK The space is dead.
716 * KERN_NO_SPACE No room for an entry in the space.
717 * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
718 */
719
720 kern_return_t
721 ipc_port_alloc(
722 ipc_space_t space,
723 ipc_port_init_flags_t flags,
724 mach_port_name_t *namep,
725 ipc_port_t *portp)
726 {
727 ipc_port_t port;
728 mach_port_name_t name;
729 kern_return_t kr;
730 mach_port_type_t type = MACH_PORT_TYPE_RECEIVE;
731 mach_port_urefs_t urefs = 0;
732
733 #if MACH_ASSERT
734 uintptr_t buf[IP_CALLSTACK_MAX];
735 ipc_port_callstack_init_debug(&buf[0], IP_CALLSTACK_MAX);
736 #endif /* MACH_ASSERT */
737
738 if (flags & IPC_PORT_INIT_MAKE_SEND_RIGHT) {
739 type |= MACH_PORT_TYPE_SEND;
740 urefs = 1;
741 }
742 kr = ipc_object_alloc(space, IOT_PORT, type, urefs,
743 &name, (ipc_object_t *) &port);
744 if (kr != KERN_SUCCESS) {
745 return kr;
746 }
747
748 /* port and space are locked */
749 ipc_port_init(port, space, flags, name);
750
751 #if MACH_ASSERT
752 ipc_port_init_debug(port, &buf[0], IP_CALLSTACK_MAX);
753 #endif /* MACH_ASSERT */
754
755 /* unlock space after init */
756 is_write_unlock(space);
757
758 *namep = name;
759 *portp = port;
760
761 return KERN_SUCCESS;
762 }
763
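/*
 * Example (illustrative sketch): allocating a full message-queue port in a
 * space, minting the receive right and one send right atomically via
 * IPC_PORT_INIT_MAKE_SEND_RIGHT.
 */
static kern_return_t __unused
example_alloc_port(ipc_space_t space, mach_port_name_t *namep)
{
	ipc_port_t port;
	kern_return_t kr;

	kr = ipc_port_alloc(space,
	    IPC_PORT_INIT_MESSAGE_QUEUE | IPC_PORT_INIT_MAKE_SEND_RIGHT,
	    namep, &port);
	if (kr != KERN_SUCCESS) {
		return kr;
	}

	/* port comes back locked; the caller holds no extra reference */
	ip_unlock(port);
	return KERN_SUCCESS;
}
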
764 /*
765 * Routine: ipc_port_alloc_name
766 * Purpose:
767 * Allocate a port, with a specific name.
768 * Conditions:
769 * Nothing locked. If successful, the port is returned
770 * locked. (The caller doesn't have a reference.)
771 * Returns:
772 * KERN_SUCCESS The port is allocated.
773 * KERN_INVALID_TASK The space is dead.
774 * KERN_NAME_EXISTS The name already denotes a right.
775 * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
776 */
777
778 kern_return_t
779 ipc_port_alloc_name(
780 ipc_space_t space,
781 ipc_port_init_flags_t flags,
782 mach_port_name_t name,
783 ipc_port_t *portp)
784 {
785 ipc_port_t port;
786 kern_return_t kr;
787 mach_port_type_t type = MACH_PORT_TYPE_RECEIVE;
788 mach_port_urefs_t urefs = 0;
789
790 #if MACH_ASSERT
791 uintptr_t buf[IP_CALLSTACK_MAX];
792 ipc_port_callstack_init_debug(&buf[0], IP_CALLSTACK_MAX);
793 #endif /* MACH_ASSERT */
794
795 if (flags & IPC_PORT_INIT_MAKE_SEND_RIGHT) {
796 type |= MACH_PORT_TYPE_SEND;
797 urefs = 1;
798 }
799 kr = ipc_object_alloc_name(space, IOT_PORT, type, urefs,
800 name, (ipc_object_t *) &port);
801 if (kr != KERN_SUCCESS) {
802 return kr;
803 }
804
805 /* port is locked */
806
807 ipc_port_init(port, space, flags, name);
808
809 #if MACH_ASSERT
810 ipc_port_init_debug(port, &buf[0], IP_CALLSTACK_MAX);
811 #endif /* MACH_ASSERT */
812
813 *portp = port;
814
815 return KERN_SUCCESS;
816 }
817
818 /*
819 * Routine: ipc_port_spnotify
820 * Purpose:
821 * Generate send-possible port notifications.
822 * Conditions:
823 * Nothing locked, reference held on port.
824 */
825 void
826 ipc_port_spnotify(
827 ipc_port_t port)
828 {
829 ipc_port_request_index_t index = 0;
830 ipc_table_elems_t size = 0;
831
832 /*
833 * If the port has no send-possible request
834 * armed, don't bother to lock the port.
835 */
836 if (port->ip_sprequests == 0) {
837 return;
838 }
839
840 ip_lock(port);
841
842 #if IMPORTANCE_INHERITANCE
843 if (port->ip_spimportant != 0) {
844 port->ip_spimportant = 0;
845 if (ipc_port_importance_delta(port, IPID_OPTION_NORMAL, -1) == TRUE) {
846 ip_lock(port);
847 }
848 }
849 #endif /* IMPORTANCE_INHERITANCE */
850
851 if (port->ip_sprequests == 0) {
852 ip_unlock(port);
853 return;
854 }
855 port->ip_sprequests = 0;
856
857 revalidate:
858 if (ip_active(port)) {
859 ipc_port_request_t requests;
860
861 /* table may change each time port unlocked (reload) */
862 requests = port->ip_requests;
863 assert(requests != IPR_NULL);
864
865 /*
866 * no need to go beyond the table size as of when we
867 * first entered - anything past it is a future notification.
868 */
869 if (size == 0) {
870 size = requests->ipr_size->its_size;
871 }
872
873 /* no need to backtrack either */
874 while (++index < size) {
875 ipc_port_request_t ipr = &requests[index];
876 mach_port_name_t name = ipr->ipr_name;
877 ipc_port_t soright = IPR_SOR_PORT(ipr->ipr_soright);
878 boolean_t armed = IPR_SOR_SPARMED(ipr->ipr_soright);
879
880 if (MACH_PORT_VALID(name) && armed && IP_VALID(soright)) {
881 /* claim send-once right - slot still in use */
882 ipr->ipr_soright = IP_NULL;
883 ip_unlock(port);
884
885 ipc_notify_send_possible(soright, name);
886
887 ip_lock(port);
888 goto revalidate;
889 }
890 }
891 }
892 ip_unlock(port);
893 return;
894 }
895
896 /*
897 * Routine: ipc_port_dnnotify
898 * Purpose:
899 * Generate dead name notifications for
900 * all outstanding dead-name and send-
901 * possible requests.
902 * Conditions:
903 * Nothing locked.
904 * Port must be inactive.
905 * Reference held on port.
906 */
907 void
908 ipc_port_dnnotify(
909 ipc_port_t port)
910 {
911 ipc_port_request_t requests = port->ip_requests;
912
913 assert(!ip_active(port));
914 if (requests != IPR_NULL) {
915 ipc_table_size_t its = requests->ipr_size;
916 ipc_table_elems_t size = its->its_size;
917 ipc_port_request_index_t index;
918 for (index = 1; index < size; index++) {
919 ipc_port_request_t ipr = &requests[index];
920 mach_port_name_t name = ipr->ipr_name;
921 ipc_port_t soright = IPR_SOR_PORT(ipr->ipr_soright);
922
923 if (MACH_PORT_VALID(name) && IP_VALID(soright)) {
924 ipc_notify_dead_name(soright, name);
925 }
926 }
927 }
928 }
929
930
931 /*
932 * Routine: ipc_port_destroy
933 * Purpose:
934 * Destroys a port. Cleans up queued messages.
935 *
936 * If the port has a backup, it doesn't get destroyed,
937 * but is sent in a port-destroyed notification to the backup.
938 * Conditions:
939 * The port is locked and alive; nothing else locked.
940 * The caller has a reference, which is consumed.
941 * Afterwards, the port is unlocked and dead.
942 */
943
944 void
945 ipc_port_destroy(ipc_port_t port)
946 {
947 ipc_port_t pdrequest, nsrequest;
948 ipc_mqueue_t mqueue;
949 ipc_kmsg_t kmsg;
950 boolean_t special_reply = port->ip_specialreply;
951 struct task_watchport_elem *watchport_elem = NULL;
952
953 #if IMPORTANCE_INHERITANCE
954 ipc_importance_task_t release_imp_task = IIT_NULL;
955 thread_t self = current_thread();
956 boolean_t top = (self->ith_assertions == 0);
957 natural_t assertcnt = 0;
958 #endif /* IMPORTANCE_INHERITANCE */
959
960 require_ip_active(port);
961 /* port->ip_receiver_name is garbage */
962 /* port->ip_receiver/port->ip_destination is garbage */
963
964 /* clear any reply-port context */
965 port->ip_reply_context = 0;
966
967 /* check for a backup port */
968 pdrequest = port->ip_pdrequest;
969
970 #if IMPORTANCE_INHERITANCE
971 /* determine how many assertions to drop and from whom */
972 if (port->ip_tempowner != 0) {
973 assert(top);
974 release_imp_task = port->ip_imp_task;
975 if (IIT_NULL != release_imp_task) {
976 port->ip_imp_task = IIT_NULL;
977 assertcnt = port->ip_impcount;
978 }
979 /* Otherwise, nothing to drop */
980 } else {
981 assertcnt = port->ip_impcount;
982 if (pdrequest != IP_NULL) {
983 /* mark in limbo for the journey */
984 port->ip_tempowner = 1;
985 }
986 }
987
988 if (top) {
989 self->ith_assertions = assertcnt;
990 }
991 #endif /* IMPORTANCE_INHERITANCE */
992
993 if (pdrequest != IP_NULL) {
994 /* clear receiver, don't destroy the port */
995 (void)ipc_port_clear_receiver(port, FALSE);
996 assert(port->ip_in_pset == 0);
997 assert(port->ip_mscount == 0);
998
999 /* we assume the ref for pdrequest */
1000 port->ip_pdrequest = IP_NULL;
1001
1002 imq_lock(&port->ip_messages);
1003 watchport_elem = ipc_port_clear_watchport_elem_internal(port);
1004 ipc_port_send_turnstile_recompute_push_locked(port);
1005 /* mqueue and port unlocked */
1006
1007 if (special_reply) {
1008 ipc_port_adjust_special_reply_port(port,
1009 IPC_PORT_ADJUST_SR_ALLOW_SYNC_LINKAGE);
1010 }
1011
1012 if (watchport_elem) {
1013 task_watchport_elem_deallocate(watchport_elem);
1014 watchport_elem = NULL;
1015 }
1016 /* consumes our refs for port and pdrequest */
1017 ipc_notify_port_destroyed(pdrequest, port);
1018
1019 goto drop_assertions;
1020 }
1021
1022 /*
1023 * The mach_msg_* paths don't hold a port lock, they only hold a
1024 * reference to the port object. If a thread raced us and is now
1025 * blocked waiting for message reception on this mqueue (or waiting
1026 * for ipc_mqueue_full), it will never be woken up. We call
1027 * ipc_port_clear_receiver() here, _after_ the port has been marked
1028 * inactive, to wakeup any threads which may be blocked and ensure
1029 * that no other thread can get lost waiting for a wake up on a
1030 * port/mqueue that's been destroyed.
1031 */
1032 boolean_t reap_msgs = FALSE;
1033 reap_msgs = ipc_port_clear_receiver(port, TRUE); /* marks port and mqueue inactive */
1034 assert(port->ip_in_pset == 0);
1035 assert(port->ip_mscount == 0);
1036
1037 imq_lock(&port->ip_messages);
1038 watchport_elem = ipc_port_clear_watchport_elem_internal(port);
1039 imq_unlock(&port->ip_messages);
1040 nsrequest = port->ip_nsrequest;
1041
1042 /*
1043 * If the port has a preallocated message buffer and that buffer
1044 * is not in use, free it. If it has an in-use one, then the kmsg
1045 * free will detect that we freed the association and it can free it
1046 * like a normal buffer.
1047 *
1048 * Once the port is marked inactive we don't need to keep it locked.
1049 */
1050 if (IP_PREALLOC(port)) {
1051 ipc_port_t inuse_port;
1052
1053 kmsg = port->ip_premsg;
1054 assert(kmsg != IKM_NULL);
1055 inuse_port = ikm_prealloc_inuse_port(kmsg);
1056 ipc_kmsg_clear_prealloc(kmsg, port);
1057
1058 imq_lock(&port->ip_messages);
1059 ipc_port_send_turnstile_recompute_push_locked(port);
1060 /* mqueue and port unlocked */
1061
1062 if (inuse_port != IP_NULL) {
1063 assert(inuse_port == port);
1064 } else {
1065 ipc_kmsg_free(kmsg);
1066 }
1067 } else {
1068 imq_lock(&port->ip_messages);
1069 ipc_port_send_turnstile_recompute_push_locked(port);
1070 /* mqueue and port unlocked */
1071 }
1072
1073 /* Deallocate the watchport element */
1074 if (watchport_elem) {
1075 task_watchport_elem_deallocate(watchport_elem);
1076 watchport_elem = NULL;
1077 }
1078
1079 /* unlink the kmsg from special reply port */
1080 if (special_reply) {
1081 ipc_port_adjust_special_reply_port(port,
1082 IPC_PORT_ADJUST_SR_ALLOW_SYNC_LINKAGE);
1083 }
1084
1085 /* throw away no-senders request */
1086 if (nsrequest != IP_NULL) {
1087 ipc_notify_send_once(nsrequest); /* consumes ref */
1088 }
1089 /*
1090 * Reap any kmsg objects waiting to be destroyed.
1091 * This must be done after we've released the port lock.
1092 */
1093 if (reap_msgs) {
1094 ipc_kmsg_reap_delayed();
1095 }
1096
1097 mqueue = &port->ip_messages;
1098
1099 /* cleanup waitq related resources */
1100 ipc_mqueue_deinit(mqueue);
1101
1102 /* generate dead-name notifications */
1103 ipc_port_dnnotify(port);
1104
1105 ipc_kobject_destroy(port);
1106
1107 ip_release(port); /* consume caller's ref */
1108
1109 drop_assertions:
1110 #if IMPORTANCE_INHERITANCE
1111 if (release_imp_task != IIT_NULL) {
1112 if (assertcnt > 0) {
1113 assert(top);
1114 self->ith_assertions = 0;
1115 assert(ipc_importance_task_is_any_receiver_type(release_imp_task));
1116 ipc_importance_task_drop_internal_assertion(release_imp_task, assertcnt);
1117 }
1118 ipc_importance_task_release(release_imp_task);
1119 } else if (assertcnt > 0) {
1120 if (top) {
1121 self->ith_assertions = 0;
1122 release_imp_task = current_task()->task_imp_base;
1123 if (ipc_importance_task_is_any_receiver_type(release_imp_task)) {
1124 ipc_importance_task_drop_internal_assertion(release_imp_task, assertcnt);
1125 }
1126 }
1127 }
1128 #endif /* IMPORTANCE_INHERITANCE */
1129 }
1130
1131 /*
1132 * Routine: ipc_port_destination_chain_lock
1133 * Purpose:
1134 * Search for the end of the chain (a port not in transit),
1135 * acquiring locks along the way, and return it in `base`.
1136 *
1137 * Returns true if a reference was taken on `base`
1138 *
1139 * Conditions:
1140 * No ports locked.
1141 * ipc_port_multiple_lock held.
1142 */
1143 boolean_t
1144 ipc_port_destination_chain_lock(
1145 ipc_port_t port,
1146 ipc_port_t *base)
1147 {
1148 for (;;) {
1149 ip_lock(port);
1150
1151 if (!ip_active(port)) {
1152 /*
1153 * Active ports that are ip_lock()ed cannot go away.
1154 *
1155 * But inactive ports at the end of walking
1156 * an ip_destination chain are only protected
1157 * from space termination cleanup while the entire
1158 * chain of ports leading to them is held.
1159 *
1160 * Callers of this code tend to unlock the chain
1161 * in the same order as this walk, which doesn't
1162 * protect `base` properly when it's inactive.
1163 *
1164 * In that case, take a reference that the caller
1165 * is responsible for releasing.
1166 */
1167 ip_reference(port);
1168 *base = port;
1169 return true;
1170 }
1171 if ((port->ip_receiver_name != MACH_PORT_NULL) ||
1172 (port->ip_destination == IP_NULL)) {
1173 *base = port;
1174 return false;
1175 }
1176
1177 port = port->ip_destination;
1178 }
1179 }
1180
1181
1182 /*
1183 * Routine: ipc_port_check_circularity
1184 * Purpose:
1185 * Check if queueing "port" in a message for "dest"
1186 * would create a circular group of ports and messages.
1187 *
1188 * If no circularity (FALSE returned), then "port"
1189 * is changed from "in limbo" to "in transit".
1190 *
1191 * That is, we want to set port->ip_destination == dest,
1192 * but guaranteeing that this doesn't create a circle
1193 * port->ip_destination->ip_destination->... == port
1194 *
1195 * Conditions:
1196 * No ports locked. References held for "port" and "dest".
1197 */
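/*
 * Worked example: suppose A is in transit in a message queued on B, and B
 * is in transit on C, where C is not itself in transit (A -> B -> C).
 * Queueing C's receive right in a message destined for A (port == C,
 * dest == A) must fail: walking dest's ip_destination chain A -> B -> C
 * ends at base == C == port, which is exactly the circularity this
 * routine detects.
 */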
1198
1199 boolean_t
1200 ipc_port_check_circularity(
1201 ipc_port_t port,
1202 ipc_port_t dest)
1203 {
1204 #if IMPORTANCE_INHERITANCE
1205 /* adjust importance counts at the same time */
1206 return ipc_importance_check_circularity(port, dest);
1207 #else
1208 ipc_port_t base;
1209 struct task_watchport_elem *watchport_elem = NULL;
1210 bool took_base_ref = false;
1211
1212 assert(port != IP_NULL);
1213 assert(dest != IP_NULL);
1214
1215 if (port == dest) {
1216 return TRUE;
1217 }
1218 base = dest;
1219
1220 /* Check if destination needs a turnstile */
1221 ipc_port_send_turnstile_prepare(dest);
1222
1223 /*
1224 * First try a quick check that can run in parallel.
1225 * No circularity if dest is not in transit.
1226 */
1227 ip_lock(port);
1228 if (ip_lock_try(dest)) {
1229 if (!ip_active(dest) ||
1230 (dest->ip_receiver_name != MACH_PORT_NULL) ||
1231 (dest->ip_destination == IP_NULL)) {
1232 goto not_circular;
1233 }
1234
1235 /* dest is in transit; further checking necessary */
1236
1237 ip_unlock(dest);
1238 }
1239 ip_unlock(port);
1240
1241 ipc_port_multiple_lock(); /* massive serialization */
1242
1243 /*
1244 * Search for the end of the chain (a port not in transit),
1245 * acquiring locks along the way.
1246 */
1247
1248 took_base_ref = ipc_port_destination_chain_lock(dest, &base);
1249 /* all ports in chain from dest to base, inclusive, are locked */
1250
1251 if (port == base) {
1252 /* circularity detected! */
1253
1254 ipc_port_multiple_unlock();
1255
1256 /* port (== base) is in limbo */
1257 require_ip_active(port);
1258 assert(port->ip_receiver_name == MACH_PORT_NULL);
1259 assert(port->ip_destination == IP_NULL);
1260 assert(!took_base_ref);
1261
1262 base = dest;
1263 while (base != IP_NULL) {
1264 ipc_port_t next;
1265
1266 /* dest is in transit or in limbo */
1267 require_ip_active(base);
1268 assert(base->ip_receiver_name == MACH_PORT_NULL);
1269
1270 next = base->ip_destination;
1271 ip_unlock(base);
1272 base = next;
1273 }
1274
1275 ipc_port_send_turnstile_complete(dest);
1276 return TRUE;
1277 }
1278
1279 /*
1280 * The guarantee: lock port while the entire chain is locked.
1281 * Once port is locked, we can take a reference to dest,
1282 * add port to the chain, and unlock everything.
1283 */
1284
1285 ip_lock(port);
1286 ipc_port_multiple_unlock();
1287
1288 not_circular:
1289 imq_lock(&port->ip_messages);
1290
1291 /* port is in limbo */
1292 require_ip_active(port);
1293 assert(port->ip_receiver_name == MACH_PORT_NULL);
1294 assert(port->ip_destination == IP_NULL);
1295
1296 /* Clear the watchport boost */
1297 watchport_elem = ipc_port_clear_watchport_elem_internal(port);
1298
1299 /* Check if the port is being enqueued as a part of sync bootstrap checkin */
1300 if (dest->ip_specialreply && dest->ip_sync_bootstrap_checkin) {
1301 port->ip_sync_bootstrap_checkin = 1;
1302 }
1303
1304 ip_reference(dest);
1305 port->ip_destination = dest;
1306
1307 /* Setup linkage for source port if it has sync ipc push */
1308 struct turnstile *send_turnstile = TURNSTILE_NULL;
1309 if (port_send_turnstile(port)) {
1310 send_turnstile = turnstile_prepare((uintptr_t)port,
1311 port_send_turnstile_address(port),
1312 TURNSTILE_NULL, TURNSTILE_SYNC_IPC);
1313
1314 /*
1315 * What ipc_port_adjust_port_locked would do,
1316 * but we need to also drop even more locks before
1317 * calling turnstile_update_inheritor_complete().
1318 */
1319 ipc_port_adjust_sync_link_state_locked(port, PORT_SYNC_LINK_ANY, NULL);
1320
1321 turnstile_update_inheritor(send_turnstile, port_send_turnstile(dest),
1322 (TURNSTILE_INHERITOR_TURNSTILE | TURNSTILE_IMMEDIATE_UPDATE));
1323
1324 /* update complete and turnstile complete called after dropping all locks */
1325 }
1326 imq_unlock(&port->ip_messages);
1327
1328 /* now unlock chain */
1329
1330 ip_unlock(port);
1331
1332 for (;;) {
1333 ipc_port_t next;
1334
1335 if (dest == base) {
1336 break;
1337 }
1338
1339 /* port is in transit */
1340 require_ip_active(dest);
1341 assert(dest->ip_receiver_name == MACH_PORT_NULL);
1342 assert(dest->ip_destination != IP_NULL);
1343
1344 next = dest->ip_destination;
1345 ip_unlock(dest);
1346 dest = next;
1347 }
1348
1349 /* base is not in transit */
1350 assert(!ip_active(base) ||
1351 (base->ip_receiver_name != MACH_PORT_NULL) ||
1352 (base->ip_destination == IP_NULL));
1353
1354 ip_unlock(base);
1355 if (took_base_ref) {
1356 ip_release(base);
1357 }
1358
1359 /* All locks dropped, call turnstile_update_inheritor_complete for source port's turnstile */
1360 if (send_turnstile) {
1361 turnstile_update_inheritor_complete(send_turnstile, TURNSTILE_INTERLOCK_NOT_HELD);
1362
1363 /* Take the mq lock to call turnstile complete */
1364 imq_lock(&port->ip_messages);
1365 turnstile_complete((uintptr_t)port, port_send_turnstile_address(port), NULL, TURNSTILE_SYNC_IPC);
1366 send_turnstile = TURNSTILE_NULL;
1367 imq_unlock(&port->ip_messages);
1368 turnstile_cleanup();
1369 }
1370
1371 if (watchport_elem) {
1372 task_watchport_elem_deallocate(watchport_elem);
1373 }
1374
1375 return FALSE;
1376 #endif /* !IMPORTANCE_INHERITANCE */
1377 }
1378
1379 /*
1380 * Routine: ipc_port_watchport_elem
1381 * Purpose:
1382 * Get the port's watchport elem field
1383 *
1384 * Conditions:
1385 * mqueue locked
1386 */
1387 static struct task_watchport_elem *
1388 ipc_port_watchport_elem(ipc_port_t port)
1389 {
1390 return port->ip_messages.imq_wait_queue.waitq_tspriv;
1391 }
1392
1393 /*
1394 * Routine: ipc_port_update_watchport_elem
1395 * Purpose:
1396 * Set the port's watchport elem field
1397 *
1398 * Conditions:
1399 * mqueue locked
1400 */
1401 static inline struct task_watchport_elem *
1402 ipc_port_update_watchport_elem(ipc_port_t port, struct task_watchport_elem *we)
1403 {
1404 assert(!port->ip_specialreply);
1405 struct task_watchport_elem *old_we = ipc_port_watchport_elem(port);
1406 port->ip_messages.imq_wait_queue.waitq_tspriv = we;
1407 return old_we;
1408 }
1409
1410 /*
1411 * Routine: ipc_special_reply_stash_pid_locked
1412 * Purpose:
1413 * Set the pid of the process that copied out the send-once right to the special reply port.
1414 *
1415 * Conditions:
1416 * port locked
1417 */
1418 static inline void
1419 ipc_special_reply_stash_pid_locked(ipc_port_t port, int pid)
1420 {
1421 assert(port->ip_specialreply);
1422 port->ip_messages.imq_wait_queue.waitq_priv_pid = pid;
1423 return;
1424 }
1425
1426 /*
1427 * Routine: ipc_special_reply_get_pid_locked
1428 * Purpose:
1429 * Get the pid of the process that copied out the send-once right to the special reply port.
1430 *
1431 * Conditions:
1432 * port locked
1433 */
1434 int
1435 ipc_special_reply_get_pid_locked(ipc_port_t port)
1436 {
1437 assert(port->ip_specialreply);
1438 return port->ip_messages.imq_wait_queue.waitq_priv_pid;
1439 }
1440
1441 /*
1442 * Update the recv turnstile inheritor for a port.
1443 *
1444 * Sync IPC through the port receive turnstile only happens for the special
1445 * reply port case. It has three sub-cases:
1446 *
1447 * 1. a send-once right is in transit, and pushes on the send turnstile of its
1448 * destination mqueue.
1449 *
1450 * 2. a send-once right has been stashed on a knote it was copied out "through",
1451 * as the first such copied out port.
1452 *
1453 * 3. a send-once right has been stashed on a knote it was copied out "through",
1454 * as the second or more copied out port.
1455 */
1456 void
1457 ipc_port_recv_update_inheritor(
1458 ipc_port_t port,
1459 struct turnstile *rcv_turnstile,
1460 turnstile_update_flags_t flags)
1461 {
1462 struct turnstile *inheritor = TURNSTILE_NULL;
1463 struct knote *kn;
1464
1465 if (ip_active(port) && port->ip_specialreply) {
1466 imq_held(&port->ip_messages);
1467
1468 switch (port->ip_sync_link_state) {
1469 case PORT_SYNC_LINK_PORT:
1470 if (port->ip_sync_inheritor_port != NULL) {
1471 inheritor = port_send_turnstile(port->ip_sync_inheritor_port);
1472 }
1473 break;
1474
1475 case PORT_SYNC_LINK_WORKLOOP_KNOTE:
1476 kn = port->ip_sync_inheritor_knote;
1477 inheritor = filt_ipc_kqueue_turnstile(kn);
1478 break;
1479
1480 case PORT_SYNC_LINK_WORKLOOP_STASH:
1481 inheritor = port->ip_sync_inheritor_ts;
1482 break;
1483 }
1484 }
1485
1486 turnstile_update_inheritor(rcv_turnstile, inheritor,
1487 flags | TURNSTILE_INHERITOR_TURNSTILE);
1488 }
1489
1490 /*
1491 * Update the send turnstile inheritor for a port.
1492 *
1493 * Sync IPC through the port send turnstile has 7 possible reasons to be linked:
1494 *
1495 * 1. a special reply port is part of sync ipc for bootstrap checkin and needs
1496 * to push on thread doing the sync ipc.
1497 *
1498 * 2. a receive right is in transit, and pushes on the send turnstile of its
1499 * destination mqueue.
1500 *
1501 * 3. port was passed as an exec watchport and is pushing on the main
1502 * thread of the task.
1503 *
1504 * 4. a receive right has been stashed on a knote it was copied out "through",
1505 * as the first such copied out port (same as PORT_SYNC_LINK_WORKLOOP_KNOTE
1506 * for the special reply port)
1507 *
1508 * 5. a receive right has been stashed on a knote it was copied out "through",
1509 * as the second or more copied out port (same as
1510 * PORT_SYNC_LINK_WORKLOOP_STASH for the special reply port)
1511 *
1512 * 6. a receive right has been copied out as a part of sync bootstrap checkin
1513 * and needs to push on thread doing the sync bootstrap checkin.
1514 *
1515 * 7. the receive right is monitored by a knote, and pushes on any that is
1516 * registered on a workloop. filt_machport makes sure that if such a knote
1517 * exists, it is kept as the first item in the knote list, so we never need
1518 * to walk.
1519 */
1520 void
1521 ipc_port_send_update_inheritor(
1522 ipc_port_t port,
1523 struct turnstile *send_turnstile,
1524 turnstile_update_flags_t flags)
1525 {
1526 ipc_mqueue_t mqueue = &port->ip_messages;
1527 turnstile_inheritor_t inheritor = TURNSTILE_INHERITOR_NULL;
1528 struct knote *kn;
1529 turnstile_update_flags_t inheritor_flags = TURNSTILE_INHERITOR_TURNSTILE;
1530
1531 assert(imq_held(mqueue));
1532
1533 if (!ip_active(port)) {
1534 /* this port is no longer active, it should not push anywhere */
1535 } else if (port->ip_specialreply) {
1536 /* Case 1. */
1537 if (port->ip_sync_bootstrap_checkin && prioritize_launch) {
1538 inheritor = port->ip_messages.imq_srp_owner_thread;
1539 inheritor_flags = TURNSTILE_INHERITOR_THREAD;
1540 }
1541 } else if (port->ip_receiver_name == MACH_PORT_NULL &&
1542 port->ip_destination != IP_NULL) {
1543 /* Case 2. */
1544 inheritor = port_send_turnstile(port->ip_destination);
1545 } else if (ipc_port_watchport_elem(port) != NULL) {
1546 /* Case 3. */
1547 if (prioritize_launch) {
1548 assert(port->ip_sync_link_state == PORT_SYNC_LINK_ANY);
1549 inheritor = ipc_port_get_watchport_inheritor(port);
1550 inheritor_flags = TURNSTILE_INHERITOR_THREAD;
1551 }
1552 } else if (port->ip_sync_link_state == PORT_SYNC_LINK_WORKLOOP_KNOTE) {
1553 /* Case 4. */
1554 inheritor = filt_ipc_kqueue_turnstile(mqueue->imq_inheritor_knote);
1555 } else if (port->ip_sync_link_state == PORT_SYNC_LINK_WORKLOOP_STASH) {
1556 /* Case 5. */
1557 inheritor = mqueue->imq_inheritor_turnstile;
1558 } else if (port->ip_sync_link_state == PORT_SYNC_LINK_RCV_THREAD) {
1559 /* Case 6. */
1560 if (prioritize_launch) {
1561 inheritor = port->ip_messages.imq_inheritor_thread_ref;
1562 inheritor_flags = TURNSTILE_INHERITOR_THREAD;
1563 }
1564 } else if ((kn = SLIST_FIRST(&mqueue->imq_klist))) {
1565 /* Case 7. Push on a workloop that is interested */
1566 if (filt_machport_kqueue_has_turnstile(kn)) {
1567 assert(port->ip_sync_link_state == PORT_SYNC_LINK_ANY);
1568 inheritor = filt_ipc_kqueue_turnstile(kn);
1569 }
1570 }
1571
1572 turnstile_update_inheritor(send_turnstile, inheritor,
1573 flags | inheritor_flags);
1574 }
1575
1576 /*
1577 * Routine: ipc_port_send_turnstile_prepare
1578 * Purpose:
1579 * Get a reference on port's send turnstile, if
1580 * port does not have a send turnstile then allocate one.
1581 *
1582 * Conditions:
1583 * Nothing is locked.
1584 */
1585 void
1586 ipc_port_send_turnstile_prepare(ipc_port_t port)
1587 {
1588 struct turnstile *turnstile = TURNSTILE_NULL;
1589 struct turnstile *send_turnstile = TURNSTILE_NULL;
1590
1591 retry_alloc:
1592 imq_lock(&port->ip_messages);
1593
1594 if (port_send_turnstile(port) == NULL ||
1595 port_send_turnstile(port)->ts_port_ref == 0) {
1596 if (turnstile == TURNSTILE_NULL) {
1597 imq_unlock(&port->ip_messages);
1598 turnstile = turnstile_alloc();
1599 goto retry_alloc;
1600 }
1601
1602 send_turnstile = turnstile_prepare((uintptr_t)port,
1603 port_send_turnstile_address(port),
1604 turnstile, TURNSTILE_SYNC_IPC);
1605 turnstile = TURNSTILE_NULL;
1606
1607 ipc_port_send_update_inheritor(port, send_turnstile,
1608 TURNSTILE_IMMEDIATE_UPDATE);
1609
1610 /* turnstile complete will be called in ipc_port_send_turnstile_complete */
1611 }
1612
1613 /* Increment turnstile counter */
1614 port_send_turnstile(port)->ts_port_ref++;
1615 imq_unlock(&port->ip_messages);
1616
1617 if (send_turnstile) {
1618 turnstile_update_inheritor_complete(send_turnstile,
1619 TURNSTILE_INTERLOCK_NOT_HELD);
1620 }
1621 if (turnstile != TURNSTILE_NULL) {
1622 turnstile_deallocate(turnstile);
1623 }
1624 }
1625
1626
1627 /*
1628 * Routine: ipc_port_send_turnstile_complete
1629 * Purpose:
1630 * Drop a ref on the port's send turnstile, if the
1631 * ref becomes zero, deallocate the turnstile.
1632 *
1633 * Conditions:
1634 * The space might be locked, use safe deallocate.
1635 */
1636 void
1637 ipc_port_send_turnstile_complete(ipc_port_t port)
1638 {
1639 struct turnstile *turnstile = TURNSTILE_NULL;
1640
1641 /* Drop turnstile count on dest port */
1642 imq_lock(&port->ip_messages);
1643
1644 port_send_turnstile(port)->ts_port_ref--;
1645 if (port_send_turnstile(port)->ts_port_ref == 0) {
1646 turnstile_complete((uintptr_t)port, port_send_turnstile_address(port),
1647 &turnstile, TURNSTILE_SYNC_IPC);
1648 assert(turnstile != TURNSTILE_NULL);
1649 }
1650 imq_unlock(&port->ip_messages);
1651 turnstile_cleanup();
1652
1653 if (turnstile != TURNSTILE_NULL) {
1654 turnstile_deallocate_safe(turnstile);
1655 turnstile = TURNSTILE_NULL;
1656 }
1657 }
1658
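/*
 * Example (illustrative sketch): how a sync sender brackets its wait so
 * that its priority is donated to the port's current inheritor for the
 * duration of the send.
 */
static void __unused
example_sync_send_boost(ipc_port_t dest)
{
	/* takes a ts_port_ref, allocating the send turnstile if needed */
	ipc_port_send_turnstile_prepare(dest);

	/*
	 * ... a real sender would enqueue its kmsg here and, if it must
	 * wait (e.g. the queue is full), block on port_send_turnstile(dest)
	 * so the inheritor chain set up above pushes on its behalf ...
	 */

	/* drops the ref; the turnstile is freed when the last sender leaves */
	ipc_port_send_turnstile_complete(dest);
}
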
1659 /*
1660 * Routine: ipc_port_rcv_turnstile
1661 * Purpose:
1662 * Get the port's receive turnstile
1663 *
1664 * Conditions:
1665 * mqueue locked or thread waiting on turnstile is locked.
1666 */
1667 static struct turnstile *
1668 ipc_port_rcv_turnstile(ipc_port_t port)
1669 {
1670 return *port_rcv_turnstile_address(port);
1671 }
1672
1673
1674 /*
1675 * Routine: ipc_port_link_special_reply_port
1676 * Purpose:
1677 * Link the special reply port with the destination port.
1678 * Allocates turnstile to dest port.
1679 *
1680 * Conditions:
1681 * Nothing is locked.
1682 */
1683 void
1684 ipc_port_link_special_reply_port(
1685 ipc_port_t special_reply_port,
1686 ipc_port_t dest_port,
1687 boolean_t sync_bootstrap_checkin)
1688 {
1689 boolean_t drop_turnstile_ref = FALSE;
1690 boolean_t special_reply = FALSE;
1691
1692 /* Check if dest_port needs a turnstile */
1693 ipc_port_send_turnstile_prepare(dest_port);
1694
1695 /* Lock the special reply port and establish the linkage */
1696 ip_lock(special_reply_port);
1697 imq_lock(&special_reply_port->ip_messages);
1698
1699 special_reply = special_reply_port->ip_specialreply;
1700
1701 if (sync_bootstrap_checkin && special_reply) {
1702 special_reply_port->ip_sync_bootstrap_checkin = 1;
1703 }
1704
1705 /* Check if we need to drop the acquired turnstile ref on dest port */
1706 if (!special_reply ||
1707 special_reply_port->ip_sync_link_state != PORT_SYNC_LINK_ANY ||
1708 special_reply_port->ip_sync_inheritor_port != IPC_PORT_NULL) {
1709 drop_turnstile_ref = TRUE;
1710 } else {
1711 /* take a reference on dest_port */
1712 ip_reference(dest_port);
1713 special_reply_port->ip_sync_inheritor_port = dest_port;
1714 special_reply_port->ip_sync_link_state = PORT_SYNC_LINK_PORT;
1715 }
1716
1717 imq_unlock(&special_reply_port->ip_messages);
1718 ip_unlock(special_reply_port);
1719
1720 if (special_reply) {
1721 /*
1722 * For special reply ports, if the destination port is
1723 * marked with the thread group blocked tracking flag,
1724 * callout to the performance controller.
1725 */
1726 ipc_port_thread_group_blocked(dest_port);
1727 }
1728
1729 if (drop_turnstile_ref) {
1730 ipc_port_send_turnstile_complete(dest_port);
1731 }
1732
1733 return;
1734 }
1735
1736 /*
1737 * Routine: ipc_port_thread_group_blocked
1738 * Purpose:
1739 * Call thread_group_blocked callout if the port
1740 * has ip_tg_block_tracking bit set and the thread
1741 * has not made this callout already.
1742 *
1743 * Conditions:
1744 * Nothing is locked.
1745 */
1746 void
1747 ipc_port_thread_group_blocked(ipc_port_t port __unused)
1748 {
1749 #if CONFIG_THREAD_GROUPS
1750 bool port_tg_block_tracking = false;
1751 thread_t self = current_thread();
1752
1753 if (self->thread_group == NULL ||
1754 (self->options & TH_OPT_IPC_TG_BLOCKED)) {
1755 return;
1756 }
1757
1758 port_tg_block_tracking = port->ip_tg_block_tracking;
1759 if (!port_tg_block_tracking) {
1760 return;
1761 }
1762
1763 machine_thread_group_blocked(self->thread_group, NULL,
1764 PERFCONTROL_CALLOUT_BLOCKING_TG_RENDER_SERVER, self);
1765
1766 self->options |= TH_OPT_IPC_TG_BLOCKED;
1767 #endif
1768 }
1769
1770 /*
1771 * Routine: ipc_port_thread_group_unblocked
1772 * Purpose:
1773 * Call thread_group_unblocked callout if the
1774 * thread had previously made a thread_group_blocked
1775 * callout before (indicated by TH_OPT_IPC_TG_BLOCKED
1776 * flag on the thread).
1777 *
1778 * Conditions:
1779 * Nothing is locked.
1780 */
1781 void
1782 ipc_port_thread_group_unblocked(void)
1783 {
1784 #if CONFIG_THREAD_GROUPS
1785 thread_t self = current_thread();
1786
1787 if (!(self->options & TH_OPT_IPC_TG_BLOCKED)) {
1788 return;
1789 }
1790
1791 machine_thread_group_unblocked(self->thread_group, NULL,
1792 PERFCONTROL_CALLOUT_BLOCKING_TG_RENDER_SERVER, self);
1793
1794 self->options &= ~TH_OPT_IPC_TG_BLOCKED;
1795 #endif
1796 }
1797
1798 #if DEVELOPMENT || DEBUG
1799 inline void
1800 ipc_special_reply_port_bits_reset(ipc_port_t special_reply_port)
1801 {
1802 special_reply_port->ip_srp_lost_link = 0;
1803 special_reply_port->ip_srp_msg_sent = 0;
1804 }
1805
1806 static inline void
1807 ipc_special_reply_port_msg_sent_reset(ipc_port_t special_reply_port)
1808 {
1809 if (special_reply_port->ip_specialreply == 1) {
1810 special_reply_port->ip_srp_msg_sent = 0;
1811 }
1812 }
1813
1814 inline void
1815 ipc_special_reply_port_msg_sent(ipc_port_t special_reply_port)
1816 {
1817 if (special_reply_port->ip_specialreply == 1) {
1818 special_reply_port->ip_srp_msg_sent = 1;
1819 }
1820 }
1821
1822 static inline void
1823 ipc_special_reply_port_lost_link(ipc_port_t special_reply_port)
1824 {
1825 if (special_reply_port->ip_specialreply == 1 && special_reply_port->ip_srp_msg_sent == 0) {
1826 special_reply_port->ip_srp_lost_link = 1;
1827 }
1828 }
1829
1830 #else /* DEVELOPMENT || DEBUG */
1831 inline void
1832 ipc_special_reply_port_bits_reset(__unused ipc_port_t special_reply_port)
1833 {
1834 return;
1835 }
1836
1837 static inline void
1838 ipc_special_reply_port_msg_sent_reset(__unused ipc_port_t special_reply_port)
1839 {
1840 return;
1841 }
1842
1843 inline void
1844 ipc_special_reply_port_msg_sent(__unused ipc_port_t special_reply_port)
1845 {
1846 return;
1847 }
1848
1849 static inline void
1850 ipc_special_reply_port_lost_link(__unused ipc_port_t special_reply_port)
1851 {
1852 return;
1853 }
1854 #endif /* DEVELOPMENT || DEBUG */
1855
1856 /*
1857 * Routine: ipc_port_adjust_special_reply_port_locked
1858 * Purpose:
1859 * If the special port has a turnstile, update its inheritor.
1860 * Condition:
1861 * Special reply port locked on entry.
1862 * Special reply port unlocked on return.
1863 * The passed in port is a special reply port.
1864 * Returns:
1865 * None.
1866 */
1867 void
1868 ipc_port_adjust_special_reply_port_locked(
1869 ipc_port_t special_reply_port,
1870 struct knote *kn,
1871 uint8_t flags,
1872 boolean_t get_turnstile)
1873 {
1874 ipc_port_t dest_port = IPC_PORT_NULL;
1875 int sync_link_state = PORT_SYNC_LINK_NO_LINKAGE;
1876 turnstile_inheritor_t inheritor = TURNSTILE_INHERITOR_NULL;
1877 struct turnstile *ts = TURNSTILE_NULL;
1878
1879 ip_lock_held(special_reply_port); // ip_sync_link_state is touched
1880 imq_lock(&special_reply_port->ip_messages);
1881
1882 if (!special_reply_port->ip_specialreply) {
1883 // only mach_msg_receive_results_complete() calls this with any port
1884 assert(get_turnstile);
1885 goto not_special;
1886 }
1887
1888 if (flags & IPC_PORT_ADJUST_SR_RECEIVED_MSG) {
1889 ipc_special_reply_port_msg_sent_reset(special_reply_port);
1890 }
1891
1892 if (flags & IPC_PORT_ADJUST_UNLINK_THREAD) {
1893 special_reply_port->ip_messages.imq_srp_owner_thread = NULL;
1894 }
1895
1896 if (flags & IPC_PORT_ADJUST_RESET_BOOSTRAP_CHECKIN) {
1897 special_reply_port->ip_sync_bootstrap_checkin = 0;
1898 }
1899
1900 /* Check if the special reply port is marked non-special */
1901 if (special_reply_port->ip_sync_link_state == PORT_SYNC_LINK_ANY) {
1902 not_special:
1903 if (get_turnstile) {
1904 turnstile_complete((uintptr_t)special_reply_port,
1905 port_rcv_turnstile_address(special_reply_port), NULL, TURNSTILE_SYNC_IPC);
1906 }
1907 imq_unlock(&special_reply_port->ip_messages);
1908 ip_unlock(special_reply_port);
1909 if (get_turnstile) {
1910 turnstile_cleanup();
1911 }
1912 return;
1913 }
1914
1915 if (flags & IPC_PORT_ADJUST_SR_LINK_WORKLOOP) {
1916 if (ITH_KNOTE_VALID(kn, MACH_MSG_TYPE_PORT_SEND_ONCE)) {
1917 inheritor = filt_machport_stash_port(kn, special_reply_port,
1918 &sync_link_state);
1919 }
1920 } else if (flags & IPC_PORT_ADJUST_SR_ALLOW_SYNC_LINKAGE) {
1921 sync_link_state = PORT_SYNC_LINK_ANY;
1922 }
1923
1924 /* Check if we need to break the linkage */
1925 if (!get_turnstile && sync_link_state == PORT_SYNC_LINK_NO_LINKAGE &&
1926 special_reply_port->ip_sync_link_state == PORT_SYNC_LINK_NO_LINKAGE) {
1927 imq_unlock(&special_reply_port->ip_messages);
1928 ip_unlock(special_reply_port);
1929 return;
1930 }
1931
1932 switch (special_reply_port->ip_sync_link_state) {
1933 case PORT_SYNC_LINK_PORT:
1934 dest_port = special_reply_port->ip_sync_inheritor_port;
1935 special_reply_port->ip_sync_inheritor_port = IPC_PORT_NULL;
1936 break;
1937 case PORT_SYNC_LINK_WORKLOOP_KNOTE:
1938 special_reply_port->ip_sync_inheritor_knote = NULL;
1939 break;
1940 case PORT_SYNC_LINK_WORKLOOP_STASH:
1941 special_reply_port->ip_sync_inheritor_ts = NULL;
1942 break;
1943 }
1944
1945 /*
1946 * Stash (or unstash) the server's PID in the waitq_priv_pid field of the
1947 * special reply port's wait queue, so that stackshot can later retrieve
1948 * who the client is blocked on.
1949 */
1950 if (special_reply_port->ip_sync_link_state == PORT_SYNC_LINK_PORT &&
1951 sync_link_state == PORT_SYNC_LINK_NO_LINKAGE) {
1952 ipc_special_reply_stash_pid_locked(special_reply_port, pid_from_task(current_task()));
1953 } else if (special_reply_port->ip_sync_link_state == PORT_SYNC_LINK_NO_LINKAGE &&
1954 sync_link_state == PORT_SYNC_LINK_ANY) {
1955 /* If we are resetting the special reply port, remove the stashed pid. */
1956 ipc_special_reply_stash_pid_locked(special_reply_port, 0);
1957 }
1958
1959 special_reply_port->ip_sync_link_state = sync_link_state;
1960
1961 switch (sync_link_state) {
1962 case PORT_SYNC_LINK_WORKLOOP_KNOTE:
1963 special_reply_port->ip_sync_inheritor_knote = kn;
1964 break;
1965 case PORT_SYNC_LINK_WORKLOOP_STASH:
1966 special_reply_port->ip_sync_inheritor_ts = inheritor;
1967 break;
1968 case PORT_SYNC_LINK_NO_LINKAGE:
1969 if (flags & IPC_PORT_ADJUST_SR_ENABLE_EVENT) {
1970 ipc_special_reply_port_lost_link(special_reply_port);
1971 }
1972 break;
1973 }
1974
1975 /* Get thread's turnstile donated to special reply port */
1976 if (get_turnstile) {
1977 turnstile_complete((uintptr_t)special_reply_port,
1978 port_rcv_turnstile_address(special_reply_port), NULL, TURNSTILE_SYNC_IPC);
1979 } else {
1980 ts = ipc_port_rcv_turnstile(special_reply_port);
1981 if (ts) {
1982 turnstile_reference(ts);
1983 ipc_port_recv_update_inheritor(special_reply_port, ts,
1984 TURNSTILE_IMMEDIATE_UPDATE);
1985 }
1986 }
1987
1988 imq_unlock(&special_reply_port->ip_messages);
1989 ip_unlock(special_reply_port);
1990
1991 if (get_turnstile) {
1992 turnstile_cleanup();
1993 } else if (ts) {
1994 /* Call turnstile cleanup after dropping the interlock */
1995 turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_NOT_HELD);
1996 turnstile_deallocate_safe(ts);
1997 }
1998
1999 /* Release the ref on the dest port and its turnstile */
2000 if (dest_port) {
2001 ipc_port_send_turnstile_complete(dest_port);
2002 /* release the reference on the dest port */
2003 ip_release(dest_port);
2004 }
2005 }
2006
2007 /*
2008 * Routine: ipc_port_adjust_special_reply_port
2009 * Purpose:
2010 * If the special reply port has a turnstile, update its inheritor.
2011 * Conditions:
2012 * Nothing locked.
2013 * Returns:
2014 * None.
2015 */
2016 void
2017 ipc_port_adjust_special_reply_port(
2018 ipc_port_t port,
2019 uint8_t flags)
2020 {
2021 if (port->ip_specialreply) {
2022 ip_lock(port);
2023 ipc_port_adjust_special_reply_port_locked(port, NULL, flags, FALSE);
2024 }
2025 }
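/*
 * Illustrative sketch (not part of the original source): clearing a
 * special reply port's sync-IPC state once the reply has arrived.
 * The wrapper name example_special_reply_received() is hypothetical;
 * the flag values are the ones handled above.
 */
#if 0
static void
example_special_reply_received(ipc_port_t reply_port)
{
	/*
	 * No-op unless ip_specialreply is set; resets the msg-sent
	 * tracking and re-allows future sync linkage.
	 */
	ipc_port_adjust_special_reply_port(reply_port,
	    IPC_PORT_ADJUST_SR_RECEIVED_MSG |
	    IPC_PORT_ADJUST_SR_ALLOW_SYNC_LINKAGE);
}
#endif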
2026
2027 /*
2028 * Routine: ipc_port_adjust_sync_link_state_locked
2029 * Purpose:
2030 * Update the sync link state of the port and the
2031 * turnstile inheritor.
2032 * Conditions:
2033 * Port and mqueue locked on entry.
2034 * Port and mqueue locked on return.
2035 * Returns:
2036 * None.
2037 */
2038 void
2039 ipc_port_adjust_sync_link_state_locked(
2040 ipc_port_t port,
2041 int sync_link_state,
2042 turnstile_inheritor_t inheritor)
2043 {
2044 switch (port->ip_sync_link_state) {
2045 case PORT_SYNC_LINK_RCV_THREAD:
2046 /* deallocate the thread reference for the inheritor */
2047 thread_deallocate_safe(port->ip_messages.imq_inheritor_thread_ref);
2048 OS_FALLTHROUGH;
2049 default:
2050 klist_init(&port->ip_messages.imq_klist);
2051 }
2052
2053 switch (sync_link_state) {
2054 case PORT_SYNC_LINK_WORKLOOP_KNOTE:
2055 port->ip_messages.imq_inheritor_knote = inheritor;
2056 break;
2057 case PORT_SYNC_LINK_WORKLOOP_STASH:
2058 port->ip_messages.imq_inheritor_turnstile = inheritor;
2059 break;
2060 case PORT_SYNC_LINK_RCV_THREAD:
2061 /* The thread could exit without clearing the port state, so take a thread ref */
2062 thread_reference((thread_t)inheritor);
2063 port->ip_messages.imq_inheritor_thread_ref = inheritor;
2064 break;
2065 default:
2066 klist_init(&port->ip_messages.imq_klist);
2067 sync_link_state = PORT_SYNC_LINK_ANY;
2068 }
2069
2070 port->ip_sync_link_state = sync_link_state;
2071 }
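/*
 * Illustrative sketch (not part of the original source): severing an
 * existing sync linkage.  Passing PORT_SYNC_LINK_ANY with a NULL
 * inheritor resets the klist and drops any receive-thread reference,
 * per the switch statements above.
 */
#if 0
	ip_lock(port);
	imq_lock(&port->ip_messages);
	ipc_port_adjust_sync_link_state_locked(port, PORT_SYNC_LINK_ANY, NULL);
	imq_unlock(&port->ip_messages);
	ip_unlock(port);
#endif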
2072
2073
2074 /*
2075 * Routine: ipc_port_adjust_port_locked
2076 * Purpose:
2077 * If the port has a turnstile, update its inheritor.
2078 * Conditions:
2079 * Port locked on entry.
2080 * Port unlocked on return.
2081 * Returns:
2082 * None.
2083 */
2084 void
2085 ipc_port_adjust_port_locked(
2086 ipc_port_t port,
2087 struct knote *kn,
2088 boolean_t sync_bootstrap_checkin)
2089 {
2090 int sync_link_state = PORT_SYNC_LINK_ANY;
2091 turnstile_inheritor_t inheritor = TURNSTILE_INHERITOR_NULL;
2092
2093 ip_lock_held(port); // ip_sync_link_state is touched
2094 imq_held(&port->ip_messages);
2095
2096 assert(!port->ip_specialreply);
2097
2098 if (kn) {
2099 inheritor = filt_machport_stash_port(kn, port, &sync_link_state);
2100 if (sync_link_state == PORT_SYNC_LINK_WORKLOOP_KNOTE) {
2101 inheritor = kn;
2102 }
2103 } else if (sync_bootstrap_checkin) {
2104 inheritor = current_thread();
2105 sync_link_state = PORT_SYNC_LINK_RCV_THREAD;
2106 }
2107
2108 ipc_port_adjust_sync_link_state_locked(port, sync_link_state, inheritor);
2109 port->ip_sync_bootstrap_checkin = 0;
2110
2111 ipc_port_send_turnstile_recompute_push_locked(port);
2112 /* port and mqueue unlocked */
2113 }
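/*
 * Illustrative sketch (not part of the original source): a sync
 * bootstrap check-in points the port's push at the checking-in
 * thread.  The port must not be a special reply port.
 */
#if 0
	ip_lock(port);
	imq_lock(&port->ip_messages);
	/* current thread becomes the inheritor; unlocks port and mqueue */
	ipc_port_adjust_port_locked(port, NULL, TRUE);
#endif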
2114
2115 /*
2116 * Routine: ipc_port_clear_sync_rcv_thread_boost_locked
2117 * Purpose:
2118 * If the port is pushing on rcv thread, clear it.
2119 * Conditions:
2120 * Port locked on entry.
2121 * mqueue is not locked.
2122 * Port unlocked on return.
2123 * Returns:
2124 * None.
2125 */
2126 void
2127 ipc_port_clear_sync_rcv_thread_boost_locked(
2128 ipc_port_t port)
2129 {
2130 ip_lock_held(port); // ip_sync_link_state is touched
2131
2132 if (port->ip_sync_link_state != PORT_SYNC_LINK_RCV_THREAD) {
2133 ip_unlock(port);
2134 return;
2135 }
2136
2137 imq_lock(&port->ip_messages);
2138 ipc_port_adjust_sync_link_state_locked(port, PORT_SYNC_LINK_ANY, NULL);
2139
2140 ipc_port_send_turnstile_recompute_push_locked(port);
2141 /* port and mqueue unlocked */
2142 }
2143
2144 /*
2145 * Routine: ipc_port_add_watchport_elem_locked
2146 * Purpose:
2147 * Transfer the turnstile boost of the watchport to the task calling exec.
2148 * Conditions:
2149 * Port locked on entry.
2150 * Port unlocked on return.
2151 * Returns:
2152 * KERN_SUCCESS on success.
2153 * KERN_FAILURE otherwise.
2154 */
2155 kern_return_t
2156 ipc_port_add_watchport_elem_locked(
2157 ipc_port_t port,
2158 struct task_watchport_elem *watchport_elem,
2159 struct task_watchport_elem **old_elem)
2160 {
2161 ip_lock_held(port);
2162 imq_held(&port->ip_messages);
2163
2164 /* Watchport boost only works for non-special active ports mapped in an ipc space */
2165 if (!ip_active(port) || port->ip_specialreply ||
2166 port->ip_receiver_name == MACH_PORT_NULL) {
2167 imq_unlock(&port->ip_messages);
2168 ip_unlock(port);
2169 return KERN_FAILURE;
2170 }
2171
2172 if (port->ip_sync_link_state != PORT_SYNC_LINK_ANY) {
2173 /* Sever the linkage if the port was pushing on knote */
2174 ipc_port_adjust_sync_link_state_locked(port, PORT_SYNC_LINK_ANY, NULL);
2175 }
2176
2177 *old_elem = ipc_port_update_watchport_elem(port, watchport_elem);
2178
2179 ipc_port_send_turnstile_recompute_push_locked(port);
2180 /* port and mqueue unlocked */
2181 return KERN_SUCCESS;
2182 }
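/*
 * Illustrative sketch (not part of the original source): installing a
 * watchport element during exec while remembering any element it
 * displaces.  example_install_watchport() is a hypothetical wrapper.
 */
#if 0
static kern_return_t
example_install_watchport(
	ipc_port_t port,
	struct task_watchport_elem *elem,
	struct task_watchport_elem **displaced)
{
	ip_lock(port);
	imq_lock(&port->ip_messages);
	/* unlocks port and mqueue on both success and failure */
	return ipc_port_add_watchport_elem_locked(port, elem, displaced);
}
#endif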
2183
2184 /*
2185 * Routine: ipc_port_clear_watchport_elem_internal_conditional_locked
2186 * Purpose:
2187 * Remove the turnstile boost of the watchport and recompute the push.
2188 * Conditions:
2189 * Port locked on entry.
2190 * Port unlocked on return.
2191 * Returns:
2192 * KERN_SUCCESS on success.
2193 * KERN_FAILURE otherwise.
2194 */
2195 kern_return_t
2196 ipc_port_clear_watchport_elem_internal_conditional_locked(
2197 ipc_port_t port,
2198 struct task_watchport_elem *watchport_elem)
2199 {
2200 ip_lock_held(port);
2201 imq_held(&port->ip_messages);
2202
2203 if (ipc_port_watchport_elem(port) != watchport_elem) {
2204 imq_unlock(&port->ip_messages);
2205 ip_unlock(port);
2206 return KERN_FAILURE;
2207 }
2208
2209 ipc_port_clear_watchport_elem_internal(port);
2210 ipc_port_send_turnstile_recompute_push_locked(port);
2211 /* port and mqueue unlocked */
2212 return KERN_SUCCESS;
2213 }
2214
2215 /*
2216 * Routine: ipc_port_replace_watchport_elem_conditional_locked
2217 * Purpose:
2218 * Replace the turnstile boost of the watchport and recompute the push.
2219 * Conditions:
2220 * Port locked on entry.
2221 * Port unlocked on return.
2222 * Returns:
2223 * KERN_SUCCESS on success.
2224 * KERN_FAILURE otherwise.
2225 */
2226 kern_return_t
2227 ipc_port_replace_watchport_elem_conditional_locked(
2228 ipc_port_t port,
2229 struct task_watchport_elem *old_watchport_elem,
2230 struct task_watchport_elem *new_watchport_elem)
2231 {
2232 ip_lock_held(port);
2233 imq_held(&port->ip_messages);
2234
2235 if (ipc_port_watchport_elem(port) != old_watchport_elem) {
2236 imq_unlock(&port->ip_messages);
2237 ip_unlock(port);
2238 return KERN_FAILURE;
2239 }
2240
2241 ipc_port_update_watchport_elem(port, new_watchport_elem);
2242 ipc_port_send_turnstile_recompute_push_locked(port);
2243 /* port and mqueue unlocked */
2244 return KERN_SUCCESS;
2245 }
2246
2247 /*
2248 * Routine: ipc_port_clear_watchport_elem_internal
2249 * Purpose:
2250 * Remove the turnstile boost of the watchport.
2251 * Conditions:
2252 * Port locked on entry.
2253 * Port locked on return.
2254 * Returns:
2255 * Old task_watchport_elem returned.
2256 */
2257 struct task_watchport_elem *
2258 ipc_port_clear_watchport_elem_internal(
2259 ipc_port_t port)
2260 {
2261 ip_lock_held(port);
2262 imq_held(&port->ip_messages);
2263
2264 if (port->ip_specialreply) {
2265 return NULL;
2266 }
2267
2268 return ipc_port_update_watchport_elem(port, NULL);
2269 }
2270
2271 /*
2272 * Routine: ipc_port_send_turnstile_recompute_push_locked
2273 * Purpose:
2274 * Update send turnstile inheritor of port and recompute the push.
2275 * Conditions:
2276 * Port locked on entry.
2277 * Port unlocked on return.
2278 * Returns:
2279 * None.
2280 */
2281 static void
2282 ipc_port_send_turnstile_recompute_push_locked(
2283 ipc_port_t port)
2284 {
2285 struct turnstile *send_turnstile = port_send_turnstile(port);
2286 if (send_turnstile) {
2287 turnstile_reference(send_turnstile);
2288 ipc_port_send_update_inheritor(port, send_turnstile,
2289 TURNSTILE_IMMEDIATE_UPDATE);
2290 }
2291 imq_unlock(&port->ip_messages);
2292 ip_unlock(port);
2293
2294 if (send_turnstile) {
2295 turnstile_update_inheritor_complete(send_turnstile,
2296 TURNSTILE_INTERLOCK_NOT_HELD);
2297 turnstile_deallocate_safe(send_turnstile);
2298 }
2299 }
2300
2301 /*
2302 * Routine: ipc_port_get_watchport_inheritor
2303 * Purpose:
2304 * Returns the inheritor for the watchport.
2305 *
2306 * Conditions:
2307 * mqueue locked.
2308 * Returns:
2309 * watchport inheritor.
2310 */
2311 static thread_t
2312 ipc_port_get_watchport_inheritor(
2313 ipc_port_t port)
2314 {
2315 imq_held(&port->ip_messages);
2316 return ipc_port_watchport_elem(port)->twe_task->watchports->tw_thread;
2317 }
2318
2319 /*
2320 * Routine: ipc_port_impcount_delta
2321 * Purpose:
2322 * Adjust only the importance count associated with a port.
2323 * If there are any adjustments to be made to receiver task,
2324 * those are handled elsewhere.
2325 *
2326 * For now, be defensive during deductions to make sure the
2327 * impcount for the port doesn't underflow zero. This will
2328 * go away when the port boost addition is made atomic (see
2329 * note in ipc_port_importance_delta()).
2330 * Conditions:
2331 * The port is referenced and locked.
2332 * Nothing else is locked.
2333 */
2334 mach_port_delta_t
2335 ipc_port_impcount_delta(
2336 ipc_port_t port,
2337 mach_port_delta_t delta,
2338 ipc_port_t __unused base)
2339 {
2340 mach_port_delta_t absdelta;
2341
2342 if (!ip_active(port)) {
2343 return 0;
2344 }
2345
2346 /* adding/doing nothing is easy */
2347 if (delta >= 0) {
2348 port->ip_impcount += delta;
2349 return delta;
2350 }
2351
2352 absdelta = 0 - delta;
2353 if (port->ip_impcount >= absdelta) {
2354 port->ip_impcount -= absdelta;
2355 return delta;
2356 }
2357
2358 #if (DEVELOPMENT || DEBUG)
2359 if (port->ip_receiver_name != MACH_PORT_NULL) {
2360 task_t target_task = port->ip_receiver->is_task;
2361 ipc_importance_task_t target_imp = target_task->task_imp_base;
2362 const char *target_procname;
2363 int target_pid;
2364
2365 if (target_imp != IIT_NULL) {
2366 target_procname = target_imp->iit_procname;
2367 target_pid = target_imp->iit_bsd_pid;
2368 } else {
2369 target_procname = "unknown";
2370 target_pid = -1;
2371 }
2372 printf("Over-release of importance assertions for port 0x%x receiver pid %d (%s), "
2373 "dropping %d assertion(s) but port only has %d remaining.\n",
2374 port->ip_receiver_name,
2375 target_pid, target_procname,
2376 absdelta, port->ip_impcount);
2377 } else if (base != IP_NULL) {
2378 task_t target_task = base->ip_receiver->is_task;
2379 ipc_importance_task_t target_imp = target_task->task_imp_base;
2380 const char *target_procname;
2381 int target_pid;
2382
2383 if (target_imp != IIT_NULL) {
2384 target_procname = target_imp->iit_procname;
2385 target_pid = target_imp->iit_bsd_pid;
2386 } else {
2387 target_procname = "unknown";
2388 target_pid = -1;
2389 }
2390 printf("Over-release of importance assertions for port 0x%lx "
2391 "enqueued on port 0x%x with receiver pid %d (%s), "
2392 "dropping %d assertion(s) but port only has %d remaining.\n",
2393 (unsigned long)VM_KERNEL_UNSLIDE_OR_PERM((uintptr_t)port),
2394 base->ip_receiver_name,
2395 target_pid, target_procname,
2396 absdelta, port->ip_impcount);
2397 }
2398 #endif
2399
2400 delta = 0 - port->ip_impcount;
2401 port->ip_impcount = 0;
2402 return delta;
2403 }
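/*
 * Worked example (illustrative, not part of the original source):
 * because negative deltas are clamped, the caller can always apply
 * the returned value to its own accounting without underflow.
 */
#if 0
	/* port is referenced, locked, active, and has ip_impcount == 3 */
	mach_port_delta_t applied;
	applied = ipc_port_impcount_delta(port, -5, IP_NULL);
	/* applied == -3: only three assertions existed, so only three drop */
	applied = ipc_port_impcount_delta(port, 2, IP_NULL);
	/* positive deltas apply in full: applied == 2, ip_impcount == 2 */
#endif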
2404
2405 /*
2406 * Routine: ipc_port_importance_delta_internal
2407 * Purpose:
2408 * Adjust the importance count through the given port.
2409 * If the port is in transit, apply the delta throughout
2410 * the chain. Determine if there is a task at the
2411 * base of the chain that wants/needs to be adjusted,
2412 * and if so, apply the delta.
2413 * Conditions:
2414 * The port is referenced and locked on entry.
2415 * Importance may be locked.
2416 * Nothing else is locked.
2417 * The lock may be dropped on exit.
2418 * Returns TRUE if lock was dropped.
2419 */
2420 #if IMPORTANCE_INHERITANCE
2421
2422 boolean_t
2423 ipc_port_importance_delta_internal(
2424 ipc_port_t port,
2425 natural_t options,
2426 mach_port_delta_t *deltap,
2427 ipc_importance_task_t *imp_task)
2428 {
2429 ipc_port_t next, base;
2430 bool dropped = false;
2431 bool took_base_ref = false;
2432
2433 *imp_task = IIT_NULL;
2434
2435 if (*deltap == 0) {
2436 return FALSE;
2437 }
2438
2439 assert(options == IPID_OPTION_NORMAL || options == IPID_OPTION_SENDPOSSIBLE);
2440
2441 base = port;
2442
2443 /* if port is in transit, have to search for end of chain */
2444 if (ip_active(port) &&
2445 port->ip_destination != IP_NULL &&
2446 port->ip_receiver_name == MACH_PORT_NULL) {
2447 dropped = true;
2448
2449 ip_unlock(port);
2450 ipc_port_multiple_lock(); /* massive serialization */
2451
2452 took_base_ref = ipc_port_destination_chain_lock(port, &base);
2453 /* all ports in chain from port to base, inclusive, are locked */
2454
2455 ipc_port_multiple_unlock();
2456 }
2457
2458 /*
2459 * If the port lock is dropped because the port is in transit, there is a
2460 * race window where another thread can drain messages and/or fire a
2461 * send possible notification before we get here.
2462 *
2463 * We solve this race by checking to see if our caller armed the send
2464 * possible notification, whether or not it's been fired yet, and
2465 * whether or not we've already set the port's ip_spimportant bit. If
2466 * we don't need a send-possible boost, then we'll just apply a
2467 * harmless 0-boost to the port.
2468 */
2469 if (options & IPID_OPTION_SENDPOSSIBLE) {
2470 assert(*deltap == 1);
2471 if (port->ip_sprequests && port->ip_spimportant == 0) {
2472 port->ip_spimportant = 1;
2473 } else {
2474 *deltap = 0;
2475 }
2476 }
2477
2478 /* unlock down to the base, adjusting boost(s) at each level */
2479 for (;;) {
2480 *deltap = ipc_port_impcount_delta(port, *deltap, base);
2481
2482 if (port == base) {
2483 break;
2484 }
2485
2486 /* port is in transit */
2487 assert(port->ip_tempowner == 0);
2488 next = port->ip_destination;
2489 ip_unlock(port);
2490 port = next;
2491 }
2492
2493 /* find the task (if any) to boost according to the base */
2494 if (ip_active(base)) {
2495 if (base->ip_tempowner != 0) {
2496 if (IIT_NULL != base->ip_imp_task) {
2497 *imp_task = base->ip_imp_task;
2498 }
2499 /* otherwise don't boost */
2500 } else if (base->ip_receiver_name != MACH_PORT_NULL) {
2501 ipc_space_t space = base->ip_receiver;
2502
2503 /* only spaces with boost-accepting tasks */
2504 if (space->is_task != TASK_NULL &&
2505 ipc_importance_task_is_any_receiver_type(space->is_task->task_imp_base)) {
2506 *imp_task = space->is_task->task_imp_base;
2507 }
2508 }
2509 }
2510
2511 /*
2512 * Only the base is locked. If we have to hold or drop task
2513 * importance assertions, we'll have to drop that lock as well.
2514 */
2515 if (*imp_task != IIT_NULL) {
2516 /* take a reference before unlocking base */
2517 ipc_importance_task_reference(*imp_task);
2518 }
2519
2520 if (dropped) {
2521 ip_unlock(base);
2522 if (took_base_ref) {
2523 ip_release(base);
2524 }
2525 }
2526
2527 return dropped;
2528 }
2529 #endif /* IMPORTANCE_INHERITANCE */
2530
2531 /*
2532 * Routine: ipc_port_importance_delta
2533 * Purpose:
2534 * Adjust the importance count through the given port.
2535 * If the port is in transit, apply the delta throughout
2536 * the chain.
2537 *
2538 * If there is a task at the base of the chain that wants/needs
2539 * to be adjusted, apply the delta.
2540 * Conditions:
2541 * The port is referenced and locked on entry.
2542 * Nothing else is locked.
2543 * The lock may be dropped on exit.
2544 * Returns TRUE if lock was dropped.
2545 */
2546 #if IMPORTANCE_INHERITANCE
2547
2548 boolean_t
2549 ipc_port_importance_delta(
2550 ipc_port_t port,
2551 natural_t options,
2552 mach_port_delta_t delta)
2553 {
2554 ipc_importance_task_t imp_task = IIT_NULL;
2555 boolean_t dropped;
2556
2557 dropped = ipc_port_importance_delta_internal(port, options, &delta, &imp_task);
2558
2559 if (IIT_NULL == imp_task || delta == 0) {
2560 return dropped;
2561 }
2562
2563 if (!dropped) {
2564 ip_unlock(port);
2565 }
2566
2567 assert(ipc_importance_task_is_any_receiver_type(imp_task));
2568
2569 if (delta > 0) {
2570 ipc_importance_task_hold_internal_assertion(imp_task, delta);
2571 } else {
2572 ipc_importance_task_drop_internal_assertion(imp_task, -delta);
2573 }
2574
2575 ipc_importance_task_release(imp_task);
2576 return TRUE;
2577 }
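/*
 * Illustrative sketch (not part of the original source): dropping one
 * importance boost through a locked port.  The routine reports
 * whether it had to drop the port lock along the way.
 */
#if 0
	ip_lock(port);
	if (!ipc_port_importance_delta(port, IPID_OPTION_NORMAL, -1)) {
		/* FALSE: the lock was never dropped, release it ourselves */
		ip_unlock(port);
	}
#endif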
2578 #endif /* IMPORTANCE_INHERITANCE */
2579
2580 /*
2581 * Routine: ipc_port_make_send_locked
2582 * Purpose:
2583 * Make a naked send right from a receive right.
2584 *
2585 * Conditions:
2586 * port locked and active.
2587 */
2588 ipc_port_t
2589 ipc_port_make_send_locked(
2590 ipc_port_t port)
2591 {
2592 require_ip_active(port);
2593 port->ip_mscount++;
2594 port->ip_srights++;
2595 ip_reference(port);
2596 return port;
2597 }
2598
2599 /*
2600 * Routine: ipc_port_make_send
2601 * Purpose:
2602 * Make a naked send right from a receive right.
2603 */
2604
2605 ipc_port_t
2606 ipc_port_make_send(
2607 ipc_port_t port)
2608 {
2609 if (!IP_VALID(port)) {
2610 return port;
2611 }
2612
2613 ip_lock(port);
2614 if (ip_active(port)) {
2615 ipc_port_make_send_locked(port);
2616 ip_unlock(port);
2617 return port;
2618 }
2619 ip_unlock(port);
2620 return IP_DEAD;
2621 }
2622
2623 /*
2624 * Routine: ipc_port_copy_send_locked
2625 * Purpose:
2626 * Make a naked send right from another naked send right.
2627 * Conditions:
2628 * port locked and active.
2629 */
2630 void
2631 ipc_port_copy_send_locked(
2632 ipc_port_t port)
2633 {
2634 assert(port->ip_srights > 0);
2635 port->ip_srights++;
2636 ip_reference(port);
2637 }
2638
2639 /*
2640 * Routine: ipc_port_copy_send
2641 * Purpose:
2642 * Make a naked send right from another naked send right.
2643 * IP_NULL -> IP_NULL
2644 * IP_DEAD -> IP_DEAD
2645 * dead port -> IP_DEAD
2646 * live port -> port + ref
2647 * Conditions:
2648 * Nothing locked except possibly a space.
2649 */
2650
2651 ipc_port_t
2652 ipc_port_copy_send(
2653 ipc_port_t port)
2654 {
2655 ipc_port_t sright;
2656
2657 if (!IP_VALID(port)) {
2658 return port;
2659 }
2660
2661 ip_lock(port);
2662 if (ip_active(port)) {
2663 ipc_port_copy_send_locked(port);
2664 sright = port;
2665 } else {
2666 sright = IP_DEAD;
2667 }
2668 ip_unlock(port);
2669
2670 return sright;
2671 }
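/*
 * Illustrative sketch (not part of the original source): duplicating a
 * send right before handing it to another subsystem, with the dead
 * port case handled explicitly.
 */
#if 0
	ipc_port_t sright = ipc_port_copy_send(port);
	if (!IP_VALID(sright)) {
		/* IP_NULL passes through unchanged; a dead port yields IP_DEAD */
		return KERN_INVALID_RIGHT;
	}
	/* sright must later be consumed, e.g. via ipc_port_release_send() */
#endif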
2672
2673 /*
2674 * Routine: ipc_port_copyout_send
2675 * Purpose:
2676 * Copyout a naked send right (possibly null/dead),
2677 * or if that fails, destroy the right.
2678 * Conditions:
2679 * Nothing locked.
2680 */
2681
2682 mach_port_name_t
2683 ipc_port_copyout_send(
2684 ipc_port_t sright,
2685 ipc_space_t space)
2686 {
2687 mach_port_name_t name;
2688
2689 if (IP_VALID(sright)) {
2690 kern_return_t kr;
2691
2692 kr = ipc_object_copyout(space, ip_to_object(sright),
2693 MACH_MSG_TYPE_PORT_SEND, NULL, NULL, &name);
2694 if (kr != KERN_SUCCESS) {
2695 ipc_port_release_send(sright);
2696
2697 if (kr == KERN_INVALID_CAPABILITY) {
2698 name = MACH_PORT_DEAD;
2699 } else {
2700 name = MACH_PORT_NULL;
2701 }
2702 }
2703 } else {
2704 name = CAST_MACH_PORT_TO_NAME(sright);
2705 }
2706
2707 return name;
2708 }
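/*
 * Illustrative sketch (not part of the original source): the common
 * make-then-copyout idiom for giving a task a send right to a
 * kernel-held port.  example_give_send() is hypothetical.
 */
#if 0
static mach_port_name_t
example_give_send(ipc_port_t port, ipc_space_t space)
{
	/* IP_DEAD if the port died; copyout maps that to MACH_PORT_DEAD */
	ipc_port_t sright = ipc_port_make_send(port);
	return ipc_port_copyout_send(sright, space);
}
#endif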
2709
2710 /*
2711 * Routine: ipc_port_release_send
2712 * Purpose:
2713 * Release a naked send right.
2714 * Consumes a ref for the port.
2715 * Conditions:
2716 * Nothing locked.
2717 */
2718
2719 void
2720 ipc_port_release_send(
2721 ipc_port_t port)
2722 {
2723 ipc_port_t nsrequest = IP_NULL;
2724 mach_port_mscount_t mscount;
2725
2726 if (!IP_VALID(port)) {
2727 return;
2728 }
2729
2730 ip_lock(port);
2731
2732 assert(port->ip_srights > 0);
2733 if (port->ip_srights == 0) {
2734 panic("Over-release of port %p send right!", port);
2735 }
2736
2737 port->ip_srights--;
2738
2739 if (!ip_active(port)) {
2740 ip_unlock(port);
2741 ip_release(port);
2742 return;
2743 }
2744
2745 if (port->ip_srights == 0 &&
2746 port->ip_nsrequest != IP_NULL) {
2747 nsrequest = port->ip_nsrequest;
2748 port->ip_nsrequest = IP_NULL;
2749 mscount = port->ip_mscount;
2750 ip_unlock(port);
2751 ip_release(port);
2752 ipc_notify_no_senders(nsrequest, mscount);
2753 } else {
2754 ip_unlock(port);
2755 ip_release(port);
2756 }
2757 }
2758
2759 /*
2760 * Routine: ipc_port_make_sonce_locked
2761 * Purpose:
2762 * Make a naked send-once right from a receive right.
2763 * Conditions:
2764 * The port is locked and active.
2765 */
2766
2767 ipc_port_t
2768 ipc_port_make_sonce_locked(
2769 ipc_port_t port)
2770 {
2771 require_ip_active(port);
2772 port->ip_sorights++;
2773 ip_reference(port);
2774 return port;
2775 }
2776
2777 /*
2778 * Routine: ipc_port_make_sonce
2779 * Purpose:
2780 * Make a naked send-once right from a receive right.
2781 * Conditions:
2782 * The port is not locked.
2783 */
2784
2785 ipc_port_t
2786 ipc_port_make_sonce(
2787 ipc_port_t port)
2788 {
2789 if (!IP_VALID(port)) {
2790 return port;
2791 }
2792
2793 ip_lock(port);
2794 if (ip_active(port)) {
2795 ipc_port_make_sonce_locked(port);
2796 ip_unlock(port);
2797 return port;
2798 }
2799 ip_unlock(port);
2800 return IP_DEAD;
2801 }
2802
2803 /*
2804 * Routine: ipc_port_release_sonce
2805 * Purpose:
2806 * Release a naked send-once right.
2807 * Consumes a ref for the port.
2808 *
2809 * In normal situations, this is never used.
2810 * Send-once rights are only consumed when
2811 * a message (possibly a send-once notification)
2812 * is sent to them.
2813 * Conditions:
2814 * Nothing locked except possibly a space.
2815 */
2816
2817 void
2818 ipc_port_release_sonce(
2819 ipc_port_t port)
2820 {
2821 if (!IP_VALID(port)) {
2822 return;
2823 }
2824
2825 ipc_port_adjust_special_reply_port(port, IPC_PORT_ADJUST_RESET_BOOSTRAP_CHECKIN);
2826
2827 ip_lock(port);
2828
2829 assert(port->ip_sorights > 0);
2830 if (port->ip_sorights == 0) {
2831 panic("Over-release of port %p send-once right!", port);
2832 }
2833
2834 port->ip_sorights--;
2835
2836 ip_unlock(port);
2837 ip_release(port);
2838 }
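/*
 * Illustrative sketch (not part of the original source): the rare path
 * where a naked send-once right is released directly instead of being
 * consumed by a message or a send-once notification.
 */
#if 0
	ipc_port_t soright = ipc_port_make_sonce(port);
	if (IP_VALID(soright)) {
		/* drops one send-once right and one port reference */
		ipc_port_release_sonce(soright);
	}
#endif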
2839
2840 /*
2841 * Routine: ipc_port_release_receive
2842 * Purpose:
2843 * Release a naked (in limbo or in transit) receive right.
2844 * Consumes a ref for the port; destroys the port.
2845 * Conditions:
2846 * Nothing locked.
2847 */
2848
2849 void
2850 ipc_port_release_receive(
2851 ipc_port_t port)
2852 {
2853 ipc_port_t dest;
2854
2855 if (!IP_VALID(port)) {
2856 return;
2857 }
2858
2859 ip_lock(port);
2860 require_ip_active(port);
2861 assert(port->ip_receiver_name == MACH_PORT_NULL);
2862 dest = port->ip_destination;
2863
2864 ipc_port_destroy(port); /* consumes ref, unlocks */
2865
2866 if (dest != IP_NULL) {
2867 ipc_port_send_turnstile_complete(dest);
2868 ip_release(dest);
2869 }
2870 }
2871
2872 /*
2873 * Routine: ipc_port_alloc_special
2874 * Purpose:
2875 * Allocate a port in a special space.
2876 * The new port is returned with one ref.
2877 * If unsuccessful, IP_NULL is returned.
2878 * Conditions:
2879 * Nothing locked.
2880 */
2881
2882 ipc_port_t
2883 ipc_port_alloc_special(
2884 ipc_space_t space,
2885 ipc_port_init_flags_t flags)
2886 {
2887 ipc_port_t port;
2888
2889 port = ip_object_to_port(io_alloc(IOT_PORT));
2890 if (port == IP_NULL) {
2891 return IP_NULL;
2892 }
2893
2894 #if MACH_ASSERT
2895 uintptr_t buf[IP_CALLSTACK_MAX];
2896 ipc_port_callstack_init_debug(&buf[0], IP_CALLSTACK_MAX);
2897 #endif /* MACH_ASSERT */
2898
2899 bzero((char *)port, sizeof(*port));
2900 io_lock_init(ip_to_object(port));
2901 port->ip_references = 1;
2902 port->ip_object.io_bits = io_makebits(TRUE, IOT_PORT, 0);
2903
2904 ipc_port_init(port, space, flags, 1);
2905
2906 #if MACH_ASSERT
2907 ipc_port_init_debug(port, &buf[0], IP_CALLSTACK_MAX);
2908 #endif /* MACH_ASSERT */
2909
2910 return port;
2911 }
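/*
 * Illustrative sketch (not part of the original source): kernel
 * subsystems allocate their service ports in ipc_space_kernel and
 * hand out send rights made from the receive right.  The helper name
 * example_create_kernel_port() and the use of IPC_PORT_INIT_NONE are
 * assumptions.
 */
#if 0
static ipc_port_t
example_create_kernel_port(void)
{
	ipc_port_t port;

	port = ipc_port_alloc_special(ipc_space_kernel, IPC_PORT_INIT_NONE);
	if (port == IP_NULL) {
		return IP_NULL;
	}
	/* caller now holds the port reference plus one send right */
	return ipc_port_make_send(port);
}
#endif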
2912
2913 /*
2914 * Routine: ipc_port_dealloc_special
2915 * Purpose:
2916 * Deallocate a port in a special space.
2917 * Consumes one ref for the port.
2918 * Conditions:
2919 * Nothing locked.
2920 */
2921
2922 void
2923 ipc_port_dealloc_special(
2924 ipc_port_t port,
2925 __assert_only ipc_space_t space)
2926 {
2927 ip_lock(port);
2928 require_ip_active(port);
2929 // assert(port->ip_receiver_name != MACH_PORT_NULL);
2930 assert(port->ip_receiver == space);
2931
2932 /*
2933 * We clear ip_receiver_name and ip_receiver to simplify
2934 * the ipc_space_kernel check in ipc_mqueue_send.
2935 */
2936
2937 imq_lock(&port->ip_messages);
2938 port->ip_receiver_name = MACH_PORT_NULL;
2939 port->ip_receiver = IS_NULL;
2940 imq_unlock(&port->ip_messages);
2941
2942 /* relevant part of ipc_port_clear_receiver */
2943 port->ip_mscount = 0;
2944 port->ip_messages.imq_seqno = 0;
2945
2946 ipc_port_destroy(port);
2947 }
2948
2949 /*
2950 * Routine: ipc_port_finalize
2951 * Purpose:
2952 * Called when the last reference is deallocated to
2953 * free any remaining data associated with the
2954 * port.
2955 * Conditions:
2956 * Nothing locked.
2957 */
2958 void
2959 ipc_port_finalize(
2960 ipc_port_t port)
2961 {
2962 ipc_port_request_t requests = port->ip_requests;
2963
2964 assert(port_send_turnstile(port) == TURNSTILE_NULL);
2965 if (imq_is_turnstile_proxy(&port->ip_messages)) {
2966 assert(ipc_port_rcv_turnstile(port) == TURNSTILE_NULL);
2967 }
2968
2969 if (ip_active(port)) {
2970 panic("Trying to free an active port. port %p", port);
2971 }
2972
2973 if (requests != IPR_NULL) {
2974 ipc_table_size_t its = requests->ipr_size;
2975 it_requests_free(its, requests);
2976 port->ip_requests = IPR_NULL;
2977 }
2978
2979 ipc_mqueue_deinit(&port->ip_messages);
2980
2981 #if MACH_ASSERT
2982 ipc_port_track_dealloc(port);
2983 #endif /* MACH_ASSERT */
2984 }
2985
2986 /*
2987 * Routine: kdp_mqueue_send_find_owner
2988 * Purpose:
2989 * Discover the owner of the ipc_mqueue that contains the input
2990 * waitq object. The thread blocked on the waitq should be
2991 * waiting for an IPC_MQUEUE_FULL event.
2992 * Conditions:
2993 * The 'waitinfo->wait_type' value should already be set to
2994 * kThreadWaitPortSend.
2995 * Note:
2996 * If we find out that the containing port is actually in
2997 * transit, we reset the wait_type field to reflect this.
2998 */
2999 void
3000 kdp_mqueue_send_find_owner(struct waitq * waitq, __assert_only event64_t event, thread_waitinfo_t * waitinfo)
3001 {
3002 struct turnstile *turnstile;
3003 assert(waitinfo->wait_type == kThreadWaitPortSend);
3004 assert(event == IPC_MQUEUE_FULL);
3005 assert(waitq_is_turnstile_queue(waitq));
3006
3007 turnstile = waitq_to_turnstile(waitq);
3008 ipc_port_t port = (ipc_port_t)turnstile->ts_proprietor; /* we are blocking on send */
3009
3010 zone_id_require(ZONE_ID_IPC_PORT, sizeof(struct ipc_port), port);
3011
3012 waitinfo->owner = 0;
3013 waitinfo->context = VM_KERNEL_UNSLIDE_OR_PERM(port);
3014 if (ip_lock_held_kdp(port)) {
3015 /*
3016 * someone has the port locked: it may be in an
3017 * inconsistent state; bail
3018 */
3019 waitinfo->owner = STACKSHOT_WAITOWNER_PORT_LOCKED;
3020 return;
3021 }
3022
3023 if (ip_active(port)) {
3024 if (port->ip_tempowner) {
3025 if (port->ip_imp_task != IIT_NULL && port->ip_imp_task->iit_task != NULL) {
3026 /* port is held by a tempowner */
3027 waitinfo->owner = pid_from_task(port->ip_imp_task->iit_task);
3028 } else {
3029 waitinfo->owner = STACKSHOT_WAITOWNER_INTRANSIT;
3030 }
3031 } else if (port->ip_receiver_name) {
3032 /* port in a space */
3033 if (port->ip_receiver == ipc_space_kernel) {
3034 /*
3035 * The kernel pid is 0; make this
3036 * distinguishable from no-owner and
3037 * inconsistent port state.
3038 */
3039 waitinfo->owner = STACKSHOT_WAITOWNER_KERNEL;
3040 } else {
3041 waitinfo->owner = pid_from_task(port->ip_receiver->is_task);
3042 }
3043 } else if (port->ip_destination != IP_NULL) {
3044 /* port in transit */
3045 waitinfo->wait_type = kThreadWaitPortSendInTransit;
3046 waitinfo->owner = VM_KERNEL_UNSLIDE_OR_PERM(port->ip_destination);
3047 }
3048 }
3049 }
3050
3051 /*
3052 * Routine: kdp_mqueue_recv_find_owner
3053 * Purpose:
3054 * Discover the "owner" of the ipc_mqueue that contains the input
3055 * waitq object. The thread blocked on the waitq is trying to
3056 * receive on the mqueue.
3057 * Conditions:
3058 * The 'waitinfo->wait_type' value should already be set to
3059 * kThreadWaitPortReceive.
3060 * Note:
3061 * If we find that we are actually waiting on a port set, we reset
3062 * the wait_type field to reflect this.
3063 */
3064 void
3065 kdp_mqueue_recv_find_owner(struct waitq * waitq, __assert_only event64_t event, thread_waitinfo_t * waitinfo)
3066 {
3067 assert(waitinfo->wait_type == kThreadWaitPortReceive);
3068 assert(event == IPC_MQUEUE_RECEIVE);
3069
3070 ipc_mqueue_t mqueue = imq_from_waitq(waitq);
3071 waitinfo->owner = 0;
3072 if (imq_is_set(mqueue)) { /* we are waiting on a port set */
3073 ipc_pset_t set = ips_from_mq(mqueue);
3074
3075 zone_id_require(ZONE_ID_IPC_PORT_SET, sizeof(struct ipc_pset), set);
3076
3077 /* Reset wait type to specify waiting on port set receive */
3078 waitinfo->wait_type = kThreadWaitPortSetReceive;
3079 waitinfo->context = VM_KERNEL_UNSLIDE_OR_PERM(set);
3080 if (ips_lock_held_kdp(set)) {
3081 waitinfo->owner = STACKSHOT_WAITOWNER_PSET_LOCKED;
3082 }
3083 /* There is no specific owner "at the other end" of a port set, so leave unset. */
3084 } else {
3085 ipc_port_t port = ip_from_mq(mqueue);
3086
3087 zone_id_require(ZONE_ID_IPC_PORT, sizeof(struct ipc_port), port);
3088
3089 waitinfo->context = VM_KERNEL_UNSLIDE_OR_PERM(port);
3090 if (ip_lock_held_kdp(port)) {
3091 waitinfo->owner = STACKSHOT_WAITOWNER_PORT_LOCKED;
3092 return;
3093 }
3094
3095 if (ip_active(port)) {
3096 if (port->ip_receiver_name != MACH_PORT_NULL) {
3097 waitinfo->owner = port->ip_receiver_name;
3098 } else {
3099 waitinfo->owner = STACKSHOT_WAITOWNER_INTRANSIT;
3100 }
3101 }
3102 }
3103 }
3104
3105 #if MACH_ASSERT
3106 #include <kern/machine.h>
3107
3108 /*
3109 * Keep a list of all allocated ports.
3110 * Allocation is intercepted via ipc_port_init;
3111 * deallocation is intercepted via io_free.
3112 */
3113 #if 0
3114 queue_head_t port_alloc_queue = QUEUE_HEAD_INITIALIZER(port_alloc_queue);
3115 LCK_SPIN_DECLARE(port_alloc_queue_lock, &ipc_lck_grp, &ipc_lck_attr);
3116 #endif
3117
3118 unsigned long port_count = 0;
3119 unsigned long port_count_warning = 20000;
3120 unsigned long port_timestamp = 0;
3121
3122 void db_port_stack_trace(
3123 ipc_port_t port);
3124 void db_ref(
3125 int refs);
3126 int db_port_walk(
3127 unsigned int verbose,
3128 unsigned int display,
3129 unsigned int ref_search,
3130 unsigned int ref_target);
3131
3132 #ifdef MACH_BSD
3133 extern int proc_pid(struct proc*);
3134 #endif /* MACH_BSD */
3135
3136 /*
3137 * Initialize all of the debugging state in a port.
3138 * Insert the port into a global list of all allocated ports.
3139 */
3140 void
3141 ipc_port_init_debug(
3142 ipc_port_t port,
3143 uintptr_t *callstack,
3144 unsigned int callstack_max)
3145 {
3146 unsigned int i;
3147
3148 port->ip_thread = current_thread();
3149 port->ip_timetrack = port_timestamp++;
3150 for (i = 0; i < callstack_max; ++i) {
3151 port->ip_callstack[i] = callstack[i];
3152 }
3153 for (i = 0; i < IP_NSPARES; ++i) {
3154 port->ip_spares[i] = 0;
3155 }
3156
3157 #ifdef MACH_BSD
3158 task_t task = current_task();
3159 if (task != TASK_NULL) {
3160 struct proc* proc = (struct proc*) get_bsdtask_info(task);
3161 if (proc) {
3162 port->ip_spares[0] = proc_pid(proc);
3163 }
3164 }
3165 #endif /* MACH_BSD */
3166
3167 #if 0
3168 lck_spin_lock(&port_alloc_queue_lock);
3169 ++port_count;
3170 if (port_count_warning > 0 && port_count >= port_count_warning) {
3171 assert(port_count < port_count_warning);
3172 }
3173 queue_enter(&port_alloc_queue, port, ipc_port_t, ip_port_links);
3174 lck_spin_unlock(&port_alloc_queue_lock);
3175 #endif
3176 }
3177
3178 /*
3179 * Routine: ipc_port_callstack_init_debug
3180 * Purpose:
3181 * Calls the machine-dependent routine to
3182 * fill in an array with up to IP_CALLSTACK_MAX
3183 * levels of return pc information.
3184 * Conditions:
3185 * May block (via copyin)
3186 */
3187 void
3188 ipc_port_callstack_init_debug(
3189 uintptr_t *callstack,
3190 unsigned int callstack_max)
3191 {
3192 unsigned int i;
3193
3194 /* guarantee the callstack is initialized */
3195 for (i = 0; i < callstack_max; i++) {
3196 callstack[i] = 0;
3197 }
3198
3199 if (ipc_portbt) {
3200 machine_callstack(callstack, callstack_max);
3201 }
3202 }
3203
3204 /*
3205 * Remove a port from the queue of allocated ports.
3206 * This routine should be invoked JUST prior to
3207 * deallocating the actual memory occupied by the port.
3208 */
3209 #if 1
3210 void
3211 ipc_port_track_dealloc(
3212 __unused ipc_port_t port)
3213 {
3214 }
3215 #else
3216 void
3217 ipc_port_track_dealloc(
3218 ipc_port_t port)
3219 {
3220 lck_spin_lock(&port_alloc_queue_lock);
3221 assert(port_count > 0);
3222 --port_count;
3223 queue_remove(&port_alloc_queue, port, ipc_port_t, ip_port_links);
3224 lck_spin_unlock(&port_alloc_queue_lock);
3225 }
3226 #endif
3227
3228
3229 #endif /* MACH_ASSERT */