1 /*
2 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_FREE_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
58 * support for mandatory and extensible security protections. This notice
59 * is included in support of clause 2.2 (b) of the Apple Public License,
60 * Version 2.0.
61 */
62 /*
63 */
64 /*
65 * File: ipc/ipc_port.c
66 * Author: Rich Draves
67 * Date: 1989
68 *
69 * Functions to manipulate IPC ports.
70 */
71
72 #include <mach_assert.h>
73
74 #include <mach/port.h>
75 #include <mach/kern_return.h>
76 #include <kern/ipc_kobject.h>
77 #include <kern/thread.h>
78 #include <kern/misc_protos.h>
79 #include <kern/waitq.h>
80 #include <kern/policy_internal.h>
81 #include <kern/debug.h>
82 #include <kern/kcdata.h>
83 #include <ipc/ipc_entry.h>
84 #include <ipc/ipc_space.h>
85 #include <ipc/ipc_object.h>
86 #include <ipc/ipc_right.h>
87 #include <ipc/ipc_port.h>
88 #include <ipc/ipc_pset.h>
89 #include <ipc/ipc_kmsg.h>
90 #include <ipc/ipc_mqueue.h>
91 #include <ipc/ipc_notify.h>
92 #include <ipc/ipc_table.h>
93 #include <ipc/ipc_importance.h>
94 #include <machine/limits.h>
95 #include <kern/turnstile.h>
96 #include <kern/machine.h>
97
98 #include <security/mac_mach_internal.h>
99
100 #include <string.h>
101
102 static TUNABLE(bool, prioritize_launch, "prioritize_launch", true);
103 TUNABLE_WRITEABLE(int, ipc_portbt, "ipc_portbt", false);
104
105 LCK_SPIN_DECLARE_ATTR(ipc_port_multiple_lock_data, &ipc_lck_grp, &ipc_lck_attr);
106 ipc_port_timestamp_t ipc_port_timestamp_data;
107
108 #if MACH_ASSERT
109 void ipc_port_init_debug(
110 ipc_port_t port,
111 uintptr_t *callstack,
112 unsigned int callstack_max);
113
114 void ipc_port_callstack_init_debug(
115 uintptr_t *callstack,
116 unsigned int callstack_max);
117
118 #endif /* MACH_ASSERT */
119
120 static void
121 ipc_port_send_turnstile_recompute_push_locked(
122 ipc_port_t port);
123
124 static thread_t
125 ipc_port_get_watchport_inheritor(
126 ipc_port_t port);
127
128 void
129 ipc_port_release(ipc_port_t port)
130 {
131 ip_release(port);
132 }
133
134 void
135 ipc_port_reference(ipc_port_t port)
136 {
137 ip_reference(port);
138 }
139
140 /*
141 * Routine: ipc_port_timestamp
142 * Purpose:
143 * Retrieve a timestamp value.
144 */
145
146 ipc_port_timestamp_t
147 ipc_port_timestamp(void)
148 {
149 return OSIncrementAtomic(&ipc_port_timestamp_data);
150 }
151
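/*
 * Illustrative sketch (editor's addition, not part of the original
 * file): timestamps handed out above come from an unsigned counter, so
 * "happened before" comparisons are done with wraparound-safe signed
 * subtraction. The helper below is hypothetical; the kernel's own
 * ordering macro lives in ipc_port.h.
 */
#if 0
static inline int
example_timestamp_precedes(ipc_port_timestamp_t one, ipc_port_timestamp_t two)
{
	/* signed difference survives the unsigned counter wrapping */
	return (int)(one - two) < 0;
}
#endif
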
152 /*
153 * Routine: ipc_port_request_alloc
154 * Purpose:
155 * Try to allocate a request slot.
156 * If successful, returns the request index in *indexp.
157 * Otherwise no index is allocated.
158 * Conditions:
159 * The port is locked and active.
160 * Returns:
161 * KERN_SUCCESS A request index was found.
162 * KERN_NO_SPACE No index allocated.
163 */
164
165 #if IMPORTANCE_INHERITANCE
166 kern_return_t
167 ipc_port_request_alloc(
168 ipc_port_t port,
169 mach_port_name_t name,
170 ipc_port_t soright,
171 boolean_t send_possible,
172 boolean_t immediate,
173 ipc_port_request_index_t *indexp,
174 boolean_t *importantp)
175 #else
176 kern_return_t
177 ipc_port_request_alloc(
178 ipc_port_t port,
179 mach_port_name_t name,
180 ipc_port_t soright,
181 boolean_t send_possible,
182 boolean_t immediate,
183 ipc_port_request_index_t *indexp)
184 #endif /* IMPORTANCE_INHERITANCE */
185 {
186 ipc_port_request_t ipr, table;
187 ipc_port_request_index_t index;
188 uintptr_t mask = 0;
189
190 #if IMPORTANCE_INHERITANCE
191 *importantp = FALSE;
192 #endif /* IMPORTANCE_INHERITANCE */
193
194 require_ip_active(port);
195 assert(name != MACH_PORT_NULL);
196 assert(soright != IP_NULL);
197
198 table = port->ip_requests;
199
200 if (table == IPR_NULL) {
201 return KERN_NO_SPACE;
202 }
203
204 index = table->ipr_next;
205 if (index == 0) {
206 return KERN_NO_SPACE;
207 }
208
209 ipr = &table[index];
210 assert(ipr->ipr_name == MACH_PORT_NULL);
211
212 table->ipr_next = ipr->ipr_next;
213 ipr->ipr_name = name;
214
215 if (send_possible) {
216 mask |= IPR_SOR_SPREQ_MASK;
217 if (immediate) {
218 mask |= IPR_SOR_SPARM_MASK;
219 if (port->ip_sprequests == 0) {
220 port->ip_sprequests = 1;
221 #if IMPORTANCE_INHERITANCE
222 /* TODO: Live importance support in send-possible */
223 if (port->ip_impdonation != 0 &&
224 port->ip_spimportant == 0 &&
225 (task_is_importance_donor(current_task()))) {
226 *importantp = TRUE;
227 }
228 #endif /* IMPORTANCE_INHERITANCE */
229 }
230 }
231 }
232 ipr->ipr_soright = IPR_SOR_MAKE(soright, mask);
233
234 *indexp = index;
235
236 return KERN_SUCCESS;
237 }
238
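/*
 * Illustrative sketch (editor's addition): how a caller typically
 * drives ipc_port_request_alloc(). On KERN_NO_SPACE the table is grown
 * with ipc_port_request_grow() - which unlocks the port - and the
 * operation is retried after relocking. Activity rechecks, the
 * IMPORTANCE_INHERITANCE out-parameter, and error paths are elided;
 * this is a sketch, not the kernel's actual registration code.
 */
#if 0
for (;;) {
	ip_lock(port);
	kr = ipc_port_request_alloc(port, name, soright,
	    send_possible, immediate, &index);
	if (kr != KERN_NO_SPACE) {
		break;                  /* port still locked on success */
	}
	kr = ipc_port_request_grow(port, ITS_SIZE_NONE);   /* unlocks port */
	if (kr != KERN_SUCCESS) {
		return kr;              /* couldn't grow the table */
	}
}
#endif
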
239 /*
240 * Routine: ipc_port_request_grow
241 * Purpose:
242 * Grow a port's table of requests.
243 * Conditions:
244 * The port must be locked and active.
245 * Nothing else locked; will allocate memory.
246 * Upon return the port is unlocked.
247 * Returns:
248 * KERN_SUCCESS Grew the table.
249 * KERN_SUCCESS Somebody else grew the table.
250 * KERN_SUCCESS The port died.
251 * KERN_RESOURCE_SHORTAGE Couldn't allocate new table.
252 * KERN_NO_SPACE Couldn't grow to desired size.
253 */
254
255 kern_return_t
256 ipc_port_request_grow(
257 ipc_port_t port,
258 ipc_table_elems_t target_size)
259 {
260 ipc_table_size_t its;
261 ipc_port_request_t otable, ntable;
262 require_ip_active(port);
263
264 otable = port->ip_requests;
265 if (otable == IPR_NULL) {
266 its = &ipc_table_requests[0];
267 } else {
268 its = otable->ipr_size + 1;
269 }
270
271 if (target_size != ITS_SIZE_NONE) {
272 if ((otable != IPR_NULL) &&
273 (target_size <= otable->ipr_size->its_size)) {
274 ip_unlock(port);
275 return KERN_SUCCESS;
276 }
277 while ((its->its_size) && (its->its_size < target_size)) {
278 its++;
279 }
280 if (its->its_size == 0) {
281 ip_unlock(port);
282 return KERN_NO_SPACE;
283 }
284 }
285
286 ip_reference(port);
287 ip_unlock(port);
288
289 if ((its->its_size == 0) ||
290 ((ntable = it_requests_alloc(its)) == IPR_NULL)) {
291 ip_release(port);
292 return KERN_RESOURCE_SHORTAGE;
293 }
294
295 ip_lock(port);
296
297 /*
298 * Check that port is still active and that nobody else
299 * has slipped in and grown the table on us. Note that
300 * just checking if the current table pointer == otable
301 * isn't sufficient; must check ipr_size.
302 */
303
304 if (ip_active(port) && (port->ip_requests == otable) &&
305 ((otable == IPR_NULL) || (otable->ipr_size + 1 == its))) {
306 ipc_table_size_t oits;
307 ipc_table_elems_t osize, nsize;
308 ipc_port_request_index_t free, i;
309
310 /* copy old table to new table */
311
312 if (otable != IPR_NULL) {
313 oits = otable->ipr_size;
314 osize = oits->its_size;
315 free = otable->ipr_next;
316
317 (void) memcpy((void *)(ntable + 1),
318 (const void *)(otable + 1),
319 (osize - 1) * sizeof(struct ipc_port_request));
320 } else {
321 osize = 1;
322 oits = 0;
323 free = 0;
324 }
325
326 nsize = its->its_size;
327 assert(nsize > osize);
328
329 /* add new elements to the new table's free list */
330
331 for (i = osize; i < nsize; i++) {
332 ipc_port_request_t ipr = &ntable[i];
333
334 ipr->ipr_name = MACH_PORT_NULL;
335 ipr->ipr_next = free;
336 free = i;
337 }
338
339 ntable->ipr_next = free;
340 ntable->ipr_size = its;
341 port->ip_requests = ntable;
342 ip_unlock(port);
343 ip_release(port);
344
345 if (otable != IPR_NULL) {
346 it_requests_free(oits, otable);
347 }
348 } else {
349 ip_unlock(port);
350 ip_release(port);
351 it_requests_free(its, ntable);
352 }
353
354 return KERN_SUCCESS;
355 }
356
357 /*
358 * Routine: ipc_port_request_sparm
359 * Purpose:
360 * Arm delayed send-possible request.
361 * Conditions:
362 * The port must be locked and active.
363 *
364 * Returns TRUE if the request was armed (or, when
365 * IMPORTANCE_INHERITANCE is configured, armed with an importance boost).
366 */
367
368 boolean_t
369 ipc_port_request_sparm(
370 ipc_port_t port,
371 __assert_only mach_port_name_t name,
372 ipc_port_request_index_t index,
373 mach_msg_option_t option,
374 mach_msg_priority_t priority)
375 {
376 if (index != IE_REQ_NONE) {
377 ipc_port_request_t ipr, table;
378
379 require_ip_active(port);
380
381 table = port->ip_requests;
382 assert(table != IPR_NULL);
383
384 ipr = &table[index];
385 assert(ipr->ipr_name == name);
386
387 /* Is a send-possible request registered for this slot? */
388 if (IPR_SOR_SPREQ(ipr->ipr_soright)) {
389 ipr->ipr_soright = IPR_SOR_MAKE(ipr->ipr_soright, IPR_SOR_SPARM_MASK);
390 port->ip_sprequests = 1;
391
392 if (option & MACH_SEND_OVERRIDE) {
393 /* apply override to message queue */
394 mach_msg_qos_t qos_ovr;
395 if (mach_msg_priority_is_pthread_priority(priority)) {
396 qos_ovr = _pthread_priority_thread_qos(priority);
397 } else {
398 qos_ovr = mach_msg_priority_overide_qos(priority);
399 }
400 if (qos_ovr) {
401 ipc_mqueue_override_send(&port->ip_messages, qos_ovr);
402 }
403 }
404
405 #if IMPORTANCE_INHERITANCE
406 if (((option & MACH_SEND_NOIMPORTANCE) == 0) &&
407 (port->ip_impdonation != 0) &&
408 (port->ip_spimportant == 0) &&
409 (((option & MACH_SEND_IMPORTANCE) != 0) ||
410 (task_is_importance_donor(current_task())))) {
411 return TRUE;
412 }
413 #else
414 return TRUE;
415 #endif /* IMPORTANCE_INHERITANCE */
416 }
417 }
418 return FALSE;
419 }
420
421 /*
422 * Routine: ipc_port_request_type
423 * Purpose:
424 * Determine the type(s) of port requests enabled for a name.
425 * Conditions:
426 * The port must be locked or inactive (to avoid table growth).
427 * The index must not be IE_REQ_NONE and must correspond to the name in question.
428 */
429 mach_port_type_t
430 ipc_port_request_type(
431 ipc_port_t port,
432 __assert_only mach_port_name_t name,
433 ipc_port_request_index_t index)
434 {
435 ipc_port_request_t ipr, table;
436 mach_port_type_t type = 0;
437
438 table = port->ip_requests;
439 assert(table != IPR_NULL);
440
441 assert(index != IE_REQ_NONE);
442 ipr = &table[index];
443 assert(ipr->ipr_name == name);
444
445 if (IP_VALID(IPR_SOR_PORT(ipr->ipr_soright))) {
446 type |= MACH_PORT_TYPE_DNREQUEST;
447
448 if (IPR_SOR_SPREQ(ipr->ipr_soright)) {
449 type |= MACH_PORT_TYPE_SPREQUEST;
450
451 if (!IPR_SOR_SPARMED(ipr->ipr_soright)) {
452 type |= MACH_PORT_TYPE_SPREQUEST_DELAYED;
453 }
454 }
455 }
456 return type;
457 }
458
459 /*
460 * Routine: ipc_port_request_cancel
461 * Purpose:
462 * Cancel a dead-name/send-possible request and return the send-once right.
463 * Conditions:
464 * The port must be locked and active.
465 * The index must not be IE_REQ_NONE and must correspond to name.
466 */
467
468 ipc_port_t
469 ipc_port_request_cancel(
470 ipc_port_t port,
471 __assert_only mach_port_name_t name,
472 ipc_port_request_index_t index)
473 {
474 ipc_port_request_t ipr, table;
475 ipc_port_t request = IP_NULL;
476
477 require_ip_active(port);
478 table = port->ip_requests;
479 assert(table != IPR_NULL);
480
481 assert(index != IE_REQ_NONE);
482 ipr = &table[index];
483 assert(ipr->ipr_name == name);
484 request = IPR_SOR_PORT(ipr->ipr_soright);
485
486 /* return ipr to the free list inside the table */
487 ipr->ipr_name = MACH_PORT_NULL;
488 ipr->ipr_next = table->ipr_next;
489 table->ipr_next = index;
490
491 return request;
492 }
493
494 /*
495 * Routine: ipc_port_pdrequest
496 * Purpose:
497 * Make a port-deleted request, returning the
498 * previously registered send-once right.
499 * Just cancels the previous request if notify is IP_NULL.
500 * Conditions:
501 * The port is locked and active. It is unlocked.
502 * Consumes a ref for notify (if non-null), and
503 * returns previous with a ref (if non-null).
504 */
505
506 void
507 ipc_port_pdrequest(
508 ipc_port_t port,
509 ipc_port_t notify,
510 ipc_port_t *previousp)
511 {
512 ipc_port_t previous;
513 require_ip_active(port);
514
515 previous = port->ip_pdrequest;
516 port->ip_pdrequest = notify;
517 ip_unlock(port);
518
519 *previousp = previous;
520 }
521
522 /*
523 * Routine: ipc_port_nsrequest
524 * Purpose:
525 * Make a no-senders request, returning the
526 * previously registered send-once right.
527 * Just cancels the previous request if notify is IP_NULL.
528 * Conditions:
529 * The port is locked and active. It is unlocked.
530 * Consumes a ref for notify (if non-null), and
531 * returns previous with a ref (if non-null).
532 */
533
534 void
535 ipc_port_nsrequest(
536 ipc_port_t port,
537 mach_port_mscount_t sync,
538 ipc_port_t notify,
539 ipc_port_t *previousp)
540 {
541 ipc_port_t previous;
542 mach_port_mscount_t mscount;
543 require_ip_active(port);
544
545 previous = port->ip_nsrequest;
546 mscount = port->ip_mscount;
547
548 if ((port->ip_srights == 0) && (sync <= mscount) &&
549 (notify != IP_NULL)) {
550 port->ip_nsrequest = IP_NULL;
551 ip_unlock(port);
552 ipc_notify_no_senders(notify, mscount);
553 } else {
554 port->ip_nsrequest = notify;
555 ip_unlock(port);
556 }
557
558 *previousp = previous;
559 }
560
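/*
 * Illustrative sketch (editor's addition): the predicate the routine
 * above encodes. A no-senders notification fires immediately only when
 * the port has no outstanding send rights and the caller's sync value
 * does not exceed the current make-send count; otherwise the request
 * parks on ip_nsrequest until the last send right dies.
 */
#if 0
boolean_t fires_now = (port->ip_srights == 0) &&
    (sync <= port->ip_mscount) &&
    (notify != IP_NULL);
#endif
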
561
562 /*
563 * Routine: ipc_port_clear_receiver
564 * Purpose:
565 * Prepares a receive right for transmission/destruction,
566 * optionally performs mqueue destruction (with port lock held)
567 *
568 * Conditions:
569 * The port is locked and active.
570 * Returns:
571 * If should_destroy is TRUE, then the return value indicates
572 * whether the caller needs to reap kmsg structures that should
573 * be destroyed (by calling ipc_kmsg_reap_delayed)
574 *
575 * If should_destroy is FALSE, this always returns FALSE
576 */
577
578 boolean_t
579 ipc_port_clear_receiver(
580 ipc_port_t port,
581 boolean_t should_destroy)
582 {
583 ipc_mqueue_t mqueue = &port->ip_messages;
584 boolean_t reap_messages = FALSE;
585
586 /*
587 * Pull ourselves out of any sets to which we belong.
588 * We hold the port locked, so even though this acquires and releases
589 * the mqueue lock, we know we won't be added to any other sets.
590 */
591 if (port->ip_in_pset != 0) {
592 ipc_pset_remove_from_all(port);
593 assert(port->ip_in_pset == 0);
594 }
595
596 /*
597 * Send anyone waiting on the port's queue directly away.
598 * Also clear the mscount, seqno, and guard bits.
599 */
600 imq_lock(mqueue);
601 if (port->ip_receiver_name) {
602 ipc_mqueue_changed(port->ip_receiver, mqueue);
603 } else {
604 ipc_mqueue_changed(NULL, mqueue);
605 }
606 port->ip_mscount = 0;
607 mqueue->imq_seqno = 0;
608 port->ip_context = port->ip_guarded = port->ip_strict_guard = 0;
609 /*
610 * clear the immovable bit so the port can move back to anyone listening
611 * for the port destroy notification
612 */
613 port->ip_immovable_receive = 0;
614
615 if (should_destroy) {
616 /*
617 * Mark the port and mqueue invalid, preventing further send/receive
618 * operations from succeeding. It's important for this to be
619 * done under the same lock hold as the ipc_mqueue_changed
620 * call to avoid additional threads blocking on an mqueue
621 * that's being destroyed.
622 *
623 * The port active bit needs to be guarded under mqueue lock for
624 * turnstiles
625 */
626 port->ip_object.io_bits &= ~IO_BITS_ACTIVE;
627 port->ip_timestamp = ipc_port_timestamp();
628 reap_messages = ipc_mqueue_destroy_locked(mqueue);
629 } else {
630 /* make port be in limbo */
631 port->ip_receiver_name = MACH_PORT_NULL;
632 port->ip_destination = IP_NULL;
633 }
634
635 imq_unlock(&port->ip_messages);
636
637 return reap_messages;
638 }
639
640 /*
641 * Routine: ipc_port_init
642 * Purpose:
643 * Initializes a newly-allocated port.
644 * Doesn't touch the ip_object fields.
645 *
646 * The memory is expected to be zero initialized (allocated with Z_ZERO).
647 */
648
649 void
650 ipc_port_init(
651 ipc_port_t port,
652 ipc_space_t space,
653 ipc_port_init_flags_t flags,
654 mach_port_name_t name)
655 {
656 /* port->ip_kobject doesn't have to be initialized */
657
658 port->ip_receiver = space;
659 port->ip_receiver_name = name;
660
661 if (flags & IPC_PORT_INIT_MAKE_SEND_RIGHT) {
662 port->ip_srights = 1;
663 port->ip_mscount = 1;
664 }
665
666 if (flags & IPC_PORT_INIT_FILTER_MESSAGE) {
667 port->ip_object.io_bits |= IP_BIT_FILTER_MSG;
668 }
669
670 port->ip_tg_block_tracking = (flags & IPC_PORT_INIT_TG_BLOCK_TRACKING) != 0;
671
672 if (flags & IPC_PORT_INIT_SPECIAL_REPLY) {
673 port->ip_specialreply = true;
674 port->ip_immovable_receive = true;
675 }
676
677 port->ip_sync_link_state = PORT_SYNC_LINK_ANY;
678
679 ipc_mqueue_kind_t kind = IPC_MQUEUE_KIND_NONE;
680 if (flags & IPC_PORT_INIT_MESSAGE_QUEUE) {
681 kind = IPC_MQUEUE_KIND_PORT;
682 }
683 ipc_mqueue_init(&port->ip_messages, kind);
684 }
685
686 /*
687 * Routine: ipc_port_alloc
688 * Purpose:
689 * Allocate a port.
690 * Conditions:
691 * Nothing locked. If successful, the port is returned
692 * locked. (The caller doesn't have a reference.)
693 * Returns:
694 * KERN_SUCCESS The port is allocated.
695 * KERN_INVALID_TASK The space is dead.
696 * KERN_NO_SPACE No room for an entry in the space.
697 * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
698 */
699
700 kern_return_t
701 ipc_port_alloc(
702 ipc_space_t space,
703 ipc_port_init_flags_t flags,
704 mach_port_name_t *namep,
705 ipc_port_t *portp)
706 {
707 ipc_port_t port;
708 mach_port_name_t name;
709 kern_return_t kr;
710 mach_port_type_t type = MACH_PORT_TYPE_RECEIVE;
711 mach_port_urefs_t urefs = 0;
712
713 #if MACH_ASSERT
714 uintptr_t buf[IP_CALLSTACK_MAX];
715 ipc_port_callstack_init_debug(&buf[0], IP_CALLSTACK_MAX);
716 #endif /* MACH_ASSERT */
717
718 if (flags & IPC_PORT_INIT_MAKE_SEND_RIGHT) {
719 type |= MACH_PORT_TYPE_SEND;
720 urefs = 1;
721 }
722 kr = ipc_object_alloc(space, IOT_PORT, type, urefs,
723 &name, (ipc_object_t *) &port);
724 if (kr != KERN_SUCCESS) {
725 return kr;
726 }
727
728 /* port and space are locked */
729 ipc_port_init(port, space, flags, name);
730
731 #if MACH_ASSERT
732 ipc_port_init_debug(port, &buf[0], IP_CALLSTACK_MAX);
733 #endif /* MACH_ASSERT */
734
735 /* unlock space after init */
736 is_write_unlock(space);
737
738 *namep = name;
739 *portp = port;
740
741 return KERN_SUCCESS;
742 }
743
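/*
 * Illustrative sketch (editor's addition): a typical caller of
 * ipc_port_alloc(). The port comes back locked and with no extra
 * reference for the caller, so setup finishes under the lock and the
 * caller then unlocks. Names and flags here are only examples.
 */
#if 0
mach_port_name_t name;
ipc_port_t port;

kr = ipc_port_alloc(space, IPC_PORT_INIT_MESSAGE_QUEUE, &name, &port);
if (kr == KERN_SUCCESS) {
	/* ... caller-specific initialization under the port lock ... */
	ip_unlock(port);
}
#endif
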
744 /*
745 * Routine: ipc_port_alloc_name
746 * Purpose:
747 * Allocate a port, with a specific name.
748 * Conditions:
749 * Nothing locked. If successful, the port is returned
750 * locked. (The caller doesn't have a reference.)
751 * Returns:
752 * KERN_SUCCESS The port is allocated.
753 * KERN_INVALID_TASK The space is dead.
754 * KERN_NAME_EXISTS The name already denotes a right.
755 * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
756 */
757
758 kern_return_t
759 ipc_port_alloc_name(
760 ipc_space_t space,
761 ipc_port_init_flags_t flags,
762 mach_port_name_t name,
763 ipc_port_t *portp)
764 {
765 ipc_port_t port;
766 kern_return_t kr;
767 mach_port_type_t type = MACH_PORT_TYPE_RECEIVE;
768 mach_port_urefs_t urefs = 0;
769
770 #if MACH_ASSERT
771 uintptr_t buf[IP_CALLSTACK_MAX];
772 ipc_port_callstack_init_debug(&buf[0], IP_CALLSTACK_MAX);
773 #endif /* MACH_ASSERT */
774
775 if (flags & IPC_PORT_INIT_MAKE_SEND_RIGHT) {
776 type |= MACH_PORT_TYPE_SEND;
777 urefs = 1;
778 }
779 kr = ipc_object_alloc_name(space, IOT_PORT, type, urefs,
780 name, (ipc_object_t *) &port);
781 if (kr != KERN_SUCCESS) {
782 return kr;
783 }
784
785 /* port is locked */
786
787 ipc_port_init(port, space, flags, name);
788
789 #if MACH_ASSERT
790 ipc_port_init_debug(port, &buf[0], IP_CALLSTACK_MAX);
791 #endif /* MACH_ASSERT */
792
793 *portp = port;
794
795 return KERN_SUCCESS;
796 }
797
798 /*
799 * Routine: ipc_port_spnotify
800 * Purpose:
801 * Generate send-possible port notifications.
802 * Conditions:
803 * Nothing locked, reference held on port.
804 */
805 void
806 ipc_port_spnotify(
807 ipc_port_t port)
808 {
809 ipc_port_request_index_t index = 0;
810 ipc_table_elems_t size = 0;
811
812 /*
813 * If the port has no send-possible request
814 * armed, don't bother to lock the port.
815 */
816 if (port->ip_sprequests == 0) {
817 return;
818 }
819
820 ip_lock(port);
821
822 #if IMPORTANCE_INHERITANCE
823 if (port->ip_spimportant != 0) {
824 port->ip_spimportant = 0;
825 if (ipc_port_importance_delta(port, IPID_OPTION_NORMAL, -1) == TRUE) {
826 ip_lock(port);
827 }
828 }
829 #endif /* IMPORTANCE_INHERITANCE */
830
831 if (port->ip_sprequests == 0) {
832 ip_unlock(port);
833 return;
834 }
835 port->ip_sprequests = 0;
836
837 revalidate:
838 if (ip_active(port)) {
839 ipc_port_request_t requests;
840
841 /* table may change each time port unlocked (reload) */
842 requests = port->ip_requests;
843 assert(requests != IPR_NULL);
844
845 /*
846 * no need to go beyond the table size as it was when we
847 * first entered - those are future notifications.
848 */
849 if (size == 0) {
850 size = requests->ipr_size->its_size;
851 }
852
853 /* no need to backtrack either */
854 while (++index < size) {
855 ipc_port_request_t ipr = &requests[index];
856 mach_port_name_t name = ipr->ipr_name;
857 ipc_port_t soright = IPR_SOR_PORT(ipr->ipr_soright);
858 boolean_t armed = IPR_SOR_SPARMED(ipr->ipr_soright);
859
860 if (MACH_PORT_VALID(name) && armed && IP_VALID(soright)) {
861 /* claim the send-once right - the slot stays in use */
862 ipr->ipr_soright = IP_NULL;
863 ip_unlock(port);
864
865 ipc_notify_send_possible(soright, name);
866
867 ip_lock(port);
868 goto revalidate;
869 }
870 }
871 }
872 ip_unlock(port);
873 return;
874 }
875
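/*
 * Illustrative sketch (editor's addition): the revalidate pattern used
 * above. The port lock is dropped around every notification upcall, so
 * the scan restarts from the saved index and re-reads the (possibly
 * replaced) request table each time the lock is retaken. The helper
 * named here is hypothetical.
 */
#if 0
ip_lock(port);
while (scan_has_armed_slot(port, &index, &name, &soright)) { /* hypothetical */
	ip_unlock(port);
	ipc_notify_send_possible(soright, name);    /* no locks held */
	ip_lock(port);                              /* table may have changed */
}
ip_unlock(port);
#endif
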
876 /*
877 * Routine: ipc_port_dnnotify
878 * Purpose:
879 * Generate dead name notifications for
880 * all outstanding dead-name and send-
881 * possible requests.
882 * Conditions:
883 * Nothing locked.
884 * Port must be inactive.
885 * Reference held on port.
886 */
887 void
888 ipc_port_dnnotify(
889 ipc_port_t port)
890 {
891 ipc_port_request_t requests = port->ip_requests;
892
893 assert(!ip_active(port));
894 if (requests != IPR_NULL) {
895 ipc_table_size_t its = requests->ipr_size;
896 ipc_table_elems_t size = its->its_size;
897 ipc_port_request_index_t index;
898 for (index = 1; index < size; index++) {
899 ipc_port_request_t ipr = &requests[index];
900 mach_port_name_t name = ipr->ipr_name;
901 ipc_port_t soright = IPR_SOR_PORT(ipr->ipr_soright);
902
903 if (MACH_PORT_VALID(name) && IP_VALID(soright)) {
904 ipc_notify_dead_name(soright, name);
905 }
906 }
907 }
908 }
909
910
911 /*
912 * Routine: ipc_port_destroy
913 * Purpose:
914 * Destroys a port. Cleans up queued messages.
915 *
916 * If the port has a backup, it doesn't get destroyed,
917 * but is sent in a port-destroyed notification to the backup.
918 * Conditions:
919 * The port is locked and alive; nothing else locked.
920 * The caller has a reference, which is consumed.
921 * Afterwards, the port is unlocked and dead.
922 */
923
924 void
925 ipc_port_destroy(ipc_port_t port)
926 {
927 ipc_port_t pdrequest, nsrequest;
928 ipc_mqueue_t mqueue;
929 ipc_kmsg_t kmsg;
930 boolean_t special_reply = port->ip_specialreply;
931 struct task_watchport_elem *watchport_elem = NULL;
932
933 #if IMPORTANCE_INHERITANCE
934 ipc_importance_task_t release_imp_task = IIT_NULL;
935 thread_t self = current_thread();
936 boolean_t top = (self->ith_assertions == 0);
937 natural_t assertcnt = 0;
938 #endif /* IMPORTANCE_INHERITANCE */
939
940 require_ip_active(port);
941 /* port->ip_receiver_name is garbage */
942 /* port->ip_receiver/port->ip_destination is garbage */
943
944 /* clear any reply-port context */
945 port->ip_reply_context = 0;
946
947 /* check for a backup port */
948 pdrequest = port->ip_pdrequest;
949
950 /*
951 * Panic if a special reply has ip_pdrequest or ip_tempowner
952 * set, as this causes a type confusion while accessing the
953 * kdata union.
954 */
955 if (special_reply && (pdrequest || port->ip_tempowner)) {
956 panic("ipc_port_destroy: invalid state");
957 }
958
959 #if IMPORTANCE_INHERITANCE
960 /* determine how many assertions to drop and from whom */
961 if (port->ip_tempowner != 0) {
962 assert(top);
963 release_imp_task = port->ip_imp_task;
964 if (IIT_NULL != release_imp_task) {
965 port->ip_imp_task = IIT_NULL;
966 assertcnt = port->ip_impcount;
967 }
968 /* Otherwise, nothing to drop */
969 } else {
970 assertcnt = port->ip_impcount;
971 if (pdrequest != IP_NULL) {
972 /* mark in limbo for the journey */
973 port->ip_tempowner = 1;
974 }
975 }
976
977 if (top) {
978 self->ith_assertions = assertcnt;
979 }
980 #endif /* IMPORTANCE_INHERITANCE */
981
982 if (pdrequest != IP_NULL) {
983 /* clear receiver, don't destroy the port */
984 (void)ipc_port_clear_receiver(port, FALSE);
985 assert(port->ip_in_pset == 0);
986 assert(port->ip_mscount == 0);
987
988 /* we assume the ref for pdrequest */
989 port->ip_pdrequest = IP_NULL;
990
991 imq_lock(&port->ip_messages);
992 watchport_elem = ipc_port_clear_watchport_elem_internal(port);
993 ipc_port_send_turnstile_recompute_push_locked(port);
994 /* mqueue and port unlocked */
995
996 if (special_reply) {
997 ipc_port_adjust_special_reply_port(port,
998 IPC_PORT_ADJUST_SR_ALLOW_SYNC_LINKAGE);
999 }
1000
1001 if (watchport_elem) {
1002 task_watchport_elem_deallocate(watchport_elem);
1003 watchport_elem = NULL;
1004 }
1005 /* consumes our refs for port and pdrequest */
1006 ipc_notify_port_destroyed(pdrequest, port);
1007
1008 goto drop_assertions;
1009 }
1010
1011 /*
1012 * The mach_msg_* paths don't hold a port lock, they only hold a
1013 * reference to the port object. If a thread raced us and is now
1014 * blocked waiting for message reception on this mqueue (or waiting
1015 * for ipc_mqueue_full), it will never be woken up. We call
1016 * ipc_port_clear_receiver() here, _after_ the port has been marked
1017 * inactive, to wakeup any threads which may be blocked and ensure
1018 * that no other thread can get lost waiting for a wake up on a
1019 * port/mqueue that's been destroyed.
1020 */
1021 boolean_t reap_msgs = FALSE;
1022 reap_msgs = ipc_port_clear_receiver(port, TRUE); /* marks port and mqueue inactive */
1023 assert(port->ip_in_pset == 0);
1024 assert(port->ip_mscount == 0);
1025
1026 imq_lock(&port->ip_messages);
1027 watchport_elem = ipc_port_clear_watchport_elem_internal(port);
1028 imq_unlock(&port->ip_messages);
1029 nsrequest = port->ip_nsrequest;
1030
1031 /*
1032 * If the port has a preallocated message buffer and that buffer
1033 * is not in use, free it. If it has one in use, then the kmsg
1034 * free will detect that we freed the association and it can free it
1035 * like a normal buffer.
1036 *
1037 * Once the port is marked inactive we don't need to keep it locked.
1038 */
1039 if (IP_PREALLOC(port)) {
1040 ipc_port_t inuse_port;
1041
1042 kmsg = port->ip_premsg;
1043 assert(kmsg != IKM_NULL);
1044 inuse_port = ikm_prealloc_inuse_port(kmsg);
1045 ipc_kmsg_clear_prealloc(kmsg, port);
1046
1047 imq_lock(&port->ip_messages);
1048 ipc_port_send_turnstile_recompute_push_locked(port);
1049 /* mqueue and port unlocked */
1050
1051 if (inuse_port != IP_NULL) {
1052 assert(inuse_port == port);
1053 } else {
1054 ipc_kmsg_free(kmsg);
1055 }
1056 } else {
1057 imq_lock(&port->ip_messages);
1058 ipc_port_send_turnstile_recompute_push_locked(port);
1059 /* mqueue and port unlocked */
1060 }
1061
1062 /* Deallocate the watchport element */
1063 if (watchport_elem) {
1064 task_watchport_elem_deallocate(watchport_elem);
1065 watchport_elem = NULL;
1066 }
1067
1068 /* unlink the kmsg from special reply port */
1069 if (special_reply) {
1070 ipc_port_adjust_special_reply_port(port,
1071 IPC_PORT_ADJUST_SR_ALLOW_SYNC_LINKAGE);
1072 }
1073
1074 /* throw away no-senders request */
1075 if (nsrequest != IP_NULL) {
1076 ipc_notify_send_once(nsrequest); /* consumes ref */
1077 }
1078 /*
1079 * Reap any kmsg objects waiting to be destroyed.
1080 * This must be done after we've released the port lock.
1081 */
1082 if (reap_msgs) {
1083 ipc_kmsg_reap_delayed();
1084 }
1085
1086 mqueue = &port->ip_messages;
1087
1088 /* cleanup waitq related resources */
1089 ipc_mqueue_deinit(mqueue);
1090
1091 /* generate dead-name notifications */
1092 ipc_port_dnnotify(port);
1093
1094 ipc_kobject_destroy(port);
1095
1096 ip_release(port); /* consume caller's ref */
1097
1098 drop_assertions:
1099 #if IMPORTANCE_INHERITANCE
1100 if (release_imp_task != IIT_NULL) {
1101 if (assertcnt > 0) {
1102 assert(top);
1103 self->ith_assertions = 0;
1104 assert(ipc_importance_task_is_any_receiver_type(release_imp_task));
1105 ipc_importance_task_drop_internal_assertion(release_imp_task, assertcnt);
1106 }
1107 ipc_importance_task_release(release_imp_task);
1108 } else if (assertcnt > 0) {
1109 if (top) {
1110 self->ith_assertions = 0;
1111 release_imp_task = current_task()->task_imp_base;
1112 if (ipc_importance_task_is_any_receiver_type(release_imp_task)) {
1113 ipc_importance_task_drop_internal_assertion(release_imp_task, assertcnt);
1114 }
1115 }
1116 }
1117 #endif /* IMPORTANCE_INHERITANCE */
1118 }
1119
1120 /*
1121 * Routine: ipc_port_destination_chain_lock
1122 * Purpose:
1123 * Search for the end of the chain (a port not in transit),
1124 * acquiring locks along the way, and return it in `base`.
1125 *
1126 * Returns true if a reference was taken on `base`
1127 *
1128 * Conditions:
1129 * No ports locked.
1130 * ipc_port_multiple_lock held.
1131 */
1132 boolean_t
1133 ipc_port_destination_chain_lock(
1134 ipc_port_t port,
1135 ipc_port_t *base)
1136 {
1137 for (;;) {
1138 ip_lock(port);
1139
1140 if (!ip_active(port)) {
1141 /*
1142 * Active ports that are ip_lock()ed cannot go away.
1143 *
1144 * But inactive ports at the end of walking
1145 * an ip_destination chain are only protected
1146 * from space termination cleanup while the entire
1147 * chain of ports leading to them is held.
1148 *
1149 * Callers of this code tend to unlock the chain
1150 * in the same order as this walk, which doesn't
1151 * protect `base` properly when it's inactive.
1152 *
1153 * In that case, take a reference that the caller
1154 * is responsible for releasing.
1155 */
1156 ip_reference(port);
1157 *base = port;
1158 return true;
1159 }
1160 if ((port->ip_receiver_name != MACH_PORT_NULL) ||
1161 (port->ip_destination == IP_NULL)) {
1162 *base = port;
1163 return false;
1164 }
1165
1166 port = port->ip_destination;
1167 }
1168 }
1169
1170
1171 /*
1172 * Routine: ipc_port_check_circularity
1173 * Purpose:
1174 * Check if queueing "port" in a message for "dest"
1175 * would create a circular group of ports and messages.
1176 *
1177 * If no circularity (FALSE returned), then "port"
1178 * is changed from "in limbo" to "in transit".
1179 *
1180 * That is, we want to set port->ip_destination == dest,
1181 * but guaranteeing that this doesn't create a circle
1182 * port->ip_destination->ip_destination->... == port
1183 *
1184 * Conditions:
1185 * No ports locked. References held for "port" and "dest".
1186 */
1187
1188 boolean_t
1189 ipc_port_check_circularity(
1190 ipc_port_t port,
1191 ipc_port_t dest)
1192 {
1193 #if IMPORTANCE_INHERITANCE
1194 /* adjust importance counts at the same time */
1195 return ipc_importance_check_circularity(port, dest);
1196 #else
1197 ipc_port_t base;
1198 struct task_watchport_elem *watchport_elem = NULL;
1199 bool took_base_ref = false;
1200
1201 assert(port != IP_NULL);
1202 assert(dest != IP_NULL);
1203
1204 if (port == dest) {
1205 return TRUE;
1206 }
1207 base = dest;
1208
1209 /* Check if destination needs a turnstile */
1210 ipc_port_send_turnstile_prepare(dest);
1211
1212 /*
1213 * First try a quick check that can run in parallel.
1214 * No circularity if dest is not in transit.
1215 */
1216 ip_lock(port);
1217 if (ip_lock_try(dest)) {
1218 if (!ip_active(dest) ||
1219 (dest->ip_receiver_name != MACH_PORT_NULL) ||
1220 (dest->ip_destination == IP_NULL)) {
1221 goto not_circular;
1222 }
1223
1224 /* dest is in transit; further checking necessary */
1225
1226 ip_unlock(dest);
1227 }
1228 ip_unlock(port);
1229
1230 ipc_port_multiple_lock(); /* massive serialization */
1231
1232 /*
1233 * Search for the end of the chain (a port not in transit),
1234 * acquiring locks along the way.
1235 */
1236
1237 took_base_ref = ipc_port_destination_chain_lock(dest, &base);
1238 /* all ports in chain from dest to base, inclusive, are locked */
1239
1240 if (port == base) {
1241 /* circularity detected! */
1242
1243 ipc_port_multiple_unlock();
1244
1245 /* port (== base) is in limbo */
1246 require_ip_active(port);
1247 assert(port->ip_receiver_name == MACH_PORT_NULL);
1248 assert(port->ip_destination == IP_NULL);
1249 assert(!took_base_ref);
1250
1251 base = dest;
1252 while (base != IP_NULL) {
1253 ipc_port_t next;
1254
1255 /* dest is in transit or in limbo */
1256 require_ip_active(base);
1257 assert(base->ip_receiver_name == MACH_PORT_NULL);
1258
1259 next = base->ip_destination;
1260 ip_unlock(base);
1261 base = next;
1262 }
1263
1264 ipc_port_send_turnstile_complete(dest);
1265 return TRUE;
1266 }
1267
1268 /*
1269 * The guarantee: lock port while the entire chain is locked.
1270 * Once port is locked, we can take a reference to dest,
1271 * add port to the chain, and unlock everything.
1272 */
1273
1274 ip_lock(port);
1275 ipc_port_multiple_unlock();
1276
1277 not_circular:
1278 imq_lock(&port->ip_messages);
1279
1280 /* port is in limbo */
1281 require_ip_active(port);
1282 assert(port->ip_receiver_name == MACH_PORT_NULL);
1283 assert(port->ip_destination == IP_NULL);
1284
1285 /* Clear the watchport boost */
1286 watchport_elem = ipc_port_clear_watchport_elem_internal(port);
1287
1288 /* Check if the port is being enqueued as a part of sync bootstrap checkin */
1289 if (dest->ip_specialreply && dest->ip_sync_bootstrap_checkin) {
1290 port->ip_sync_bootstrap_checkin = 1;
1291 }
1292
1293 ip_reference(dest);
1294 port->ip_destination = dest;
1295
1296 /* Setup linkage for source port if it has sync ipc push */
1297 struct turnstile *send_turnstile = TURNSTILE_NULL;
1298 if (port_send_turnstile(port)) {
1299 send_turnstile = turnstile_prepare((uintptr_t)port,
1300 port_send_turnstile_address(port),
1301 TURNSTILE_NULL, TURNSTILE_SYNC_IPC);
1302
1303 /*
1304 * What ipc_port_adjust_port_locked would do,
1305 * but we need to also drop even more locks before
1306 * calling turnstile_update_inheritor_complete().
1307 */
1308 ipc_port_adjust_sync_link_state_locked(port, PORT_SYNC_LINK_ANY, NULL);
1309
1310 turnstile_update_inheritor(send_turnstile, port_send_turnstile(dest),
1311 (TURNSTILE_INHERITOR_TURNSTILE | TURNSTILE_IMMEDIATE_UPDATE));
1312
1313 /* update complete and turnstile complete called after dropping all locks */
1314 }
1315 imq_unlock(&port->ip_messages);
1316
1317 /* now unlock chain */
1318
1319 ip_unlock(port);
1320
1321 for (;;) {
1322 ipc_port_t next;
1323
1324 if (dest == base) {
1325 break;
1326 }
1327
1328 /* port is in transit */
1329 require_ip_active(dest);
1330 assert(dest->ip_receiver_name == MACH_PORT_NULL);
1331 assert(dest->ip_destination != IP_NULL);
1332
1333 next = dest->ip_destination;
1334 ip_unlock(dest);
1335 dest = next;
1336 }
1337
1338 /* base is not in transit */
1339 assert(!ip_active(base) ||
1340 (base->ip_receiver_name != MACH_PORT_NULL) ||
1341 (base->ip_destination == IP_NULL));
1342
1343 ip_unlock(base);
1344 if (took_base_ref) {
1345 ip_release(base);
1346 }
1347
1348 /* All locks dropped, call turnstile_update_inheritor_complete for source port's turnstile */
1349 if (send_turnstile) {
1350 turnstile_update_inheritor_complete(send_turnstile, TURNSTILE_INTERLOCK_NOT_HELD);
1351
1352 /* Take the mq lock to call turnstile complete */
1353 imq_lock(&port->ip_messages);
1354 turnstile_complete((uintptr_t)port, port_send_turnstile_address(port), NULL, TURNSTILE_SYNC_IPC);
1355 send_turnstile = TURNSTILE_NULL;
1356 imq_unlock(&port->ip_messages);
1357 turnstile_cleanup();
1358 }
1359
1360 if (watchport_elem) {
1361 task_watchport_elem_deallocate(watchport_elem);
1362 }
1363
1364 return FALSE;
1365 #endif /* !IMPORTANCE_INHERITANCE */
1366 }
1367
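/*
 * Illustrative sketch (editor's addition): the shape of the cycle the
 * routine above refuses to create. If A's receive right were queued in
 * a message to B while B's receive right is queued in a message to A,
 *
 *     A->ip_destination == B  and  B->ip_destination == A
 *
 * then neither message could ever be received. Walking dest's
 * ip_destination chain to its base and checking that the base is not
 * `port` rules this out before the new link is made.
 */
#if 0
/* after a FALSE (no-circularity) return, port is "in transit": */
assert(port->ip_receiver_name == MACH_PORT_NULL);
assert(port->ip_destination == dest);
#endif
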
1368 /*
1369 * Routine: ipc_port_watchport_elem
1370 * Purpose:
1371 * Get the port's watchport elem field
1372 *
1373 * Conditions:
1374 * mqueue locked
1375 */
1376 static struct task_watchport_elem *
1377 ipc_port_watchport_elem(ipc_port_t port)
1378 {
1379 return port->ip_messages.imq_wait_queue.waitq_tspriv;
1380 }
1381
1382 /*
1383 * Routine: ipc_port_update_watchport_elem
1384 * Purpose:
1385 * Set the port's watchport elem field
1386 *
1387 * Conditions:
1388 * mqueue locked
1389 */
1390 static inline struct task_watchport_elem *
1391 ipc_port_update_watchport_elem(ipc_port_t port, struct task_watchport_elem *we)
1392 {
1393 assert(!port->ip_specialreply);
1394 struct task_watchport_elem *old_we = ipc_port_watchport_elem(port);
1395 port->ip_messages.imq_wait_queue.waitq_tspriv = we;
1396 return old_we;
1397 }
1398
1399 /*
1400 * Routine: ipc_special_reply_stash_pid_locked
1401 * Purpose:
1402 * Set the pid of the process that copied out the send-once right to the special reply port.
1403 *
1404 * Conditions:
1405 * port locked
1406 */
1407 static inline void
1408 ipc_special_reply_stash_pid_locked(ipc_port_t port, int pid)
1409 {
1410 assert(port->ip_specialreply);
1411 port->ip_messages.imq_wait_queue.waitq_priv_pid = pid;
1412 return;
1413 }
1414
1415 /*
1416 * Routine: ipc_special_reply_get_pid_locked
1417 * Purpose:
1418 * Get the pid of the process that copied out the send-once right to the special reply port.
1419 *
1420 * Conditions:
1421 * port locked
1422 */
1423 int
1424 ipc_special_reply_get_pid_locked(ipc_port_t port)
1425 {
1426 assert(port->ip_specialreply);
1427 return port->ip_messages.imq_wait_queue.waitq_priv_pid;
1428 }
1429
1430 /*
1431 * Update the recv turnstile inheritor for a port.
1432 *
1433 * Sync IPC through the port receive turnstile only happens for the special
1434 * reply port case. It has three sub-cases:
1435 *
1436 * 1. a send-once right is in transit, and pushes on the send turnstile of its
1437 * destination mqueue.
1438 *
1439 * 2. a send-once right has been stashed on a knote it was copied out "through",
1440 * as the first such copied out port.
1441 *
1442 * 3. a send-once right has been stashed on a knote it was copied out "through",
1443 * as the second or more copied out port.
1444 */
1445 void
1446 ipc_port_recv_update_inheritor(
1447 ipc_port_t port,
1448 struct turnstile *rcv_turnstile,
1449 turnstile_update_flags_t flags)
1450 {
1451 struct turnstile *inheritor = TURNSTILE_NULL;
1452 struct knote *kn;
1453
1454 if (ip_active(port) && port->ip_specialreply) {
1455 imq_held(&port->ip_messages);
1456
1457 switch (port->ip_sync_link_state) {
1458 case PORT_SYNC_LINK_PORT:
1459 if (port->ip_sync_inheritor_port != NULL) {
1460 inheritor = port_send_turnstile(port->ip_sync_inheritor_port);
1461 }
1462 break;
1463
1464 case PORT_SYNC_LINK_WORKLOOP_KNOTE:
1465 kn = port->ip_sync_inheritor_knote;
1466 inheritor = filt_ipc_kqueue_turnstile(kn);
1467 break;
1468
1469 case PORT_SYNC_LINK_WORKLOOP_STASH:
1470 inheritor = port->ip_sync_inheritor_ts;
1471 break;
1472 }
1473 }
1474
1475 turnstile_update_inheritor(rcv_turnstile, inheritor,
1476 flags | TURNSTILE_INHERITOR_TURNSTILE);
1477 }
1478
1479 /*
1480 * Update the send turnstile inheritor for a port.
1481 *
1482 * Sync IPC through the port send turnstile has 7 possible reasons to be linked:
1483 *
1484 * 1. a special reply port is part of sync ipc for bootstrap checkin and needs
1485 * to push on thread doing the sync ipc.
1486 *
1487 * 2. a receive right is in transit, and pushes on the send turnstile of its
1488 * destination mqueue.
1489 *
1490 * 3. port was passed as an exec watchport and port is pushing on main thread
1491 * of the task.
1492 *
1493 * 4. a receive right has been stashed on a knote it was copied out "through",
1494 * as the first such copied out port (same as PORT_SYNC_LINK_WORKLOOP_KNOTE
1495 * for the special reply port)
1496 *
1497 * 5. a receive right has been stashed on a knote it was copied out "through",
1498 * as the second or more copied out port (same as
1499 * PORT_SYNC_LINK_WORKLOOP_STASH for the special reply port)
1500 *
1501 * 6. a receive right has been copied out as a part of sync bootstrap checkin
1502 * and needs to push on thread doing the sync bootstrap checkin.
1503 *
1504 * 7. the receive right is monitored by a knote, and pushes on any that is
1505 * registered on a workloop. filt_machport makes sure that if such a knote
1506 * exists, it is kept as the first item in the knote list, so we never need
1507 * to walk.
1508 */
1509 void
1510 ipc_port_send_update_inheritor(
1511 ipc_port_t port,
1512 struct turnstile *send_turnstile,
1513 turnstile_update_flags_t flags)
1514 {
1515 ipc_mqueue_t mqueue = &port->ip_messages;
1516 turnstile_inheritor_t inheritor = TURNSTILE_INHERITOR_NULL;
1517 struct knote *kn;
1518 turnstile_update_flags_t inheritor_flags = TURNSTILE_INHERITOR_TURNSTILE;
1519
1520 imq_held(mqueue);
1521
1522 if (!ip_active(port)) {
1523 /* this port is no longer active, it should not push anywhere */
1524 } else if (port->ip_specialreply) {
1525 /* Case 1. */
1526 if (port->ip_sync_bootstrap_checkin && prioritize_launch) {
1527 inheritor = port->ip_messages.imq_srp_owner_thread;
1528 inheritor_flags = TURNSTILE_INHERITOR_THREAD;
1529 }
1530 } else if (port->ip_receiver_name == MACH_PORT_NULL &&
1531 port->ip_destination != NULL) {
1532 /* Case 2. */
1533 inheritor = port_send_turnstile(port->ip_destination);
1534 } else if (ipc_port_watchport_elem(port) != NULL) {
1535 /* Case 3. */
1536 if (prioritize_launch) {
1537 assert(port->ip_sync_link_state == PORT_SYNC_LINK_ANY);
1538 inheritor = ipc_port_get_watchport_inheritor(port);
1539 inheritor_flags = TURNSTILE_INHERITOR_THREAD;
1540 }
1541 } else if (port->ip_sync_link_state == PORT_SYNC_LINK_WORKLOOP_KNOTE) {
1542 /* Case 4. */
1543 inheritor = filt_ipc_kqueue_turnstile(mqueue->imq_inheritor_knote);
1544 } else if (port->ip_sync_link_state == PORT_SYNC_LINK_WORKLOOP_STASH) {
1545 /* Case 5. */
1546 inheritor = mqueue->imq_inheritor_turnstile;
1547 } else if (port->ip_sync_link_state == PORT_SYNC_LINK_RCV_THREAD) {
1548 /* Case 6. */
1549 if (prioritize_launch) {
1550 inheritor = port->ip_messages.imq_inheritor_thread_ref;
1551 inheritor_flags = TURNSTILE_INHERITOR_THREAD;
1552 }
1553 } else if ((kn = SLIST_FIRST(&mqueue->imq_klist))) {
1554 /* Case 7. Push on a workloop that is interested */
1555 if (filt_machport_kqueue_has_turnstile(kn)) {
1556 assert(port->ip_sync_link_state == PORT_SYNC_LINK_ANY);
1557 inheritor = filt_ipc_kqueue_turnstile(kn);
1558 }
1559 }
1560
1561 turnstile_update_inheritor(send_turnstile, inheritor,
1562 flags | inheritor_flags);
1563 }
1564
1565 /*
1566 * Routine: ipc_port_send_turnstile_prepare
1567 * Purpose:
1568 * Get a reference on port's send turnstile, if
1569 * port does not have a send turnstile then allocate one.
1570 *
1571 * Conditions:
1572 * Nothing is locked.
1573 */
1574 void
1575 ipc_port_send_turnstile_prepare(ipc_port_t port)
1576 {
1577 struct turnstile *turnstile = TURNSTILE_NULL;
1578 struct turnstile *send_turnstile = TURNSTILE_NULL;
1579
1580 retry_alloc:
1581 imq_lock(&port->ip_messages);
1582
1583 if (port_send_turnstile(port) == NULL ||
1584 port_send_turnstile(port)->ts_port_ref == 0) {
1585 if (turnstile == TURNSTILE_NULL) {
1586 imq_unlock(&port->ip_messages);
1587 turnstile = turnstile_alloc();
1588 goto retry_alloc;
1589 }
1590
1591 send_turnstile = turnstile_prepare((uintptr_t)port,
1592 port_send_turnstile_address(port),
1593 turnstile, TURNSTILE_SYNC_IPC);
1594 turnstile = TURNSTILE_NULL;
1595
1596 ipc_port_send_update_inheritor(port, send_turnstile,
1597 TURNSTILE_IMMEDIATE_UPDATE);
1598
1599 /* turnstile complete will be called in ipc_port_send_turnstile_complete */
1600 }
1601
1602 /* Increment turnstile counter */
1603 port_send_turnstile(port)->ts_port_ref++;
1604 imq_unlock(&port->ip_messages);
1605
1606 if (send_turnstile) {
1607 turnstile_update_inheritor_complete(send_turnstile,
1608 TURNSTILE_INTERLOCK_NOT_HELD);
1609 }
1610 if (turnstile != TURNSTILE_NULL) {
1611 turnstile_deallocate(turnstile);
1612 }
1613 }
1614
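/*
 * Illustrative sketch (editor's addition): prepare and complete must
 * pair. A sender takes a send-turnstile reference for the lifetime of
 * its push and drops it when done; the allocate-outside-the-lock retry
 * above exists because turnstile_alloc() may block and so cannot run
 * under the mqueue lock.
 */
#if 0
ipc_port_send_turnstile_prepare(dest);   /* +1 ts_port_ref, may allocate */
/* ... enqueue, block, or push on the send turnstile ... */
ipc_port_send_turnstile_complete(dest);  /* -1 ref; torn down at zero */
#endif
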
1615
1616 /*
1617 * Routine: ipc_port_send_turnstile_complete
1618 * Purpose:
1619 * Drop a ref on the port's send turnstile, if the
1620 * ref becomes zero, deallocate the turnstile.
1621 *
1622 * Conditions:
1623 * The space might be locked, use safe deallocate.
1624 */
1625 void
1626 ipc_port_send_turnstile_complete(ipc_port_t port)
1627 {
1628 struct turnstile *turnstile = TURNSTILE_NULL;
1629
1630 /* Drop turnstile count on dest port */
1631 imq_lock(&port->ip_messages);
1632
1633 port_send_turnstile(port)->ts_port_ref--;
1634 if (port_send_turnstile(port)->ts_port_ref == 0) {
1635 turnstile_complete((uintptr_t)port, port_send_turnstile_address(port),
1636 &turnstile, TURNSTILE_SYNC_IPC);
1637 assert(turnstile != TURNSTILE_NULL);
1638 }
1639 imq_unlock(&port->ip_messages);
1640 turnstile_cleanup();
1641
1642 if (turnstile != TURNSTILE_NULL) {
1643 turnstile_deallocate_safe(turnstile);
1644 turnstile = TURNSTILE_NULL;
1645 }
1646 }
1647
1648 /*
1649 * Routine: ipc_port_rcv_turnstile
1650 * Purpose:
1651 * Get the port's receive turnstile
1652 *
1653 * Conditions:
1654 * mqueue locked or thread waiting on turnstile is locked.
1655 */
1656 static struct turnstile *
1657 ipc_port_rcv_turnstile(ipc_port_t port)
1658 {
1659 return *port_rcv_turnstile_address(port);
1660 }
1661
1662
1663 /*
1664 * Routine: ipc_port_link_special_reply_port
1665 * Purpose:
1666 * Link the special reply port with the destination port.
1667 * Allocates turnstile to dest port.
1668 *
1669 * Conditions:
1670 * Nothing is locked.
1671 */
1672 void
1673 ipc_port_link_special_reply_port(
1674 ipc_port_t special_reply_port,
1675 ipc_port_t dest_port,
1676 boolean_t sync_bootstrap_checkin)
1677 {
1678 boolean_t drop_turnstile_ref = FALSE;
1679 boolean_t special_reply = FALSE;
1680
1681 /* Check if dest_port needs a turnstile */
1682 ipc_port_send_turnstile_prepare(dest_port);
1683
1684 /* Lock the special reply port and establish the linkage */
1685 ip_lock(special_reply_port);
1686 imq_lock(&special_reply_port->ip_messages);
1687
1688 special_reply = special_reply_port->ip_specialreply;
1689
1690 if (sync_bootstrap_checkin && special_reply) {
1691 special_reply_port->ip_sync_bootstrap_checkin = 1;
1692 }
1693
1694 /* Check if we need to drop the acquired turnstile ref on dest port */
1695 if (!special_reply ||
1696 special_reply_port->ip_sync_link_state != PORT_SYNC_LINK_ANY ||
1697 special_reply_port->ip_sync_inheritor_port != IPC_PORT_NULL) {
1698 drop_turnstile_ref = TRUE;
1699 } else {
1700 /* take a reference on dest_port */
1701 ip_reference(dest_port);
1702 special_reply_port->ip_sync_inheritor_port = dest_port;
1703 special_reply_port->ip_sync_link_state = PORT_SYNC_LINK_PORT;
1704 }
1705
1706 imq_unlock(&special_reply_port->ip_messages);
1707 ip_unlock(special_reply_port);
1708
1709 if (special_reply) {
1710 /*
1711 * For special reply ports, if the destination port is
1712 * marked with the thread group blocked tracking flag,
1713 * callout to the performance controller.
1714 */
1715 ipc_port_thread_group_blocked(dest_port);
1716 }
1717
1718 if (drop_turnstile_ref) {
1719 ipc_port_send_turnstile_complete(dest_port);
1720 }
1721
1722 return;
1723 }
1724
1725 /*
1726 * Routine: ipc_port_thread_group_blocked
1727 * Purpose:
1728 * Call thread_group_blocked callout if the port
1729 * has ip_tg_block_tracking bit set and the thread
1730 * has not made this callout already.
1731 *
1732 * Conditions:
1733 * Nothing is locked.
1734 */
1735 void
1736 ipc_port_thread_group_blocked(ipc_port_t port __unused)
1737 {
1738 #if CONFIG_THREAD_GROUPS
1739 bool port_tg_block_tracking = false;
1740 thread_t self = current_thread();
1741
1742 if (self->thread_group == NULL ||
1743 (self->options & TH_OPT_IPC_TG_BLOCKED)) {
1744 return;
1745 }
1746
1747 port_tg_block_tracking = port->ip_tg_block_tracking;
1748 if (!port_tg_block_tracking) {
1749 return;
1750 }
1751
1752 machine_thread_group_blocked(self->thread_group, NULL,
1753 PERFCONTROL_CALLOUT_BLOCKING_TG_RENDER_SERVER, self);
1754
1755 self->options |= TH_OPT_IPC_TG_BLOCKED;
1756 #endif
1757 }
1758
1759 /*
1760 * Routine: ipc_port_thread_group_unblocked
1761 * Purpose:
1762 * Call thread_group_unblocked callout if the
1763 * thread had previously made a thread_group_blocked
1764 * callout before (indicated by TH_OPT_IPC_TG_BLOCKED
1765 * flag on the thread).
1766 *
1767 * Conditions:
1768 * Nothing is locked.
1769 */
1770 void
1771 ipc_port_thread_group_unblocked(void)
1772 {
1773 #if CONFIG_THREAD_GROUPS
1774 thread_t self = current_thread();
1775
1776 if (!(self->options & TH_OPT_IPC_TG_BLOCKED)) {
1777 return;
1778 }
1779
1780 machine_thread_group_unblocked(self->thread_group, NULL,
1781 PERFCONTROL_CALLOUT_BLOCKING_TG_RENDER_SERVER, self);
1782
1783 self->options &= ~TH_OPT_IPC_TG_BLOCKED;
1784 #endif
1785 }
1786
1787 #if DEVELOPMENT || DEBUG
1788 inline void
1789 ipc_special_reply_port_bits_reset(ipc_port_t special_reply_port)
1790 {
1791 special_reply_port->ip_srp_lost_link = 0;
1792 special_reply_port->ip_srp_msg_sent = 0;
1793 }
1794
1795 static inline void
1796 ipc_special_reply_port_msg_sent_reset(ipc_port_t special_reply_port)
1797 {
1798 if (special_reply_port->ip_specialreply == 1) {
1799 special_reply_port->ip_srp_msg_sent = 0;
1800 }
1801 }
1802
1803 inline void
1804 ipc_special_reply_port_msg_sent(ipc_port_t special_reply_port)
1805 {
1806 if (special_reply_port->ip_specialreply == 1) {
1807 special_reply_port->ip_srp_msg_sent = 1;
1808 }
1809 }
1810
1811 static inline void
1812 ipc_special_reply_port_lost_link(ipc_port_t special_reply_port)
1813 {
1814 if (special_reply_port->ip_specialreply == 1 && special_reply_port->ip_srp_msg_sent == 0) {
1815 special_reply_port->ip_srp_lost_link = 1;
1816 }
1817 }
1818
1819 #else /* DEVELOPMENT || DEBUG */
1820 inline void
1821 ipc_special_reply_port_bits_reset(__unused ipc_port_t special_reply_port)
1822 {
1823 return;
1824 }
1825
1826 static inline void
1827 ipc_special_reply_port_msg_sent_reset(__unused ipc_port_t special_reply_port)
1828 {
1829 return;
1830 }
1831
1832 inline void
1833 ipc_special_reply_port_msg_sent(__unused ipc_port_t special_reply_port)
1834 {
1835 return;
1836 }
1837
1838 static inline void
1839 ipc_special_reply_port_lost_link(__unused ipc_port_t special_reply_port)
1840 {
1841 return;
1842 }
1843 #endif /* DEVELOPMENT || DEBUG */
1844
1845 /*
1846 * Routine: ipc_port_adjust_special_reply_port_locked
1847 * Purpose:
1848 * If the special port has a turnstile, update its inheritor.
1849 * Condition:
1850 * Special reply port locked on entry.
1851 * Special reply port unlocked on return.
1852 * The passed in port is a special reply port.
1853 * Returns:
1854 * None.
1855 */
1856 void
1857 ipc_port_adjust_special_reply_port_locked(
1858 ipc_port_t special_reply_port,
1859 struct knote *kn,
1860 uint8_t flags,
1861 boolean_t get_turnstile)
1862 {
1863 ipc_port_t dest_port = IPC_PORT_NULL;
1864 int sync_link_state = PORT_SYNC_LINK_NO_LINKAGE;
1865 turnstile_inheritor_t inheritor = TURNSTILE_INHERITOR_NULL;
1866 struct turnstile *ts = TURNSTILE_NULL;
1867
1868 ip_lock_held(special_reply_port); // ip_sync_link_state is touched
1869 imq_lock(&special_reply_port->ip_messages);
1870
1871 if (!special_reply_port->ip_specialreply) {
1872 // only mach_msg_receive_results_complete() calls this with any port
1873 assert(get_turnstile);
1874 goto not_special;
1875 }
1876
1877 if (flags & IPC_PORT_ADJUST_SR_RECEIVED_MSG) {
1878 ipc_special_reply_port_msg_sent_reset(special_reply_port);
1879 }
1880
1881 if (flags & IPC_PORT_ADJUST_UNLINK_THREAD) {
1882 special_reply_port->ip_messages.imq_srp_owner_thread = NULL;
1883 }
1884
1885 if (flags & IPC_PORT_ADJUST_RESET_BOOSTRAP_CHECKIN) {
1886 special_reply_port->ip_sync_bootstrap_checkin = 0;
1887 }
1888
1889 /* Check if the special reply port is marked non-special */
1890 if (special_reply_port->ip_sync_link_state == PORT_SYNC_LINK_ANY) {
1891 not_special:
1892 if (get_turnstile) {
1893 turnstile_complete((uintptr_t)special_reply_port,
1894 port_rcv_turnstile_address(special_reply_port), NULL, TURNSTILE_SYNC_IPC);
1895 }
1896 imq_unlock(&special_reply_port->ip_messages);
1897 ip_unlock(special_reply_port);
1898 if (get_turnstile) {
1899 turnstile_cleanup();
1900 }
1901 return;
1902 }
1903
1904 if (flags & IPC_PORT_ADJUST_SR_LINK_WORKLOOP) {
1905 if (ITH_KNOTE_VALID(kn, MACH_MSG_TYPE_PORT_SEND_ONCE)) {
1906 inheritor = filt_machport_stash_port(kn, special_reply_port,
1907 &sync_link_state);
1908 }
1909 } else if (flags & IPC_PORT_ADJUST_SR_ALLOW_SYNC_LINKAGE) {
1910 sync_link_state = PORT_SYNC_LINK_ANY;
1911 }
1912
1913 /* Check if need to break linkage */
1914 if (!get_turnstile && sync_link_state == PORT_SYNC_LINK_NO_LINKAGE &&
1915 special_reply_port->ip_sync_link_state == PORT_SYNC_LINK_NO_LINKAGE) {
1916 imq_unlock(&special_reply_port->ip_messages);
1917 ip_unlock(special_reply_port);
1918 return;
1919 }
1920
1921 switch (special_reply_port->ip_sync_link_state) {
1922 case PORT_SYNC_LINK_PORT:
1923 dest_port = special_reply_port->ip_sync_inheritor_port;
1924 special_reply_port->ip_sync_inheritor_port = IPC_PORT_NULL;
1925 break;
1926 case PORT_SYNC_LINK_WORKLOOP_KNOTE:
1927 special_reply_port->ip_sync_inheritor_knote = NULL;
1928 break;
1929 case PORT_SYNC_LINK_WORKLOOP_STASH:
1930 special_reply_port->ip_sync_inheritor_ts = NULL;
1931 break;
1932 }
1933
1934 /*
1935 * Stash (or unstash) the server's PID in the waitq_priv_pid field of the
1936 * special reply port, so that stackshot can later retrieve who the client
1937 * is blocked on.
1938 */
1939 if (special_reply_port->ip_sync_link_state == PORT_SYNC_LINK_PORT &&
1940 sync_link_state == PORT_SYNC_LINK_NO_LINKAGE) {
1941 ipc_special_reply_stash_pid_locked(special_reply_port, pid_from_task(current_task()));
1942 } else if (special_reply_port->ip_sync_link_state == PORT_SYNC_LINK_NO_LINKAGE &&
1943 sync_link_state == PORT_SYNC_LINK_ANY) {
1944 /* If we are resetting the special reply port, remove the stashed pid. */
1945 ipc_special_reply_stash_pid_locked(special_reply_port, 0);
1946 }
1947
1948 special_reply_port->ip_sync_link_state = sync_link_state;
1949
1950 switch (sync_link_state) {
1951 case PORT_SYNC_LINK_WORKLOOP_KNOTE:
1952 special_reply_port->ip_sync_inheritor_knote = kn;
1953 break;
1954 case PORT_SYNC_LINK_WORKLOOP_STASH:
1955 special_reply_port->ip_sync_inheritor_ts = inheritor;
1956 break;
1957 case PORT_SYNC_LINK_NO_LINKAGE:
1958 if (flags & IPC_PORT_ADJUST_SR_ENABLE_EVENT) {
1959 ipc_special_reply_port_lost_link(special_reply_port);
1960 }
1961 break;
1962 }
1963
1964 /* Get thread's turnstile donated to special reply port */
1965 if (get_turnstile) {
1966 turnstile_complete((uintptr_t)special_reply_port,
1967 port_rcv_turnstile_address(special_reply_port), NULL, TURNSTILE_SYNC_IPC);
1968 } else {
1969 ts = ipc_port_rcv_turnstile(special_reply_port);
1970 if (ts) {
1971 turnstile_reference(ts);
1972 ipc_port_recv_update_inheritor(special_reply_port, ts,
1973 TURNSTILE_IMMEDIATE_UPDATE);
1974 }
1975 }
1976
1977 imq_unlock(&special_reply_port->ip_messages);
1978 ip_unlock(special_reply_port);
1979
1980 if (get_turnstile) {
1981 turnstile_cleanup();
1982 } else if (ts) {
1983 /* Call turnstile cleanup after dropping the interlock */
1984 turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_NOT_HELD);
1985 turnstile_deallocate_safe(ts);
1986 }
1987
1988 /* Release the ref on the dest port and its turnstile */
1989 if (dest_port) {
1990 ipc_port_send_turnstile_complete(dest_port);
1991 /* release the reference on the dest port */
1992 ip_release(dest_port);
1993 }
1994 }
1995
1996 /*
1997 * Routine: ipc_port_adjust_special_reply_port
1998 * Purpose:
1999 * If the special reply port has a turnstile, update its inheritor.
2000 * Condition:
2001 * Nothing locked.
2002 * Returns:
2003 * None.
2004 */
2005 void
2006 ipc_port_adjust_special_reply_port(
2007 ipc_port_t port,
2008 uint8_t flags)
2009 {
2010 if (port->ip_specialreply) {
2011 ip_lock(port);
2012 ipc_port_adjust_special_reply_port_locked(port, NULL, flags, FALSE);
2013 }
2014 }
2015
2016 /*
2017 * Routine: ipc_port_adjust_sync_link_state_locked
2018 * Purpose:
2019 * Update the sync link state of the port and the
2020 * turnstile inheritor.
2021 * Condition:
2022 * Port and mqueue locked on entry.
2023 * Port and mqueue locked on return.
2024 * Returns:
2025 * None.
2026 */
2027 void
2028 ipc_port_adjust_sync_link_state_locked(
2029 ipc_port_t port,
2030 int sync_link_state,
2031 turnstile_inheritor_t inheritor)
2032 {
2033 switch (port->ip_sync_link_state) {
2034 case PORT_SYNC_LINK_RCV_THREAD:
2035 /* deallocate the thread reference for the inheritor */
2036 thread_deallocate_safe(port->ip_messages.imq_inheritor_thread_ref);
2037 OS_FALLTHROUGH;
2038 default:
2039 klist_init(&port->ip_messages.imq_klist);
2040 }
2041
2042 switch (sync_link_state) {
2043 case PORT_SYNC_LINK_WORKLOOP_KNOTE:
2044 port->ip_messages.imq_inheritor_knote = inheritor;
2045 break;
2046 case PORT_SYNC_LINK_WORKLOOP_STASH:
2047 port->ip_messages.imq_inheritor_turnstile = inheritor;
2048 break;
2049 case PORT_SYNC_LINK_RCV_THREAD:
2050 /* The thread could exit without clearing port state, take a thread ref */
2051 thread_reference((thread_t)inheritor);
2052 port->ip_messages.imq_inheritor_thread_ref = inheritor;
2053 break;
2054 default:
2055 klist_init(&port->ip_messages.imq_klist);
2056 sync_link_state = PORT_SYNC_LINK_ANY;
2057 }
2058
2059 port->ip_sync_link_state = sync_link_state;
2060 }
2061
2062
2063 /*
2064 * Routine: ipc_port_adjust_port_locked
2065 * Purpose:
2066 * If the port has a turnstile, update its inheritor.
2067 * Condition:
2068 * Port locked on entry.
2069 * Port unlocked on return.
2070 * Returns:
2071 * None.
2072 */
2073 void
2074 ipc_port_adjust_port_locked(
2075 ipc_port_t port,
2076 struct knote *kn,
2077 boolean_t sync_bootstrap_checkin)
2078 {
2079 int sync_link_state = PORT_SYNC_LINK_ANY;
2080 turnstile_inheritor_t inheritor = TURNSTILE_INHERITOR_NULL;
2081
2082 ip_lock_held(port); // ip_sync_link_state is touched
2083 imq_held(&port->ip_messages);
2084
2085 assert(!port->ip_specialreply);
2086
2087 if (kn) {
2088 inheritor = filt_machport_stash_port(kn, port, &sync_link_state);
2089 if (sync_link_state == PORT_SYNC_LINK_WORKLOOP_KNOTE) {
2090 inheritor = kn;
2091 }
2092 } else if (sync_bootstrap_checkin) {
2093 inheritor = current_thread();
2094 sync_link_state = PORT_SYNC_LINK_RCV_THREAD;
2095 }
2096
2097 ipc_port_adjust_sync_link_state_locked(port, sync_link_state, inheritor);
2098 port->ip_sync_bootstrap_checkin = 0;
2099
2100 ipc_port_send_turnstile_recompute_push_locked(port);
2101 /* port and mqueue unlocked */
2102 }
2103
2104 /*
2105 * Routine: ipc_port_clear_sync_rcv_thread_boost_locked
2106 * Purpose:
2107 * If the port is pushing on rcv thread, clear it.
2108 * Condition:
2109 * Port locked on entry
2110 * mqueue is not locked.
2111 * Port unlocked on return.
2112 * Returns:
2113 * None.
2114 */
2115 void
2116 ipc_port_clear_sync_rcv_thread_boost_locked(
2117 ipc_port_t port)
2118 {
2119 ip_lock_held(port); // ip_sync_link_state is touched
2120
2121 if (port->ip_sync_link_state != PORT_SYNC_LINK_RCV_THREAD) {
2122 ip_unlock(port);
2123 return;
2124 }
2125
2126 imq_lock(&port->ip_messages);
2127 ipc_port_adjust_sync_link_state_locked(port, PORT_SYNC_LINK_ANY, NULL);
2128
2129 ipc_port_send_turnstile_recompute_push_locked(port);
2130 /* port and mqueue unlocked */
2131 }
2132
2133 /*
2134 * Routine: ipc_port_add_watchport_elem_locked
2135 * Purpose:
2136 * Transfer the turnstile boost of the watchport to the task calling exec.
2137 * Condition:
2138 * Port locked on entry.
2139 * Port unlocked on return.
2140 * Returns:
2141 * KERN_SUCCESS on success.
2142 * KERN_FAILURE otherwise.
2143 */
2144 kern_return_t
2145 ipc_port_add_watchport_elem_locked(
2146 ipc_port_t port,
2147 struct task_watchport_elem *watchport_elem,
2148 struct task_watchport_elem **old_elem)
2149 {
2150 ip_lock_held(port);
2151 imq_held(&port->ip_messages);
2152
2153 /* Watchport boost only works for non-special active ports mapped in an ipc space */
2154 if (!ip_active(port) || port->ip_specialreply ||
2155 port->ip_receiver_name == MACH_PORT_NULL) {
2156 imq_unlock(&port->ip_messages);
2157 ip_unlock(port);
2158 return KERN_FAILURE;
2159 }
2160
2161 if (port->ip_sync_link_state != PORT_SYNC_LINK_ANY) {
2162 /* Sever the linkage if the port was pushing on knote */
2163 ipc_port_adjust_sync_link_state_locked(port, PORT_SYNC_LINK_ANY, NULL);
2164 }
2165
2166 *old_elem = ipc_port_update_watchport_elem(port, watchport_elem);
2167
2168 ipc_port_send_turnstile_recompute_push_locked(port);
2169 /* port and mqueue unlocked */
2170 return KERN_SUCCESS;
2171 }
2172
2173 /*
2174 * Routine: ipc_port_clear_watchport_elem_internal_conditional_locked
2175 * Purpose:
2176 * Remove the turnstile boost of watchport and recompute the push.
2177 * Condition:
2178 * Port locked on entry.
2179 * Port unlocked on return.
2180 * Returns:
2181 * KERN_SUCCESS on success.
2182 * KERN_FAILURE otherwise.
2183 */
2184 kern_return_t
2185 ipc_port_clear_watchport_elem_internal_conditional_locked(
2186 ipc_port_t port,
2187 struct task_watchport_elem *watchport_elem)
2188 {
2189 ip_lock_held(port);
2190 imq_held(&port->ip_messages);
2191
2192 if (ipc_port_watchport_elem(port) != watchport_elem) {
2193 imq_unlock(&port->ip_messages);
2194 ip_unlock(port);
2195 return KERN_FAILURE;
2196 }
2197
2198 ipc_port_clear_watchport_elem_internal(port);
2199 ipc_port_send_turnstile_recompute_push_locked(port);
2200 /* port and mqueue unlocked */
2201 return KERN_SUCCESS;
2202 }
2203
2204 /*
2205 * Routine: ipc_port_replace_watchport_elem_conditional_locked
2206 * Purpose:
2207 * Replace the turnstile boost of watchport and recompute the push.
2208 * Condition:
2209 * Port locked on entry.
2210 * Port unlocked on return.
2211 * Returns:
2212 * KERN_SUCCESS on success.
2213 * KERN_FAILURE otherwise.
2214 */
2215 kern_return_t
2216 ipc_port_replace_watchport_elem_conditional_locked(
2217 ipc_port_t port,
2218 struct task_watchport_elem *old_watchport_elem,
2219 struct task_watchport_elem *new_watchport_elem)
2220 {
2221 ip_lock_held(port);
2222 imq_held(&port->ip_messages);
2223
2224 if (ipc_port_watchport_elem(port) != old_watchport_elem) {
2225 imq_unlock(&port->ip_messages);
2226 ip_unlock(port);
2227 return KERN_FAILURE;
2228 }
2229
2230 ipc_port_update_watchport_elem(port, new_watchport_elem);
2231 ipc_port_send_turnstile_recompute_push_locked(port);
2232 /* port and mqueue unlocked */
2233 return KERN_SUCCESS;
2234 }
2235
2236 /*
2237 * Routine: ipc_port_clear_watchport_elem_internal
2238 * Purpose:
2239 * Remove the turnstile boost of watchport.
2240 * Condition:
2241 * Port locked on entry.
2242 * Port locked on return.
2243 * Returns:
2244 * Old task_watchport_elem returned.
2245 */
2246 struct task_watchport_elem *
2247 ipc_port_clear_watchport_elem_internal(
2248 ipc_port_t port)
2249 {
2250 ip_lock_held(port);
2251 imq_held(&port->ip_messages);
2252
2253 if (port->ip_specialreply) {
2254 return NULL;
2255 }
2256
2257 return ipc_port_update_watchport_elem(port, NULL);
2258 }
2259
2260 /*
2261 * Routine: ipc_port_send_turnstile_recompute_push_locked
2262 * Purpose:
2263 * Update send turnstile inheritor of port and recompute the push.
2264 * Condition:
2265 * Port locked on entry.
2266 * Port unlocked on return.
2267 * Returns:
2268 * None.
2269 */
2270 static void
2271 ipc_port_send_turnstile_recompute_push_locked(
2272 ipc_port_t port)
2273 {
2274 struct turnstile *send_turnstile = port_send_turnstile(port);
2275 if (send_turnstile) {
2276 turnstile_reference(send_turnstile);
2277 ipc_port_send_update_inheritor(port, send_turnstile,
2278 TURNSTILE_IMMEDIATE_UPDATE);
2279 }
2280 imq_unlock(&port->ip_messages);
2281 ip_unlock(port);
2282
2283 if (send_turnstile) {
2284 turnstile_update_inheritor_complete(send_turnstile,
2285 TURNSTILE_INTERLOCK_NOT_HELD);
2286 turnstile_deallocate_safe(send_turnstile);
2287 }
2288 }
2289
2290 /*
2291 * Routine: ipc_port_get_watchport_inheritor
2292 * Purpose:
2293 * Returns inheritor for watchport.
2294 *
2295 * Conditions:
2296 * mqueue locked.
2297 * Returns:
2298 * watchport inheritor.
2299 */
2300 static thread_t
2301 ipc_port_get_watchport_inheritor(
2302 ipc_port_t port)
2303 {
2304 imq_held(&port->ip_messages);
2305 return ipc_port_watchport_elem(port)->twe_task->watchports->tw_thread;
2306 }
2307
2308 /*
2309 * Routine: ipc_port_get_receiver_task
2310 * Purpose:
2311 * Returns the pid of the port's receiver task (if any), and
2311 * optionally passes the task pointer back via *task.
2312 *
2313 * Conditions:
2314 * Nothing locked.
2315 */
2316 pid_t
2317 ipc_port_get_receiver_task(ipc_port_t port, uintptr_t *task)
2318 {
2319 task_t receiver = TASK_NULL;
2320 pid_t pid = -1;
2321
2322 if (!port) {
2323 goto out;
2324 }
2325
2326 ip_lock(port);
2327 if (ip_active(port) &&
2328 MACH_PORT_VALID(port->ip_receiver_name) &&
2329 port->ip_receiver &&
2330 port->ip_receiver != ipc_space_kernel &&
2331 port->ip_receiver != ipc_space_reply) {
2332 receiver = port->ip_receiver->is_task;
2333 pid = task_pid(receiver);
2334 }
2335 ip_unlock(port);
2336
2337 out:
2338 if (task) {
2339 *task = (uintptr_t)receiver;
2340 }
2341 return pid;
2342 }
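
#if 0
/*
 * Illustrative sketch (not part of the original source): how a diagnostic
 * caller might use ipc_port_get_receiver_task(). The routine returns -1
 * when the port is null, inactive, or received by the kernel/reply space;
 * example_log_receiver is a hypothetical helper.
 */
static void
example_log_receiver(ipc_port_t port)
{
	uintptr_t task_ptr = 0;
	pid_t pid = ipc_port_get_receiver_task(port, &task_ptr);

	if (pid != -1) {
		printf("port %p received by pid %d (task %p)\n",
		    port, pid, (void *)task_ptr);
	}
}
#endif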
2343
2344 /*
2345 * Routine: ipc_port_impcount_delta
2346 * Purpose:
2347 * Adjust only the importance count associated with a port.
2348 * If there are any adjustments to be made to the receiver task,
2349 * those are handled elsewhere.
2350 *
2351 * For now, be defensive during deductions to make sure the
2352 * impcount for the port doesn't underflow zero. This will
2353 * go away when the port boost addition is made atomic (see
2354 * note in ipc_port_importance_delta()).
2355 * Conditions:
2356 * The port is referenced and locked.
2357 * Nothing else is locked.
2358 */
2359 mach_port_delta_t
2360 ipc_port_impcount_delta(
2361 ipc_port_t port,
2362 mach_port_delta_t delta,
2363 ipc_port_t __unused base)
2364 {
2365 mach_port_delta_t absdelta;
2366
2367 if (!ip_active(port)) {
2368 return 0;
2369 }
2370
2371 /* adding/doing nothing is easy */
2372 if (delta >= 0) {
2373 port->ip_impcount += delta;
2374 return delta;
2375 }
2376
2377 absdelta = 0 - delta;
2378 if (port->ip_impcount >= absdelta) {
2379 port->ip_impcount -= absdelta;
2380 return delta;
2381 }
2382
2383 #if (DEVELOPMENT || DEBUG)
2384 if (port->ip_receiver_name != MACH_PORT_NULL) {
2385 task_t target_task = port->ip_receiver->is_task;
2386 ipc_importance_task_t target_imp = target_task->task_imp_base;
2387 const char *target_procname;
2388 int target_pid;
2389
2390 if (target_imp != IIT_NULL) {
2391 target_procname = target_imp->iit_procname;
2392 target_pid = target_imp->iit_bsd_pid;
2393 } else {
2394 target_procname = "unknown";
2395 target_pid = -1;
2396 }
2397 printf("Over-release of importance assertions for port 0x%x receiver pid %d (%s), "
2398 "dropping %d assertion(s) but port only has %d remaining.\n",
2399 port->ip_receiver_name,
2400 target_pid, target_procname,
2401 absdelta, port->ip_impcount);
2402 } else if (base != IP_NULL) {
2403 task_t target_task = base->ip_receiver->is_task;
2404 ipc_importance_task_t target_imp = target_task->task_imp_base;
2405 const char *target_procname;
2406 int target_pid;
2407
2408 if (target_imp != IIT_NULL) {
2409 target_procname = target_imp->iit_procname;
2410 target_pid = target_imp->iit_bsd_pid;
2411 } else {
2412 target_procname = "unknown";
2413 target_pid = -1;
2414 }
2415 printf("Over-release of importance assertions for port 0x%lx "
2416 "enqueued on port 0x%x with receiver pid %d (%s), "
2417 "dropping %d assertion(s) but port only has %d remaining.\n",
2418 (unsigned long)VM_KERNEL_UNSLIDE_OR_PERM((uintptr_t)port),
2419 base->ip_receiver_name,
2420 target_pid, target_procname,
2421 absdelta, port->ip_impcount);
2422 }
2423 #endif
2424
2425 delta = 0 - port->ip_impcount;
2426 port->ip_impcount = 0;
2427 return delta;
2428 }
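
/*
 * Worked example of the clamping above (illustrative only): with
 * port->ip_impcount == 3, a caller passing delta == -5 would underflow,
 * so absdelta == 5 exceeds the count and the routine instead returns -3
 * and leaves ip_impcount == 0; the caller learns that only 3 assertions
 * could actually be dropped.
 */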
2429
2430 /*
2431 * Routine: ipc_port_importance_delta_internal
2432 * Purpose:
2433 * Adjust the importance count through the given port.
2434 * If the port is in transit, apply the delta throughout
2435 * the chain. Determine if there is a task at the
2436 * base of the chain that wants/needs to be adjusted,
2437 * and if so, apply the delta.
2438 * Conditions:
2439 * The port is referenced and locked on entry.
2440 * Importance may be locked.
2441 * Nothing else is locked.
2442 * The lock may be dropped on exit.
2443 * Returns TRUE if lock was dropped.
2444 */
2445 #if IMPORTANCE_INHERITANCE
2446
2447 boolean_t
2448 ipc_port_importance_delta_internal(
2449 ipc_port_t port,
2450 natural_t options,
2451 mach_port_delta_t *deltap,
2452 ipc_importance_task_t *imp_task)
2453 {
2454 ipc_port_t next, base;
2455 bool dropped = false;
2456 bool took_base_ref = false;
2457
2458 *imp_task = IIT_NULL;
2459
2460 if (*deltap == 0) {
2461 return FALSE;
2462 }
2463
2464 assert(options == IPID_OPTION_NORMAL || options == IPID_OPTION_SENDPOSSIBLE);
2465
2466 base = port;
2467
2468 /* if port is in transit, have to search for end of chain */
2469 if (ip_active(port) &&
2470 port->ip_destination != IP_NULL &&
2471 port->ip_receiver_name == MACH_PORT_NULL) {
2472 dropped = true;
2473
2474 ip_unlock(port);
2475 ipc_port_multiple_lock(); /* massive serialization */
2476
2477 took_base_ref = ipc_port_destination_chain_lock(port, &base);
2478 /* all ports in chain from port to base, inclusive, are locked */
2479
2480 ipc_port_multiple_unlock();
2481 }
2482
2483 /*
2484 * If the port lock is dropped because the port is in transit, there is a
2485 * race window where another thread can drain messages and/or fire a
2486 * send possible notification before we get here.
2487 *
2488 * We solve this race by checking to see if our caller armed the send
2489 * possible notification, whether or not it's been fired yet, and
2490 * whether or not we've already set the port's ip_spimportant bit. If
2491 * we don't need a send-possible boost, then we'll just apply a
2492 * harmless 0-boost to the port.
2493 */
2494 if (options & IPID_OPTION_SENDPOSSIBLE) {
2495 assert(*deltap == 1);
2496 if (port->ip_sprequests && port->ip_spimportant == 0) {
2497 port->ip_spimportant = 1;
2498 } else {
2499 *deltap = 0;
2500 }
2501 }
2502
2503 /* unlock down to the base, adjusting boost(s) at each level */
2504 for (;;) {
2505 *deltap = ipc_port_impcount_delta(port, *deltap, base);
2506
2507 if (port == base) {
2508 break;
2509 }
2510
2511 /* port is in transit */
2512 assert(port->ip_tempowner == 0);
2513 next = port->ip_destination;
2514 ip_unlock(port);
2515 port = next;
2516 }
2517
2518 /* find the task (if any) to boost according to the base */
2519 if (ip_active(base)) {
2520 if (base->ip_tempowner != 0) {
2521 if (IIT_NULL != base->ip_imp_task) {
2522 *imp_task = base->ip_imp_task;
2523 }
2524 /* otherwise don't boost */
2525 } else if (base->ip_receiver_name != MACH_PORT_NULL) {
2526 ipc_space_t space = base->ip_receiver;
2527
2528 /* only spaces with boost-accepting tasks */
2529 if (space->is_task != TASK_NULL &&
2530 ipc_importance_task_is_any_receiver_type(space->is_task->task_imp_base)) {
2531 *imp_task = space->is_task->task_imp_base;
2532 }
2533 }
2534 }
2535
2536 /*
2537 * Only the base is locked. If we have to hold or drop task
2538 * importance assertions, we'll have to drop that lock as well.
2539 */
2540 if (*imp_task != IIT_NULL) {
2541 /* take a reference before unlocking base */
2542 ipc_importance_task_reference(*imp_task);
2543 }
2544
2545 if (dropped) {
2546 ip_unlock(base);
2547 if (took_base_ref) {
2548 ip_release(base);
2549 }
2550 }
2551
2552 return dropped;
2553 }
2554 #endif /* IMPORTANCE_INHERITANCE */
2555
2556 /*
2557 * Routine: ipc_port_importance_delta
2558 * Purpose:
2559 * Adjust the importance count through the given port.
2560 * If the port is in transit, apply the delta throughout
2561 * the chain.
2562 *
2563 * If there is a task at the base of the chain that wants/needs
2564 * to be adjusted, apply the delta.
2565 * Conditions:
2566 * The port is referenced and locked on entry.
2567 * Nothing else is locked.
2568 * The lock may be dropped on exit.
2569 * Returns TRUE if lock was dropped.
2570 */
2571 #if IMPORTANCE_INHERITANCE
2572
2573 boolean_t
2574 ipc_port_importance_delta(
2575 ipc_port_t port,
2576 natural_t options,
2577 mach_port_delta_t delta)
2578 {
2579 ipc_importance_task_t imp_task = IIT_NULL;
2580 boolean_t dropped;
2581
2582 dropped = ipc_port_importance_delta_internal(port, options, &delta, &imp_task);
2583
2584 if (IIT_NULL == imp_task || delta == 0) {
2585 return dropped;
2586 }
2587
2588 if (!dropped) {
2589 ip_unlock(port);
2590 }
2591
2592 assert(ipc_importance_task_is_any_receiver_type(imp_task));
2593
2594 if (delta > 0) {
2595 ipc_importance_task_hold_internal_assertion(imp_task, delta);
2596 } else {
2597 ipc_importance_task_drop_internal_assertion(imp_task, -delta);
2598 }
2599
2600 ipc_importance_task_release(imp_task);
2601 return TRUE;
2602 }
2603 #endif /* IMPORTANCE_INHERITANCE */
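
#if 0
/*
 * Illustrative sketch (not part of the original source, and meaningful
 * only under IMPORTANCE_INHERITANCE): dropping one importance boost
 * through a locked port. A TRUE return means the routine already
 * dropped the port lock on the caller's behalf.
 */
static void
example_drop_one_boost(ipc_port_t port)
{
	ip_lock(port);
	if (!ipc_port_importance_delta(port, IPID_OPTION_NORMAL, -1)) {
		/* lock was not dropped by the callee; release it here */
		ip_unlock(port);
	}
}
#endif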
2604
2605 /*
2606 * Routine: ipc_port_make_send_locked
2607 * Purpose:
2608 * Make a naked send right from a receive right.
2609 *
2610 * Conditions:
2611 * port locked and active.
2612 */
2613 ipc_port_t
2614 ipc_port_make_send_locked(
2615 ipc_port_t port)
2616 {
2617 require_ip_active(port);
2618 port->ip_mscount++;
2619 port->ip_srights++;
2620 ip_reference(port);
2621 return port;
2622 }
2623
2624 /*
2625 * Routine: ipc_port_make_send
2626 * Purpose:
2627 * Make a naked send right from a receive right.
2628 */
2629
2630 ipc_port_t
2631 ipc_port_make_send(
2632 ipc_port_t port)
2633 {
2634 if (!IP_VALID(port)) {
2635 return port;
2636 }
2637
2638 ip_lock(port);
2639 if (ip_active(port)) {
2640 ipc_port_make_send_locked(port);
2641 ip_unlock(port);
2642 return port;
2643 }
2644 ip_unlock(port);
2645 return IP_DEAD;
2646 }
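
#if 0
/*
 * Illustrative sketch (not part of the original source): a typical
 * pairing of ipc_port_make_send() with ipc_port_release_send() (defined
 * below). `receive_right` is a hypothetical port the caller holds.
 */
static void
example_send_right_lifecycle(ipc_port_t receive_right)
{
	ipc_port_t sright = ipc_port_make_send(receive_right);

	if (IP_VALID(sright)) {
		/* ... use the send right (e.g. stash it in a message) ... */
		ipc_port_release_send(sright);	/* consumes right + ref */
	}
}
#endif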
2647
2648 /*
2649 * Routine: ipc_port_copy_send_locked
2650 * Purpose:
2651 * Make a naked send right from another naked send right.
2652 * Conditions:
2653 * port locked and active.
2654 */
2655 void
2656 ipc_port_copy_send_locked(
2657 ipc_port_t port)
2658 {
2659 assert(port->ip_srights > 0);
2660 port->ip_srights++;
2661 ip_reference(port);
2662 }
2663
2664 /*
2665 * Routine: ipc_port_copy_send
2666 * Purpose:
2667 * Make a naked send right from another naked send right.
2668 * IP_NULL -> IP_NULL
2669 * IP_DEAD -> IP_DEAD
2670 * dead port -> IP_DEAD
2671 * live port -> port + ref
2672 * Conditions:
2673 * Nothing locked except possibly a space.
2674 */
2675
2676 ipc_port_t
2677 ipc_port_copy_send(
2678 ipc_port_t port)
2679 {
2680 ipc_port_t sright;
2681
2682 if (!IP_VALID(port)) {
2683 return port;
2684 }
2685
2686 ip_lock(port);
2687 if (ip_active(port)) {
2688 ipc_port_copy_send_locked(port);
2689 sright = port;
2690 } else {
2691 sright = IP_DEAD;
2692 }
2693 ip_unlock(port);
2694
2695 return sright;
2696 }
2697
2698 /*
2699 * Routine: ipc_port_copyout_send
2700 * Purpose:
2701 * Copyout a naked send right (possibly null/dead),
2702 * or if that fails, destroy the right.
2703 * Conditions:
2704 * Nothing locked.
2705 */
2706
2707 static mach_port_name_t
2708 ipc_port_copyout_send_internal(
2709 ipc_port_t sright,
2710 ipc_space_t space,
2711 ipc_object_copyout_flags_t flags)
2712 {
2713 mach_port_name_t name;
2714
2715 if (IP_VALID(sright)) {
2716 kern_return_t kr;
2717
2718 kr = ipc_object_copyout(space, ip_to_object(sright),
2719 MACH_MSG_TYPE_PORT_SEND, flags, NULL, NULL, &name);
2720 if (kr != KERN_SUCCESS) {
2721 if (kr == KERN_INVALID_CAPABILITY) {
2722 name = MACH_PORT_DEAD;
2723 } else {
2724 name = MACH_PORT_NULL;
2725 }
2726 }
2727 } else {
2728 name = CAST_MACH_PORT_TO_NAME(sright);
2729 }
2730
2731 return name;
2732 }
2733
2734 mach_port_name_t
2735 ipc_port_copyout_send(
2736 ipc_port_t sright,
2737 ipc_space_t space)
2738 {
2739 return ipc_port_copyout_send_internal(sright, space, IPC_OBJECT_COPYOUT_FLAGS_NONE);
2740 }
2741
2742 mach_port_name_t
2743 ipc_port_copyout_send_pinned(
2744 ipc_port_t sright,
2745 ipc_space_t space)
2746 {
2747 return ipc_port_copyout_send_internal(sright, space, IPC_OBJECT_COPYOUT_FLAGS_PINNED);
2748 }
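
#if 0
/*
 * Illustrative sketch (not part of the original source): copying a naked
 * send right out into a task's IPC space. On failure the right has
 * already been destroyed and the caller receives MACH_PORT_NULL or
 * MACH_PORT_DEAD instead of a usable name.
 */
static void
example_copyout_send(ipc_port_t sright, ipc_space_t space)
{
	mach_port_name_t name = ipc_port_copyout_send(sright, space);

	if (MACH_PORT_VALID(name)) {
		/* hand the name back to userspace */
	}
}
#endif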
2749
2750 /*
2751 * Routine: ipc_port_release_send_and_unlock
2752 * Purpose:
2753 * Release a naked send right.
2754 * Consumes a ref for the port.
2755 * Conditions:
2756 * Port is valid and locked on entry
2757 * Port is unlocked on exit.
2758 */
2759 void
2760 ipc_port_release_send_and_unlock(
2761 ipc_port_t port)
2762 {
2763 ipc_port_t nsrequest = IP_NULL;
2764 mach_port_mscount_t mscount;
2765
2766 assert(port->ip_srights > 0);
2767 if (port->ip_srights == 0) {
2768 panic("Over-release of port %p send right!", port);
2769 }
2770
2771 port->ip_srights--;
2772
2773 if (!ip_active(port)) {
2774 ip_unlock(port);
2775 ip_release(port);
2776 return;
2777 }
2778
2779 if (port->ip_srights == 0 &&
2780 port->ip_nsrequest != IP_NULL) {
2781 nsrequest = port->ip_nsrequest;
2782 port->ip_nsrequest = IP_NULL;
2783 mscount = port->ip_mscount;
2784 ip_unlock(port);
2785 ip_release(port);
2786 ipc_notify_no_senders(nsrequest, mscount);
2787 } else {
2788 ip_unlock(port);
2789 ip_release(port);
2790 }
2791 }
2792
2793 /*
2794 * Routine: ipc_port_release_send
2795 * Purpose:
2796 * Release a naked send right.
2797 * Consumes a ref for the port.
2798 * Conditions:
2799 * Nothing locked.
2800 */
2801
2802 void
2803 ipc_port_release_send(
2804 ipc_port_t port)
2805 {
2806 if (IP_VALID(port)) {
2807 ip_lock(port);
2808 ipc_port_release_send_and_unlock(port);
2809 }
2810 }
2811
2812 /*
2813 * Routine: ipc_port_make_sonce_locked
2814 * Purpose:
2815 * Make a naked send-once right from a receive right.
2816 * Conditions:
2817 * The port is locked and active.
2818 */
2819
2820 ipc_port_t
2821 ipc_port_make_sonce_locked(
2822 ipc_port_t port)
2823 {
2824 require_ip_active(port);
2825 port->ip_sorights++;
2826 ip_reference(port);
2827 return port;
2828 }
2829
2830 /*
2831 * Routine: ipc_port_make_sonce
2832 * Purpose:
2833 * Make a naked send-once right from a receive right.
2834 * Conditions:
2835 * The port is not locked.
2836 */
2837
2838 ipc_port_t
2839 ipc_port_make_sonce(
2840 ipc_port_t port)
2841 {
2842 if (!IP_VALID(port)) {
2843 return port;
2844 }
2845
2846 ip_lock(port);
2847 if (ip_active(port)) {
2848 ipc_port_make_sonce_locked(port);
2849 ip_unlock(port);
2850 return port;
2851 }
2852 ip_unlock(port);
2853 return IP_DEAD;
2854 }
2855
2856 /*
2857 * Routine: ipc_port_release_sonce
2858 * Purpose:
2859 * Release a naked send-once right.
2860 * Consumes a ref for the port.
2861 *
2862 * In normal situations, this is never used.
2863 * Send-once rights are only consumed when
2864 * a message (possibly a send-once notification)
2865 * is sent to them.
2866 * Conditions:
2867 * Nothing locked except possibly a space.
2868 */
2869
2870 void
2871 ipc_port_release_sonce(
2872 ipc_port_t port)
2873 {
2874 if (!IP_VALID(port)) {
2875 return;
2876 }
2877
2878 ipc_port_adjust_special_reply_port(port, IPC_PORT_ADJUST_RESET_BOOSTRAP_CHECKIN);
2879
2880 ip_lock(port);
2881
2882 assert(port->ip_sorights > 0);
2883 if (port->ip_sorights == 0) {
2884 panic("Over-release of port %p send-once right!", port);
2885 }
2886
2887 port->ip_sorights--;
2888
2889 ip_unlock(port);
2890 ip_release(port);
2891 }
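
#if 0
/*
 * Illustrative sketch (not part of the original source): the error path
 * described above. A send-once right is normally consumed by a message;
 * ipc_port_release_sonce() covers the case where the send never happens.
 */
static void
example_sonce_error_path(ipc_port_t port)
{
	ipc_port_t soright = ipc_port_make_sonce(port);

	if (IP_VALID(soright)) {
		/* ... message construction fails ... */
		ipc_port_release_sonce(soright);
	}
}
#endif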
2892
2893 /*
2894 * Routine: ipc_port_release_receive
2895 * Purpose:
2896 * Release a naked (in limbo or in transit) receive right.
2897 * Consumes a ref for the port; destroys the port.
2898 * Conditions:
2899 * Nothing locked.
2900 */
2901
2902 void
2903 ipc_port_release_receive(
2904 ipc_port_t port)
2905 {
2906 ipc_port_t dest;
2907
2908 if (!IP_VALID(port)) {
2909 return;
2910 }
2911
2912 ip_lock(port);
2913 require_ip_active(port);
2914 assert(port->ip_receiver_name == MACH_PORT_NULL);
2915 dest = port->ip_destination;
2916
2917 ipc_port_destroy(port); /* consumes ref, unlocks */
2918
2919 if (dest != IP_NULL) {
2920 ipc_port_send_turnstile_complete(dest);
2921 ip_release(dest);
2922 }
2923 }
2924
2925 /*
2926 * Routine: ipc_port_alloc_special
2927 * Purpose:
2928 * Allocate a port in a special space.
2929 * The new port is returned with one ref.
2930 * If unsuccessful, IP_NULL is returned.
2931 * Conditions:
2932 * Nothing locked.
2933 */
2934
2935 ipc_port_t
2936 ipc_port_alloc_special(
2937 ipc_space_t space,
2938 ipc_port_init_flags_t flags)
2939 {
2940 ipc_port_t port;
2941
2942 port = ip_object_to_port(io_alloc(IOT_PORT, Z_WAITOK | Z_ZERO));
2943 if (port == IP_NULL) {
2944 return IP_NULL;
2945 }
2946
2947 #if MACH_ASSERT
2948 uintptr_t buf[IP_CALLSTACK_MAX];
2949 ipc_port_callstack_init_debug(&buf[0], IP_CALLSTACK_MAX);
2950 #endif /* MACH_ASSERT */
2951
2952 io_lock_init(ip_to_object(port));
2953 port->ip_references = 1;
2954 port->ip_object.io_bits = io_makebits(TRUE, IOT_PORT, 0);
2955
2956 ipc_port_init(port, space, flags, 1);
2957
2958 #if MACH_ASSERT
2959 ipc_port_init_debug(port, &buf[0], IP_CALLSTACK_MAX);
2960 #endif /* MACH_ASSERT */
2961
2962 return port;
2963 }
2964
2965 /*
2966 * Routine: ipc_port_dealloc_special
2967 * Purpose:
2968 * Deallocate a port in a special space.
2969 * Consumes one ref for the port.
2970 * Conditions:
2971 * Nothing locked.
2972 */
2973
2974 void
2975 ipc_port_dealloc_special(
2976 ipc_port_t port,
2977 __assert_only ipc_space_t space)
2978 {
2979 ip_lock(port);
2980 require_ip_active(port);
2981 // assert(port->ip_receiver_name != MACH_PORT_NULL);
2982 assert(port->ip_receiver == space);
2983
2984 /*
2985 * We clear ip_receiver_name and ip_receiver to simplify
2986 * the ipc_space_kernel check in ipc_mqueue_send.
2987 */
2988
2989 imq_lock(&port->ip_messages);
2990 port->ip_receiver_name = MACH_PORT_NULL;
2991 port->ip_receiver = IS_NULL;
2992 imq_unlock(&port->ip_messages);
2993
2994 /* relevant part of ipc_port_clear_receiver */
2995 port->ip_mscount = 0;
2996 port->ip_messages.imq_seqno = 0;
2997
2998 ipc_port_destroy(port);
2999 }
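
#if 0
/*
 * Illustrative sketch (not part of the original source): the lifecycle
 * of a port in a special space. IPC_PORT_INIT_NONE is assumed here to be
 * the empty init-flags value.
 */
static void
example_special_port_lifecycle(void)
{
	ipc_port_t port;

	port = ipc_port_alloc_special(ipc_space_kernel, IPC_PORT_INIT_NONE);
	if (port == IP_NULL) {
		return;
	}
	/* ... use the port, e.g. as a kobject port ... */
	ipc_port_dealloc_special(port, ipc_space_kernel);	/* consumes ref */
}
#endif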
3000
3001 /*
3002 * Routine: ipc_port_finalize
3003 * Purpose:
3004 * Called when the last reference is dropped to
3005 * free any remaining data associated with the
3006 * port.
3007 * Conditions:
3008 * Nothing locked.
3009 */
3010 void
3011 ipc_port_finalize(
3012 ipc_port_t port)
3013 {
3014 ipc_port_request_t requests = port->ip_requests;
3015
3016 assert(port_send_turnstile(port) == TURNSTILE_NULL);
3017 if (imq_is_turnstile_proxy(&port->ip_messages)) {
3018 assert(ipc_port_rcv_turnstile(port) == TURNSTILE_NULL);
3019 }
3020
3021 if (ip_active(port)) {
3022 panic("Trying to free an active port. port %p", port);
3023 }
3024
3025 if (requests != IPR_NULL) {
3026 ipc_table_size_t its = requests->ipr_size;
3027 it_requests_free(its, requests);
3028 port->ip_requests = IPR_NULL;
3029 }
3030
3031 ipc_mqueue_deinit(&port->ip_messages);
3032
3033 #if MACH_ASSERT
3034 ipc_port_track_dealloc(port);
3035 #endif /* MACH_ASSERT */
3036 }
3037
3038 /*
3039 * Routine: kdp_mqueue_send_find_owner
3040 * Purpose:
3041 * Discover the owner of the ipc_mqueue that contains the input
3042 * waitq object. The thread blocked on the waitq should be
3043 * waiting for an IPC_MQUEUE_FULL event.
3044 * Conditions:
3045 * The 'waitinfo->wait_type' value should already be set to
3046 * kThreadWaitPortSend.
3047 * Note:
3048 * If we find out that the containing port is actually in
3049 * transit, we reset the wait_type field to reflect this.
3050 */
3051 void
3052 kdp_mqueue_send_find_owner(struct waitq * waitq, __assert_only event64_t event, thread_waitinfo_t * waitinfo)
3053 {
3054 struct turnstile *turnstile;
3055 assert(waitinfo->wait_type == kThreadWaitPortSend);
3056 assert(event == IPC_MQUEUE_FULL);
3057 assert(waitq_is_turnstile_queue(waitq));
3058
3059 turnstile = waitq_to_turnstile(waitq);
3060 ipc_port_t port = (ipc_port_t)turnstile->ts_proprietor; /* we are blocking on send */
3061
3062 zone_id_require(ZONE_ID_IPC_PORT, sizeof(struct ipc_port), port);
3063
3064 waitinfo->owner = 0;
3065 waitinfo->context = VM_KERNEL_UNSLIDE_OR_PERM(port);
3066 if (ip_lock_held_kdp(port)) {
3067 /*
3068 * someone has the port locked: it may be in an
3069 * inconsistent state: bail
3070 */
3071 waitinfo->owner = STACKSHOT_WAITOWNER_PORT_LOCKED;
3072 return;
3073 }
3074
3075 if (ip_active(port)) {
3076 if (port->ip_tempowner) {
3077 if (port->ip_imp_task != IIT_NULL && port->ip_imp_task->iit_task != NULL) {
3078 /* port is held by a tempowner */
3079 waitinfo->owner = pid_from_task(port->ip_imp_task->iit_task);
3080 } else {
3081 waitinfo->owner = STACKSHOT_WAITOWNER_INTRANSIT;
3082 }
3083 } else if (port->ip_receiver_name) {
3084 /* port in a space */
3085 if (port->ip_receiver == ipc_space_kernel) {
3086 /*
3087 * The kernel pid is 0, make this
3088 * distinguishable from no-owner and
3089 * inconsistent port state.
3090 */
3091 waitinfo->owner = STACKSHOT_WAITOWNER_KERNEL;
3092 } else {
3093 waitinfo->owner = pid_from_task(port->ip_receiver->is_task);
3094 }
3095 } else if (port->ip_destination != IP_NULL) {
3096 /* port in transit */
3097 waitinfo->wait_type = kThreadWaitPortSendInTransit;
3098 waitinfo->owner = VM_KERNEL_UNSLIDE_OR_PERM(port->ip_destination);
3099 }
3100 }
3101 }
3102
3103 /*
3104 * Routine: kdp_mqueue_recv_find_owner
3105 * Purpose:
3106 * Discover the "owner" of the ipc_mqueue that contains the input
3107 * waitq object. The thread blocked on the waitq is trying to
3108 * receive on the mqueue.
3109 * Conditions:
3110 * The 'waitinfo->wait_type' value should already be set to
3111 * kThreadWaitPortReceive.
3112 * Note:
3113 * If we find that we are actually waiting on a port set, we reset
3114 * the wait_type field to reflect this.
3115 */
3116 void
3117 kdp_mqueue_recv_find_owner(struct waitq * waitq, __assert_only event64_t event, thread_waitinfo_t * waitinfo)
3118 {
3119 assert(waitinfo->wait_type == kThreadWaitPortReceive);
3120 assert(event == IPC_MQUEUE_RECEIVE);
3121
3122 ipc_mqueue_t mqueue = imq_from_waitq(waitq);
3123 waitinfo->owner = 0;
3124 if (imq_is_set(mqueue)) { /* we are waiting on a port set */
3125 ipc_pset_t set = ips_from_mq(mqueue);
3126
3127 zone_id_require(ZONE_ID_IPC_PORT_SET, sizeof(struct ipc_pset), set);
3128
3129 /* Reset wait type to specify waiting on port set receive */
3130 waitinfo->wait_type = kThreadWaitPortSetReceive;
3131 waitinfo->context = VM_KERNEL_UNSLIDE_OR_PERM(set);
3132 if (ips_lock_held_kdp(set)) {
3133 waitinfo->owner = STACKSHOT_WAITOWNER_PSET_LOCKED;
3134 }
3135 /* There is no specific owner "at the other end" of a port set, so leave unset. */
3136 } else {
3137 ipc_port_t port = ip_from_mq(mqueue);
3138
3139 zone_id_require(ZONE_ID_IPC_PORT, sizeof(struct ipc_port), port);
3140
3141 waitinfo->context = VM_KERNEL_UNSLIDE_OR_PERM(port);
3142 if (ip_lock_held_kdp(port)) {
3143 waitinfo->owner = STACKSHOT_WAITOWNER_PORT_LOCKED;
3144 return;
3145 }
3146
3147 if (ip_active(port)) {
3148 if (port->ip_receiver_name != MACH_PORT_NULL) {
3149 waitinfo->owner = port->ip_receiver_name;
3150 } else {
3151 waitinfo->owner = STACKSHOT_WAITOWNER_INTRANSIT;
3152 }
3153 }
3154 }
3155 }
3156
3157 #if MACH_ASSERT
3158 #include <kern/machine.h>
3159
3160 /*
3161 * Keep a list of all allocated ports.
3162 * Allocation is intercepted via ipc_port_init;
3163 * deallocation is intercepted via io_free.
3164 */
3165 #if 0
3166 queue_head_t port_alloc_queue = QUEUE_HEAD_INITIALIZER(port_alloc_queue);
3167 LCK_SPIN_DECLARE(port_alloc_queue_lock, &ipc_lck_grp, &ipc_lck_attr);
3168 #endif
3169
3170 unsigned long port_count = 0;
3171 unsigned long port_count_warning = 20000;
3172 unsigned long port_timestamp = 0;
3173
3174 void db_port_stack_trace(
3175 ipc_port_t port);
3176 void db_ref(
3177 int refs);
3178 int db_port_walk(
3179 unsigned int verbose,
3180 unsigned int display,
3181 unsigned int ref_search,
3182 unsigned int ref_target);
3183
3184 #ifdef MACH_BSD
3185 extern int proc_pid(struct proc*);
3186 #endif /* MACH_BSD */
3187
3188 /*
3189 * Initialize all of the debugging state in a port.
3190 * Insert the port into a global list of all allocated ports.
3191 */
3192 void
3193 ipc_port_init_debug(
3194 ipc_port_t port,
3195 uintptr_t *callstack,
3196 unsigned int callstack_max)
3197 {
3198 unsigned int i;
3199
3200 port->ip_thread = current_thread();
3201 port->ip_timetrack = port_timestamp++;
3202 for (i = 0; i < callstack_max; ++i) {
3203 port->ip_callstack[i] = callstack[i];
3204 }
3205 for (i = 0; i < IP_NSPARES; ++i) {
3206 port->ip_spares[i] = 0;
3207 }
3208
3209 #ifdef MACH_BSD
3210 task_t task = current_task();
3211 if (task != TASK_NULL) {
3212 struct proc* proc = (struct proc*) get_bsdtask_info(task);
3213 if (proc) {
3214 port->ip_spares[0] = proc_pid(proc);
3215 }
3216 }
3217 #endif /* MACH_BSD */
3218
3219 #if 0
3220 lck_spin_lock(&port_alloc_queue_lock);
3221 ++port_count;
3222 if (port_count_warning > 0 && port_count >= port_count_warning) {
3223 assert(port_count < port_count_warning);
3224 }
3225 queue_enter(&port_alloc_queue, port, ipc_port_t, ip_port_links);
3226 lck_spin_unlock(&port_alloc_queue_lock);
3227 #endif
3228 }
3229
3230 /*
3231 * Routine: ipc_port_callstack_init_debug
3232 * Purpose:
3233 * Calls the machine-dependent routine to
3234 * fill in an array with up to IP_CALLSTACK_MAX
3235 * levels of return pc information
3236 * Conditions:
3237 * May block (via copyin)
3238 */
3239 void
3240 ipc_port_callstack_init_debug(
3241 uintptr_t *callstack,
3242 unsigned int callstack_max)
3243 {
3244 unsigned int i;
3245
3246 /* guarantee the callstack is initialized */
3247 for (i = 0; i < callstack_max; i++) {
3248 callstack[i] = 0;
3249 }
3250
3251 if (ipc_portbt) {
3252 machine_callstack(callstack, callstack_max);
3253 }
3254 }
3255
3256 /*
3257 * Remove a port from the queue of allocated ports.
3258 * This routine should be invoked JUST prior to
3259 * deallocating the actual memory occupied by the port.
3260 */
3261 #if 1
3262 void
3263 ipc_port_track_dealloc(
3264 __unused ipc_port_t port)
3265 {
3266 }
3267 #else
3268 void
3269 ipc_port_track_dealloc(
3270 ipc_port_t port)
3271 {
3272 lck_spin_lock(&port_alloc_queue_lock);
3273 assert(port_count > 0);
3274 --port_count;
3275 queue_remove(&port_alloc_queue, port, ipc_port_t, ip_port_links);
3276 lck_spin_unlock(&port_alloc_queue_lock);
3277 }
3278 #endif
3279
3280
3281 #endif /* MACH_ASSERT */