/*
 * Copyright (c) 2000-2019 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */
/*
 */
/*
 * File: ipc/ipc_port.c
 * Author: Rich Draves
 * Date: 1989
 *
 * Functions to manipulate IPC ports.
 */

#include <zone_debug.h>
#include <mach_assert.h>

#include <mach/port.h>
#include <mach/kern_return.h>
#include <kern/ipc_kobject.h>
#include <kern/thread.h>
#include <kern/misc_protos.h>
#include <kern/waitq.h>
#include <kern/policy_internal.h>
#include <kern/debug.h>
#include <kern/kcdata.h>
#include <ipc/ipc_entry.h>
#include <ipc/ipc_space.h>
#include <ipc/ipc_object.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_pset.h>
#include <ipc/ipc_kmsg.h>
#include <ipc/ipc_mqueue.h>
#include <ipc/ipc_notify.h>
#include <ipc/ipc_table.h>
#include <ipc/ipc_importance.h>
#include <machine/limits.h>
#include <kern/turnstile.h>

#include <security/mac_mach_internal.h>

#include <string.h>

decl_lck_spin_data(, ipc_port_multiple_lock_data);
ipc_port_timestamp_t ipc_port_timestamp_data;
int ipc_portbt;
extern int prioritize_launch;

#if MACH_ASSERT
void ipc_port_init_debug(
    ipc_port_t port,
    uintptr_t *callstack,
    unsigned int callstack_max);

void ipc_port_callstack_init_debug(
    uintptr_t *callstack,
    unsigned int callstack_max);

#endif /* MACH_ASSERT */

static void
ipc_port_send_turnstile_recompute_push_locked(
    ipc_port_t port);

static thread_t
ipc_port_get_watchport_inheritor(
    ipc_port_t port);

void
ipc_port_release(ipc_port_t port)
{
    ip_release(port);
}

void
ipc_port_reference(ipc_port_t port)
{
    ip_reference(port);
}

/*
 * Routine: ipc_port_timestamp
 * Purpose:
 *     Retrieve a timestamp value.
 */

ipc_port_timestamp_t
ipc_port_timestamp(void)
{
    return OSIncrementAtomic(&ipc_port_timestamp_data);
}
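
/*
 * The counter above is free-running and eventually wraps, so code that
 * compares two of these stamps (taken as ports die, see the destroy path in
 * ipc_port_clear_receiver) must use wraparound-safe ordering rather than a
 * plain `<`. Below is a minimal, standalone sketch of such a comparison
 * (illustrative only, not kernel code; `timestamps_ordered` is a
 * hypothetical name). It stays correct while the two values are less than
 * 2^31 apart.
 */
#if 0 /* illustrative example, not compiled into the kernel */
#include <assert.h>
#include <stdint.h>

typedef uint32_t example_timestamp_t;

/* Nonzero when "one" was taken before "two", modulo wraparound. */
static int
timestamps_ordered(example_timestamp_t one, example_timestamp_t two)
{
    return (int32_t)(one - two) < 0;
}

int
main(void)
{
    assert(timestamps_ordered(5, 6));
    assert(timestamps_ordered(UINT32_MAX, 0)); /* still ordered across wrap */
    assert(!timestamps_ordered(6, 5));
    return 0;
}
#endif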

/*
 * Routine: ipc_port_request_alloc
 * Purpose:
 *     Try to allocate a request slot.
 *     If successful, returns the request index through indexp.
 *     Otherwise returns KERN_NO_SPACE.
 * Conditions:
 *     The port is locked and active.
 * Returns:
 *     KERN_SUCCESS     A request index was found.
 *     KERN_NO_SPACE    No index allocated.
 */

#if IMPORTANCE_INHERITANCE
kern_return_t
ipc_port_request_alloc(
    ipc_port_t port,
    mach_port_name_t name,
    ipc_port_t soright,
    boolean_t send_possible,
    boolean_t immediate,
    ipc_port_request_index_t *indexp,
    boolean_t *importantp)
#else
kern_return_t
ipc_port_request_alloc(
    ipc_port_t port,
    mach_port_name_t name,
    ipc_port_t soright,
    boolean_t send_possible,
    boolean_t immediate,
    ipc_port_request_index_t *indexp)
#endif /* IMPORTANCE_INHERITANCE */
{
    ipc_port_request_t ipr, table;
    ipc_port_request_index_t index;
    uintptr_t mask = 0;

#if IMPORTANCE_INHERITANCE
    *importantp = FALSE;
#endif /* IMPORTANCE_INHERITANCE */

    require_ip_active(port);
    assert(name != MACH_PORT_NULL);
    assert(soright != IP_NULL);

    table = port->ip_requests;

    if (table == IPR_NULL) {
        return KERN_NO_SPACE;
    }

    index = table->ipr_next;
    if (index == 0) {
        return KERN_NO_SPACE;
    }

    ipr = &table[index];
    assert(ipr->ipr_name == MACH_PORT_NULL);

    table->ipr_next = ipr->ipr_next;
    ipr->ipr_name = name;

    if (send_possible) {
        mask |= IPR_SOR_SPREQ_MASK;
        if (immediate) {
            mask |= IPR_SOR_SPARM_MASK;
            if (port->ip_sprequests == 0) {
                port->ip_sprequests = 1;
#if IMPORTANCE_INHERITANCE
                /* TODO: Live importance support in send-possible */
                if (port->ip_impdonation != 0 &&
                    port->ip_spimportant == 0 &&
                    (task_is_importance_donor(current_task()))) {
                    *importantp = TRUE;
                }
#endif /* IMPORTANCE_INHERITANCE */
            }
        }
    }
    ipr->ipr_soright = IPR_SOR_MAKE(soright, mask);

    *indexp = index;

    return KERN_SUCCESS;
}
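
/*
 * Slot 0 of ip_requests doubles as the table header: its ipr_next field is
 * the head of a free list threaded through the unused slots, which is why a
 * next index of zero above means "no space". A minimal, standalone model of
 * that claim/release discipline (illustrative only; `req_slot`, `req_claim`
 * and `req_release` are hypothetical names):
 */
#if 0 /* illustrative example, not compiled into the kernel */
#include <assert.h>
#include <stddef.h>

struct req_slot {
    unsigned name;  /* 0 means free, like MACH_PORT_NULL     */
    size_t   next;  /* next free index; in slot 0: list head */
};

/* Pop a slot off the embedded free list; 0 means the table is full. */
static size_t
req_claim(struct req_slot *table, unsigned name)
{
    size_t index = table[0].next;
    if (index != 0) {
        table[0].next = table[index].next;
        table[index].name = name;
    }
    return index;
}

/* Push a slot back, mirroring ipc_port_request_cancel() below. */
static void
req_release(struct req_slot *table, size_t index)
{
    table[index].name = 0;
    table[index].next = table[0].next;
    table[0].next = index;
}

int
main(void)
{
    /* slots 1..3 free, threaded as 1 -> 2 -> 3 -> 0 */
    struct req_slot table[4] = {{0, 1}, {0, 2}, {0, 3}, {0, 0}};
    size_t i = req_claim(table, 42);
    assert(i == 1 && table[0].next == 2);
    req_release(table, i);
    assert(table[0].next == 1);
    return 0;
}
#endif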

/*
 * Routine: ipc_port_request_grow
 * Purpose:
 *     Grow a port's table of requests.
 * Conditions:
 *     The port must be locked and active.
 *     Nothing else locked; will allocate memory.
 *     Upon return the port is unlocked.
 * Returns:
 *     KERN_SUCCESS             Grew the table.
 *     KERN_SUCCESS             Somebody else grew the table.
 *     KERN_SUCCESS             The port died.
 *     KERN_RESOURCE_SHORTAGE   Couldn't allocate new table.
 *     KERN_NO_SPACE            Couldn't grow to desired size.
 */

kern_return_t
ipc_port_request_grow(
    ipc_port_t port,
    ipc_table_elems_t target_size)
{
    ipc_table_size_t its;
    ipc_port_request_t otable, ntable;
    require_ip_active(port);

    otable = port->ip_requests;
    if (otable == IPR_NULL) {
        its = &ipc_table_requests[0];
    } else {
        its = otable->ipr_size + 1;
    }

    if (target_size != ITS_SIZE_NONE) {
        if ((otable != IPR_NULL) &&
            (target_size <= otable->ipr_size->its_size)) {
            ip_unlock(port);
            return KERN_SUCCESS;
        }
        while ((its->its_size) && (its->its_size < target_size)) {
            its++;
        }
        if (its->its_size == 0) {
            ip_unlock(port);
            return KERN_NO_SPACE;
        }
    }

    ip_reference(port);
    ip_unlock(port);

    if ((its->its_size == 0) ||
        ((ntable = it_requests_alloc(its)) == IPR_NULL)) {
        ip_release(port);
        return KERN_RESOURCE_SHORTAGE;
    }

    ip_lock(port);

    /*
     * Check that port is still active and that nobody else
     * has slipped in and grown the table on us.  Note that
     * just checking if the current table pointer == otable
     * isn't sufficient; must check ipr_size.
     */

    if (ip_active(port) && (port->ip_requests == otable) &&
        ((otable == IPR_NULL) || (otable->ipr_size + 1 == its))) {
        ipc_table_size_t oits;
        ipc_table_elems_t osize, nsize;
        ipc_port_request_index_t free, i;

        /* copy old table to new table */

        if (otable != IPR_NULL) {
            oits = otable->ipr_size;
            osize = oits->its_size;
            free = otable->ipr_next;

            (void) memcpy((void *)(ntable + 1),
                (const void *)(otable + 1),
                (osize - 1) * sizeof(struct ipc_port_request));
        } else {
            osize = 1;
            oits = 0;
            free = 0;
        }

        nsize = its->its_size;
        assert(nsize > osize);

        /* add new elements to the new table's free list */

        for (i = osize; i < nsize; i++) {
            ipc_port_request_t ipr = &ntable[i];

            ipr->ipr_name = MACH_PORT_NULL;
            ipr->ipr_next = free;
            free = i;
        }

        ntable->ipr_next = free;
        ntable->ipr_size = its;
        port->ip_requests = ntable;
        ip_unlock(port);
        ip_release(port);

        if (otable != IPR_NULL) {
            it_requests_free(oits, otable);
        }
    } else {
        ip_unlock(port);
        ip_release(port);
        it_requests_free(its, ntable);
    }

    return KERN_SUCCESS;
}
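
/*
 * The growth path above allocates the next-bigger table outside the lock,
 * copies slots 1..osize-1 (slot 0 is the header), and threads each fresh
 * slot onto the front of the free list, so the highest new index sits at
 * the head with any previously free slots behind it. A standalone sketch of
 * just that copy-and-thread step, reusing the hypothetical `struct req_slot`
 * from the previous example:
 */
#if 0 /* illustrative example, not compiled into the kernel */
#include <assert.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>

struct req_slot { unsigned name; size_t next; };

/* Grow a table from osize to nsize slots, preserving the free list. */
static struct req_slot *
req_grow(const struct req_slot *otable, size_t osize, size_t nsize)
{
    struct req_slot *ntable = calloc(nsize, sizeof(*ntable));
    size_t head = otable ? otable[0].next : 0;

    if (otable) {   /* copy everything but the header slot */
        memcpy(ntable + 1, otable + 1, (osize - 1) * sizeof(*ntable));
    }
    for (size_t i = osize; i < nsize; i++) {  /* thread the new slots */
        ntable[i].next = head;
        head = i;
    }
    ntable[0].next = head;
    return ntable;
}

int
main(void)
{
    /* fresh table: free list runs 3 -> 2 -> 1 -> 0 (empty) */
    struct req_slot *t = req_grow(NULL, 1, 4);
    assert(t[0].next == 3 && t[3].next == 2 && t[1].next == 0);
    free(t);
    return 0;
}
#endif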

/*
 * Routine: ipc_port_request_sparm
 * Purpose:
 *     Arm delayed send-possible request.
 * Conditions:
 *     The port must be locked and active.
 *
 *     Returns TRUE if the request was armed
 *     (or, with IMPORTANCE_INHERITANCE, armed with importance).
 */

boolean_t
ipc_port_request_sparm(
    ipc_port_t port,
    __assert_only mach_port_name_t name,
    ipc_port_request_index_t index,
    mach_msg_option_t option,
    mach_msg_priority_t override)
{
    if (index != IE_REQ_NONE) {
        ipc_port_request_t ipr, table;

        require_ip_active(port);

        table = port->ip_requests;
        assert(table != IPR_NULL);

        ipr = &table[index];
        assert(ipr->ipr_name == name);

        /* Is there a valid destination? */
        if (IPR_SOR_SPREQ(ipr->ipr_soright)) {
            ipr->ipr_soright = IPR_SOR_MAKE(ipr->ipr_soright, IPR_SOR_SPARM_MASK);
            port->ip_sprequests = 1;

            if (option & MACH_SEND_OVERRIDE) {
                /* apply override to message queue */
                ipc_mqueue_override_send(&port->ip_messages, override);
            }

#if IMPORTANCE_INHERITANCE
            if (((option & MACH_SEND_NOIMPORTANCE) == 0) &&
                (port->ip_impdonation != 0) &&
                (port->ip_spimportant == 0) &&
                (((option & MACH_SEND_IMPORTANCE) != 0) ||
                (task_is_importance_donor(current_task())))) {
                return TRUE;
            }
#else
            return TRUE;
#endif /* IMPORTANCE_INHERITANCE */
        }
    }
    return FALSE;
}

/*
 * Routine: ipc_port_request_type
 * Purpose:
 *     Determine the type(s) of port requests enabled for a name.
 * Conditions:
 *     The port must be locked or inactive (to avoid table growth).
 *     The index must not be IE_REQ_NONE and must be for the name in question.
 */
mach_port_type_t
ipc_port_request_type(
    ipc_port_t port,
    __assert_only mach_port_name_t name,
    ipc_port_request_index_t index)
{
    ipc_port_request_t ipr, table;
    mach_port_type_t type = 0;

    table = port->ip_requests;
    assert(table != IPR_NULL);

    assert(index != IE_REQ_NONE);
    ipr = &table[index];
    assert(ipr->ipr_name == name);

    if (IP_VALID(IPR_SOR_PORT(ipr->ipr_soright))) {
        type |= MACH_PORT_TYPE_DNREQUEST;

        if (IPR_SOR_SPREQ(ipr->ipr_soright)) {
            type |= MACH_PORT_TYPE_SPREQUEST;

            if (!IPR_SOR_SPARMED(ipr->ipr_soright)) {
                type |= MACH_PORT_TYPE_SPREQUEST_DELAYED;
            }
        }
    }
    return type;
}

/*
 * Routine: ipc_port_request_cancel
 * Purpose:
 *     Cancel a dead-name/send-possible request and return the send-once right.
 * Conditions:
 *     The port must be locked and active.
 *     The index must not be IE_REQ_NONE and must correspond with name.
 */

ipc_port_t
ipc_port_request_cancel(
    ipc_port_t port,
    __assert_only mach_port_name_t name,
    ipc_port_request_index_t index)
{
    ipc_port_request_t ipr, table;
    ipc_port_t request = IP_NULL;

    require_ip_active(port);
    table = port->ip_requests;
    assert(table != IPR_NULL);

    assert(index != IE_REQ_NONE);
    ipr = &table[index];
    assert(ipr->ipr_name == name);
    request = IPR_SOR_PORT(ipr->ipr_soright);

    /* return ipr to the free list inside the table */
    ipr->ipr_name = MACH_PORT_NULL;
    ipr->ipr_next = table->ipr_next;
    table->ipr_next = index;

    return request;
}

/*
 * Routine: ipc_port_pdrequest
 * Purpose:
 *     Make a port-deleted request, returning the
 *     previously registered send-once right.
 *     Just cancels the previous request if notify is IP_NULL.
 * Conditions:
 *     The port is locked and active.  It is unlocked.
 *     Consumes a ref for notify (if non-null), and
 *     returns previous with a ref (if non-null).
 */

void
ipc_port_pdrequest(
    ipc_port_t port,
    ipc_port_t notify,
    ipc_port_t *previousp)
{
    ipc_port_t previous;
    require_ip_active(port);

    previous = port->ip_pdrequest;
    port->ip_pdrequest = notify;
    ip_unlock(port);

    *previousp = previous;
}

/*
 * Routine: ipc_port_nsrequest
 * Purpose:
 *     Make a no-senders request, returning the
 *     previously registered send-once right.
 *     Just cancels the previous request if notify is IP_NULL.
 * Conditions:
 *     The port is locked and active.  It is unlocked.
 *     Consumes a ref for notify (if non-null), and
 *     returns previous with a ref (if non-null).
 */

void
ipc_port_nsrequest(
    ipc_port_t port,
    mach_port_mscount_t sync,
    ipc_port_t notify,
    ipc_port_t *previousp)
{
    ipc_port_t previous;
    mach_port_mscount_t mscount;
    require_ip_active(port);

    previous = port->ip_nsrequest;
    mscount = port->ip_mscount;

    if ((port->ip_srights == 0) && (sync <= mscount) &&
        (notify != IP_NULL)) {
        port->ip_nsrequest = IP_NULL;
        ip_unlock(port);
        ipc_notify_no_senders(notify, mscount);
    } else {
        port->ip_nsrequest = notify;
        ip_unlock(port);
    }

    *previousp = previous;
}
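
/*
 * ipc_port_nsrequest() is the kernel half of the user-visible no-senders
 * notification: user space registers interest through
 * mach_port_request_notification() with MACH_NOTIFY_NO_SENDERS. A hedged
 * user-space sketch (standard Mach APIs; error handling trimmed to asserts):
 */
#if 0 /* illustrative user-space example, not part of this file */
#include <assert.h>
#include <mach/mach.h>

int
main(void)
{
    mach_port_t port, notify, previous = MACH_PORT_NULL;

    /* receive right whose senders we want to watch */
    assert(mach_port_allocate(mach_task_self(),
        MACH_PORT_RIGHT_RECEIVE, &port) == KERN_SUCCESS);
    /* port on which the no-senders message will be delivered */
    assert(mach_port_allocate(mach_task_self(),
        MACH_PORT_RIGHT_RECEIVE, &notify) == KERN_SUCCESS);

    /* sync == 1: don't fire until at least one send right has been made */
    assert(mach_port_request_notification(mach_task_self(), port,
        MACH_NOTIFY_NO_SENDERS, 1, notify,
        MACH_MSG_TYPE_MAKE_SEND_ONCE, &previous) == KERN_SUCCESS);
    assert(previous == MACH_PORT_NULL);
    return 0;
}
#endif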

/*
 * Routine: ipc_port_clear_receiver
 * Purpose:
 *     Prepares a receive right for transmission/destruction,
 *     optionally performs mqueue destruction (with port lock held)
 *
 * Conditions:
 *     The port is locked and active.
 * Returns:
 *     If should_destroy is TRUE, then the return value indicates
 *     whether the caller needs to reap kmsg structures that should
 *     be destroyed (by calling ipc_kmsg_reap_delayed)
 *
 *     If should_destroy is FALSE, this always returns FALSE
 */

boolean_t
ipc_port_clear_receiver(
    ipc_port_t port,
    boolean_t should_destroy)
{
    ipc_mqueue_t mqueue = &port->ip_messages;
    boolean_t reap_messages = FALSE;

    /*
     * Pull ourselves out of any sets to which we belong.
     * We hold the port locked, so even though this acquires and releases
     * the mqueue lock, we know we won't be added to any other sets.
     */
    if (port->ip_in_pset != 0) {
        ipc_pset_remove_from_all(port);
        assert(port->ip_in_pset == 0);
    }

    /*
     * Send anyone waiting on the port's queue directly away.
     * Also clear the mscount, seqno, guard bits
     */
    imq_lock(mqueue);
    if (port->ip_receiver_name) {
        ipc_mqueue_changed(port->ip_receiver, mqueue);
    } else {
        ipc_mqueue_changed(NULL, mqueue);
    }
    port->ip_mscount = 0;
    mqueue->imq_seqno = 0;
    port->ip_context = port->ip_guarded = port->ip_strict_guard = 0;
    /*
     * clear the immovable bit so the port can move back to anyone listening
     * for the port destroy notification
     */
    port->ip_immovable_receive = 0;

    if (should_destroy) {
        /*
         * Mark the port and mqueue invalid, preventing further send/receive
         * operations from succeeding.  It's important for this to be
         * done under the same lock hold as the ipc_mqueue_changed
         * call to avoid additional threads blocking on an mqueue
         * that's being destroyed.
         *
         * The port active bit needs to be guarded under mqueue lock for
         * turnstiles
         */
        port->ip_object.io_bits &= ~IO_BITS_ACTIVE;
        port->ip_timestamp = ipc_port_timestamp();
        reap_messages = ipc_mqueue_destroy_locked(mqueue);
    } else {
        /* make port be in limbo */
        port->ip_receiver_name = MACH_PORT_NULL;
        port->ip_destination = IP_NULL;
    }

    imq_unlock(&port->ip_messages);

    return reap_messages;
}

/*
 * Routine: ipc_port_init
 * Purpose:
 *     Initializes a newly-allocated port.
 *     Doesn't touch the ip_object fields.
 */

void
ipc_port_init(
    ipc_port_t port,
    ipc_space_t space,
    ipc_port_init_flags_t flags,
    mach_port_name_t name)
{
    /* port->ip_kobject doesn't have to be initialized */

    port->ip_receiver = space;
    port->ip_receiver_name = name;

    port->ip_mscount = 0;
    port->ip_srights = 0;
    port->ip_sorights = 0;
    if (flags & IPC_PORT_INIT_MAKE_SEND_RIGHT) {
        port->ip_srights = 1;
        port->ip_mscount = 1;
    }

    port->ip_nsrequest = IP_NULL;
    port->ip_pdrequest = IP_NULL;
    port->ip_requests = IPR_NULL;

    port->ip_premsg = IKM_NULL;
    port->ip_context = 0;
    port->ip_reply_context = 0;

    port->ip_sprequests = 0;
    port->ip_spimportant = 0;
    port->ip_impdonation = 0;
    port->ip_tempowner = 0;

    port->ip_guarded = 0;
    port->ip_strict_guard = 0;
    port->ip_immovable_receive = 0;
    port->ip_no_grant = 0;
    port->ip_immovable_send = 0;
    port->ip_impcount = 0;

    port->ip_specialreply = (flags & IPC_PORT_INIT_SPECIAL_REPLY) != 0;
    port->ip_sync_link_state = PORT_SYNC_LINK_ANY;
    port->ip_sync_bootstrap_checkin = 0;

    ipc_special_reply_port_bits_reset(port);

    port->ip_send_turnstile = TURNSTILE_NULL;

    ipc_mqueue_kind_t kind = IPC_MQUEUE_KIND_NONE;
    if (flags & IPC_PORT_INIT_MESSAGE_QUEUE) {
        kind = IPC_MQUEUE_KIND_PORT;
    }
    ipc_mqueue_init(&port->ip_messages, kind);
}

/*
 * Routine: ipc_port_alloc
 * Purpose:
 *     Allocate a port.
 * Conditions:
 *     Nothing locked.  If successful, the port is returned
 *     locked.  (The caller doesn't have a reference.)
 * Returns:
 *     KERN_SUCCESS             The port is allocated.
 *     KERN_INVALID_TASK        The space is dead.
 *     KERN_NO_SPACE            No room for an entry in the space.
 *     KERN_RESOURCE_SHORTAGE   Couldn't allocate memory.
 */

kern_return_t
ipc_port_alloc(
    ipc_space_t space,
    ipc_port_init_flags_t flags,
    mach_port_name_t *namep,
    ipc_port_t *portp)
{
    ipc_port_t port;
    mach_port_name_t name;
    kern_return_t kr;
    mach_port_type_t type = MACH_PORT_TYPE_RECEIVE;
    mach_port_urefs_t urefs = 0;

#if MACH_ASSERT
    uintptr_t buf[IP_CALLSTACK_MAX];
    ipc_port_callstack_init_debug(&buf[0], IP_CALLSTACK_MAX);
#endif /* MACH_ASSERT */

    if (flags & IPC_PORT_INIT_MAKE_SEND_RIGHT) {
        type |= MACH_PORT_TYPE_SEND;
        urefs = 1;
    }
    kr = ipc_object_alloc(space, IOT_PORT, type, urefs,
        &name, (ipc_object_t *) &port);
    if (kr != KERN_SUCCESS) {
        return kr;
    }

    /* port and space are locked */
    ipc_port_init(port, space, flags, name);

#if MACH_ASSERT
    ipc_port_init_debug(port, &buf[0], IP_CALLSTACK_MAX);
#endif /* MACH_ASSERT */

    /* unlock space after init */
    is_write_unlock(space);

    *namep = name;
    *portp = port;

    return KERN_SUCCESS;
}
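
/*
 * ipc_port_alloc() is the kernel-side allocation reached when user space
 * asks for a new receive right. A hedged user-space sketch of the visible
 * contract, using only standard Mach APIs:
 */
#if 0 /* illustrative user-space example, not part of this file */
#include <assert.h>
#include <mach/mach.h>

int
main(void)
{
    mach_port_t name = MACH_PORT_NULL;

    /* allocate a fresh receive right in this task's IPC space */
    assert(mach_port_allocate(mach_task_self(),
        MACH_PORT_RIGHT_RECEIVE, &name) == KERN_SUCCESS);
    assert(MACH_PORT_VALID(name));

    /* drop it again; the kernel then runs the ipc_port_destroy() path */
    assert(mach_port_mod_refs(mach_task_self(), name,
        MACH_PORT_RIGHT_RECEIVE, -1) == KERN_SUCCESS);
    return 0;
}
#endif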

/*
 * Routine: ipc_port_alloc_name
 * Purpose:
 *     Allocate a port, with a specific name.
 * Conditions:
 *     Nothing locked.  If successful, the port is returned
 *     locked.  (The caller doesn't have a reference.)
 * Returns:
 *     KERN_SUCCESS             The port is allocated.
 *     KERN_INVALID_TASK        The space is dead.
 *     KERN_NAME_EXISTS         The name already denotes a right.
 *     KERN_RESOURCE_SHORTAGE   Couldn't allocate memory.
 */

kern_return_t
ipc_port_alloc_name(
    ipc_space_t space,
    ipc_port_init_flags_t flags,
    mach_port_name_t name,
    ipc_port_t *portp)
{
    ipc_port_t port;
    kern_return_t kr;
    mach_port_type_t type = MACH_PORT_TYPE_RECEIVE;
    mach_port_urefs_t urefs = 0;

#if MACH_ASSERT
    uintptr_t buf[IP_CALLSTACK_MAX];
    ipc_port_callstack_init_debug(&buf[0], IP_CALLSTACK_MAX);
#endif /* MACH_ASSERT */

    if (flags & IPC_PORT_INIT_MAKE_SEND_RIGHT) {
        type |= MACH_PORT_TYPE_SEND;
        urefs = 1;
    }
    kr = ipc_object_alloc_name(space, IOT_PORT, type, urefs,
        name, (ipc_object_t *) &port);
    if (kr != KERN_SUCCESS) {
        return kr;
    }

    /* port is locked */

    ipc_port_init(port, space, flags, name);

#if MACH_ASSERT
    ipc_port_init_debug(port, &buf[0], IP_CALLSTACK_MAX);
#endif /* MACH_ASSERT */

    *portp = port;

    return KERN_SUCCESS;
}

/*
 * Routine: ipc_port_spnotify
 * Purpose:
 *     Generate send-possible port notifications.
 * Conditions:
 *     Nothing locked, reference held on port.
 */
void
ipc_port_spnotify(
    ipc_port_t port)
{
    ipc_port_request_index_t index = 0;
    ipc_table_elems_t size = 0;

    /*
     * If the port has no send-possible request
     * armed, don't bother to lock the port.
     */
    if (port->ip_sprequests == 0) {
        return;
    }

    ip_lock(port);

#if IMPORTANCE_INHERITANCE
    if (port->ip_spimportant != 0) {
        port->ip_spimportant = 0;
        if (ipc_port_importance_delta(port, IPID_OPTION_NORMAL, -1) == TRUE) {
            ip_lock(port);
        }
    }
#endif /* IMPORTANCE_INHERITANCE */

    if (port->ip_sprequests == 0) {
        ip_unlock(port);
        return;
    }
    port->ip_sprequests = 0;

revalidate:
    if (ip_active(port)) {
        ipc_port_request_t requests;

        /* table may change each time port unlocked (reload) */
        requests = port->ip_requests;
        assert(requests != IPR_NULL);

        /*
         * No need to go beyond the table size from when we
         * first entered - those are future notifications.
         */
        if (size == 0) {
            size = requests->ipr_size->its_size;
        }

        /* no need to backtrack either */
        while (++index < size) {
            ipc_port_request_t ipr = &requests[index];
            mach_port_name_t name = ipr->ipr_name;
            ipc_port_t soright = IPR_SOR_PORT(ipr->ipr_soright);
            boolean_t armed = IPR_SOR_SPARMED(ipr->ipr_soright);

            if (MACH_PORT_VALID(name) && armed && IP_VALID(soright)) {
                /* claim send-once right - slot still in use */
                ipr->ipr_soright = IP_NULL;
                ip_unlock(port);

                ipc_notify_send_possible(soright, name);

                ip_lock(port);
                goto revalidate;
            }
        }
    }
    ip_unlock(port);
    return;
}

/*
 * Routine: ipc_port_dnnotify
 * Purpose:
 *     Generate dead name notifications for
 *     all outstanding dead-name and send-
 *     possible requests.
 * Conditions:
 *     Nothing locked.
 *     Port must be inactive.
 *     Reference held on port.
 */
void
ipc_port_dnnotify(
    ipc_port_t port)
{
    ipc_port_request_t requests = port->ip_requests;

    assert(!ip_active(port));
    if (requests != IPR_NULL) {
        ipc_table_size_t its = requests->ipr_size;
        ipc_table_elems_t size = its->its_size;
        ipc_port_request_index_t index;
        for (index = 1; index < size; index++) {
            ipc_port_request_t ipr = &requests[index];
            mach_port_name_t name = ipr->ipr_name;
            ipc_port_t soright = IPR_SOR_PORT(ipr->ipr_soright);

            if (MACH_PORT_VALID(name) && IP_VALID(soright)) {
                ipc_notify_dead_name(soright, name);
            }
        }
    }
}
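
/*
 * Both notification kinds generated above are requested from user space
 * through mach_port_request_notification(), with MACH_NOTIFY_DEAD_NAME or
 * MACH_NOTIFY_SEND_POSSIBLE as the message id. A hedged user-space sketch
 * (standard Mach APIs; `target` is a right obtained elsewhere):
 */
#if 0 /* illustrative user-space example, not part of this file */
#include <assert.h>
#include <mach/mach.h>

/* Ask for a dead-name notification on `target`, delivered to `notify`. */
static void
watch_for_death(mach_port_t target, mach_port_t notify)
{
    mach_port_t previous = MACH_PORT_NULL;

    assert(mach_port_request_notification(mach_task_self(), target,
        MACH_NOTIFY_DEAD_NAME, 0, notify,
        MACH_MSG_TYPE_MAKE_SEND_ONCE, &previous) == KERN_SUCCESS);
    if (MACH_PORT_VALID(previous)) {
        /* consume any previously registered send-once right */
        mach_port_deallocate(mach_task_self(), previous);
    }
}

/* Arm a send-possible notification, e.g. after MACH_SEND_TIMED_OUT. */
static void
watch_for_send_possible(mach_port_t target, mach_port_t notify)
{
    mach_port_t previous = MACH_PORT_NULL;

    assert(mach_port_request_notification(mach_task_self(), target,
        MACH_NOTIFY_SEND_POSSIBLE, 0, notify,
        MACH_MSG_TYPE_MAKE_SEND_ONCE, &previous) == KERN_SUCCESS);
}
#endif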

/*
 * Routine: ipc_port_destroy
 * Purpose:
 *     Destroys a port.  Cleans up queued messages.
 *
 *     If the port has a backup, it doesn't get destroyed,
 *     but is sent in a port-destroyed notification to the backup.
 * Conditions:
 *     The port is locked and alive; nothing else locked.
 *     The caller has a reference, which is consumed.
 *     Afterwards, the port is unlocked and dead.
 */

void
ipc_port_destroy(ipc_port_t port)
{
    ipc_port_t pdrequest, nsrequest;
    ipc_mqueue_t mqueue;
    ipc_kmsg_t kmsg;
    boolean_t special_reply = port->ip_specialreply;
    struct task_watchport_elem *watchport_elem = NULL;

#if IMPORTANCE_INHERITANCE
    ipc_importance_task_t release_imp_task = IIT_NULL;
    thread_t self = current_thread();
    boolean_t top = (self->ith_assertions == 0);
    natural_t assertcnt = 0;
#endif /* IMPORTANCE_INHERITANCE */

    require_ip_active(port);
    /* port->ip_receiver_name is garbage */
    /* port->ip_receiver/port->ip_destination is garbage */

    /* clear any reply-port context */
    port->ip_reply_context = 0;

    /* check for a backup port */
    pdrequest = port->ip_pdrequest;

#if IMPORTANCE_INHERITANCE
    /* determine how many assertions to drop and from whom */
    if (port->ip_tempowner != 0) {
        assert(top);
        release_imp_task = port->ip_imp_task;
        if (IIT_NULL != release_imp_task) {
            port->ip_imp_task = IIT_NULL;
            assertcnt = port->ip_impcount;
        }
        /* Otherwise, nothing to drop */
    } else {
        assertcnt = port->ip_impcount;
        if (pdrequest != IP_NULL) {
            /* mark in limbo for the journey */
            port->ip_tempowner = 1;
        }
    }

    if (top) {
        self->ith_assertions = assertcnt;
    }
#endif /* IMPORTANCE_INHERITANCE */

    if (pdrequest != IP_NULL) {
        /* clear receiver, don't destroy the port */
        (void)ipc_port_clear_receiver(port, FALSE);
        assert(port->ip_in_pset == 0);
        assert(port->ip_mscount == 0);

        /* we assume the ref for pdrequest */
        port->ip_pdrequest = IP_NULL;

        imq_lock(&port->ip_messages);
        watchport_elem = ipc_port_clear_watchport_elem_internal(port);
        ipc_port_send_turnstile_recompute_push_locked(port);
        /* mqueue and port unlocked */

        if (special_reply) {
            ipc_port_adjust_special_reply_port(port,
                IPC_PORT_ADJUST_SR_ALLOW_SYNC_LINKAGE);
        }

        if (watchport_elem) {
            task_watchport_elem_deallocate(watchport_elem);
            watchport_elem = NULL;
        }
        /* consumes our refs for port and pdrequest */
        ipc_notify_port_destroyed(pdrequest, port);

        goto drop_assertions;
    }

    /*
     * The mach_msg_* paths don't hold a port lock, they only hold a
     * reference to the port object. If a thread raced us and is now
     * blocked waiting for message reception on this mqueue (or waiting
     * for ipc_mqueue_full), it will never be woken up. We call
     * ipc_port_clear_receiver() here, _after_ the port has been marked
     * inactive, to wakeup any threads which may be blocked and ensure
     * that no other thread can get lost waiting for a wake up on a
     * port/mqueue that's been destroyed.
     */
    boolean_t reap_msgs = FALSE;
    reap_msgs = ipc_port_clear_receiver(port, TRUE); /* marks port and mqueue inactive */
    assert(port->ip_in_pset == 0);
    assert(port->ip_mscount == 0);

    imq_lock(&port->ip_messages);
    watchport_elem = ipc_port_clear_watchport_elem_internal(port);
    imq_unlock(&port->ip_messages);
    nsrequest = port->ip_nsrequest;

    /*
     * If the port has a preallocated message buffer and that buffer
     * is not in use, free it.  If it has one in use, then the kmsg
     * free will detect that we freed the association and it can free it
     * like a normal buffer.
     *
     * Once the port is marked inactive we don't need to keep it locked.
     */
    if (IP_PREALLOC(port)) {
        ipc_port_t inuse_port;

        kmsg = port->ip_premsg;
        assert(kmsg != IKM_NULL);
        inuse_port = ikm_prealloc_inuse_port(kmsg);
        ipc_kmsg_clear_prealloc(kmsg, port);

        imq_lock(&port->ip_messages);
        ipc_port_send_turnstile_recompute_push_locked(port);
        /* mqueue and port unlocked */

        if (inuse_port != IP_NULL) {
            assert(inuse_port == port);
        } else {
            ipc_kmsg_free(kmsg);
        }
    } else {
        imq_lock(&port->ip_messages);
        ipc_port_send_turnstile_recompute_push_locked(port);
        /* mqueue and port unlocked */
    }

    /* Deallocate the watchport element */
    if (watchport_elem) {
        task_watchport_elem_deallocate(watchport_elem);
        watchport_elem = NULL;
    }

    /* unlink the kmsg from special reply port */
    if (special_reply) {
        ipc_port_adjust_special_reply_port(port,
            IPC_PORT_ADJUST_SR_ALLOW_SYNC_LINKAGE);
    }

    /* throw away no-senders request */
    if (nsrequest != IP_NULL) {
        ipc_notify_send_once(nsrequest); /* consumes ref */
    }
    /*
     * Reap any kmsg objects waiting to be destroyed.
     * This must be done after we've released the port lock.
     */
    if (reap_msgs) {
        ipc_kmsg_reap_delayed();
    }

    mqueue = &port->ip_messages;

    /* cleanup waitq related resources */
    ipc_mqueue_deinit(mqueue);

    /* generate dead-name notifications */
    ipc_port_dnnotify(port);

    ipc_kobject_destroy(port);

    ip_release(port); /* consume caller's ref */

drop_assertions:
#if IMPORTANCE_INHERITANCE
    if (release_imp_task != IIT_NULL) {
        if (assertcnt > 0) {
            assert(top);
            self->ith_assertions = 0;
            assert(ipc_importance_task_is_any_receiver_type(release_imp_task));
            ipc_importance_task_drop_internal_assertion(release_imp_task, assertcnt);
        }
        ipc_importance_task_release(release_imp_task);
    } else if (assertcnt > 0) {
        if (top) {
            self->ith_assertions = 0;
            release_imp_task = current_task()->task_imp_base;
            if (ipc_importance_task_is_any_receiver_type(release_imp_task)) {
                ipc_importance_task_drop_internal_assertion(release_imp_task, assertcnt);
            }
        }
    }
#endif /* IMPORTANCE_INHERITANCE */
}
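
/*
 * The "backup" consulted above is registered from user space with
 * MACH_NOTIFY_PORT_DESTROYED: instead of dying, the receive right is
 * shipped to the backup port inside the notification message. A hedged
 * user-space sketch of the registration (standard Mach APIs; modern
 * kernels may restrict which ports accept this request):
 */
#if 0 /* illustrative user-space example, not part of this file */
#include <assert.h>
#include <mach/mach.h>

/* Have `backup` receive `port`'s receive right if `port` would die. */
static void
register_backup(mach_port_t port, mach_port_t backup)
{
    mach_port_t previous = MACH_PORT_NULL;

    assert(mach_port_request_notification(mach_task_self(), port,
        MACH_NOTIFY_PORT_DESTROYED, 0, backup,
        MACH_MSG_TYPE_MAKE_SEND_ONCE, &previous) == KERN_SUCCESS);
    assert(previous == MACH_PORT_NULL);
}
#endif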

/*
 * Routine: ipc_port_check_circularity
 * Purpose:
 *     Check if queueing "port" in a message for "dest"
 *     would create a circular group of ports and messages.
 *
 *     If no circularity (FALSE returned), then "port"
 *     is changed from "in limbo" to "in transit".
 *
 *     That is, we want to set port->ip_destination == dest,
 *     but guaranteeing that this doesn't create a circle
 *     port->ip_destination->ip_destination->... == port
 *
 * Conditions:
 *     No ports locked.  References held for "port" and "dest".
 */

boolean_t
ipc_port_check_circularity(
    ipc_port_t port,
    ipc_port_t dest)
{
#if IMPORTANCE_INHERITANCE
    /* adjust importance counts at the same time */
    return ipc_importance_check_circularity(port, dest);
#else
    ipc_port_t base;
    struct task_watchport_elem *watchport_elem = NULL;

    assert(port != IP_NULL);
    assert(dest != IP_NULL);

    if (port == dest) {
        return TRUE;
    }
    base = dest;

    /* Check if destination needs a turnstile */
    ipc_port_send_turnstile_prepare(dest);

    /*
     * First try a quick check that can run in parallel.
     * No circularity if dest is not in transit.
     */
    ip_lock(port);
    if (ip_lock_try(dest)) {
        if (!ip_active(dest) ||
            (dest->ip_receiver_name != MACH_PORT_NULL) ||
            (dest->ip_destination == IP_NULL)) {
            goto not_circular;
        }

        /* dest is in transit; further checking necessary */

        ip_unlock(dest);
    }
    ip_unlock(port);

    ipc_port_multiple_lock(); /* massive serialization */

    /*
     * Search for the end of the chain (a port not in transit),
     * acquiring locks along the way.
     */

    for (;;) {
        ip_lock(base);

        if (!ip_active(base) ||
            (base->ip_receiver_name != MACH_PORT_NULL) ||
            (base->ip_destination == IP_NULL)) {
            break;
        }

        base = base->ip_destination;
    }

    /* all ports in chain from dest to base, inclusive, are locked */

    if (port == base) {
        /* circularity detected! */

        ipc_port_multiple_unlock();

        /* port (== base) is in limbo */
        require_ip_active(port);
        assert(port->ip_receiver_name == MACH_PORT_NULL);
        assert(port->ip_destination == IP_NULL);

        base = dest;
        while (base != IP_NULL) {
            ipc_port_t next;

            /* dest is in transit or in limbo */
            require_ip_active(base);
            assert(base->ip_receiver_name == MACH_PORT_NULL);

            next = base->ip_destination;
            ip_unlock(base);
            base = next;
        }

        ipc_port_send_turnstile_complete(dest);
        return TRUE;
    }

    /*
     * The guarantee: lock port while the entire chain is locked.
     * Once port is locked, we can take a reference to dest,
     * add port to the chain, and unlock everything.
     */

    ip_lock(port);
    ipc_port_multiple_unlock();

not_circular:
    imq_lock(&port->ip_messages);

    /* port is in limbo */
    require_ip_active(port);
    assert(port->ip_receiver_name == MACH_PORT_NULL);
    assert(port->ip_destination == IP_NULL);

    /* Clear the watchport boost */
    watchport_elem = ipc_port_clear_watchport_elem_internal(port);

    /* Check if the port is being enqueued as a part of sync bootstrap checkin */
    if (dest->ip_specialreply && dest->ip_sync_bootstrap_checkin) {
        port->ip_sync_bootstrap_checkin = 1;
    }

    ip_reference(dest);
    port->ip_destination = dest;

    /* Setup linkage for source port if it has sync ipc push */
    struct turnstile *send_turnstile = TURNSTILE_NULL;
    if (port_send_turnstile(port)) {
        send_turnstile = turnstile_prepare((uintptr_t)port,
            port_send_turnstile_address(port),
            TURNSTILE_NULL, TURNSTILE_SYNC_IPC);

        /*
         * What ipc_port_adjust_port_locked would do,
         * but we need to also drop even more locks before
         * calling turnstile_update_inheritor_complete().
         */
        ipc_port_adjust_sync_link_state_locked(port, PORT_SYNC_LINK_ANY, NULL);

        turnstile_update_inheritor(send_turnstile, port_send_turnstile(dest),
            (TURNSTILE_INHERITOR_TURNSTILE | TURNSTILE_IMMEDIATE_UPDATE));

        /* update complete and turnstile complete called after dropping all locks */
    }
    imq_unlock(&port->ip_messages);

    /* now unlock chain */

    ip_unlock(port);

    for (;;) {
        ipc_port_t next;

        if (dest == base) {
            break;
        }

        /* port is in transit */
        require_ip_active(dest);
        assert(dest->ip_receiver_name == MACH_PORT_NULL);
        assert(dest->ip_destination != IP_NULL);

        next = dest->ip_destination;
        ip_unlock(dest);
        dest = next;
    }

    /* base is not in transit */
    assert(!ip_active(base) ||
        (base->ip_receiver_name != MACH_PORT_NULL) ||
        (base->ip_destination == IP_NULL));

    ip_unlock(base);

    /* All locks dropped, call turnstile_update_inheritor_complete for source port's turnstile */
    if (send_turnstile) {
        turnstile_update_inheritor_complete(send_turnstile, TURNSTILE_INTERLOCK_NOT_HELD);

        /* Take the mq lock to call turnstile complete */
        imq_lock(&port->ip_messages);
        turnstile_complete((uintptr_t)port, port_send_turnstile_address(port), NULL, TURNSTILE_SYNC_IPC);
        send_turnstile = TURNSTILE_NULL;
        imq_unlock(&port->ip_messages);
        turnstile_cleanup();
    }

    if (watchport_elem) {
        task_watchport_elem_deallocate(watchport_elem);
    }

    return FALSE;
#endif /* !IMPORTANCE_INHERITANCE */
}
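
/*
 * The invariant maintained above: following ip_destination links from any
 * in-transit port always reaches a port that is not in transit, never a
 * cycle. A standalone model of the detection step, minus all the locking
 * (illustrative only; `struct toy_port` and `would_cycle` are hypothetical):
 */
#if 0 /* illustrative example, not compiled into the kernel */
#include <assert.h>
#include <stddef.h>

struct toy_port {
    struct toy_port *destination;   /* NULL when not in transit */
};

/* Nonzero if making dest the destination of port would close a loop. */
static int
would_cycle(struct toy_port *port, struct toy_port *dest)
{
    struct toy_port *base = dest;

    while (base->destination != NULL) {  /* walk to the end of the chain */
        base = base->destination;
    }
    return port == base || port == dest;
}

int
main(void)
{
    struct toy_port a = { NULL }, b = { NULL }, c = { NULL };

    b.destination = &a;                 /* chain: b -> a */
    assert(!would_cycle(&c, &b));       /* c -> b -> a is fine */
    assert(would_cycle(&a, &b));        /* a -> b -> a would loop */
    return 0;
}
#endif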

/*
 * Routine: ipc_port_watchport_elem
 * Purpose:
 *     Get the port's watchport elem field
 *
 * Conditions:
 *     mqueue locked
 */
static struct task_watchport_elem *
ipc_port_watchport_elem(ipc_port_t port)
{
    return port->ip_messages.imq_wait_queue.waitq_tspriv;
}

/*
 * Routine: ipc_port_update_watchport_elem
 * Purpose:
 *     Set the port's watchport elem field
 *
 * Conditions:
 *     mqueue locked
 */
static inline struct task_watchport_elem *
ipc_port_update_watchport_elem(ipc_port_t port, struct task_watchport_elem *we)
{
    struct task_watchport_elem *old_we = ipc_port_watchport_elem(port);
    port->ip_messages.imq_wait_queue.waitq_tspriv = we;
    return old_we;
}

/*
 * Update the recv turnstile inheritor for a port.
 *
 * Sync IPC through the port receive turnstile only happens for the special
 * reply port case. It has three sub-cases:
 *
 * 1. a send-once right is in transit, and pushes on the send turnstile of its
 *    destination mqueue.
 *
 * 2. a send-once right has been stashed on a knote it was copied out "through",
 *    as the first such copied out port.
 *
 * 3. a send-once right has been stashed on a knote it was copied out "through",
 *    as the second or more copied out port.
 */
void
ipc_port_recv_update_inheritor(
    ipc_port_t port,
    struct turnstile *rcv_turnstile,
    turnstile_update_flags_t flags)
{
    struct turnstile *inheritor = TURNSTILE_NULL;
    struct knote *kn;

    if (ip_active(port) && port->ip_specialreply) {
        imq_held(&port->ip_messages);

        switch (port->ip_sync_link_state) {
        case PORT_SYNC_LINK_PORT:
            if (port->ip_sync_inheritor_port != NULL) {
                inheritor = port_send_turnstile(port->ip_sync_inheritor_port);
            }
            break;

        case PORT_SYNC_LINK_WORKLOOP_KNOTE:
            kn = port->ip_sync_inheritor_knote;
            inheritor = filt_ipc_kqueue_turnstile(kn);
            break;

        case PORT_SYNC_LINK_WORKLOOP_STASH:
            inheritor = port->ip_sync_inheritor_ts;
            break;
        }
    }

    turnstile_update_inheritor(rcv_turnstile, inheritor,
        flags | TURNSTILE_INHERITOR_TURNSTILE);
}

/*
 * Update the send turnstile inheritor for a port.
 *
 * Sync IPC through the port send turnstile has 7 possible reasons to be linked:
 *
 * 1. a special reply port is part of sync ipc for bootstrap checkin and needs
 *    to push on thread doing the sync ipc.
 *
 * 2. a receive right is in transit, and pushes on the send turnstile of its
 *    destination mqueue.
 *
 * 3. port was passed as an exec watchport and port is pushing on main thread
 *    of the task.
 *
 * 4. a receive right has been stashed on a knote it was copied out "through",
 *    as the first such copied out port (same as PORT_SYNC_LINK_WORKLOOP_KNOTE
 *    for the special reply port)
 *
 * 5. a receive right has been stashed on a knote it was copied out "through",
 *    as the second or more copied out port (same as
 *    PORT_SYNC_LINK_WORKLOOP_STASH for the special reply port)
 *
 * 6. a receive right has been copied out as a part of sync bootstrap checkin
 *    and needs to push on thread doing the sync bootstrap checkin.
 *
 * 7. the receive right is monitored by a knote, and pushes on any that is
 *    registered on a workloop. filt_machport makes sure that if such a knote
 *    exists, it is kept as the first item in the knote list, so we never need
 *    to walk.
 */
void
ipc_port_send_update_inheritor(
    ipc_port_t port,
    struct turnstile *send_turnstile,
    turnstile_update_flags_t flags)
{
    ipc_mqueue_t mqueue = &port->ip_messages;
    turnstile_inheritor_t inheritor = TURNSTILE_INHERITOR_NULL;
    struct knote *kn;
    turnstile_update_flags_t inheritor_flags = TURNSTILE_INHERITOR_TURNSTILE;

    assert(imq_held(mqueue));

    if (!ip_active(port)) {
        /* this port is no longer active, it should not push anywhere */
    } else if (port->ip_specialreply) {
        /* Case 1. */
        if (port->ip_sync_bootstrap_checkin && prioritize_launch) {
            inheritor = port->ip_messages.imq_srp_owner_thread;
            inheritor_flags = TURNSTILE_INHERITOR_THREAD;
        }
    } else if (port->ip_receiver_name == MACH_PORT_NULL &&
        port->ip_destination != NULL) {
        /* Case 2. */
        inheritor = port_send_turnstile(port->ip_destination);
    } else if (ipc_port_watchport_elem(port) != NULL) {
        /* Case 3. */
        if (prioritize_launch) {
            assert(port->ip_sync_link_state == PORT_SYNC_LINK_ANY);
            inheritor = ipc_port_get_watchport_inheritor(port);
            inheritor_flags = TURNSTILE_INHERITOR_THREAD;
        }
    } else if (port->ip_sync_link_state == PORT_SYNC_LINK_WORKLOOP_KNOTE) {
        /* Case 4. */
        inheritor = filt_ipc_kqueue_turnstile(mqueue->imq_inheritor_knote);
    } else if (port->ip_sync_link_state == PORT_SYNC_LINK_WORKLOOP_STASH) {
        /* Case 5. */
        inheritor = mqueue->imq_inheritor_turnstile;
    } else if (port->ip_sync_link_state == PORT_SYNC_LINK_RCV_THREAD) {
        /* Case 6. */
        if (prioritize_launch) {
            inheritor = port->ip_messages.imq_inheritor_thread_ref;
            inheritor_flags = TURNSTILE_INHERITOR_THREAD;
        }
    } else if ((kn = SLIST_FIRST(&mqueue->imq_klist))) {
        /* Case 7. Push on a workloop that is interested */
        if (filt_machport_kqueue_has_turnstile(kn)) {
            assert(port->ip_sync_link_state == PORT_SYNC_LINK_ANY);
            inheritor = filt_ipc_kqueue_turnstile(kn);
        }
    }

    turnstile_update_inheritor(send_turnstile, inheritor,
        flags | inheritor_flags);
}

/*
 * Routine: ipc_port_send_turnstile_prepare
 * Purpose:
 *     Get a reference on port's send turnstile, if
 *     port does not have a send turnstile then allocate one.
 *
 * Conditions:
 *     Nothing is locked.
 */
void
ipc_port_send_turnstile_prepare(ipc_port_t port)
{
    struct turnstile *turnstile = TURNSTILE_NULL;
    struct turnstile *send_turnstile = TURNSTILE_NULL;

retry_alloc:
    imq_lock(&port->ip_messages);

    if (port_send_turnstile(port) == NULL ||
        port_send_turnstile(port)->ts_port_ref == 0) {
        if (turnstile == TURNSTILE_NULL) {
            imq_unlock(&port->ip_messages);
            turnstile = turnstile_alloc();
            goto retry_alloc;
        }

        send_turnstile = turnstile_prepare((uintptr_t)port,
            port_send_turnstile_address(port),
            turnstile, TURNSTILE_SYNC_IPC);
        turnstile = TURNSTILE_NULL;

        ipc_port_send_update_inheritor(port, send_turnstile,
            TURNSTILE_IMMEDIATE_UPDATE);

        /* turnstile complete will be called in ipc_port_send_turnstile_complete */
    }

    /* Increment turnstile counter */
    port_send_turnstile(port)->ts_port_ref++;
    imq_unlock(&port->ip_messages);

    if (send_turnstile) {
        turnstile_update_inheritor_complete(send_turnstile,
            TURNSTILE_INTERLOCK_NOT_HELD);
    }
    if (turnstile != TURNSTILE_NULL) {
        turnstile_deallocate(turnstile);
    }
}
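
/*
 * The retry_alloc loop above is the classic "allocate outside the lock"
 * pattern: blocking allocation can't happen under the mqueue lock, so the
 * lock is dropped, memory is allocated, and the condition is re-checked
 * from scratch in case another thread won the race; a losing thread's spare
 * allocation is discarded at the end. A standalone pthread sketch of the
 * same shape (illustrative only; names are hypothetical):
 */
#if 0 /* illustrative example, not compiled into the kernel */
#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static void *shared_resource;

static void
resource_prepare(void)
{
    void *fresh = NULL;

retry_alloc:
    pthread_mutex_lock(&lock);
    if (shared_resource == NULL) {
        if (fresh == NULL) {
            /* can't allocate while holding the lock: drop, alloc, retry */
            pthread_mutex_unlock(&lock);
            fresh = malloc(64);
            goto retry_alloc;
        }
        shared_resource = fresh;    /* install our allocation */
        fresh = NULL;
    }
    pthread_mutex_unlock(&lock);

    free(fresh);    /* lost the race: discard the spare (free(NULL) is ok) */
}
#endif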
1534
1535
1536 /*
1537 * Routine: ipc_port_send_turnstile_complete
1538 * Purpose:
1539 * Drop a ref on the port's send turnstile, if the
1540 * ref becomes zero, deallocate the turnstile.
1541 *
1542 * Conditions:
1543 * The space might be locked, use safe deallocate.
1544 */
1545 void
1546 ipc_port_send_turnstile_complete(ipc_port_t port)
1547 {
1548 struct turnstile *turnstile = TURNSTILE_NULL;
1549
1550 /* Drop turnstile count on dest port */
1551 imq_lock(&port->ip_messages);
1552
1553 port_send_turnstile(port)->ts_port_ref--;
1554 if (port_send_turnstile(port)->ts_port_ref == 0) {
1555 turnstile_complete((uintptr_t)port, port_send_turnstile_address(port),
1556 &turnstile, TURNSTILE_SYNC_IPC);
1557 assert(turnstile != TURNSTILE_NULL);
1558 }
1559 imq_unlock(&port->ip_messages);
1560 turnstile_cleanup();
1561
1562 if (turnstile != TURNSTILE_NULL) {
1563 turnstile_deallocate_safe(turnstile);
1564 turnstile = TURNSTILE_NULL;
1565 }
1566 }
1567
1568 /*
1569 * Routine: ipc_port_rcv_turnstile
1570 * Purpose:
1571 * Get the port's receive turnstile
1572 *
1573 * Conditions:
1574 * mqueue locked or thread waiting on turnstile is locked.
1575 */
1576 static struct turnstile *
1577 ipc_port_rcv_turnstile(ipc_port_t port)
1578 {
1579 return *port_rcv_turnstile_address(port);
1580 }
1581
1582
1583 /*
1584 * Routine: ipc_port_link_special_reply_port
1585 * Purpose:
1586 * Link the special reply port with the destination port.
1587 * Allocates turnstile to dest port.
1588 *
1589 * Conditions:
1590 * Nothing is locked.
1591 */
1592 void
1593 ipc_port_link_special_reply_port(
1594 ipc_port_t special_reply_port,
1595 ipc_port_t dest_port,
1596 boolean_t sync_bootstrap_checkin)
1597 {
1598 boolean_t drop_turnstile_ref = FALSE;
1599
1600 /* Check if dest_port needs a turnstile */
1601 ipc_port_send_turnstile_prepare(dest_port);
1602
1603 /* Lock the special reply port and establish the linkage */
1604 ip_lock(special_reply_port);
1605 imq_lock(&special_reply_port->ip_messages);
1606
1607 if (sync_bootstrap_checkin && special_reply_port->ip_specialreply) {
1608 special_reply_port->ip_sync_bootstrap_checkin = 1;
1609 }
1610
1611 /* Check if we need to drop the acquired turnstile ref on dest port */
1612 if (!special_reply_port->ip_specialreply ||
1613 special_reply_port->ip_sync_link_state != PORT_SYNC_LINK_ANY ||
1614 special_reply_port->ip_sync_inheritor_port != IPC_PORT_NULL) {
1615 drop_turnstile_ref = TRUE;
1616 } else {
1617 /* take a reference on dest_port */
1618 ip_reference(dest_port);
1619 special_reply_port->ip_sync_inheritor_port = dest_port;
1620 special_reply_port->ip_sync_link_state = PORT_SYNC_LINK_PORT;
1621 }
1622
1623 imq_unlock(&special_reply_port->ip_messages);
1624 ip_unlock(special_reply_port);
1625
1626 if (drop_turnstile_ref) {
1627 ipc_port_send_turnstile_complete(dest_port);
1628 }
1629
1630 return;
1631 }
1632
1633 #if DEVELOPMENT || DEBUG
1634 inline void
1635 ipc_special_reply_port_bits_reset(ipc_port_t special_reply_port)
1636 {
1637 special_reply_port->ip_srp_lost_link = 0;
1638 special_reply_port->ip_srp_msg_sent = 0;
1639 }
1640
1641 static inline void
1642 ipc_special_reply_port_msg_sent_reset(ipc_port_t special_reply_port)
1643 {
1644 if (special_reply_port->ip_specialreply == 1) {
1645 special_reply_port->ip_srp_msg_sent = 0;
1646 }
1647 }
1648
1649 inline void
1650 ipc_special_reply_port_msg_sent(ipc_port_t special_reply_port)
1651 {
1652 if (special_reply_port->ip_specialreply == 1) {
1653 special_reply_port->ip_srp_msg_sent = 1;
1654 }
1655 }
1656
1657 static inline void
1658 ipc_special_reply_port_lost_link(ipc_port_t special_reply_port)
1659 {
1660 if (special_reply_port->ip_specialreply == 1 && special_reply_port->ip_srp_msg_sent == 0) {
1661 special_reply_port->ip_srp_lost_link = 1;
1662 }
1663 }
1664
1665 #else /* DEVELOPMENT || DEBUG */
1666 inline void
1667 ipc_special_reply_port_bits_reset(__unused ipc_port_t special_reply_port)
1668 {
1669 return;
1670 }
1671
1672 static inline void
1673 ipc_special_reply_port_msg_sent_reset(__unused ipc_port_t special_reply_port)
1674 {
1675 return;
1676 }
1677
1678 inline void
1679 ipc_special_reply_port_msg_sent(__unused ipc_port_t special_reply_port)
1680 {
1681 return;
1682 }
1683
1684 static inline void
1685 ipc_special_reply_port_lost_link(__unused ipc_port_t special_reply_port)
1686 {
1687 return;
1688 }
1689 #endif /* DEVELOPMENT || DEBUG */
1690
1691 /*
1692 * Routine: ipc_port_adjust_special_reply_port_locked
1693 * Purpose:
1694 * If the special port has a turnstile, update its inheritor.
1695 * Condition:
1696 * Special reply port locked on entry.
1697 * Special reply port unlocked on return.
1698 * The passed in port is a special reply port.
1699 * Returns:
1700 * None.
1701 */
1702 void
1703 ipc_port_adjust_special_reply_port_locked(
1704 ipc_port_t special_reply_port,
1705 struct knote *kn,
1706 uint8_t flags,
1707 boolean_t get_turnstile)
1708 {
1709 ipc_port_t dest_port = IPC_PORT_NULL;
1710 int sync_link_state = PORT_SYNC_LINK_NO_LINKAGE;
1711 turnstile_inheritor_t inheritor = TURNSTILE_INHERITOR_NULL;
1712 struct turnstile *ts = TURNSTILE_NULL;
1713
1714 ip_lock_held(special_reply_port); // ip_sync_link_state is touched
1715 imq_lock(&special_reply_port->ip_messages);
1716
1717 if (!special_reply_port->ip_specialreply) {
1718 // only mach_msg_receive_results_complete() calls this with any port
1719 assert(get_turnstile);
1720 goto not_special;
1721 }
1722
1723 if (flags & IPC_PORT_ADJUST_SR_RECEIVED_MSG) {
1724 ipc_special_reply_port_msg_sent_reset(special_reply_port);
1725 }
1726
1727 if (flags & IPC_PORT_ADJUST_UNLINK_THREAD) {
1728 special_reply_port->ip_messages.imq_srp_owner_thread = NULL;
1729 }
1730
1731 if (flags & IPC_PORT_ADJUST_RESET_BOOSTRAP_CHECKIN) {
1732 special_reply_port->ip_sync_bootstrap_checkin = 0;
1733 }
1734
1735 /* Check if the special reply port is marked non-special */
1736 if (special_reply_port->ip_sync_link_state == PORT_SYNC_LINK_ANY) {
1737 not_special:
1738 if (get_turnstile) {
1739 turnstile_complete((uintptr_t)special_reply_port,
1740 port_rcv_turnstile_address(special_reply_port), NULL, TURNSTILE_SYNC_IPC);
1741 }
1742 imq_unlock(&special_reply_port->ip_messages);
1743 ip_unlock(special_reply_port);
1744 if (get_turnstile) {
1745 turnstile_cleanup();
1746 }
1747 return;
1748 }
1749
1750 if (flags & IPC_PORT_ADJUST_SR_LINK_WORKLOOP) {
1751 if (ITH_KNOTE_VALID(kn, MACH_MSG_TYPE_PORT_SEND_ONCE)) {
1752 inheritor = filt_machport_stash_port(kn, special_reply_port,
1753 &sync_link_state);
1754 }
1755 } else if (flags & IPC_PORT_ADJUST_SR_ALLOW_SYNC_LINKAGE) {
1756 sync_link_state = PORT_SYNC_LINK_ANY;
1757 }
1758
1759 /* Check if need to break linkage */
1760 if (!get_turnstile && sync_link_state == PORT_SYNC_LINK_NO_LINKAGE &&
1761 special_reply_port->ip_sync_link_state == PORT_SYNC_LINK_NO_LINKAGE) {
1762 imq_unlock(&special_reply_port->ip_messages);
1763 ip_unlock(special_reply_port);
1764 return;
1765 }
1766
1767 switch (special_reply_port->ip_sync_link_state) {
1768 case PORT_SYNC_LINK_PORT:
1769 dest_port = special_reply_port->ip_sync_inheritor_port;
1770 special_reply_port->ip_sync_inheritor_port = IPC_PORT_NULL;
1771 break;
1772 case PORT_SYNC_LINK_WORKLOOP_KNOTE:
1773 special_reply_port->ip_sync_inheritor_knote = NULL;
1774 break;
1775 case PORT_SYNC_LINK_WORKLOOP_STASH:
1776 special_reply_port->ip_sync_inheritor_ts = NULL;
1777 break;
1778 }
1779
1780 special_reply_port->ip_sync_link_state = sync_link_state;
1781
1782 switch (sync_link_state) {
1783 case PORT_SYNC_LINK_WORKLOOP_KNOTE:
1784 special_reply_port->ip_sync_inheritor_knote = kn;
1785 break;
1786 case PORT_SYNC_LINK_WORKLOOP_STASH:
1787 special_reply_port->ip_sync_inheritor_ts = inheritor;
1788 break;
1789 case PORT_SYNC_LINK_NO_LINKAGE:
1790 if (flags & IPC_PORT_ADJUST_SR_ENABLE_EVENT) {
1791 ipc_special_reply_port_lost_link(special_reply_port);
1792 }
1793 break;
1794 }
1795
1796 /* Get thread's turnstile donated to special reply port */
1797 if (get_turnstile) {
1798 turnstile_complete((uintptr_t)special_reply_port,
1799 port_rcv_turnstile_address(special_reply_port), NULL, TURNSTILE_SYNC_IPC);
1800 } else {
1801 ts = ipc_port_rcv_turnstile(special_reply_port);
1802 if (ts) {
1803 turnstile_reference(ts);
1804 ipc_port_recv_update_inheritor(special_reply_port, ts,
1805 TURNSTILE_IMMEDIATE_UPDATE);
1806 }
1807 }
1808
1809 imq_unlock(&special_reply_port->ip_messages);
1810 ip_unlock(special_reply_port);
1811
1812 if (get_turnstile) {
1813 turnstile_cleanup();
1814 } else if (ts) {
1815 /* Call turnstile cleanup after dropping the interlock */
1816 turnstile_update_inheritor_complete(ts, TURNSTILE_INTERLOCK_NOT_HELD);
1817 turnstile_deallocate_safe(ts);
1818 }
1819
1820 /* Release the ref on the dest port and its turnstile */
1821 if (dest_port) {
1822 ipc_port_send_turnstile_complete(dest_port);
1823 /* release the reference on the dest port */
1824 ip_release(dest_port);
1825 }
1826 }
1827
1828 /*
1829 * Routine: ipc_port_adjust_special_reply_port
1830 * Purpose:
1831 * If the special port has a turnstile, update its inheritor.
1832 * Condition:
1833 * Nothing locked.
1834 * Returns:
1835 * None.
1836 */
1837 void
1838 ipc_port_adjust_special_reply_port(
1839 ipc_port_t port,
1840 uint8_t flags)
1841 {
1842 if (port->ip_specialreply) {
1843 ip_lock(port);
1844 ipc_port_adjust_special_reply_port_locked(port, NULL, flags, FALSE);
1845 }
1846 }
1847
1848 /*
1849 * Routine: ipc_port_adjust_sync_link_state_locked
1850 * Purpose:
1851 * Update the sync link state of the port and the
1852 * turnstile inheritor.
1853 * Condition:
1854 * Port and mqueue locked on entry.
1855 * Port and mqueue locked on return.
1856 * Returns:
1857 * None.
1858 */
1859 void
1860 ipc_port_adjust_sync_link_state_locked(
1861 ipc_port_t port,
1862 int sync_link_state,
1863 turnstile_inheritor_t inheritor)
1864 {
1865 switch (port->ip_sync_link_state) {
1866 case PORT_SYNC_LINK_RCV_THREAD:
1867 /* deallocate the thread reference for the inheritor */
1868 thread_deallocate_safe(port->ip_messages.imq_inheritor_thread_ref);
1869 /* Fall through */
1870
1871 default:
1872 klist_init(&port->ip_messages.imq_klist);
1873 }
1874
1875 switch (sync_link_state) {
1876 case PORT_SYNC_LINK_WORKLOOP_KNOTE:
1877 port->ip_messages.imq_inheritor_knote = inheritor;
1878 break;
1879 case PORT_SYNC_LINK_WORKLOOP_STASH:
1880 port->ip_messages.imq_inheritor_turnstile = inheritor;
1881 break;
1882 case PORT_SYNC_LINK_RCV_THREAD:
1883 /* The thread could exit without clearing port state, take a thread ref */
1884 thread_reference((thread_t)inheritor);
1885 port->ip_messages.imq_inheritor_thread_ref = inheritor;
1886 break;
1887 default:
1888 klist_init(&port->ip_messages.imq_klist);
1889 sync_link_state = PORT_SYNC_LINK_ANY;
1890 }
1891
1892 port->ip_sync_link_state = sync_link_state;
1893 }
1894
1895
1896 /*
1897 * Routine: ipc_port_adjust_port_locked
1898 * Purpose:
1899 * If the port has a turnstile, update its inheritor.
1900 * Condition:
1901 * Port locked on entry.
1902 * Port unlocked on return.
1903 * Returns:
1904 * None.
1905 */
1906 void
1907 ipc_port_adjust_port_locked(
1908 ipc_port_t port,
1909 struct knote *kn,
1910 boolean_t sync_bootstrap_checkin)
1911 {
1912 int sync_link_state = PORT_SYNC_LINK_ANY;
1913 turnstile_inheritor_t inheritor = TURNSTILE_INHERITOR_NULL;
1914
1915 ip_lock_held(port); // ip_sync_link_state is touched
1916 imq_held(&port->ip_messages);
1917
1918 assert(!port->ip_specialreply);
1919
1920 if (kn) {
1921 inheritor = filt_machport_stash_port(kn, port, &sync_link_state);
1922 if (sync_link_state == PORT_SYNC_LINK_WORKLOOP_KNOTE) {
1923 inheritor = kn;
1924 }
1925 } else if (sync_bootstrap_checkin) {
1926 inheritor = current_thread();
1927 sync_link_state = PORT_SYNC_LINK_RCV_THREAD;
1928 }
1929
1930 ipc_port_adjust_sync_link_state_locked(port, sync_link_state, inheritor);
1931 port->ip_sync_bootstrap_checkin = 0;
1932
1933 ipc_port_send_turnstile_recompute_push_locked(port);
1934 /* port and mqueue unlocked */
1935 }
1936
1937 /*
1938 * Routine: ipc_port_clear_sync_rcv_thread_boost_locked
1939 * Purpose:
1940 * If the port is pushing on rcv thread, clear it.
1941 * Condition:
1942 * Port locked on entry
1943 * mqueue is not locked.
1944 * Port unlocked on return.
1945 * Returns:
1946 * None.
1947 */
1948 void
1949 ipc_port_clear_sync_rcv_thread_boost_locked(
1950 ipc_port_t port)
1951 {
1952 ip_lock_held(port); // ip_sync_link_state is touched
1953
1954 if (port->ip_sync_link_state != PORT_SYNC_LINK_RCV_THREAD) {
1955 ip_unlock(port);
1956 return;
1957 }
1958
1959 imq_lock(&port->ip_messages);
1960 ipc_port_adjust_sync_link_state_locked(port, PORT_SYNC_LINK_ANY, NULL);
1961
1962 ipc_port_send_turnstile_recompute_push_locked(port);
1963 /* port and mqueue unlocked */
1964 }
1965
1966 /*
1967 * Routine: ipc_port_add_watchport_elem_locked
1968 * Purpose:
1969 * Transfer the turnstile boost of watchport to task calling exec.
1970 * Condition:
1971 * Port locked on entry.
1972 * Port unlocked on return.
1973 * Returns:
1974 * KERN_SUCESS on success.
1975 * KERN_FAILURE otherwise.
1976 */
1977 kern_return_t
1978 ipc_port_add_watchport_elem_locked(
1979 ipc_port_t port,
1980 struct task_watchport_elem *watchport_elem,
1981 struct task_watchport_elem **old_elem)
1982 {
1983 ip_lock_held(port);
1984 imq_held(&port->ip_messages);
1985
1986 /* Watchport boost only works for non-special active ports mapped in an ipc space */
1987 if (!ip_active(port) || port->ip_specialreply ||
1988 port->ip_receiver_name == MACH_PORT_NULL) {
1989 imq_unlock(&port->ip_messages);
1990 ip_unlock(port);
1991 return KERN_FAILURE;
1992 }
1993
1994 if (port->ip_sync_link_state != PORT_SYNC_LINK_ANY) {
1995 /* Sever the linkage if the port was pushing on knote */
1996 ipc_port_adjust_sync_link_state_locked(port, PORT_SYNC_LINK_ANY, NULL);
1997 }
1998
1999 *old_elem = ipc_port_update_watchport_elem(port, watchport_elem);
2000
2001 ipc_port_send_turnstile_recompute_push_locked(port);
2002 /* port and mqueue unlocked */
2003 return KERN_SUCCESS;
2004 }
2005
2006 /*
2007 * Routine: ipc_port_clear_watchport_elem_internal_conditional_locked
2008 * Purpose:
2009 * Remove the turnstile boost of the watchport and recompute the push.
2010 * Conditions:
2011 * Port locked on entry.
2012 * Port unlocked on return.
2013 * Returns:
2014 * KERN_SUCCESS on success.
2015 * KERN_FAILURE otherwise.
2016 */
2017 kern_return_t
2018 ipc_port_clear_watchport_elem_internal_conditional_locked(
2019 ipc_port_t port,
2020 struct task_watchport_elem *watchport_elem)
2021 {
2022 ip_lock_held(port);
2023 imq_held(&port->ip_messages);
2024
2025 if (ipc_port_watchport_elem(port) != watchport_elem) {
2026 imq_unlock(&port->ip_messages);
2027 ip_unlock(port);
2028 return KERN_FAILURE;
2029 }
2030
2031 ipc_port_clear_watchport_elem_internal(port);
2032 ipc_port_send_turnstile_recompute_push_locked(port);
2033 /* port and mqueue unlocked */
2034 return KERN_SUCCESS;
2035 }
2036
2037 /*
2038 * Routine: ipc_port_replace_watchport_elem_conditional_locked
2039 * Purpose:
2040 * Replace the turnstile boost of the watchport and recompute the push.
2041 * Conditions:
2042 * Port locked on entry.
2043 * Port unlocked on return.
2044 * Returns:
2045 * KERN_SUCCESS on success.
2046 * KERN_FAILURE otherwise.
2047 */
2048 kern_return_t
2049 ipc_port_replace_watchport_elem_conditional_locked(
2050 ipc_port_t port,
2051 struct task_watchport_elem *old_watchport_elem,
2052 struct task_watchport_elem *new_watchport_elem)
2053 {
2054 ip_lock_held(port);
2055 imq_held(&port->ip_messages);
2056
2057 if (ipc_port_watchport_elem(port) != old_watchport_elem) {
2058 imq_unlock(&port->ip_messages);
2059 ip_unlock(port);
2060 return KERN_FAILURE;
2061 }
2062
2063 ipc_port_update_watchport_elem(port, new_watchport_elem);
2064 ipc_port_send_turnstile_recompute_push_locked(port);
2065 /* port and mqueue unlocked */
2066 return KERN_SUCCESS;
2067 }
2068
2069 /*
2070 * Routine: ipc_port_clear_watchport_elem_internal
2071 * Purpose:
2072 * Remove the turnstile boost of the watchport.
2073 * Conditions:
2074 * Port locked on entry.
2075 * Port locked on return.
2076 * Returns:
2077 * Old task_watchport_elem returned.
2078 */
2079 struct task_watchport_elem *
2080 ipc_port_clear_watchport_elem_internal(
2081 ipc_port_t port)
2082 {
2083 ip_lock_held(port);
2084 imq_held(&port->ip_messages);
2085
2086 return ipc_port_update_watchport_elem(port, NULL);
2087 }
2088
2089 /*
2090 * Routine: ipc_port_send_turnstile_recompute_push_locked
2091 * Purpose:
2092 * Update send turnstile inheritor of port and recompute the push.
2093 * Conditions:
2094 * Port locked on entry.
2095 * Port unlocked on return.
2096 * Returns:
2097 * None.
2098 */
2099 static void
2100 ipc_port_send_turnstile_recompute_push_locked(
2101 ipc_port_t port)
2102 {
2103 struct turnstile *send_turnstile = port_send_turnstile(port);
2104 if (send_turnstile) {
2105 turnstile_reference(send_turnstile);
2106 ipc_port_send_update_inheritor(port, send_turnstile,
2107 TURNSTILE_IMMEDIATE_UPDATE);
2108 }
2109 imq_unlock(&port->ip_messages);
2110 ip_unlock(port);
2111
2112 if (send_turnstile) {
2113 turnstile_update_inheritor_complete(send_turnstile,
2114 TURNSTILE_INTERLOCK_NOT_HELD);
2115 turnstile_deallocate_safe(send_turnstile);
2116 }
2117 }
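/*
 * Editorial note: the routine above follows the kernel's two-phase
 * turnstile update protocol. ipc_port_send_update_inheritor() with
 * TURNSTILE_IMMEDIATE_UPDATE retargets the inheritor while the port
 * and mqueue locks are held; turnstile_update_inheritor_complete()
 * with TURNSTILE_INTERLOCK_NOT_HELD then propagates the priority push
 * after both locks are dropped, because that propagation may need to
 * acquire thread and turnstile locks of its own. The
 * turnstile_reference()/turnstile_deallocate_safe() pair keeps the
 * turnstile alive across the unlocked window.
 */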
2118
2119 /*
2120 * Routine: ipc_port_get_watchport_inheritor
2121 * Purpose:
2122 * Returns the inheritor for the watchport.
2123 *
2124 * Conditions:
2125 * mqueue locked.
2126 * Returns:
2127 * watchport inheritor.
2128 */
2129 static thread_t
2130 ipc_port_get_watchport_inheritor(
2131 ipc_port_t port)
2132 {
2133 imq_held(&port->ip_messages);
2134 return ipc_port_watchport_elem(port)->twe_task->watchports->tw_thread;
2135 }
2136
2137 /*
2138 * Routine: ipc_port_impcount_delta
2139 * Purpose:
2140 * Adjust only the importance count associated with a port.
2141 * If there are any adjustments to be made to the receiver task,
2142 * those are handled elsewhere.
2143 *
2144 * For now, be defensive during deductions to make sure the
2145 * impcount for the port doesn't underflow zero. This will
2146 * go away when the port boost addition is made atomic (see
2147 * note in ipc_port_importance_delta()).
2148 * Conditions:
2149 * The port is referenced and locked.
2150 * Nothing else is locked.
2151 */
2152 mach_port_delta_t
2153 ipc_port_impcount_delta(
2154 ipc_port_t port,
2155 mach_port_delta_t delta,
2156 ipc_port_t __unused base)
2157 {
2158 mach_port_delta_t absdelta;
2159
2160 if (!ip_active(port)) {
2161 return 0;
2162 }
2163
2164 /* adding/doing nothing is easy */
2165 if (delta >= 0) {
2166 port->ip_impcount += delta;
2167 return delta;
2168 }
2169
2170 absdelta = 0 - delta;
2171 if (port->ip_impcount >= absdelta) {
2172 port->ip_impcount -= absdelta;
2173 return delta;
2174 }
2175
2176 #if (DEVELOPMENT || DEBUG)
2177 if (port->ip_receiver_name != MACH_PORT_NULL) {
2178 task_t target_task = port->ip_receiver->is_task;
2179 ipc_importance_task_t target_imp = target_task->task_imp_base;
2180 const char *target_procname;
2181 int target_pid;
2182
2183 if (target_imp != IIT_NULL) {
2184 target_procname = target_imp->iit_procname;
2185 target_pid = target_imp->iit_bsd_pid;
2186 } else {
2187 target_procname = "unknown";
2188 target_pid = -1;
2189 }
2190 printf("Over-release of importance assertions for port 0x%x receiver pid %d (%s), "
2191 "dropping %d assertion(s) but port only has %d remaining.\n",
2192 port->ip_receiver_name,
2193 target_pid, target_procname,
2194 absdelta, port->ip_impcount);
2195 } else if (base != IP_NULL) {
2196 task_t target_task = base->ip_receiver->is_task;
2197 ipc_importance_task_t target_imp = target_task->task_imp_base;
2198 const char *target_procname;
2199 int target_pid;
2200
2201 if (target_imp != IIT_NULL) {
2202 target_procname = target_imp->iit_procname;
2203 target_pid = target_imp->iit_bsd_pid;
2204 } else {
2205 target_procname = "unknown";
2206 target_pid = -1;
2207 }
2208 printf("Over-release of importance assertions for port 0x%lx "
2209 "enqueued on port 0x%x with receiver pid %d (%s), "
2210 "dropping %d assertion(s) but port only has %d remaining.\n",
2211 (unsigned long)VM_KERNEL_UNSLIDE_OR_PERM((uintptr_t)port),
2212 base->ip_receiver_name,
2213 target_pid, target_procname,
2214 absdelta, port->ip_impcount);
2215 }
2216 #endif
2217
2218 delta = 0 - port->ip_impcount;
2219 port->ip_impcount = 0;
2220 return delta;
2221 }
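/*
 * Worked example (editorial): with port->ip_impcount == 2, a call of
 * ipc_port_impcount_delta(port, -5, base) cannot honor the full
 * deduction. After the DEVELOPMENT/DEBUG over-release report, the
 * clamping path above runs:
 *
 *	delta = 0 - port->ip_impcount;	-> delta == -2
 *	port->ip_impcount = 0;
 *	return delta;
 *
 * so the caller learns that only 2 assertions were actually dropped
 * and can propagate that, rather than the requested -5.
 */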
2222
2223 /*
2224 * Routine: ipc_port_importance_delta_internal
2225 * Purpose:
2226 * Adjust the importance count through the given port.
2227 * If the port is in transit, apply the delta throughout
2228 * the chain. Determine if there is a task at the
2229 * base of the chain that wants/needs to be adjusted,
2230 * and if so, apply the delta.
2231 * Conditions:
2232 * The port is referenced and locked on entry.
2233 * Importance may be locked.
2234 * Nothing else is locked.
2235 * The lock may be dropped on exit.
2236 * Returns TRUE if lock was dropped.
2237 */
2238 #if IMPORTANCE_INHERITANCE
2239
2240 boolean_t
2241 ipc_port_importance_delta_internal(
2242 ipc_port_t port,
2243 natural_t options,
2244 mach_port_delta_t *deltap,
2245 ipc_importance_task_t *imp_task)
2246 {
2247 ipc_port_t next, base;
2248 boolean_t dropped = FALSE;
2249
2250 *imp_task = IIT_NULL;
2251
2252 if (*deltap == 0) {
2253 return FALSE;
2254 }
2255
2256 assert(options == IPID_OPTION_NORMAL || options == IPID_OPTION_SENDPOSSIBLE);
2257
2258 base = port;
2259
2260 /* if port is in transit, have to search for end of chain */
2261 if (ip_active(port) &&
2262 port->ip_destination != IP_NULL &&
2263 port->ip_receiver_name == MACH_PORT_NULL) {
2264 dropped = TRUE;
2265
2266 ip_unlock(port);
2267 ipc_port_multiple_lock(); /* massive serialization */
2268 ip_lock(base);
2269
2270 while (ip_active(base) &&
2271 base->ip_destination != IP_NULL &&
2272 base->ip_receiver_name == MACH_PORT_NULL) {
2273 base = base->ip_destination;
2274 ip_lock(base);
2275 }
2276 ipc_port_multiple_unlock();
2277 }
2278
2279 /*
2280 * If the port lock is dropped because the port is in transit, there is a
2281 * race window where another thread can drain messages and/or fire a
2282 * send possible notification before we get here.
2283 *
2284 * We solve this race by checking to see if our caller armed the send
2285 * possible notification, whether or not it's been fired yet, and
2286 * whether or not we've already set the port's ip_spimportant bit. If
2287 * we don't need a send-possible boost, then we'll just apply a
2288 * harmless 0-boost to the port.
2289 */
2290 if (options & IPID_OPTION_SENDPOSSIBLE) {
2291 assert(*deltap == 1);
2292 if (port->ip_sprequests && port->ip_spimportant == 0) {
2293 port->ip_spimportant = 1;
2294 } else {
2295 *deltap = 0;
2296 }
2297 }
2298
2299 /* unlock down to the base, adjusting boost(s) at each level */
2300 for (;;) {
2301 *deltap = ipc_port_impcount_delta(port, *deltap, base);
2302
2303 if (port == base) {
2304 break;
2305 }
2306
2307 /* port is in transit */
2308 assert(port->ip_tempowner == 0);
2309 next = port->ip_destination;
2310 ip_unlock(port);
2311 port = next;
2312 }
2313
2314 /* find the task (if any) to boost according to the base */
2315 if (ip_active(base)) {
2316 if (base->ip_tempowner != 0) {
2317 if (IIT_NULL != base->ip_imp_task) {
2318 *imp_task = base->ip_imp_task;
2319 }
2320 /* otherwise don't boost */
2321 } else if (base->ip_receiver_name != MACH_PORT_NULL) {
2322 ipc_space_t space = base->ip_receiver;
2323
2324 /* only spaces with boost-accepting tasks */
2325 if (space->is_task != TASK_NULL &&
2326 ipc_importance_task_is_any_receiver_type(space->is_task->task_imp_base)) {
2327 *imp_task = space->is_task->task_imp_base;
2328 }
2329 }
2330 }
2331
2332 /*
2333 * Only the base is locked. If we have to hold or drop task
2334 * importance assertions, we'll have to drop that lock as well.
2335 */
2336 if (*imp_task != IIT_NULL) {
2337 /* take a reference before unlocking base */
2338 ipc_importance_task_reference(*imp_task);
2339 }
2340
2341 if (dropped == TRUE) {
2342 ip_unlock(base);
2343 }
2344
2345 return dropped;
2346 }
2347 #endif /* IMPORTANCE_INHERITANCE */
2348
2349 /*
2350 * Routine: ipc_port_importance_delta
2351 * Purpose:
2352 * Adjust the importance count through the given port.
2353 * If the port is in transit, apply the delta throughout
2354 * the chain.
2355 *
2356 * If there is a task at the base of the chain that wants/needs
2357 * to be adjusted, apply the delta.
2358 * Conditions:
2359 * The port is referenced and locked on entry.
2360 * Nothing else is locked.
2361 * The lock may be dropped on exit.
2362 * Returns TRUE if lock was dropped.
2363 */
2364 #if IMPORTANCE_INHERITANCE
2365
2366 boolean_t
2367 ipc_port_importance_delta(
2368 ipc_port_t port,
2369 natural_t options,
2370 mach_port_delta_t delta)
2371 {
2372 ipc_importance_task_t imp_task = IIT_NULL;
2373 boolean_t dropped;
2374
2375 dropped = ipc_port_importance_delta_internal(port, options, &delta, &imp_task);
2376
2377 if (IIT_NULL == imp_task || delta == 0) {
2378 return dropped;
2379 }
2380
2381 if (!dropped) {
2382 ip_unlock(port);
2383 }
2384
2385 assert(ipc_importance_task_is_any_receiver_type(imp_task));
2386
2387 if (delta > 0) {
2388 ipc_importance_task_hold_internal_assertion(imp_task, delta);
2389 } else {
2390 ipc_importance_task_drop_internal_assertion(imp_task, -delta);
2391 }
2392
2393 ipc_importance_task_release(imp_task);
2394 return TRUE;
2395 }
2396 #endif /* IMPORTANCE_INHERITANCE */
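/*
 * Usage sketch (editorial; hypothetical caller): dropping one
 * previously-donated importance boost through a port. Note the
 * asymmetric unlock implied by the header comment above: the routine
 * returns TRUE whenever it dropped the port lock itself, so the
 * caller unlocks only on FALSE.
 *
 *	ip_lock(port);
 *	if (!ipc_port_importance_delta(port, IPID_OPTION_NORMAL, -1)) {
 *		ip_unlock(port);
 *	}
 */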
2397
2398 /*
2399 * Routine: ipc_port_make_send_locked
2400 * Purpose:
2401 * Make a naked send right from a receive right.
2402 *
2403 * Conditions:
2404 * port locked and active.
2405 */
2406 ipc_port_t
2407 ipc_port_make_send_locked(
2408 ipc_port_t port)
2409 {
2410 require_ip_active(port);
2411 port->ip_mscount++;
2412 port->ip_srights++;
2413 ip_reference(port);
2414 return port;
2415 }
2416
2417 /*
2418 * Routine: ipc_port_make_send
2419 * Purpose:
2420 * Make a naked send right from a receive right.
2421 */
2422
2423 ipc_port_t
2424 ipc_port_make_send(
2425 ipc_port_t port)
2426 {
2427 if (!IP_VALID(port)) {
2428 return port;
2429 }
2430
2431 ip_lock(port);
2432 if (ip_active(port)) {
2433 ipc_port_make_send_locked(port);
2434 ip_unlock(port);
2435 return port;
2436 }
2437 ip_unlock(port);
2438 return IP_DEAD;
2439 }
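/*
 * Usage sketch (editorial; hypothetical caller): every successful
 * ipc_port_make_send() bumps ip_mscount and ip_srights and must
 * eventually be balanced by an ipc_port_release_send() (below), or
 * the port can never deliver a no-senders notification:
 *
 *	ipc_port_t sright = ipc_port_make_send(port);
 *	if (IP_VALID(sright)) {
 *		... hand the right off, or drop it:
 *		ipc_port_release_send(sright);
 *	}
 */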
2440
2441 /*
2442 * Routine: ipc_port_copy_send_locked
2443 * Purpose:
2444 * Make a naked send right from another naked send right.
2445 * Conditions:
2446 * port locked and active.
2447 */
2448 void
2449 ipc_port_copy_send_locked(
2450 ipc_port_t port)
2451 {
2452 assert(port->ip_srights > 0);
2453 port->ip_srights++;
2454 ip_reference(port);
2455 }
2456
2457 /*
2458 * Routine: ipc_port_copy_send
2459 * Purpose:
2460 * Make a naked send right from another naked send right.
2461 * IP_NULL -> IP_NULL
2462 * IP_DEAD -> IP_DEAD
2463 * dead port -> IP_DEAD
2464 * live port -> port + ref
2465 * Conditions:
2466 * Nothing locked except possibly a space.
2467 */
2468
2469 ipc_port_t
2470 ipc_port_copy_send(
2471 ipc_port_t port)
2472 {
2473 ipc_port_t sright;
2474
2475 if (!IP_VALID(port)) {
2476 return port;
2477 }
2478
2479 ip_lock(port);
2480 if (ip_active(port)) {
2481 ipc_port_copy_send_locked(port);
2482 sright = port;
2483 } else {
2484 sright = IP_DEAD;
2485 }
2486 ip_unlock(port);
2487
2488 return sright;
2489 }
2490
2491 /*
2492 * Routine: ipc_port_copyout_send
2493 * Purpose:
2494 * Copyout a naked send right (possibly null/dead),
2495 * or if that fails, destroy the right.
2496 * Conditions:
2497 * Nothing locked.
2498 */
2499
2500 mach_port_name_t
2501 ipc_port_copyout_send(
2502 ipc_port_t sright,
2503 ipc_space_t space)
2504 {
2505 mach_port_name_t name;
2506
2507 if (IP_VALID(sright)) {
2508 kern_return_t kr;
2509
2510 kr = ipc_object_copyout(space, ip_to_object(sright),
2511 MACH_MSG_TYPE_PORT_SEND, NULL, NULL, &name);
2512 if (kr != KERN_SUCCESS) {
2513 ipc_port_release_send(sright);
2514
2515 if (kr == KERN_INVALID_CAPABILITY) {
2516 name = MACH_PORT_DEAD;
2517 } else {
2518 name = MACH_PORT_NULL;
2519 }
2520 }
2521 } else {
2522 name = CAST_MACH_PORT_TO_NAME(sright);
2523 }
2524
2525 return name;
2526 }
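/*
 * Usage sketch (editorial; hypothetical caller and task variable):
 * moving a kernel-held send right into a task's IPC space. The right
 * is consumed on failure, and the returned name encodes the outcome.
 * The sketch assumes IP_VALID(sright); for null/dead inputs the
 * cast-back name is returned unchanged.
 *
 *	mach_port_name_t name =
 *	    ipc_port_copyout_send(sright, task->itk_space);
 *	if (name == MACH_PORT_DEAD) {
 *		... copyout failed with KERN_INVALID_CAPABILITY ...
 *	} else if (name == MACH_PORT_NULL) {
 *		... copyout failed some other way; right was destroyed ...
 *	}
 */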
2527
2528 /*
2529 * Routine: ipc_port_release_send
2530 * Purpose:
2531 * Release a naked send right.
2532 * Consumes a ref for the port.
2533 * Conditions:
2534 * Nothing locked.
2535 */
2536
2537 void
2538 ipc_port_release_send(
2539 ipc_port_t port)
2540 {
2541 ipc_port_t nsrequest = IP_NULL;
2542 mach_port_mscount_t mscount;
2543
2544 if (!IP_VALID(port)) {
2545 return;
2546 }
2547
2548 ip_lock(port);
2549
2550 assert(port->ip_srights > 0);
2551 if (port->ip_srights == 0) {
2552 panic("Over-release of port %p send right!", port);
2553 }
2554
2555 port->ip_srights--;
2556
2557 if (!ip_active(port)) {
2558 ip_unlock(port);
2559 ip_release(port);
2560 return;
2561 }
2562
2563 if (port->ip_srights == 0 &&
2564 port->ip_nsrequest != IP_NULL) {
2565 nsrequest = port->ip_nsrequest;
2566 port->ip_nsrequest = IP_NULL;
2567 mscount = port->ip_mscount;
2568 ip_unlock(port);
2569 ip_release(port);
2570 ipc_notify_no_senders(nsrequest, mscount);
2571 } else {
2572 ip_unlock(port);
2573 ip_release(port);
2574 }
2575 }
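/*
 * Editorial note on the no-senders path above: the nsrequest port and
 * make-send count are snapshotted under the port lock, but
 * ipc_notify_no_senders() runs only after the lock and the caller's
 * port reference have been dropped, since posting the notification
 * sends a message and must not happen with the port lock held.
 */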
2576
2577 /*
2578 * Routine: ipc_port_make_sonce_locked
2579 * Purpose:
2580 * Make a naked send-once right from a receive right.
2581 * Conditions:
2582 * The port is locked and active.
2583 */
2584
2585 ipc_port_t
2586 ipc_port_make_sonce_locked(
2587 ipc_port_t port)
2588 {
2589 require_ip_active(port);
2590 port->ip_sorights++;
2591 ip_reference(port);
2592 return port;
2593 }
2594
2595 /*
2596 * Routine: ipc_port_make_sonce
2597 * Purpose:
2598 * Make a naked send-once right from a receive right.
2599 * Conditions:
2600 * The port is not locked.
2601 */
2602
2603 ipc_port_t
2604 ipc_port_make_sonce(
2605 ipc_port_t port)
2606 {
2607 if (!IP_VALID(port)) {
2608 return port;
2609 }
2610
2611 ip_lock(port);
2612 if (ip_active(port)) {
2613 ipc_port_make_sonce_locked(port);
2614 ip_unlock(port);
2615 return port;
2616 }
2617 ip_unlock(port);
2618 return IP_DEAD;
2619 }
2620
2621 /*
2622 * Routine: ipc_port_release_sonce
2623 * Purpose:
2624 * Release a naked send-once right.
2625 * Consumes a ref for the port.
2626 *
2627 * In normal situations, this is never used.
2628 * Send-once rights are only consumed when
2629 * a message (possibly a send-once notification)
2630 * is sent to them.
2631 * Conditions:
2632 * Nothing locked except possibly a space.
2633 */
2634
2635 void
2636 ipc_port_release_sonce(
2637 ipc_port_t port)
2638 {
2639 if (!IP_VALID(port)) {
2640 return;
2641 }
2642
2643 ipc_port_adjust_special_reply_port(port, IPC_PORT_ADJUST_RESET_BOOSTRAP_CHECKIN);
2644
2645 ip_lock(port);
2646
2647 assert(port->ip_sorights > 0);
2648 if (port->ip_sorights == 0) {
2649 panic("Over-release of port %p send-once right!", port);
2650 }
2651
2652 port->ip_sorights--;
2653
2654 ip_unlock(port);
2655 ip_release(port);
2656 }
2657
2658 /*
2659 * Routine: ipc_port_release_receive
2660 * Purpose:
2661 * Release a naked (in limbo or in transit) receive right.
2662 * Consumes a ref for the port; destroys the port.
2663 * Conditions:
2664 * Nothing locked.
2665 */
2666
2667 void
2668 ipc_port_release_receive(
2669 ipc_port_t port)
2670 {
2671 ipc_port_t dest;
2672
2673 if (!IP_VALID(port)) {
2674 return;
2675 }
2676
2677 ip_lock(port);
2678 require_ip_active(port);
2679 assert(port->ip_receiver_name == MACH_PORT_NULL);
2680 dest = port->ip_destination;
2681
2682 ipc_port_destroy(port); /* consumes ref, unlocks */
2683
2684 if (dest != IP_NULL) {
2685 ipc_port_send_turnstile_complete(dest);
2686 ip_release(dest);
2687 }
2688 }
2689
2690 /*
2691 * Routine: ipc_port_alloc_special
2692 * Purpose:
2693 * Allocate a port in a special space.
2694 * The new port is returned with one ref.
2695 * If unsuccessful, IP_NULL is returned.
2696 * Conditions:
2697 * Nothing locked.
2698 */
2699
2700 ipc_port_t
2701 ipc_port_alloc_special(
2702 ipc_space_t space,
2703 ipc_port_init_flags_t flags)
2704 {
2705 ipc_port_t port;
2706
2707 port = ip_object_to_port(io_alloc(IOT_PORT));
2708 if (port == IP_NULL) {
2709 return IP_NULL;
2710 }
2711
2712 #if MACH_ASSERT
2713 uintptr_t buf[IP_CALLSTACK_MAX];
2714 ipc_port_callstack_init_debug(&buf[0], IP_CALLSTACK_MAX);
2715 #endif /* MACH_ASSERT */
2716
2717 bzero((char *)port, sizeof(*port));
2718 io_lock_init(ip_to_object(port));
2719 port->ip_references = 1;
2720 port->ip_object.io_bits = io_makebits(TRUE, IOT_PORT, 0);
2721
2722 ipc_port_init(port, space, flags, 1);
2723
2724 #if MACH_ASSERT
2725 ipc_port_init_debug(port, &buf[0], IP_CALLSTACK_MAX);
2726 #endif /* MACH_ASSERT */
2727
2728 return port;
2729 }
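/*
 * Usage sketch (editorial; IPC_PORT_INIT_NONE is an assumption about
 * this xnu version's flag names): special-space ports pair
 * ipc_port_alloc_special() with ipc_port_dealloc_special() below,
 * e.g. for a kernel-space port:
 *
 *	ipc_port_t port =
 *	    ipc_port_alloc_special(ipc_space_kernel, IPC_PORT_INIT_NONE);
 *	if (port != IP_NULL) {
 *		... use the port ...
 *		ipc_port_dealloc_special(port, ipc_space_kernel);
 *	}
 */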
2730
2731 /*
2732 * Routine: ipc_port_dealloc_special
2733 * Purpose:
2734 * Deallocate a port in a special space.
2735 * Consumes one ref for the port.
2736 * Conditions:
2737 * Nothing locked.
2738 */
2739
2740 void
2741 ipc_port_dealloc_special(
2742 ipc_port_t port,
2743 __assert_only ipc_space_t space)
2744 {
2745 ip_lock(port);
2746 require_ip_active(port);
2747 // assert(port->ip_receiver_name != MACH_PORT_NULL);
2748 assert(port->ip_receiver == space);
2749
2750 /*
2751 * We clear ip_receiver_name and ip_receiver to simplify
2752 * the ipc_space_kernel check in ipc_mqueue_send.
2753 */
2754
2755 imq_lock(&port->ip_messages);
2756 port->ip_receiver_name = MACH_PORT_NULL;
2757 port->ip_receiver = IS_NULL;
2758 imq_unlock(&port->ip_messages);
2759
2760 /* relevant part of ipc_port_clear_receiver */
2761 port->ip_mscount = 0;
2762 port->ip_messages.imq_seqno = 0;
2763
2764 ipc_port_destroy(port);
2765 }
2766
2767 /*
2768 * Routine: ipc_port_finalize
2769 * Purpose:
2770 * Called when the last reference is deallocated, to
2771 * free any remaining data associated with the
2772 * port.
2773 * Conditions:
2774 * Nothing locked.
2775 */
2776 void
2777 ipc_port_finalize(
2778 ipc_port_t port)
2779 {
2780 ipc_port_request_t requests = port->ip_requests;
2781
2782 assert(port_send_turnstile(port) == TURNSTILE_NULL);
2783 if (imq_is_turnstile_proxy(&port->ip_messages)) {
2784 assert(ipc_port_rcv_turnstile(port) == TURNSTILE_NULL);
2785 }
2786
2787 if (ip_active(port)) {
2788 panic("Trying to free an active port. port %p", port);
2789 }
2790
2791 if (requests != IPR_NULL) {
2792 ipc_table_size_t its = requests->ipr_size;
2793 it_requests_free(its, requests);
2794 port->ip_requests = IPR_NULL;
2795 }
2796
2797 ipc_mqueue_deinit(&port->ip_messages);
2798
2799 #if MACH_ASSERT
2800 ipc_port_track_dealloc(port);
2801 #endif /* MACH_ASSERT */
2802 }
2803
2804 /*
2805 * Routine: kdp_mqueue_send_find_owner
2806 * Purpose:
2807 * Discover the owner of the ipc_mqueue that contains the input
2808 * waitq object. The thread blocked on the waitq should be
2809 * waiting for an IPC_MQUEUE_FULL event.
2810 * Conditions:
2811 * The 'waitinfo->wait_type' value should already be set to
2812 * kThreadWaitPortSend.
2813 * Note:
2814 * If we find out that the containing port is actually in
2815 * transit, we reset the wait_type field to reflect this.
2816 */
2817 void
2818 kdp_mqueue_send_find_owner(struct waitq * waitq, __assert_only event64_t event, thread_waitinfo_t * waitinfo)
2819 {
2820 struct turnstile *turnstile;
2821 assert(waitinfo->wait_type == kThreadWaitPortSend);
2822 assert(event == IPC_MQUEUE_FULL);
2823 assert(waitq_is_turnstile_queue(waitq));
2824
2825 turnstile = waitq_to_turnstile(waitq);
2826 ipc_port_t port = (ipc_port_t)turnstile->ts_proprietor; /* we are blocking on send */
2827 assert(kdp_is_in_zone(port, "ipc ports"));
2828
2829 waitinfo->owner = 0;
2830 waitinfo->context = VM_KERNEL_UNSLIDE_OR_PERM(port);
2831 if (ip_lock_held_kdp(port)) {
2832 /*
2833 * someone has the port locked; it may be in an
2834 * inconsistent state, so bail
2835 */
2836 waitinfo->owner = STACKSHOT_WAITOWNER_PORT_LOCKED;
2837 return;
2838 }
2839
2840 if (ip_active(port)) {
2841 if (port->ip_tempowner) {
2842 if (port->ip_imp_task != IIT_NULL && port->ip_imp_task->iit_task != NULL) {
2843 /* port is held by a tempowner */
2844 waitinfo->owner = pid_from_task(port->ip_imp_task->iit_task);
2845 } else {
2846 waitinfo->owner = STACKSHOT_WAITOWNER_INTRANSIT;
2847 }
2848 } else if (port->ip_receiver_name) {
2849 /* port in a space */
2850 if (port->ip_receiver == ipc_space_kernel) {
2851 /*
2852 * The kernel pid is 0; make this
2853 * distinguishable from no-owner and
2854 * inconsistent port state.
2855 */
2856 waitinfo->owner = STACKSHOT_WAITOWNER_KERNEL;
2857 } else {
2858 waitinfo->owner = pid_from_task(port->ip_receiver->is_task);
2859 }
2860 } else if (port->ip_destination != IP_NULL) {
2861 /* port in transit */
2862 waitinfo->wait_type = kThreadWaitPortSendInTransit;
2863 waitinfo->owner = VM_KERNEL_UNSLIDE_OR_PERM(port->ip_destination);
2864 }
2865 }
2866 }
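/*
 * Editorial summary of the owner encodings produced above for
 * stackshot consumers:
 *
 *	STACKSHOT_WAITOWNER_PORT_LOCKED	port lock held; state unreliable
 *	STACKSHOT_WAITOWNER_KERNEL	receiver is the kernel ipc space
 *	STACKSHOT_WAITOWNER_INTRANSIT	tempowner with no resolvable task
 *	pid > 0				receiver (or tempowner) task pid
 *	in-transit ports		wait_type becomes
 *					kThreadWaitPortSendInTransit with
 *					owner = unslid destination port
 */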
2867
2868 /*
2869 * Routine: kdp_mqueue_recv_find_owner
2870 * Purpose:
2871 * Discover the "owner" of the ipc_mqueue that contains the input
2872 * waitq object. The thread blocked on the waitq is trying to
2873 * receive on the mqueue.
2874 * Conditions:
2875 * The 'waitinfo->wait_type' value should already be set to
2876 * kThreadWaitPortReceive.
2877 * Note:
2878 * If we find that we are actually waiting on a port set, we reset
2879 * the wait_type field to reflect this.
2880 */
2881 void
2882 kdp_mqueue_recv_find_owner(struct waitq * waitq, __assert_only event64_t event, thread_waitinfo_t * waitinfo)
2883 {
2884 assert(waitinfo->wait_type == kThreadWaitPortReceive);
2885 assert(event == IPC_MQUEUE_RECEIVE);
2886
2887 ipc_mqueue_t mqueue = imq_from_waitq(waitq);
2888 waitinfo->owner = 0;
2889 if (imq_is_set(mqueue)) { /* we are waiting on a port set */
2890 ipc_pset_t set = ips_from_mq(mqueue);
2891 assert(kdp_is_in_zone(set, "ipc port sets"));
2892
2893 /* Reset wait type to specify waiting on port set receive */
2894 waitinfo->wait_type = kThreadWaitPortSetReceive;
2895 waitinfo->context = VM_KERNEL_UNSLIDE_OR_PERM(set);
2896 if (ips_lock_held_kdp(set)) {
2897 waitinfo->owner = STACKSHOT_WAITOWNER_PSET_LOCKED;
2898 }
2899 /* There is no specific owner "at the other end" of a port set, so leave unset. */
2900 } else {
2901 ipc_port_t port = ip_from_mq(mqueue);
2902 assert(kdp_is_in_zone(port, "ipc ports"));
2903
2904 waitinfo->context = VM_KERNEL_UNSLIDE_OR_PERM(port);
2905 if (ip_lock_held_kdp(port)) {
2906 waitinfo->owner = STACKSHOT_WAITOWNER_PORT_LOCKED;
2907 return;
2908 }
2909
2910 if (ip_active(port)) {
2911 if (port->ip_receiver_name != MACH_PORT_NULL) {
2912 waitinfo->owner = port->ip_receiver_name;
2913 } else {
2914 waitinfo->owner = STACKSHOT_WAITOWNER_INTRANSIT;
2915 }
2916 }
2917 }
2918 }
2919
2920 #if MACH_ASSERT
2921 #include <kern/machine.h>
2922
2923 /*
2924 * Keep a list of all allocated ports.
2925 * Allocation is intercepted via ipc_port_init;
2926 * deallocation is intercepted via io_free.
2927 */
2928 #if 0
2929 queue_head_t port_alloc_queue;
2930 lck_spin_t port_alloc_queue_lock;
2931 #endif
2932
2933 unsigned long port_count = 0;
2934 unsigned long port_count_warning = 20000;
2935 unsigned long port_timestamp = 0;
2936
2937 void db_port_stack_trace(
2938 ipc_port_t port);
2939 void db_ref(
2940 int refs);
2941 int db_port_walk(
2942 unsigned int verbose,
2943 unsigned int display,
2944 unsigned int ref_search,
2945 unsigned int ref_target);
2946
2947 /*
2948 * Initialize global state needed for run-time
2949 * port debugging.
2950 */
2951 void
2952 ipc_port_debug_init(void)
2953 {
2954 #if 0
2955 queue_init(&port_alloc_queue);
2956 lck_spin_init(&port_alloc_queue_lock, &ipc_lck_grp, &ipc_lck_attr);
2957 #endif
2958
2959 if (!PE_parse_boot_argn("ipc_portbt", &ipc_portbt, sizeof(ipc_portbt))) {
2960 ipc_portbt = 0;
2961 }
2962 }
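/*
 * Editorial note: ipc_portbt gates the capture of allocation
 * backtraces in ipc_port_callstack_init_debug() below and defaults to
 * off. On MACH_ASSERT kernels it can be enabled with the boot-arg
 *
 *	ipc_portbt=1
 *
 * (name taken from the PE_parse_boot_argn() call above).
 */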
2963
2964 #ifdef MACH_BSD
2965 extern int proc_pid(struct proc*);
2966 #endif /* MACH_BSD */
2967
2968 /*
2969 * Initialize all of the debugging state in a port.
2970 * Insert the port into a global list of all allocated ports.
2971 */
2972 void
2973 ipc_port_init_debug(
2974 ipc_port_t port,
2975 uintptr_t *callstack,
2976 unsigned int callstack_max)
2977 {
2978 unsigned int i;
2979
2980 port->ip_thread = current_thread();
2981 port->ip_timetrack = port_timestamp++;
2982 for (i = 0; i < callstack_max; ++i) {
2983 port->ip_callstack[i] = callstack[i];
2984 }
2985 for (i = 0; i < IP_NSPARES; ++i) {
2986 port->ip_spares[i] = 0;
2987 }
2988
2989 #ifdef MACH_BSD
2990 task_t task = current_task();
2991 if (task != TASK_NULL) {
2992 struct proc* proc = (struct proc*) get_bsdtask_info(task);
2993 if (proc) {
2994 port->ip_spares[0] = proc_pid(proc);
2995 }
2996 }
2997 #endif /* MACH_BSD */
2998
2999 #if 0
3000 lck_spin_lock(&port_alloc_queue_lock);
3001 ++port_count;
3002 if (port_count_warning > 0 && port_count >= port_count_warning) {
3003 assert(port_count < port_count_warning);
3004 }
3005 queue_enter(&port_alloc_queue, port, ipc_port_t, ip_port_links);
3006 lck_spin_unlock(&port_alloc_queue_lock);
3007 #endif
3008 }
3009
3010 /*
3011 * Routine: ipc_port_callstack_init_debug
3012 * Purpose:
3013 * Calls the machine-dependent routine to
3014 * fill in an array with up to IP_CALLSTACK_MAX
3015 * levels of return pc information.
3016 * Conditions:
3017 * May block (via copyin)
3018 */
3019 void
3020 ipc_port_callstack_init_debug(
3021 uintptr_t *callstack,
3022 unsigned int callstack_max)
3023 {
3024 unsigned int i;
3025
3026 /* guarantee the callstack is initialized */
3027 for (i = 0; i < callstack_max; i++) {
3028 callstack[i] = 0;
3029 }
3030
3031 if (ipc_portbt) {
3032 machine_callstack(callstack, callstack_max);
3033 }
3034 }
3035
3036 /*
3037 * Remove a port from the queue of allocated ports.
3038 * This routine should be invoked JUST prior to
3039 * deallocating the actual memory occupied by the port.
3040 */
3041 #if 1
3042 void
3043 ipc_port_track_dealloc(
3044 __unused ipc_port_t port)
3045 {
3046 }
3047 #else
3048 void
3049 ipc_port_track_dealloc(
3050 ipc_port_t port)
3051 {
3052 lck_spin_lock(&port_alloc_queue_lock);
3053 assert(port_count > 0);
3054 --port_count;
3055 queue_remove(&port_alloc_queue, port, ipc_port_t, ip_port_links);
3056 lck_spin_unlock(&port_alloc_queue_lock);
3057 }
3058 #endif
3059
3060
3061 #endif /* MACH_ASSERT */