1 /*
2 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_FREE_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
58 * support for mandatory and extensible security protections. This notice
59 * is included in support of clause 2.2 (b) of the Apple Public License,
60 * Version 2.0.
61 */
62 /*
63 */
64 /*
65 * File: ipc/ipc_port.c
66 * Author: Rich Draves
67 * Date: 1989
68 *
69 * Functions to manipulate IPC ports.
70 */
71
72 #include <zone_debug.h>
73 #include <mach_assert.h>
74
75 #include <mach/port.h>
76 #include <mach/kern_return.h>
77 #include <kern/ipc_kobject.h>
78 #include <kern/thread.h>
79 #include <kern/misc_protos.h>
80 #include <kern/waitq.h>
81 #include <kern/policy_internal.h>
82 #include <ipc/ipc_entry.h>
83 #include <ipc/ipc_space.h>
84 #include <ipc/ipc_object.h>
85 #include <ipc/ipc_port.h>
86 #include <ipc/ipc_pset.h>
87 #include <ipc/ipc_kmsg.h>
88 #include <ipc/ipc_mqueue.h>
89 #include <ipc/ipc_notify.h>
90 #include <ipc/ipc_table.h>
91 #include <ipc/ipc_importance.h>
92
93 #include <security/mac_mach_internal.h>
94
95 #include <string.h>
96
97 decl_lck_spin_data(, ipc_port_multiple_lock_data)
98 ipc_port_timestamp_t ipc_port_timestamp_data;
99 int ipc_portbt;
100
101 #if MACH_ASSERT
102 void ipc_port_init_debug(
103 ipc_port_t port,
104 uintptr_t *callstack,
105 unsigned int callstack_max);
106
107 void ipc_port_callstack_init_debug(
108 uintptr_t *callstack,
109 unsigned int callstack_max);
110
111 #endif /* MACH_ASSERT */
112
113 void
114 ipc_port_release(ipc_port_t port)
115 {
116 ip_release(port);
117 }
118
119 void
120 ipc_port_reference(ipc_port_t port)
121 {
122 ip_reference(port);
123 }
124
125 /*
126 * Routine: ipc_port_timestamp
127 * Purpose:
128 * Retrieve a timestamp value.
129 */
130
131 ipc_port_timestamp_t
132 ipc_port_timestamp(void)
133 {
134 return OSIncrementAtomic(&ipc_port_timestamp_data);
135 }
136
137 /*
138 * Routine: ipc_port_request_alloc
139 * Purpose:
140 * Try to allocate a request slot.
141 * If successful, returns the request index through *indexp.
142 * Otherwise returns KERN_NO_SPACE.
143 * Conditions:
144 * The port is locked and active.
145 * Returns:
146 * KERN_SUCCESS A request index was found.
147 * KERN_NO_SPACE No index allocated.
148 */
149
150 #if IMPORTANCE_INHERITANCE
151 kern_return_t
152 ipc_port_request_alloc(
153 ipc_port_t port,
154 mach_port_name_t name,
155 ipc_port_t soright,
156 boolean_t send_possible,
157 boolean_t immediate,
158 ipc_port_request_index_t *indexp,
159 boolean_t *importantp)
160 #else
161 kern_return_t
162 ipc_port_request_alloc(
163 ipc_port_t port,
164 mach_port_name_t name,
165 ipc_port_t soright,
166 boolean_t send_possible,
167 boolean_t immediate,
168 ipc_port_request_index_t *indexp)
169 #endif /* IMPORTANCE_INHERITANCE */
170 {
171 ipc_port_request_t ipr, table;
172 ipc_port_request_index_t index;
173 uintptr_t mask = 0;
174
175 #if IMPORTANCE_INHERITANCE
176 *importantp = FALSE;
177 #endif /* IMPORTANCE_INHERITANCE */
178
179 assert(ip_active(port));
180 assert(name != MACH_PORT_NULL);
181 assert(soright != IP_NULL);
182
183 table = port->ip_requests;
184
185 if (table == IPR_NULL)
186 return KERN_NO_SPACE;
187
188 index = table->ipr_next;
189 if (index == 0)
190 return KERN_NO_SPACE;
191
192 ipr = &table[index];
193 assert(ipr->ipr_name == MACH_PORT_NULL);
194
195 table->ipr_next = ipr->ipr_next;
196 ipr->ipr_name = name;
197
198 if (send_possible) {
199 mask |= IPR_SOR_SPREQ_MASK;
200 if (immediate) {
201 mask |= IPR_SOR_SPARM_MASK;
202 if (port->ip_sprequests == 0) {
203 port->ip_sprequests = 1;
204 #if IMPORTANCE_INHERITANCE
205 /* TODO: Live importance support in send-possible */
206 if (port->ip_impdonation != 0 &&
207 port->ip_spimportant == 0 &&
208 (task_is_importance_donor(current_task()))) {
209 *importantp = TRUE;
210 }
211 #endif /* IMPORTANCE_INHERITANCE */
212 }
213 }
214 }
215 ipr->ipr_soright = IPR_SOR_MAKE(soright, mask);
216
217 *indexp = index;
218
219 return KERN_SUCCESS;
220 }
221
222 /*
223 * Routine: ipc_port_request_grow
224 * Purpose:
225 * Grow a port's table of requests.
226 * Conditions:
227 * The port must be locked and active.
228 * Nothing else locked; will allocate memory.
229 * Upon return the port is unlocked.
230 * Returns:
231 * KERN_SUCCESS Grew the table.
232 * KERN_SUCCESS Somebody else grew the table.
233 * KERN_SUCCESS The port died.
234 * KERN_RESOURCE_SHORTAGE Couldn't allocate new table.
235 * KERN_NO_SPACE Couldn't grow to desired size.
236 */
237
238 kern_return_t
239 ipc_port_request_grow(
240 ipc_port_t port,
241 ipc_table_elems_t target_size)
242 {
243 ipc_table_size_t its;
244 ipc_port_request_t otable, ntable;
245
246 assert(ip_active(port));
247
248 otable = port->ip_requests;
249 if (otable == IPR_NULL)
250 its = &ipc_table_requests[0];
251 else
252 its = otable->ipr_size + 1;
253
254 if (target_size != ITS_SIZE_NONE) {
255 if ((otable != IPR_NULL) &&
256 (target_size <= otable->ipr_size->its_size)) {
257 ip_unlock(port);
258 return KERN_SUCCESS;
259 }
260 while ((its->its_size) && (its->its_size < target_size)) {
261 its++;
262 }
263 if (its->its_size == 0) {
264 ip_unlock(port);
265 return KERN_NO_SPACE;
266 }
267 }
268
269 ip_reference(port);
270 ip_unlock(port);
271
272 if ((its->its_size == 0) ||
273 ((ntable = it_requests_alloc(its)) == IPR_NULL)) {
274 ip_release(port);
275 return KERN_RESOURCE_SHORTAGE;
276 }
277
278 ip_lock(port);
279
280 /*
281 * Check that port is still active and that nobody else
282 * has slipped in and grown the table on us. Note that
283 * just checking if the current table pointer == otable
284 * isn't sufficient; must check ipr_size.
285 */
286
287 if (ip_active(port) && (port->ip_requests == otable) &&
288 ((otable == IPR_NULL) || (otable->ipr_size+1 == its))) {
289 ipc_table_size_t oits;
290 ipc_table_elems_t osize, nsize;
291 ipc_port_request_index_t free, i;
292
293 /* copy old table to new table */
294
295 if (otable != IPR_NULL) {
296 oits = otable->ipr_size;
297 osize = oits->its_size;
298 free = otable->ipr_next;
299
300 (void) memcpy((void *)(ntable + 1),
301 (const void *)(otable + 1),
302 (osize - 1) * sizeof(struct ipc_port_request));
303 } else {
304 osize = 1;
305 oits = 0;
306 free = 0;
307 }
308
309 nsize = its->its_size;
310 assert(nsize > osize);
311
312 /* add new elements to the new table's free list */
313
314 for (i = osize; i < nsize; i++) {
315 ipc_port_request_t ipr = &ntable[i];
316
317 ipr->ipr_name = MACH_PORT_NULL;
318 ipr->ipr_next = free;
319 free = i;
320 }
321
322 ntable->ipr_next = free;
323 ntable->ipr_size = its;
324 port->ip_requests = ntable;
325 ip_unlock(port);
326 ip_release(port);
327
328 if (otable != IPR_NULL) {
329 it_requests_free(oits, otable);
330 }
331 } else {
332 ip_unlock(port);
333 ip_release(port);
334 it_requests_free(its, ntable);
335 }
336
337 return KERN_SUCCESS;
338 }
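
/*
 * A minimal sketch of how a caller typically pairs ipc_port_request_alloc()
 * with ipc_port_request_grow(): because growing unlocks the port, the
 * allocation is retried in a loop, bailing out if the port dies or if the
 * new table cannot be allocated.  This is only an illustration; it uses the
 * non-IMPORTANCE_INHERITANCE prototype, and the surrounding lookup and
 * error handling are assumed rather than taken from this file.
 *
 *	for (;;) {
 *		ip_lock(port);
 *		if (!ip_active(port)) {
 *			ip_unlock(port);
 *			kr = KERN_INVALID_RIGHT;
 *			break;
 *		}
 *		kr = ipc_port_request_alloc(port, name, soright,
 *		    send_possible, immediate, &index);
 *		if (kr != KERN_NO_SPACE) {
 *			ip_unlock(port);
 *			break;
 *		}
 *		kr = ipc_port_request_grow(port, ITS_SIZE_NONE);
 *		if (kr != KERN_SUCCESS)
 *			break;
 *	}
 */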
339
340 /*
341 * Routine: ipc_port_request_sparm
342 * Purpose:
343 * Arm delayed send-possible request.
344 * Conditions:
345 * The port must be locked and active.
346 *
347 * Returns TRUE if the request was armed
348 * (or armed with importance, when IMPORTANCE_INHERITANCE is configured).
349 */
350
351 boolean_t
352 ipc_port_request_sparm(
353 ipc_port_t port,
354 __assert_only mach_port_name_t name,
355 ipc_port_request_index_t index,
356 mach_msg_option_t option,
357 mach_msg_priority_t override)
358 {
359 if (index != IE_REQ_NONE) {
360 ipc_port_request_t ipr, table;
361
362 assert(ip_active(port));
363
364 table = port->ip_requests;
365 assert(table != IPR_NULL);
366
367 ipr = &table[index];
368 assert(ipr->ipr_name == name);
369
370 /* Is there a valid destination? */
371 if (IPR_SOR_SPREQ(ipr->ipr_soright)) {
372 ipr->ipr_soright = IPR_SOR_MAKE(ipr->ipr_soright, IPR_SOR_SPARM_MASK);
373 port->ip_sprequests = 1;
374
375 if (option & MACH_SEND_OVERRIDE) {
376 /* apply override to message queue */
377 ipc_mqueue_override_send(&port->ip_messages, override);
378 }
379
380 #if IMPORTANCE_INHERITANCE
381 if (((option & MACH_SEND_NOIMPORTANCE) == 0) &&
382 (port->ip_impdonation != 0) &&
383 (port->ip_spimportant == 0) &&
384 (((option & MACH_SEND_IMPORTANCE) != 0) ||
385 (task_is_importance_donor(current_task())))) {
386 return TRUE;
387 }
388 #else
389 return TRUE;
390 #endif /* IMPORTANCE_INHERITANCE */
391 }
392 }
393 return FALSE;
394 }
395
396 /*
397 * Routine: ipc_port_request_type
398 * Purpose:
399 * Determine the type(s) of port requests enabled for a name.
400 * Conditions:
401 * The port must be locked or inactive (to avoid table growth).
402 * The index must not be IE_REQ_NONE and must be for the name in question.
403 */
404 mach_port_type_t
405 ipc_port_request_type(
406 ipc_port_t port,
407 __assert_only mach_port_name_t name,
408 ipc_port_request_index_t index)
409 {
410 ipc_port_request_t ipr, table;
411 mach_port_type_t type = 0;
412
413 table = port->ip_requests;
414 assert(table != IPR_NULL);
415
416 assert(index != IE_REQ_NONE);
417 ipr = &table[index];
418 assert(ipr->ipr_name == name);
419
420 if (IP_VALID(IPR_SOR_PORT(ipr->ipr_soright))) {
421 type |= MACH_PORT_TYPE_DNREQUEST;
422
423 if (IPR_SOR_SPREQ(ipr->ipr_soright)) {
424 type |= MACH_PORT_TYPE_SPREQUEST;
425
426 if (!IPR_SOR_SPARMED(ipr->ipr_soright)) {
427 type |= MACH_PORT_TYPE_SPREQUEST_DELAYED;
428 }
429 }
430 }
431 return type;
432 }
433
434 /*
435 * Routine: ipc_port_request_cancel
436 * Purpose:
437 * Cancel a dead-name/send-possible request and return the send-once right.
438 * Conditions:
439 * The port must be locked and active.
440 * The index must not be IE_REQ_NONE and must correspond with name.
441 */
442
443 ipc_port_t
444 ipc_port_request_cancel(
445 ipc_port_t port,
446 __assert_only mach_port_name_t name,
447 ipc_port_request_index_t index)
448 {
449 ipc_port_request_t ipr, table;
450 ipc_port_t request = IP_NULL;
451
452 assert(ip_active(port));
453 table = port->ip_requests;
454 assert(table != IPR_NULL);
455
456 assert(index != IE_REQ_NONE);
457 ipr = &table[index];
458 assert(ipr->ipr_name == name);
459 request = IPR_SOR_PORT(ipr->ipr_soright);
460
461 /* return ipr to the free list inside the table */
462 ipr->ipr_name = MACH_PORT_NULL;
463 ipr->ipr_next = table->ipr_next;
464 table->ipr_next = index;
465
466 return request;
467 }
468
469 /*
470 * Routine: ipc_port_pdrequest
471 * Purpose:
472 * Make a port-deleted request, returning the
473 * previously registered send-once right.
474 * Just cancels the previous request if notify is IP_NULL.
475 * Conditions:
476 * The port is locked and active. It is unlocked.
477 * Consumes a ref for notify (if non-null), and
478 * returns previous with a ref (if non-null).
479 */
480
481 void
482 ipc_port_pdrequest(
483 ipc_port_t port,
484 ipc_port_t notify,
485 ipc_port_t *previousp)
486 {
487 ipc_port_t previous;
488
489 assert(ip_active(port));
490
491 previous = port->ip_pdrequest;
492 port->ip_pdrequest = notify;
493 ip_unlock(port);
494
495 *previousp = previous;
496 }
497
498 /*
499 * Routine: ipc_port_nsrequest
500 * Purpose:
501 * Make a no-senders request, returning the
502 * previously registered send-once right.
503 * Just cancels the previous request if notify is IP_NULL.
504 * Conditions:
505 * The port is locked and active. It is unlocked.
506 * Consumes a ref for notify (if non-null), and
507 * returns previous with a ref (if non-null).
508 */
509
510 void
511 ipc_port_nsrequest(
512 ipc_port_t port,
513 mach_port_mscount_t sync,
514 ipc_port_t notify,
515 ipc_port_t *previousp)
516 {
517 ipc_port_t previous;
518 mach_port_mscount_t mscount;
519
520 assert(ip_active(port));
521
522 previous = port->ip_nsrequest;
523 mscount = port->ip_mscount;
524
525 if ((port->ip_srights == 0) && (sync <= mscount) &&
526 (notify != IP_NULL)) {
527 port->ip_nsrequest = IP_NULL;
528 ip_unlock(port);
529 ipc_notify_no_senders(notify, mscount);
530 } else {
531 port->ip_nsrequest = notify;
532 ip_unlock(port);
533 }
534
535 *previousp = previous;
536 }
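
/*
 * A minimal sketch of a typical no-senders registration: the caller makes a
 * send-once right for the (locked, active) port itself and hands it to
 * ipc_port_nsrequest().  Passing the current make-send count means the
 * notification is generated immediately if no send rights are outstanding,
 * and otherwise when the send-right count next drops to zero.  Where "port"
 * comes from and how a previously registered right is disposed of are
 * assumptions, not code from this file.
 *
 *	ipc_port_t notify, previous;
 *
 *	ip_lock(port);
 *	notify = ipc_port_make_sonce_locked(port);
 *	ipc_port_nsrequest(port, port->ip_mscount, notify, &previous);
 *
 * The port is unlocked on return and the reference for notify is consumed;
 * any previously registered send-once right comes back in "previous".
 */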
537
538
539 /*
540 * Routine: ipc_port_clear_receiver
541 * Purpose:
542 * Prepares a receive right for transmission/destruction,
543 * optionally performs mqueue destruction (with port lock held)
544 *
545 * Conditions:
546 * The port is locked and active.
547 * Returns:
548 * If should_destroy is TRUE, then the return value indicates
549 * whether the caller needs to reap kmsg structures that should
550 * be destroyed (by calling ipc_kmsg_reap_delayed)
551 *
552 * If should_destroy is FALSE, this always returns FALSE
553 */
554
555 boolean_t
556 ipc_port_clear_receiver(
557 ipc_port_t port,
558 boolean_t should_destroy)
559 {
560 ipc_mqueue_t mqueue = &port->ip_messages;
561 boolean_t reap_messages = FALSE;
562
563 /*
564 * Pull ourselves out of any sets to which we belong.
565 * We hold the port locked, so even though this acquires and releases
566 * the mqueue lock, we know we won't be added to any other sets.
567 */
568 if (port->ip_in_pset != 0) {
569 ipc_pset_remove_from_all(port);
570 assert(port->ip_in_pset == 0);
571 }
572
573 /*
574 * Send anyone waiting on the port's queue directly away.
575 * Also clear the mscount and seqno.
576 */
577 imq_lock(mqueue);
578 ipc_mqueue_changed(mqueue);
579 port->ip_mscount = 0;
580 mqueue->imq_seqno = 0;
581 port->ip_context = port->ip_guarded = port->ip_strict_guard = 0;
582
583 if (should_destroy) {
584 /*
585 * Mark the mqueue invalid, preventing further send/receive
586 * operations from succeeding. It's important for this to be
587 * done under the same lock hold as the ipc_mqueue_changed
588 * call to avoid additional threads blocking on an mqueue
589 * that's being destroyed.
590 */
591 reap_messages = ipc_mqueue_destroy_locked(mqueue);
592 }
593
594 imq_unlock(mqueue);
595
596 return reap_messages;
597 }
598
599 /*
600 * Routine: ipc_port_init
601 * Purpose:
602 * Initializes a newly-allocated port.
603 * Doesn't touch the ip_object fields.
604 */
605
606 void
607 ipc_port_init(
608 ipc_port_t port,
609 ipc_space_t space,
610 mach_port_name_t name)
611 {
612 /* port->ip_kobject doesn't have to be initialized */
613
614 port->ip_receiver = space;
615 port->ip_receiver_name = name;
616
617 port->ip_mscount = 0;
618 port->ip_srights = 0;
619 port->ip_sorights = 0;
620
621 port->ip_nsrequest = IP_NULL;
622 port->ip_pdrequest = IP_NULL;
623 port->ip_requests = IPR_NULL;
624
625 port->ip_premsg = IKM_NULL;
626 port->ip_context = 0;
627
628 port->ip_sprequests = 0;
629 port->ip_spimportant = 0;
630 port->ip_impdonation = 0;
631 port->ip_tempowner = 0;
632
633 port->ip_guarded = 0;
634 port->ip_strict_guard = 0;
635 port->ip_impcount = 0;
636
637 port->ip_reserved = 0;
638
639 ipc_mqueue_init(&port->ip_messages,
640 FALSE /* !set */, NULL /* no reserved link */);
641 }
642
643 /*
644 * Routine: ipc_port_alloc
645 * Purpose:
646 * Allocate a port.
647 * Conditions:
648 * Nothing locked. If successful, the port is returned
649 * locked. (The caller doesn't have a reference.)
650 * Returns:
651 * KERN_SUCCESS The port is allocated.
652 * KERN_INVALID_TASK The space is dead.
653 * KERN_NO_SPACE No room for an entry in the space.
654 * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
655 */
656
657 kern_return_t
658 ipc_port_alloc(
659 ipc_space_t space,
660 mach_port_name_t *namep,
661 ipc_port_t *portp)
662 {
663 ipc_port_t port;
664 mach_port_name_t name;
665 kern_return_t kr;
666
667 #if MACH_ASSERT
668 uintptr_t buf[IP_CALLSTACK_MAX];
669 ipc_port_callstack_init_debug(&buf[0], IP_CALLSTACK_MAX);
670 #endif /* MACH_ASSERT */
671
672 kr = ipc_object_alloc(space, IOT_PORT,
673 MACH_PORT_TYPE_RECEIVE, 0,
674 &name, (ipc_object_t *) &port);
675 if (kr != KERN_SUCCESS)
676 return kr;
677
678 /* port and space are locked */
679 ipc_port_init(port, space, name);
680
681 #if MACH_ASSERT
682 ipc_port_init_debug(port, &buf[0], IP_CALLSTACK_MAX);
683 #endif /* MACH_ASSERT */
684
685 /* unlock space after init */
686 is_write_unlock(space);
687
688 *namep = name;
689 *portp = port;
690
691 return KERN_SUCCESS;
692 }
693
694 /*
695 * Routine: ipc_port_alloc_name
696 * Purpose:
697 * Allocate a port, with a specific name.
698 * Conditions:
699 * Nothing locked. If successful, the port is returned
700 * locked. (The caller doesn't have a reference.)
701 * Returns:
702 * KERN_SUCCESS The port is allocated.
703 * KERN_INVALID_TASK The space is dead.
704 * KERN_NAME_EXISTS The name already denotes a right.
705 * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
706 */
707
708 kern_return_t
709 ipc_port_alloc_name(
710 ipc_space_t space,
711 mach_port_name_t name,
712 ipc_port_t *portp)
713 {
714 ipc_port_t port;
715 kern_return_t kr;
716
717 #if MACH_ASSERT
718 uintptr_t buf[IP_CALLSTACK_MAX];
719 ipc_port_callstack_init_debug(&buf[0], IP_CALLSTACK_MAX);
720 #endif /* MACH_ASSERT */
721
722 kr = ipc_object_alloc_name(space, IOT_PORT,
723 MACH_PORT_TYPE_RECEIVE, 0,
724 name, (ipc_object_t *) &port);
725 if (kr != KERN_SUCCESS)
726 return kr;
727
728 /* port is locked */
729
730 ipc_port_init(port, space, name);
731
732 #if MACH_ASSERT
733 ipc_port_init_debug(port, &buf[0], IP_CALLSTACK_MAX);
734 #endif /* MACH_ASSERT */
735
736 *portp = port;
737
738 return KERN_SUCCESS;
739 }
740
741 /*
742 * Routine: ipc_port_spnotify
743 * Purpose:
744 * Generate send-possible port notifications.
745 * Conditions:
746 * Nothing locked, reference held on port.
747 */
748 void
749 ipc_port_spnotify(
750 ipc_port_t port)
751 {
752 ipc_port_request_index_t index = 0;
753 ipc_table_elems_t size = 0;
754
755 /*
756 * If the port has no send-possible request
757 * armed, don't bother to lock the port.
758 */
759 if (port->ip_sprequests == 0)
760 return;
761
762 ip_lock(port);
763
764 #if IMPORTANCE_INHERITANCE
765 if (port->ip_spimportant != 0) {
766 port->ip_spimportant = 0;
767 if (ipc_port_importance_delta(port, IPID_OPTION_NORMAL, -1) == TRUE) {
768 ip_lock(port);
769 }
770 }
771 #endif /* IMPORTANCE_INHERITANCE */
772
773 if (port->ip_sprequests == 0) {
774 ip_unlock(port);
775 return;
776 }
777 port->ip_sprequests = 0;
778
779 revalidate:
780 if (ip_active(port)) {
781 ipc_port_request_t requests;
782
783 /* table may change each time port unlocked (reload) */
784 requests = port->ip_requests;
785 assert(requests != IPR_NULL);
786
787 /*
788 * no need to go beyond the table size when we first
789 * entered - those are future notifications.
790 */
791 if (size == 0)
792 size = requests->ipr_size->its_size;
793
794 /* no need to backtrack either */
795 while (++index < size) {
796 ipc_port_request_t ipr = &requests[index];
797 mach_port_name_t name = ipr->ipr_name;
798 ipc_port_t soright = IPR_SOR_PORT(ipr->ipr_soright);
799 boolean_t armed = IPR_SOR_SPARMED(ipr->ipr_soright);
800
801 if (MACH_PORT_VALID(name) && armed && IP_VALID(soright)) {
802 /* claim send-once right - slot still inuse */
803 ipr->ipr_soright = IP_NULL;
804 ip_unlock(port);
805
806 ipc_notify_send_possible(soright, name);
807
808 ip_lock(port);
809 goto revalidate;
810 }
811 }
812 }
813 ip_unlock(port);
814 return;
815 }
816
817 /*
818 * Routine: ipc_port_dnnotify
819 * Purpose:
820 * Generate dead name notifications for
821 * all outstanding dead-name and send-
822 * possible requests.
823 * Conditions:
824 * Nothing locked.
825 * Port must be inactive.
826 * Reference held on port.
827 */
828 void
829 ipc_port_dnnotify(
830 ipc_port_t port)
831 {
832 ipc_port_request_t requests = port->ip_requests;
833
834 assert(!ip_active(port));
835 if (requests != IPR_NULL) {
836 ipc_table_size_t its = requests->ipr_size;
837 ipc_table_elems_t size = its->its_size;
838 ipc_port_request_index_t index;
839 for (index = 1; index < size; index++) {
840 ipc_port_request_t ipr = &requests[index];
841 mach_port_name_t name = ipr->ipr_name;
842 ipc_port_t soright = IPR_SOR_PORT(ipr->ipr_soright);
843
844 if (MACH_PORT_VALID(name) && IP_VALID(soright)) {
845 ipc_notify_dead_name(soright, name);
846 }
847 }
848 }
849 }
850
851
852 /*
853 * Routine: ipc_port_destroy
854 * Purpose:
855 * Destroys a port. Cleans up queued messages.
856 *
857 * If the port has a backup, it doesn't get destroyed,
858 * but is sent in a port-destroyed notification to the backup.
859 * Conditions:
860 * The port is locked and alive; nothing else locked.
861 * The caller has a reference, which is consumed.
862 * Afterwards, the port is unlocked and dead.
863 */
864
865 void
866 ipc_port_destroy(ipc_port_t port)
867 {
868 ipc_port_t pdrequest, nsrequest;
869 ipc_mqueue_t mqueue;
870 ipc_kmsg_t kmsg;
871
872 #if IMPORTANCE_INHERITANCE
873 ipc_importance_task_t release_imp_task = IIT_NULL;
874 thread_t self = current_thread();
875 boolean_t top = (self->ith_assertions == 0);
876 natural_t assertcnt = 0;
877 #endif /* IMPORTANCE_INHERITANCE */
878
879 assert(ip_active(port));
880 /* port->ip_receiver_name is garbage */
881 /* port->ip_receiver/port->ip_destination is garbage */
882
883 /* check for a backup port */
884 pdrequest = port->ip_pdrequest;
885
886 #if IMPORTANCE_INHERITANCE
887 /* determine how many assertions to drop and from whom */
888 if (port->ip_tempowner != 0) {
889 assert(top);
890 release_imp_task = port->ip_imp_task;
891 if (IIT_NULL != release_imp_task) {
892 port->ip_imp_task = IIT_NULL;
893 assertcnt = port->ip_impcount;
894 }
895 /* Otherwise, nothing to drop */
896 } else {
897 assertcnt = port->ip_impcount;
898 if (pdrequest != IP_NULL)
899 /* mark in limbo for the journey */
900 port->ip_tempowner = 1;
901 }
902
903 if (top)
904 self->ith_assertions = assertcnt;
905 #endif /* IMPORTANCE_INHERITANCE */
906
907 if (pdrequest != IP_NULL) {
908 /* clear receiver, don't destroy the port */
909 (void)ipc_port_clear_receiver(port, FALSE);
910 assert(port->ip_in_pset == 0);
911 assert(port->ip_mscount == 0);
912
913 /* we assume the ref for pdrequest */
914 port->ip_pdrequest = IP_NULL;
915
916 /* make port be in limbo */
917 port->ip_receiver_name = MACH_PORT_NULL;
918 port->ip_destination = IP_NULL;
919 ip_unlock(port);
920
921 /* consumes our refs for port and pdrequest */
922 ipc_notify_port_destroyed(pdrequest, port);
923
924 goto drop_assertions;
925 }
926
927 port->ip_object.io_bits &= ~IO_BITS_ACTIVE;
928 port->ip_timestamp = ipc_port_timestamp();
929 nsrequest = port->ip_nsrequest;
930
931 /*
932 * The mach_msg_* paths don't hold a port lock, they only hold a
933 * reference to the port object. If a thread raced us and is now
934 * blocked waiting for message reception on this mqueue (or waiting
935 * for ipc_mqueue_full), it will never be woken up. We call
936 * ipc_port_clear_receiver() here, _after_ the port has been marked
937 * inactive, to wakeup any threads which may be blocked and ensure
938 * that no other thread can get lost waiting for a wake up on a
939 * port/mqueue that's been destroyed.
940 */
941 boolean_t reap_msgs = FALSE;
942 reap_msgs = ipc_port_clear_receiver(port, TRUE); /* marks mqueue inactive */
943 assert(port->ip_in_pset == 0);
944 assert(port->ip_mscount == 0);
945
946 /*
947 * If the port has a preallocated message buffer and that buffer
948 * is not inuse, free it. If it has an inuse one, then the kmsg
949 * free will detect that we freed the association and it can free it
950 * like a normal buffer.
951 *
952 * Once the port is marked inactive we don't need to keep it locked.
953 */
954 if (IP_PREALLOC(port)) {
955 ipc_port_t inuse_port;
956
957 kmsg = port->ip_premsg;
958 assert(kmsg != IKM_NULL);
959 inuse_port = ikm_prealloc_inuse_port(kmsg);
960 IP_CLEAR_PREALLOC(port, kmsg);
961 ip_unlock(port);
962 if (inuse_port != IP_NULL) {
963 assert(inuse_port == port);
964 } else {
965 ipc_kmsg_free(kmsg);
966 }
967 } else {
968 ip_unlock(port);
969 }
970
971 /* throw away no-senders request */
972 if (nsrequest != IP_NULL)
973 ipc_notify_send_once(nsrequest); /* consumes ref */
974
975 /*
976 * Reap any kmsg objects waiting to be destroyed.
977 * This must be done after we've released the port lock.
978 */
979 if (reap_msgs)
980 ipc_kmsg_reap_delayed();
981
982 mqueue = &port->ip_messages;
983
984 /* cleanup waitq related resources */
985 ipc_mqueue_deinit(mqueue);
986
987 /* generate dead-name notifications */
988 ipc_port_dnnotify(port);
989
990 ipc_kobject_destroy(port);
991
992 ip_release(port); /* consume caller's ref */
993
994 drop_assertions:
995 #if IMPORTANCE_INHERITANCE
996 if (release_imp_task != IIT_NULL) {
997 if (assertcnt > 0) {
998 assert(top);
999 self->ith_assertions = 0;
1000 assert(ipc_importance_task_is_any_receiver_type(release_imp_task));
1001 ipc_importance_task_drop_internal_assertion(release_imp_task, assertcnt);
1002 }
1003 ipc_importance_task_release(release_imp_task);
1004
1005 } else if (assertcnt > 0) {
1006 if (top) {
1007 self->ith_assertions = 0;
1008 release_imp_task = current_task()->task_imp_base;
1009 if (ipc_importance_task_is_any_receiver_type(release_imp_task)) {
1010 ipc_importance_task_drop_internal_assertion(release_imp_task, assertcnt);
1011 }
1012 }
1013 }
1014 #endif /* IMPORTANCE_INHERITANCE */
1015 }
1016
1017 /*
1018 * Routine: ipc_port_check_circularity
1019 * Purpose:
1020 * Check if queueing "port" in a message for "dest"
1021 * would create a circular group of ports and messages.
1022 *
1023 * If no circularity (FALSE returned), then "port"
1024 * is changed from "in limbo" to "in transit".
1025 *
1026 * That is, we want to set port->ip_destination == dest,
1027 * but guaranteeing that this doesn't create a circle
1028 * port->ip_destination->ip_destination->... == port
1029 *
1030 * Conditions:
1031 * No ports locked. References held for "port" and "dest".
1032 */
1033
1034 boolean_t
1035 ipc_port_check_circularity(
1036 ipc_port_t port,
1037 ipc_port_t dest)
1038 {
1039 #if IMPORTANCE_INHERITANCE
1040 /* adjust importance counts at the same time */
1041 return ipc_importance_check_circularity(port, dest);
1042 #else
1043 ipc_port_t base;
1044
1045 assert(port != IP_NULL);
1046 assert(dest != IP_NULL);
1047
1048 if (port == dest)
1049 return TRUE;
1050 base = dest;
1051
1052 /*
1053 * First try a quick check that can run in parallel.
1054 * No circularity if dest is not in transit.
1055 */
1056 ip_lock(port);
1057 if (ip_lock_try(dest)) {
1058 if (!ip_active(dest) ||
1059 (dest->ip_receiver_name != MACH_PORT_NULL) ||
1060 (dest->ip_destination == IP_NULL))
1061 goto not_circular;
1062
1063 /* dest is in transit; further checking necessary */
1064
1065 ip_unlock(dest);
1066 }
1067 ip_unlock(port);
1068
1069 ipc_port_multiple_lock(); /* massive serialization */
1070
1071 /*
1072 * Search for the end of the chain (a port not in transit),
1073 * acquiring locks along the way.
1074 */
1075
1076 for (;;) {
1077 ip_lock(base);
1078
1079 if (!ip_active(base) ||
1080 (base->ip_receiver_name != MACH_PORT_NULL) ||
1081 (base->ip_destination == IP_NULL))
1082 break;
1083
1084 base = base->ip_destination;
1085 }
1086
1087 /* all ports in chain from dest to base, inclusive, are locked */
1088
1089 if (port == base) {
1090 /* circularity detected! */
1091
1092 ipc_port_multiple_unlock();
1093
1094 /* port (== base) is in limbo */
1095
1096 assert(ip_active(port));
1097 assert(port->ip_receiver_name == MACH_PORT_NULL);
1098 assert(port->ip_destination == IP_NULL);
1099
1100 while (dest != IP_NULL) {
1101 ipc_port_t next;
1102
1103 /* dest is in transit or in limbo */
1104
1105 assert(ip_active(dest));
1106 assert(dest->ip_receiver_name == MACH_PORT_NULL);
1107
1108 next = dest->ip_destination;
1109 ip_unlock(dest);
1110 dest = next;
1111 }
1112
1113 return TRUE;
1114 }
1115
1116 /*
1117 * The guarantee: lock port while the entire chain is locked.
1118 * Once port is locked, we can take a reference to dest,
1119 * add port to the chain, and unlock everything.
1120 */
1121
1122 ip_lock(port);
1123 ipc_port_multiple_unlock();
1124
1125 not_circular:
1126
1127 /* port is in limbo */
1128
1129 assert(ip_active(port));
1130 assert(port->ip_receiver_name == MACH_PORT_NULL);
1131 assert(port->ip_destination == IP_NULL);
1132
1133 ip_reference(dest);
1134 port->ip_destination = dest;
1135
1136 /* now unlock chain */
1137
1138 ip_unlock(port);
1139
1140 for (;;) {
1141 if (dest == base)
1142 break;
1143
1144 /* port is in transit */
1145
1146 assert(ip_active(dest));
1147 assert(dest->ip_receiver_name == MACH_PORT_NULL);
1148 assert(dest->ip_destination != IP_NULL);
1149
1150 port = dest->ip_destination;
1151 ip_unlock(dest);
1152 dest = port;
1153 }
1154
1155 /* base is not in transit */
1156 assert(!ip_active(base) ||
1157 (base->ip_receiver_name != MACH_PORT_NULL) ||
1158 (base->ip_destination == IP_NULL));
1159
1160 ip_unlock(base);
1161
1162 return FALSE;
1163 #endif /* !IMPORTANCE_INHERITANCE */
1164 }
1165
1166 /*
1167 * Routine: ipc_port_impcount_delta
1168 * Purpose:
1169 * Adjust only the importance count associated with a port.
1170 * If there are any adjustments to be made to receiver task,
1171 * those are handled elsewhere.
1172 *
1173 * For now, be defensive during deductions to make sure the
1174 * impcount for the port doesn't underflow zero. This will
1175 * go away when the port boost addition is made atomic (see
1176 * note in ipc_port_importance_delta()).
1177 * Conditions:
1178 * The port is referenced and locked.
1179 * Nothing else is locked.
1180 */
1181 mach_port_delta_t
1182 ipc_port_impcount_delta(
1183 ipc_port_t port,
1184 mach_port_delta_t delta,
1185 ipc_port_t __unused base)
1186 {
1187 mach_port_delta_t absdelta;
1188
1189 if (!ip_active(port)) {
1190 return 0;
1191 }
1192
1193 /* adding/doing nothing is easy */
1194 if (delta >= 0) {
1195 port->ip_impcount += delta;
1196 return delta;
1197 }
1198
1199 absdelta = 0 - delta;
1200 if (port->ip_impcount >= absdelta) {
1201 port->ip_impcount -= absdelta;
1202 return delta;
1203 }
1204
1205 #if (DEVELOPMENT || DEBUG)
1206 if (port->ip_receiver_name != MACH_PORT_NULL) {
1207 task_t target_task = port->ip_receiver->is_task;
1208 ipc_importance_task_t target_imp = target_task->task_imp_base;
1209 const char *target_procname;
1210 int target_pid;
1211
1212 if (target_imp != IIT_NULL) {
1213 target_procname = target_imp->iit_procname;
1214 target_pid = target_imp->iit_bsd_pid;
1215 } else {
1216 target_procname = "unknown";
1217 target_pid = -1;
1218 }
1219 printf("Over-release of importance assertions for port 0x%x receiver pid %d (%s), "
1220 "dropping %d assertion(s) but port only has %d remaining.\n",
1221 port->ip_receiver_name,
1222 target_pid, target_procname,
1223 absdelta, port->ip_impcount);
1224
1225 } else if (base != IP_NULL) {
1226 task_t target_task = base->ip_receiver->is_task;
1227 ipc_importance_task_t target_imp = target_task->task_imp_base;
1228 const char *target_procname;
1229 int target_pid;
1230
1231 if (target_imp != IIT_NULL) {
1232 target_procname = target_imp->iit_procname;
1233 target_pid = target_imp->iit_bsd_pid;
1234 } else {
1235 target_procname = "unknown";
1236 target_pid = -1;
1237 }
1238 printf("Over-release of importance assertions for port 0x%lx "
1239 "enqueued on port 0x%x with receiver pid %d (%s), "
1240 "dropping %d assertion(s) but port only has %d remaining.\n",
1241 (unsigned long)VM_KERNEL_UNSLIDE_OR_PERM((uintptr_t)port),
1242 base->ip_receiver_name,
1243 target_pid, target_procname,
1244 absdelta, port->ip_impcount);
1245 }
1246 #endif
1247
1248 delta = 0 - port->ip_impcount;
1249 port->ip_impcount = 0;
1250 return delta;
1251 }
1252
1253 /*
1254 * Routine: ipc_port_importance_delta_internal
1255 * Purpose:
1256 * Adjust the importance count through the given port.
1257 * If the port is in transit, apply the delta throughout
1258 * the chain. Determine if there is a task at the
1259 * base of the chain that wants/needs to be adjusted,
1260 * and if so, apply the delta.
1261 * Conditions:
1262 * The port is referenced and locked on entry.
1263 * Importance may be locked.
1264 * Nothing else is locked.
1265 * The lock may be dropped on exit.
1266 * Returns TRUE if lock was dropped.
1267 */
1268 #if IMPORTANCE_INHERITANCE
1269
1270 boolean_t
1271 ipc_port_importance_delta_internal(
1272 ipc_port_t port,
1273 natural_t options,
1274 mach_port_delta_t *deltap,
1275 ipc_importance_task_t *imp_task)
1276 {
1277 ipc_port_t next, base;
1278 boolean_t dropped = FALSE;
1279
1280 *imp_task = IIT_NULL;
1281
1282 if (*deltap == 0)
1283 return FALSE;
1284
1285 assert(options == IPID_OPTION_NORMAL || options == IPID_OPTION_SENDPOSSIBLE);
1286
1287 base = port;
1288
1289 /* if port is in transit, have to search for end of chain */
1290 if (ip_active(port) &&
1291 port->ip_destination != IP_NULL &&
1292 port->ip_receiver_name == MACH_PORT_NULL) {
1293
1294 dropped = TRUE;
1295
1296 ip_unlock(port);
1297 ipc_port_multiple_lock(); /* massive serialization */
1298 ip_lock(base);
1299
1300 while(ip_active(base) &&
1301 base->ip_destination != IP_NULL &&
1302 base->ip_receiver_name == MACH_PORT_NULL) {
1303
1304 base = base->ip_destination;
1305 ip_lock(base);
1306 }
1307 ipc_port_multiple_unlock();
1308 }
1309
1310 /*
1311 * If the port lock is dropped because the port is in transit, there is a
1312 * race window where another thread can drain messages and/or fire a
1313 * send possible notification before we get here.
1314 *
1315 * We solve this race by checking to see if our caller armed the send
1316 * possible notification, whether or not it's been fired yet, and
1317 * whether or not we've already set the port's ip_spimportant bit. If
1318 * we don't need a send-possible boost, then we'll just apply a
1319 * harmless 0-boost to the port.
1320 */
1321 if (options & IPID_OPTION_SENDPOSSIBLE) {
1322 assert(*deltap == 1);
1323 if (port->ip_sprequests && port->ip_spimportant == 0)
1324 port->ip_spimportant = 1;
1325 else
1326 *deltap = 0;
1327 }
1328
1329 /* unlock down to the base, adjusting boost(s) at each level */
1330 for (;;) {
1331 *deltap = ipc_port_impcount_delta(port, *deltap, base);
1332
1333 if (port == base) {
1334 break;
1335 }
1336
1337 /* port is in transit */
1338 assert(port->ip_tempowner == 0);
1339 next = port->ip_destination;
1340 ip_unlock(port);
1341 port = next;
1342 }
1343
1344 /* find the task (if any) to boost according to the base */
1345 if (ip_active(base)) {
1346 if (base->ip_tempowner != 0) {
1347 if (IIT_NULL != base->ip_imp_task)
1348 *imp_task = base->ip_imp_task;
1349 /* otherwise don't boost */
1350
1351 } else if (base->ip_receiver_name != MACH_PORT_NULL) {
1352 ipc_space_t space = base->ip_receiver;
1353
1354 /* only spaces with boost-accepting tasks */
1355 if (space->is_task != TASK_NULL &&
1356 ipc_importance_task_is_any_receiver_type(space->is_task->task_imp_base)) {
1357 *imp_task = space->is_task->task_imp_base;
1358 }
1359 }
1360 }
1361
1362 /*
1363 * Only the base is locked. If we have to hold or drop task
1364 * importance assertions, we'll have to drop that lock as well.
1365 */
1366 if (*imp_task != IIT_NULL) {
1367 /* take a reference before unlocking base */
1368 ipc_importance_task_reference(*imp_task);
1369 }
1370
1371 if (dropped == TRUE) {
1372 ip_unlock(base);
1373 }
1374
1375 return dropped;
1376 }
1377 #endif /* IMPORTANCE_INHERITANCE */
1378
1379 /*
1380 * Routine: ipc_port_importance_delta
1381 * Purpose:
1382 * Adjust the importance count through the given port.
1383 * If the port is in transit, apply the delta throughout
1384 * the chain.
1385 *
1386 * If there is a task at the base of the chain that wants/needs
1387 * to be adjusted, apply the delta.
1388 * Conditions:
1389 * The port is referenced and locked on entry.
1390 * Nothing else is locked.
1391 * The lock may be dropped on exit.
1392 * Returns TRUE if lock was dropped.
1393 */
1394 #if IMPORTANCE_INHERITANCE
1395
1396 boolean_t
1397 ipc_port_importance_delta(
1398 ipc_port_t port,
1399 natural_t options,
1400 mach_port_delta_t delta)
1401 {
1402 ipc_importance_task_t imp_task = IIT_NULL;
1403 boolean_t dropped;
1404
1405 dropped = ipc_port_importance_delta_internal(port, options, &delta, &imp_task);
1406
1407 if (IIT_NULL == imp_task || delta == 0)
1408 return dropped;
1409
1410 if (!dropped)
1411 ip_unlock(port);
1412
1413 assert(ipc_importance_task_is_any_receiver_type(imp_task));
1414
1415 if (delta > 0)
1416 ipc_importance_task_hold_internal_assertion(imp_task, delta);
1417 else
1418 ipc_importance_task_drop_internal_assertion(imp_task, -delta);
1419
1420 ipc_importance_task_release(imp_task);
1421 return TRUE;
1422 }
1423 #endif /* IMPORTANCE_INHERITANCE */
1424
1425 /*
1426 * Routine: ipc_port_lookup_notify
1427 * Purpose:
1428 * Make a send-once notify port from a receive right.
1429 * Returns IP_NULL if name doesn't denote a receive right.
1430 * Conditions:
1431 * The space must be locked (read or write) and active.
1432 * Being the active space, we can rely on thread server_id
1433 * context to give us the proper server level sub-order
1434 * within the space.
1435 */
1436
1437 ipc_port_t
1438 ipc_port_lookup_notify(
1439 ipc_space_t space,
1440 mach_port_name_t name)
1441 {
1442 ipc_port_t port;
1443 ipc_entry_t entry;
1444
1445 assert(is_active(space));
1446
1447 entry = ipc_entry_lookup(space, name);
1448 if (entry == IE_NULL)
1449 return IP_NULL;
1450 if ((entry->ie_bits & MACH_PORT_TYPE_RECEIVE) == 0)
1451 return IP_NULL;
1452
1453 __IGNORE_WCASTALIGN(port = (ipc_port_t) entry->ie_object);
1454 assert(port != IP_NULL);
1455
1456 ip_lock(port);
1457 assert(ip_active(port));
1458 assert(port->ip_receiver_name == name);
1459 assert(port->ip_receiver == space);
1460
1461 ip_reference(port);
1462 port->ip_sorights++;
1463 ip_unlock(port);
1464
1465 return port;
1466 }
1467
1468 /*
1469 * Routine: ipc_port_make_send_locked
1470 * Purpose:
1471 * Make a naked send right from a receive right.
1472 *
1473 * Conditions:
1474 * port locked and active.
1475 */
1476 ipc_port_t
1477 ipc_port_make_send_locked(
1478 ipc_port_t port)
1479 {
1480 assert(ip_active(port));
1481 port->ip_mscount++;
1482 port->ip_srights++;
1483 ip_reference(port);
1484 return port;
1485 }
1486
1487 /*
1488 * Routine: ipc_port_make_send
1489 * Purpose:
1490 * Make a naked send right from a receive right.
1491 */
1492
1493 ipc_port_t
1494 ipc_port_make_send(
1495 ipc_port_t port)
1496 {
1497
1498 if (!IP_VALID(port))
1499 return port;
1500
1501 ip_lock(port);
1502 if (ip_active(port)) {
1503 port->ip_mscount++;
1504 port->ip_srights++;
1505 ip_reference(port);
1506 ip_unlock(port);
1507 return port;
1508 }
1509 ip_unlock(port);
1510 return IP_DEAD;
1511 }
1512
1513 /*
1514 * Routine: ipc_port_copy_send
1515 * Purpose:
1516 * Make a naked send right from another naked send right.
1517 * IP_NULL -> IP_NULL
1518 * IP_DEAD -> IP_DEAD
1519 * dead port -> IP_DEAD
1520 * live port -> port + ref
1521 * Conditions:
1522 * Nothing locked except possibly a space.
1523 */
1524
1525 ipc_port_t
1526 ipc_port_copy_send(
1527 ipc_port_t port)
1528 {
1529 ipc_port_t sright;
1530
1531 if (!IP_VALID(port))
1532 return port;
1533
1534 ip_lock(port);
1535 if (ip_active(port)) {
1536 assert(port->ip_srights > 0);
1537
1538 ip_reference(port);
1539 port->ip_srights++;
1540 sright = port;
1541 } else
1542 sright = IP_DEAD;
1543 ip_unlock(port);
1544
1545 return sright;
1546 }
1547
1548 /*
1549 * Routine: ipc_port_copyout_send
1550 * Purpose:
1551 * Copyout a naked send right (possibly null/dead),
1552 * or if that fails, destroy the right.
1553 * Conditions:
1554 * Nothing locked.
1555 */
1556
1557 mach_port_name_t
1558 ipc_port_copyout_send(
1559 ipc_port_t sright,
1560 ipc_space_t space)
1561 {
1562 mach_port_name_t name;
1563
1564 if (IP_VALID(sright)) {
1565 kern_return_t kr;
1566
1567 kr = ipc_object_copyout(space, (ipc_object_t) sright,
1568 MACH_MSG_TYPE_PORT_SEND, TRUE, &name);
1569 if (kr != KERN_SUCCESS) {
1570 ipc_port_release_send(sright);
1571
1572 if (kr == KERN_INVALID_CAPABILITY)
1573 name = MACH_PORT_DEAD;
1574 else
1575 name = MACH_PORT_NULL;
1576 }
1577 } else
1578 name = CAST_MACH_PORT_TO_NAME(sright);
1579
1580 return name;
1581 }
1582
1583 /*
1584 * Routine: ipc_port_release_send
1585 * Purpose:
1586 * Release a naked send right.
1587 * Consumes a ref for the port.
1588 * Conditions:
1589 * Nothing locked.
1590 */
1591
1592 void
1593 ipc_port_release_send(
1594 ipc_port_t port)
1595 {
1596 ipc_port_t nsrequest = IP_NULL;
1597 mach_port_mscount_t mscount;
1598
1599 if (!IP_VALID(port))
1600 return;
1601
1602 ip_lock(port);
1603
1604 assert(port->ip_srights > 0);
1605 port->ip_srights--;
1606
1607 if (!ip_active(port)) {
1608 ip_unlock(port);
1609 ip_release(port);
1610 return;
1611 }
1612
1613 if (port->ip_srights == 0 &&
1614 port->ip_nsrequest != IP_NULL) {
1615 nsrequest = port->ip_nsrequest;
1616 port->ip_nsrequest = IP_NULL;
1617 mscount = port->ip_mscount;
1618 ip_unlock(port);
1619 ip_release(port);
1620 ipc_notify_no_senders(nsrequest, mscount);
1621 } else {
1622 ip_unlock(port);
1623 ip_release(port);
1624 }
1625 }
1626
1627 /*
1628 * Routine: ipc_port_make_sonce_locked
1629 * Purpose:
1630 * Make a naked send-once right from a receive right.
1631 * Conditions:
1632 * The port is locked and active.
1633 */
1634
1635 ipc_port_t
1636 ipc_port_make_sonce_locked(
1637 ipc_port_t port)
1638 {
1639 assert(ip_active(port));
1640 port->ip_sorights++;
1641 ip_reference(port);
1642 return port;
1643 }
1644
1645 /*
1646 * Routine: ipc_port_make_sonce
1647 * Purpose:
1648 * Make a naked send-once right from a receive right.
1649 * Conditions:
1650 * The port is not locked.
1651 */
1652
1653 ipc_port_t
1654 ipc_port_make_sonce(
1655 ipc_port_t port)
1656 {
1657 if (!IP_VALID(port))
1658 return port;
1659
1660 ip_lock(port);
1661 if (ip_active(port)) {
1662 port->ip_sorights++;
1663 ip_reference(port);
1664 ip_unlock(port);
1665 return port;
1666 }
1667 ip_unlock(port);
1668 return IP_DEAD;
1669 }
1670
1671 /*
1672 * Routine: ipc_port_release_sonce
1673 * Purpose:
1674 * Release a naked send-once right.
1675 * Consumes a ref for the port.
1676 *
1677 * In normal situations, this is never used.
1678 * Send-once rights are only consumed when
1679 * a message (possibly a send-once notification)
1680 * is sent to them.
1681 * Conditions:
1682 * Nothing locked except possibly a space.
1683 */
1684
1685 void
1686 ipc_port_release_sonce(
1687 ipc_port_t port)
1688 {
1689 if (!IP_VALID(port))
1690 return;
1691
1692 ip_lock(port);
1693
1694 assert(port->ip_sorights > 0);
1695
1696 port->ip_sorights--;
1697
1698 ip_unlock(port);
1699 ip_release(port);
1700 }
1701
1702 /*
1703 * Routine: ipc_port_release_receive
1704 * Purpose:
1705 * Release a naked (in limbo or in transit) receive right.
1706 * Consumes a ref for the port; destroys the port.
1707 * Conditions:
1708 * Nothing locked.
1709 */
1710
1711 void
1712 ipc_port_release_receive(
1713 ipc_port_t port)
1714 {
1715 ipc_port_t dest;
1716
1717 if (!IP_VALID(port))
1718 return;
1719
1720 ip_lock(port);
1721 assert(ip_active(port));
1722 assert(port->ip_receiver_name == MACH_PORT_NULL);
1723 dest = port->ip_destination;
1724
1725 ipc_port_destroy(port); /* consumes ref, unlocks */
1726
1727 if (dest != IP_NULL)
1728 ip_release(dest);
1729 }
1730
1731 /*
1732 * Routine: ipc_port_alloc_special
1733 * Purpose:
1734 * Allocate a port in a special space.
1735 * The new port is returned with one ref.
1736 * If unsuccessful, IP_NULL is returned.
1737 * Conditions:
1738 * Nothing locked.
1739 */
1740
1741 ipc_port_t
1742 ipc_port_alloc_special(
1743 ipc_space_t space)
1744 {
1745 ipc_port_t port;
1746
1747 __IGNORE_WCASTALIGN(port = (ipc_port_t) io_alloc(IOT_PORT));
1748 if (port == IP_NULL)
1749 return IP_NULL;
1750
1751 #if MACH_ASSERT
1752 uintptr_t buf[IP_CALLSTACK_MAX];
1753 ipc_port_callstack_init_debug(&buf[0], IP_CALLSTACK_MAX);
1754 #endif /* MACH_ASSERT */
1755
1756 bzero((char *)port, sizeof(*port));
1757 io_lock_init(&port->ip_object);
1758 port->ip_references = 1;
1759 port->ip_object.io_bits = io_makebits(TRUE, IOT_PORT, 0);
1760
1761 ipc_port_init(port, space, 1);
1762
1763 #if MACH_ASSERT
1764 ipc_port_init_debug(port, &buf[0], IP_CALLSTACK_MAX);
1765 #endif /* MACH_ASSERT */
1766
1767 return port;
1768 }
1769
1770 /*
1771 * Routine: ipc_port_dealloc_special
1772 * Purpose:
1773 * Deallocate a port in a special space.
1774 * Consumes one ref for the port.
1775 * Conditions:
1776 * Nothing locked.
1777 */
1778
1779 void
1780 ipc_port_dealloc_special(
1781 ipc_port_t port,
1782 __assert_only ipc_space_t space)
1783 {
1784 ip_lock(port);
1785 assert(ip_active(port));
1786 // assert(port->ip_receiver_name != MACH_PORT_NULL);
1787 assert(port->ip_receiver == space);
1788
1789 /*
1790 * We clear ip_receiver_name and ip_receiver to simplify
1791 * the ipc_space_kernel check in ipc_mqueue_send.
1792 */
1793
1794 port->ip_receiver_name = MACH_PORT_NULL;
1795 port->ip_receiver = IS_NULL;
1796
1797 /* relevant part of ipc_port_clear_receiver */
1798 ipc_port_set_mscount(port, 0);
1799 port->ip_messages.imq_seqno = 0;
1800
1801 ipc_port_destroy(port);
1802 }
1803
1804 /*
1805 * Routine: ipc_port_finalize
1806 * Purpose:
1807 * Called on last reference deallocate to
1808 * free any remaining data associated with the
1809 * port.
1810 * Conditions:
1811 * Nothing locked.
1812 */
1813 void
1814 ipc_port_finalize(
1815 ipc_port_t port)
1816 {
1817 ipc_port_request_t requests = port->ip_requests;
1818
1819 assert(!ip_active(port));
1820 if (requests != IPR_NULL) {
1821 ipc_table_size_t its = requests->ipr_size;
1822 it_requests_free(its, requests);
1823 port->ip_requests = IPR_NULL;
1824 }
1825
1826 ipc_mqueue_deinit(&port->ip_messages);
1827
1828 #if MACH_ASSERT
1829 ipc_port_track_dealloc(port);
1830 #endif /* MACH_ASSERT */
1831 }
1832
1833 #if MACH_ASSERT
1834 #include <kern/machine.h>
1835
1836 /*
1837 * Keep a list of all allocated ports.
1838 * Allocation is intercepted via ipc_port_init;
1839 * deallocation is intercepted via io_free.
1840 */
1841 #if 0
1842 queue_head_t port_alloc_queue;
1843 lck_spin_t port_alloc_queue_lock;
1844 #endif
1845
1846 unsigned long port_count = 0;
1847 unsigned long port_count_warning = 20000;
1848 unsigned long port_timestamp = 0;
1849
1850 void db_port_stack_trace(
1851 ipc_port_t port);
1852 void db_ref(
1853 int refs);
1854 int db_port_walk(
1855 unsigned int verbose,
1856 unsigned int display,
1857 unsigned int ref_search,
1858 unsigned int ref_target);
1859
1860 /*
1861 * Initialize global state needed for run-time
1862 * port debugging.
1863 */
1864 void
1865 ipc_port_debug_init(void)
1866 {
1867 #if 0
1868 queue_init(&port_alloc_queue);
1869 lck_spin_init(&port_alloc_queue_lock, &ipc_lck_grp, &ipc_lck_attr);
1870 #endif
1871
1872 if (!PE_parse_boot_argn("ipc_portbt", &ipc_portbt, sizeof (ipc_portbt)))
1873 ipc_portbt = 0;
1874 }
1875
1876 #ifdef MACH_BSD
1877 extern int proc_pid(struct proc*);
1878 #endif /* MACH_BSD */
1879
1880 /*
1881 * Initialize all of the debugging state in a port.
1882 * Insert the port into a global list of all allocated ports.
1883 */
1884 void
1885 ipc_port_init_debug(
1886 ipc_port_t port,
1887 uintptr_t *callstack,
1888 unsigned int callstack_max)
1889 {
1890 unsigned int i;
1891
1892 port->ip_thread = current_thread();
1893 port->ip_timetrack = port_timestamp++;
1894 for (i = 0; i < callstack_max; ++i)
1895 port->ip_callstack[i] = callstack[i];
1896 for (i = 0; i < IP_NSPARES; ++i)
1897 port->ip_spares[i] = 0;
1898
1899 #ifdef MACH_BSD
1900 task_t task = current_task();
1901 if (task != TASK_NULL) {
1902 struct proc* proc = (struct proc*) get_bsdtask_info(task);
1903 if (proc)
1904 port->ip_spares[0] = proc_pid(proc);
1905 }
1906 #endif /* MACH_BSD */
1907
1908 #if 0
1909 lck_spin_lock(&port_alloc_queue_lock);
1910 ++port_count;
1911 if (port_count_warning > 0 && port_count >= port_count_warning)
1912 assert(port_count < port_count_warning);
1913 queue_enter(&port_alloc_queue, port, ipc_port_t, ip_port_links);
1914 lck_spin_unlock(&port_alloc_queue_lock);
1915 #endif
1916 }
1917
1918 /*
1919 * Routine: ipc_port_callstack_init_debug
1920 * Purpose:
1921 * Calls the machine-dependent routine to
1922 * fill in an array with up to IP_CALLSTACK_MAX
1923 * levels of return pc information
1924 * Conditions:
1925 * May block (via copyin)
1926 */
1927 void
1928 ipc_port_callstack_init_debug(
1929 uintptr_t *callstack,
1930 unsigned int callstack_max)
1931 {
1932 unsigned int i;
1933
1934 /* guarantee the callstack is initialized */
1935 for (i=0; i < callstack_max; i++)
1936 callstack[i] = 0;
1937
1938 if (ipc_portbt)
1939 machine_callstack(callstack, callstack_max);
1940 }
1941
1942 /*
1943 * Remove a port from the queue of allocated ports.
1944 * This routine should be invoked JUST prior to
1945 * deallocating the actual memory occupied by the port.
1946 */
1947 #if 1
1948 void
1949 ipc_port_track_dealloc(
1950 __unused ipc_port_t port)
1951 {
1952 }
1953 #else
1954 void
1955 ipc_port_track_dealloc(
1956 ipc_port_t port)
1957 {
1958 lck_spin_lock(&port_alloc_queue_lock);
1959 assert(port_count > 0);
1960 --port_count;
1961 queue_remove(&port_alloc_queue, port, ipc_port_t, ip_port_links);
1962 lck_spin_unlock(&port_alloc_queue_lock);
1963 }
1964 #endif
1965
1966
1967 #endif /* MACH_ASSERT */