1 /*
2 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_FREE_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 * NOTICE: This file was modified by McAfee Research in 2004 to introduce
58 * support for mandatory and extensible security protections. This notice
59 * is included in support of clause 2.2 (b) of the Apple Public License,
60 * Version 2.0.
61 */
62 /*
63 */
64 /*
65 * File: ipc/ipc_port.c
66 * Author: Rich Draves
67 * Date: 1989
68 *
69 * Functions to manipulate IPC ports.
70 */
71
72 #include <zone_debug.h>
73 #include <mach_assert.h>
74
75 #include <mach/port.h>
76 #include <mach/kern_return.h>
77 #include <kern/lock.h>
78 #include <kern/ipc_kobject.h>
79 #include <kern/thread.h>
80 #include <kern/misc_protos.h>
81 #include <kern/wait_queue.h>
82 #include <ipc/ipc_entry.h>
83 #include <ipc/ipc_space.h>
84 #include <ipc/ipc_object.h>
85 #include <ipc/ipc_port.h>
86 #include <ipc/ipc_pset.h>
87 #include <ipc/ipc_kmsg.h>
88 #include <ipc/ipc_mqueue.h>
89 #include <ipc/ipc_notify.h>
90 #include <ipc/ipc_table.h>
91
92 #include <security/mac_mach_internal.h>
93
94 #include <string.h>
95
96 decl_lck_mtx_data(, ipc_port_multiple_lock_data)
97 lck_mtx_ext_t ipc_port_multiple_lock_data_ext;
98 ipc_port_timestamp_t ipc_port_timestamp_data;
99 int ipc_portbt;
100
101 #if MACH_ASSERT
102 void ipc_port_init_debug(
103 ipc_port_t port,
104 uintptr_t *callstack,
105 unsigned int callstack_max);
106
107 void ipc_port_callstack_init_debug(
108 uintptr_t *callstack,
109 unsigned int callstack_max);
110
111 #endif /* MACH_ASSERT */
112
113 void
114 ipc_port_release(ipc_port_t port)
115 {
116 ip_release(port);
117 }
118
119 void
120 ipc_port_reference(ipc_port_t port)
121 {
122 ip_reference(port);
123 }
124
125 /*
126 * Routine: ipc_port_timestamp
127 * Purpose:
128 * Retrieve a timestamp value.
129 */
130
131 ipc_port_timestamp_t
132 ipc_port_timestamp(void)
133 {
134 return OSIncrementAtomic(&ipc_port_timestamp_data);
135 }
136
137 /*
138 * Routine: ipc_port_request_alloc
139 * Purpose:
140 * Try to allocate a request slot.
141 * If successful, the allocated request index
142 * is returned through *indexp.
143 * Conditions:
144 * The port is locked and active.
145 * Returns:
146 * KERN_SUCCESS A request index was found.
147 * KERN_NO_SPACE No index allocated.
148 */
149
150 #if IMPORTANCE_INHERITANCE
151 kern_return_t
152 ipc_port_request_alloc(
153 ipc_port_t port,
154 mach_port_name_t name,
155 ipc_port_t soright,
156 boolean_t send_possible,
157 boolean_t immediate,
158 ipc_port_request_index_t *indexp,
159 boolean_t *importantp)
160 #else
161 kern_return_t
162 ipc_port_request_alloc(
163 ipc_port_t port,
164 mach_port_name_t name,
165 ipc_port_t soright,
166 boolean_t send_possible,
167 boolean_t immediate,
168 ipc_port_request_index_t *indexp)
169 #endif /* IMPORTANCE_INHERITANCE */
170 {
171 ipc_port_request_t ipr, table;
172 ipc_port_request_index_t index;
173 uintptr_t mask = 0;
174
175 #if IMPORTANCE_INHERITANCE
176 *importantp = FALSE;
177 #endif /* IMPORTANCE_INHERITANCE */
178
179 assert(ip_active(port));
180 assert(name != MACH_PORT_NULL);
181 assert(soright != IP_NULL);
182
183 table = port->ip_requests;
184
185 if (table == IPR_NULL)
186 return KERN_NO_SPACE;
187
188 index = table->ipr_next;
189 if (index == 0)
190 return KERN_NO_SPACE;
191
192 ipr = &table[index];
193 assert(ipr->ipr_name == MACH_PORT_NULL);
194
195 table->ipr_next = ipr->ipr_next;
196 ipr->ipr_name = name;
197
198 if (send_possible) {
199 mask |= IPR_SOR_SPREQ_MASK;
200 if (immediate) {
201 mask |= IPR_SOR_SPARM_MASK;
202 if (port->ip_sprequests == 0) {
203 port->ip_sprequests = 1;
204 #if IMPORTANCE_INHERITANCE
205 if (port->ip_impdonation != 0 &&
206 port->ip_spimportant == 0 &&
207 (task_is_importance_donor(current_task()))) {
208 port->ip_spimportant = 1;
209 *importantp = TRUE;
210 }
211 #endif /* IMPORTANCE_INHERITANCE */
212 }
213 }
214 }
215 ipr->ipr_soright = IPR_SOR_MAKE(soright, mask);
216
217 *indexp = index;
218
219 return KERN_SUCCESS;
220 }
221
222 /*
223 * Routine: ipc_port_request_grow
224 * Purpose:
225 * Grow a port's table of requests.
226 * Conditions:
227 * The port must be locked and active.
228 * Nothing else locked; will allocate memory.
229 * Upon return the port is unlocked.
230 * Returns:
231 * KERN_SUCCESS Grew the table.
232 * KERN_SUCCESS Somebody else grew the table.
233 * KERN_SUCCESS The port died.
234 * KERN_RESOURCE_SHORTAGE Couldn't allocate new table.
235 * KERN_NO_SPACE Couldn't grow to the desired size.
236 */
237
238 kern_return_t
239 ipc_port_request_grow(
240 ipc_port_t port,
241 ipc_table_elems_t target_size)
242 {
243 ipc_table_size_t its;
244 ipc_port_request_t otable, ntable;
245
246 assert(ip_active(port));
247
248 otable = port->ip_requests;
249 if (otable == IPR_NULL)
250 its = &ipc_table_requests[0];
251 else
252 its = otable->ipr_size + 1;
253
254 if (target_size != ITS_SIZE_NONE) {
255 if ((otable != IPR_NULL) &&
256 (target_size <= otable->ipr_size->its_size)) {
257 ip_unlock(port);
258 return KERN_SUCCESS;
259 }
260 while ((its->its_size) && (its->its_size < target_size)) {
261 its++;
262 }
263 if (its->its_size == 0) {
264 ip_unlock(port);
265 return KERN_NO_SPACE;
266 }
267 }
268
269 ip_reference(port);
270 ip_unlock(port);
271
272 if ((its->its_size == 0) ||
273 ((ntable = it_requests_alloc(its)) == IPR_NULL)) {
274 ip_release(port);
275 return KERN_RESOURCE_SHORTAGE;
276 }
277
278 ip_lock(port);
279
280 /*
281 * Check that port is still active and that nobody else
282 * has slipped in and grown the table on us. Note that
283 * just checking if the current table pointer == otable
284 * isn't sufficient; must check ipr_size.
285 */
286
287 if (ip_active(port) && (port->ip_requests == otable) &&
288 ((otable == IPR_NULL) || (otable->ipr_size+1 == its))) {
289 ipc_table_size_t oits;
290 ipc_table_elems_t osize, nsize;
291 ipc_port_request_index_t free, i;
292
293 /* copy old table to new table */
294
295 if (otable != IPR_NULL) {
296 oits = otable->ipr_size;
297 osize = oits->its_size;
298 free = otable->ipr_next;
299
300 (void) memcpy((void *)(ntable + 1),
301 (const void *)(otable + 1),
302 (osize - 1) * sizeof(struct ipc_port_request));
303 } else {
304 osize = 1;
305 oits = 0;
306 free = 0;
307 }
308
309 nsize = its->its_size;
310 assert(nsize > osize);
311
312 /* add new elements to the new table's free list */
313
314 for (i = osize; i < nsize; i++) {
315 ipc_port_request_t ipr = &ntable[i];
316
317 ipr->ipr_name = MACH_PORT_NULL;
318 ipr->ipr_next = free;
319 free = i;
320 }
321
322 ntable->ipr_next = free;
323 ntable->ipr_size = its;
324 port->ip_requests = ntable;
325 ip_unlock(port);
326 ip_release(port);
327
328 if (otable != IPR_NULL) {
329 it_requests_free(oits, otable);
330 }
331 } else {
332 ip_unlock(port);
333 ip_release(port);
334 it_requests_free(its, ntable);
335 }
336
337 return KERN_SUCCESS;
338 }
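/*
 * Editorial sketch, not part of the original source: a caller that needs a
 * request slot typically alternates ipc_port_request_alloc() and
 * ipc_port_request_grow() until a slot is found, re-locking and
 * re-validating the port after each grow (which unlocks it). Shown with the
 * non-IMPORTANCE_INHERITANCE signature and without full error handling; the
 * surrounding ipc_right layer is assumed to follow a similar pattern.
 */
#if 0	/* illustrative only */
	ipc_port_request_index_t index;
	kern_return_t kr;

	ip_lock(port);
	for (;;) {
		kr = ipc_port_request_alloc(port, name, soright,
		    send_possible, immediate, &index);
		if (kr != KERN_NO_SPACE)
			break;			/* slot found (KERN_SUCCESS) */

		kr = ipc_port_request_grow(port, ITS_SIZE_NONE);
		if (kr != KERN_SUCCESS)
			return kr;		/* port already unlocked */

		ip_lock(port);
		if (!ip_active(port)) {		/* port may have died while unlocked */
			ip_unlock(port);
			return KERN_INVALID_RIGHT;
		}
	}
	ip_unlock(port);
#endif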
339
340 /*
341 * Routine: ipc_port_request_sparm
342 * Purpose:
343 * Arm delayed send-possible request.
344 * Conditions:
345 * The port must be locked and active.
346 *
347 * Returns TRUE if the request was armed
348 * (or, with IMPORTANCE_INHERITANCE, armed with importance).
349 */
350
351 #if IMPORTANCE_INHERITANCE
352 boolean_t
353 ipc_port_request_sparm(
354 ipc_port_t port,
355 __assert_only mach_port_name_t name,
356 ipc_port_request_index_t index,
357 mach_msg_option_t option)
358 #else
359 boolean_t
360 ipc_port_request_sparm(
361 ipc_port_t port,
362 __assert_only mach_port_name_t name,
363 ipc_port_request_index_t index)
364 #endif /* IMPORTANCE_INHERITANCE */
365 {
366 if (index != IE_REQ_NONE) {
367 ipc_port_request_t ipr, table;
368
369 assert(ip_active(port));
370
371 table = port->ip_requests;
372 assert(table != IPR_NULL);
373
374 ipr = &table[index];
375 assert(ipr->ipr_name == name);
376
377 if (IPR_SOR_SPREQ(ipr->ipr_soright)) {
378 ipr->ipr_soright = IPR_SOR_MAKE(ipr->ipr_soright, IPR_SOR_SPARM_MASK);
379 port->ip_sprequests = 1;
380 #if IMPORTANCE_INHERITANCE
381 if (((option & MACH_SEND_NOIMPORTANCE) == 0) &&
382 (port->ip_impdonation != 0) &&
383 (port->ip_spimportant == 0) &&
384 (((option & MACH_SEND_IMPORTANCE) != 0) ||
385 (task_is_importance_donor(current_task())))) {
386 port->ip_spimportant = 1;
387 return TRUE;
388 }
389 #else
390 return TRUE;
391 #endif /* IMPORTANCE_INHERITANCE */
392 }
393 }
394 return FALSE;
395 }
396
397 /*
398 * Routine: ipc_port_request_type
399 * Purpose:
400 * Determine the type(s) of port requests enabled for a name.
401 * Conditions:
402 * The port must be locked or inactive (to avoid table growth).
403 * The index must not be IE_REQ_NONE and must correspond to the name in question.
404 */
405 mach_port_type_t
406 ipc_port_request_type(
407 ipc_port_t port,
408 __assert_only mach_port_name_t name,
409 ipc_port_request_index_t index)
410 {
411 ipc_port_request_t ipr, table;
412 mach_port_type_t type = 0;
413
414 table = port->ip_requests;
415 assert (table != IPR_NULL);
416
417 assert(index != IE_REQ_NONE);
418 ipr = &table[index];
419 assert(ipr->ipr_name == name);
420
421 if (IP_VALID(IPR_SOR_PORT(ipr->ipr_soright))) {
422 type |= MACH_PORT_TYPE_DNREQUEST;
423
424 if (IPR_SOR_SPREQ(ipr->ipr_soright)) {
425 type |= MACH_PORT_TYPE_SPREQUEST;
426
427 if (!IPR_SOR_SPARMED(ipr->ipr_soright)) {
428 type |= MACH_PORT_TYPE_SPREQUEST_DELAYED;
429 }
430 }
431 }
432 return type;
433 }
434
435 /*
436 * Routine: ipc_port_request_cancel
437 * Purpose:
438 * Cancel a dead-name/send-possible request and return the send-once right.
439 * Conditions:
440 * The port must be locked and active.
441 * The index must not be IE_REQ_NONE and must correspond with the name.
442 */
443
444 ipc_port_t
445 ipc_port_request_cancel(
446 ipc_port_t port,
447 __assert_only mach_port_name_t name,
448 ipc_port_request_index_t index)
449 {
450 ipc_port_request_t ipr, table;
451 ipc_port_t request = IP_NULL;
452
453 assert(ip_active(port));
454 table = port->ip_requests;
455 assert(table != IPR_NULL);
456
457 assert (index != IE_REQ_NONE);
458 ipr = &table[index];
459 assert(ipr->ipr_name == name);
460 request = IPR_SOR_PORT(ipr->ipr_soright);
461
462 /* return ipr to the free list inside the table */
463 ipr->ipr_name = MACH_PORT_NULL;
464 ipr->ipr_next = table->ipr_next;
465 table->ipr_next = index;
466
467 return request;
468 }
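/*
 * Editorial sketch, not part of the original source: the send-once right
 * handed back by ipc_port_request_cancel() still carries its reference, so
 * the caller must consume it once the port is unlocked - for example by
 * posting a send-once notification on it, or by returning it to the
 * original requester.
 */
#if 0	/* illustrative only */
	ipc_port_t previous;

	previous = ipc_port_request_cancel(port, name, index);
	ip_unlock(port);
	if (previous != IP_NULL)
		ipc_notify_send_once(previous);	/* one way to consume the right */
#endif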
469
470 /*
471 * Routine: ipc_port_pdrequest
472 * Purpose:
473 * Make a port-deleted request, returning the
474 * previously registered send-once right.
475 * Just cancels the previous request if notify is IP_NULL.
476 * Conditions:
477 * The port is locked and active. It is unlocked.
478 * Consumes a ref for notify (if non-null), and
479 * returns previous with a ref (if non-null).
480 */
481
482 void
483 ipc_port_pdrequest(
484 ipc_port_t port,
485 ipc_port_t notify,
486 ipc_port_t *previousp)
487 {
488 ipc_port_t previous;
489
490 assert(ip_active(port));
491
492 previous = port->ip_pdrequest;
493 port->ip_pdrequest = notify;
494 ip_unlock(port);
495
496 *previousp = previous;
497 }
498
499 /*
500 * Routine: ipc_port_nsrequest
501 * Purpose:
502 * Make a no-senders request, returning the
503 * previously registered send-once right.
504 * Just cancels the previous request if notify is IP_NULL.
505 * Conditions:
506 * The port is locked and active. It is unlocked.
507 * Consumes a ref for notify (if non-null), and
508 * returns previous with a ref (if non-null).
509 */
510
511 void
512 ipc_port_nsrequest(
513 ipc_port_t port,
514 mach_port_mscount_t sync,
515 ipc_port_t notify,
516 ipc_port_t *previousp)
517 {
518 ipc_port_t previous;
519 mach_port_mscount_t mscount;
520
521 assert(ip_active(port));
522
523 previous = port->ip_nsrequest;
524 mscount = port->ip_mscount;
525
526 if ((port->ip_srights == 0) && (sync <= mscount) &&
527 (notify != IP_NULL)) {
528 port->ip_nsrequest = IP_NULL;
529 ip_unlock(port);
530 ipc_notify_no_senders(notify, mscount);
531 } else {
532 port->ip_nsrequest = notify;
533 ip_unlock(port);
534 }
535
536 *previousp = previous;
537 }
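/*
 * Editorial sketch, not part of the original source: registering a
 * no-senders request means handing the port a send-once right to the
 * notification destination. If the port is already past the sync point
 * with no senders, the notification fires inside ipc_port_nsrequest()
 * itself; the previously registered right (if any) comes back to the
 * caller, which may consume it or return it to the requester. notify_port
 * below is a hypothetical destination port.
 */
#if 0	/* illustrative only */
	ipc_port_t notify, previous;

	notify = ipc_port_make_sonce(notify_port);
	ip_lock(port);
	/* port assumed active here */
	ipc_port_nsrequest(port, sync, notify, &previous);	/* unlocks port */
	if (previous != IP_NULL)
		ipc_port_release_sonce(previous);
#endif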
538
539
540 /*
541 * Routine: ipc_port_clear_receiver
542 * Purpose:
543 * Prepares a receive right for transmission/destruction.
544 * Conditions:
545 * The port is locked and active.
546 */
547
548 void
549 ipc_port_clear_receiver(
550 ipc_port_t port,
551 queue_t links)
552 {
553 spl_t s;
554
555 assert(ip_active(port));
556
557 /*
558 * pull ourselves from any sets.
559 */
560 if (port->ip_pset_count != 0) {
561 ipc_pset_remove_from_all(port, links);
562 assert(port->ip_pset_count == 0);
563 }
564
565 /*
566 * Send anyone waiting on the port's queue directly away.
567 * Also clear the mscount and seqno.
568 */
569 s = splsched();
570 imq_lock(&port->ip_messages);
571 ipc_mqueue_changed(&port->ip_messages);
572 ipc_port_set_mscount(port, 0);
573 port->ip_messages.imq_seqno = 0;
574 port->ip_context = port->ip_guarded = port->ip_strict_guard = 0;
575 imq_unlock(&port->ip_messages);
576 splx(s);
577 }
578
579 /*
580 * Routine: ipc_port_init
581 * Purpose:
582 * Initializes a newly-allocated port.
583 * Doesn't touch the ip_object fields.
584 */
585
586 void
587 ipc_port_init(
588 ipc_port_t port,
589 ipc_space_t space,
590 mach_port_name_t name)
591 {
592 /* port->ip_kobject doesn't have to be initialized */
593
594 port->ip_receiver = space;
595 port->ip_receiver_name = name;
596
597 port->ip_mscount = 0;
598 port->ip_srights = 0;
599 port->ip_sorights = 0;
600
601 port->ip_nsrequest = IP_NULL;
602 port->ip_pdrequest = IP_NULL;
603 port->ip_requests = IPR_NULL;
604
605 port->ip_pset_count = 0;
606 port->ip_premsg = IKM_NULL;
607 port->ip_context = 0;
608
609 port->ip_sprequests = 0;
610 port->ip_spimportant = 0;
611 port->ip_impdonation = 0;
612 port->ip_tempowner = 0;
613 port->ip_taskptr = 0;
614
615 port->ip_guarded = 0;
616 port->ip_strict_guard = 0;
617 port->ip_impcount = 0;
618
619 port->ip_reserved = 0;
620
621 ipc_mqueue_init(&port->ip_messages, FALSE /* set */);
622 }
623
624 /*
625 * Routine: ipc_port_alloc
626 * Purpose:
627 * Allocate a port.
628 * Conditions:
629 * Nothing locked. If successful, the port is returned
630 * locked. (The caller doesn't have a reference.)
631 * Returns:
632 * KERN_SUCCESS The port is allocated.
633 * KERN_INVALID_TASK The space is dead.
634 * KERN_NO_SPACE No room for an entry in the space.
635 * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
636 */
637
638 kern_return_t
639 ipc_port_alloc(
640 ipc_space_t space,
641 mach_port_name_t *namep,
642 ipc_port_t *portp)
643 {
644 ipc_port_t port;
645 mach_port_name_t name;
646 kern_return_t kr;
647
648 #if MACH_ASSERT
649 uintptr_t buf[IP_CALLSTACK_MAX];
650 ipc_port_callstack_init_debug(&buf[0], IP_CALLSTACK_MAX);
651 #endif /* MACH_ASSERT */
652
653 kr = ipc_object_alloc(space, IOT_PORT,
654 MACH_PORT_TYPE_RECEIVE, 0,
655 &name, (ipc_object_t *) &port);
656 if (kr != KERN_SUCCESS)
657 return kr;
658
659 /* port and space are locked */
660 ipc_port_init(port, space, name);
661
662 #if MACH_ASSERT
663 ipc_port_init_debug(port, &buf[0], IP_CALLSTACK_MAX);
664 #endif /* MACH_ASSERT */
665
666 /* unlock space after init */
667 is_write_unlock(space);
668
669 #if CONFIG_MACF_MACH
670 task_t issuer = current_task();
671 tasklabel_lock2 (issuer, space->is_task);
672 mac_port_label_associate(&issuer->maclabel, &space->is_task->maclabel,
673 &port->ip_label);
674 tasklabel_unlock2 (issuer, space->is_task);
675 #endif
676
677 *namep = name;
678 *portp = port;
679
680 return KERN_SUCCESS;
681 }
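/*
 * Editorial sketch, not part of the original source: per the conditions
 * above, a successful ipc_port_alloc() hands back the new port locked and
 * without an extra reference, so the caller finishes its setup under the
 * lock and then simply unlocks.
 */
#if 0	/* illustrative only */
	mach_port_name_t name;
	ipc_port_t port;
	kern_return_t kr;

	kr = ipc_port_alloc(space, &name, &port);
	if (kr != KERN_SUCCESS)
		return kr;
	/* port is locked; finish any per-port setup here */
	ip_unlock(port);
#endif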
682
683 /*
684 * Routine: ipc_port_alloc_name
685 * Purpose:
686 * Allocate a port, with a specific name.
687 * Conditions:
688 * Nothing locked. If successful, the port is returned
689 * locked. (The caller doesn't have a reference.)
690 * Returns:
691 * KERN_SUCCESS The port is allocated.
692 * KERN_INVALID_TASK The space is dead.
693 * KERN_NAME_EXISTS The name already denotes a right.
694 * KERN_RESOURCE_SHORTAGE Couldn't allocate memory.
695 */
696
697 kern_return_t
698 ipc_port_alloc_name(
699 ipc_space_t space,
700 mach_port_name_t name,
701 ipc_port_t *portp)
702 {
703 ipc_port_t port;
704 kern_return_t kr;
705
706 #if MACH_ASSERT
707 uintptr_t buf[IP_CALLSTACK_MAX];
708 ipc_port_callstack_init_debug(&buf[0], IP_CALLSTACK_MAX);
709 #endif /* MACH_ASSERT */
710
711 kr = ipc_object_alloc_name(space, IOT_PORT,
712 MACH_PORT_TYPE_RECEIVE, 0,
713 name, (ipc_object_t *) &port);
714 if (kr != KERN_SUCCESS)
715 return kr;
716
717 /* port is locked */
718
719 ipc_port_init(port, space, name);
720
721 #if MACH_ASSERT
722 ipc_port_init_debug(port, &buf[0], IP_CALLSTACK_MAX);
723 #endif /* MACH_ASSERT */
724
725 #if CONFIG_MACF_MACH
726 task_t issuer = current_task();
727 tasklabel_lock2 (issuer, space->is_task);
728 mac_port_label_associate(&issuer->maclabel, &space->is_task->maclabel,
729 &port->ip_label);
730 tasklabel_unlock2 (issuer, space->is_task);
731 #endif
732
733 *portp = port;
734
735 return KERN_SUCCESS;
736 }
737
738 /*
739 * Routine: ipc_port_spnotify
740 * Purpose:
741 * Generate send-possible port notifications.
742 * Conditions:
743 * Nothing locked, reference held on port.
744 */
745 void
746 ipc_port_spnotify(
747 ipc_port_t port)
748 {
749 ipc_port_request_index_t index = 0;
750 ipc_table_elems_t size = 0;
751 #if IMPORTANCE_INHERITANCE
752 boolean_t dropassert = FALSE;
753 #endif /* IMPORTANCE_INHERITANCE */
754
755 /*
756 * If the port has no send-possible request
757 * armed, don't bother to lock the port.
758 */
759 if (port->ip_sprequests == 0)
760 return;
761
762 ip_lock(port);
763
764 #if IMPORTANCE_INHERITANCE
765 if (port->ip_spimportant != 0) {
766 port->ip_spimportant = 0;
767 port->ip_impcount--;
768 dropassert = TRUE;
769 }
770 #endif /* IMPORTANCE_INHERITANCE */
771
772 if (port->ip_sprequests == 0) {
773 ip_unlock(port);
774 goto out;
775 }
776 port->ip_sprequests = 0;
777
778 revalidate:
779 if (ip_active(port)) {
780 ipc_port_request_t requests;
781
782 /* table may change each time port unlocked (reload) */
783 requests = port->ip_requests;
784 assert(requests != IPR_NULL);
785
786 /*
787 * no need to go beyond the table size as it was when we
788 * first entered - later slots hold future notifications.
789 */
790 if (size == 0)
791 size = requests->ipr_size->its_size;
792
793 /* no need to backtrack either */
794 while (++index < size) {
795 ipc_port_request_t ipr = &requests[index];
796 mach_port_name_t name = ipr->ipr_name;
797 ipc_port_t soright = IPR_SOR_PORT(ipr->ipr_soright);
798 boolean_t armed = IPR_SOR_SPARMED(ipr->ipr_soright);
799
800 if (MACH_PORT_VALID(name) && armed && IP_VALID(soright)) {
801 /* claim send-once right - slot still inuse */
802 ipr->ipr_soright = IP_NULL;
803 ip_unlock(port);
804
805 ipc_notify_send_possible(soright, name);
806
807 ip_lock(port);
808 goto revalidate;
809 }
810 }
811 }
812 ip_unlock(port);
813 out:
814 #if IMPORTANCE_INHERITANCE
815 if ((dropassert == TRUE) && (current_task()->imp_receiver != 0)) {
816 /* drop internal assertion and no task lock held */
817 task_importance_drop_internal_assertion(current_task(), 1);
818 }
819 #endif /* IMPORTANCE_INHERITANCE */
820 return;
821 }
822
823 /*
824 * Routine: ipc_port_dnnotify
825 * Purpose:
826 * Generate dead name notifications for
827 * all outstanding dead-name and send-
828 * possible requests.
829 * Conditions:
830 * Nothing locked.
831 * Port must be inactive.
832 * Reference held on port.
833 */
834 void
835 ipc_port_dnnotify(
836 ipc_port_t port)
837 {
838 ipc_port_request_t requests = port->ip_requests;
839
840 assert(!ip_active(port));
841 if (requests != IPR_NULL) {
842 ipc_table_size_t its = requests->ipr_size;
843 ipc_table_elems_t size = its->its_size;
844 ipc_port_request_index_t index;
845 for (index = 1; index < size; index++) {
846 ipc_port_request_t ipr = &requests[index];
847 mach_port_name_t name = ipr->ipr_name;
848 ipc_port_t soright = IPR_SOR_PORT(ipr->ipr_soright);
849
850 if (MACH_PORT_VALID(name) && IP_VALID(soright)) {
851 ipc_notify_dead_name(soright, name);
852 }
853 }
854 }
855 }
856
857
858 /*
859 * Routine: ipc_port_destroy
860 * Purpose:
861 * Destroys a port. Cleans up queued messages.
862 *
863 * If the port has a backup, it doesn't get destroyed,
864 * but is sent in a port-destroyed notification to the backup.
865 * Conditions:
866 * The port is locked and alive; nothing else locked.
867 * The caller has a reference, which is consumed.
868 * Afterwards, the port is unlocked and dead.
869 */
870
871 void
872 ipc_port_destroy(
873 ipc_port_t port)
874 {
875 ipc_port_t pdrequest, nsrequest;
876 ipc_mqueue_t mqueue;
877 ipc_kmsg_t kmsg;
878
879 #if IMPORTANCE_INHERITANCE
880 task_t release_imp_task = TASK_NULL;
881 thread_t self = current_thread();
882 boolean_t top = (self->ith_assertions == 0);
883 natural_t assertcnt = 0;
884 #endif /* IMPORTANCE_INHERITANCE */
885
886 assert(ip_active(port));
887 /* port->ip_receiver_name is garbage */
888 /* port->ip_receiver/port->ip_destination is garbage */
889 assert(port->ip_pset_count == 0);
890 assert(port->ip_mscount == 0);
891
892 /* check for a backup port */
893 pdrequest = port->ip_pdrequest;
894
895 #if IMPORTANCE_INHERITANCE
896 /* determine how many assertions to drop and from whom */
897 if (port->ip_tempowner != 0) {
898 assert(top);
899 if (port->ip_taskptr != 0) {
900 release_imp_task = port->ip_imp_task;
901 port->ip_imp_task = TASK_NULL;
902 port->ip_taskptr = 0;
903 assertcnt = port->ip_impcount;
904 }
905 /* Otherwise, nothing to drop */
906 } else {
907 assert(port->ip_taskptr == 0);
908 assertcnt = port->ip_impcount;
909 if (pdrequest != IP_NULL)
910 /* mark in limbo for the journey */
911 port->ip_tempowner = 1;
912 }
913
914 if (top)
915 self->ith_assertions = assertcnt;
916 #endif /* IMPORTANCE_INHERITANCE */
917
918 if (pdrequest != IP_NULL) {
919 /* we assume the ref for pdrequest */
920 port->ip_pdrequest = IP_NULL;
921
922 /* make port be in limbo */
923 port->ip_receiver_name = MACH_PORT_NULL;
924 port->ip_destination = IP_NULL;
925 ip_unlock(port);
926
927 /* consumes our refs for port and pdrequest */
928 ipc_notify_port_destroyed(pdrequest, port);
929
930 goto drop_assertions;
931 }
932
933 /* once port is dead, we don't need to keep it locked */
934
935 port->ip_object.io_bits &= ~IO_BITS_ACTIVE;
936 port->ip_timestamp = ipc_port_timestamp();
937
938 /*
939 * If the port has a preallocated message buffer and that buffer
940 * is not inuse, free it. If it has an inuse one, then the kmsg
941 * free will detect that we freed the association and it can free it
942 * like a normal buffer.
943 */
944 if (IP_PREALLOC(port)) {
945 ipc_port_t inuse_port;
946
947 kmsg = port->ip_premsg;
948 assert(kmsg != IKM_NULL);
949 inuse_port = ikm_prealloc_inuse_port(kmsg);
950 IP_CLEAR_PREALLOC(port, kmsg);
951 ip_unlock(port);
952 if (inuse_port != IP_NULL) {
953 assert(inuse_port == port);
954 } else {
955 ipc_kmsg_free(kmsg);
956 }
957 } else {
958 ip_unlock(port);
959 }
960
961 /* throw away no-senders request */
962 nsrequest = port->ip_nsrequest;
963 if (nsrequest != IP_NULL)
964 ipc_notify_send_once(nsrequest); /* consumes ref */
965
966 /* destroy any queued messages */
967 mqueue = &port->ip_messages;
968 ipc_mqueue_destroy(mqueue);
969
970 /* generate dead-name notifications */
971 ipc_port_dnnotify(port);
972
973 ipc_kobject_destroy(port);
974
975 ip_release(port); /* consume caller's ref */
976
977 drop_assertions:
978 #if IMPORTANCE_INHERITANCE
979 if (release_imp_task != TASK_NULL) {
980 if (assertcnt > 0) {
981 assert(top);
982 self->ith_assertions = 0;
983 assert(release_imp_task->imp_receiver != 0);
984 task_importance_drop_internal_assertion(release_imp_task, assertcnt);
985 }
986 task_deallocate(release_imp_task);
987
988 } else if (assertcnt > 0) {
989 if (top) {
990 self->ith_assertions = 0;
991 release_imp_task = current_task();
992 if (release_imp_task->imp_receiver != 0) {
993 task_importance_drop_internal_assertion(release_imp_task, assertcnt);
994 }
995 } else {
996 /* the port chain we are enqueued on should cover our assertions */
997 assert(assertcnt <= self->ith_assertions);
998 }
999 }
1000 #endif /* IMPORTANCE_INHERITANCE */
1001 }
1002
1003 /*
1004 * Routine: ipc_port_check_circularity
1005 * Purpose:
1006 * Check if queueing "port" in a message for "dest"
1007 * would create a circular group of ports and messages.
1008 *
1009 * If no circularity (FALSE returned), then "port"
1010 * is changed from "in limbo" to "in transit".
1011 *
1012 * That is, we want to set port->ip_destination == dest,
1013 * but guaranteeing that this doesn't create a circle
1014 * port->ip_destination->ip_destination->... == port
1015 *
1016 * Additionally, if port was successfully changed to "in transit",
1017 * propagate boost assertions from the "in limbo" port to all
1018 * the ports in the chain, and, if the destination task accepts
1019 * boosts, to the destination task.
1020 *
1021 * Conditions:
1022 * No ports locked. References held for "port" and "dest".
1023 */
1024
1025 boolean_t
1026 ipc_port_check_circularity(
1027 ipc_port_t port,
1028 ipc_port_t dest)
1029 {
1030 ipc_port_t base;
1031
1032 #if IMPORTANCE_INHERITANCE
1033 task_t task = TASK_NULL;
1034 task_t release_task = TASK_NULL;
1035 int assertcnt = 0;
1036 #endif /* IMPORTANCE_INHERITANCE */
1037
1038 assert(port != IP_NULL);
1039 assert(dest != IP_NULL);
1040
1041 if (port == dest)
1042 return TRUE;
1043 base = dest;
1044
1045 /*
1046 * First try a quick check that can run in parallel.
1047 * No circularity if dest is not in transit.
1048 */
1049
1050 ip_lock(port);
1051 if (ip_lock_try(dest)) {
1052 if (!ip_active(dest) ||
1053 (dest->ip_receiver_name != MACH_PORT_NULL) ||
1054 (dest->ip_destination == IP_NULL))
1055 goto not_circular;
1056
1057 /* dest is in transit; further checking necessary */
1058
1059 ip_unlock(dest);
1060 }
1061 ip_unlock(port);
1062
1063 ipc_port_multiple_lock(); /* massive serialization */
1064
1065 /*
1066 * Search for the end of the chain (a port not in transit),
1067 * acquiring locks along the way.
1068 */
1069
1070 for (;;) {
1071 ip_lock(base);
1072
1073 if (!ip_active(base) ||
1074 (base->ip_receiver_name != MACH_PORT_NULL) ||
1075 (base->ip_destination == IP_NULL))
1076 break;
1077
1078 base = base->ip_destination;
1079 }
1080
1081 /* all ports in chain from dest to base, inclusive, are locked */
1082
1083 if (port == base) {
1084 /* circularity detected! */
1085
1086 ipc_port_multiple_unlock();
1087
1088 /* port (== base) is in limbo */
1089
1090 assert(ip_active(port));
1091 assert(port->ip_receiver_name == MACH_PORT_NULL);
1092 assert(port->ip_destination == IP_NULL);
1093
1094 while (dest != IP_NULL) {
1095 ipc_port_t next;
1096
1097 /* dest is in transit or in limbo */
1098
1099 assert(ip_active(dest));
1100 assert(dest->ip_receiver_name == MACH_PORT_NULL);
1101
1102 next = dest->ip_destination;
1103 ip_unlock(dest);
1104 dest = next;
1105 }
1106
1107 return TRUE;
1108 }
1109
1110 /*
1111 * The guarantee: lock port while the entire chain is locked.
1112 * Once port is locked, we can take a reference to dest,
1113 * add port to the chain, and unlock everything.
1114 */
1115
1116 ip_lock(port);
1117 ipc_port_multiple_unlock();
1118
1119 not_circular:
1120
1121 /* port is in limbo */
1122
1123 assert(ip_active(port));
1124 assert(port->ip_receiver_name == MACH_PORT_NULL);
1125 assert(port->ip_destination == IP_NULL);
1126
1127 ip_reference(dest);
1128 port->ip_destination = dest;
1129
1130 #if IMPORTANCE_INHERITANCE
1131 /* must have been in limbo or still bound to a task */
1132 assert(port->ip_tempowner != 0);
1133
1134 if (port->ip_taskptr != 0) {
1135 /*
1136 * We delayed dropping assertions from a specific task.
1137 * Cache that info now (we'll drop assertions and the
1138 * task reference below).
1139 */
1140 release_task = port->ip_imp_task;
1141 port->ip_imp_task = TASK_NULL;
1142 port->ip_taskptr = 0;
1143 }
1144 assertcnt = port->ip_impcount;
1145
1146 /* take the port out of limbo w.r.t. assertions */
1147 port->ip_tempowner = 0;
1148
1149 #endif /* IMPORTANCE_INHERITANCE */
1150
1151 /* now unlock chain */
1152
1153 ip_unlock(port);
1154
1155 for (;;) {
1156
1157 #if IMPORTANCE_INHERITANCE
1158 /* every port along the chain tracks assertions behind it */
1159 dest->ip_impcount += assertcnt;
1160 #endif /* IMPORTANCE_INHERITANCE */
1161
1162 if (dest == base)
1163 break;
1164
1165 /* port is in transit */
1166
1167 assert(ip_active(dest));
1168 assert(dest->ip_receiver_name == MACH_PORT_NULL);
1169 assert(dest->ip_destination != IP_NULL);
1170
1171 #if IMPORTANCE_INHERITANCE
1172 assert(dest->ip_tempowner == 0);
1173 #endif /* IMPORTANCE_INHERITANCE */
1174
1175 port = dest->ip_destination;
1176 ip_unlock(dest);
1177 dest = port;
1178 }
1179
1180 /* base is not in transit */
1181 assert(!ip_active(base) ||
1182 (base->ip_receiver_name != MACH_PORT_NULL) ||
1183 (base->ip_destination == IP_NULL));
1184
1185 #if IMPORTANCE_INHERITANCE
1186 /*
1187 * Find the task to boost (if any).
1188 * We will boost "through" ports that don't know
1189 * about inheritance to deliver receive rights that
1190 * do.
1191 */
1192 if (ip_active(base) && (assertcnt > 0)) {
1193 if (base->ip_tempowner != 0) {
1194 if (base->ip_taskptr != 0)
1195 /* specified tempowner task */
1196 task = base->ip_imp_task;
1197 /* otherwise don't boost current task */
1198
1199 } else if (base->ip_receiver_name != MACH_PORT_NULL) {
1200 ipc_space_t space = base->ip_receiver;
1201
1202 /* only spaces with boost-accepting tasks */
1203 if (space->is_task != TASK_NULL &&
1204 space->is_task->imp_receiver != 0)
1205 task = space->is_task;
1206 }
1207
1208 /* take reference before unlocking base */
1209 if (task != TASK_NULL) {
1210 assert(task->imp_receiver != 0);
1211 task_reference(task);
1212 }
1213 }
1214 #endif /* IMPORTANCE_INHERITANCE */
1215
1216 ip_unlock(base);
1217
1218 #if IMPORTANCE_INHERITANCE
1219 /*
1220 * Transfer assertions now that the ports are unlocked.
1221 * Avoid extra overhead if transferring to/from the same task.
1222 */
1223 boolean_t transfer_assertions = (task != release_task) ? TRUE : FALSE;
1224
1225 if (task != TASK_NULL) {
1226 if (transfer_assertions)
1227 task_importance_hold_internal_assertion(task, assertcnt);
1228 task_deallocate(task);
1229 task = TASK_NULL;
1230 }
1231
1232 if (release_task != TASK_NULL) {
1233 if (transfer_assertions)
1234 task_importance_drop_internal_assertion(release_task, assertcnt);
1235 task_deallocate(release_task);
1236 release_task = TASK_NULL;
1237 }
1238 #endif /* IMPORTANCE_INHERITANCE */
1239
1240 return FALSE;
1241 }
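/*
 * Editorial example, not part of the original source: a cycle arises when
 * a receive right would be queued "through" itself. Suppose the receive
 * right for A is being copied into a message destined for B while B's own
 * receive right is already in transit inside a message queued on A; then
 * setting A->ip_destination = B would close the loop A -> B -> A. A caller
 * moving a receive right out of limbo therefore does, in sketch form:
 */
#if 0	/* illustrative only */
	if (ipc_port_check_circularity(port, dest)) {
		/* would create a cycle: port stays "in limbo", fail the copyin */
	} else {
		/* port->ip_destination == dest now; port is "in transit" */
	}
#endif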
1242
1243 /*
1244 * Routine: ipc_port_importance_delta
1245 * Purpose:
1246 * Adjust the importance count through the given port.
1247 * If the port is in transit, apply the delta throughout
1248 * the chain. Determine if there is a task at the
1249 * base of the chain that wants/needs to be adjusted,
1250 * and if so, apply the delta.
1251 * Conditions:
1252 * The port is referenced and locked on entry.
1253 * Nothing else is locked.
1254 * The lock may be dropped on exit.
1255 * Returns TRUE if lock was dropped.
1256 */
1257 #if IMPORTANCE_INHERITANCE
1258
1259 boolean_t
1260 ipc_port_importance_delta(
1261 ipc_port_t port,
1262 mach_port_delta_t delta)
1263 {
1264 ipc_port_t next, base;
1265 task_t task = TASK_NULL;
1266 boolean_t dropped = FALSE;
1267
1268 if (delta == 0)
1269 return FALSE;
1270
1271 base = port;
1272
1273 /* if port is in transit, have to search for end of chain */
1274 if (ip_active(port) &&
1275 port->ip_destination != IP_NULL &&
1276 port->ip_receiver_name == MACH_PORT_NULL) {
1277
1278 dropped = TRUE;
1279
1280 ip_unlock(port);
1281 ipc_port_multiple_lock(); /* massive serialization */
1282 ip_lock(base);
1283
1284 while (ip_active(base) &&
1285 base->ip_destination != IP_NULL &&
1286 base->ip_receiver_name == MACH_PORT_NULL) {
1287
1288 base = base->ip_destination;
1289 ip_lock(base);
1290 }
1291 ipc_port_multiple_unlock();
1292 }
1293
1294 /* unlock down to the base, adding a boost at each level */
1295 for (;;) {
1296 port->ip_impcount += delta;
1297
1298 if (port == base)
1299 break;
1300
1301 /* port is in transit */
1302 assert(port->ip_tempowner == 0);
1303 next = port->ip_destination;
1304 ip_unlock(port);
1305 port = next;
1306 }
1307
1308 /* find the task (if any) to boost according to the base */
1309 if (ip_active(base)) {
1310 if (base->ip_tempowner != 0) {
1311 if (base->ip_taskptr != 0)
1312 task = base->ip_imp_task;
1313 /* otherwise don't boost */
1314
1315 } else if (base->ip_receiver_name != MACH_PORT_NULL) {
1316 ipc_space_t space = base->ip_receiver;
1317
1318 /* only spaces with boost-accepting tasks */
1319 if (space->is_task != TASK_NULL &&
1320 space->is_task->imp_receiver != 0)
1321 task = space->is_task;
1322 }
1323 }
1324
1325 /*
1326 * Only the base is locked. If we have to hold or drop task
1327 * importance assertions, we'll have to drop that lock as well.
1328 */
1329 if (task != TASK_NULL) {
1330 /* take a reference before unlocking base */
1331 assert(task->imp_receiver != 0);
1332 task_reference(task);
1333
1334 ip_unlock(base);
1335 dropped = TRUE;
1336
1337 if (delta > 0)
1338 task_importance_hold_internal_assertion(task, delta);
1339 else
1340 task_importance_drop_internal_assertion(task, -delta);
1341
1342 task_deallocate(task);
1343 } else if (dropped == TRUE) {
1344 ip_unlock(base);
1345 }
1346
1347 return dropped;
1348 }
1349 #endif /* IMPORTANCE_INHERITANCE */
1350
1351 /*
1352 * Routine: ipc_port_lookup_notify
1353 * Purpose:
1354 * Make a send-once notify port from a receive right.
1355 * Returns IP_NULL if name doesn't denote a receive right.
1356 * Conditions:
1357 * The space must be locked (read or write) and active.
1358 * Being the active space, we can rely on thread server_id
1359 * context to give us the proper server level sub-order
1360 * within the space.
1361 */
1362
1363 ipc_port_t
1364 ipc_port_lookup_notify(
1365 ipc_space_t space,
1366 mach_port_name_t name)
1367 {
1368 ipc_port_t port;
1369 ipc_entry_t entry;
1370
1371 assert(is_active(space));
1372
1373 entry = ipc_entry_lookup(space, name);
1374 if (entry == IE_NULL)
1375 return IP_NULL;
1376 if ((entry->ie_bits & MACH_PORT_TYPE_RECEIVE) == 0)
1377 return IP_NULL;
1378
1379 port = (ipc_port_t) entry->ie_object;
1380 assert(port != IP_NULL);
1381
1382 ip_lock(port);
1383 assert(ip_active(port));
1384 assert(port->ip_receiver_name == name);
1385 assert(port->ip_receiver == space);
1386
1387 ip_reference(port);
1388 port->ip_sorights++;
1389 ip_unlock(port);
1390
1391 return port;
1392 }
1393
1394 /*
1395 * Routine: ipc_port_make_send_locked
1396 * Purpose:
1397 * Make a naked send right from a receive right.
1398 *
1399 * Conditions:
1400 * port locked and active.
1401 */
1402 ipc_port_t
1403 ipc_port_make_send_locked(
1404 ipc_port_t port)
1405 {
1406 assert(ip_active(port));
1407 port->ip_mscount++;
1408 port->ip_srights++;
1409 ip_reference(port);
1410 return port;
1411 }
1412
1413 /*
1414 * Routine: ipc_port_make_send
1415 * Purpose:
1416 * Make a naked send right from a receive right.
1417 */
1418
1419 ipc_port_t
1420 ipc_port_make_send(
1421 ipc_port_t port)
1422 {
1423
1424 if (!IP_VALID(port))
1425 return port;
1426
1427 ip_lock(port);
1428 if (ip_active(port)) {
1429 port->ip_mscount++;
1430 port->ip_srights++;
1431 ip_reference(port);
1432 ip_unlock(port);
1433 return port;
1434 }
1435 ip_unlock(port);
1436 return IP_DEAD;
1437 }
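/*
 * Editorial sketch, not part of the original source: kernel code that
 * fabricates a send right usually pairs ipc_port_make_send() with a later
 * ipc_port_release_send() (further below), which drops both the send right
 * and the port reference taken here.
 */
#if 0	/* illustrative only */
	ipc_port_t sright = ipc_port_make_send(port);

	if (IP_VALID(sright)) {
		/* ... use the right, or copy it out to a space ... */
		ipc_port_release_send(sright);
	}
#endif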
1438
1439 /*
1440 * Routine: ipc_port_copy_send
1441 * Purpose:
1442 * Make a naked send right from another naked send right.
1443 * IP_NULL -> IP_NULL
1444 * IP_DEAD -> IP_DEAD
1445 * dead port -> IP_DEAD
1446 * live port -> port + ref
1447 * Conditions:
1448 * Nothing locked except possibly a space.
1449 */
1450
1451 ipc_port_t
1452 ipc_port_copy_send(
1453 ipc_port_t port)
1454 {
1455 ipc_port_t sright;
1456
1457 if (!IP_VALID(port))
1458 return port;
1459
1460 ip_lock(port);
1461 if (ip_active(port)) {
1462 assert(port->ip_srights > 0);
1463
1464 ip_reference(port);
1465 port->ip_srights++;
1466 sright = port;
1467 } else
1468 sright = IP_DEAD;
1469 ip_unlock(port);
1470
1471 return sright;
1472 }
1473
1474 /*
1475 * Routine: ipc_port_copyout_send
1476 * Purpose:
1477 * Copyout a naked send right (possibly null/dead),
1478 * or if that fails, destroy the right.
1479 * Conditions:
1480 * Nothing locked.
1481 */
1482
1483 mach_port_name_t
1484 ipc_port_copyout_send(
1485 ipc_port_t sright,
1486 ipc_space_t space)
1487 {
1488 mach_port_name_t name;
1489
1490 if (IP_VALID(sright)) {
1491 kern_return_t kr;
1492
1493 kr = ipc_object_copyout(space, (ipc_object_t) sright,
1494 MACH_MSG_TYPE_PORT_SEND, TRUE, &name);
1495 if (kr != KERN_SUCCESS) {
1496 ipc_port_release_send(sright);
1497
1498 if (kr == KERN_INVALID_CAPABILITY)
1499 name = MACH_PORT_DEAD;
1500 else
1501 name = MACH_PORT_NULL;
1502 }
1503 } else
1504 name = CAST_MACH_PORT_TO_NAME(sright);
1505
1506 return name;
1507 }
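/*
 * Editorial sketch, not part of the original source: a common pattern is to
 * make a naked send right and immediately copy it out into a task's space,
 * relying on ipc_port_copyout_send() to destroy the right and report
 * MACH_PORT_DEAD/MACH_PORT_NULL on failure. current_space() is assumed here
 * to denote the calling task's IPC space.
 */
#if 0	/* illustrative only */
	ipc_port_t sright = ipc_port_make_send(port);
	mach_port_name_t name = ipc_port_copyout_send(sright, current_space());

	if (!MACH_PORT_VALID(name)) {
		/* the right was destroyed (port dead or copyout failed) */
	}
#endif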
1508
1509 /*
1510 * Routine: ipc_port_release_send
1511 * Purpose:
1512 * Release a naked send right.
1513 * Consumes a ref for the port.
1514 * Conditions:
1515 * Nothing locked.
1516 */
1517
1518 void
1519 ipc_port_release_send(
1520 ipc_port_t port)
1521 {
1522 ipc_port_t nsrequest = IP_NULL;
1523 mach_port_mscount_t mscount;
1524
1525 if (!IP_VALID(port))
1526 return;
1527
1528 ip_lock(port);
1529
1530 if (!ip_active(port)) {
1531 ip_unlock(port);
1532 ip_release(port);
1533 return;
1534 }
1535
1536 assert(port->ip_srights > 0);
1537
1538 if (--port->ip_srights == 0 &&
1539 port->ip_nsrequest != IP_NULL) {
1540 nsrequest = port->ip_nsrequest;
1541 port->ip_nsrequest = IP_NULL;
1542 mscount = port->ip_mscount;
1543 ip_unlock(port);
1544 ip_release(port);
1545 ipc_notify_no_senders(nsrequest, mscount);
1546 } else {
1547 ip_unlock(port);
1548 ip_release(port);
1549 }
1550 }
1551
1552 /*
1553 * Routine: ipc_port_make_sonce_locked
1554 * Purpose:
1555 * Make a naked send-once right from a receive right.
1556 * Conditions:
1557 * The port is locked and active.
1558 */
1559
1560 ipc_port_t
1561 ipc_port_make_sonce_locked(
1562 ipc_port_t port)
1563 {
1564 assert(ip_active(port));
1565 port->ip_sorights++;
1566 ip_reference(port);
1567 return port;
1568 }
1569
1570 /*
1571 * Routine: ipc_port_make_sonce
1572 * Purpose:
1573 * Make a naked send-once right from a receive right.
1574 * Conditions:
1575 * The port is not locked.
1576 */
1577
1578 ipc_port_t
1579 ipc_port_make_sonce(
1580 ipc_port_t port)
1581 {
1582 if (!IP_VALID(port))
1583 return port;
1584
1585 ip_lock(port);
1586 if (ip_active(port)) {
1587 port->ip_sorights++;
1588 ip_reference(port);
1589 ip_unlock(port);
1590 return port;
1591 }
1592 ip_unlock(port);
1593 return IP_DEAD;
1594 }
1595
1596 /*
1597 * Routine: ipc_port_release_sonce
1598 * Purpose:
1599 * Release a naked send-once right.
1600 * Consumes a ref for the port.
1601 *
1602 * In normal situations, this is never used.
1603 * Send-once rights are only consumed when
1604 * a message (possibly a send-once notification)
1605 * is sent to them.
1606 * Conditions:
1607 * Nothing locked except possibly a space.
1608 */
1609
1610 void
1611 ipc_port_release_sonce(
1612 ipc_port_t port)
1613 {
1614 if (!IP_VALID(port))
1615 return;
1616
1617 ip_lock(port);
1618
1619 assert(port->ip_sorights > 0);
1620
1621 port->ip_sorights--;
1622
1623 ip_unlock(port);
1624 ip_release(port);
1625 }
1626
1627 /*
1628 * Routine: ipc_port_release_receive
1629 * Purpose:
1630 * Release a naked (in limbo or in transit) receive right.
1631 * Consumes a ref for the port; destroys the port.
1632 * Conditions:
1633 * Nothing locked.
1634 */
1635
1636 void
1637 ipc_port_release_receive(
1638 ipc_port_t port)
1639 {
1640 ipc_port_t dest;
1641
1642 if (!IP_VALID(port))
1643 return;
1644
1645 ip_lock(port);
1646 assert(ip_active(port));
1647 assert(port->ip_receiver_name == MACH_PORT_NULL);
1648 dest = port->ip_destination;
1649
1650 ipc_port_destroy(port); /* consumes ref, unlocks */
1651
1652 if (dest != IP_NULL)
1653 ip_release(dest);
1654 }
1655
1656 /*
1657 * Routine: ipc_port_alloc_special
1658 * Purpose:
1659 * Allocate a port in a special space.
1660 * The new port is returned with one ref.
1661 * If unsuccessful, IP_NULL is returned.
1662 * Conditions:
1663 * Nothing locked.
1664 */
1665
1666 ipc_port_t
1667 ipc_port_alloc_special(
1668 ipc_space_t space)
1669 {
1670 ipc_port_t port;
1671
1672 port = (ipc_port_t) io_alloc(IOT_PORT);
1673 if (port == IP_NULL)
1674 return IP_NULL;
1675
1676 #if MACH_ASSERT
1677 uintptr_t buf[IP_CALLSTACK_MAX];
1678 ipc_port_callstack_init_debug(&buf[0], IP_CALLSTACK_MAX);
1679 #endif /* MACH_ASSERT */
1680
1681 bzero((char *)port, sizeof(*port));
1682 io_lock_init(&port->ip_object);
1683 port->ip_references = 1;
1684 port->ip_object.io_bits = io_makebits(TRUE, IOT_PORT, 0);
1685
1686 ipc_port_init(port, space, 1);
1687
1688 #if MACH_ASSERT
1689 ipc_port_init_debug(port, &buf[0], IP_CALLSTACK_MAX);
1690 #endif /* MACH_ASSERT */
1691
1692 #if CONFIG_MACF_MACH
1693 /* Currently, ipc_port_alloc_special is used for two things:
1694 * - Reply ports for messages from the kernel
1695 * - Ports for communication with the kernel (e.g. task ports)
1696 * Since both of these would typically be labelled as kernel objects,
1697 * we will use a new entry point for this purpose, as current_task()
1698 * is often wrong (i.e. not kernel_task) or null.
1699 */
1700 mac_port_label_init(&port->ip_label);
1701 mac_port_label_associate_kernel(&port->ip_label, space == ipc_space_reply);
1702 #endif
1703
1704 return port;
1705 }
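/*
 * Editorial sketch, not part of the original source: kernel-object ports
 * are typically allocated in the kernel's special space, bound to their
 * backing object, and handed out as a send right. ipc_kobject_set() and
 * ipc_space_kernel are assumed from the surrounding kernel sources, and
 * IKOT_EXAMPLE stands in for a real kobject type.
 */
#if 0	/* illustrative only */
	ipc_port_t port, sright;

	port = ipc_port_alloc_special(ipc_space_kernel);
	if (port != IP_NULL) {
		ipc_kobject_set(port, (ipc_kobject_t) kobject, IKOT_EXAMPLE);
		sright = ipc_port_make_send(port);
	}
#endif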
1706
1707 /*
1708 * Routine: ipc_port_dealloc_special
1709 * Purpose:
1710 * Deallocate a port in a special space.
1711 * Consumes one ref for the port.
1712 * Conditions:
1713 * Nothing locked.
1714 */
1715
1716 void
1717 ipc_port_dealloc_special(
1718 ipc_port_t port,
1719 __assert_only ipc_space_t space)
1720 {
1721 ip_lock(port);
1722 assert(ip_active(port));
1723 // assert(port->ip_receiver_name != MACH_PORT_NULL);
1724 assert(port->ip_receiver == space);
1725
1726 /*
1727 * We clear ip_receiver_name and ip_receiver to simplify
1728 * the ipc_space_kernel check in ipc_mqueue_send.
1729 */
1730
1731 port->ip_receiver_name = MACH_PORT_NULL;
1732 port->ip_receiver = IS_NULL;
1733
1734 /* relevant part of ipc_port_clear_receiver */
1735 ipc_port_set_mscount(port, 0);
1736 port->ip_messages.imq_seqno = 0;
1737
1738 ipc_port_destroy(port);
1739 }
1740
1741 /*
1742 * Routine: ipc_port_finalize
1743 * Purpose:
1744 * Called on last reference deallocate to
1745 * free any remaining data associated with the
1746 * port.
1747 * Conditions:
1748 * Nothing locked.
1749 */
1750 void
1751 ipc_port_finalize(
1752 ipc_port_t port)
1753 {
1754 ipc_port_request_t requests = port->ip_requests;
1755
1756 assert(!ip_active(port));
1757 if (requests != IPR_NULL) {
1758 ipc_table_size_t its = requests->ipr_size;
1759 it_requests_free(its, requests);
1760 port->ip_requests = IPR_NULL;
1761 }
1762
1763 #if MACH_ASSERT
1764 ipc_port_track_dealloc(port);
1765 #endif /* MACH_ASSERT */
1766
1767 #if CONFIG_MACF_MACH
1768 /* Port label should have been initialized after creation. */
1769 mac_port_label_destroy(&port->ip_label);
1770 #endif
1771 }
1772
1773 #if MACH_ASSERT
1774 #include <kern/machine.h>
1775
1776 /*
1777 * Keep a list of all allocated ports.
1778 * Allocation is intercepted via ipc_port_init;
1779 * deallocation is intercepted via io_free.
1780 */
1781 queue_head_t port_alloc_queue;
1782 lck_spin_t port_alloc_queue_lock;
1783
1784 unsigned long port_count = 0;
1785 unsigned long port_count_warning = 20000;
1786 unsigned long port_timestamp = 0;
1787
1788 void db_port_stack_trace(
1789 ipc_port_t port);
1790 void db_ref(
1791 int refs);
1792 int db_port_walk(
1793 unsigned int verbose,
1794 unsigned int display,
1795 unsigned int ref_search,
1796 unsigned int ref_target);
1797
1798 /*
1799 * Initialize global state needed for run-time
1800 * port debugging.
1801 */
1802 void
1803 ipc_port_debug_init(void)
1804 {
1805 queue_init(&port_alloc_queue);
1806
1807 lck_spin_init(&port_alloc_queue_lock, &ipc_lck_grp, &ipc_lck_attr);
1808
1809 if (!PE_parse_boot_argn("ipc_portbt", &ipc_portbt, sizeof (ipc_portbt)))
1810 ipc_portbt = 0;
1811 }
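/*
 * Editorial note, not part of the original source: port-allocation
 * backtraces stay disabled unless the boot-arg below is set on a
 * MACH_ASSERT kernel, in which case ipc_port_callstack_init_debug()
 * captures a machine_callstack() for every allocated port.
 *
 *	ipc_portbt=1
 */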
1812
1813 #ifdef MACH_BSD
1814 extern int proc_pid(struct proc*);
1815 #endif /* MACH_BSD */
1816
1817 /*
1818 * Initialize all of the debugging state in a port.
1819 * Insert the port into a global list of all allocated ports.
1820 */
1821 void
1822 ipc_port_init_debug(
1823 ipc_port_t port,
1824 uintptr_t *callstack,
1825 unsigned int callstack_max)
1826 {
1827 unsigned int i;
1828
1829 port->ip_thread = current_thread();
1830 port->ip_timetrack = port_timestamp++;
1831 for (i = 0; i < callstack_max; ++i)
1832 port->ip_callstack[i] = callstack[i];
1833 for (i = 0; i < IP_NSPARES; ++i)
1834 port->ip_spares[i] = 0;
1835
1836 #ifdef MACH_BSD
1837 task_t task = current_task();
1838 if (task != TASK_NULL) {
1839 struct proc* proc = (struct proc*) get_bsdtask_info(task);
1840 if (proc)
1841 port->ip_spares[0] = proc_pid(proc);
1842 }
1843 #endif /* MACH_BSD */
1844
1845 #if 0
1846 lck_spin_lock(&port_alloc_queue_lock);
1847 ++port_count;
1848 if (port_count_warning > 0 && port_count >= port_count_warning)
1849 assert(port_count < port_count_warning);
1850 queue_enter(&port_alloc_queue, port, ipc_port_t, ip_port_links);
1851 lck_spin_unlock(&port_alloc_queue_lock);
1852 #endif
1853 }
1854
1855 /*
1856 * Routine: ipc_port_callstack_init_debug
1857 * Purpose:
1858 * Calls the machine-dependent routine to
1859 * fill in an array with up to IP_CALLSTACK_MAX
1860 * levels of return pc information
1861 * Conditions:
1862 * May block (via copyin)
1863 */
1864 void
1865 ipc_port_callstack_init_debug(
1866 uintptr_t *callstack,
1867 unsigned int callstack_max)
1868 {
1869 unsigned int i;
1870
1871 /* guarantee the callstack is initialized */
1872 for (i=0; i < callstack_max; i++)
1873 callstack[i] = 0;
1874
1875 if (ipc_portbt)
1876 machine_callstack(callstack, callstack_max);
1877 }
1878
1879 /*
1880 * Remove a port from the queue of allocated ports.
1881 * This routine should be invoked JUST prior to
1882 * deallocating the actual memory occupied by the port.
1883 */
1884 #if 1
1885 void
1886 ipc_port_track_dealloc(
1887 __unused ipc_port_t port)
1888 {
1889 }
1890 #else
1891 void
1892 ipc_port_track_dealloc(
1893 ipc_port_t port)
1894 {
1895 lck_spin_lock(&port_alloc_queue_lock);
1896 assert(port_count > 0);
1897 --port_count;
1898 queue_remove(&port_alloc_queue, port, ipc_port_t, ip_port_links);
1899 lck_spin_unlock(&port_alloc_queue_lock);
1900 }
1901 #endif
1902
1903
1904 #endif /* MACH_ASSERT */