/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	ipc/ipc_mqueue.c
 *	Author:	Rich Draves
 *	Date:	1989
 *
 *	Functions to manipulate IPC message queues.
 */
/*
 * NOTICE: This file was modified by SPARTA, Inc. in 2006 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */


#include <mach/port.h>
#include <mach/message.h>
#include <mach/sync_policy.h>

#include <kern/assert.h>
#include <kern/counters.h>
#include <kern/sched_prim.h>
#include <kern/ipc_kobject.h>
#include <kern/ipc_mig.h>	/* XXX - for mach_msg_receive_continue */
#include <kern/misc_protos.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/waitq.h>

#include <ipc/ipc_mqueue.h>
#include <ipc/ipc_kmsg.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_pset.h>
#include <ipc/ipc_space.h>

#if MACH_FLIPC
#include <ipc/flipc.h>
#endif

#ifdef __LP64__
#include <vm/vm_map.h>
#endif

#include <sys/event.h>

extern char *proc_name_address(void *p);

int ipc_mqueue_full;		/* address is event for queue space */
int ipc_mqueue_rcv;		/* address is event for message arrival */

/* forward declarations */
void ipc_mqueue_receive_results(wait_result_t result);
static void ipc_mqueue_peek_on_thread(
	ipc_mqueue_t		port_mq,
	mach_msg_option_t	option,
	thread_t		thread);

/*
 *	Routine:	ipc_mqueue_init
 *	Purpose:
 *		Initialize a newly-allocated message queue.
 */
void
ipc_mqueue_init(
	ipc_mqueue_t	mqueue,
	boolean_t	is_set,
	uint64_t	*reserved_link)
{
	if (is_set) {
		waitq_set_init(&mqueue->imq_set_queue,
			       SYNC_POLICY_FIFO|SYNC_POLICY_PREPOST,
			       reserved_link, NULL);
	} else {
		waitq_init(&mqueue->imq_wait_queue, SYNC_POLICY_FIFO);
		ipc_kmsg_queue_init(&mqueue->imq_messages);
		mqueue->imq_seqno = 0;
		mqueue->imq_msgcount = 0;
		mqueue->imq_qlimit = MACH_PORT_QLIMIT_DEFAULT;
		mqueue->imq_fullwaiters = FALSE;
#if MACH_FLIPC
		mqueue->imq_fport = FPORT_NULL;
#endif
	}
	klist_init(&mqueue->imq_klist);
}

void ipc_mqueue_deinit(
	ipc_mqueue_t	mqueue)
{
	boolean_t is_set = imq_is_set(mqueue);

	if (is_set)
		waitq_set_deinit(&mqueue->imq_set_queue);
	else
		waitq_deinit(&mqueue->imq_wait_queue);
}

/*
 *	Routine:	imq_reserve_and_lock
 *	Purpose:
 *		Atomically lock an ipc_mqueue_t object and reserve
 *		an appropriate number of prepost linkage objects for
 *		use in wakeup operations.
 *	Conditions:
 *		mq is unlocked
 */
void
imq_reserve_and_lock(ipc_mqueue_t mq, uint64_t *reserved_prepost)
{
	*reserved_prepost = waitq_prepost_reserve(&mq->imq_wait_queue, 0,
						  WAITQ_KEEP_LOCKED);

}


/*
 *	Routine:	imq_release_and_unlock
 *	Purpose:
 *		Unlock an ipc_mqueue_t object, re-enable interrupts,
 *		and release any unused prepost object reservations.
 *	Conditions:
 *		mq is locked
 */
void
imq_release_and_unlock(ipc_mqueue_t mq, uint64_t reserved_prepost)
{
	assert(imq_held(mq));
	waitq_unlock(&mq->imq_wait_queue);
	waitq_prepost_release_reserve(reserved_prepost);
}
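
/*
 * Illustrative sketch (not part of the original source): the expected
 * pairing of the two routines above.  A wakeup path atomically locks
 * the mqueue and reserves prepost objects, does its work while locked
 * (wakeups may consume some of the reservation), then unlocks and
 * returns whatever reservation is left.
 *
 *	uint64_t reserved_prepost = 0;
 *
 *	imq_reserve_and_lock(port_mq, &reserved_prepost);
 *	... identify and wake waiters under the lock ...
 *	imq_release_and_unlock(port_mq, reserved_prepost);
 */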

/*
 *	Routine:	ipc_mqueue_member
 *	Purpose:
 *		Indicate whether the (port) mqueue is a member of
 *		this portset's mqueue.  We do this by checking
 *		whether the port mqueue's waitq is a member of
 *		the portset mqueue's waitq set.
 *	Conditions:
 *		The portset's mqueue is not already a member;
 *		this may block while allocating linkage structures.
 */

boolean_t
ipc_mqueue_member(
	ipc_mqueue_t	port_mqueue,
	ipc_mqueue_t	set_mqueue)
{
	struct waitq *port_waitq = &port_mqueue->imq_wait_queue;
	struct waitq_set *set_waitq = &set_mqueue->imq_set_queue;

	return waitq_member(port_waitq, set_waitq);

}

/*
 *	Routine:	ipc_mqueue_remove
 *	Purpose:
 *		Remove the association between the queue and the specified
 *		set message queue.
 */

kern_return_t
ipc_mqueue_remove(
	ipc_mqueue_t	mqueue,
	ipc_mqueue_t	set_mqueue)
{
	struct waitq *mq_waitq = &mqueue->imq_wait_queue;
	struct waitq_set *set_waitq = &set_mqueue->imq_set_queue;

	return waitq_unlink(mq_waitq, set_waitq);
}

/*
 *	Routine:	ipc_mqueue_remove_from_all
 *	Purpose:
 *		Remove the mqueue from all the sets it is a member of
 *	Conditions:
 *		Nothing locked.
 *	Returns:
 *		mqueue unlocked and set links deallocated
 */
void
ipc_mqueue_remove_from_all(ipc_mqueue_t mqueue)
{
	struct waitq *mq_waitq = &mqueue->imq_wait_queue;
	kern_return_t kr;

	imq_lock(mqueue);

	assert(waitq_valid(mq_waitq));
	kr = waitq_unlink_all_unlock(mq_waitq);
	/* mqueue unlocked and set links deallocated */
}

/*
 *	Routine:	ipc_mqueue_remove_all
 *	Purpose:
 *		Remove all the member queues from the specified set.
 *		Also removes the queue from any containing sets.
 *	Conditions:
 *		Nothing locked.
 *	Returns:
 *		mqueue unlocked all set links deallocated
 */
void
ipc_mqueue_remove_all(ipc_mqueue_t mqueue)
{
	struct waitq_set *mq_setq = &mqueue->imq_set_queue;

	imq_lock(mqueue);
	assert(waitqs_is_set(mq_setq));
	waitq_set_unlink_all_unlock(mq_setq);
	/* mqueue unlocked set links deallocated */
}


/*
 *	Routine:	ipc_mqueue_add
 *	Purpose:
 *		Associate the portset's mqueue with the port's mqueue.
 *		This has to be done so that posting the port will wakeup
 *		a portset waiter.  If there are waiters on the portset
 *		mqueue and messages on the port mqueue, try to match them
 *		up now.
 *	Conditions:
 *		May block.
 */
kern_return_t
ipc_mqueue_add(
	ipc_mqueue_t	port_mqueue,
	ipc_mqueue_t	set_mqueue,
	uint64_t	*reserved_link,
	uint64_t	*reserved_prepost)
{
	struct waitq *port_waitq = &port_mqueue->imq_wait_queue;
	struct waitq_set *set_waitq = &set_mqueue->imq_set_queue;
	ipc_kmsg_queue_t kmsgq;
	ipc_kmsg_t kmsg, next;
	kern_return_t kr;

	assert(reserved_link && *reserved_link != 0);

	imq_lock(port_mqueue);

	/*
	 * The link operation is now under the same lock-hold as
	 * message iteration and thread wakeup, but doesn't have to be...
	 */
	kr = waitq_link(port_waitq, set_waitq, WAITQ_ALREADY_LOCKED, reserved_link);
	if (kr != KERN_SUCCESS) {
		imq_unlock(port_mqueue);
		return kr;
	}

	/*
	 * Now that the set has been added to the port, there may be
	 * messages queued on the port and threads waiting on the set
	 * waitq.  Let's get them together.
	 */
	kmsgq = &port_mqueue->imq_messages;
	for (kmsg = ipc_kmsg_queue_first(kmsgq);
	     kmsg != IKM_NULL;
	     kmsg = next) {
		next = ipc_kmsg_queue_next(kmsgq, kmsg);

		for (;;) {
			thread_t th;
			mach_msg_size_t msize;
			spl_t th_spl;

			th = waitq_wakeup64_identify_locked(
						port_waitq,
						IPC_MQUEUE_RECEIVE,
						THREAD_AWAKENED, &th_spl,
						reserved_prepost, WAITQ_ALL_PRIORITIES,
						WAITQ_KEEP_LOCKED);
			/* waitq/mqueue still locked, thread locked */

			if (th == THREAD_NULL)
				goto leave;

			/*
			 * If the receiver waited with a facility not directly
			 * related to Mach messaging, then it isn't prepared to get
			 * handed the message directly.  Just set it running, and
			 * go look for another thread that can.
			 */
			if (th->ith_state != MACH_RCV_IN_PROGRESS) {
				if (th->ith_state == MACH_PEEK_IN_PROGRESS) {
					/*
					 * wakeup the peeking thread, but
					 * continue to loop over the threads
					 * waiting on the port's mqueue to see
					 * if there are any actual receivers
					 */
					ipc_mqueue_peek_on_thread(port_mqueue,
								  th->ith_option,
								  th);
				}
				thread_unlock(th);
				splx(th_spl);
				continue;
			}

			/*
			 * Found a receiver.  See if they can handle the message
			 * correctly (the message is not too large for them, or
			 * they didn't care to be informed that the message was
			 * too large).  If they can't handle it, take them off
			 * the list, let them go back and figure it out, and
			 * just move on to the next.
			 */
			msize = ipc_kmsg_copyout_size(kmsg, th->map);
			if (th->ith_rsize <
			    (msize + REQUESTED_TRAILER_SIZE(thread_is_64bit(th), th->ith_option))) {
				th->ith_state = MACH_RCV_TOO_LARGE;
				th->ith_msize = msize;
				if (th->ith_option & MACH_RCV_LARGE) {
					/*
					 * let him go without the message
					 */
					th->ith_receiver_name = port_mqueue->imq_receiver_name;
					th->ith_kmsg = IKM_NULL;
					th->ith_seqno = 0;
					thread_unlock(th);
					splx(th_spl);
					continue; /* find another thread */
				}
			} else {
				th->ith_state = MACH_MSG_SUCCESS;
			}

			/*
			 * This thread is going to take this message,
			 * so give it to him.
			 */
			ipc_kmsg_rmqueue(kmsgq, kmsg);
#if MACH_FLIPC
			mach_node_t node = kmsg->ikm_node;
#endif
			ipc_mqueue_release_msgcount(port_mqueue, IMQ_NULL);

			th->ith_kmsg = kmsg;
			th->ith_seqno = port_mqueue->imq_seqno++;
			thread_unlock(th);
			splx(th_spl);
#if MACH_FLIPC
			if (MACH_NODE_VALID(node) && FPORT_VALID(port_mqueue->imq_fport))
				flipc_msg_ack(node, port_mqueue, TRUE);
#endif
			break;	/* go to next message */
		}
	}
 leave:
	imq_unlock(port_mqueue);
	return KERN_SUCCESS;
}
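
/*
 * Illustrative sketch (not part of the original source): one way a
 * caller might satisfy ipc_mqueue_add()'s requirement for a
 * pre-allocated link and prepost reservation, using the waitq
 * reservation calls this file already depends on.  Error handling and
 * the reservation sizing are elided; treat the argument values as
 * assumptions, not the canonical caller.
 *
 *	uint64_t reserved_link = waitq_link_reserve(NULL);
 *	uint64_t reserved_prepost = waitq_prepost_reserve(NULL, 10,
 *							  WAITQ_DONT_LOCK);
 *	kern_return_t kr;
 *
 *	kr = ipc_mqueue_add(port_mq, set_mq,
 *			    &reserved_link, &reserved_prepost);
 *	waitq_link_release(reserved_link);
 *	waitq_prepost_release_reserve(reserved_prepost);
 */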

/*
 *	Routine:	ipc_mqueue_changed
 *	Purpose:
 *		Wake up receivers waiting in a message queue.
 *	Conditions:
 *		The message queue is locked.
 */

void
ipc_mqueue_changed(
	ipc_mqueue_t	mqueue)
{
	/* Indicate that this message queue is vanishing */
	knote_vanish(&mqueue->imq_klist);

	waitq_wakeup64_all_locked(&mqueue->imq_wait_queue,
				  IPC_MQUEUE_RECEIVE,
				  THREAD_RESTART,
				  NULL,
				  WAITQ_ALL_PRIORITIES,
				  WAITQ_KEEP_LOCKED);
}



/*
 *	Routine:	ipc_mqueue_send
 *	Purpose:
 *		Send a message to a message queue.  The message holds a reference
 *		for the destination port for this message queue in the
 *		msgh_remote_port field.
 *
 *		If unsuccessful, the caller still has possession of
 *		the message and must do something with it.  If successful,
 *		the message is queued, given to a receiver, or destroyed.
 *	Conditions:
 *		mqueue is locked.
 *	Returns:
 *		MACH_MSG_SUCCESS	The message was accepted.
 *		MACH_SEND_TIMED_OUT	Caller still has message.
 *		MACH_SEND_INTERRUPTED	Caller still has message.
 */
mach_msg_return_t
ipc_mqueue_send(
	ipc_mqueue_t		mqueue,
	ipc_kmsg_t		kmsg,
	mach_msg_option_t	option,
	mach_msg_timeout_t	send_timeout)
{
	int wresult;

	/*
	 *  Don't block if:
	 *	1) We're under the queue limit.
	 *	2) Caller used the MACH_SEND_ALWAYS internal option.
	 *	3) Message is sent to a send-once right.
	 */
	if (!imq_full(mqueue) ||
	    (!imq_full_kernel(mqueue) &&
	     ((option & MACH_SEND_ALWAYS) ||
	      (MACH_MSGH_BITS_REMOTE(kmsg->ikm_header->msgh_bits) ==
	       MACH_MSG_TYPE_PORT_SEND_ONCE)))) {
		mqueue->imq_msgcount++;
		assert(mqueue->imq_msgcount > 0);
		imq_unlock(mqueue);
	} else {
		thread_t cur_thread = current_thread();
		uint64_t deadline;

		/*
		 * We have to wait for space to be granted to us.
		 */
		if ((option & MACH_SEND_TIMEOUT) && (send_timeout == 0)) {
			imq_unlock(mqueue);
			return MACH_SEND_TIMED_OUT;
		}
		if (imq_full_kernel(mqueue)) {
			imq_unlock(mqueue);
			return MACH_SEND_NO_BUFFER;
		}
		mqueue->imq_fullwaiters = TRUE;

		if (option & MACH_SEND_TIMEOUT)
			clock_interval_to_deadline(send_timeout, 1000*NSEC_PER_USEC, &deadline);
		else
			deadline = 0;

		thread_set_pending_block_hint(cur_thread, kThreadWaitPortSend);
		wresult = waitq_assert_wait64_locked(
						&mqueue->imq_wait_queue,
						IPC_MQUEUE_FULL,
						THREAD_ABORTSAFE,
						TIMEOUT_URGENCY_USER_NORMAL,
						deadline, TIMEOUT_NO_LEEWAY,
						cur_thread);

		imq_unlock(mqueue);

		if (wresult == THREAD_WAITING) {
			wresult = thread_block(THREAD_CONTINUE_NULL);
			counter(c_ipc_mqueue_send_block++);
		}

		switch (wresult) {

		case THREAD_AWAKENED:
			/*
			 * we can proceed - inherited msgcount from waker
			 * or the message queue has been destroyed and the msgcount
			 * has been reset to zero (will detect in ipc_mqueue_post()).
			 */
			break;

		case THREAD_TIMED_OUT:
			assert(option & MACH_SEND_TIMEOUT);
			return MACH_SEND_TIMED_OUT;

		case THREAD_INTERRUPTED:
			return MACH_SEND_INTERRUPTED;

		case THREAD_RESTART:
			/* mqueue is being destroyed */
			return MACH_SEND_INVALID_DEST;
		default:
			panic("ipc_mqueue_send");
		}
	}

	ipc_mqueue_post(mqueue, kmsg, option);
	return MACH_MSG_SUCCESS;
}
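
/*
 * Illustrative sketch (not part of the original source): what the
 * blocking policy above means to a sender.  With MACH_SEND_TIMEOUT and
 * a zero timeout this is a pure try-send; on any unsuccessful return
 * the caller still owns kmsg and must dispose of it, per the contract
 * in the routine header.
 *
 *	imq_lock(port_mq);		// routine expects mqueue locked
 *	mr = ipc_mqueue_send(port_mq, kmsg, MACH_SEND_TIMEOUT, 0);
 *	if (mr == MACH_SEND_TIMED_OUT) {
 *		... queue was full; kmsg is still ours to destroy
 *		    or retry ...
 *	}
 */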

/*
 *	Routine:	ipc_mqueue_override_send
 *	Purpose:
 *		Set an override qos on the first message in the queue
 *		(if the queue is full). This is a send-possible override
 *		that will go away as soon as we drain a message from the
 *		queue.
 *
 *	Conditions:
 *		The message queue is not locked.
 *		The caller holds a reference on the message queue.
 */
extern void ipc_mqueue_override_send(
	ipc_mqueue_t	mqueue,
	mach_msg_priority_t override)
{
	boolean_t __unused full_queue_empty = FALSE;

	imq_lock(mqueue);
	assert(imq_valid(mqueue));
	assert(!imq_is_set(mqueue));

	if (imq_full(mqueue)) {
		ipc_kmsg_t first = ipc_kmsg_queue_first(&mqueue->imq_messages);

		if (first && ipc_kmsg_override_qos(&mqueue->imq_messages, first, override))
			KNOTE(&mqueue->imq_klist, 0);
		if (!first)
			full_queue_empty = TRUE;
	}
	imq_unlock(mqueue);

#if DEVELOPMENT || DEBUG
	if (full_queue_empty) {
		ipc_port_t port = ip_from_mq(mqueue);
		int dst_pid = 0;
		if (ip_active(port) && !port->ip_tempowner &&
		    port->ip_receiver_name && port->ip_receiver &&
		    port->ip_receiver != ipc_space_kernel) {
			dst_pid = task_pid(port->ip_receiver->is_task);
		}
	}
#endif
}

/*
 *	Routine:	ipc_mqueue_release_msgcount
 *	Purpose:
 *		Release a message queue reference in the case where we
 *		found a waiter.
 *
 *	Conditions:
 *		The message queue is locked.
 *		The message corresponding to this reference is off the queue.
 *		There is no need to pass reserved preposts because this will
 *		never prepost to anyone
 */
void
ipc_mqueue_release_msgcount(ipc_mqueue_t port_mq, ipc_mqueue_t set_mq)
{
	(void)set_mq;
	assert(imq_held(port_mq));
	assert(port_mq->imq_msgcount > 1 || ipc_kmsg_queue_empty(&port_mq->imq_messages));

	port_mq->imq_msgcount--;

	if (!imq_full(port_mq) && port_mq->imq_fullwaiters) {
		/*
		 * boost the priority of the awoken thread
		 * (WAITQ_PROMOTE_PRIORITY) to ensure it uses
		 * the message queue slot we've just reserved.
		 *
		 * NOTE: this will never prepost
		 */
		if (waitq_wakeup64_one_locked(&port_mq->imq_wait_queue,
					      IPC_MQUEUE_FULL,
					      THREAD_AWAKENED,
					      NULL,
					      WAITQ_PROMOTE_PRIORITY,
					      WAITQ_KEEP_LOCKED) != KERN_SUCCESS) {
			port_mq->imq_fullwaiters = FALSE;
		} else {
			/* gave away our slot - add reference back */
			port_mq->imq_msgcount++;
		}
	}

	if (ipc_kmsg_queue_empty(&port_mq->imq_messages)) {
		/* no more msgs: invalidate the port's prepost object */
		waitq_clear_prepost_locked(&port_mq->imq_wait_queue);
	}
}

/*
 *	Routine:	ipc_mqueue_post
 *	Purpose:
 *		Post a message to a waiting receiver or enqueue it.  If a
 *		receiver is waiting, we can release our reserved space in
 *		the message queue.
 *
 *	Conditions:
 *		mqueue is unlocked
 *		If we need to queue, our space in the message queue is reserved.
 */
void
ipc_mqueue_post(
	ipc_mqueue_t		mqueue,
	ipc_kmsg_t		kmsg,
	mach_msg_option_t	__unused option)
{
	uint64_t reserved_prepost = 0;
	boolean_t destroy_msg = FALSE;

	ipc_kmsg_trace_send(kmsg, option);

	/*
	 * While the msg queue is locked, we have control of the
	 * kmsg, so the ref in it for the port is still good.
	 *
	 * Check for a receiver for the message.
	 */
	imq_reserve_and_lock(mqueue, &reserved_prepost);

	/* we may have raced with port destruction! */
	if (!imq_valid(mqueue)) {
		destroy_msg = TRUE;
		goto out_unlock;
	}

	for (;;) {
		struct waitq *waitq = &mqueue->imq_wait_queue;
		spl_t th_spl;
		thread_t receiver;
		mach_msg_size_t msize;

		receiver = waitq_wakeup64_identify_locked(waitq,
							  IPC_MQUEUE_RECEIVE,
							  THREAD_AWAKENED,
							  &th_spl,
							  &reserved_prepost,
							  WAITQ_ALL_PRIORITIES,
							  WAITQ_KEEP_LOCKED);
		/* waitq still locked, thread locked */

		if (receiver == THREAD_NULL) {

			/*
			 * no receivers; queue kmsg if space still reserved
			 * Reservations are cancelled when the port goes inactive.
			 * note that this will enqueue the message for any
			 * "peeking" receivers.
			 *
			 * Also, post the knote to wake up any threads waiting
			 * on that style of interface if this insertion is of
			 * note (first insertion, or adjusted override qos all
			 * the way to the head of the queue).
			 *
			 * This is just for ports; portset knotes are stay-active,
			 * and their threads get awakened through the
			 * !MACH_RCV_IN_PROGRESS logic below.
			 */
			if (mqueue->imq_msgcount > 0) {
				if (ipc_kmsg_enqueue_qos(&mqueue->imq_messages, kmsg))
					KNOTE(&mqueue->imq_klist, 0);
				break;
			}

			/*
			 * Otherwise, the message queue must belong to an inactive
			 * port, so just destroy the message and pretend it was posted.
			 */
			destroy_msg = TRUE;
			goto out_unlock;
		}

		/*
		 * If a thread is attempting a "peek" into the message queue
		 * (MACH_PEEK_IN_PROGRESS), then we enqueue the message and set the
		 * thread running.  A successful peek is essentially the same as
		 * message delivery since the peeking thread takes responsibility
		 * for delivering the message and (eventually) removing it from
		 * the mqueue.  Only one thread can successfully use the peek
		 * facility on any given port, so we exit the waitq loop after
		 * encountering such a thread.
		 */
		if (receiver->ith_state == MACH_PEEK_IN_PROGRESS && mqueue->imq_msgcount > 0) {
			ipc_kmsg_enqueue_qos(&mqueue->imq_messages, kmsg);
			ipc_mqueue_peek_on_thread(mqueue, receiver->ith_option, receiver);
			thread_unlock(receiver);
			splx(th_spl);
			break;	/* Message was posted, so break out of loop */
		}

		/*
		 * If the receiver waited with a facility not directly related
		 * to Mach messaging, then it isn't prepared to get handed the
		 * message directly.  Just set it running, and go look for
		 * another thread that can.
		 */
		if (receiver->ith_state != MACH_RCV_IN_PROGRESS) {
			thread_unlock(receiver);
			splx(th_spl);
			continue;
		}


		/*
		 * We found a waiting thread.
		 * If the message is too large or the scatter list is too small
		 * the thread we wake up will get that as its status.
		 */
		msize = ipc_kmsg_copyout_size(kmsg, receiver->map);
		if (receiver->ith_rsize <
		    (msize + REQUESTED_TRAILER_SIZE(thread_is_64bit(receiver), receiver->ith_option))) {
			receiver->ith_msize = msize;
			receiver->ith_state = MACH_RCV_TOO_LARGE;
		} else {
			receiver->ith_state = MACH_MSG_SUCCESS;
		}

		/*
		 * If there is no problem with the upcoming receive, or the
		 * receiver thread didn't specifically ask for the special
		 * too-large error condition, go ahead and select it anyway.
		 */
		if ((receiver->ith_state == MACH_MSG_SUCCESS) ||
		    !(receiver->ith_option & MACH_RCV_LARGE)) {
			receiver->ith_kmsg = kmsg;
			receiver->ith_seqno = mqueue->imq_seqno++;
#if MACH_FLIPC
			mach_node_t node = kmsg->ikm_node;
#endif
			thread_unlock(receiver);
			splx(th_spl);

			/* we didn't need our reserved spot in the queue */
			ipc_mqueue_release_msgcount(mqueue, IMQ_NULL);

#if MACH_FLIPC
			if (MACH_NODE_VALID(node) && FPORT_VALID(mqueue->imq_fport))
				flipc_msg_ack(node, mqueue, TRUE);
#endif
			break;
		}

		/*
		 * Otherwise, this thread needs to be released to run
		 * and handle its error without getting the message.  We
		 * need to go back and pick another one.
		 */
		receiver->ith_receiver_name = mqueue->imq_receiver_name;
		receiver->ith_kmsg = IKM_NULL;
		receiver->ith_seqno = 0;
		thread_unlock(receiver);
		splx(th_spl);
	}

 out_unlock:
	/* clear the waitq boost we may have been given */
	waitq_clear_promotion_locked(&mqueue->imq_wait_queue, current_thread());
	imq_release_and_unlock(mqueue, reserved_prepost);
	if (destroy_msg)
		ipc_kmsg_destroy(kmsg);

	current_task()->messages_sent++;
	return;
}


/* static */ void
ipc_mqueue_receive_results(wait_result_t saved_wait_result)
{
	thread_t self = current_thread();
	mach_msg_option_t option = self->ith_option;

	/*
	 * why did we wake up?
	 */
	switch (saved_wait_result) {
	case THREAD_TIMED_OUT:
		self->ith_state = MACH_RCV_TIMED_OUT;
		return;

	case THREAD_INTERRUPTED:
		self->ith_state = MACH_RCV_INTERRUPTED;
		return;

	case THREAD_RESTART:
		/* something bad happened to the port/set */
		self->ith_state = MACH_RCV_PORT_CHANGED;
		return;

	case THREAD_AWAKENED:
		/*
		 * We do not need to go select a message, somebody
		 * handed us one (or a too-large indication).
		 */
		switch (self->ith_state) {
		case MACH_RCV_SCATTER_SMALL:
		case MACH_RCV_TOO_LARGE:
			/*
			 * Somebody tried to give us a too large
			 * message. If we indicated that we cared,
			 * then they only gave us the indication,
			 * otherwise they gave us the indication
			 * AND the message anyway.
			 */
			if (option & MACH_RCV_LARGE) {
				return;
			}
			/* fall through: we were given the message anyway */

		case MACH_MSG_SUCCESS:
		case MACH_PEEK_READY:
			return;

		default:
			panic("ipc_mqueue_receive_results: strange ith_state");
		}

	default:
		panic("ipc_mqueue_receive_results: strange wait_result");
	}
}

void
ipc_mqueue_receive_continue(
	__unused void *param,
	wait_result_t wresult)
{
	ipc_mqueue_receive_results(wresult);
	mach_msg_receive_continue();	/* hard-coded for now */
}

/*
 *	Routine:	ipc_mqueue_receive
 *	Purpose:
 *		Receive a message from a message queue.
 *
 *	Conditions:
 *		Our caller must hold a reference for the port or port set
 *		to which this queue belongs, to keep the queue
 *		from being deallocated.
 *
 *		The kmsg is returned with clean header fields
 *		and with the circular bit turned off through the ith_kmsg
 *		field of the thread's receive continuation state.
 *	Returns:
 *		MACH_MSG_SUCCESS	Message returned in ith_kmsg.
 *		MACH_RCV_TOO_LARGE	Message size returned in ith_msize.
 *		MACH_RCV_TIMED_OUT	No message obtained.
 *		MACH_RCV_INTERRUPTED	No message obtained.
 *		MACH_RCV_PORT_DIED	Port/set died; no message.
 *		MACH_RCV_PORT_CHANGED	Port moved into set; no msg.
 *
 */

void
ipc_mqueue_receive(
	ipc_mqueue_t		mqueue,
	mach_msg_option_t	option,
	mach_msg_size_t		max_size,
	mach_msg_timeout_t	rcv_timeout,
	int			interruptible)
{
	wait_result_t wresult;
	thread_t self = current_thread();

	imq_lock(mqueue);
	wresult = ipc_mqueue_receive_on_thread(mqueue, option, max_size,
					       rcv_timeout, interruptible,
					       self);
	/* mqueue unlocked */
	if (wresult == THREAD_NOT_WAITING)
		return;

	if (wresult == THREAD_WAITING) {
		counter((interruptible == THREAD_ABORTSAFE) ?
			c_ipc_mqueue_receive_block_user++ :
			c_ipc_mqueue_receive_block_kernel++);

		if (self->ith_continuation)
			thread_block(ipc_mqueue_receive_continue);
			/* NOTREACHED */

		wresult = thread_block(THREAD_CONTINUE_NULL);
	}
	ipc_mqueue_receive_results(wresult);
}
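
/*
 * Illustrative sketch (not part of the original source): the
 * continuation protocol used above.  If a caller stores a continuation
 * in ith_continuation before receiving, a blocked receive discards the
 * kernel stack and resumes in ipc_mqueue_receive_continue(), which
 * re-derives the outcome from the thread's ith_* fields; the call
 * below then never returns on the blocking path.  The continuation
 * name is hypothetical.
 *
 *	self->ith_continuation = my_receive_continuation;
 *	ipc_mqueue_receive(mqueue, option, max_size, rcv_timeout,
 *			   THREAD_ABORTSAFE);
 *	... reached only if the receive was satisfied without blocking;
 *	    self->ith_state and self->ith_kmsg describe the result ...
 */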

static int mqueue_process_prepost_receive(void *ctx, struct waitq *waitq,
					  struct waitq_set *wqset)
{
	ipc_mqueue_t port_mq, *pmq_ptr;

	(void)wqset;
	port_mq = (ipc_mqueue_t)waitq;

	/*
	 * If there are no messages on this queue, skip it and remove
	 * it from the prepost list
	 */
	if (ipc_kmsg_queue_empty(&port_mq->imq_messages))
		return WQ_ITERATE_INVALIDATE_CONTINUE;

	/*
	 * There are messages waiting on this port.
	 * Instruct the prepost iteration logic to break, but keep the
	 * waitq locked.
	 */
	pmq_ptr = (ipc_mqueue_t *)ctx;
	if (pmq_ptr)
		*pmq_ptr = port_mq;
	return WQ_ITERATE_BREAK_KEEP_LOCKED;
}

/*
 *	Routine:	ipc_mqueue_receive_on_thread
 *	Purpose:
 *		Receive a message from a message queue using a specified thread.
 *		If no message available, assert_wait on the appropriate waitq.
 *
 *	Conditions:
 *		Assumes thread is self.
 *		Called with mqueue locked.
 *		Returns with mqueue unlocked.
 *		May have assert-waited. Caller must block in those cases.
 */
wait_result_t
ipc_mqueue_receive_on_thread(
	ipc_mqueue_t		mqueue,
	mach_msg_option_t	option,
	mach_msg_size_t		max_size,
	mach_msg_timeout_t	rcv_timeout,
	int			interruptible,
	thread_t		thread)
{
	wait_result_t wresult;
	uint64_t deadline;

	/* called with mqueue locked */

	/* no need to reserve anything: we never prepost to anyone */

	if (!imq_valid(mqueue)) {
		/* someone raced us to destroy this mqueue/port! */
		imq_unlock(mqueue);
		/*
		 * ipc_mqueue_receive_results updates the thread's ith_state
		 * TODO: differentiate between rights being moved and
		 * rights/ports being destroyed (21885327)
		 */
		return THREAD_RESTART;
	}

	if (imq_is_set(mqueue)) {
		ipc_mqueue_t port_mq = IMQ_NULL;

		(void)waitq_set_iterate_preposts(&mqueue->imq_set_queue,
						 &port_mq,
						 mqueue_process_prepost_receive);

		if (port_mq != IMQ_NULL) {
			/*
			 * We get here if there is at least one message
			 * waiting on port_mq. We have instructed the prepost
			 * iteration logic to leave both the port_mq and the
			 * set mqueue locked.
			 *
			 * TODO: previously, we would place this port at the
			 *       back of the prepost list...
			 */
			imq_unlock(mqueue);

			/*
			 * Continue on to handling the message with just
			 * the port mqueue locked.
			 */
			if (option & MACH_PEEK_MSG)
				ipc_mqueue_peek_on_thread(port_mq, option, thread);
			else
				ipc_mqueue_select_on_thread(port_mq, mqueue, option,
							    max_size, thread);

			imq_unlock(port_mq);
			return THREAD_NOT_WAITING;
		}
	} else if (imq_is_queue(mqueue)) {
		ipc_kmsg_queue_t kmsgs;

		/*
		 * Receive on a single port. Just try to get the messages.
		 */
		kmsgs = &mqueue->imq_messages;
		if (ipc_kmsg_queue_first(kmsgs) != IKM_NULL) {
			if (option & MACH_PEEK_MSG)
				ipc_mqueue_peek_on_thread(mqueue, option, thread);
			else
				ipc_mqueue_select_on_thread(mqueue, IMQ_NULL, option,
							    max_size, thread);
			imq_unlock(mqueue);
			return THREAD_NOT_WAITING;
		}
	} else {
		panic("Unknown mqueue type 0x%x: likely memory corruption!\n",
		      mqueue->imq_wait_queue.waitq_type);
	}

	/*
	 * Looks like we'll have to block.  The mqueue we will
	 * block on (whether the set's or the local port's) is
	 * still locked.
	 */
	if (option & MACH_RCV_TIMEOUT) {
		if (rcv_timeout == 0) {
			imq_unlock(mqueue);
			thread->ith_state = MACH_RCV_TIMED_OUT;
			return THREAD_NOT_WAITING;
		}
	}

	thread->ith_option = option;
	thread->ith_rsize = max_size;
	thread->ith_msize = 0;

	if (option & MACH_PEEK_MSG)
		thread->ith_state = MACH_PEEK_IN_PROGRESS;
	else
		thread->ith_state = MACH_RCV_IN_PROGRESS;

	if (option & MACH_RCV_TIMEOUT)
		clock_interval_to_deadline(rcv_timeout, 1000*NSEC_PER_USEC, &deadline);
	else
		deadline = 0;

	thread_set_pending_block_hint(thread, kThreadWaitPortReceive);
	wresult = waitq_assert_wait64_locked(&mqueue->imq_wait_queue,
					     IPC_MQUEUE_RECEIVE,
					     interruptible,
					     TIMEOUT_URGENCY_USER_NORMAL,
					     deadline,
					     TIMEOUT_NO_LEEWAY,
					     thread);
	/* preposts should be detected above, not here */
	if (wresult == THREAD_AWAKENED)
		panic("ipc_mqueue_receive_on_thread: sleep walking");

	imq_unlock(mqueue);

	return wresult;
}


/*
 *	Routine:	ipc_mqueue_peek_on_thread
 *	Purpose:
 *		A receiver discovered that there was a message on the queue
 *		before he had to block.  Tell a thread about the message queue,
 *		but don't pick off any messages.
 *	Conditions:
 *		port_mq locked
 *		at least one message on port_mq's message queue
 *
 *	Returns: (on thread->ith_state)
 *		MACH_PEEK_READY		ith_peekq contains a message queue
 */
void
ipc_mqueue_peek_on_thread(
	ipc_mqueue_t	port_mq,
	mach_msg_option_t option,
	thread_t	thread)
{
	(void)option;
	assert(option & MACH_PEEK_MSG);
	assert(ipc_kmsg_queue_first(&port_mq->imq_messages) != IKM_NULL);

	/*
	 * Take a reference on the mqueue's associated port;
	 * the peeking thread is responsible for releasing this
	 * reference using ip_release_mq().
	 */
	ip_reference_mq(port_mq);
	thread->ith_peekq = port_mq;
	thread->ith_state = MACH_PEEK_READY;
}

/*
 *	Routine:	ipc_mqueue_select_on_thread
 *	Purpose:
 *		A receiver discovered that there was a message on the queue
 *		before he had to block.  Pick the message off the queue and
 *		"post" it to thread.
 *	Conditions:
 *		mqueue locked.
 *		thread not locked.
 *		There is a message.
 *		No need to reserve prepost objects - it will never prepost
 *
 *	Returns:
 *		MACH_MSG_SUCCESS	Actually selected a message for ourselves.
 *		MACH_RCV_TOO_LARGE	May or may not have pulled it, but it is too large.
 */
void
ipc_mqueue_select_on_thread(
	ipc_mqueue_t		port_mq,
	ipc_mqueue_t		set_mq,
	mach_msg_option_t	option,
	mach_msg_size_t		max_size,
	thread_t		thread)
{
	ipc_kmsg_t kmsg;
	mach_msg_return_t mr = MACH_MSG_SUCCESS;
	mach_msg_size_t msize;

	/*
	 * Do some sanity checking of our ability to receive
	 * before pulling the message off the queue.
	 */
	kmsg = ipc_kmsg_queue_first(&port_mq->imq_messages);
	assert(kmsg != IKM_NULL);

	/*
	 * If we really can't receive it, but we had the
	 * MACH_RCV_LARGE option set, then don't take it off
	 * the queue, instead return the appropriate error
	 * (and size needed).
	 */
	msize = ipc_kmsg_copyout_size(kmsg, thread->map);
	if (msize + REQUESTED_TRAILER_SIZE(thread_is_64bit(thread), option) > max_size) {
		mr = MACH_RCV_TOO_LARGE;
		if (option & MACH_RCV_LARGE) {
			thread->ith_receiver_name = port_mq->imq_receiver_name;
			thread->ith_kmsg = IKM_NULL;
			thread->ith_msize = msize;
			thread->ith_seqno = 0;
			thread->ith_state = mr;
			return;
		}
	}

	ipc_kmsg_rmqueue(&port_mq->imq_messages, kmsg);
#if MACH_FLIPC
	if (MACH_NODE_VALID(kmsg->ikm_node) && FPORT_VALID(port_mq->imq_fport))
		flipc_msg_ack(kmsg->ikm_node, port_mq, TRUE);
#endif
	ipc_mqueue_release_msgcount(port_mq, set_mq);
	thread->ith_seqno = port_mq->imq_seqno++;
	thread->ith_kmsg = kmsg;
	thread->ith_state = mr;

	current_task()->messages_received++;
	return;
}

/*
 *	Routine:	ipc_mqueue_peek_locked
 *	Purpose:
 *		Peek at a (non-set) message queue to see if it has a message
 *		matching the sequence number provided (if zero, then the
 *		first message in the queue) and return vital info about the
 *		message.
 *
 *	Conditions:
 *		The ipc_mqueue_t is locked by callers.
 *		Other locks may be held by callers, so this routine cannot block.
 *		Caller holds reference on the message queue.
 */
unsigned
ipc_mqueue_peek_locked(ipc_mqueue_t mq,
		       mach_port_seqno_t *seqnop,
		       mach_msg_size_t *msg_sizep,
		       mach_msg_id_t *msg_idp,
		       mach_msg_max_trailer_t *msg_trailerp,
		       ipc_kmsg_t *kmsgp)
{
	ipc_kmsg_queue_t kmsgq;
	ipc_kmsg_t kmsg;
	mach_port_seqno_t seqno, msgoff;
	unsigned res = 0;

	assert(!imq_is_set(mq));

	seqno = 0;
	if (seqnop != NULL)
		seqno = *seqnop;

	if (seqno == 0) {
		seqno = mq->imq_seqno;
		msgoff = 0;
	} else if (seqno >= mq->imq_seqno &&
		   seqno < mq->imq_seqno + mq->imq_msgcount) {
		msgoff = seqno - mq->imq_seqno;
	} else
		goto out;

	/* look for the message that would match that seqno */
	kmsgq = &mq->imq_messages;
	kmsg = ipc_kmsg_queue_first(kmsgq);
	while (msgoff-- && kmsg != IKM_NULL) {
		kmsg = ipc_kmsg_queue_next(kmsgq, kmsg);
	}
	if (kmsg == IKM_NULL)
		goto out;

	/* found one - return the requested info */
	if (seqnop != NULL)
		*seqnop = seqno;
	if (msg_sizep != NULL)
		*msg_sizep = kmsg->ikm_header->msgh_size;
	if (msg_idp != NULL)
		*msg_idp = kmsg->ikm_header->msgh_id;
	if (msg_trailerp != NULL)
		memcpy(msg_trailerp,
		       (mach_msg_max_trailer_t *)((vm_offset_t)kmsg->ikm_header +
						  round_msg(kmsg->ikm_header->msgh_size)),
		       sizeof(mach_msg_max_trailer_t));
	if (kmsgp != NULL)
		*kmsgp = kmsg;

	res = 1;

 out:
	return res;
}


/*
 *	Routine:	ipc_mqueue_peek
 *	Purpose:
 *		Peek at a (non-set) message queue to see if it has a message
 *		matching the sequence number provided (if zero, then the
 *		first message in the queue) and return vital info about the
 *		message.
 *
 *	Conditions:
 *		The ipc_mqueue_t is unlocked.
 *		Locks may be held by callers, so this routine cannot block.
 *		Caller holds reference on the message queue.
 */
unsigned
ipc_mqueue_peek(ipc_mqueue_t mq,
		mach_port_seqno_t *seqnop,
		mach_msg_size_t *msg_sizep,
		mach_msg_id_t *msg_idp,
		mach_msg_max_trailer_t *msg_trailerp,
		ipc_kmsg_t *kmsgp)
{
	unsigned res;

	imq_lock(mq);

	res = ipc_mqueue_peek_locked(mq, seqnop, msg_sizep, msg_idp,
				     msg_trailerp, kmsgp);

	imq_unlock(mq);
	return res;
}
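
/*
 * Illustrative sketch (not part of the original source): a caller that
 * only wants the size and id of the first pending message.  A zero
 * initial seqno selects the first message, and out parameters that
 * are not needed may be passed as NULL.
 *
 *	mach_port_seqno_t seqno = 0;
 *	mach_msg_size_t size;
 *	mach_msg_id_t id;
 *
 *	if (ipc_mqueue_peek(port_mq, &seqno, &size, &id, NULL, NULL)) {
 *		... a message exists; seqno/size/id describe it ...
 *	}
 */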

/*
 *	Routine:	ipc_mqueue_release_peek_ref
 *	Purpose:
 *		Release the reference on an mqueue's associated port which was
 *		granted to a thread in ipc_mqueue_peek_on_thread (on the
 *		MACH_PEEK_MSG thread wakeup path).
 *
 *	Conditions:
 *		The ipc_mqueue_t should be locked on entry.
 *		The ipc_mqueue_t will be _unlocked_ on return
 *			(and potentially invalid!)
 *
 */
void ipc_mqueue_release_peek_ref(ipc_mqueue_t mq)
{
	assert(!imq_is_set(mq));
	assert(imq_held(mq));

	/*
	 * clear any preposts this mq may have generated
	 * (which would cause subsequent immediate wakeups)
	 */
	waitq_clear_prepost_locked(&mq->imq_wait_queue);

	imq_unlock(mq);

	/*
	 * release the port reference: we need to do this outside the lock
	 * because we might be holding the last port reference!
	 */
	ip_release_mq(mq);
}
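
/*
 * Illustrative sketch (not part of the original source): the
 * MACH_PEEK_MSG wakeup protocol formed by ipc_mqueue_peek_on_thread()
 * and ipc_mqueue_release_peek_ref().  A thread woken with
 * MACH_PEEK_READY finds the preposting mqueue in ith_peekq and must
 * eventually drop the port reference taken on its behalf.
 *
 *	if (self->ith_state == MACH_PEEK_READY) {
 *		ipc_mqueue_t port_mq = self->ith_peekq;
 *
 *		... examine messages on port_mq ...
 *		imq_lock(port_mq);
 *		ipc_mqueue_release_peek_ref(port_mq);
 *		... port_mq may now be invalid ...
 *	}
 */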

/*
 * peek at the contained port message queues, break prepost iteration as soon
 * as we spot a message on one of the message queues referenced by the set's
 * prepost list.  No need to lock each message queue, as only the head of each
 * queue is checked. If a message wasn't there before we entered here, no need
 * to find it (if we do, great).
 */
static int mqueue_peek_iterator(void *ctx, struct waitq *waitq,
				struct waitq_set *wqset)
{
	ipc_mqueue_t port_mq = (ipc_mqueue_t)waitq;
	ipc_kmsg_queue_t kmsgs = &port_mq->imq_messages;

	(void)ctx;
	(void)wqset;

	if (ipc_kmsg_queue_first(kmsgs) != IKM_NULL)
		return WQ_ITERATE_BREAK; /* break out of the prepost iteration */

	return WQ_ITERATE_CONTINUE;
}

/*
 *	Routine:	ipc_mqueue_set_peek
 *	Purpose:
 *		Peek at a message queue set to see if it has any ports
 *		with messages.
 *
 *	Conditions:
 *		Locks may be held by callers, so this routine cannot block.
 *		Caller holds reference on the message queue.
 */
unsigned
ipc_mqueue_set_peek(ipc_mqueue_t mq)
{
	int ret;

	imq_lock(mq);

	/*
	 * We may have raced with port destruction where the mqueue is marked
	 * as invalid. In that case, even though we don't have messages, we
	 * have an end-of-life event to deliver.
	 */
	if (!imq_is_valid(mq)) {
		/* don't return holding the lock taken above */
		imq_unlock(mq);
		return 1;
	}

	ret = waitq_set_iterate_preposts(&mq->imq_set_queue, NULL,
					 mqueue_peek_iterator);

	imq_unlock(mq);

	return (ret == WQ_ITERATE_BREAK);
}

/*
 *	Routine:	ipc_mqueue_set_gather_member_names
 *	Purpose:
 *		Discover all ports which are members of a given port set.
 *		Because the waitq linkage mechanism was redesigned to save
 *		significant amounts of memory, it no longer keeps back-pointers
 *		from a port set to a port.  Therefore, we must iterate over all
 *		ports within a given IPC space and individually query them to
 *		see if they are members of the given set. Port names of ports
 *		found to be members of the given set will be gathered into the
 *		provided 'names' array.  Actual returned names are limited to
 *		maxnames entries, but we keep counting the actual number of
 *		members to let the caller decide to retry if necessary.
 *
 *	Conditions:
 *		Locks may be held by callers, so this routine cannot block.
 *		Caller holds reference on the message queue (via port set).
 */
void
ipc_mqueue_set_gather_member_names(
	ipc_space_t	space,
	ipc_mqueue_t	set_mq,
	ipc_entry_num_t	maxnames,
	mach_port_name_t *names,
	ipc_entry_num_t	*actualp)
{
	ipc_entry_t table;
	ipc_entry_num_t tsize;
	struct waitq_set *wqset;
	ipc_entry_num_t actual = 0;

	assert(set_mq != IMQ_NULL);
	wqset = &set_mq->imq_set_queue;

	assert(space != IS_NULL);
	is_read_lock(space);
	if (!is_active(space)) {
		is_read_unlock(space);
		goto out;
	}

	if (!waitq_set_is_valid(wqset)) {
		is_read_unlock(space);
		goto out;
	}

	table = space->is_table;
	tsize = space->is_table_size;
	for (ipc_entry_num_t idx = 0; idx < tsize; idx++) {
		ipc_entry_t entry = &table[idx];

		/* only receive rights can be members of port sets */
		if ((entry->ie_bits & MACH_PORT_TYPE_RECEIVE) != MACH_PORT_TYPE_NONE) {
			__IGNORE_WCASTALIGN(ipc_port_t port = (ipc_port_t)entry->ie_object);
			ipc_mqueue_t mq = &port->ip_messages;

			assert(IP_VALID(port));
			if (ip_active(port) &&
			    waitq_member(&mq->imq_wait_queue, wqset)) {
				if (actual < maxnames)
					names[actual] = mq->imq_receiver_name;
				actual++;
			}
		}
	}

	is_read_unlock(space);

 out:
	*actualp = actual;
}
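
/*
 * Illustrative sketch (not part of the original source): the retry
 * pattern the counting behaviour above enables.  When more members
 * exist than fit, 'actual' reports the true count so the caller can
 * grow its buffer and try again (membership may change in between,
 * so a loop is typical).
 *
 *	ipc_entry_num_t actual;
 *
 *	ipc_mqueue_set_gather_member_names(space, set_mq, maxnames,
 *					   names, &actual);
 *	if (actual > maxnames) {
 *		... reallocate 'names' for 'actual' entries and retry ...
 *	}
 */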


/*
 *	Routine:	ipc_mqueue_destroy_locked
 *	Purpose:
 *		Destroy a (non-set) message queue.
 *		Set any blocked senders running.
 *		Destroy the kmsgs in the queue.
 *	Conditions:
 *		mqueue locked
 *		Receivers were removed when the receive right was "changed"
 */
boolean_t
ipc_mqueue_destroy_locked(ipc_mqueue_t mqueue)
{
	ipc_kmsg_queue_t kmqueue;
	ipc_kmsg_t kmsg;
	boolean_t reap = FALSE;

	assert(!imq_is_set(mqueue));

	/*
	 *	rouse all blocked senders
	 *	(don't boost anyone - we're tearing this queue down)
	 *	(never preposts)
	 */
	mqueue->imq_fullwaiters = FALSE;
	waitq_wakeup64_all_locked(&mqueue->imq_wait_queue,
				  IPC_MQUEUE_FULL,
				  THREAD_RESTART,
				  NULL,
				  WAITQ_ALL_PRIORITIES,
				  WAITQ_KEEP_LOCKED);

	/*
	 * Move messages from the specified queue to the per-thread
	 * clean/drain queue while we have the mqueue lock.
	 */
	kmqueue = &mqueue->imq_messages;
	while ((kmsg = ipc_kmsg_dequeue(kmqueue)) != IKM_NULL) {
#if MACH_FLIPC
		if (MACH_NODE_VALID(kmsg->ikm_node) && FPORT_VALID(mqueue->imq_fport))
			flipc_msg_ack(kmsg->ikm_node, mqueue, TRUE);
#endif
		boolean_t first;
		first = ipc_kmsg_delayed_destroy(kmsg);
		if (first)
			reap = first;
	}

	/*
	 * Wipe out message count, both for messages about to be
	 * reaped and for reserved space for (previously) woken senders.
	 * This is the indication to them that their reserved space is gone
	 * (the mqueue was destroyed).
	 */
	mqueue->imq_msgcount = 0;

	/* invalidate the waitq for subsequent mqueue operations */
	waitq_invalidate_locked(&mqueue->imq_wait_queue);

	/* clear out any preposting we may have done */
	waitq_clear_prepost_locked(&mqueue->imq_wait_queue);

	/*
	 * assert that we are destroying / invalidating a queue that's
	 * not a member of any other queue.
	 */
	assert(mqueue->imq_preposts == 0);
	assert(mqueue->imq_in_pset == 0);

	return reap;
}

/*
 *	Routine:	ipc_mqueue_set_qlimit
 *	Purpose:
 *		Changes a message queue limit; the maximum number
 *		of messages which may be queued.
 *	Conditions:
 *		Nothing locked.
 */

void
ipc_mqueue_set_qlimit(
	ipc_mqueue_t		mqueue,
	mach_port_msgcount_t	qlimit)
{

	assert(qlimit <= MACH_PORT_QLIMIT_MAX);

	/* wake up senders allowed by the new qlimit */
	imq_lock(mqueue);
	if (qlimit > mqueue->imq_qlimit) {
		mach_port_msgcount_t i, wakeup;

		/* caution: wakeup, qlimit are unsigned */
		wakeup = qlimit - mqueue->imq_qlimit;

		for (i = 0; i < wakeup; i++) {
			/*
			 * boost the priority of the awoken thread
			 * (WAITQ_PROMOTE_PRIORITY) to ensure it uses
			 * the message queue slot we've just reserved.
			 *
			 * NOTE: this will never prepost
			 */
			if (waitq_wakeup64_one_locked(&mqueue->imq_wait_queue,
						      IPC_MQUEUE_FULL,
						      THREAD_AWAKENED,
						      NULL,
						      WAITQ_PROMOTE_PRIORITY,
						      WAITQ_KEEP_LOCKED) == KERN_NOT_WAITING) {
				mqueue->imq_fullwaiters = FALSE;
				break;
			}
			mqueue->imq_msgcount++;	/* give it to the awakened thread */
		}
	}
	mqueue->imq_qlimit = qlimit;
	imq_unlock(mqueue);
}

/*
 *	Routine:	ipc_mqueue_set_seqno
 *	Purpose:
 *		Changes an mqueue's sequence number.
 *	Conditions:
 *		Caller holds a reference to the queue's containing object.
 */
void
ipc_mqueue_set_seqno(
	ipc_mqueue_t		mqueue,
	mach_port_seqno_t	seqno)
{
	imq_lock(mqueue);
	mqueue->imq_seqno = seqno;
	imq_unlock(mqueue);
}


/*
 *	Routine:	ipc_mqueue_copyin
 *	Purpose:
 *		Convert a name in a space to a message queue.
 *	Conditions:
 *		Nothing locked.  If successful, the caller gets a ref
 *		for the object.  This ref ensures the continued existence of
 *		the queue.
 *	Returns:
 *		MACH_MSG_SUCCESS	Found a message queue.
 *		MACH_RCV_INVALID_NAME	The space is dead.
 *		MACH_RCV_INVALID_NAME	The name doesn't denote a right.
 *		MACH_RCV_INVALID_NAME
 *			The denoted right is not receive or port set.
 *		MACH_RCV_IN_SET		Receive right is a member of a set.
 */

mach_msg_return_t
ipc_mqueue_copyin(
	ipc_space_t	space,
	mach_port_name_t name,
	ipc_mqueue_t	*mqueuep,
	ipc_object_t	*objectp)
{
	ipc_entry_t entry;
	ipc_object_t object;
	ipc_mqueue_t mqueue;

	is_read_lock(space);
	if (!is_active(space)) {
		is_read_unlock(space);
		return MACH_RCV_INVALID_NAME;
	}

	entry = ipc_entry_lookup(space, name);
	if (entry == IE_NULL) {
		is_read_unlock(space);
		return MACH_RCV_INVALID_NAME;
	}

	object = entry->ie_object;

	if (entry->ie_bits & MACH_PORT_TYPE_RECEIVE) {
		ipc_port_t port;

		__IGNORE_WCASTALIGN(port = (ipc_port_t) object);
		assert(port != IP_NULL);

		ip_lock(port);
		assert(ip_active(port));
		assert(port->ip_receiver_name == name);
		assert(port->ip_receiver == space);
		is_read_unlock(space);
		mqueue = &port->ip_messages;

	} else if (entry->ie_bits & MACH_PORT_TYPE_PORT_SET) {
		ipc_pset_t pset;

		__IGNORE_WCASTALIGN(pset = (ipc_pset_t) object);
		assert(pset != IPS_NULL);

		ips_lock(pset);
		assert(ips_active(pset));
		is_read_unlock(space);

		mqueue = &pset->ips_messages;
	} else {
		is_read_unlock(space);
		return MACH_RCV_INVALID_NAME;
	}

	/*
	 * At this point, the object is locked and active,
	 * the space is unlocked, and mqueue is initialized.
	 */

	io_reference(object);
	io_unlock(object);

	*objectp = object;
	*mqueuep = mqueue;
	return MACH_MSG_SUCCESS;
}
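
/*
 * Illustrative sketch (not part of the original source): the
 * lookup-then-release pattern implied by ipc_mqueue_copyin()'s
 * contract.  On success the caller owns a reference on 'object' that
 * keeps 'mqueue' alive, and must drop it when done.
 *
 *	ipc_mqueue_t mqueue;
 *	ipc_object_t object;
 *
 *	if (ipc_mqueue_copyin(space, name, &mqueue, &object) ==
 *	    MACH_MSG_SUCCESS) {
 *		... receive or peek on mqueue ...
 *		io_release(object);
 *	}
 */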