/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	ipc/ipc_mqueue.c
 *	Author:	Rich Draves
 *	Date:	1989
 *
 *	Functions to manipulate IPC message queues.
 */

#include <mach/port.h>
#include <mach/message.h>
#include <mach/sync_policy.h>

#include <kern/assert.h>
#include <kern/counters.h>
#include <kern/sched_prim.h>
#include <kern/ipc_kobject.h>
#include <kern/ipc_mig.h>	/* XXX - for mach_msg_receive_continue */
#include <kern/misc_protos.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/wait_queue.h>

#include <ipc/ipc_mqueue.h>
#include <ipc/ipc_kmsg.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_pset.h>
#include <ipc/ipc_space.h>

#include <ddb/tr.h>

int ipc_mqueue_full;	/* address is event for queue space */
int ipc_mqueue_rcv;	/* address is event for message arrival */

#define TR_ENABLE 0

/* forward declarations */
void ipc_mqueue_receive_results(wait_result_t result);

/*
 *	Routine:	ipc_mqueue_init
 *	Purpose:
 *		Initialize a newly-allocated message queue.
 */
void
ipc_mqueue_init(
	ipc_mqueue_t	mqueue,
	boolean_t	is_set)
{
	if (is_set) {
		wait_queue_set_init(&mqueue->imq_set_queue, SYNC_POLICY_FIFO);
	} else {
		wait_queue_init(&mqueue->imq_wait_queue, SYNC_POLICY_FIFO);
		ipc_kmsg_queue_init(&mqueue->imq_messages);
		mqueue->imq_seqno = 0;
		mqueue->imq_msgcount = 0;
		mqueue->imq_qlimit = MACH_PORT_QLIMIT_DEFAULT;
		mqueue->imq_fullwaiters = FALSE;
	}
}
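
/*
 * Note: a port set's mqueue has no message storage of its own; messages
 * always remain queued on the member ports' mqueues.  The set side only
 * carries the wait_queue_set that receivers block on, which is why only
 * the non-set branch above initializes the kmsg queue, seqno and limits.
 */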

/*
 *	Routine:	ipc_mqueue_member
 *	Purpose:
 *		Indicate whether the (port) mqueue is a member of
 *		this portset's mqueue.  We do this by checking
 *		whether the portset mqueue's waitq is a member of
 *		the port's mqueue waitq.
 *	Conditions:
 *		the portset's mqueue is not already a member
 *		this may block while allocating linkage structures.
 */

boolean_t
ipc_mqueue_member(
	ipc_mqueue_t	port_mqueue,
	ipc_mqueue_t	set_mqueue)
{
	wait_queue_t	 port_waitq = &port_mqueue->imq_wait_queue;
	wait_queue_set_t set_waitq = &set_mqueue->imq_set_queue;

	return (wait_queue_member(port_waitq, set_waitq));
}

/*
 *	Routine:	ipc_mqueue_remove
 *	Purpose:
 *		Remove the association between the queue and the specified
 *		set message queue.
 */

kern_return_t
ipc_mqueue_remove(
	ipc_mqueue_t	mqueue,
	ipc_mqueue_t	set_mqueue)
{
	wait_queue_t	 mq_waitq = &mqueue->imq_wait_queue;
	wait_queue_set_t set_waitq = &set_mqueue->imq_set_queue;

	return wait_queue_unlink(mq_waitq, set_waitq);
}

/*
 *	Routine:	ipc_mqueue_remove_from_all
 *	Purpose:
 *		Remove the mqueue from all the sets it is a member of
 *	Conditions:
 *		Nothing locked.
 */
void
ipc_mqueue_remove_from_all(
	ipc_mqueue_t	mqueue)
{
	wait_queue_t	mq_waitq = &mqueue->imq_wait_queue;

	wait_queue_unlink_all(mq_waitq);
	return;
}

/*
 *	Routine:	ipc_mqueue_remove_all
 *	Purpose:
 *		Remove all the member queues from the specified set.
 *	Conditions:
 *		Nothing locked.
 */
void
ipc_mqueue_remove_all(
	ipc_mqueue_t	mqueue)
{
	wait_queue_set_t	mq_setq = &mqueue->imq_set_queue;

	wait_queue_set_unlink_all(mq_setq);
	return;
}

/*
 *	Routine:	ipc_mqueue_add
 *	Purpose:
 *		Associate the portset's mqueue with the port's mqueue.
 *		This has to be done so that posting the port will wake up
 *		a portset waiter.  If there are waiters on the portset
 *		mqueue and messages on the port mqueue, try to match them
 *		up now.
 *	Conditions:
 *		May block.
 */
kern_return_t
ipc_mqueue_add(
	ipc_mqueue_t	port_mqueue,
	ipc_mqueue_t	set_mqueue)
{
	wait_queue_t	 port_waitq = &port_mqueue->imq_wait_queue;
	wait_queue_set_t set_waitq = &set_mqueue->imq_set_queue;
	ipc_kmsg_queue_t kmsgq;
	ipc_kmsg_t	 kmsg, next;
	kern_return_t	 kr;
	spl_t		 s;

	kr = wait_queue_link(port_waitq, set_waitq);
	if (kr != KERN_SUCCESS)
		return kr;

	/*
	 * Now that the set has been added to the port, there may be
	 * messages queued on the port and threads waiting on the set
	 * waitq.  Let's get them together.
	 */
	s = splsched();
	imq_lock(port_mqueue);
	kmsgq = &port_mqueue->imq_messages;
	for (kmsg = ipc_kmsg_queue_first(kmsgq);
	     kmsg != IKM_NULL;
	     kmsg = next) {
		next = ipc_kmsg_queue_next(kmsgq, kmsg);

		for (;;) {
			thread_t th;

			th = wait_queue_wakeup64_identity_locked(
						port_waitq,
						IPC_MQUEUE_RECEIVE,
						THREAD_AWAKENED,
						FALSE);
			/* waitq/mqueue still locked, thread locked */

			if (th == THREAD_NULL)
				goto leave;

			/*
			 * Found a receiver.  See if it can handle the message
			 * correctly (the message is not too large for it, or
			 * it didn't care to be informed that the message was
			 * too large).  If it can't handle it, take it off
			 * the list, let it go back and figure it out, and
			 * just move on to the next.
			 */
			if (th->ith_msize <
			    kmsg->ikm_header->msgh_size +
			    REQUESTED_TRAILER_SIZE(th->ith_option)) {
				th->ith_state = MACH_RCV_TOO_LARGE;
				th->ith_msize = kmsg->ikm_header->msgh_size;
				if (th->ith_option & MACH_RCV_LARGE) {
					/*
					 * let the thread go without the message
					 */
					th->ith_kmsg = IKM_NULL;
					th->ith_seqno = 0;
					thread_unlock(th);
					continue; /* find another thread */
				}
			} else {
				th->ith_state = MACH_MSG_SUCCESS;
			}

			/*
			 * This thread is going to take this message,
			 * so hand it over.
			 */
			ipc_kmsg_rmqueue(kmsgq, kmsg);
			ipc_mqueue_release_msgcount(port_mqueue);

			th->ith_kmsg = kmsg;
			th->ith_seqno = port_mqueue->imq_seqno++;
			thread_unlock(th);
			break;  /* go to next message */
		}
	}
 leave:
	imq_unlock(port_mqueue);
	splx(s);
	return KERN_SUCCESS;
}
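
/*
 * For orientation, a sketch of the expected caller's side (assuming the
 * usual path through the port-set insertion code; names and error
 * handling abbreviated):
 *
 *	kr = ipc_mqueue_add(&port->ip_messages, &pset->ips_messages);
 *	if (kr != KERN_SUCCESS)
 *		return kr;	(linkage allocation failed)
 */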

/*
 *	Routine:	ipc_mqueue_changed
 *	Purpose:
 *		Wake up receivers waiting in a message queue.
 *	Conditions:
 *		The message queue is locked.
 */

void
ipc_mqueue_changed(
	ipc_mqueue_t	mqueue)
{
	wait_queue_wakeup64_all_locked(
			&mqueue->imq_wait_queue,
			IPC_MQUEUE_RECEIVE,
			THREAD_RESTART,
			FALSE);		/* unlock waitq? */
}

/*
 *	Routine:	ipc_mqueue_send
 *	Purpose:
 *		Send a message to a message queue.  The message holds a reference
 *		for the destination port for this message queue in the
 *		msgh_remote_port field.
 *
 *		If unsuccessful, the caller still has possession of
 *		the message and must do something with it.  If successful,
 *		the message is queued, given to a receiver, or destroyed.
 *	Conditions:
 *		Nothing locked.
 *	Returns:
 *		MACH_MSG_SUCCESS	The message was accepted.
 *		MACH_SEND_TIMED_OUT	Caller still has message.
 *		MACH_SEND_INTERRUPTED	Caller still has message.
 */
mach_msg_return_t
ipc_mqueue_send(
	ipc_mqueue_t		mqueue,
	ipc_kmsg_t		kmsg,
	mach_msg_option_t	option,
	mach_msg_timeout_t	send_timeout)
{
	wait_result_t	wresult;
	spl_t		s;

	/*
	 * Don't block if:
	 *	1) We're under the queue limit.
	 *	2) Caller used the MACH_SEND_ALWAYS internal option.
	 *	3) Message is sent to a send-once right.
	 */
	s = splsched();
	imq_lock(mqueue);

	if (!imq_full(mqueue) ||
	    (option & MACH_SEND_ALWAYS) ||
	    (MACH_MSGH_BITS_REMOTE(kmsg->ikm_header->msgh_bits) ==
	     MACH_MSG_TYPE_PORT_SEND_ONCE)) {
		mqueue->imq_msgcount++;
		assert(mqueue->imq_msgcount > 0);
		imq_unlock(mqueue);
		splx(s);
	} else {
		thread_t cur_thread = current_thread();
		uint64_t deadline;

		/*
		 * We have to wait for space to be granted to us.
		 */
		if ((option & MACH_SEND_TIMEOUT) && (send_timeout == 0)) {
			imq_unlock(mqueue);
			splx(s);
			return MACH_SEND_TIMED_OUT;
		}
		mqueue->imq_fullwaiters = TRUE;
		thread_lock(cur_thread);
		if (option & MACH_SEND_TIMEOUT)
			clock_interval_to_deadline(send_timeout, 1000*NSEC_PER_USEC, &deadline);
		else
			deadline = 0;
		wresult = wait_queue_assert_wait64_locked(
						&mqueue->imq_wait_queue,
						IPC_MQUEUE_FULL,
						THREAD_ABORTSAFE, deadline,
						cur_thread);
		thread_unlock(cur_thread);
		imq_unlock(mqueue);
		splx(s);

		if (wresult == THREAD_WAITING) {
			wresult = thread_block(THREAD_CONTINUE_NULL);
			counter(c_ipc_mqueue_send_block++);
		}

		switch (wresult) {
		case THREAD_TIMED_OUT:
			assert(option & MACH_SEND_TIMEOUT);
			return MACH_SEND_TIMED_OUT;

		case THREAD_AWAKENED:
			/* we can proceed - inherited msgcount from waker */
			assert(mqueue->imq_msgcount > 0);
			break;

		case THREAD_INTERRUPTED:
			return MACH_SEND_INTERRUPTED;

		case THREAD_RESTART:
		default:
			panic("ipc_mqueue_send");
		}
	}

	ipc_mqueue_post(mqueue, kmsg);
	return MACH_MSG_SUCCESS;
}
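
/*
 * Two properties of the logic above worth calling out:
 *
 *   -	MACH_SEND_ALWAYS (an internal option, used for kernel-generated
 *	messages such as notifications) and send-once rights skip the
 *	limit check entirely, so imq_msgcount may transiently exceed
 *	imq_qlimit.
 *
 *   -	A sender woken with THREAD_AWAKENED does not re-check imq_full():
 *	the waker already charged a message slot to it (see the slot
 *	handoff in ipc_mqueue_release_msgcount and ipc_mqueue_set_qlimit),
 *	so it proceeds directly to ipc_mqueue_post.
 */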

/*
 *	Routine:	ipc_mqueue_release_msgcount
 *	Purpose:
 *		Release a message queue reference in the case where we
 *		found a waiter.
 *
 *	Conditions:
 *		The message queue is locked.
 *		The message corresponding to this reference is off the queue.
 */
void
ipc_mqueue_release_msgcount(
	ipc_mqueue_t	mqueue)
{
	assert(imq_held(mqueue));
	assert(mqueue->imq_msgcount > 1 || ipc_kmsg_queue_empty(&mqueue->imq_messages));

	mqueue->imq_msgcount--;

	if (!imq_full(mqueue) && mqueue->imq_fullwaiters) {
		if (wait_queue_wakeup64_one_locked(
						&mqueue->imq_wait_queue,
						IPC_MQUEUE_FULL,
						THREAD_AWAKENED,
						FALSE) != KERN_SUCCESS) {
			mqueue->imq_fullwaiters = FALSE;
		} else {
			/* gave away our slot - add reference back */
			mqueue->imq_msgcount++;
		}
	}
}
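
/*
 * In other words, imq_msgcount counts reserved queue slots rather than
 * queued kmsgs: ipc_mqueue_send() reserves a slot before posting, and the
 * slot is given back here, either because a receiver took the message
 * directly or because a queued message was pulled off.  If a full-queue
 * waiter can be woken instead, the slot is transferred to it rather than
 * released, which is what lets blocked senders make progress while the
 * queue stays at its limit.
 */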

/*
 *	Routine:	ipc_mqueue_post
 *	Purpose:
 *		Post a message to a waiting receiver or enqueue it.  If a
 *		receiver is waiting, we can release our reserved space in
 *		the message queue.
 *
 *	Conditions:
 *		If we need to queue, our space in the message queue is reserved.
 */
void
ipc_mqueue_post(
	register ipc_mqueue_t	mqueue,
	register ipc_kmsg_t	kmsg)
{
	spl_t s;

	/*
	 * While the msg queue is locked, we have control of the
	 * kmsg, so the ref in it for the port is still good.
	 *
	 * Check for a receiver for the message.
	 */
	s = splsched();
	imq_lock(mqueue);
	for (;;) {
		wait_queue_t waitq = &mqueue->imq_wait_queue;
		thread_t receiver;

		receiver = wait_queue_wakeup64_identity_locked(
							waitq,
							IPC_MQUEUE_RECEIVE,
							THREAD_AWAKENED,
							FALSE);
		/* waitq still locked, thread locked */

		if (receiver == THREAD_NULL) {
			/*
			 * no receivers; queue kmsg
			 */
			assert(mqueue->imq_msgcount > 0);
			ipc_kmsg_enqueue_macro(&mqueue->imq_messages, kmsg);
			break;
		}

		/*
		 * We found a waiting thread.
		 * If the message is too large or the scatter list is too small
		 * the thread we wake up will get that as its status.
		 */
		if (receiver->ith_msize <
		    (kmsg->ikm_header->msgh_size) +
		    REQUESTED_TRAILER_SIZE(receiver->ith_option)) {
			receiver->ith_msize = kmsg->ikm_header->msgh_size;
			receiver->ith_state = MACH_RCV_TOO_LARGE;
		} else {
			receiver->ith_state = MACH_MSG_SUCCESS;
		}

		/*
		 * If there is no problem with the upcoming receive, or the
		 * receiver thread didn't specifically ask for the special
		 * too-large error condition, go ahead and select it anyway.
		 */
		if ((receiver->ith_state == MACH_MSG_SUCCESS) ||
		    !(receiver->ith_option & MACH_RCV_LARGE)) {

			receiver->ith_kmsg = kmsg;
			receiver->ith_seqno = mqueue->imq_seqno++;
			thread_unlock(receiver);

			/* we didn't need our reserved spot in the queue */
			ipc_mqueue_release_msgcount(mqueue);
			break;
		}

		/*
		 * Otherwise, this thread needs to be released to run
		 * and handle its error without getting the message.  We
		 * need to go back and pick another one.
		 */
		receiver->ith_kmsg = IKM_NULL;
		receiver->ith_seqno = 0;
		thread_unlock(receiver);
	}

	imq_unlock(mqueue);
	splx(s);

	current_task()->messages_sent++;
	return;
}
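
/*
 * Note the MACH_RCV_LARGE asymmetry in the loop above: a receiver that
 * asked for the too-large indication is released empty-handed (ith_kmsg
 * is IKM_NULL and the needed size is in ith_msize) and another receiver
 * is sought; one that didn't ask is handed the kmsg along with the
 * MACH_RCV_TOO_LARGE state and must deal with it on the receive side.
 */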

/* static */ void
ipc_mqueue_receive_results(wait_result_t saved_wait_result)
{
	thread_t		self = current_thread();
	mach_msg_option_t	option = self->ith_option;
	kern_return_t		mr;

	/*
	 * why did we wake up?
	 */
	switch (saved_wait_result) {
	case THREAD_TIMED_OUT:
		self->ith_state = MACH_RCV_TIMED_OUT;
		return;

	case THREAD_INTERRUPTED:
		self->ith_state = MACH_RCV_INTERRUPTED;
		return;

	case THREAD_RESTART:
		/* something bad happened to the port/set */
		self->ith_state = MACH_RCV_PORT_CHANGED;
		return;

	case THREAD_AWAKENED:
		/*
		 * We do not need to go select a message, somebody
		 * handed us one (or a too-large indication).
		 */
		mr = MACH_MSG_SUCCESS;

		switch (self->ith_state) {
		case MACH_RCV_SCATTER_SMALL:
		case MACH_RCV_TOO_LARGE:
			/*
			 * Somebody tried to give us a too large
			 * message. If we indicated that we cared,
			 * then they only gave us the indication,
			 * otherwise they gave us the indication
			 * AND the message anyway.
			 */
			if (option & MACH_RCV_LARGE) {
				return;
			}
			/* fall through - we were given the message anyway */

		case MACH_MSG_SUCCESS:
			return;

		default:
			panic("ipc_mqueue_receive_results: strange ith_state");
		}

	default:
		panic("ipc_mqueue_receive_results: strange wait_result");
	}
}

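/*
 * Continuation protocol: a receiver that blocks with ith_continuation set
 * may have its kernel stack discarded while it waits.  On wakeup the
 * thread resumes here instead of returning through ipc_mqueue_receive(),
 * so every piece of receive state (ith_option, ith_msize, ith_kmsg,
 * ith_seqno, ith_state) must live in the thread, not on the stack.
 */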
void
ipc_mqueue_receive_continue(
	__unused void *param,
	wait_result_t wresult)
{
	ipc_mqueue_receive_results(wresult);
	mach_msg_receive_continue();  /* hard-coded for now */
}

/*
 *	Routine:	ipc_mqueue_receive
 *	Purpose:
 *		Receive a message from a message queue.
 *
 *		If continuation is non-zero, then we might discard
 *		our kernel stack when we block.  We will continue
 *		after unblocking by executing continuation.
 *
 *		If resume is true, then we are resuming a receive
 *		operation after a blocked receive discarded our stack.
 *	Conditions:
 *		Our caller must hold a reference for the port or port set
 *		to which this queue belongs, to keep the queue
 *		from being deallocated.
 *
 *		The kmsg is returned with clean header fields
 *		and with the circular bit turned off.
 *	Returns:
 *		MACH_MSG_SUCCESS	Message returned in kmsgp.
 *		MACH_RCV_TOO_LARGE	Message size returned in kmsgp.
 *		MACH_RCV_TIMED_OUT	No message obtained.
 *		MACH_RCV_INTERRUPTED	No message obtained.
 *		MACH_RCV_PORT_DIED	Port/set died; no message.
 *		MACH_RCV_PORT_CHANGED	Port moved into set; no msg.
 *
 */

void
ipc_mqueue_receive(
	ipc_mqueue_t		mqueue,
	mach_msg_option_t	option,
	mach_msg_size_t		max_size,
	mach_msg_timeout_t	rcv_timeout,
	int			interruptible)
{
	ipc_kmsg_queue_t	kmsgs;
	wait_result_t		wresult;
	thread_t		self;
	uint64_t		deadline;
	spl_t			s;

	s = splsched();
	imq_lock(mqueue);

	if (imq_is_set(mqueue)) {
		wait_queue_link_t wql;
		ipc_mqueue_t port_mq;
		queue_t q;

		q = &mqueue->imq_setlinks;

		/*
		 * If we are waiting on a portset mqueue, we need to see if
		 * any of the member ports have work for us.  If so, try to
		 * deliver one of those messages.  By holding the portset's
		 * mqueue lock during the search, we tie up any attempts by
		 * mqueue_deliver or portset membership changes that may
		 * cross our path.  But this is a lock order violation, so we
		 * have to do it "softly."  If we don't find a message waiting
		 * for us, we will assert our intention to wait while still
		 * holding that lock.  When we release the lock, the deliver/
		 * change will succeed and find us.
		 */
	search_set:
		queue_iterate(q, wql, wait_queue_link_t, wql_setlinks) {
			port_mq = (ipc_mqueue_t)wql->wql_queue;
			kmsgs = &port_mq->imq_messages;

			if (!imq_lock_try(port_mq)) {
				imq_unlock(mqueue);
				splx(s);
				delay(1);
				s = splsched();
				imq_lock(mqueue);
				goto search_set; /* start again at beginning - SMP */
			}

			/*
			 * If there is still a message to be had, we will
			 * try to select it (may not succeed because of size
			 * and options).  In any case, we deliver those
			 * results back to the user.
			 *
			 * We also move the port's linkage to the tail of the
			 * list for this set (fairness).  Future versions will
			 * sort by timestamp or priority.
			 */
			if (ipc_kmsg_queue_first(kmsgs) == IKM_NULL) {
				imq_unlock(port_mq);
				continue;
			}
			queue_remove(q, wql, wait_queue_link_t, wql_setlinks);
			queue_enter(q, wql, wait_queue_link_t, wql_setlinks);
			imq_unlock(mqueue);

			ipc_mqueue_select(port_mq, option, max_size);
			imq_unlock(port_mq);
			splx(s);
			return;
		}

	} else {

		/*
		 * Receive on a single port.  Just try to get the messages.
		 */
		kmsgs = &mqueue->imq_messages;
		if (ipc_kmsg_queue_first(kmsgs) != IKM_NULL) {
			ipc_mqueue_select(mqueue, option, max_size);
			imq_unlock(mqueue);
			splx(s);
			return;
		}
	}

	/*
	 * Looks like we'll have to block.  The mqueue we will
	 * block on (whether the set's or the local port's) is
	 * still locked.
	 */
	self = current_thread();
	if (option & MACH_RCV_TIMEOUT) {
		if (rcv_timeout == 0) {
			imq_unlock(mqueue);
			splx(s);
			self->ith_state = MACH_RCV_TIMED_OUT;
			return;
		}
	}

	thread_lock(self);
	self->ith_state = MACH_RCV_IN_PROGRESS;
	self->ith_option = option;
	self->ith_msize = max_size;

	if (option & MACH_RCV_TIMEOUT)
		clock_interval_to_deadline(rcv_timeout, 1000*NSEC_PER_USEC, &deadline);
	else
		deadline = 0;

	wresult = wait_queue_assert_wait64_locked(&mqueue->imq_wait_queue,
						  IPC_MQUEUE_RECEIVE,
						  interruptible, deadline,
						  self);
	thread_unlock(self);
	imq_unlock(mqueue);
	splx(s);

	if (wresult == THREAD_WAITING) {
		counter((interruptible == THREAD_ABORTSAFE) ?
			c_ipc_mqueue_receive_block_user++ :
			c_ipc_mqueue_receive_block_kernel++);

		if (self->ith_continuation)
			thread_block(ipc_mqueue_receive_continue);
			/* NOTREACHED */

		wresult = thread_block(THREAD_CONTINUE_NULL);
	}
	ipc_mqueue_receive_results(wresult);
}
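
/*
 * For orientation, roughly how the mach_msg receive path drives this
 * (a sketch, assuming the usual trap-level sequence; error handling and
 * copyout elided):
 *
 *	mr = ipc_mqueue_copyin(space, rcv_name, &mqueue, &object);
 *	if (mr != MACH_MSG_SUCCESS)
 *		return mr;
 *	self->ith_continuation = thread_syscall_return;
 *	ipc_mqueue_receive(mqueue, option, rcv_size, timeout,
 *			   THREAD_ABORTSAFE);
 *	mr = self->ith_state;		(results come back in the thread)
 *	io_release(object);
 */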

/*
 *	Routine:	ipc_mqueue_select
 *	Purpose:
 *		A receiver discovered that there was a message on the queue
 *		before it had to block.  Pick the message off the queue and
 *		"post" it to itself.
 *	Conditions:
 *		mqueue locked.
 *		There is a message.
 *	Returns:
 *		MACH_MSG_SUCCESS	Actually selected a message for ourselves.
 *		MACH_RCV_TOO_LARGE	May or may not have pulled it, but it is large.
 */
void
ipc_mqueue_select(
	ipc_mqueue_t		mqueue,
	mach_msg_option_t	option,
	mach_msg_size_t		max_size)
{
	thread_t self = current_thread();
	ipc_kmsg_t kmsg;
	mach_msg_return_t mr;
	mach_msg_size_t rcv_size;

	mr = MACH_MSG_SUCCESS;

	/*
	 * Do some sanity checking of our ability to receive
	 * before pulling the message off the queue.
	 */
	kmsg = ipc_kmsg_queue_first(&mqueue->imq_messages);
	assert(kmsg != IKM_NULL);

	/*
	 * If we really can't receive it, but we had the
	 * MACH_RCV_LARGE option set, then don't take it off
	 * the queue, instead return the appropriate error
	 * (and size needed).
	 */
	rcv_size = ipc_kmsg_copyout_size(kmsg, self->map);
	if (rcv_size + REQUESTED_TRAILER_SIZE(option) > max_size) {
		mr = MACH_RCV_TOO_LARGE;
		if (option & MACH_RCV_LARGE) {
			self->ith_kmsg = IKM_NULL;
			self->ith_msize = rcv_size;
			self->ith_seqno = 0;
			self->ith_state = mr;
			return;
		}
	}

	ipc_kmsg_rmqueue_first_macro(&mqueue->imq_messages, kmsg);
	ipc_mqueue_release_msgcount(mqueue);
	self->ith_seqno = mqueue->imq_seqno++;
	self->ith_kmsg = kmsg;
	self->ith_state = mr;

	current_task()->messages_received++;
	return;
}
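
/*
 * When MACH_RCV_LARGE is set and the message doesn't fit, the kmsg is
 * deliberately left on the queue (and its msgcount reservation intact)
 * so the caller can retry with a larger buffer; only the needed size is
 * reported back in ith_msize.
 */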

/*
 *	Routine:	ipc_mqueue_destroy
 *	Purpose:
 *		Destroy a message queue.  Set any blocked senders running.
 *		Destroy the kmsgs in the queue.
 *	Conditions:
 *		Nothing locked.
 *		Receivers were removed when the receive right was "changed"
 */
void
ipc_mqueue_destroy(
	ipc_mqueue_t	mqueue)
{
	ipc_kmsg_queue_t kmqueue;
	ipc_kmsg_t kmsg;
	spl_t s;

	s = splsched();
	imq_lock(mqueue);
	/*
	 * rouse all blocked senders
	 */
	mqueue->imq_fullwaiters = FALSE;
	wait_queue_wakeup64_all_locked(
			&mqueue->imq_wait_queue,
			IPC_MQUEUE_FULL,
			THREAD_AWAKENED,
			FALSE);

	kmqueue = &mqueue->imq_messages;

	while ((kmsg = ipc_kmsg_dequeue(kmqueue)) != IKM_NULL) {
		imq_unlock(mqueue);
		splx(s);

		ipc_kmsg_destroy_dest(kmsg);

		s = splsched();
		imq_lock(mqueue);
	}
	imq_unlock(mqueue);
	splx(s);
}

/*
 *	Routine:	ipc_mqueue_set_qlimit
 *	Purpose:
 *		Changes a message queue limit; the maximum number
 *		of messages which may be queued.
 *	Conditions:
 *		Nothing locked.
 */

void
ipc_mqueue_set_qlimit(
	ipc_mqueue_t		mqueue,
	mach_port_msgcount_t	qlimit)
{
	spl_t s;

	assert(qlimit <= MACH_PORT_QLIMIT_MAX);

	/* wake up senders allowed by the new qlimit */
	s = splsched();
	imq_lock(mqueue);
	if (qlimit > mqueue->imq_qlimit) {
		mach_port_msgcount_t i, wakeup;

		/* caution: wakeup, qlimit are unsigned */
		wakeup = qlimit - mqueue->imq_qlimit;

		for (i = 0; i < wakeup; i++) {
			if (wait_queue_wakeup64_one_locked(
							&mqueue->imq_wait_queue,
							IPC_MQUEUE_FULL,
							THREAD_AWAKENED,
							FALSE) == KERN_NOT_WAITING) {
				mqueue->imq_fullwaiters = FALSE;
				break;
			}
			mqueue->imq_msgcount++;  /* give it to the awakened thread */
		}
	}
	mqueue->imq_qlimit = qlimit;
	imq_unlock(mqueue);
	splx(s);
}
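
/*
 * Raising the limit wakes at most (new - old) blocked senders and charges
 * one message slot to each, mirroring the slot handoff in
 * ipc_mqueue_release_msgcount(): an awakened sender proceeds straight to
 * ipc_mqueue_post() without re-checking imq_full().  Lowering the limit
 * never blocks; messages already queued above the new limit simply drain
 * over time.
 */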

/*
 *	Routine:	ipc_mqueue_set_seqno
 *	Purpose:
 *		Changes an mqueue's sequence number.
 *	Conditions:
 *		Caller holds a reference to the queue's containing object.
 */
void
ipc_mqueue_set_seqno(
	ipc_mqueue_t		mqueue,
	mach_port_seqno_t	seqno)
{
	spl_t s;

	s = splsched();
	imq_lock(mqueue);
	mqueue->imq_seqno = seqno;
	imq_unlock(mqueue);
	splx(s);
}

/*
 *	Routine:	ipc_mqueue_copyin
 *	Purpose:
 *		Convert a name in a space to a message queue.
 *	Conditions:
 *		Nothing locked.  If successful, the caller gets a ref
 *		for the object.  This ref ensures the continued existence of
 *		the queue.
 *	Returns:
 *		MACH_MSG_SUCCESS	Found a message queue.
 *		MACH_RCV_INVALID_NAME	The space is dead.
 *		MACH_RCV_INVALID_NAME	The name doesn't denote a right.
 *		MACH_RCV_INVALID_NAME
 *			The denoted right is not receive or port set.
 *		MACH_RCV_IN_SET		Receive right is a member of a set.
 */

mach_msg_return_t
ipc_mqueue_copyin(
	ipc_space_t		space,
	mach_port_name_t	name,
	ipc_mqueue_t		*mqueuep,
	ipc_object_t		*objectp)
{
	ipc_entry_t entry;
	ipc_object_t object;
	ipc_mqueue_t mqueue;

	is_read_lock(space);
	if (!space->is_active) {
		is_read_unlock(space);
		return MACH_RCV_INVALID_NAME;
	}

	entry = ipc_entry_lookup(space, name);
	if (entry == IE_NULL) {
		is_read_unlock(space);
		return MACH_RCV_INVALID_NAME;
	}

	object = entry->ie_object;

	if (entry->ie_bits & MACH_PORT_TYPE_RECEIVE) {
		ipc_port_t port;

		port = (ipc_port_t) object;
		assert(port != IP_NULL);

		ip_lock(port);
		assert(ip_active(port));
		assert(port->ip_receiver_name == name);
		assert(port->ip_receiver == space);
		is_read_unlock(space);
		mqueue = &port->ip_messages;

	} else if (entry->ie_bits & MACH_PORT_TYPE_PORT_SET) {
		ipc_pset_t pset;

		pset = (ipc_pset_t) object;
		assert(pset != IPS_NULL);

		ips_lock(pset);
		assert(ips_active(pset));
		assert(pset->ips_local_name == name);
		is_read_unlock(space);

		mqueue = &pset->ips_messages;
	} else {
		is_read_unlock(space);
		return MACH_RCV_INVALID_NAME;
	}

	/*
	 * At this point, the object is locked and active,
	 * the space is unlocked, and mqueue is initialized.
	 */

	io_reference(object);
	io_unlock(object);

	*objectp = object;
	*mqueuep = mqueue;
	return MACH_MSG_SUCCESS;
}
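
/*
 * The reference returned is on the object (port or port set), not on the
 * mqueue itself: mqueues are embedded in their owning object (ip_messages
 * / ips_messages above), so the object reference is what keeps *mqueuep
 * alive while a receive is in progress.
 */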