]> git.saurik.com Git - apple/xnu.git/blob - osfmk/ipc/ipc_mqueue.c
xnu-344.tar.gz
[apple/xnu.git] / osfmk / ipc / ipc_mqueue.c
1 /*
2 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_LICENSE_HEADER_START@
5 *
6 * The contents of this file constitute Original Code as defined in and
7 * are subject to the Apple Public Source License Version 1.1 (the
8 * "License"). You may not use this file except in compliance with the
9 * License. Please obtain a copy of the License at
10 * http://www.apple.com/publicsource and read it before using this file.
11 *
12 * This Original Code and all software distributed under the License are
13 * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
14 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
15 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
17 * License for the specific language governing rights and limitations
18 * under the License.
19 *
20 * @APPLE_LICENSE_HEADER_END@
21 */
22 /*
23 * @OSF_FREE_COPYRIGHT@
24 */
25 /*
26 * Mach Operating System
27 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
28 * All Rights Reserved.
29 *
30 * Permission to use, copy, modify and distribute this software and its
31 * documentation is hereby granted, provided that both the copyright
32 * notice and this permission notice appear in all copies of the
33 * software, derivative works or modified versions, and any portions
34 * thereof, and that both notices appear in supporting documentation.
35 *
36 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
37 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
38 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
39 *
40 * Carnegie Mellon requests users of this software to return to
41 *
42 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
43 * School of Computer Science
44 * Carnegie Mellon University
45 * Pittsburgh PA 15213-3890
46 *
47 * any improvements or extensions that they make and grant Carnegie Mellon
48 * the rights to redistribute these changes.
49 */
50 /*
51 */
52 /*
53 * File: ipc/ipc_mqueue.c
54 * Author: Rich Draves
55 * Date: 1989
56 *
57 * Functions to manipulate IPC message queues.
58 */
59
60 #include <mach/port.h>
61 #include <mach/message.h>
62 #include <mach/sync_policy.h>
63
64 #include <kern/assert.h>
65 #include <kern/counters.h>
66 #include <kern/sched_prim.h>
67 #include <kern/ipc_kobject.h>
68 #include <kern/misc_protos.h>
69 #include <kern/task.h>
70 #include <kern/thread.h>
71 #include <kern/wait_queue.h>
72
73 #include <ipc/ipc_mqueue.h>
74 #include <ipc/ipc_kmsg.h>
75 #include <ipc/ipc_port.h>
76 #include <ipc/ipc_pset.h>
77 #include <ipc/ipc_space.h>
78
79 #include <ddb/tr.h>
80
/*
 * Dummy globals: only their ADDRESSES matter — they serve as unique
 * wait-event tokens (senders block on &ipc_mqueue_full for queue space,
 * receivers on &ipc_mqueue_rcv for message arrival).
 */
int ipc_mqueue_full;		/* address is event for queue space */
int ipc_mqueue_rcv;		/* address is event for message arrival */

#define TR_ENABLE 0
85
86 /*
87 * Routine: ipc_mqueue_init
88 * Purpose:
89 * Initialize a newly-allocated message queue.
90 */
91 void
92 ipc_mqueue_init(
93 ipc_mqueue_t mqueue,
94 boolean_t is_set)
95 {
96 if (is_set) {
97 wait_queue_set_init(&mqueue->imq_set_queue, SYNC_POLICY_FIFO);
98 } else {
99 wait_queue_init(&mqueue->imq_wait_queue, SYNC_POLICY_FIFO);
100 ipc_kmsg_queue_init(&mqueue->imq_messages);
101 mqueue->imq_seqno = 0;
102 mqueue->imq_msgcount = 0;
103 mqueue->imq_qlimit = MACH_PORT_QLIMIT_DEFAULT;
104 mqueue->imq_fullwaiters = FALSE;
105 }
106 }
107
108 /*
109 * Routine: ipc_mqueue_member
110 * Purpose:
111 * Indicate whether the (port) mqueue is a member of
112 * this portset's mqueue. We do this by checking
113 * whether the portset mqueue's waitq is an member of
114 * the port's mqueue waitq.
115 * Conditions:
116 * the portset's mqueue is not already a member
117 * this may block while allocating linkage structures.
118 */
119
120 boolean_t
121 ipc_mqueue_member(
122 ipc_mqueue_t port_mqueue,
123 ipc_mqueue_t set_mqueue)
124 {
125 wait_queue_t port_waitq = &port_mqueue->imq_wait_queue;
126 wait_queue_t set_waitq = &set_mqueue->imq_wait_queue;
127
128 return (wait_queue_member(port_waitq, set_waitq));
129
130 }
131
132 /*
133 * Routine: ipc_mqueue_remove
134 * Purpose:
135 * Remove the association between the queue and the specified
136 * set message queue.
137 */
138
139 kern_return_t
140 ipc_mqueue_remove(
141 ipc_mqueue_t mqueue,
142 ipc_mqueue_t set_mqueue)
143 {
144 wait_queue_t mq_waitq = &mqueue->imq_wait_queue;
145 wait_queue_set_t set_waitq = &set_mqueue->imq_set_queue;
146
147 return wait_queue_unlink(mq_waitq, set_waitq);
148 }
149
150 /*
151 * Routine: ipc_mqueue_remove_from_all
152 * Purpose:
153 * Remove the mqueue from all the sets it is a member of
154 * Conditions:
155 * Nothing locked.
156 */
157 void
158 ipc_mqueue_remove_from_all(
159 ipc_mqueue_t mqueue)
160 {
161 wait_queue_t mq_waitq = &mqueue->imq_wait_queue;
162
163 wait_queue_unlink_all(mq_waitq);
164 return;
165 }
166
167 /*
168 * Routine: ipc_mqueue_remove_all
169 * Purpose:
170 * Remove all the member queues from the specified set.
171 * Conditions:
172 * Nothing locked.
173 */
174 void
175 ipc_mqueue_remove_all(
176 ipc_mqueue_t mqueue)
177 {
178 wait_queue_set_t mq_setq = &mqueue->imq_set_queue;
179
180 wait_queue_set_unlink_all(mq_setq);
181 return;
182 }
183
184
/*
 *	Routine:	ipc_mqueue_add
 *	Purpose:
 *		Associate the portset's mqueue with the port's mqueue.
 *		This has to be done so that posting the port will wakeup
 *		a portset waiter.  If there are waiters on the portset
 *		mqueue and messages on the port mqueue, try to match them
 *		up now.
 *	Conditions:
 *		May block (wait_queue_link may allocate linkage structures).
 *	Returns:
 *		KERN_SUCCESS, or whatever wait_queue_link() reports.
 */
kern_return_t
ipc_mqueue_add(
	ipc_mqueue_t	 port_mqueue,
	ipc_mqueue_t	 set_mqueue)
{
	wait_queue_t	 port_waitq = &port_mqueue->imq_wait_queue;
	wait_queue_set_t set_waitq = &set_mqueue->imq_set_queue;
	ipc_kmsg_queue_t kmsgq;
	ipc_kmsg_t       kmsg, next;
	kern_return_t	 kr;
	spl_t		 s;

	kr = wait_queue_link(port_waitq, set_waitq);
	if (kr != KERN_SUCCESS)
		return kr;

	/*
	 * Now that the set has been added to the port, there may be
	 * messages queued on the port and threads waiting on the set
	 * waitq.  Lets get them together.
	 */
	s = splsched();
	imq_lock(port_mqueue);
	kmsgq = &port_mqueue->imq_messages;
	for (kmsg = ipc_kmsg_queue_first(kmsgq);
	     kmsg != IKM_NULL;
	     kmsg = next) {
		/* capture successor now: kmsg may be dequeued below */
		next = ipc_kmsg_queue_next(kmsgq, kmsg);

		/* inner loop: find a receiver able to take this kmsg */
		for (;;) {
			thread_t th;

			/* dequeue one waiting receiver, returned thread-locked */
			th = wait_queue_wakeup64_identity_locked(
						port_waitq,
						IPC_MQUEUE_RECEIVE,
						THREAD_AWAKENED,
						FALSE);
			/* waitq/mqueue still locked, thread locked */

			/* no receivers left at all: stop matching entirely */
			if (th == THREAD_NULL)
				goto leave;

			/*
			 * Found a receiver. see if they can handle the message
			 * correctly (the message is not too large for them, or
			 * they didn't care to be informed that the message was
			 * too large).  If they can't handle it, take them off
			 * the list and let them go back and figure it out and
			 * just move onto the next.
			 */
			if (th->ith_msize <
			    kmsg->ikm_header.msgh_size +
			    REQUESTED_TRAILER_SIZE(th->ith_option)) {
				th->ith_state = MACH_RCV_TOO_LARGE;
				th->ith_msize = kmsg->ikm_header.msgh_size;
				if (th->ith_option & MACH_RCV_LARGE) {
					/*
					 * let him go without message
					 */
					th->ith_kmsg = IKM_NULL;
					th->ith_seqno = 0;
					thread_unlock(th);
					continue; /* find another thread */
				}
			} else {
				th->ith_state = MACH_MSG_SUCCESS;
			}

			/*
			 * This thread is going to take this message,
			 * so give it to him.
			 */
			ipc_mqueue_release_msgcount(port_mqueue);
			ipc_kmsg_rmqueue(kmsgq, kmsg);
			th->ith_kmsg = kmsg;
			th->ith_seqno = port_mqueue->imq_seqno++;
			thread_unlock(th);
			break;  /* go to next message */
		}
	}
 leave:
	imq_unlock(port_mqueue);
	splx(s);
	return KERN_SUCCESS;
}
282
283 /*
284 * Routine: ipc_mqueue_changed
285 * Purpose:
286 * Wake up receivers waiting in a message queue.
287 * Conditions:
288 * The message queue is locked.
289 */
290
291 void
292 ipc_mqueue_changed(
293 ipc_mqueue_t mqueue)
294 {
295 wait_queue_wakeup64_all_locked(
296 &mqueue->imq_wait_queue,
297 IPC_MQUEUE_RECEIVE,
298 THREAD_RESTART,
299 FALSE); /* unlock waitq? */
300 }
301
302
303
304
/*
 *	Routine:	ipc_mqueue_send
 *	Purpose:
 *		Send a message to a message queue.  The message holds a reference
 *		for the destination port for this message queue in the
 *		msgh_remote_port field.
 *
 *		If unsuccessful, the caller still has possession of
 *		the message and must do something with it.  If successful,
 *		the message is queued, given to a receiver, or destroyed.
 *	Conditions:
 *		Nothing locked.
 *	Returns:
 *		MACH_MSG_SUCCESS	The message was accepted.
 *		MACH_SEND_TIMED_OUT	Caller still has message.
 *		MACH_SEND_INTERRUPTED	Caller still has message.
 */
mach_msg_return_t
ipc_mqueue_send(
	ipc_mqueue_t		mqueue,
	ipc_kmsg_t		kmsg,
	mach_msg_option_t	option,
	mach_msg_timeout_t	timeout)
{
	int wresult;
	spl_t s;

	/*
	 * Don't block if:
	 *	1) We're under the queue limit.
	 *	2) Caller used the MACH_SEND_ALWAYS internal option.
	 *	3) Message is sent to a send-once right.
	 */
	s = splsched();
	imq_lock(mqueue);

	if (!imq_full(mqueue) ||
	    (option & MACH_SEND_ALWAYS) ||
	    (MACH_MSGH_BITS_REMOTE(kmsg->ikm_header.msgh_bits) ==
	     MACH_MSG_TYPE_PORT_SEND_ONCE)) {
		/* reserve our slot now; ipc_mqueue_post() consumes it */
		mqueue->imq_msgcount++;
		imq_unlock(mqueue);
		splx(s);
	} else {

		/*
		 * We have to wait for space to be granted to us.
		 */
		if ((option & MACH_SEND_TIMEOUT) && (timeout == 0)) {
			/* zero timeout: poll-style send, fail immediately */
			imq_unlock(mqueue);
			splx(s);
			return MACH_SEND_TIMED_OUT;
		}
		mqueue->imq_fullwaiters = TRUE;
		wresult = wait_queue_assert_wait64_locked(
						&mqueue->imq_wait_queue,
						IPC_MQUEUE_FULL,
						THREAD_ABORTSAFE,
						TRUE); /* unlock? */
		/* wait/mqueue is unlocked */
		splx(s);

		if (wresult == THREAD_WAITING) {
			if (option & MACH_SEND_TIMEOUT) {
				/* 1000*NSEC_PER_USEC = 1ms per timeout unit */
				thread_set_timer(timeout, 1000*NSEC_PER_USEC);
				wresult = thread_block(THREAD_CONTINUE_NULL);
				if (wresult != THREAD_TIMED_OUT)
					thread_cancel_timer();
			} else {
				wresult = thread_block(THREAD_CONTINUE_NULL);
			}
			counter(c_ipc_mqueue_send_block++);
		}

		switch (wresult) {
		case THREAD_TIMED_OUT:
			assert(option & MACH_SEND_TIMEOUT);
			return MACH_SEND_TIMED_OUT;

		case THREAD_AWAKENED:
			/* we can proceed - inherited msgcount from waker */
			break;

		case THREAD_INTERRUPTED:
			return MACH_SEND_INTERRUPTED;

		case THREAD_RESTART:
		default:
			panic("ipc_mqueue_send");
		}
	}

	/* we hold a reserved msgcount slot; hand the message over */
	ipc_mqueue_post(mqueue, kmsg);
	return MACH_MSG_SUCCESS;
}
400
401 /*
402 * Routine: ipc_mqueue_release_msgcount
403 * Purpose:
404 * Release a message queue reference in the case where we
405 * found a waiter.
406 *
407 * Conditions:
408 * The message queue is locked
409 */
410 void
411 ipc_mqueue_release_msgcount(
412 ipc_mqueue_t mqueue)
413 {
414 assert(imq_held(mqueue));
415 assert(mqueue->imq_msgcount > 0);
416
417 mqueue->imq_msgcount--;
418 if (!imq_full(mqueue) && mqueue->imq_fullwaiters) {
419 if (wait_queue_wakeup64_one_locked(
420 &mqueue->imq_wait_queue,
421 IPC_MQUEUE_FULL,
422 THREAD_AWAKENED,
423 FALSE) != KERN_SUCCESS) {
424 mqueue->imq_fullwaiters = FALSE;
425 } else {
426 mqueue->imq_msgcount++; /* gave it away */
427 }
428 }
429 }
430
/*
 *	Routine:	ipc_mqueue_post
 *	Purpose:
 *		Post a message to a waiting receiver or enqueue it.  If a
 *		receiver is waiting, we can release our reserved space in
 *		the message queue.
 *
 *	Conditions:
 *		If we need to queue, our space in the message queue is reserved.
 */
void
ipc_mqueue_post(
	register ipc_mqueue_t	mqueue,
	register ipc_kmsg_t	kmsg)
{

	spl_t s;

	/*
	 * While the msg queue is locked, we have control of the
	 * kmsg, so the ref in it for the port is still good.
	 *
	 * Check for a receiver for the message.
	 */
	s = splsched();
	imq_lock(mqueue);
	for (;;) {
		wait_queue_t waitq = &mqueue->imq_wait_queue;
		thread_t receiver;

		/* dequeue one waiting receiver, returned thread-locked */
		receiver = wait_queue_wakeup64_identity_locked(
							waitq,
							IPC_MQUEUE_RECEIVE,
							THREAD_AWAKENED,
							FALSE);
		/* waitq still locked, thread locked */

		if (receiver == THREAD_NULL) {
			/*
			 * no receivers; queue kmsg
			 * (this consumes our reserved msgcount slot)
			 */
			assert(mqueue->imq_msgcount > 0);
			ipc_kmsg_enqueue_macro(&mqueue->imq_messages, kmsg);
			break;
		}

		/*
		 * We found a waiting thread.
		 * If the message is too large or the scatter list is too small
		 * the thread we wake up will get that as its status.
		 */
		if (receiver->ith_msize <
				(kmsg->ikm_header.msgh_size) +
				REQUESTED_TRAILER_SIZE(receiver->ith_option)) {
			receiver->ith_msize = kmsg->ikm_header.msgh_size;
			receiver->ith_state = MACH_RCV_TOO_LARGE;
		} else {
			receiver->ith_state = MACH_MSG_SUCCESS;
		}

		/*
		 * If there is no problem with the upcoming receive, or the
		 * receiver thread didn't specifically ask for special too
		 * large error condition, go ahead and select it anyway.
		 */
		if ((receiver->ith_state == MACH_MSG_SUCCESS) ||
		    !(receiver->ith_option & MACH_RCV_LARGE)) {

			receiver->ith_kmsg = kmsg;
			receiver->ith_seqno = mqueue->imq_seqno++;
			thread_unlock(receiver);

			/* we didn't need our reserved spot in the queue */
			ipc_mqueue_release_msgcount(mqueue);
			break;
		}

		/*
		 * Otherwise, this thread needs to be released to run
		 * and handle its error without getting the message.  We
		 * need to go back and pick another one.
		 */
		receiver->ith_kmsg = IKM_NULL;
		receiver->ith_seqno = 0;
		thread_unlock(receiver);
	}

	imq_unlock(mqueue);
	splx(s);

	/* statistics: charge the send to the current task */
	current_task()->messages_sent++;
	return;
}
524
525
526 kern_return_t
527 ipc_mqueue_receive_results(void)
528 {
529 thread_t self = current_thread();
530 mach_msg_option_t option = self->ith_option;
531 kern_return_t saved_wait_result = self->wait_result;
532 kern_return_t mr;
533
534 /*
535 * why did we wake up?
536 */
537 switch (saved_wait_result) {
538 case THREAD_TIMED_OUT:
539 self->ith_state = MACH_RCV_TIMED_OUT;
540 return;
541
542 case THREAD_INTERRUPTED:
543 if (option & MACH_RCV_TIMEOUT)
544 thread_cancel_timer();
545 self->ith_state = MACH_RCV_INTERRUPTED;
546 return;
547
548 case THREAD_RESTART:
549 /* something bad happened to the port/set */
550 if (option & MACH_RCV_TIMEOUT)
551 thread_cancel_timer();
552 self->ith_state = MACH_RCV_PORT_CHANGED;
553 return;
554
555 case THREAD_AWAKENED:
556 /*
557 * We do not need to go select a message, somebody
558 * handed us one (or a too-large indication).
559 */
560 if (option & MACH_RCV_TIMEOUT)
561 thread_cancel_timer();
562
563 mr = MACH_MSG_SUCCESS;
564
565 switch (self->ith_state) {
566 case MACH_RCV_SCATTER_SMALL:
567 case MACH_RCV_TOO_LARGE:
568 /*
569 * Somebody tried to give us a too large
570 * message. If we indicated that we cared,
571 * then they only gave us the indication,
572 * otherwise they gave us the indication
573 * AND the message anyway.
574 */
575 if (option & MACH_RCV_LARGE) {
576 return;
577 }
578
579 case MACH_MSG_SUCCESS:
580 return;
581
582 default:
583 panic("ipc_mqueue_receive_results: strange ith_state");
584 }
585
586 default:
587 panic("ipc_mqueue_receive_results: strange wait_result");
588 }
589 }
590
/*
 *	Continuation entry for a receive that blocked and discarded its
 *	kernel stack: decode the wakeup, then resume message reception.
 */
void
ipc_mqueue_receive_continue(void)
{
	(void) ipc_mqueue_receive_results();
	mach_msg_receive_continue();  /* hard-coded for now */
}
597
598 /*
599 * Routine: ipc_mqueue_receive
600 * Purpose:
601 * Receive a message from a message queue.
602 *
603 * If continuation is non-zero, then we might discard
604 * our kernel stack when we block. We will continue
605 * after unblocking by executing continuation.
606 *
607 * If resume is true, then we are resuming a receive
608 * operation after a blocked receive discarded our stack.
609 * Conditions:
610 * Our caller must hold a reference for the port or port set
611 * to which this queue belongs, to keep the queue
612 * from being deallocated.
613 *
614 * The kmsg is returned with clean header fields
615 * and with the circular bit turned off.
616 * Returns:
617 * MACH_MSG_SUCCESS Message returned in kmsgp.
618 * MACH_RCV_TOO_LARGE Message size returned in kmsgp.
619 * MACH_RCV_TIMED_OUT No message obtained.
620 * MACH_RCV_INTERRUPTED No message obtained.
621 * MACH_RCV_PORT_DIED Port/set died; no message.
622 * MACH_RCV_PORT_CHANGED Port moved into set; no msg.
623 *
624 */
625
626 void
627 ipc_mqueue_receive(
628 ipc_mqueue_t mqueue,
629 mach_msg_option_t option,
630 mach_msg_size_t max_size,
631 mach_msg_timeout_t timeout,
632 int interruptible)
633 {
634 ipc_port_t port;
635 mach_msg_return_t mr, mr2;
636 ipc_kmsg_queue_t kmsgs;
637 wait_result_t wresult;
638 thread_t self;
639 ipc_kmsg_t *kmsgp;
640 mach_port_seqno_t *seqnop;
641 spl_t s;
642
643 s = splsched();
644 imq_lock(mqueue);
645
646 if (imq_is_set(mqueue)) {
647 wait_queue_link_t wql;
648 ipc_mqueue_t port_mq;
649 queue_t q;
650
651 q = &mqueue->imq_setlinks;
652
653 /*
654 * If we are waiting on a portset mqueue, we need to see if
655 * any of the member ports have work for us. If so, try to
656 * deliver one of those messages. By holding the portset's
657 * mqueue lock during the search, we tie up any attempts by
658 * mqueue_deliver or portset membership changes that may
659 * cross our path. But this is a lock order violation, so we
660 * have to do it "softly." If we don't find a message waiting
661 * for us, we will assert our intention to wait while still
662 * holding that lock. When we release the lock, the deliver/
663 * change will succeed and find us.
664 */
665 search_set:
666 queue_iterate(q, wql, wait_queue_link_t, wql_setlinks) {
667 port_mq = (ipc_mqueue_t)wql->wql_queue;
668 kmsgs = &port_mq->imq_messages;
669
670 if (!imq_lock_try(port_mq)) {
671 imq_unlock(mqueue);
672 splx(s);
673 delay(1);
674 s = splsched();
675 imq_lock(mqueue);
676 goto search_set; /* start again at beginning - SMP */
677 }
678
679 /*
680 * If there is still a message to be had, we will
681 * try to select it (may not succeed because of size
682 * and options). In any case, we deliver those
683 * results back to the user.
684 *
685 * We also move the port's linkage to the tail of the
686 * list for this set (fairness). Future versions will
687 * sort by timestamp or priority.
688 */
689 if (ipc_kmsg_queue_first(kmsgs) == IKM_NULL) {
690 imq_unlock(port_mq);
691 continue;
692 }
693 queue_remove(q, wql, wait_queue_link_t, wql_setlinks);
694 queue_enter(q, wql, wait_queue_link_t, wql_setlinks);
695 imq_unlock(mqueue);
696
697 ipc_mqueue_select(port_mq, option, max_size);
698 imq_unlock(port_mq);
699 splx(s);
700 return;
701
702 }
703
704 } else {
705
706 /*
707 * Receive on a single port. Just try to get the messages.
708 */
709 kmsgs = &mqueue->imq_messages;
710 if (ipc_kmsg_queue_first(kmsgs) != IKM_NULL) {
711 ipc_mqueue_select(mqueue, option, max_size);
712 imq_unlock(mqueue);
713 splx(s);
714 return;
715 }
716 }
717
718 /*
719 * Looks like we'll have to block. The mqueue we will
720 * block on (whether the set's or the local port's) is
721 * still locked.
722 */
723 self = current_thread();
724 if (option & MACH_RCV_TIMEOUT) {
725 if (timeout == 0) {
726 imq_unlock(mqueue);
727 splx(s);
728 self->ith_state = MACH_RCV_TIMED_OUT;
729 return;
730 }
731 }
732
733 self->ith_state = MACH_RCV_IN_PROGRESS;
734 self->ith_option = option;
735 self->ith_msize = max_size;
736
737 wresult = wait_queue_assert_wait64_locked(&mqueue->imq_wait_queue,
738 IPC_MQUEUE_RECEIVE,
739 interruptible,
740 TRUE); /* unlock? */
741 /* mqueue/waitq is unlocked */
742 splx(s);
743
744 if (wresult == THREAD_WAITING) {
745 if (option & MACH_RCV_TIMEOUT)
746 thread_set_timer(timeout, 1000*NSEC_PER_USEC);
747
748 if (interruptible == THREAD_ABORTSAFE)
749 counter(c_ipc_mqueue_receive_block_user++);
750 else
751 counter(c_ipc_mqueue_receive_block_kernel++);
752
753 if (self->ith_continuation)
754 thread_block(ipc_mqueue_receive_continue);
755 /* NOTREACHED */
756
757 thread_block(THREAD_CONTINUE_NULL);
758 }
759 ipc_mqueue_receive_results();
760 }
761
762
763 /*
764 * Routine: ipc_mqueue_select
765 * Purpose:
766 * A receiver discovered that there was a message on the queue
767 * before he had to block. Pick the message off the queue and
768 * "post" it to himself.
769 * Conditions:
770 * mqueue locked.
771 * There is a message.
772 * Returns:
773 * MACH_MSG_SUCCESS Actually selected a message for ourselves.
774 * MACH_RCV_TOO_LARGE May or may not have pull it, but it is large
775 */
776 void
777 ipc_mqueue_select(
778 ipc_mqueue_t mqueue,
779 mach_msg_option_t option,
780 mach_msg_size_t max_size)
781 {
782 thread_t self = current_thread();
783 ipc_kmsg_t kmsg;
784 mach_port_seqno_t seqno;
785 mach_msg_return_t mr;
786
787 mr = MACH_MSG_SUCCESS;
788
789
790 /*
791 * Do some sanity checking of our ability to receive
792 * before pulling the message off the queue.
793 */
794 kmsg = ipc_kmsg_queue_first(&mqueue->imq_messages);
795
796 assert(kmsg != IKM_NULL);
797
798 if (kmsg->ikm_header.msgh_size +
799 REQUESTED_TRAILER_SIZE(option) > max_size) {
800 mr = MACH_RCV_TOO_LARGE;
801 }
802
803 /*
804 * If we really can't receive it, but we had the
805 * MACH_RCV_LARGE option set, then don't take it off
806 * the queue, instead return the appropriate error
807 * (and size needed).
808 */
809 if ((mr == MACH_RCV_TOO_LARGE) && (option & MACH_RCV_LARGE)) {
810 self->ith_kmsg = IKM_NULL;
811 self->ith_msize = kmsg->ikm_header.msgh_size;
812 self->ith_seqno = 0;
813 self->ith_state = mr;
814 return;
815 }
816
817 ipc_kmsg_rmqueue_first_macro(&mqueue->imq_messages, kmsg);
818 ipc_mqueue_release_msgcount(mqueue);
819 self->ith_seqno = mqueue->imq_seqno++;
820 self->ith_kmsg = kmsg;
821 self->ith_state = mr;
822
823 current_task()->messages_received++;
824 return;
825 }
826
/*
 *	Routine:	ipc_mqueue_destroy
 *	Purpose:
 *		Destroy a message queue.  Set any blocked senders running.
 *		Destroy the kmsgs in the queue.
 *	Conditions:
 *		Nothing locked.
 *		Receivers were removed when the receive right was "changed"
 */
void
ipc_mqueue_destroy(
	ipc_mqueue_t	mqueue)
{
	ipc_kmsg_queue_t kmqueue;
	ipc_kmsg_t kmsg;
	spl_t s;


	s = splsched();
	imq_lock(mqueue);
	/*
	 * rouse all blocked senders
	 */
	mqueue->imq_fullwaiters = FALSE;
	wait_queue_wakeup64_all_locked(
				&mqueue->imq_wait_queue,
				IPC_MQUEUE_FULL,
				THREAD_AWAKENED,
				FALSE);

	kmqueue = &mqueue->imq_messages;

	/*
	 * Drain and destroy every queued message.  The queue lock is
	 * dropped around each destroy — NOTE(review): presumably because
	 * ipc_kmsg_destroy_dest() may block or take other locks; confirm.
	 */
	while ((kmsg = ipc_kmsg_dequeue(kmqueue)) != IKM_NULL) {
		imq_unlock(mqueue);
		splx(s);

		ipc_kmsg_destroy_dest(kmsg);

		s = splsched();
		imq_lock(mqueue);
	}
	imq_unlock(mqueue);
	splx(s);
}
871
872 /*
873 * Routine: ipc_mqueue_set_qlimit
874 * Purpose:
875 * Changes a message queue limit; the maximum number
876 * of messages which may be queued.
877 * Conditions:
878 * Nothing locked.
879 */
880
881 void
882 ipc_mqueue_set_qlimit(
883 ipc_mqueue_t mqueue,
884 mach_port_msgcount_t qlimit)
885 {
886 spl_t s;
887
888 /* wake up senders allowed by the new qlimit */
889 s = splsched();
890 imq_lock(mqueue);
891 if (qlimit > mqueue->imq_qlimit) {
892 mach_port_msgcount_t i, wakeup;
893
894 /* caution: wakeup, qlimit are unsigned */
895 wakeup = qlimit - mqueue->imq_qlimit;
896
897 for (i = 0; i < wakeup; i++) {
898 if (wait_queue_wakeup64_one_locked(
899 &mqueue->imq_wait_queue,
900 IPC_MQUEUE_FULL,
901 THREAD_AWAKENED,
902 FALSE) == KERN_NOT_WAITING) {
903 mqueue->imq_fullwaiters = FALSE;
904 break;
905 }
906 }
907 }
908 mqueue->imq_qlimit = qlimit;
909 imq_unlock(mqueue);
910 splx(s);
911 }
912
913 /*
914 * Routine: ipc_mqueue_set_seqno
915 * Purpose:
916 * Changes an mqueue's sequence number.
917 * Conditions:
918 * Caller holds a reference to the queue's containing object.
919 */
920 void
921 ipc_mqueue_set_seqno(
922 ipc_mqueue_t mqueue,
923 mach_port_seqno_t seqno)
924 {
925 spl_t s;
926
927 s = splsched();
928 imq_lock(mqueue);
929 mqueue->imq_seqno = seqno;
930 imq_unlock(mqueue);
931 splx(s);
932 }
933
934
935 /*
936 * Routine: ipc_mqueue_copyin
937 * Purpose:
938 * Convert a name in a space to a message queue.
939 * Conditions:
940 * Nothing locked. If successful, the caller gets a ref for
941 * for the object. This ref ensures the continued existence of
942 * the queue.
943 * Returns:
944 * MACH_MSG_SUCCESS Found a message queue.
945 * MACH_RCV_INVALID_NAME The space is dead.
946 * MACH_RCV_INVALID_NAME The name doesn't denote a right.
947 * MACH_RCV_INVALID_NAME
948 * The denoted right is not receive or port set.
949 * MACH_RCV_IN_SET Receive right is a member of a set.
950 */
951
952 mach_msg_return_t
953 ipc_mqueue_copyin(
954 ipc_space_t space,
955 mach_port_name_t name,
956 ipc_mqueue_t *mqueuep,
957 ipc_object_t *objectp)
958 {
959 ipc_entry_t entry;
960 ipc_object_t object;
961 ipc_mqueue_t mqueue;
962
963 is_read_lock(space);
964 if (!space->is_active) {
965 is_read_unlock(space);
966 return MACH_RCV_INVALID_NAME;
967 }
968
969 entry = ipc_entry_lookup(space, name);
970 if (entry == IE_NULL) {
971 is_read_unlock(space);
972 return MACH_RCV_INVALID_NAME;
973 }
974
975 object = entry->ie_object;
976
977 if (entry->ie_bits & MACH_PORT_TYPE_RECEIVE) {
978 ipc_port_t port;
979 ipc_pset_t pset;
980
981 port = (ipc_port_t) object;
982 assert(port != IP_NULL);
983
984 ip_lock(port);
985 assert(ip_active(port));
986 assert(port->ip_receiver_name == name);
987 assert(port->ip_receiver == space);
988 is_read_unlock(space);
989 mqueue = &port->ip_messages;
990
991 } else if (entry->ie_bits & MACH_PORT_TYPE_PORT_SET) {
992 ipc_pset_t pset;
993
994 pset = (ipc_pset_t) object;
995 assert(pset != IPS_NULL);
996
997 ips_lock(pset);
998 assert(ips_active(pset));
999 assert(pset->ips_local_name == name);
1000 is_read_unlock(space);
1001
1002 mqueue = &pset->ips_messages;
1003 } else {
1004 is_read_unlock(space);
1005 return MACH_RCV_INVALID_NAME;
1006 }
1007
1008 /*
1009 * At this point, the object is locked and active,
1010 * the space is unlocked, and mqueue is initialized.
1011 */
1012
1013 io_reference(object);
1014 io_unlock(object);
1015
1016 *objectp = object;
1017 *mqueuep = mqueue;
1018 return MACH_MSG_SUCCESS;
1019 }
1020