/*
 * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_LICENSE_HEADER_START@
 *
 * Copyright (c) 1999-2003 Apple Computer, Inc.  All Rights Reserved.
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this
 * file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 * File:    ipc/ipc_mqueue.c
 * Author:  Rich Draves
 * Date:    1989
 *
 * Functions to manipulate IPC message queues.
 */

#include <mach/port.h>
#include <mach/message.h>
#include <mach/sync_policy.h>

#include <kern/assert.h>
#include <kern/counters.h>
#include <kern/sched_prim.h>
#include <kern/ipc_kobject.h>
#include <kern/misc_protos.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/wait_queue.h>

#include <ipc/ipc_mqueue.h>
#include <ipc/ipc_kmsg.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_pset.h>
#include <ipc/ipc_space.h>

#include <ddb/tr.h>

int ipc_mqueue_full;    /* address is event for queue space */
int ipc_mqueue_rcv;     /* address is event for message arrival */

#define TR_ENABLE 0

/*
 * Routine: ipc_mqueue_init
 * Purpose:
 *      Initialize a newly-allocated message queue.
 */
void
ipc_mqueue_init(
    ipc_mqueue_t    mqueue,
    boolean_t       is_set)
{
    if (is_set) {
        wait_queue_set_init(&mqueue->imq_set_queue, SYNC_POLICY_FIFO);
    } else {
        wait_queue_init(&mqueue->imq_wait_queue, SYNC_POLICY_FIFO);
        ipc_kmsg_queue_init(&mqueue->imq_messages);
        mqueue->imq_seqno = 0;
        mqueue->imq_msgcount = 0;
        mqueue->imq_qlimit = MACH_PORT_QLIMIT_DEFAULT;
        mqueue->imq_fullwaiters = FALSE;
    }
}
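
/*
 * Usage sketch (editorial illustration, not part of the original source):
 * a port's mqueue is initialized as a real message queue, a port set's
 * only as a wait-queue set.  The callers are assumed to look roughly
 * like those in ipc_port_init() and ipc_pset_init():
 *
 *      ipc_mqueue_init(&port->ip_messages, FALSE);    -- ordinary port
 *      ipc_mqueue_init(&pset->ips_messages, TRUE);    -- port set
 */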

/*
 * Routine: ipc_mqueue_member
 * Purpose:
 *      Indicate whether the (port) mqueue is a member of
 *      this portset's mqueue.  We do this by checking
 *      whether the portset mqueue's waitq is a member of
 *      the port's mqueue waitq.
 * Conditions:
 *      The portset's mqueue is not already a member;
 *      this may block while allocating linkage structures.
 */

boolean_t
ipc_mqueue_member(
    ipc_mqueue_t    port_mqueue,
    ipc_mqueue_t    set_mqueue)
{
    wait_queue_t port_waitq = &port_mqueue->imq_wait_queue;
    wait_queue_t set_waitq = &set_mqueue->imq_wait_queue;

    return (wait_queue_member(port_waitq, set_waitq));
}
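
/*
 * Usage sketch (editorial illustration): given a receive right and a
 * port set in hand, membership is a simple query on the embedded
 * mqueues (field names follow the ipc_port/ipc_pset layouts used
 * elsewhere in this file):
 *
 *      if (ipc_mqueue_member(&port->ip_messages, &pset->ips_messages))
 *              ... the port is already in the set ...
 */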

/*
 * Routine: ipc_mqueue_remove
 * Purpose:
 *      Remove the association between the queue and the specified
 *      set message queue.
 */

kern_return_t
ipc_mqueue_remove(
    ipc_mqueue_t    mqueue,
    ipc_mqueue_t    set_mqueue)
{
    wait_queue_t     mq_waitq = &mqueue->imq_wait_queue;
    wait_queue_set_t set_waitq = &set_mqueue->imq_set_queue;

    return wait_queue_unlink(mq_waitq, set_waitq);
}

/*
 * Routine: ipc_mqueue_remove_from_all
 * Purpose:
 *      Remove the mqueue from all the sets it is a member of.
 * Conditions:
 *      Nothing locked.
 */
void
ipc_mqueue_remove_from_all(
    ipc_mqueue_t    mqueue)
{
    wait_queue_t mq_waitq = &mqueue->imq_wait_queue;

    wait_queue_unlink_all(mq_waitq);
}

/*
 * Routine: ipc_mqueue_remove_all
 * Purpose:
 *      Remove all the member queues from the specified set.
 * Conditions:
 *      Nothing locked.
 */
void
ipc_mqueue_remove_all(
    ipc_mqueue_t    mqueue)
{
    wait_queue_set_t mq_setq = &mqueue->imq_set_queue;

    wait_queue_set_unlink_all(mq_setq);
}

/*
 * Routine: ipc_mqueue_add
 * Purpose:
 *      Associate the portset's mqueue with the port's mqueue.
 *      This has to be done so that posting the port will wake up
 *      a portset waiter.  If there are waiters on the portset
 *      mqueue and messages on the port mqueue, try to match them
 *      up now.
 * Conditions:
 *      May block.
 */
kern_return_t
ipc_mqueue_add(
    ipc_mqueue_t    port_mqueue,
    ipc_mqueue_t    set_mqueue)
{
    wait_queue_t     port_waitq = &port_mqueue->imq_wait_queue;
    wait_queue_set_t set_waitq = &set_mqueue->imq_set_queue;
    ipc_kmsg_queue_t kmsgq;
    ipc_kmsg_t       kmsg, next;
    kern_return_t    kr;
    spl_t            s;

    kr = wait_queue_link(port_waitq, set_waitq);
    if (kr != KERN_SUCCESS)
        return kr;

    /*
     * Now that the set has been added to the port, there may be
     * messages queued on the port and threads waiting on the set
     * waitq.  Let's get them together.
     */
    s = splsched();
    imq_lock(port_mqueue);
    kmsgq = &port_mqueue->imq_messages;
    for (kmsg = ipc_kmsg_queue_first(kmsgq);
         kmsg != IKM_NULL;
         kmsg = next) {
        next = ipc_kmsg_queue_next(kmsgq, kmsg);

        for (;;) {
            thread_t th;

            th = wait_queue_wakeup64_identity_locked(
                    port_waitq,
                    IPC_MQUEUE_RECEIVE,
                    THREAD_AWAKENED,
                    FALSE);
            /* waitq/mqueue still locked, thread locked */

            if (th == THREAD_NULL)
                goto leave;

            /*
             * Found a receiver.  See if it can handle the message
             * correctly (the message is not too large for it, or
             * it didn't care to be informed that the message was
             * too large).  If it can't handle it, take it off
             * the list, let it go back and figure it out, and
             * just move on to the next.
             */
            if (th->ith_msize <
                kmsg->ikm_header.msgh_size +
                REQUESTED_TRAILER_SIZE(th->ith_option)) {
                th->ith_state = MACH_RCV_TOO_LARGE;
                th->ith_msize = kmsg->ikm_header.msgh_size;
                if (th->ith_option & MACH_RCV_LARGE) {
                    /*
                     * let it go without the message
                     */
                    th->ith_kmsg = IKM_NULL;
                    th->ith_seqno = 0;
                    thread_unlock(th);
                    continue;    /* find another thread */
                }
            } else {
                th->ith_state = MACH_MSG_SUCCESS;
            }

            /*
             * This thread is going to take this message,
             * so give it to the thread.
             */
            ipc_mqueue_release_msgcount(port_mqueue);
            ipc_kmsg_rmqueue(kmsgq, kmsg);
            th->ith_kmsg = kmsg;
            th->ith_seqno = port_mqueue->imq_seqno++;
            thread_unlock(th);
            break;    /* go to next message */
        }
    }
 leave:
    imq_unlock(port_mqueue);
    splx(s);
    return KERN_SUCCESS;
}
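
/*
 * Usage sketch (editorial illustration): the expected caller is the
 * port-set machinery, which links a receive right into a set and lets
 * this routine marry already-queued messages to set waiters.  Assumed
 * shape:
 *
 *      kern_return_t kr;
 *
 *      kr = ipc_mqueue_add(&port->ip_messages, &pset->ips_messages);
 *      if (kr != KERN_SUCCESS)
 *              ... no linkage could be made; port was not added ...
 */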

/*
 * Routine: ipc_mqueue_changed
 * Purpose:
 *      Wake up receivers waiting in a message queue.
 * Conditions:
 *      The message queue is locked.
 */

void
ipc_mqueue_changed(
    ipc_mqueue_t    mqueue)
{
    wait_queue_wakeup64_all_locked(
            &mqueue->imq_wait_queue,
            IPC_MQUEUE_RECEIVE,
            THREAD_RESTART,
            FALSE);        /* unlock waitq? */
}

/*
 * Routine: ipc_mqueue_send
 * Purpose:
 *      Send a message to a message queue.  The message holds a reference
 *      for the destination port for this message queue in the
 *      msgh_remote_port field.
 *
 *      If unsuccessful, the caller still has possession of
 *      the message and must do something with it.  If successful,
 *      the message is queued, given to a receiver, or destroyed.
 * Conditions:
 *      Nothing locked.
 * Returns:
 *      MACH_MSG_SUCCESS        The message was accepted.
 *      MACH_SEND_TIMED_OUT     Caller still has message.
 *      MACH_SEND_INTERRUPTED   Caller still has message.
 */
mach_msg_return_t
ipc_mqueue_send(
    ipc_mqueue_t        mqueue,
    ipc_kmsg_t          kmsg,
    mach_msg_option_t   option,
    mach_msg_timeout_t  timeout)
{
    wait_result_t wresult;
    spl_t         s;

    /*
     * Don't block if:
     *  1) We're under the queue limit.
     *  2) Caller used the MACH_SEND_ALWAYS internal option.
     *  3) Message is sent to a send-once right.
     */
    s = splsched();
    imq_lock(mqueue);

    if (!imq_full(mqueue) ||
        (option & MACH_SEND_ALWAYS) ||
        (MACH_MSGH_BITS_REMOTE(kmsg->ikm_header.msgh_bits) ==
         MACH_MSG_TYPE_PORT_SEND_ONCE)) {
        mqueue->imq_msgcount++;
        imq_unlock(mqueue);
        splx(s);
    } else {
        thread_t cur_thread = current_thread();

        /*
         * We have to wait for space to be granted to us.
         */
        if ((option & MACH_SEND_TIMEOUT) && (timeout == 0)) {
            imq_unlock(mqueue);
            splx(s);
            return MACH_SEND_TIMED_OUT;
        }
        mqueue->imq_fullwaiters = TRUE;
        thread_lock(cur_thread);
        wresult = wait_queue_assert_wait64_locked(
                        &mqueue->imq_wait_queue,
                        IPC_MQUEUE_FULL,
                        THREAD_ABORTSAFE,
                        cur_thread);
        thread_unlock(cur_thread);
        imq_unlock(mqueue);
        splx(s);

        if (wresult == THREAD_WAITING) {
            if (option & MACH_SEND_TIMEOUT) {
                thread_set_timer(timeout, 1000*NSEC_PER_USEC);
                wresult = thread_block(THREAD_CONTINUE_NULL);
                if (wresult != THREAD_TIMED_OUT)
                    thread_cancel_timer();
            } else {
                wresult = thread_block(THREAD_CONTINUE_NULL);
            }
            counter(c_ipc_mqueue_send_block++);
        }

        switch (wresult) {
        case THREAD_TIMED_OUT:
            assert(option & MACH_SEND_TIMEOUT);
            return MACH_SEND_TIMED_OUT;

        case THREAD_AWAKENED:
            /* we can proceed - inherited msgcount from waker */
            break;

        case THREAD_INTERRUPTED:
            return MACH_SEND_INTERRUPTED;

        case THREAD_RESTART:
        default:
            panic("ipc_mqueue_send");
        }
    }

    ipc_mqueue_post(mqueue, kmsg);
    return MACH_MSG_SUCCESS;
}
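
/*
 * Usage sketch (editorial illustration): a caller that wants a bounded
 * wait passes MACH_SEND_TIMEOUT (the timeout is in milliseconds, per
 * the thread_set_timer() call above) and keeps ownership of the kmsg
 * on failure, per the contract in the routine header:
 *
 *      mach_msg_return_t mr;
 *
 *      mr = ipc_mqueue_send(&port->ip_messages, kmsg,
 *                           MACH_SEND_TIMEOUT, 1000);    -- ~1 second
 *      if (mr == MACH_SEND_TIMED_OUT)
 *              ... caller still owns kmsg: destroy it or retry ...
 */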

/*
 * Routine: ipc_mqueue_release_msgcount
 * Purpose:
 *      Release a message queue reference in the case where we
 *      found a waiter.
 *
 * Conditions:
 *      The message queue is locked.
 */
void
ipc_mqueue_release_msgcount(
    ipc_mqueue_t    mqueue)
{
    assert(imq_held(mqueue));
    assert(mqueue->imq_msgcount > 0);

    mqueue->imq_msgcount--;
    if (!imq_full(mqueue) && mqueue->imq_fullwaiters) {
        if (wait_queue_wakeup64_one_locked(
                &mqueue->imq_wait_queue,
                IPC_MQUEUE_FULL,
                THREAD_AWAKENED,
                FALSE) != KERN_SUCCESS) {
            mqueue->imq_fullwaiters = FALSE;
        } else {
            mqueue->imq_msgcount++;    /* gave it away */
        }
    }
}
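
/*
 * Accounting note (editorial): imq_msgcount counts reserved slots, not
 * queued messages.  A sender reserves a slot up front (imq_msgcount++
 * in ipc_mqueue_send); the slot is normally held by the message sitting
 * in the queue and released here when a receiver takes it.  For example,
 * with imq_qlimit == 1: the first send takes imq_msgcount from 0 to 1
 * and the queue is full; a second sender blocks on IPC_MQUEUE_FULL;
 * when a receiver dequeues the first message, the release above either
 * hands the slot directly to the blocked sender (imq_msgcount stays 1,
 * "gave it away") or, with no sender waiting, drops it back to 0.
 */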

/*
 * Routine: ipc_mqueue_post
 * Purpose:
 *      Post a message to a waiting receiver or enqueue it.  If a
 *      receiver is waiting, we can release our reserved space in
 *      the message queue.
 *
 * Conditions:
 *      If we need to queue, our space in the message queue is reserved.
 */
void
ipc_mqueue_post(
    register ipc_mqueue_t   mqueue,
    register ipc_kmsg_t     kmsg)
{
    spl_t s;

    /*
     * While the msg queue is locked, we have control of the
     * kmsg, so the ref in it for the port is still good.
     *
     * Check for a receiver for the message.
     */
    s = splsched();
    imq_lock(mqueue);
    for (;;) {
        wait_queue_t waitq = &mqueue->imq_wait_queue;
        thread_t receiver;

        receiver = wait_queue_wakeup64_identity_locked(
                        waitq,
                        IPC_MQUEUE_RECEIVE,
                        THREAD_AWAKENED,
                        FALSE);
        /* waitq still locked, thread locked */

        if (receiver == THREAD_NULL) {
            /*
             * no receivers; queue kmsg
             */
            assert(mqueue->imq_msgcount > 0);
            ipc_kmsg_enqueue_macro(&mqueue->imq_messages, kmsg);
            break;
        }

        /*
         * We found a waiting thread.
         * If the message is too large or the scatter list is too small
         * the thread we wake up will get that as its status.
         */
        if (receiver->ith_msize <
            (kmsg->ikm_header.msgh_size) +
            REQUESTED_TRAILER_SIZE(receiver->ith_option)) {
            receiver->ith_msize = kmsg->ikm_header.msgh_size;
            receiver->ith_state = MACH_RCV_TOO_LARGE;
        } else {
            receiver->ith_state = MACH_MSG_SUCCESS;
        }

        /*
         * If there is no problem with the upcoming receive, or the
         * receiver thread didn't specifically ask for the special
         * too-large error condition, go ahead and select it anyway.
         */
        if ((receiver->ith_state == MACH_MSG_SUCCESS) ||
            !(receiver->ith_option & MACH_RCV_LARGE)) {

            receiver->ith_kmsg = kmsg;
            receiver->ith_seqno = mqueue->imq_seqno++;
            thread_unlock(receiver);

            /* we didn't need our reserved spot in the queue */
            ipc_mqueue_release_msgcount(mqueue);
            break;
        }

        /*
         * Otherwise, this thread needs to be released to run
         * and handle its error without getting the message.  We
         * need to go back and pick another one.
         */
        receiver->ith_kmsg = IKM_NULL;
        receiver->ith_seqno = 0;
        thread_unlock(receiver);
    }

    imq_unlock(mqueue);
    splx(s);

    current_task()->messages_sent++;
}
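
/*
 * Behavior summary (editorial): for a waiting receiver there are three
 * outcomes, mirrored by ipc_mqueue_receive_results() below:
 *
 *      fits                          -- ith_state = MACH_MSG_SUCCESS,
 *                                       receiver gets the kmsg
 *      too large, no MACH_RCV_LARGE  -- ith_state = MACH_RCV_TOO_LARGE,
 *                                       receiver still gets the kmsg
 *      too large, MACH_RCV_LARGE     -- ith_state = MACH_RCV_TOO_LARGE,
 *                                       receiver gets only the size;
 *                                       we try the next waiter
 */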

void
ipc_mqueue_receive_results(void)
{
    thread_t          self = current_thread();
    mach_msg_option_t option = self->ith_option;
    kern_return_t     saved_wait_result = self->wait_result;

    /*
     * why did we wake up?
     */
    switch (saved_wait_result) {
    case THREAD_TIMED_OUT:
        self->ith_state = MACH_RCV_TIMED_OUT;
        return;

    case THREAD_INTERRUPTED:
        if (option & MACH_RCV_TIMEOUT)
            thread_cancel_timer();
        self->ith_state = MACH_RCV_INTERRUPTED;
        return;

    case THREAD_RESTART:
        /* something bad happened to the port/set */
        if (option & MACH_RCV_TIMEOUT)
            thread_cancel_timer();
        self->ith_state = MACH_RCV_PORT_CHANGED;
        return;

    case THREAD_AWAKENED:
        /*
         * We do not need to go select a message, somebody
         * handed us one (or a too-large indication).
         */
        if (option & MACH_RCV_TIMEOUT)
            thread_cancel_timer();

        switch (self->ith_state) {
        case MACH_RCV_SCATTER_SMALL:
        case MACH_RCV_TOO_LARGE:
            /*
             * Somebody tried to give us a too-large
             * message.  If we indicated that we cared,
             * then they only gave us the indication,
             * otherwise they gave us the indication
             * AND the message anyway.
             */
            if (option & MACH_RCV_LARGE) {
                return;
            }
            /* FALLTHROUGH */

        case MACH_MSG_SUCCESS:
            return;

        default:
            panic("ipc_mqueue_receive_results: strange ith_state");
        }

    default:
        panic("ipc_mqueue_receive_results: strange wait_result");
    }
}

void
ipc_mqueue_receive_continue(void)
{
    ipc_mqueue_receive_results();
    mach_msg_receive_continue();    /* hard-coded for now */
}

/*
 * Routine: ipc_mqueue_receive
 * Purpose:
 *      Receive a message from a message queue.
 *
 *      If continuation is non-zero, then we might discard
 *      our kernel stack when we block.  We will continue
 *      after unblocking by executing continuation.
 *
 *      If resume is true, then we are resuming a receive
 *      operation after a blocked receive discarded our stack.
 * Conditions:
 *      Our caller must hold a reference for the port or port set
 *      to which this queue belongs, to keep the queue
 *      from being deallocated.
 *
 *      The kmsg is returned with clean header fields
 *      and with the circular bit turned off.
 * Returns (in self->ith_state):
 *      MACH_MSG_SUCCESS        Message returned in kmsgp.
 *      MACH_RCV_TOO_LARGE      Message size returned in kmsgp.
 *      MACH_RCV_TIMED_OUT      No message obtained.
 *      MACH_RCV_INTERRUPTED    No message obtained.
 *      MACH_RCV_PORT_DIED      Port/set died; no message.
 *      MACH_RCV_PORT_CHANGED   Port moved into set; no msg.
 */

void
ipc_mqueue_receive(
    ipc_mqueue_t        mqueue,
    mach_msg_option_t   option,
    mach_msg_size_t     max_size,
    mach_msg_timeout_t  timeout,
    int                 interruptible)
{
    ipc_kmsg_queue_t kmsgs;
    wait_result_t    wresult;
    thread_t         self;
    spl_t            s;

    s = splsched();
    imq_lock(mqueue);

    if (imq_is_set(mqueue)) {
        wait_queue_link_t wql;
        ipc_mqueue_t      port_mq;
        queue_t           q;

        q = &mqueue->imq_setlinks;

        /*
         * If we are waiting on a portset mqueue, we need to see if
         * any of the member ports have work for us.  If so, try to
         * deliver one of those messages.  By holding the portset's
         * mqueue lock during the search, we tie up any attempts by
         * mqueue_deliver or portset membership changes that may
         * cross our path.  But this is a lock order violation, so we
         * have to do it "softly."  If we don't find a message waiting
         * for us, we will assert our intention to wait while still
         * holding that lock.  When we release the lock, the deliver/
         * change will succeed and find us.
         */
    search_set:
        queue_iterate(q, wql, wait_queue_link_t, wql_setlinks) {
            port_mq = (ipc_mqueue_t)wql->wql_queue;
            kmsgs = &port_mq->imq_messages;

            if (!imq_lock_try(port_mq)) {
                imq_unlock(mqueue);
                splx(s);
                delay(1);
                s = splsched();
                imq_lock(mqueue);
                goto search_set;    /* start again at beginning - SMP */
            }

            /*
             * If there is still a message to be had, we will
             * try to select it (may not succeed because of size
             * and options).  In any case, we deliver those
             * results back to the user.
             *
             * We also move the port's linkage to the tail of the
             * list for this set (fairness).  Future versions will
             * sort by timestamp or priority.
             */
            if (ipc_kmsg_queue_first(kmsgs) == IKM_NULL) {
                imq_unlock(port_mq);
                continue;
            }
            queue_remove(q, wql, wait_queue_link_t, wql_setlinks);
            queue_enter(q, wql, wait_queue_link_t, wql_setlinks);
            imq_unlock(mqueue);

            ipc_mqueue_select(port_mq, option, max_size);
            imq_unlock(port_mq);
            splx(s);
            return;
        }

    } else {

        /*
         * Receive on a single port.  Just try to get the messages.
         */
        kmsgs = &mqueue->imq_messages;
        if (ipc_kmsg_queue_first(kmsgs) != IKM_NULL) {
            ipc_mqueue_select(mqueue, option, max_size);
            imq_unlock(mqueue);
            splx(s);
            return;
        }
    }

    /*
     * Looks like we'll have to block.  The mqueue we will
     * block on (whether the set's or the local port's) is
     * still locked.
     */
    self = current_thread();
    if (option & MACH_RCV_TIMEOUT) {
        if (timeout == 0) {
            imq_unlock(mqueue);
            splx(s);
            self->ith_state = MACH_RCV_TIMED_OUT;
            return;
        }
    }

    thread_lock(self);
    self->ith_state = MACH_RCV_IN_PROGRESS;
    self->ith_option = option;
    self->ith_msize = max_size;

    wresult = wait_queue_assert_wait64_locked(&mqueue->imq_wait_queue,
                                              IPC_MQUEUE_RECEIVE,
                                              interruptible,
                                              self);
    thread_unlock(self);
    imq_unlock(mqueue);
    splx(s);

    if (wresult == THREAD_WAITING) {
        if (option & MACH_RCV_TIMEOUT)
            thread_set_timer(timeout, 1000*NSEC_PER_USEC);

        if (interruptible == THREAD_ABORTSAFE)
            counter(c_ipc_mqueue_receive_block_user++);
        else
            counter(c_ipc_mqueue_receive_block_kernel++);

        if (self->ith_continuation) {
            thread_block(ipc_mqueue_receive_continue);
            /* NOTREACHED */
        }
        thread_block(THREAD_CONTINUE_NULL);
    }
    ipc_mqueue_receive_results();
}
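
/*
 * Usage sketch (editorial illustration): the mach_msg receive path is
 * assumed to drive this routine roughly as follows, reading the result
 * out of the thread rather than out of a return value:
 *
 *      self->ith_continuation = continuation;    -- or NULL to keep stack
 *      ipc_mqueue_receive(mqueue, option, rcv_size, rcv_timeout,
 *                         THREAD_ABORTSAFE);
 *      mr = self->ith_state;    -- on MACH_MSG_SUCCESS, the message
 *                                  is in self->ith_kmsg
 */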

/*
 * Routine: ipc_mqueue_select
 * Purpose:
 *      A receiver discovered that there was a message on the queue
 *      before it had to block.  Pick the message off the queue and
 *      "post" it to ourselves.
 * Conditions:
 *      mqueue locked.
 *      There is a message.
 * Returns (in self->ith_state):
 *      MACH_MSG_SUCCESS        Actually selected a message for ourselves.
 *      MACH_RCV_TOO_LARGE      May or may not have pulled it, but it is large.
 */
void
ipc_mqueue_select(
    ipc_mqueue_t        mqueue,
    mach_msg_option_t   option,
    mach_msg_size_t     max_size)
{
    thread_t          self = current_thread();
    ipc_kmsg_t        kmsg;
    mach_msg_return_t mr;

    mr = MACH_MSG_SUCCESS;

    /*
     * Do some sanity checking of our ability to receive
     * before pulling the message off the queue.
     */
    kmsg = ipc_kmsg_queue_first(&mqueue->imq_messages);
    assert(kmsg != IKM_NULL);

    if (kmsg->ikm_header.msgh_size +
        REQUESTED_TRAILER_SIZE(option) > max_size) {
        mr = MACH_RCV_TOO_LARGE;
    }

    /*
     * If we really can't receive it, but we had the
     * MACH_RCV_LARGE option set, then don't take it off
     * the queue; instead return the appropriate error
     * (and size needed).
     */
    if ((mr == MACH_RCV_TOO_LARGE) && (option & MACH_RCV_LARGE)) {
        self->ith_kmsg = IKM_NULL;
        self->ith_msize = kmsg->ikm_header.msgh_size;
        self->ith_seqno = 0;
        self->ith_state = mr;
        return;
    }

    ipc_kmsg_rmqueue_first_macro(&mqueue->imq_messages, kmsg);
    ipc_mqueue_release_msgcount(mqueue);
    self->ith_seqno = mqueue->imq_seqno++;
    self->ith_kmsg = kmsg;
    self->ith_state = mr;

    current_task()->messages_received++;
}
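
/*
 * Worked example (editorial): with MACH_RCV_LARGE set and a 128-byte
 * receive buffer, a 256-byte message stays queued; the caller sees
 * ith_state == MACH_RCV_TOO_LARGE and ith_msize == 256, and can
 * reallocate and retry.  Without MACH_RCV_LARGE, the same message is
 * dequeued anyway and the too-large status accompanies it.
 */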

/*
 * Routine: ipc_mqueue_destroy
 * Purpose:
 *      Destroy a message queue.  Set any blocked senders running.
 *      Destroy the kmsgs in the queue.
 * Conditions:
 *      Nothing locked.
 *      Receivers were removed when the receive right was "changed".
 */
void
ipc_mqueue_destroy(
    ipc_mqueue_t    mqueue)
{
    ipc_kmsg_queue_t kmqueue;
    ipc_kmsg_t       kmsg;
    spl_t            s;

    s = splsched();
    imq_lock(mqueue);
    /*
     * rouse all blocked senders
     */
    mqueue->imq_fullwaiters = FALSE;
    wait_queue_wakeup64_all_locked(
            &mqueue->imq_wait_queue,
            IPC_MQUEUE_FULL,
            THREAD_AWAKENED,
            FALSE);

    kmqueue = &mqueue->imq_messages;

    while ((kmsg = ipc_kmsg_dequeue(kmqueue)) != IKM_NULL) {
        imq_unlock(mqueue);
        splx(s);

        ipc_kmsg_destroy_dest(kmsg);

        s = splsched();
        imq_lock(mqueue);
    }
    imq_unlock(mqueue);
    splx(s);
}

/*
 * Routine: ipc_mqueue_set_qlimit
 * Purpose:
 *      Changes a message queue limit: the maximum number
 *      of messages that may be queued.
 * Conditions:
 *      Nothing locked.
 */

void
ipc_mqueue_set_qlimit(
    ipc_mqueue_t         mqueue,
    mach_port_msgcount_t qlimit)
{
    spl_t s;

    /* wake up senders allowed by the new qlimit */
    s = splsched();
    imq_lock(mqueue);
    if (qlimit > mqueue->imq_qlimit) {
        mach_port_msgcount_t i, wakeup;

        /* caution: wakeup, qlimit are unsigned */
        wakeup = qlimit - mqueue->imq_qlimit;

        for (i = 0; i < wakeup; i++) {
            if (wait_queue_wakeup64_one_locked(
                    &mqueue->imq_wait_queue,
                    IPC_MQUEUE_FULL,
                    THREAD_AWAKENED,
                    FALSE) == KERN_NOT_WAITING) {
                mqueue->imq_fullwaiters = FALSE;
                break;
            }
            mqueue->imq_msgcount++;    /* each woken sender inherits a slot */
        }
    }
    mqueue->imq_qlimit = qlimit;
    imq_unlock(mqueue);
    splx(s);
}
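
/*
 * Worked example (editorial): raising the limit wakes exactly the
 * newly admitted senders.  If imq_qlimit == 5 with five messages queued
 * and three senders blocked, ipc_mqueue_set_qlimit(mqueue, 7) computes
 * wakeup = 7 - 5 = 2 and releases two of the three blocked senders;
 * each inherits a reserved slot, matching the THREAD_AWAKENED case in
 * ipc_mqueue_send().  Lowering the limit never wakes anyone and never
 * discards messages.
 */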

/*
 * Routine: ipc_mqueue_set_seqno
 * Purpose:
 *      Changes an mqueue's sequence number.
 * Conditions:
 *      Caller holds a reference to the queue's containing object.
 */
void
ipc_mqueue_set_seqno(
    ipc_mqueue_t      mqueue,
    mach_port_seqno_t seqno)
{
    spl_t s;

    s = splsched();
    imq_lock(mqueue);
    mqueue->imq_seqno = seqno;
    imq_unlock(mqueue);
    splx(s);
}

/*
 * Routine: ipc_mqueue_copyin
 * Purpose:
 *      Convert a name in a space to a message queue.
 * Conditions:
 *      Nothing locked.  If successful, the caller gets a ref
 *      for the object.  This ref ensures the continued existence
 *      of the queue.
 * Returns:
 *      MACH_MSG_SUCCESS        Found a message queue.
 *      MACH_RCV_INVALID_NAME   The space is dead.
 *      MACH_RCV_INVALID_NAME   The name doesn't denote a right.
 *      MACH_RCV_INVALID_NAME
 *              The denoted right is not receive or port set.
 *      MACH_RCV_IN_SET         Receive right is a member of a set.
 */

mach_msg_return_t
ipc_mqueue_copyin(
    ipc_space_t      space,
    mach_port_name_t name,
    ipc_mqueue_t     *mqueuep,
    ipc_object_t     *objectp)
{
    ipc_entry_t  entry;
    ipc_object_t object;
    ipc_mqueue_t mqueue;

    is_read_lock(space);
    if (!space->is_active) {
        is_read_unlock(space);
        return MACH_RCV_INVALID_NAME;
    }

    entry = ipc_entry_lookup(space, name);
    if (entry == IE_NULL) {
        is_read_unlock(space);
        return MACH_RCV_INVALID_NAME;
    }

    object = entry->ie_object;

    if (entry->ie_bits & MACH_PORT_TYPE_RECEIVE) {
        ipc_port_t port;

        port = (ipc_port_t) object;
        assert(port != IP_NULL);

        ip_lock(port);
        assert(ip_active(port));
        assert(port->ip_receiver_name == name);
        assert(port->ip_receiver == space);
        is_read_unlock(space);
        mqueue = &port->ip_messages;

    } else if (entry->ie_bits & MACH_PORT_TYPE_PORT_SET) {
        ipc_pset_t pset;

        pset = (ipc_pset_t) object;
        assert(pset != IPS_NULL);

        ips_lock(pset);
        assert(ips_active(pset));
        assert(pset->ips_local_name == name);
        is_read_unlock(space);

        mqueue = &pset->ips_messages;
    } else {
        is_read_unlock(space);
        return MACH_RCV_INVALID_NAME;
    }

    /*
     * At this point, the object is locked and active,
     * the space is unlocked, and mqueue is initialized.
     */

    io_reference(object);
    io_unlock(object);

    *objectp = object;
    *mqueuep = mqueue;
    return MACH_MSG_SUCCESS;
}
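
/*
 * Usage sketch (editorial illustration): the receive path is assumed to
 * translate the user's name first, then drop the object ref when done:
 *
 *      mr = ipc_mqueue_copyin(space, rcv_name, &mqueue, &object);
 *      if (mr != MACH_MSG_SUCCESS)
 *              return mr;
 *      ... ipc_mqueue_receive(mqueue, ...) ...
 *      ipc_object_release(object);    -- balances the ref taken above
 */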