/* osfmk/ipc/ipc_mqueue.c — from Apple xnu-1228.15.4 (apple/xnu.git) */
1 /*
2 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * @OSF_FREE_COPYRIGHT@
30 */
31 /*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56 /*
57 */
58 /*
59 * File: ipc/ipc_mqueue.c
60 * Author: Rich Draves
61 * Date: 1989
62 *
63 * Functions to manipulate IPC message queues.
64 */
65 /*
66 * NOTICE: This file was modified by SPARTA, Inc. in 2006 to introduce
67 * support for mandatory and extensible security protections. This notice
68 * is included in support of clause 2.2 (b) of the Apple Public License,
69 * Version 2.0.
70 */
71
72
73 #include <mach/port.h>
74 #include <mach/message.h>
75 #include <mach/sync_policy.h>
76
77 #include <kern/assert.h>
78 #include <kern/counters.h>
79 #include <kern/sched_prim.h>
80 #include <kern/ipc_kobject.h>
81 #include <kern/ipc_mig.h> /* XXX - for mach_msg_receive_continue */
82 #include <kern/misc_protos.h>
83 #include <kern/task.h>
84 #include <kern/thread.h>
85 #include <kern/wait_queue.h>
86
87 #include <ipc/ipc_mqueue.h>
88 #include <ipc/ipc_kmsg.h>
89 #include <ipc/ipc_port.h>
90 #include <ipc/ipc_pset.h>
91 #include <ipc/ipc_space.h>
92
93 #include <ddb/tr.h>
94
95 #if CONFIG_MACF_MACH
96 #include <security/mac_mach_internal.h>
97 #endif
98
99 int ipc_mqueue_full; /* address is event for queue space */
100 int ipc_mqueue_rcv; /* address is event for message arrival */
101
102 #define TR_ENABLE 0
103
104 /* forward declarations */
105 void ipc_mqueue_receive_results(wait_result_t result);
106
107 /*
108 * Routine: ipc_mqueue_init
109 * Purpose:
110 * Initialize a newly-allocated message queue.
111 */
112 void
113 ipc_mqueue_init(
114 ipc_mqueue_t mqueue,
115 boolean_t is_set)
116 {
117 if (is_set) {
118 wait_queue_set_init(&mqueue->imq_set_queue, SYNC_POLICY_FIFO);
119 } else {
120 wait_queue_init(&mqueue->imq_wait_queue, SYNC_POLICY_FIFO);
121 ipc_kmsg_queue_init(&mqueue->imq_messages);
122 mqueue->imq_seqno = 0;
123 mqueue->imq_msgcount = 0;
124 mqueue->imq_qlimit = MACH_PORT_QLIMIT_DEFAULT;
125 mqueue->imq_fullwaiters = FALSE;
126 }
127 }
128
129 /*
130 * Routine: ipc_mqueue_member
131 * Purpose:
132 * Indicate whether the (port) mqueue is a member of
133 * this portset's mqueue. We do this by checking
134 * whether the portset mqueue's waitq is an member of
135 * the port's mqueue waitq.
136 * Conditions:
137 * the portset's mqueue is not already a member
138 * this may block while allocating linkage structures.
139 */
140
141 boolean_t
142 ipc_mqueue_member(
143 ipc_mqueue_t port_mqueue,
144 ipc_mqueue_t set_mqueue)
145 {
146 wait_queue_t port_waitq = &port_mqueue->imq_wait_queue;
147 wait_queue_set_t set_waitq = &set_mqueue->imq_set_queue;
148
149 return (wait_queue_member(port_waitq, set_waitq));
150
151 }
152
153 /*
154 * Routine: ipc_mqueue_remove
155 * Purpose:
156 * Remove the association between the queue and the specified
157 * set message queue.
158 */
159
160 kern_return_t
161 ipc_mqueue_remove(
162 ipc_mqueue_t mqueue,
163 ipc_mqueue_t set_mqueue)
164 {
165 wait_queue_t mq_waitq = &mqueue->imq_wait_queue;
166 wait_queue_set_t set_waitq = &set_mqueue->imq_set_queue;
167
168 return wait_queue_unlink(mq_waitq, set_waitq);
169 }
170
171 /*
172 * Routine: ipc_mqueue_remove_from_all
173 * Purpose:
174 * Remove the mqueue from all the sets it is a member of
175 * Conditions:
176 * Nothing locked.
177 */
178 void
179 ipc_mqueue_remove_from_all(
180 ipc_mqueue_t mqueue)
181 {
182 wait_queue_t mq_waitq = &mqueue->imq_wait_queue;
183
184 wait_queue_unlink_all(mq_waitq);
185 return;
186 }
187
188 /*
189 * Routine: ipc_mqueue_remove_all
190 * Purpose:
191 * Remove all the member queues from the specified set.
192 * Conditions:
193 * Nothing locked.
194 */
195 void
196 ipc_mqueue_remove_all(
197 ipc_mqueue_t mqueue)
198 {
199 wait_queue_set_t mq_setq = &mqueue->imq_set_queue;
200
201 wait_queue_set_unlink_all(mq_setq);
202 return;
203 }
204
205
/*
 *	Routine:	ipc_mqueue_add
 *	Purpose:
 *		Associate the portset's mqueue with the port's mqueue.
 *		This has to be done so that posting the port will wakeup
 *		a portset waiter.  If there are waiters on the portset
 *		mqueue and messages on the port mqueue, try to match them
 *		up now.
 *	Conditions:
 *		May block (the wait-queue link may allocate linkage
 *		structures).  Neither mqueue locked on entry.
 *	Returns:
 *		KERN_SUCCESS		linked (and any queued messages
 *					handed to waiting set receivers)
 *		other			wait_queue_link failure; no messages
 *					were touched.
 */
kern_return_t
ipc_mqueue_add(
	ipc_mqueue_t	 port_mqueue,
	ipc_mqueue_t	 set_mqueue)
{
	wait_queue_t	 port_waitq = &port_mqueue->imq_wait_queue;
	wait_queue_set_t set_waitq = &set_mqueue->imq_set_queue;
	ipc_kmsg_queue_t kmsgq;
	ipc_kmsg_t       kmsg, next;
	kern_return_t	 kr;
	spl_t		 s;

	kr = wait_queue_link(port_waitq, set_waitq);
	if (kr != KERN_SUCCESS)
		return kr;

	/*
	 * Now that the set has been added to the port, there may be
	 * messages queued on the port and threads waiting on the set
	 * waitq.  Lets get them together.
	 */
	s = splsched();
	imq_lock(port_mqueue);
	kmsgq = &port_mqueue->imq_messages;
	for (kmsg = ipc_kmsg_queue_first(kmsgq);
	     kmsg != IKM_NULL;
	     kmsg = next) {
		/* capture the successor now: kmsg may be dequeued below */
		next = ipc_kmsg_queue_next(kmsgq, kmsg);

		/* inner loop: keep trying receivers until one takes kmsg,
		 * or no receivers remain waiting */
		for (;;) {
			thread_t th;

			th = wait_queue_wakeup64_identity_locked(
						port_waitq,
						IPC_MQUEUE_RECEIVE,
						THREAD_AWAKENED,
						FALSE);
			/* waitq/mqueue still locked, thread locked */

			if (th == THREAD_NULL)
				goto leave;

			/*
			 * Found a receiver. see if they can handle the message
			 * correctly (the message is not too large for them, or
			 * they didn't care to be informed that the message was
			 * too large).  If they can't handle it, take them off
			 * the list and let them go back and figure it out and
			 * just move onto the next.
			 */
			if (th->ith_msize <
			    kmsg->ikm_header->msgh_size +
			    REQUESTED_TRAILER_SIZE(th->ith_option)) {
				th->ith_state = MACH_RCV_TOO_LARGE;
				/* report the size the receiver would need */
				th->ith_msize = kmsg->ikm_header->msgh_size;
				if (th->ith_option & MACH_RCV_LARGE) {
					/*
					 * let him go without message
					 */
					th->ith_kmsg = IKM_NULL;
					th->ith_seqno = 0;
					thread_unlock(th);
					continue; /* find another thread */
				}
			} else {
				th->ith_state = MACH_MSG_SUCCESS;
			}

			/*
			 * This thread is going to take this message,
			 * so give it to him.
			 */
			ipc_kmsg_rmqueue(kmsgq, kmsg);
			/* message left the queue: drop its reserved slot
			 * (may in turn wake a blocked sender) */
			ipc_mqueue_release_msgcount(port_mqueue);

			th->ith_kmsg = kmsg;
			th->ith_seqno = port_mqueue->imq_seqno++;
			thread_unlock(th);
			break;  /* go to next message */
		}

	}
 leave:
	imq_unlock(port_mqueue);
	splx(s);
	return KERN_SUCCESS;
}
304
305 /*
306 * Routine: ipc_mqueue_changed
307 * Purpose:
308 * Wake up receivers waiting in a message queue.
309 * Conditions:
310 * The message queue is locked.
311 */
312
313 void
314 ipc_mqueue_changed(
315 ipc_mqueue_t mqueue)
316 {
317 wait_queue_wakeup64_all_locked(
318 &mqueue->imq_wait_queue,
319 IPC_MQUEUE_RECEIVE,
320 THREAD_RESTART,
321 FALSE); /* unlock waitq? */
322 }
323
324
325
326
/*
 *	Routine:	ipc_mqueue_send
 *	Purpose:
 *		Send a message to a message queue.  The message holds a reference
 *		for the destination port for this message queue in the
 *		msgh_remote_port field.
 *
 *		If unsuccessful, the caller still has possession of
 *		the message and must do something with it.  If successful,
 *		the message is queued, given to a receiver, or destroyed.
 *	Conditions:
 *		Nothing locked.
 *	Returns:
 *		MACH_MSG_SUCCESS	The message was accepted.
 *		MACH_SEND_TIMED_OUT	Caller still has message.
 *		MACH_SEND_INTERRUPTED	Caller still has message.
 *		MACH_SEND_NO_BUFFER	Kernel-reserved queue is full;
 *					caller still has message.
 */
mach_msg_return_t
ipc_mqueue_send(
	ipc_mqueue_t		mqueue,
	ipc_kmsg_t		kmsg,
	mach_msg_option_t	option,
	mach_msg_timeout_t	send_timeout)
{
	int wresult;
	spl_t s;

	/*
	 *  Don't block if:
	 *	1) We're under the queue limit.
	 *	2) Caller used the MACH_SEND_ALWAYS internal option.
	 *	3) Message is sent to a send-once right.
	 */
	s = splsched();
	imq_lock(mqueue);

	if (!imq_full(mqueue) ||
	    (!imq_full_kernel(mqueue) &&
	     ((option & MACH_SEND_ALWAYS) ||
	      (MACH_MSGH_BITS_REMOTE(kmsg->ikm_header->msgh_bits) ==
	       MACH_MSG_TYPE_PORT_SEND_ONCE)))) {
		/* reserve our slot in the queue before dropping the lock */
		mqueue->imq_msgcount++;
		assert(mqueue->imq_msgcount > 0);
		imq_unlock(mqueue);
		splx(s);
	} else {
		thread_t cur_thread = current_thread();
		uint64_t deadline;

		/*
		 * We have to wait for space to be granted to us.
		 */
		if ((option & MACH_SEND_TIMEOUT) && (send_timeout == 0)) {
			imq_unlock(mqueue);
			splx(s);
			return MACH_SEND_TIMED_OUT;
		}
		if (imq_full_kernel(mqueue)) {
			imq_unlock(mqueue);
			splx(s);
			return MACH_SEND_NO_BUFFER;
		}
		mqueue->imq_fullwaiters = TRUE;
		thread_lock(cur_thread);
		if (option & MACH_SEND_TIMEOUT)
			clock_interval_to_deadline(send_timeout, 1000*NSEC_PER_USEC, &deadline);
		else
			deadline = 0;
		/* assert intent to wait while still holding the mqueue lock,
		 * so a concurrent wakeup cannot be missed */
		wresult = wait_queue_assert_wait64_locked(
						&mqueue->imq_wait_queue,
						IPC_MQUEUE_FULL,
						THREAD_ABORTSAFE, deadline,
						cur_thread);
		thread_unlock(cur_thread);
		imq_unlock(mqueue);
		splx(s);

		if (wresult == THREAD_WAITING) {
			wresult = thread_block(THREAD_CONTINUE_NULL);
			counter(c_ipc_mqueue_send_block++);
		}

		switch (wresult) {
		case THREAD_TIMED_OUT:
			assert(option & MACH_SEND_TIMEOUT);
			return MACH_SEND_TIMED_OUT;

		case THREAD_AWAKENED:
			/* we can proceed - inherited msgcount from waker */
			assert(mqueue->imq_msgcount > 0);
			break;

		case THREAD_INTERRUPTED:
			return MACH_SEND_INTERRUPTED;

		case THREAD_RESTART:
		default:
			panic("ipc_mqueue_send");
		}
	}

	/* slot reserved (or inherited) - deliver or enqueue the message */
	ipc_mqueue_post(mqueue, kmsg);
	return MACH_MSG_SUCCESS;
}
431
/*
 *	Routine:	ipc_mqueue_release_msgcount
 *	Purpose:
 *		Release a message queue reference in the case where we
 *		found a waiter.
 *
 *		If releasing the slot takes the queue below its limit and
 *		senders are blocked, hand the slot directly to one of them
 *		(the awakened sender inherits the msgcount reference).
 *	Conditions:
 *		The message queue is locked.
 *		The message corresponding to this reference is off the queue.
 */
void
ipc_mqueue_release_msgcount(
	ipc_mqueue_t mqueue)
{
	assert(imq_held(mqueue));
	assert(mqueue->imq_msgcount > 1 || ipc_kmsg_queue_empty(&mqueue->imq_messages));

	mqueue->imq_msgcount--;

	if (!imq_full(mqueue) && mqueue->imq_fullwaiters) {
		if (wait_queue_wakeup64_one_locked(
						&mqueue->imq_wait_queue,
						IPC_MQUEUE_FULL,
						THREAD_AWAKENED,
						FALSE) != KERN_SUCCESS) {
			/* nobody was actually waiting - remember that */
			mqueue->imq_fullwaiters = FALSE;
		} else {
			/* gave away our slot - add reference back */
			mqueue->imq_msgcount++;
		}
	}
}
464
/*
 *	Routine:	ipc_mqueue_post
 *	Purpose:
 *		Post a message to a waiting receiver or enqueue it.  If a
 *		receiver is waiting, we can release our reserved space in
 *		the message queue.
 *
 *	Conditions:
 *		If we need to queue, our space in the message queue is reserved.
 *		Nothing locked on entry; always succeeds (the message is
 *		either handed to a receiver or enqueued).
 */
void
ipc_mqueue_post(
	register ipc_mqueue_t 	mqueue,
	register ipc_kmsg_t		kmsg)
{

	spl_t s;

	/*
	 * While the msg queue	is locked, we have control of the
	 *  kmsg, so the ref in	it for the port is still good.
	 *
	 * Check for a receiver for the message.
	 */
	s = splsched();
	imq_lock(mqueue);
	/* loop until the kmsg is handed to a receiver or enqueued */
	for (;;) {
		wait_queue_t waitq = &mqueue->imq_wait_queue;
		thread_t receiver;

		receiver = wait_queue_wakeup64_identity_locked(
							waitq,
							IPC_MQUEUE_RECEIVE,
							THREAD_AWAKENED,
							FALSE);
		/* waitq still locked, thread locked */

		if (receiver == THREAD_NULL) {
			/*
			 * no receivers; queue kmsg
			 */
			assert(mqueue->imq_msgcount > 0);
			ipc_kmsg_enqueue_macro(&mqueue->imq_messages, kmsg);
			break;
		}

		/*
		 * We found a waiting thread.
		 * If the message is too large or the scatter list is too small
		 * the thread we wake up will get that as its status.
		 */
		if (receiver->ith_msize <
		    (kmsg->ikm_header->msgh_size) +
		    REQUESTED_TRAILER_SIZE(receiver->ith_option)) {
			/* report the size it would have needed */
			receiver->ith_msize = kmsg->ikm_header->msgh_size;
			receiver->ith_state = MACH_RCV_TOO_LARGE;
		} else {
			receiver->ith_state = MACH_MSG_SUCCESS;
		}

		/*
		 * If there is no problem with the upcoming receive, or the
		 * receiver thread didn't specifically ask for special too
		 * large error condition, go ahead and select it anyway.
		 */
		if ((receiver->ith_state == MACH_MSG_SUCCESS) ||
		    !(receiver->ith_option & MACH_RCV_LARGE)) {

			receiver->ith_kmsg = kmsg;
			receiver->ith_seqno = mqueue->imq_seqno++;
			thread_unlock(receiver);

			/* we didn't need our reserved spot in the queue */
			ipc_mqueue_release_msgcount(mqueue);
			break;
		}

		/*
		 * Otherwise, this thread needs to be released to run
		 * and handle its error without getting the message.  We
		 * need to go back and pick another one.
		 */
		receiver->ith_kmsg = IKM_NULL;
		receiver->ith_seqno = 0;
		thread_unlock(receiver);
	}

	imq_unlock(mqueue);
	splx(s);

	current_task()->messages_sent++;
	return;
}
558
559
/*
 *	Routine:	ipc_mqueue_receive_results
 *	Purpose:
 *		Translate the wait result that woke the current (receiver)
 *		thread into its ith_state receive status.  Runs on the
 *		woken thread itself after blocking in ipc_mqueue_receive.
 */
/* static */ void
ipc_mqueue_receive_results(wait_result_t saved_wait_result)
{
	thread_t     		self = current_thread();
	mach_msg_option_t	option = self->ith_option;

	/*
	 * why did we wake up?
	 */
	switch (saved_wait_result) {
	case THREAD_TIMED_OUT:
		self->ith_state = MACH_RCV_TIMED_OUT;
		return;

	case THREAD_INTERRUPTED:
		self->ith_state = MACH_RCV_INTERRUPTED;
		return;

	case THREAD_RESTART:
		/* something bad happened to the port/set */
		self->ith_state = MACH_RCV_PORT_CHANGED;
		return;

	case THREAD_AWAKENED:
		/*
		 * We do not need to go select a message, somebody
		 * handed us one (or a too-large indication).
		 */
		switch (self->ith_state) {
		case MACH_RCV_SCATTER_SMALL:
		case MACH_RCV_TOO_LARGE:
			/*
			 * Somebody tried to give us a too large
			 * message. If we indicated that we cared,
			 * then they only gave us the indication,
			 * otherwise they gave us the indication
			 * AND the message anyway.
			 */
			if (option & MACH_RCV_LARGE) {
				return;
			}
			/* fall through: we got the message anyway */

		case MACH_MSG_SUCCESS:
			return;

		default:
			panic("ipc_mqueue_receive_results: strange ith_state");
		}

	default:
		panic("ipc_mqueue_receive_results: strange wait_result");
	}
}
613
614 void
615 ipc_mqueue_receive_continue(
616 __unused void *param,
617 wait_result_t wresult)
618 {
619 ipc_mqueue_receive_results(wresult);
620 mach_msg_receive_continue(); /* hard-coded for now */
621 }
622
/*
 *	Routine:	ipc_mqueue_receive
 *	Purpose:
 *		Receive a message from a message queue.
 *
 *		If continuation is non-zero, then we might discard
 *		our kernel stack when we block.  We will continue
 *		after unblocking by executing continuation.
 *
 *		If resume is true, then we are resuming a receive
 *		operation after a blocked receive discarded our stack.
 *	Conditions:
 *		Our caller must hold a reference for the port or port set
 *		to which this queue belongs, to keep the queue
 *		from being deallocated.
 *
 *		The kmsg is returned with clean header fields
 *		and with the circular bit turned off.
 *
 *		The result is delivered through the current thread's
 *		ith_state / ith_kmsg fields (directly, or after the
 *		continuation runs); there is no return value.
 *	Returns (in self->ith_state):
 *		MACH_MSG_SUCCESS	Message returned in kmsgp.
 *		MACH_RCV_TOO_LARGE	Message size returned in kmsgp.
 *		MACH_RCV_TIMED_OUT	No message obtained.
 *		MACH_RCV_INTERRUPTED	No message obtained.
 *		MACH_RCV_PORT_DIED	Port/set died; no message.
 *		MACH_RCV_PORT_CHANGED	Port moved into set; no msg.
 *
 */

void
ipc_mqueue_receive(
	ipc_mqueue_t		mqueue,
	mach_msg_option_t	option,
	mach_msg_size_t		max_size,
	mach_msg_timeout_t	rcv_timeout,
	int			interruptible)
{
	ipc_kmsg_queue_t	kmsgs;
	wait_result_t           wresult;
	thread_t                self;
	uint64_t		deadline;
	spl_t			s;
#if CONFIG_MACF_MACH
	ipc_labelh_t lh;
	task_t task;
	int rc;
#endif

	s = splsched();
	imq_lock(mqueue);
	self = current_thread();

	if (imq_is_set(mqueue)) {
		wait_queue_link_t wql;
		ipc_mqueue_t port_mq;
		queue_t q;

		q = &mqueue->imq_setlinks;

		/*
		 * If we are waiting on a portset mqueue, we need to see if
		 * any of the member ports have work for us.  If so, try to
		 * deliver one of those messages. By holding the portset's
		 * mqueue lock during the search, we tie up any attempts by
		 * mqueue_deliver or portset membership changes that may
		 * cross our path. But this is a lock order violation, so we
		 * have to do it "softly."  If we don't find a message waiting
		 * for us, we will assert our intention to wait while still
		 * holding that lock.  When we release the lock, the deliver/
		 * change will succeed and find us.
		 */
	search_set:
		queue_iterate(q, wql, wait_queue_link_t, wql_setlinks) {
			port_mq = (ipc_mqueue_t)wql->wql_queue;
			kmsgs = &port_mq->imq_messages;

			/* "softly": try-lock the member port's mqueue to
			 * avoid deadlocking on the reversed lock order;
			 * on failure back off completely and restart */
			if (!imq_lock_try(port_mq)) {
				imq_unlock(mqueue);
				splx(s);
				mutex_pause(0);
				s = splsched();
				imq_lock(mqueue);
				goto search_set; /* start again at beginning - SMP */
			}

			/*
			 * If there is still a message to be had, we will
			 * try to select it (may not succeed because of size
			 * and options).  In any case, we deliver those
			 * results back to the user.
			 *
			 * We also move the port's linkage to the tail of the
			 * list for this set (fairness). Future versions will
			 * sort by timestamp or priority.
			 */
			if (ipc_kmsg_queue_first(kmsgs) == IKM_NULL) {
				imq_unlock(port_mq);
				continue;
			}
			queue_remove(q, wql, wait_queue_link_t, wql_setlinks);
			queue_enter(q, wql, wait_queue_link_t, wql_setlinks);
			imq_unlock(mqueue);

			ipc_mqueue_select(port_mq, option, max_size);
			imq_unlock(port_mq);
#if CONFIG_MACF_MACH
			/* MAC hook: may veto the receive after selection */
			if (self->ith_kmsg != NULL &&
			    self->ith_kmsg->ikm_sender != NULL) {
				lh = self->ith_kmsg->ikm_sender->label;
				task = current_task();
				tasklabel_lock(task);
				ip_lock(lh->lh_port);
				rc = mac_port_check_receive(&task->maclabel,
							    &lh->lh_label);
				ip_unlock(lh->lh_port);
				tasklabel_unlock(task);
				if (rc)
					self->ith_state = MACH_RCV_INVALID_DATA;
			}
#endif
			splx(s);
			return;

		}

	} else {

		/*
		 * Receive on a single port. Just try to get the messages.
		 */
	  	kmsgs = &mqueue->imq_messages;
		if (ipc_kmsg_queue_first(kmsgs) != IKM_NULL) {
			ipc_mqueue_select(mqueue, option, max_size);
			imq_unlock(mqueue);
#if CONFIG_MACF_MACH
			/* MAC hook: may veto the receive after selection */
			if (self->ith_kmsg != NULL &&
			    self->ith_kmsg->ikm_sender != NULL) {
				lh = self->ith_kmsg->ikm_sender->label;
				task = current_task();
				tasklabel_lock(task);
				ip_lock(lh->lh_port);
				rc = mac_port_check_receive(&task->maclabel,
							    &lh->lh_label);
				ip_unlock(lh->lh_port);
				tasklabel_unlock(task);
				if (rc)
					self->ith_state = MACH_RCV_INVALID_DATA;
			}
#endif
			splx(s);
			return;
		}
	}

	/*
	 * Looks like we'll have to block.  The mqueue we will
	 * block on (whether the set's or the local port's) is
	 * still locked.
	 */
	if (option & MACH_RCV_TIMEOUT) {
		if (rcv_timeout == 0) {
			imq_unlock(mqueue);
			splx(s);
			self->ith_state = MACH_RCV_TIMED_OUT;
			return;
		}
	}

	thread_lock(self);
	self->ith_state = MACH_RCV_IN_PROGRESS;
	self->ith_option = option;
	self->ith_msize = max_size;

	if (option & MACH_RCV_TIMEOUT)
		clock_interval_to_deadline(rcv_timeout, 1000*NSEC_PER_USEC, &deadline);
	else
		deadline = 0;

	/* assert the wait while still holding the mqueue lock so a
	 * concurrent post/membership change is guaranteed to find us */
	wresult = wait_queue_assert_wait64_locked(&mqueue->imq_wait_queue,
						  IPC_MQUEUE_RECEIVE,
						  interruptible, deadline,
						  self);
	thread_unlock(self);
	imq_unlock(mqueue);
	splx(s);

	if (wresult == THREAD_WAITING) {
		counter((interruptible == THREAD_ABORTSAFE) ?
			c_ipc_mqueue_receive_block_user++ :
			c_ipc_mqueue_receive_block_kernel++);

		if (self->ith_continuation)
			thread_block(ipc_mqueue_receive_continue);
			/* NOTREACHED */

		wresult = thread_block(THREAD_CONTINUE_NULL);
	}
	ipc_mqueue_receive_results(wresult);
}
821
822
/*
 *	Routine:	ipc_mqueue_select
 *	Purpose:
 *		A receiver discovered that there was a message on the queue
 *		before he had to block.  Pick the message off the queue and
 *		"post" it to himself (via self->ith_kmsg / ith_state —
 *		the function itself returns nothing).
 *	Conditions:
 *		mqueue locked.
 *		There is a message.
 *	Results (in self->ith_state):
 *		MACH_MSG_SUCCESS	Actually selected a message for ourselves.
 *		MACH_RCV_TOO_LARGE	May or may not have pulled it, but it is
 *					large; with MACH_RCV_LARGE the message
 *					is left queued and only the needed
 *					size is reported.
 */
void
ipc_mqueue_select(
	ipc_mqueue_t		mqueue,
	mach_msg_option_t	option,
	mach_msg_size_t		max_size)
{
	thread_t self = current_thread();
	ipc_kmsg_t kmsg;
	mach_msg_return_t mr;
	mach_msg_size_t rcv_size;

	mr = MACH_MSG_SUCCESS;


	/*
	 * Do some sanity checking of our ability to receive
	 * before pulling the message off the queue.
	 */
	kmsg = ipc_kmsg_queue_first(&mqueue->imq_messages);
	assert(kmsg != IKM_NULL);

	/*
	 * If we really can't receive it, but we had the
	 * MACH_RCV_LARGE option set, then don't take it off
	 * the queue, instead return the appropriate error
	 * (and size needed).
	 */
	rcv_size = ipc_kmsg_copyout_size(kmsg, self->map);
	if (rcv_size + REQUESTED_TRAILER_SIZE(option) > max_size) {
		mr = MACH_RCV_TOO_LARGE;
		if (option & MACH_RCV_LARGE) {
			self->ith_kmsg = IKM_NULL;
			self->ith_msize = rcv_size;
			self->ith_seqno = 0;
			self->ith_state = mr;
			return;
		}
		/* without MACH_RCV_LARGE: take the message anyway,
		 * reporting MACH_RCV_TOO_LARGE alongside it */
	}

	ipc_kmsg_rmqueue_first_macro(&mqueue->imq_messages, kmsg);
	/* message is off the queue - release its reserved slot */
	ipc_mqueue_release_msgcount(mqueue);
	self->ith_seqno = mqueue->imq_seqno++;
	self->ith_kmsg = kmsg;
	self->ith_state = mr;

	current_task()->messages_received++;
	return;
}
884
/*
 *	Routine:	ipc_mqueue_destroy
 *	Purpose:
 *		Destroy a message queue.  Set any blocked senders running.
 *		Destroy the kmsgs in the queue.
 *	Conditions:
 *		Nothing locked.
 *		Receivers were removed when the receive right was "changed"
 */
void
ipc_mqueue_destroy(
	ipc_mqueue_t	mqueue)
{
	ipc_kmsg_queue_t kmqueue;
	ipc_kmsg_t kmsg;
	spl_t s;


	s = splsched();
	imq_lock(mqueue);
	/*
	 * rouse all blocked senders
	 */
	mqueue->imq_fullwaiters = FALSE;
	wait_queue_wakeup64_all_locked(
				&mqueue->imq_wait_queue,
				IPC_MQUEUE_FULL,
				THREAD_AWAKENED,
				FALSE);

	kmqueue = &mqueue->imq_messages;

	/*
	 * Destroy each queued message.  The lock must be dropped for
	 * each destroy (it may block / take other locks), then retaken
	 * to dequeue the next message.
	 */
	while ((kmsg = ipc_kmsg_dequeue(kmqueue)) != IKM_NULL) {
		imq_unlock(mqueue);
		splx(s);

		ipc_kmsg_destroy_dest(kmsg);

		s = splsched();
		imq_lock(mqueue);
	}
	imq_unlock(mqueue);
	splx(s);
}
929
/*
 *	Routine:	ipc_mqueue_set_qlimit
 *	Purpose:
 *		Changes a message queue limit; the maximum number
 *		of messages which may be queued.  If the limit grows,
 *		one blocked sender is awakened (and granted a slot)
 *		per unit of new headroom.
 *	Conditions:
 *		Nothing locked.
 */

void
ipc_mqueue_set_qlimit(
	 ipc_mqueue_t			mqueue,
	 mach_port_msgcount_t		qlimit)
{
	 spl_t s;

	 assert(qlimit <= MACH_PORT_QLIMIT_MAX);

	 /* wake up senders allowed by the new qlimit */
	 s = splsched();
	 imq_lock(mqueue);
	 if (qlimit > mqueue->imq_qlimit) {
		 mach_port_msgcount_t i, wakeup;

		 /* caution: wakeup, qlimit are unsigned */
		 wakeup = qlimit - mqueue->imq_qlimit;

		 for (i = 0; i < wakeup; i++) {
			 if (wait_queue_wakeup64_one_locked(
							&mqueue->imq_wait_queue,
							IPC_MQUEUE_FULL,
							THREAD_AWAKENED,
							FALSE) == KERN_NOT_WAITING) {
					/* no more blocked senders */
					mqueue->imq_fullwaiters = FALSE;
					break;
			 }
			 mqueue->imq_msgcount++;  /* give it to the awakened thread */
		 }
	}
	mqueue->imq_qlimit = qlimit;
	imq_unlock(mqueue);
	splx(s);
}
973
974 /*
975 * Routine: ipc_mqueue_set_seqno
976 * Purpose:
977 * Changes an mqueue's sequence number.
978 * Conditions:
979 * Caller holds a reference to the queue's containing object.
980 */
981 void
982 ipc_mqueue_set_seqno(
983 ipc_mqueue_t mqueue,
984 mach_port_seqno_t seqno)
985 {
986 spl_t s;
987
988 s = splsched();
989 imq_lock(mqueue);
990 mqueue->imq_seqno = seqno;
991 imq_unlock(mqueue);
992 splx(s);
993 }
994
995
/*
 *	Routine:	ipc_mqueue_copyin
 *	Purpose:
 *		Convert a name in a space to a message queue.
 *	Conditions:
 *		Nothing locked.  If successful, the caller gets a ref for
 *		for the object.  This ref ensures the continued existence of
 *		the queue.
 *	Returns:
 *		MACH_MSG_SUCCESS	Found a message queue.
 *		MACH_RCV_INVALID_NAME	The space is dead.
 *		MACH_RCV_INVALID_NAME	The name doesn't denote a right.
 *		MACH_RCV_INVALID_NAME
 *			The denoted right is not receive or port set.
 *		MACH_RCV_IN_SET		Receive right is a member of a set.
 */

mach_msg_return_t
ipc_mqueue_copyin(
	ipc_space_t		space,
	mach_port_name_t	name,
	ipc_mqueue_t		*mqueuep,
	ipc_object_t		*objectp)
{
	ipc_entry_t entry;
	ipc_object_t object;
	ipc_mqueue_t mqueue;

	is_read_lock(space);
	if (!space->is_active) {
		is_read_unlock(space);
		return MACH_RCV_INVALID_NAME;
	}

	entry = ipc_entry_lookup(space, name);
	if (entry == IE_NULL) {
		is_read_unlock(space);
		return MACH_RCV_INVALID_NAME;
	}

	object = entry->ie_object;

	if (entry->ie_bits & MACH_PORT_TYPE_RECEIVE) {
		ipc_port_t port;

		port = (ipc_port_t) object;
		assert(port != IP_NULL);

		/* lock the port before releasing the space lock, so the
		 * entry cannot be torn down under us */
		ip_lock(port);
		assert(ip_active(port));
		assert(port->ip_receiver_name == name);
		assert(port->ip_receiver == space);
		is_read_unlock(space);
		mqueue = &port->ip_messages;

	} else if (entry->ie_bits & MACH_PORT_TYPE_PORT_SET) {
		ipc_pset_t pset;

		pset = (ipc_pset_t) object;
		assert(pset != IPS_NULL);

		/* same lock handoff as the receive-right case above */
		ips_lock(pset);
		assert(ips_active(pset));
		assert(pset->ips_local_name == name);
		is_read_unlock(space);

		mqueue = &pset->ips_messages;
	} else {
		is_read_unlock(space);
		return MACH_RCV_INVALID_NAME;
	}

	/*
	 * At this point, the object is locked and active,
	 * the space is unlocked, and mqueue is initialized.
	 */

	io_reference(object);
	io_unlock(object);

	*objectp = object;
	*mqueuep = mqueue;
	return MACH_MSG_SUCCESS;
}
1080