/*
 * File: osfmk/ipc/ipc_mqueue.c (from xnu-3789.21.4)
 */
1c79356b 1/*
2d21ac55 2 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
1c79356b 3 *
2d21ac55 4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
1c79356b 5 *
2d21ac55
A
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
8f6c56a5 14 *
2d21ac55
A
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
8f6c56a5
A
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
2d21ac55
A
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
8f6c56a5 25 *
2d21ac55 26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
1c79356b
A
27 */
28/*
29 * @OSF_FREE_COPYRIGHT@
30 */
31/*
32 * Mach Operating System
33 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
34 * All Rights Reserved.
35 *
36 * Permission to use, copy, modify and distribute this software and its
37 * documentation is hereby granted, provided that both the copyright
38 * notice and this permission notice appear in all copies of the
39 * software, derivative works or modified versions, and any portions
40 * thereof, and that both notices appear in supporting documentation.
41 *
42 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
43 * CONDITION. CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
44 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
45 *
46 * Carnegie Mellon requests users of this software to return to
47 *
48 * Software Distribution Coordinator or Software.Distribution@CS.CMU.EDU
49 * School of Computer Science
50 * Carnegie Mellon University
51 * Pittsburgh PA 15213-3890
52 *
53 * any improvements or extensions that they make and grant Carnegie Mellon
54 * the rights to redistribute these changes.
55 */
56/*
57 */
58/*
59 * File: ipc/ipc_mqueue.c
60 * Author: Rich Draves
61 * Date: 1989
62 *
63 * Functions to manipulate IPC message queues.
64 */
2d21ac55
A
65/*
66 * NOTICE: This file was modified by SPARTA, Inc. in 2006 to introduce
67 * support for mandatory and extensible security protections. This notice
68 * is included in support of clause 2.2 (b) of the Apple Public License,
69 * Version 2.0.
70 */
71
1c79356b
A
72
73#include <mach/port.h>
74#include <mach/message.h>
75#include <mach/sync_policy.h>
76
77#include <kern/assert.h>
78#include <kern/counters.h>
79#include <kern/sched_prim.h>
80#include <kern/ipc_kobject.h>
91447636 81#include <kern/ipc_mig.h> /* XXX - for mach_msg_receive_continue */
1c79356b
A
82#include <kern/misc_protos.h>
83#include <kern/task.h>
84#include <kern/thread.h>
3e170ce0 85#include <kern/waitq.h>
1c79356b
A
86
87#include <ipc/ipc_mqueue.h>
88#include <ipc/ipc_kmsg.h>
89#include <ipc/ipc_port.h>
90#include <ipc/ipc_pset.h>
91#include <ipc/ipc_space.h>
92
39037602
A
93#if MACH_FLIPC
94#include <ipc/flipc.h>
95#endif
96
b0d623f7
A
97#ifdef __LP64__
98#include <vm/vm_map.h>
99#endif
1c79356b 100
39037602
A
101#include <sys/event.h>
102
103extern char *proc_name_address(void *p);
104
1c79356b
A
105int ipc_mqueue_full; /* address is event for queue space */
106int ipc_mqueue_rcv; /* address is event for message arrival */
107
91447636
A
108/* forward declarations */
109void ipc_mqueue_receive_results(wait_result_t result);
39037602
A
110static void ipc_mqueue_peek_on_thread(
111 ipc_mqueue_t port_mq,
112 mach_msg_option_t option,
113 thread_t thread);
91447636 114
1c79356b
A
115/*
116 * Routine: ipc_mqueue_init
117 * Purpose:
118 * Initialize a newly-allocated message queue.
119 */
120void
121ipc_mqueue_init(
122 ipc_mqueue_t mqueue,
3e170ce0
A
123 boolean_t is_set,
124 uint64_t *reserved_link)
1c79356b
A
125{
126 if (is_set) {
3e170ce0 127 waitq_set_init(&mqueue->imq_set_queue,
39037602
A
128 SYNC_POLICY_FIFO|SYNC_POLICY_PREPOST,
129 reserved_link, NULL);
1c79356b 130 } else {
39037602 131 waitq_init(&mqueue->imq_wait_queue, SYNC_POLICY_FIFO);
1c79356b
A
132 ipc_kmsg_queue_init(&mqueue->imq_messages);
133 mqueue->imq_seqno = 0;
134 mqueue->imq_msgcount = 0;
135 mqueue->imq_qlimit = MACH_PORT_QLIMIT_DEFAULT;
136 mqueue->imq_fullwaiters = FALSE;
39037602
A
137#if MACH_FLIPC
138 mqueue->imq_fport = FPORT_NULL;
139#endif
1c79356b 140 }
39037602 141 klist_init(&mqueue->imq_klist);
1c79356b
A
142}
143
3e170ce0
A
144void ipc_mqueue_deinit(
145 ipc_mqueue_t mqueue)
146{
147 boolean_t is_set = imq_is_set(mqueue);
148
149 if (is_set)
150 waitq_set_deinit(&mqueue->imq_set_queue);
151 else
152 waitq_deinit(&mqueue->imq_wait_queue);
153}
154
155/*
156 * Routine: imq_reserve_and_lock
157 * Purpose:
158 * Atomically lock an ipc_mqueue_t object and reserve
159 * an appropriate number of prepost linkage objects for
160 * use in wakeup operations.
161 * Conditions:
162 * mq is unlocked
163 */
164void
39037602 165imq_reserve_and_lock(ipc_mqueue_t mq, uint64_t *reserved_prepost)
3e170ce0
A
166{
167 *reserved_prepost = waitq_prepost_reserve(&mq->imq_wait_queue, 0,
39037602 168 WAITQ_KEEP_LOCKED);
3e170ce0
A
169
170}
171
172
173/*
174 * Routine: imq_release_and_unlock
175 * Purpose:
176 * Unlock an ipc_mqueue_t object, re-enable interrupts,
177 * and release any unused prepost object reservations.
178 * Conditions:
179 * mq is locked
180 */
181void
39037602 182imq_release_and_unlock(ipc_mqueue_t mq, uint64_t reserved_prepost)
3e170ce0
A
183{
184 assert(imq_held(mq));
185 waitq_unlock(&mq->imq_wait_queue);
3e170ce0
A
186 waitq_prepost_release_reserve(reserved_prepost);
187}
188
189
1c79356b
A
190/*
191 * Routine: ipc_mqueue_member
192 * Purpose:
193 * Indicate whether the (port) mqueue is a member of
194 * this portset's mqueue. We do this by checking
195 * whether the portset mqueue's waitq is an member of
196 * the port's mqueue waitq.
197 * Conditions:
198 * the portset's mqueue is not already a member
199 * this may block while allocating linkage structures.
200 */
201
202boolean_t
203ipc_mqueue_member(
91447636
A
204 ipc_mqueue_t port_mqueue,
205 ipc_mqueue_t set_mqueue)
1c79356b 206{
3e170ce0
A
207 struct waitq *port_waitq = &port_mqueue->imq_wait_queue;
208 struct waitq_set *set_waitq = &set_mqueue->imq_set_queue;
1c79356b 209
3e170ce0 210 return waitq_member(port_waitq, set_waitq);
1c79356b
A
211
212}
213
214/*
215 * Routine: ipc_mqueue_remove
216 * Purpose:
217 * Remove the association between the queue and the specified
9bccf70c 218 * set message queue.
1c79356b
A
219 */
220
221kern_return_t
222ipc_mqueue_remove(
316670eb 223 ipc_mqueue_t mqueue,
3e170ce0 224 ipc_mqueue_t set_mqueue)
1c79356b 225{
3e170ce0
A
226 struct waitq *mq_waitq = &mqueue->imq_wait_queue;
227 struct waitq_set *set_waitq = &set_mqueue->imq_set_queue;
1c79356b 228
3e170ce0 229 return waitq_unlink(mq_waitq, set_waitq);
1c79356b
A
230}
231
232/*
9bccf70c 233 * Routine: ipc_mqueue_remove_from_all
1c79356b 234 * Purpose:
9bccf70c 235 * Remove the mqueue from all the sets it is a member of
1c79356b 236 * Conditions:
9bccf70c 237 * Nothing locked.
39037602
A
238 * Returns:
239 * mqueue unlocked and set links deallocated
1c79356b
A
240 */
241void
3e170ce0 242ipc_mqueue_remove_from_all(ipc_mqueue_t mqueue)
1c79356b 243{
3e170ce0 244 struct waitq *mq_waitq = &mqueue->imq_wait_queue;
39037602 245 kern_return_t kr;
1c79356b 246
39037602
A
247 imq_lock(mqueue);
248
249 assert(waitq_valid(mq_waitq));
250 kr = waitq_unlink_all_unlock(mq_waitq);
251 /* mqueue unlocked and set links deallocated */
9bccf70c
A
252}
253
254/*
255 * Routine: ipc_mqueue_remove_all
256 * Purpose:
257 * Remove all the member queues from the specified set.
3e170ce0 258 * Also removes the queue from any containing sets.
9bccf70c
A
259 * Conditions:
260 * Nothing locked.
39037602
A
261 * Returns:
262 * mqueue unlocked all set links deallocated
9bccf70c
A
263 */
264void
3e170ce0 265ipc_mqueue_remove_all(ipc_mqueue_t mqueue)
9bccf70c 266{
3e170ce0 267 struct waitq_set *mq_setq = &mqueue->imq_set_queue;
39037602
A
268
269 imq_lock(mqueue);
270 assert(waitqs_is_set(mq_setq));
271 waitq_set_unlink_all_unlock(mq_setq);
272 /* mqueue unlocked set links deallocated */
1c79356b
A
273}
274
275
276/*
277 * Routine: ipc_mqueue_add
278 * Purpose:
279 * Associate the portset's mqueue with the port's mqueue.
280 * This has to be done so that posting the port will wakeup
281 * a portset waiter. If there are waiters on the portset
282 * mqueue and messages on the port mqueue, try to match them
283 * up now.
284 * Conditions:
285 * May block.
286 */
287kern_return_t
288ipc_mqueue_add(
3e170ce0
A
289 ipc_mqueue_t port_mqueue,
290 ipc_mqueue_t set_mqueue,
291 uint64_t *reserved_link,
292 uint64_t *reserved_prepost)
1c79356b 293{
3e170ce0
A
294 struct waitq *port_waitq = &port_mqueue->imq_wait_queue;
295 struct waitq_set *set_waitq = &set_mqueue->imq_set_queue;
1c79356b
A
296 ipc_kmsg_queue_t kmsgq;
297 ipc_kmsg_t kmsg, next;
298 kern_return_t kr;
1c79356b 299
3e170ce0
A
300 assert(reserved_link && *reserved_link != 0);
301
3e170ce0
A
302 imq_lock(port_mqueue);
303
304 /*
305 * The link operation is now under the same lock-hold as
306 * message iteration and thread wakeup, but doesn't have to be...
307 */
308 kr = waitq_link(port_waitq, set_waitq, WAITQ_ALREADY_LOCKED, reserved_link);
309 if (kr != KERN_SUCCESS) {
310 imq_unlock(port_mqueue);
1c79356b 311 return kr;
3e170ce0 312 }
1c79356b
A
313
314 /*
315 * Now that the set has been added to the port, there may be
316 * messages queued on the port and threads waiting on the set
317 * waitq. Lets get them together.
318 */
1c79356b
A
319 kmsgq = &port_mqueue->imq_messages;
320 for (kmsg = ipc_kmsg_queue_first(kmsgq);
321 kmsg != IKM_NULL;
322 kmsg = next) {
323 next = ipc_kmsg_queue_next(kmsgq, kmsg);
324
325 for (;;) {
326 thread_t th;
b0d623f7 327 mach_msg_size_t msize;
3e170ce0 328 spl_t th_spl;
1c79356b 329
39037602 330 th = waitq_wakeup64_identify_locked(
9bccf70c
A
331 port_waitq,
332 IPC_MQUEUE_RECEIVE,
3e170ce0 333 THREAD_AWAKENED, &th_spl,
39037602
A
334 reserved_prepost, WAITQ_ALL_PRIORITIES,
335 WAITQ_KEEP_LOCKED);
1c79356b
A
336 /* waitq/mqueue still locked, thread locked */
337
338 if (th == THREAD_NULL)
339 goto leave;
340
b0d623f7
A
341 /*
342 * If the receiver waited with a facility not directly
343 * related to Mach messaging, then it isn't prepared to get
344 * handed the message directly. Just set it running, and
345 * go look for another thread that can.
346 */
347 if (th->ith_state != MACH_RCV_IN_PROGRESS) {
39037602
A
348 if (th->ith_state == MACH_PEEK_IN_PROGRESS) {
349 /*
350 * wakeup the peeking thread, but
351 * continue to loop over the threads
352 * waiting on the port's mqueue to see
353 * if there are any actual receivers
354 */
355 ipc_mqueue_peek_on_thread(port_mqueue,
356 th->ith_option,
357 th);
358 }
359 thread_unlock(th);
360 splx(th_spl);
361 continue;
b0d623f7
A
362 }
363
1c79356b
A
364 /*
365 * Found a receiver. see if they can handle the message
366 * correctly (the message is not too large for them, or
367 * they didn't care to be informed that the message was
368 * too large). If they can't handle it, take them off
369 * the list and let them go back and figure it out and
370 * just move onto the next.
371 */
b0d623f7 372 msize = ipc_kmsg_copyout_size(kmsg, th->map);
39037602 373 if (th->ith_rsize <
316670eb 374 (msize + REQUESTED_TRAILER_SIZE(thread_is_64bit(th), th->ith_option))) {
1c79356b 375 th->ith_state = MACH_RCV_TOO_LARGE;
b0d623f7 376 th->ith_msize = msize;
1c79356b
A
377 if (th->ith_option & MACH_RCV_LARGE) {
378 /*
379 * let him go without message
380 */
b0d623f7 381 th->ith_receiver_name = port_mqueue->imq_receiver_name;
1c79356b
A
382 th->ith_kmsg = IKM_NULL;
383 th->ith_seqno = 0;
384 thread_unlock(th);
3e170ce0 385 splx(th_spl);
1c79356b
A
386 continue; /* find another thread */
387 }
388 } else {
389 th->ith_state = MACH_MSG_SUCCESS;
390 }
391
392 /*
393 * This thread is going to take this message,
394 * so give it to him.
395 */
1c79356b 396 ipc_kmsg_rmqueue(kmsgq, kmsg);
39037602
A
397#if MACH_FLIPC
398 mach_node_t node = kmsg->ikm_node;
399#endif
3e170ce0 400 ipc_mqueue_release_msgcount(port_mqueue, IMQ_NULL);
91447636 401
1c79356b
A
402 th->ith_kmsg = kmsg;
403 th->ith_seqno = port_mqueue->imq_seqno++;
404 thread_unlock(th);
3e170ce0 405 splx(th_spl);
39037602
A
406#if MACH_FLIPC
407 if (MACH_NODE_VALID(node) && FPORT_VALID(port_mqueue->imq_fport))
408 flipc_msg_ack(node, port_mqueue, TRUE);
409#endif
1c79356b
A
410 break; /* go to next message */
411 }
1c79356b
A
412 }
413 leave:
414 imq_unlock(port_mqueue);
1c79356b
A
415 return KERN_SUCCESS;
416}
417
418/*
419 * Routine: ipc_mqueue_changed
420 * Purpose:
421 * Wake up receivers waiting in a message queue.
422 * Conditions:
423 * The message queue is locked.
424 */
425
426void
427ipc_mqueue_changed(
428 ipc_mqueue_t mqueue)
429{
39037602
A
430 /* Indicate that this message queue is vanishing */
431 knote_vanish(&mqueue->imq_klist);
432
3e170ce0
A
433 waitq_wakeup64_all_locked(&mqueue->imq_wait_queue,
434 IPC_MQUEUE_RECEIVE,
435 THREAD_RESTART,
436 NULL,
437 WAITQ_ALL_PRIORITIES,
438 WAITQ_KEEP_LOCKED);
1c79356b
A
439}
440
441
442
443
444/*
445 * Routine: ipc_mqueue_send
446 * Purpose:
447 * Send a message to a message queue. The message holds a reference
448 * for the destination port for this message queue in the
449 * msgh_remote_port field.
450 *
451 * If unsuccessful, the caller still has possession of
452 * the message and must do something with it. If successful,
453 * the message is queued, given to a receiver, or destroyed.
454 * Conditions:
39236c6e 455 * mqueue is locked.
1c79356b
A
456 * Returns:
457 * MACH_MSG_SUCCESS The message was accepted.
458 * MACH_SEND_TIMED_OUT Caller still has message.
459 * MACH_SEND_INTERRUPTED Caller still has message.
460 */
461mach_msg_return_t
462ipc_mqueue_send(
463 ipc_mqueue_t mqueue,
b0d623f7 464 ipc_kmsg_t kmsg,
1c79356b 465 mach_msg_option_t option,
39037602 466 mach_msg_timeout_t send_timeout)
1c79356b 467{
9bccf70c 468 int wresult;
1c79356b
A
469
470 /*
471 * Don't block if:
472 * 1) We're under the queue limit.
473 * 2) Caller used the MACH_SEND_ALWAYS internal option.
474 * 3) Message is sent to a send-once right.
475 */
1c79356b 476 if (!imq_full(mqueue) ||
c910b4d9
A
477 (!imq_full_kernel(mqueue) &&
478 ((option & MACH_SEND_ALWAYS) ||
479 (MACH_MSGH_BITS_REMOTE(kmsg->ikm_header->msgh_bits) ==
480 MACH_MSG_TYPE_PORT_SEND_ONCE)))) {
1c79356b 481 mqueue->imq_msgcount++;
91447636 482 assert(mqueue->imq_msgcount > 0);
1c79356b 483 imq_unlock(mqueue);
1c79356b 484 } else {
55e303ae 485 thread_t cur_thread = current_thread();
91447636 486 uint64_t deadline;
1c79356b
A
487
488 /*
489 * We have to wait for space to be granted to us.
490 */
91447636 491 if ((option & MACH_SEND_TIMEOUT) && (send_timeout == 0)) {
1c79356b 492 imq_unlock(mqueue);
1c79356b
A
493 return MACH_SEND_TIMED_OUT;
494 }
c910b4d9
A
495 if (imq_full_kernel(mqueue)) {
496 imq_unlock(mqueue);
c910b4d9
A
497 return MACH_SEND_NO_BUFFER;
498 }
1c79356b 499 mqueue->imq_fullwaiters = TRUE;
39037602 500
91447636
A
501 if (option & MACH_SEND_TIMEOUT)
502 clock_interval_to_deadline(send_timeout, 1000*NSEC_PER_USEC, &deadline);
503 else
504 deadline = 0;
3e170ce0 505 wresult = waitq_assert_wait64_locked(
9bccf70c
A
506 &mqueue->imq_wait_queue,
507 IPC_MQUEUE_FULL,
39236c6e
A
508 THREAD_ABORTSAFE,
509 TIMEOUT_URGENCY_USER_NORMAL,
3e170ce0 510 deadline, TIMEOUT_NO_LEEWAY,
55e303ae 511 cur_thread);
39037602 512
55e303ae 513 imq_unlock(mqueue);
1c79356b 514
9bccf70c 515 if (wresult == THREAD_WAITING) {
91447636 516 wresult = thread_block(THREAD_CONTINUE_NULL);
9bccf70c
A
517 counter(c_ipc_mqueue_send_block++);
518 }
1c79356b 519
9bccf70c 520 switch (wresult) {
3e170ce0
A
521
522 case THREAD_AWAKENED:
523 /*
524 * we can proceed - inherited msgcount from waker
525 * or the message queue has been destroyed and the msgcount
526 * has been reset to zero (will detect in ipc_mqueue_post()).
527 */
528 break;
529
1c79356b
A
530 case THREAD_TIMED_OUT:
531 assert(option & MACH_SEND_TIMEOUT);
532 return MACH_SEND_TIMED_OUT;
533
1c79356b 534 case THREAD_INTERRUPTED:
1c79356b
A
535 return MACH_SEND_INTERRUPTED;
536
537 case THREAD_RESTART:
b0d623f7
A
538 /* mqueue is being destroyed */
539 return MACH_SEND_INVALID_DEST;
1c79356b
A
540 default:
541 panic("ipc_mqueue_send");
542 }
543 }
544
39037602 545 ipc_mqueue_post(mqueue, kmsg, option);
1c79356b
A
546 return MACH_MSG_SUCCESS;
547}
548
39037602
A
549/*
550 * Routine: ipc_mqueue_override_send
551 * Purpose:
552 * Set an override qos on the first message in the queue
553 * (if the queue is full). This is a send-possible override
554 * that will go away as soon as we drain a message from the
555 * queue.
556 *
557 * Conditions:
558 * The message queue is not locked.
559 * The caller holds a reference on the message queue.
560 */
561extern void ipc_mqueue_override_send(
562 ipc_mqueue_t mqueue,
563 mach_msg_priority_t override)
564{
565 boolean_t __unused full_queue_empty = FALSE;
566
567 imq_lock(mqueue);
568 assert(imq_valid(mqueue));
569 assert(!imq_is_set(mqueue));
570
571 if (imq_full(mqueue)) {
572 ipc_kmsg_t first = ipc_kmsg_queue_first(&mqueue->imq_messages);
573
574 if (first && ipc_kmsg_override_qos(&mqueue->imq_messages, first, override))
575 KNOTE(&mqueue->imq_klist, 0);
576 if (!first)
577 full_queue_empty = TRUE;
578 }
579 imq_unlock(mqueue);
580
581#if DEVELOPMENT || DEBUG
582 if (full_queue_empty) {
583 ipc_port_t port = ip_from_mq(mqueue);
584 int dst_pid = 0;
585 if (ip_active(port) && !port->ip_tempowner &&
586 port->ip_receiver_name && port->ip_receiver &&
587 port->ip_receiver != ipc_space_kernel) {
588 dst_pid = task_pid(port->ip_receiver->is_task);
589 }
590 printf("%s[%d] could not override mqueue (dst:%d) with 0x%x: "
591 "queue slots are full, but there are no messages!\n",
592 proc_name_address(current_task()->bsd_info),
593 task_pid(current_task()), dst_pid, override);
594 }
595#endif
596}
39236c6e 597
1c79356b
A
598/*
599 * Routine: ipc_mqueue_release_msgcount
600 * Purpose:
601 * Release a message queue reference in the case where we
602 * found a waiter.
603 *
604 * Conditions:
91447636
A
605 * The message queue is locked.
606 * The message corresponding to this reference is off the queue.
3e170ce0
A
607 * There is no need to pass reserved preposts because this will
608 * never prepost to anyone
1c79356b
A
609 */
610void
3e170ce0 611ipc_mqueue_release_msgcount(ipc_mqueue_t port_mq, ipc_mqueue_t set_mq)
1c79356b 612{
3e170ce0
A
613 (void)set_mq;
614 assert(imq_held(port_mq));
615 assert(port_mq->imq_msgcount > 1 || ipc_kmsg_queue_empty(&port_mq->imq_messages));
1c79356b 616
3e170ce0 617 port_mq->imq_msgcount--;
91447636 618
3e170ce0
A
619 if (!imq_full(port_mq) && port_mq->imq_fullwaiters) {
620 /*
621 * boost the priority of the awoken thread
622 * (WAITQ_PROMOTE_PRIORITY) to ensure it uses
623 * the message queue slot we've just reserved.
624 *
625 * NOTE: this will never prepost
626 */
627 if (waitq_wakeup64_one_locked(&port_mq->imq_wait_queue,
628 IPC_MQUEUE_FULL,
629 THREAD_AWAKENED,
630 NULL,
631 WAITQ_PROMOTE_PRIORITY,
632 WAITQ_KEEP_LOCKED) != KERN_SUCCESS) {
633 port_mq->imq_fullwaiters = FALSE;
1c79356b 634 } else {
91447636 635 /* gave away our slot - add reference back */
3e170ce0 636 port_mq->imq_msgcount++;
1c79356b
A
637 }
638 }
3e170ce0
A
639
640 if (ipc_kmsg_queue_empty(&port_mq->imq_messages)) {
641 /* no more msgs: invalidate the port's prepost object */
39037602 642 waitq_clear_prepost_locked(&port_mq->imq_wait_queue);
3e170ce0 643 }
1c79356b
A
644}
645
646/*
647 * Routine: ipc_mqueue_post
648 * Purpose:
649 * Post a message to a waiting receiver or enqueue it. If a
650 * receiver is waiting, we can release our reserved space in
651 * the message queue.
652 *
653 * Conditions:
3e170ce0 654 * mqueue is unlocked
1c79356b
A
655 * If we need to queue, our space in the message queue is reserved.
656 */
657void
658ipc_mqueue_post(
39037602
A
659 ipc_mqueue_t mqueue,
660 ipc_kmsg_t kmsg,
661 mach_msg_option_t __unused option)
1c79356b 662{
3e170ce0 663 uint64_t reserved_prepost = 0;
39037602
A
664 boolean_t destroy_msg = FALSE;
665
666 ipc_kmsg_trace_send(kmsg, option);
1c79356b
A
667
668 /*
669 * While the msg queue is locked, we have control of the
670 * kmsg, so the ref in it for the port is still good.
671 *
672 * Check for a receiver for the message.
673 */
39037602
A
674 imq_reserve_and_lock(mqueue, &reserved_prepost);
675
676 /* we may have raced with port destruction! */
677 if (!imq_valid(mqueue)) {
678 destroy_msg = TRUE;
679 goto out_unlock;
680 }
681
1c79356b 682 for (;;) {
3e170ce0
A
683 struct waitq *waitq = &mqueue->imq_wait_queue;
684 spl_t th_spl;
1c79356b 685 thread_t receiver;
b0d623f7 686 mach_msg_size_t msize;
1c79356b 687
39037602
A
688 receiver = waitq_wakeup64_identify_locked(waitq,
689 IPC_MQUEUE_RECEIVE,
690 THREAD_AWAKENED,
691 &th_spl,
692 &reserved_prepost,
693 WAITQ_ALL_PRIORITIES,
694 WAITQ_KEEP_LOCKED);
1c79356b
A
695 /* waitq still locked, thread locked */
696
697 if (receiver == THREAD_NULL) {
39037602 698
1c79356b 699 /*
39037602
A
700 * no receivers; queue kmsg if space still reserved
701 * Reservations are cancelled when the port goes inactive.
702 * note that this will enqueue the message for any
703 * "peeking" receivers.
704 *
705 * Also, post the knote to wake up any threads waiting
706 * on that style of interface if this insertion is of
707 * note (first insertion, or adjusted override qos all
708 * the way to the head of the queue).
709 *
710 * This is just for ports. portset knotes are stay-active,
711 * and their threads get awakened through the !MACH_RCV_IN_PROGRESS
712 * logic below).
1c79356b 713 */
3e170ce0 714 if (mqueue->imq_msgcount > 0) {
39037602
A
715 if (ipc_kmsg_enqueue_qos(&mqueue->imq_messages, kmsg))
716 KNOTE(&mqueue->imq_klist, 0);
3e170ce0
A
717 break;
718 }
719
720 /*
721 * Otherwise, the message queue must belong to an inactive
722 * port, so just destroy the message and pretend it was posted.
723 */
39037602
A
724 destroy_msg = TRUE;
725 goto out_unlock;
1c79356b 726 }
b0d623f7
A
727
728 /*
39037602
A
729 * If a thread is attempting a "peek" into the message queue
730 * (MACH_PEEK_IN_PROGRESS), then we enqueue the message and set the
731 * thread running. A successful peek is essentially the same as
732 * message delivery since the peeking thread takes responsibility
733 * for delivering the message and (eventually) removing it from
734 * the mqueue. Only one thread can successfully use the peek
735 * facility on any given port, so we exit the waitq loop after
736 * encountering such a thread.
737 */
738 if (receiver->ith_state == MACH_PEEK_IN_PROGRESS && mqueue->imq_msgcount > 0) {
739 ipc_kmsg_enqueue_qos(&mqueue->imq_messages, kmsg);
740 ipc_mqueue_peek_on_thread(mqueue, receiver->ith_option, receiver);
741 thread_unlock(receiver);
742 splx(th_spl);
743 break; /* Message was posted, so break out of loop */
744 }
745
746 /*
747 * If the receiver waited with a facility not directly related
748 * to Mach messaging, then it isn't prepared to get handed the
749 * message directly. Just set it running, and go look for
750 * another thread that can.
b0d623f7
A
751 */
752 if (receiver->ith_state != MACH_RCV_IN_PROGRESS) {
39037602
A
753 thread_unlock(receiver);
754 splx(th_spl);
755 continue;
b0d623f7
A
756 }
757
758
1c79356b
A
759 /*
760 * We found a waiting thread.
761 * If the message is too large or the scatter list is too small
762 * the thread we wake up will get that as its status.
763 */
b0d623f7 764 msize = ipc_kmsg_copyout_size(kmsg, receiver->map);
39037602 765 if (receiver->ith_rsize <
316670eb 766 (msize + REQUESTED_TRAILER_SIZE(thread_is_64bit(receiver), receiver->ith_option))) {
b0d623f7 767 receiver->ith_msize = msize;
1c79356b
A
768 receiver->ith_state = MACH_RCV_TOO_LARGE;
769 } else {
770 receiver->ith_state = MACH_MSG_SUCCESS;
771 }
772
773 /*
774 * If there is no problem with the upcoming receive, or the
775 * receiver thread didn't specifically ask for special too
776 * large error condition, go ahead and select it anyway.
777 */
778 if ((receiver->ith_state == MACH_MSG_SUCCESS) ||
779 !(receiver->ith_option & MACH_RCV_LARGE)) {
1c79356b
A
780 receiver->ith_kmsg = kmsg;
781 receiver->ith_seqno = mqueue->imq_seqno++;
39037602
A
782#if MACH_FLIPC
783 mach_node_t node = kmsg->ikm_node;
784#endif
1c79356b 785 thread_unlock(receiver);
3e170ce0 786 splx(th_spl);
1c79356b
A
787
788 /* we didn't need our reserved spot in the queue */
3e170ce0 789 ipc_mqueue_release_msgcount(mqueue, IMQ_NULL);
39037602
A
790
791#if MACH_FLIPC
792 if (MACH_NODE_VALID(node) && FPORT_VALID(mqueue->imq_fport))
793 flipc_msg_ack(node, mqueue, TRUE);
794#endif
1c79356b
A
795 break;
796 }
797
798 /*
799 * Otherwise, this thread needs to be released to run
800 * and handle its error without getting the message. We
801 * need to go back and pick another one.
802 */
39236c6e 803 receiver->ith_receiver_name = mqueue->imq_receiver_name;
1c79356b
A
804 receiver->ith_kmsg = IKM_NULL;
805 receiver->ith_seqno = 0;
806 thread_unlock(receiver);
3e170ce0 807 splx(th_spl);
1c79356b
A
808 }
809
39037602 810out_unlock:
3e170ce0
A
811 /* clear the waitq boost we may have been given */
812 waitq_clear_promotion_locked(&mqueue->imq_wait_queue, current_thread());
39037602
A
813 imq_release_and_unlock(mqueue, reserved_prepost);
814 if (destroy_msg)
815 ipc_kmsg_destroy(kmsg);
816
1c79356b
A
817 current_task()->messages_sent++;
818 return;
819}
820
821
91447636
A
822/* static */ void
823ipc_mqueue_receive_results(wait_result_t saved_wait_result)
1c79356b
A
824{
825 thread_t self = current_thread();
826 mach_msg_option_t option = self->ith_option;
1c79356b
A
827
828 /*
829 * why did we wake up?
830 */
831 switch (saved_wait_result) {
832 case THREAD_TIMED_OUT:
833 self->ith_state = MACH_RCV_TIMED_OUT;
834 return;
835
836 case THREAD_INTERRUPTED:
1c79356b
A
837 self->ith_state = MACH_RCV_INTERRUPTED;
838 return;
839
840 case THREAD_RESTART:
841 /* something bad happened to the port/set */
1c79356b
A
842 self->ith_state = MACH_RCV_PORT_CHANGED;
843 return;
844
845 case THREAD_AWAKENED:
846 /*
847 * We do not need to go select a message, somebody
848 * handed us one (or a too-large indication).
849 */
1c79356b
A
850 switch (self->ith_state) {
851 case MACH_RCV_SCATTER_SMALL:
852 case MACH_RCV_TOO_LARGE:
853 /*
854 * Somebody tried to give us a too large
855 * message. If we indicated that we cared,
856 * then they only gave us the indication,
857 * otherwise they gave us the indication
858 * AND the message anyway.
859 */
860 if (option & MACH_RCV_LARGE) {
861 return;
862 }
863
864 case MACH_MSG_SUCCESS:
39037602 865 case MACH_PEEK_READY:
1c79356b
A
866 return;
867
868 default:
869 panic("ipc_mqueue_receive_results: strange ith_state");
870 }
871
872 default:
873 panic("ipc_mqueue_receive_results: strange wait_result");
874 }
875}
876
877void
91447636
A
878ipc_mqueue_receive_continue(
879 __unused void *param,
880 wait_result_t wresult)
1c79356b 881{
91447636 882 ipc_mqueue_receive_results(wresult);
1c79356b
A
883 mach_msg_receive_continue(); /* hard-coded for now */
884}
885
886/*
887 * Routine: ipc_mqueue_receive
888 * Purpose:
889 * Receive a message from a message queue.
890 *
1c79356b
A
891 * Conditions:
892 * Our caller must hold a reference for the port or port set
893 * to which this queue belongs, to keep the queue
894 * from being deallocated.
895 *
896 * The kmsg is returned with clean header fields
39037602
A
897 * and with the circular bit turned off through the ith_kmsg
898 * field of the thread's receive continuation state.
1c79356b 899 * Returns:
39037602
A
900 * MACH_MSG_SUCCESS Message returned in ith_kmsg.
901 * MACH_RCV_TOO_LARGE Message size returned in ith_msize.
1c79356b
A
902 * MACH_RCV_TIMED_OUT No message obtained.
903 * MACH_RCV_INTERRUPTED No message obtained.
904 * MACH_RCV_PORT_DIED Port/set died; no message.
905 * MACH_RCV_PORT_CHANGED Port moved into set; no msg.
906 *
907 */
908
909void
910ipc_mqueue_receive(
b0d623f7
A
911 ipc_mqueue_t mqueue,
912 mach_msg_option_t option,
913 mach_msg_size_t max_size,
914 mach_msg_timeout_t rcv_timeout,
915 int interruptible)
916{
917 wait_result_t wresult;
39037602
A
918 thread_t self = current_thread();
919
920 imq_lock(mqueue);
921 wresult = ipc_mqueue_receive_on_thread(mqueue, option, max_size,
922 rcv_timeout, interruptible,
923 self);
924 /* mqueue unlocked */
925 if (wresult == THREAD_NOT_WAITING)
926 return;
b0d623f7
A
927
928 if (wresult == THREAD_WAITING) {
929 counter((interruptible == THREAD_ABORTSAFE) ?
930 c_ipc_mqueue_receive_block_user++ :
931 c_ipc_mqueue_receive_block_kernel++);
932
933 if (self->ith_continuation)
934 thread_block(ipc_mqueue_receive_continue);
935 /* NOTREACHED */
936
937 wresult = thread_block(THREAD_CONTINUE_NULL);
938 }
939 ipc_mqueue_receive_results(wresult);
940}
941
3e170ce0
A
942static int mqueue_process_prepost_receive(void *ctx, struct waitq *waitq,
943 struct waitq_set *wqset)
944{
945 ipc_mqueue_t port_mq, *pmq_ptr;
946
947 (void)wqset;
948 port_mq = (ipc_mqueue_t)waitq;
949
950 /*
951 * If there are no messages on this queue, skip it and remove
952 * it from the prepost list
953 */
954 if (ipc_kmsg_queue_empty(&port_mq->imq_messages))
955 return WQ_ITERATE_INVALIDATE_CONTINUE;
956
957 /*
958 * There are messages waiting on this port.
959 * Instruct the prepost iteration logic to break, but keep the
960 * waitq locked.
961 */
962 pmq_ptr = (ipc_mqueue_t *)ctx;
963 if (pmq_ptr)
964 *pmq_ptr = port_mq;
965 return WQ_ITERATE_BREAK_KEEP_LOCKED;
966}
967
39037602
A
/*
 *	Routine:	ipc_mqueue_receive_on_thread
 *	Purpose:
 *		Receive a message from a message queue using a specified thread.
 *		If no message available, assert_wait on the appropriate waitq.
 *
 *	Conditions:
 *		Assumes thread is self.
 *		Called with mqueue locked.
 *		Returns with mqueue unlocked.
 *		May have assert-waited. Caller must block in those cases.
 */
wait_result_t
ipc_mqueue_receive_on_thread(
	ipc_mqueue_t		mqueue,
	mach_msg_option_t	option,
	mach_msg_size_t		max_size,
	mach_msg_timeout_t	rcv_timeout,
	int			interruptible,
	thread_t		thread)
{
	wait_result_t		wresult;
	uint64_t		deadline;

	/* called with mqueue locked */

	/* no need to reserve anything: we never prepost to anyone */

	if (!imq_valid(mqueue)) {
		/* someone raced us to destroy this mqueue/port! */
		imq_unlock(mqueue);
		/*
		 * ipc_mqueue_receive_results updates the thread's ith_state
		 * TODO: differentiate between rights being moved and
		 * rights/ports being destroyed (21885327)
		 */
		return THREAD_RESTART;
	}

	if (imq_is_set(mqueue)) {
		/*
		 * Receiving on a port set: walk the set's prepost list to
		 * find a member port that already has a queued message.
		 * The iterator leaves that port's mqueue locked for us.
		 */
		ipc_mqueue_t port_mq = IMQ_NULL;

		(void)waitq_set_iterate_preposts(&mqueue->imq_set_queue,
						 &port_mq,
						 mqueue_process_prepost_receive);

		if (port_mq != IMQ_NULL) {
			/*
			 * We get here if there is at least one message
			 * waiting on port_mq. We have instructed the prepost
			 * iteration logic to leave both the port_mq and the
			 * set mqueue locked.
			 *
			 * TODO: previously, we would place this port at the
			 *       back of the prepost list...
			 */
			imq_unlock(mqueue);

			/*
			 * Continue on to handling the message with just
			 * the port mqueue locked.
			 */
			if (option & MACH_PEEK_MSG)
				ipc_mqueue_peek_on_thread(port_mq, option, thread);
			else
				ipc_mqueue_select_on_thread(port_mq, mqueue, option,
							    max_size, thread);

			imq_unlock(port_mq);
			return THREAD_NOT_WAITING;
		}
	} else if (imq_is_queue(mqueue)) {
		ipc_kmsg_queue_t kmsgs;

		/*
		 * Receive on a single port. Just try to get the messages.
		 */
		kmsgs = &mqueue->imq_messages;
		if (ipc_kmsg_queue_first(kmsgs) != IKM_NULL) {
			if (option & MACH_PEEK_MSG)
				ipc_mqueue_peek_on_thread(mqueue, option, thread);
			else
				ipc_mqueue_select_on_thread(mqueue, IMQ_NULL, option,
							    max_size, thread);
			imq_unlock(mqueue);
			return THREAD_NOT_WAITING;
		}
	} else {
		panic("Unknown mqueue type 0x%x: likely memory corruption!\n",
		      mqueue->imq_wait_queue.waitq_type);
	}

	/*
	 * Looks like we'll have to block.  The mqueue we will
	 * block on (whether the set's or the local port's) is
	 * still locked.
	 */
	if (option & MACH_RCV_TIMEOUT) {
		if (rcv_timeout == 0) {
			/* zero timeout is a poll: fail immediately, never block */
			imq_unlock(mqueue);
			thread->ith_state = MACH_RCV_TIMED_OUT;
			return THREAD_NOT_WAITING;
		}
	}

	/* stash the receive parameters on the thread for the wakeup path */
	thread->ith_option = option;
	thread->ith_rsize = max_size;
	thread->ith_msize = 0;

	if (option & MACH_PEEK_MSG)
		thread->ith_state = MACH_PEEK_IN_PROGRESS;
	else
		thread->ith_state = MACH_RCV_IN_PROGRESS;

	/* rcv_timeout is in milliseconds (scaled here by 1000 * NSEC_PER_USEC) */
	if (option & MACH_RCV_TIMEOUT)
		clock_interval_to_deadline(rcv_timeout, 1000*NSEC_PER_USEC, &deadline);
	else
		deadline = 0;

	wresult = waitq_assert_wait64_locked(&mqueue->imq_wait_queue,
					     IPC_MQUEUE_RECEIVE,
					     interruptible,
					     TIMEOUT_URGENCY_USER_NORMAL,
					     deadline,
					     TIMEOUT_NO_LEEWAY,
					     thread);
	/* preposts should be detected above, not here */
	if (wresult == THREAD_AWAKENED)
		panic("ipc_mqueue_receive_on_thread: sleep walking");

	imq_unlock(mqueue);

	return wresult;
}
1102
1103
39037602
A
1104/*
1105 * Routine: ipc_mqueue_peek_on_thread
1106 * Purpose:
1107 * A receiver discovered that there was a message on the queue
1108 * before he had to block. Tell a thread about the message queue,
1109 * but don't pick off any messages.
1110 * Conditions:
1111 * port_mq locked
1112 * at least one message on port_mq's message queue
1113 *
1114 * Returns: (on thread->ith_state)
1115 * MACH_PEEK_READY ith_peekq contains a message queue
1116 */
1117void
1118ipc_mqueue_peek_on_thread(
1119 ipc_mqueue_t port_mq,
1120 mach_msg_option_t option,
1121 thread_t thread)
1122{
1123 (void)option;
1124 assert(option & MACH_PEEK_MSG);
1125 assert(ipc_kmsg_queue_first(&port_mq->imq_messages) != IKM_NULL);
1126
1127 /*
1128 * Take a reference on the mqueue's associated port:
1129 * the peeking thread will be responsible to release this reference
1130 * using ip_release_mq()
1131 */
1132 ip_reference_mq(port_mq);
1133 thread->ith_peekq = port_mq;
1134 thread->ith_state = MACH_PEEK_READY;
1135}
1136
/*
 *	Routine:	ipc_mqueue_select_on_thread
 *	Purpose:
 *		A receiver discovered that there was a message on the queue
 *		before he had to block.  Pick the message off the queue and
 *		"post" it to thread.
 *	Conditions:
 *		mqueue locked.
 *		thread not locked.
 *		There is a message.
 *		No need to reserve prepost objects - it will never prepost
 *
 *	Returns:
 *		MACH_MSG_SUCCESS	Actually selected a message for ourselves.
 *		MACH_RCV_TOO_LARGE	May or may not have pulled it, but it is large
 */
void
ipc_mqueue_select_on_thread(
	ipc_mqueue_t		port_mq,
	ipc_mqueue_t		set_mq,
	mach_msg_option_t	option,
	mach_msg_size_t		max_size,
	thread_t		thread)
{
	ipc_kmsg_t kmsg;
	mach_msg_return_t mr = MACH_MSG_SUCCESS;
	mach_msg_size_t msize;

	/*
	 * Do some sanity checking of our ability to receive
	 * before pulling the message off the queue.
	 */
	kmsg = ipc_kmsg_queue_first(&port_mq->imq_messages);
	assert(kmsg != IKM_NULL);

	/*
	 * If we really can't receive it, but we had the
	 * MACH_RCV_LARGE option set, then don't take it off
	 * the queue, instead return the appropriate error
	 * (and size needed).
	 */
	msize = ipc_kmsg_copyout_size(kmsg, thread->map);
	if (msize + REQUESTED_TRAILER_SIZE(thread_is_64bit(thread), option) > max_size) {
		mr = MACH_RCV_TOO_LARGE;
		if (option & MACH_RCV_LARGE) {
			/* leave the message queued; report its size to the caller */
			thread->ith_receiver_name = port_mq->imq_receiver_name;
			thread->ith_kmsg = IKM_NULL;
			thread->ith_msize = msize;
			thread->ith_seqno = 0;
			thread->ith_state = mr;
			return;
		}
	}

	/*
	 * Dequeue the message.  Note: without MACH_RCV_LARGE an oversized
	 * message is still pulled off here; the receiver learns of the
	 * MACH_RCV_TOO_LARGE condition via ith_state.
	 */
	ipc_kmsg_rmqueue(&port_mq->imq_messages, kmsg);
#if MACH_FLIPC
	if (MACH_NODE_VALID(kmsg->ikm_node) && FPORT_VALID(port_mq->imq_fport))
		flipc_msg_ack(kmsg->ikm_node, port_mq, TRUE);
#endif
	/* give back the reserved message-count slot (set_mq may be IMQ_NULL) */
	ipc_mqueue_release_msgcount(port_mq, set_mq);
	thread->ith_seqno = port_mq->imq_seqno++;
	thread->ith_kmsg = kmsg;
	thread->ith_state = mr;

	current_task()->messages_received++;
	return;
}
1204
b0d623f7 1205/*
39037602 1206 * Routine: ipc_mqueue_peek_locked
b0d623f7 1207 * Purpose:
39236c6e
A
1208 * Peek at a (non-set) message queue to see if it has a message
1209 * matching the sequence number provided (if zero, then the
1210 * first message in the queue) and return vital info about the
1211 * message.
1212 *
1213 * Conditions:
39037602
A
1214 * The ipc_mqueue_t is locked by callers.
1215 * Other locks may be held by callers, so this routine cannot block.
39236c6e
A
1216 * Caller holds reference on the message queue.
1217 */
1218unsigned
39037602
A
1219ipc_mqueue_peek_locked(ipc_mqueue_t mq,
1220 mach_port_seqno_t * seqnop,
1221 mach_msg_size_t * msg_sizep,
1222 mach_msg_id_t * msg_idp,
1223 mach_msg_max_trailer_t * msg_trailerp,
1224 ipc_kmsg_t *kmsgp)
39236c6e
A
1225{
1226 ipc_kmsg_queue_t kmsgq;
3e170ce0 1227 ipc_kmsg_t kmsg;
39236c6e 1228 mach_port_seqno_t seqno, msgoff;
39037602 1229 unsigned res = 0;
39236c6e
A
1230
1231 assert(!imq_is_set(mq));
1232
3e170ce0
A
1233 seqno = 0;
1234 if (seqnop != NULL)
1235 seqno = *seqnop;
39236c6e
A
1236
1237 if (seqno == 0) {
1238 seqno = mq->imq_seqno;
1239 msgoff = 0;
1240 } else if (seqno >= mq->imq_seqno &&
1241 seqno < mq->imq_seqno + mq->imq_msgcount) {
1242 msgoff = seqno - mq->imq_seqno;
1243 } else
1244 goto out;
1245
1246 /* look for the message that would match that seqno */
1247 kmsgq = &mq->imq_messages;
1248 kmsg = ipc_kmsg_queue_first(kmsgq);
1249 while (msgoff-- && kmsg != IKM_NULL) {
1250 kmsg = ipc_kmsg_queue_next(kmsgq, kmsg);
1251 }
1252 if (kmsg == IKM_NULL)
1253 goto out;
1254
1255 /* found one - return the requested info */
1256 if (seqnop != NULL)
1257 *seqnop = seqno;
1258 if (msg_sizep != NULL)
1259 *msg_sizep = kmsg->ikm_header->msgh_size;
1260 if (msg_idp != NULL)
1261 *msg_idp = kmsg->ikm_header->msgh_id;
1262 if (msg_trailerp != NULL)
1263 memcpy(msg_trailerp,
1264 (mach_msg_max_trailer_t *)((vm_offset_t)kmsg->ikm_header +
1265 round_msg(kmsg->ikm_header->msgh_size)),
1266 sizeof(mach_msg_max_trailer_t));
39037602
A
1267 if (kmsgp != NULL)
1268 *kmsgp = kmsg;
1269
39236c6e
A
1270 res = 1;
1271
39037602
A
1272out:
1273 return res;
1274}
1275
1276
1277/*
1278 * Routine: ipc_mqueue_peek
1279 * Purpose:
1280 * Peek at a (non-set) message queue to see if it has a message
1281 * matching the sequence number provided (if zero, then the
1282 * first message in the queue) and return vital info about the
1283 * message.
1284 *
1285 * Conditions:
1286 * The ipc_mqueue_t is unlocked.
1287 * Locks may be held by callers, so this routine cannot block.
1288 * Caller holds reference on the message queue.
1289 */
1290unsigned
1291ipc_mqueue_peek(ipc_mqueue_t mq,
1292 mach_port_seqno_t * seqnop,
1293 mach_msg_size_t * msg_sizep,
1294 mach_msg_id_t * msg_idp,
1295 mach_msg_max_trailer_t * msg_trailerp,
1296 ipc_kmsg_t *kmsgp)
1297{
1298 unsigned res;
1299
1300 imq_lock(mq);
1301
1302 res = ipc_mqueue_peek_locked(mq, seqnop, msg_sizep, msg_idp,
1303 msg_trailerp, kmsgp);
1304
39236c6e 1305 imq_unlock(mq);
39236c6e
A
1306 return res;
1307}
1308
39037602
A
1309/*
1310 * Routine: ipc_mqueue_release_peek_ref
1311 * Purpose:
1312 * Release the reference on an mqueue's associated port which was
1313 * granted to a thread in ipc_mqueue_peek_on_thread (on the
1314 * MACH_PEEK_MSG thread wakeup path).
1315 *
1316 * Conditions:
1317 * The ipc_mqueue_t should be locked on entry.
1318 * The ipc_mqueue_t will be _unlocked_ on return
1319 * (and potentially invalid!)
1320 *
1321 */
1322void ipc_mqueue_release_peek_ref(ipc_mqueue_t mq)
1323{
1324 assert(!imq_is_set(mq));
1325 assert(imq_held(mq));
1326
1327 /*
1328 * clear any preposts this mq may have generated
1329 * (which would cause subsequent immediate wakeups)
1330 */
1331 waitq_clear_prepost_locked(&mq->imq_wait_queue);
1332
1333 imq_unlock(mq);
1334
1335 /*
1336 * release the port reference: we need to do this outside the lock
1337 * because we might be holding the last port reference!
1338 **/
1339 ip_release_mq(mq);
1340}
3e170ce0
A
1341
1342/*
1343 * peek at the contained port message queues, break prepost iteration as soon
1344 * as we spot a message on one of the message queues referenced by the set's
1345 * prepost list. No need to lock each message queue, as only the head of each
1346 * queue is checked. If a message wasn't there before we entered here, no need
1347 * to find it (if we do, great).
1348 */
1349static int mqueue_peek_iterator(void *ctx, struct waitq *waitq,
1350 struct waitq_set *wqset)
1351{
1352 ipc_mqueue_t port_mq = (ipc_mqueue_t)waitq;
1353 ipc_kmsg_queue_t kmsgs = &port_mq->imq_messages;
1354
1355 (void)ctx;
1356 (void)wqset;
1357
1358 if (ipc_kmsg_queue_first(kmsgs) != IKM_NULL)
1359 return WQ_ITERATE_BREAK; /* break out of the prepost iteration */
1360
1361 return WQ_ITERATE_CONTINUE;
1362}
1363
39236c6e
A
1364/*
1365 * Routine: ipc_mqueue_set_peek
1366 * Purpose:
1367 * Peek at a message queue set to see if it has any ports
1368 * with messages.
b0d623f7
A
1369 *
1370 * Conditions:
1371 * Locks may be held by callers, so this routine cannot block.
1372 * Caller holds reference on the message queue.
1373 */
6d2010ae 1374unsigned
39236c6e 1375ipc_mqueue_set_peek(ipc_mqueue_t mq)
b0d623f7 1376{
3e170ce0 1377 int ret;
b0d623f7 1378
6d2010ae 1379 imq_lock(mq);
b0d623f7 1380
39037602
A
1381 /*
1382 * We may have raced with port destruction where the mqueue is marked
1383 * as invalid. In that case, even though we don't have messages, we
1384 * have an end-of-life event to deliver.
1385 */
1386 if (!imq_is_valid(mq))
1387 return 1;
1388
3e170ce0 1389 ret = waitq_set_iterate_preposts(&mq->imq_set_queue, NULL,
39037602 1390 mqueue_peek_iterator);
3e170ce0 1391
b0d623f7 1392 imq_unlock(mq);
39037602 1393
3e170ce0 1394 return (ret == WQ_ITERATE_BREAK);
39236c6e
A
1395}
1396
/*
 *	Routine:	ipc_mqueue_set_gather_member_names
 *	Purpose:
 *		Discover all ports which are members of a given port set.
 *		Because the waitq linkage mechanism was redesigned to save
 *		significant amounts of memory, it no longer keeps back-pointers
 *		from a port set to a port.  Therefore, we must iterate over all
 *		ports within a given IPC space and individually query them to
 *		see if they are members of the given set.  Port names of ports
 *		found to be members of the given set will be gathered into the
 *		provided 'names' array.  Actual returned names are limited to
 *		maxnames entries, but we keep counting the actual number of
 *		members to let the caller decide to retry if necessary.
 *
 *	Conditions:
 *		Locks may be held by callers, so this routine cannot block.
 *		Caller holds reference on the message queue (via port set).
 */
void
ipc_mqueue_set_gather_member_names(
	ipc_space_t		space,
	ipc_mqueue_t		set_mq,
	ipc_entry_num_t		maxnames,
	mach_port_name_t	*names,
	ipc_entry_num_t		*actualp)
{
	ipc_entry_t table;
	ipc_entry_num_t tsize;
	struct waitq_set *wqset;
	ipc_entry_num_t actual = 0;

	assert(set_mq != IMQ_NULL);
	wqset = &set_mq->imq_set_queue;

	assert(space != IS_NULL);
	is_read_lock(space);
	if (!is_active(space)) {
		/* space is being torn down: report zero members */
		is_read_unlock(space);
		goto out;
	}

	if (!waitq_set_is_valid(wqset)) {
		/* the set itself is gone: report zero members */
		is_read_unlock(space);
		goto out;
	}

	/* scan every entry in the space's table for receive rights */
	table = space->is_table;
	tsize = space->is_table_size;
	for (ipc_entry_num_t idx = 0; idx < tsize; idx++) {
		ipc_entry_t entry = &table[idx];

		/* only receive rights can be members of port sets */
		if ((entry->ie_bits & MACH_PORT_TYPE_RECEIVE) != MACH_PORT_TYPE_NONE) {
			__IGNORE_WCASTALIGN(ipc_port_t port = (ipc_port_t)entry->ie_object);
			ipc_mqueue_t mq = &port->ip_messages;

			assert(IP_VALID(port));
			if (ip_active(port) &&
			    waitq_member(&mq->imq_wait_queue, wqset)) {
				/* record the name only while there is room;
				 * keep counting so the caller can retry with
				 * a bigger buffer */
				if (actual < maxnames)
					names[actual] = mq->imq_receiver_name;
				actual++;
			}
		}
	}

	is_read_unlock(space);

out:
	*actualp = actual;
}
1468
39236c6e 1469
/*
 *	Routine:	ipc_mqueue_destroy_locked
 *	Purpose:
 *		Destroy a (non-set) message queue.
 *		Set any blocked senders running.
 *		Destroy the kmsgs in the queue.
 *	Conditions:
 *		mqueue locked
 *		Receivers were removed when the receive right was "changed"
 *	Returns:
 *		TRUE when this call placed the first kmsg on the per-thread
 *		delayed-destroy queue (per ipc_kmsg_delayed_destroy) -
 *		presumably the caller must then reap that queue; confirm
 *		against callers.
 */
boolean_t
ipc_mqueue_destroy_locked(ipc_mqueue_t mqueue)
{
	ipc_kmsg_queue_t kmqueue;
	ipc_kmsg_t kmsg;
	boolean_t reap = FALSE;

	assert(!imq_is_set(mqueue));

	/*
	 *	rouse all blocked senders
	 *	(don't boost anyone - we're tearing this queue down)
	 *	(never preposts)
	 */
	mqueue->imq_fullwaiters = FALSE;
	waitq_wakeup64_all_locked(&mqueue->imq_wait_queue,
				  IPC_MQUEUE_FULL,
				  THREAD_RESTART,
				  NULL,
				  WAITQ_ALL_PRIORITIES,
				  WAITQ_KEEP_LOCKED);

	/*
	 * Move messages from the specified queue to the per-thread
	 * clean/drain queue while we have the mqueue lock.
	 */
	kmqueue = &mqueue->imq_messages;
	while ((kmsg = ipc_kmsg_dequeue(kmqueue)) != IKM_NULL) {
#if MACH_FLIPC
		if (MACH_NODE_VALID(kmsg->ikm_node) && FPORT_VALID(mqueue->imq_fport))
			flipc_msg_ack(kmsg->ikm_node, mqueue, TRUE);
#endif
		boolean_t first;
		first = ipc_kmsg_delayed_destroy(kmsg);
		if (first)
			reap = first;
	}

	/*
	 * Wipe out message count, both for messages about to be
	 * reaped and for reserved space for (previously) woken senders.
	 * This is the indication to them that their reserved space is gone
	 * (the mqueue was destroyed).
	 */
	mqueue->imq_msgcount = 0;

	/* invalidate the waitq for subsequent mqueue operations */
	waitq_invalidate_locked(&mqueue->imq_wait_queue);

	/* clear out any preposting we may have done */
	waitq_clear_prepost_locked(&mqueue->imq_wait_queue);

	/*
	 * assert that we are destroying / invalidating a queue that's
	 * not a member of any other queue.
	 */
	assert(mqueue->imq_preposts == 0);
	assert(mqueue->imq_in_pset == 0);

	return reap;
}
1541
1542/*
1543 * Routine: ipc_mqueue_set_qlimit
1544 * Purpose:
1545 * Changes a message queue limit; the maximum number
1546 * of messages which may be queued.
1547 * Conditions:
1548 * Nothing locked.
1549 */
1550
1551void
1552ipc_mqueue_set_qlimit(
1553 ipc_mqueue_t mqueue,
1554 mach_port_msgcount_t qlimit)
1555{
1c79356b 1556
91447636
A
1557 assert(qlimit <= MACH_PORT_QLIMIT_MAX);
1558
1c79356b 1559 /* wake up senders allowed by the new qlimit */
1c79356b
A
1560 imq_lock(mqueue);
1561 if (qlimit > mqueue->imq_qlimit) {
1562 mach_port_msgcount_t i, wakeup;
1563
1564 /* caution: wakeup, qlimit are unsigned */
1565 wakeup = qlimit - mqueue->imq_qlimit;
1566
1567 for (i = 0; i < wakeup; i++) {
3e170ce0
A
1568 /*
1569 * boost the priority of the awoken thread
1570 * (WAITQ_PROMOTE_PRIORITY) to ensure it uses
1571 * the message queue slot we've just reserved.
1572 *
1573 * NOTE: this will never prepost
1574 */
1575 if (waitq_wakeup64_one_locked(&mqueue->imq_wait_queue,
1576 IPC_MQUEUE_FULL,
1577 THREAD_AWAKENED,
1578 NULL,
1579 WAITQ_PROMOTE_PRIORITY,
1580 WAITQ_KEEP_LOCKED) == KERN_NOT_WAITING) {
1581 mqueue->imq_fullwaiters = FALSE;
1582 break;
1583 }
1584 mqueue->imq_msgcount++; /* give it to the awakened thread */
1c79356b 1585 }
3e170ce0 1586 }
1c79356b
A
1587 mqueue->imq_qlimit = qlimit;
1588 imq_unlock(mqueue);
1c79356b
A
1589}
1590
1591/*
1592 * Routine: ipc_mqueue_set_seqno
1593 * Purpose:
1594 * Changes an mqueue's sequence number.
1595 * Conditions:
1596 * Caller holds a reference to the queue's containing object.
1597 */
1598void
1599ipc_mqueue_set_seqno(
1600 ipc_mqueue_t mqueue,
1601 mach_port_seqno_t seqno)
1602{
1c79356b
A
1603 imq_lock(mqueue);
1604 mqueue->imq_seqno = seqno;
1605 imq_unlock(mqueue);
1c79356b
A
1606}
1607
1608
1609/*
1610 * Routine: ipc_mqueue_copyin
1611 * Purpose:
1612 * Convert a name in a space to a message queue.
1613 * Conditions:
1614 * Nothing locked. If successful, the caller gets a ref for
1615 * for the object. This ref ensures the continued existence of
1616 * the queue.
1617 * Returns:
1618 * MACH_MSG_SUCCESS Found a message queue.
1619 * MACH_RCV_INVALID_NAME The space is dead.
1620 * MACH_RCV_INVALID_NAME The name doesn't denote a right.
1621 * MACH_RCV_INVALID_NAME
1622 * The denoted right is not receive or port set.
1623 * MACH_RCV_IN_SET Receive right is a member of a set.
1624 */
1625
1626mach_msg_return_t
1627ipc_mqueue_copyin(
1628 ipc_space_t space,
1629 mach_port_name_t name,
1630 ipc_mqueue_t *mqueuep,
1631 ipc_object_t *objectp)
1632{
1633 ipc_entry_t entry;
1634 ipc_object_t object;
1635 ipc_mqueue_t mqueue;
1636
1637 is_read_lock(space);
316670eb 1638 if (!is_active(space)) {
1c79356b
A
1639 is_read_unlock(space);
1640 return MACH_RCV_INVALID_NAME;
1641 }
1642
1643 entry = ipc_entry_lookup(space, name);
1644 if (entry == IE_NULL) {
1645 is_read_unlock(space);
1646 return MACH_RCV_INVALID_NAME;
1647 }
1648
1649 object = entry->ie_object;
1650
1651 if (entry->ie_bits & MACH_PORT_TYPE_RECEIVE) {
1652 ipc_port_t port;
1c79356b 1653
3e170ce0 1654 __IGNORE_WCASTALIGN(port = (ipc_port_t) object);
1c79356b
A
1655 assert(port != IP_NULL);
1656
1657 ip_lock(port);
1658 assert(ip_active(port));
1659 assert(port->ip_receiver_name == name);
1660 assert(port->ip_receiver == space);
1661 is_read_unlock(space);
1662 mqueue = &port->ip_messages;
1663
1664 } else if (entry->ie_bits & MACH_PORT_TYPE_PORT_SET) {
1665 ipc_pset_t pset;
1666
3e170ce0 1667 __IGNORE_WCASTALIGN(pset = (ipc_pset_t) object);
1c79356b
A
1668 assert(pset != IPS_NULL);
1669
1670 ips_lock(pset);
1671 assert(ips_active(pset));
1c79356b
A
1672 is_read_unlock(space);
1673
1674 mqueue = &pset->ips_messages;
1675 } else {
1676 is_read_unlock(space);
1677 return MACH_RCV_INVALID_NAME;
1678 }
1679
1680 /*
1681 * At this point, the object is locked and active,
1682 * the space is unlocked, and mqueue is initialized.
1683 */
1684
1685 io_reference(object);
1686 io_unlock(object);
1687
1688 *objectp = object;
1689 *mqueuep = mqueue;
1690 return MACH_MSG_SUCCESS;
1691}