/*
 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_FREE_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	ipc/ipc_mqueue.c
 *	Author:	Rich Draves
 *	Date:	1989
 *
 *	Functions to manipulate IPC message queues.
 */
/*
 * NOTICE: This file was modified by SPARTA, Inc. in 2006 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */

#include <mach/port.h>
#include <mach/message.h>
#include <mach/sync_policy.h>

#include <kern/assert.h>
#include <kern/counters.h>
#include <kern/sched_prim.h>
#include <kern/ipc_kobject.h>
#include <kern/ipc_mig.h>       /* XXX - for mach_msg_receive_continue */
#include <kern/misc_protos.h>
#include <kern/task.h>
#include <kern/thread.h>
#include <kern/waitq.h>

#include <ipc/port.h>
#include <ipc/ipc_mqueue.h>
#include <ipc/ipc_kmsg.h>
#include <ipc/ipc_port.h>
#include <ipc/ipc_pset.h>
#include <ipc/ipc_space.h>

#if MACH_FLIPC
#include <ipc/flipc.h>
#endif

#ifdef __LP64__
#include <vm/vm_map.h>
#endif

#include <sys/event.h>

extern char *proc_name_address(void *p);

int ipc_mqueue_full;            /* address is event for queue space */
int ipc_mqueue_rcv;             /* address is event for message arrival */

/* forward declarations */
static void ipc_mqueue_receive_results(wait_result_t result);
static void ipc_mqueue_peek_on_thread(
	ipc_mqueue_t        port_mq,
	mach_msg_option_t   option,
	thread_t            thread);

/*
 *	Routine:	ipc_mqueue_init
 *	Purpose:
 *		Initialize a newly-allocated message queue.
 */
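/*
 * An mqueue comes in two kinds: a port-set mqueue is backed purely by a
 * waitq_set (imq_set_queue), while a port mqueue carries the actual
 * message queue (imq_messages), a waitq of blocked receivers
 * (imq_wait_queue), and the slot-accounting fields initialized below.
 */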
void
ipc_mqueue_init(
	ipc_mqueue_t            mqueue,
	ipc_mqueue_kind_t       kind)
{
	switch (kind) {
	case IPC_MQUEUE_KIND_SET:
		waitq_set_init(&mqueue->imq_set_queue,
		    SYNC_POLICY_FIFO | SYNC_POLICY_PREPOST,
		    NULL, NULL);
		break;
	case IPC_MQUEUE_KIND_NONE: /* cheat: we really should have "no" mqueue */
	case IPC_MQUEUE_KIND_PORT:
		waitq_init(&mqueue->imq_wait_queue,
		    SYNC_POLICY_FIFO | SYNC_POLICY_TURNSTILE_PROXY);
		ipc_kmsg_queue_init(&mqueue->imq_messages);
		mqueue->imq_seqno = 0;
		mqueue->imq_msgcount = 0;
		mqueue->imq_qlimit = MACH_PORT_QLIMIT_DEFAULT;
		mqueue->imq_context = 0;
		mqueue->imq_fullwaiters = FALSE;
#if MACH_FLIPC
		mqueue->imq_fport = FPORT_NULL;
#endif
		break;
	}
	klist_init(&mqueue->imq_klist);
}

void
ipc_mqueue_deinit(
	ipc_mqueue_t            mqueue)
{
	boolean_t is_set = imq_is_set(mqueue);

	if (is_set) {
		waitq_set_deinit(&mqueue->imq_set_queue);
	} else {
		waitq_deinit(&mqueue->imq_wait_queue);
	}
}

/*
 *	Routine:	imq_reserve_and_lock
 *	Purpose:
 *		Atomically lock an ipc_mqueue_t object and reserve
 *		an appropriate number of prepost linkage objects for
 *		use in wakeup operations.
 *	Conditions:
 *		mq is unlocked
 */
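/*
 * Reserving prepost objects up front means the wakeup paths (see the
 * waitq_wakeup64_identify_locked() calls in this file, which consume the
 * reservation) never have to allocate while the waitq is locked; whatever
 * is left over is given back in imq_release_and_unlock().
 */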
void
imq_reserve_and_lock(ipc_mqueue_t mq, uint64_t *reserved_prepost)
{
	*reserved_prepost = waitq_prepost_reserve(&mq->imq_wait_queue, 0,
	    WAITQ_KEEP_LOCKED);
}


/*
 *	Routine:	imq_release_and_unlock
 *	Purpose:
 *		Unlock an ipc_mqueue_t object, re-enable interrupts,
 *		and release any unused prepost object reservations.
 *	Conditions:
 *		mq is locked
 */
void
imq_release_and_unlock(ipc_mqueue_t mq, uint64_t reserved_prepost)
{
	assert(imq_held(mq));
	waitq_unlock(&mq->imq_wait_queue);
	waitq_prepost_release_reserve(reserved_prepost);
}


/*
 *	Routine:	ipc_mqueue_member
 *	Purpose:
 *		Indicate whether the (port) mqueue is a member of
 *		this portset's mqueue.  We do this by checking
 *		whether the portset mqueue's waitq is a member of
 *		the port's mqueue waitq.
 *	Conditions:
 *		the portset's mqueue is not already a member
 *		this may block while allocating linkage structures.
 */

boolean_t
ipc_mqueue_member(
	ipc_mqueue_t            port_mqueue,
	ipc_mqueue_t            set_mqueue)
{
	struct waitq *port_waitq = &port_mqueue->imq_wait_queue;
	struct waitq_set *set_waitq = &set_mqueue->imq_set_queue;

	return waitq_member(port_waitq, set_waitq);
}

/*
 *	Routine:	ipc_mqueue_remove
 *	Purpose:
 *		Remove the association between the queue and the specified
 *		set message queue.
 */

kern_return_t
ipc_mqueue_remove(
	ipc_mqueue_t            mqueue,
	ipc_mqueue_t            set_mqueue)
{
	struct waitq *mq_waitq = &mqueue->imq_wait_queue;
	struct waitq_set *set_waitq = &set_mqueue->imq_set_queue;

	return waitq_unlink(mq_waitq, set_waitq);
}

/*
 *	Routine:	ipc_mqueue_remove_from_all
 *	Purpose:
 *		Remove the mqueue from all the sets it is a member of
 *	Conditions:
 *		Nothing locked.
 *	Returns:
 *		mqueue unlocked and set links deallocated
 */
void
ipc_mqueue_remove_from_all(ipc_mqueue_t mqueue)
{
	struct waitq *mq_waitq = &mqueue->imq_wait_queue;
	kern_return_t kr;

	imq_lock(mqueue);

	assert(waitq_valid(mq_waitq));
	kr = waitq_unlink_all_unlock(mq_waitq);
	/* mqueue unlocked and set links deallocated */
}

/*
 *	Routine:	ipc_mqueue_remove_all
 *	Purpose:
 *		Remove all the member queues from the specified set.
 *		Also removes the queue from any containing sets.
 *	Conditions:
 *		Nothing locked.
 *	Returns:
 *		mqueue unlocked all set links deallocated
 */
void
ipc_mqueue_remove_all(ipc_mqueue_t mqueue)
{
	struct waitq_set *mq_setq = &mqueue->imq_set_queue;

	imq_lock(mqueue);
	assert(waitqs_is_set(mq_setq));
	waitq_set_unlink_all_unlock(mq_setq);
	/* mqueue unlocked set links deallocated */
}


/*
 *	Routine:	ipc_mqueue_add
 *	Purpose:
 *		Associate the portset's mqueue with the port's mqueue.
 *		This has to be done so that posting the port will wakeup
 *		a portset waiter.  If there are waiters on the portset
 *		mqueue and messages on the port mqueue, try to match them
 *		up now.
 *	Conditions:
 *		May block.
 */
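/*
 * Note: the caller passes in pre-reserved waitq link and prepost objects
 * (see the assert on *reserved_link below), so neither the link operation
 * nor the wakeups need to allocate while the mqueue lock is held.
 */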
kern_return_t
ipc_mqueue_add(
	ipc_mqueue_t            port_mqueue,
	ipc_mqueue_t            set_mqueue,
	uint64_t                *reserved_link,
	uint64_t                *reserved_prepost)
{
	struct waitq *port_waitq = &port_mqueue->imq_wait_queue;
	struct waitq_set *set_waitq = &set_mqueue->imq_set_queue;
	ipc_kmsg_queue_t kmsgq;
	ipc_kmsg_t kmsg, next;
	kern_return_t kr;

	assert(reserved_link && *reserved_link != 0);
	assert(waitqs_is_linked(set_waitq));

	imq_lock(port_mqueue);

	/*
	 * The link operation is now under the same lock-hold as
	 * message iteration and thread wakeup, but doesn't have to be...
	 */
	kr = waitq_link(port_waitq, set_waitq, WAITQ_ALREADY_LOCKED, reserved_link);
	if (kr != KERN_SUCCESS) {
		imq_unlock(port_mqueue);
		return kr;
	}

	/*
	 * Now that the set has been added to the port, there may be
	 * messages queued on the port and threads waiting on the set
	 * waitq.  Let's get them together.
	 */
	kmsgq = &port_mqueue->imq_messages;
	for (kmsg = ipc_kmsg_queue_first(kmsgq);
	    kmsg != IKM_NULL;
	    kmsg = next) {
		next = ipc_kmsg_queue_next(kmsgq, kmsg);

		for (;;) {
			thread_t th;
			mach_msg_size_t msize;
			spl_t th_spl;

			th = waitq_wakeup64_identify_locked(
				port_waitq,
				IPC_MQUEUE_RECEIVE,
				THREAD_AWAKENED, &th_spl,
				reserved_prepost, WAITQ_ALL_PRIORITIES,
				WAITQ_KEEP_LOCKED);
			/* waitq/mqueue still locked, thread locked */

			if (th == THREAD_NULL) {
				goto leave;
			}

			/*
			 * If the receiver waited with a facility not directly
			 * related to Mach messaging, then it isn't prepared to get
			 * handed the message directly.  Just set it running, and
			 * go look for another thread that can.
			 */
			if (th->ith_state != MACH_RCV_IN_PROGRESS) {
				if (th->ith_state == MACH_PEEK_IN_PROGRESS) {
					/*
					 * wakeup the peeking thread, but
					 * continue to loop over the threads
					 * waiting on the port's mqueue to see
					 * if there are any actual receivers
					 */
					ipc_mqueue_peek_on_thread(port_mqueue,
					    th->ith_option,
					    th);
				}
				thread_unlock(th);
				splx(th_spl);
				continue;
			}

			/*
			 * Found a receiver.  See if they can handle the message
			 * correctly (the message is not too large for them, or
			 * they didn't care to be informed that the message was
			 * too large).  If they can't handle it, take them off
			 * the list and let them go back and figure it out and
			 * just move onto the next.
			 */
			msize = ipc_kmsg_copyout_size(kmsg, th->map);
			if (th->ith_rsize <
			    (msize + REQUESTED_TRAILER_SIZE(thread_is_64bit_addr(th), th->ith_option))) {
				th->ith_state = MACH_RCV_TOO_LARGE;
				th->ith_msize = msize;
				if (th->ith_option & MACH_RCV_LARGE) {
					/*
					 * let the thread go without the message
					 */
					th->ith_receiver_name = port_mqueue->imq_receiver_name;
					th->ith_kmsg = IKM_NULL;
					th->ith_seqno = 0;
					thread_unlock(th);
					splx(th_spl);
					continue; /* find another thread */
				}
			} else {
				th->ith_state = MACH_MSG_SUCCESS;
			}

			/*
			 * This thread is going to take this message,
			 * so hand it over.
			 */
			ipc_kmsg_rmqueue(kmsgq, kmsg);
#if MACH_FLIPC
			mach_node_t node = kmsg->ikm_node;
#endif
			ipc_mqueue_release_msgcount(port_mqueue, IMQ_NULL);

			th->ith_kmsg = kmsg;
			th->ith_seqno = port_mqueue->imq_seqno++;
			thread_unlock(th);
			splx(th_spl);
#if MACH_FLIPC
			if (MACH_NODE_VALID(node) && FPORT_VALID(port_mqueue->imq_fport)) {
				flipc_msg_ack(node, port_mqueue, TRUE);
			}
#endif
			break;  /* go to next message */
		}
	}
leave:
	imq_unlock(port_mqueue);
	return KERN_SUCCESS;
}


/*
 * Routine:	ipc_mqueue_has_klist
 * Purpose:
 *	Returns whether the given mqueue imq_klist field can be used as a klist.
 */
static inline bool
ipc_mqueue_has_klist(ipc_mqueue_t mqueue)
{
	ipc_object_t object = imq_to_object(mqueue);
	if (io_otype(object) != IOT_PORT) {
		return true;
	}
	ipc_port_t port = ip_from_mq(mqueue);
	if (port->ip_specialreply) {
		return false;
	}
	return port->ip_sync_link_state == PORT_SYNC_LINK_ANY;
}

/*
 *	Routine:	ipc_mqueue_changed
 *	Purpose:
 *		Wake up receivers waiting in a message queue.
 *	Conditions:
 *		The message queue is locked.
 */
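/*
 * The wakeup below posts THREAD_RESTART, which the receive path
 * (ipc_mqueue_receive_results) translates into MACH_RCV_PORT_CHANGED:
 * blocked receivers learn that something happened to the port/set
 * rather than that a message arrived.
 */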
void
ipc_mqueue_changed(
	ipc_space_t             space,
	ipc_mqueue_t            mqueue)
{
	if (ipc_mqueue_has_klist(mqueue) && SLIST_FIRST(&mqueue->imq_klist)) {
		/*
		 * Indicate that this message queue is vanishing
		 *
		 * When this is called, the associated receive right may be in flight
		 * between two tasks: the one it used to live in, and the one that armed
		 * a port destroyed notification for it.
		 *
		 * The new process may want to register the port it gets back with an
		 * EVFILT_MACHPORT filter again, and may already have pending sync IPC
		 * on this port, in which case we want the imq_klist field to be
		 * reusable for nefarious purposes.
		 *
		 * Fortunately, we really don't need this linkage anymore after this
		 * point as EV_VANISHED / EV_EOF will be the last thing delivered ever.
		 *
		 * Note: we don't have the space lock here, however, this covers the
		 *       case of when a task is terminating the space, triggering
		 *       several knote_vanish() calls.
		 *
		 *       We don't need the lock to observe that the space is inactive as
		 *       we just deactivated it on the same thread.
		 *
		 *       We still need to call knote_vanish() so that the knote is
		 *       marked with EV_VANISHED or EV_EOF so that the detach step
		 *       in filt_machportdetach is skipped correctly.
		 */
		assert(space);
		knote_vanish(&mqueue->imq_klist, is_active(space));
	}

	if (io_otype(imq_to_object(mqueue)) == IOT_PORT) {
		ipc_port_adjust_sync_link_state_locked(ip_from_mq(mqueue), PORT_SYNC_LINK_ANY, NULL);
	} else {
		klist_init(&mqueue->imq_klist);
	}

	waitq_wakeup64_all_locked(&mqueue->imq_wait_queue,
	    IPC_MQUEUE_RECEIVE,
	    THREAD_RESTART,
	    NULL,
	    WAITQ_ALL_PRIORITIES,
	    WAITQ_KEEP_LOCKED);
}



/*
 *	Routine:	ipc_mqueue_send
 *	Purpose:
 *		Send a message to a message queue.  The message holds a reference
 *		for the destination port for this message queue in the
 *		msgh_remote_port field.
 *
 *		If unsuccessful, the caller still has possession of
 *		the message and must do something with it.  If successful,
 *		the message is queued, given to a receiver, or destroyed.
 *	Conditions:
 *		mqueue is locked.
 *	Returns:
 *		MACH_MSG_SUCCESS	The message was accepted.
 *		MACH_SEND_TIMED_OUT	Caller still has message.
 *		MACH_SEND_INTERRUPTED	Caller still has message.
 */
mach_msg_return_t
ipc_mqueue_send(
	ipc_mqueue_t            mqueue,
	ipc_kmsg_t              kmsg,
	mach_msg_option_t       option,
	mach_msg_timeout_t      send_timeout)
{
	int wresult;

	/*
	 *  Don't block if:
	 *	1) We're under the queue limit.
	 *	2) Caller used the MACH_SEND_ALWAYS internal option.
	 *	3) Message is sent to a send-once right.
	 */
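	/*
	 * In the non-blocking case below, imq_msgcount is incremented while
	 * the mqueue is still locked: the count acts as a reservation of a
	 * queue slot for this message.  The matching decrement happens in
	 * ipc_mqueue_release_msgcount() once the message is consumed or
	 * handed directly to a receiver.
	 */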
	if (!imq_full(mqueue) ||
	    (!imq_full_kernel(mqueue) &&
	    ((option & MACH_SEND_ALWAYS) ||
	    (MACH_MSGH_BITS_REMOTE(kmsg->ikm_header->msgh_bits) ==
	    MACH_MSG_TYPE_PORT_SEND_ONCE)))) {
		mqueue->imq_msgcount++;
		assert(mqueue->imq_msgcount > 0);
		imq_unlock(mqueue);
	} else {
		thread_t cur_thread = current_thread();
		ipc_port_t port = ip_from_mq(mqueue);
		struct turnstile *send_turnstile = TURNSTILE_NULL;
		uint64_t deadline;

		/*
		 * We have to wait for space to be granted to us.
		 */
		if ((option & MACH_SEND_TIMEOUT) && (send_timeout == 0)) {
			imq_unlock(mqueue);
			return MACH_SEND_TIMED_OUT;
		}
		if (imq_full_kernel(mqueue)) {
			imq_unlock(mqueue);
			return MACH_SEND_NO_BUFFER;
		}
		mqueue->imq_fullwaiters = TRUE;

		if (option & MACH_SEND_TIMEOUT) {
			clock_interval_to_deadline(send_timeout, 1000 * NSEC_PER_USEC, &deadline);
		} else {
			deadline = 0;
		}

		thread_set_pending_block_hint(cur_thread, kThreadWaitPortSend);

		send_turnstile = turnstile_prepare((uintptr_t)port,
		    port_send_turnstile_address(port),
		    TURNSTILE_NULL, TURNSTILE_SYNC_IPC);

		ipc_port_send_update_inheritor(port, send_turnstile,
		    TURNSTILE_DELAYED_UPDATE);

		wresult = waitq_assert_wait64_leeway(
			&send_turnstile->ts_waitq,
			IPC_MQUEUE_FULL,
			THREAD_ABORTSAFE,
			TIMEOUT_URGENCY_USER_NORMAL,
			deadline,
			TIMEOUT_NO_LEEWAY);

		imq_unlock(mqueue);
		turnstile_update_inheritor_complete(send_turnstile,
		    TURNSTILE_INTERLOCK_NOT_HELD);

		if (wresult == THREAD_WAITING) {
			wresult = thread_block(THREAD_CONTINUE_NULL);
			counter(c_ipc_mqueue_send_block++);
		}

		/* Call turnstile complete with interlock held */
		imq_lock(mqueue);
		turnstile_complete((uintptr_t)port, port_send_turnstile_address(port), NULL, TURNSTILE_SYNC_IPC);
		imq_unlock(mqueue);

		/* Call cleanup after dropping the interlock */
		turnstile_cleanup();

		switch (wresult) {
		case THREAD_AWAKENED:
			/*
			 * we can proceed - inherited msgcount from waker
			 * or the message queue has been destroyed and the msgcount
			 * has been reset to zero (will detect in ipc_mqueue_post()).
			 */
			break;

		case THREAD_TIMED_OUT:
			assert(option & MACH_SEND_TIMEOUT);
			return MACH_SEND_TIMED_OUT;

		case THREAD_INTERRUPTED:
			return MACH_SEND_INTERRUPTED;

		case THREAD_RESTART:
			/* mqueue is being destroyed */
			return MACH_SEND_INVALID_DEST;
		default:
			panic("ipc_mqueue_send");
		}
	}

	ipc_mqueue_post(mqueue, kmsg, option);
	return MACH_MSG_SUCCESS;
}

/*
 *	Routine:	ipc_mqueue_override_send
 *	Purpose:
 *		Set an override qos on the first message in the queue
 *		(if the queue is full).  This is a send-possible override
 *		that will go away as soon as we drain a message from the
 *		queue.
 *
 *	Conditions:
 *		The message queue is not locked.
 *		The caller holds a reference on the message queue.
 */
extern void
ipc_mqueue_override_send(
	ipc_mqueue_t        mqueue,
	mach_msg_priority_t override)
{
	boolean_t __unused full_queue_empty = FALSE;

	imq_lock(mqueue);
	assert(imq_valid(mqueue));
	assert(!imq_is_set(mqueue));

	if (imq_full(mqueue)) {
		ipc_kmsg_t first = ipc_kmsg_queue_first(&mqueue->imq_messages);

		if (first && ipc_kmsg_override_qos(&mqueue->imq_messages, first, override)) {
			ipc_object_t object = imq_to_object(mqueue);
			assert(io_otype(object) == IOT_PORT);
			ipc_port_t port = ip_object_to_port(object);
			if (ip_active(port) &&
			    port->ip_receiver_name != MACH_PORT_NULL &&
			    is_active(port->ip_receiver) &&
			    ipc_mqueue_has_klist(mqueue)) {
				KNOTE(&mqueue->imq_klist, 0);
			}
		}
		if (!first) {
			full_queue_empty = TRUE;
		}
	}
	imq_unlock(mqueue);

#if DEVELOPMENT || DEBUG
	if (full_queue_empty) {
		ipc_port_t port = ip_from_mq(mqueue);
		int dst_pid = 0;
		if (ip_active(port) && !port->ip_tempowner &&
		    port->ip_receiver_name && port->ip_receiver &&
		    port->ip_receiver != ipc_space_kernel) {
			dst_pid = task_pid(port->ip_receiver->is_task);
		}
	}
#endif
}

/*
 *	Routine:	ipc_mqueue_release_msgcount
 *	Purpose:
 *		Release a message queue reference in the case where we
 *		found a waiter.
 *
 *	Conditions:
 *		The message queue is locked.
 *		The message corresponding to this reference is off the queue.
 *		There is no need to pass reserved preposts because this will
 *		never prepost to anyone
 */
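/*
 * Slot-handoff invariant: a full queue can have senders parked on the
 * port's send turnstile.  When this routine frees a slot and manages to
 * wake one sender, it re-increments imq_msgcount on the sender's behalf
 * ("gave away our slot" below); that is the "inherited msgcount from
 * waker" that ipc_mqueue_send relies on after THREAD_AWAKENED.
 */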
void
ipc_mqueue_release_msgcount(ipc_mqueue_t port_mq, ipc_mqueue_t set_mq)
{
	struct turnstile *send_turnstile = port_send_turnstile(ip_from_mq(port_mq));
	(void)set_mq;
	assert(imq_held(port_mq));
	assert(port_mq->imq_msgcount > 1 || ipc_kmsg_queue_empty(&port_mq->imq_messages));

	port_mq->imq_msgcount--;

	if (!imq_full(port_mq) && port_mq->imq_fullwaiters &&
	    send_turnstile != TURNSTILE_NULL) {
		/*
		 * boost the priority of the awoken thread
		 * (WAITQ_PROMOTE_PRIORITY) to ensure it uses
		 * the message queue slot we've just reserved.
		 *
		 * NOTE: this will never prepost
		 *
		 * The wakeup happens on a turnstile waitq
		 * which will wakeup the highest priority waiter.
		 * A potential downside of this would be starving low
		 * priority senders if there is a constant churn of
		 * high priority threads trying to send to this port.
		 */
		if (waitq_wakeup64_one(&send_turnstile->ts_waitq,
		    IPC_MQUEUE_FULL,
		    THREAD_AWAKENED,
		    WAITQ_PROMOTE_PRIORITY) != KERN_SUCCESS) {
			port_mq->imq_fullwaiters = FALSE;
		} else {
			/* gave away our slot - add reference back */
			port_mq->imq_msgcount++;
		}
	}

	if (ipc_kmsg_queue_empty(&port_mq->imq_messages)) {
		/* no more msgs: invalidate the port's prepost object */
		waitq_clear_prepost_locked(&port_mq->imq_wait_queue);
	}
}

/*
 *	Routine:	ipc_mqueue_post
 *	Purpose:
 *		Post a message to a waiting receiver or enqueue it.  If a
 *		receiver is waiting, we can release our reserved space in
 *		the message queue.
 *
 *	Conditions:
 *		mqueue is unlocked
 *		If we need to queue, our space in the message queue is reserved.
 */
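/*
 * Three outcomes are possible below: the message is handed directly to a
 * waiting receiver (or a peeking thread), it is enqueued on the port using
 * the slot reserved in ipc_mqueue_send, or -- if the port raced to
 * destruction -- it is destroyed and the send is treated as posted.
 */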
void
ipc_mqueue_post(
	ipc_mqueue_t            mqueue,
	ipc_kmsg_t              kmsg,
	mach_msg_option_t       __unused option)
{
	uint64_t reserved_prepost = 0;
	boolean_t destroy_msg = FALSE;

	ipc_kmsg_trace_send(kmsg, option);

	/*
	 * While the msg queue is locked, we have control of the
	 * kmsg, so the ref in it for the port is still good.
	 *
	 * Check for a receiver for the message.
	 */
	imq_reserve_and_lock(mqueue, &reserved_prepost);

	/* we may have raced with port destruction! */
	if (!imq_valid(mqueue)) {
		destroy_msg = TRUE;
		goto out_unlock;
	}

	for (;;) {
		struct waitq *waitq = &mqueue->imq_wait_queue;
		spl_t th_spl;
		thread_t receiver;
		mach_msg_size_t msize;

		receiver = waitq_wakeup64_identify_locked(waitq,
		    IPC_MQUEUE_RECEIVE,
		    THREAD_AWAKENED,
		    &th_spl,
		    &reserved_prepost,
		    WAITQ_ALL_PRIORITIES,
		    WAITQ_KEEP_LOCKED);
		/* waitq still locked, thread locked */

		if (receiver == THREAD_NULL) {
			/*
			 * no receivers; queue kmsg if space still reserved
			 * Reservations are cancelled when the port goes inactive.
			 * note that this will enqueue the message for any
			 * "peeking" receivers.
			 *
			 * Also, post the knote to wake up any threads waiting
			 * on that style of interface if this insertion is of
			 * note (first insertion, or adjusted override qos all
			 * the way to the head of the queue).
			 *
			 * This is just for ports.  portset knotes are stay-active,
			 * and their threads get awakened through the
			 * !MACH_RCV_IN_PROGRESS logic below.
			 */
			if (mqueue->imq_msgcount > 0) {
				if (ipc_kmsg_enqueue_qos(&mqueue->imq_messages, kmsg)) {
					/* if the space is dead there is no point calling KNOTE */
					ipc_object_t object = imq_to_object(mqueue);
					assert(io_otype(object) == IOT_PORT);
					ipc_port_t port = ip_object_to_port(object);
					if (ip_active(port) &&
					    port->ip_receiver_name != MACH_PORT_NULL &&
					    is_active(port->ip_receiver) &&
					    ipc_mqueue_has_klist(mqueue)) {
						KNOTE(&mqueue->imq_klist, 0);
					}
				}
				break;
			}

			/*
			 * Otherwise, the message queue must belong to an inactive
			 * port, so just destroy the message and pretend it was posted.
			 */
			destroy_msg = TRUE;
			goto out_unlock;
		}

		/*
		 * If a thread is attempting a "peek" into the message queue
		 * (MACH_PEEK_IN_PROGRESS), then we enqueue the message and set the
		 * thread running.  A successful peek is essentially the same as
		 * message delivery since the peeking thread takes responsibility
		 * for delivering the message and (eventually) removing it from
		 * the mqueue.  Only one thread can successfully use the peek
		 * facility on any given port, so we exit the waitq loop after
		 * encountering such a thread.
		 */
		if (receiver->ith_state == MACH_PEEK_IN_PROGRESS && mqueue->imq_msgcount > 0) {
			ipc_kmsg_enqueue_qos(&mqueue->imq_messages, kmsg);
			ipc_mqueue_peek_on_thread(mqueue, receiver->ith_option, receiver);
			thread_unlock(receiver);
			splx(th_spl);
			break;  /* Message was posted, so break out of loop */
		}

		/*
		 * If the receiver waited with a facility not directly related
		 * to Mach messaging, then it isn't prepared to get handed the
		 * message directly.  Just set it running, and go look for
		 * another thread that can.
		 */
		if (receiver->ith_state != MACH_RCV_IN_PROGRESS) {
			thread_unlock(receiver);
			splx(th_spl);
			continue;
		}

		/*
		 * We found a waiting thread.
		 * If the message is too large or the scatter list is too small
		 * the thread we wake up will get that as its status.
		 */
		msize = ipc_kmsg_copyout_size(kmsg, receiver->map);
		if (receiver->ith_rsize <
		    (msize + REQUESTED_TRAILER_SIZE(thread_is_64bit_addr(receiver), receiver->ith_option))) {
			receiver->ith_msize = msize;
			receiver->ith_state = MACH_RCV_TOO_LARGE;
		} else {
			receiver->ith_state = MACH_MSG_SUCCESS;
		}

		/*
		 * If there is no problem with the upcoming receive, or the
		 * receiver thread didn't specifically ask for special too
		 * large error condition, go ahead and select it anyway.
		 */
		if ((receiver->ith_state == MACH_MSG_SUCCESS) ||
		    !(receiver->ith_option & MACH_RCV_LARGE)) {
			receiver->ith_kmsg = kmsg;
			receiver->ith_seqno = mqueue->imq_seqno++;
#if MACH_FLIPC
			mach_node_t node = kmsg->ikm_node;
#endif
			thread_unlock(receiver);
			splx(th_spl);

			/* we didn't need our reserved spot in the queue */
			ipc_mqueue_release_msgcount(mqueue, IMQ_NULL);

#if MACH_FLIPC
			if (MACH_NODE_VALID(node) && FPORT_VALID(mqueue->imq_fport)) {
				flipc_msg_ack(node, mqueue, TRUE);
			}
#endif
			break;
		}

		/*
		 * Otherwise, this thread needs to be released to run
		 * and handle its error without getting the message.  We
		 * need to go back and pick another one.
		 */
		receiver->ith_receiver_name = mqueue->imq_receiver_name;
		receiver->ith_kmsg = IKM_NULL;
		receiver->ith_seqno = 0;
		thread_unlock(receiver);
		splx(th_spl);
	}

out_unlock:
	/* clear the waitq boost we may have been given */
	waitq_clear_promotion_locked(&mqueue->imq_wait_queue, current_thread());
	imq_release_and_unlock(mqueue, reserved_prepost);
	if (destroy_msg) {
		ipc_kmsg_destroy(kmsg);
	}

	current_task()->messages_sent++;
	return;
}


static void
ipc_mqueue_receive_results(wait_result_t saved_wait_result)
{
	thread_t self = current_thread();
	mach_msg_option_t option = self->ith_option;

	/*
	 * why did we wake up?
	 */
	switch (saved_wait_result) {
	case THREAD_TIMED_OUT:
		self->ith_state = MACH_RCV_TIMED_OUT;
		return;

	case THREAD_INTERRUPTED:
		self->ith_state = MACH_RCV_INTERRUPTED;
		return;

	case THREAD_RESTART:
		/* something bad happened to the port/set */
		self->ith_state = MACH_RCV_PORT_CHANGED;
		return;

	case THREAD_AWAKENED:
		/*
		 * We do not need to go select a message, somebody
		 * handed us one (or a too-large indication).
		 */
		switch (self->ith_state) {
		case MACH_RCV_SCATTER_SMALL:
		case MACH_RCV_TOO_LARGE:
			/*
			 * Somebody tried to give us a too large
			 * message.  If we indicated that we cared,
			 * then they only gave us the indication,
			 * otherwise they gave us the indication
			 * AND the message anyway.
			 */
			if (option & MACH_RCV_LARGE) {
				return;
			}

		case MACH_MSG_SUCCESS:
		case MACH_PEEK_READY:
			return;

		default:
			panic("ipc_mqueue_receive_results: strange ith_state");
		}

	default:
		panic("ipc_mqueue_receive_results: strange wait_result");
	}
}

void
ipc_mqueue_receive_continue(
	__unused void *param,
	wait_result_t wresult)
{
	ipc_mqueue_receive_results(wresult);
	mach_msg_receive_continue();  /* hard-coded for now */
}

/*
 *	Routine:	ipc_mqueue_receive
 *	Purpose:
 *		Receive a message from a message queue.
 *
 *	Conditions:
 *		Our caller must hold a reference for the port or port set
 *		to which this queue belongs, to keep the queue
 *		from being deallocated.
 *
 *		The kmsg is returned with clean header fields
 *		and with the circular bit turned off through the ith_kmsg
 *		field of the thread's receive continuation state.
 *	Returns:
 *		MACH_MSG_SUCCESS	Message returned in ith_kmsg.
 *		MACH_RCV_TOO_LARGE	Message size returned in ith_msize.
 *		MACH_RCV_TIMED_OUT	No message obtained.
 *		MACH_RCV_INTERRUPTED	No message obtained.
 *		MACH_RCV_PORT_DIED	Port/set died; no message.
 *		MACH_RCV_PORT_CHANGED	Port moved into set; no msg.
 *
 */

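/*
 * When the receiver blocks with a continuation armed (ith_continuation
 * set), the kernel stack is not preserved across the block: the thread
 * resumes in ipc_mqueue_receive_continue above, which re-runs the result
 * handling and then re-enters the message copyout path.
 */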
void
ipc_mqueue_receive(
	ipc_mqueue_t            mqueue,
	mach_msg_option_t       option,
	mach_msg_size_t         max_size,
	mach_msg_timeout_t      rcv_timeout,
	int                     interruptible)
{
	wait_result_t wresult;
	thread_t self = current_thread();

	imq_lock(mqueue);
	wresult = ipc_mqueue_receive_on_thread(mqueue, option, max_size,
	    rcv_timeout, interruptible,
	    self);
	/* mqueue unlocked */
	if (wresult == THREAD_NOT_WAITING) {
		return;
	}

	if (wresult == THREAD_WAITING) {
		counter((interruptible == THREAD_ABORTSAFE) ?
		    c_ipc_mqueue_receive_block_user++ :
		    c_ipc_mqueue_receive_block_kernel++);

		if (self->ith_continuation) {
			thread_block(ipc_mqueue_receive_continue);
		}
		/* NOTREACHED */

		wresult = thread_block(THREAD_CONTINUE_NULL);
	}
	ipc_mqueue_receive_results(wresult);
}

1052 | ||
0a7de745 A |
1053 | static int |
1054 | mqueue_process_prepost_receive(void *ctx, struct waitq *waitq, | |
1055 | struct waitq_set *wqset) | |
3e170ce0 A |
1056 | { |
1057 | ipc_mqueue_t port_mq, *pmq_ptr; | |
1058 | ||
1059 | (void)wqset; | |
1060 | port_mq = (ipc_mqueue_t)waitq; | |
1061 | ||
1062 | /* | |
1063 | * If there are no messages on this queue, skip it and remove | |
1064 | * it from the prepost list | |
1065 | */ | |
0a7de745 | 1066 | if (ipc_kmsg_queue_empty(&port_mq->imq_messages)) { |
3e170ce0 | 1067 | return WQ_ITERATE_INVALIDATE_CONTINUE; |
0a7de745 | 1068 | } |
3e170ce0 A |
1069 | |
1070 | /* | |
1071 | * There are messages waiting on this port. | |
1072 | * Instruct the prepost iteration logic to break, but keep the | |
1073 | * waitq locked. | |
1074 | */ | |
1075 | pmq_ptr = (ipc_mqueue_t *)ctx; | |
0a7de745 | 1076 | if (pmq_ptr) { |
3e170ce0 | 1077 | *pmq_ptr = port_mq; |
0a7de745 | 1078 | } |
3e170ce0 A |
1079 | return WQ_ITERATE_BREAK_KEEP_LOCKED; |
1080 | } | |
1081 | ||
39037602 A |
1082 | /* |
1083 | * Routine: ipc_mqueue_receive_on_thread | |
1084 | * Purpose: | |
1085 | * Receive a message from a message queue using a specified thread. | |
1086 | * If no message available, assert_wait on the appropriate waitq. | |
1087 | * | |
1088 | * Conditions: | |
1089 | * Assumes thread is self. | |
1090 | * Called with mqueue locked. | |
1091 | * Returns with mqueue unlocked. | |
1092 | * May have assert-waited. Caller must block in those cases. | |
1093 | */ | |
b0d623f7 A |
1094 | wait_result_t |
1095 | ipc_mqueue_receive_on_thread( | |
39037602 | 1096 | ipc_mqueue_t mqueue, |
b0d623f7 A |
1097 | mach_msg_option_t option, |
1098 | mach_msg_size_t max_size, | |
1099 | mach_msg_timeout_t rcv_timeout, | |
1100 | int interruptible, | |
1101 | thread_t thread) | |
1c79356b | 1102 | { |
91447636 | 1103 | wait_result_t wresult; |
0a7de745 | 1104 | uint64_t deadline; |
d9a64523 | 1105 | struct turnstile *rcv_turnstile = TURNSTILE_NULL; |
1c79356b | 1106 | |
39037602 A |
1107 | /* called with mqueue locked */ |
1108 | ||
3e170ce0 | 1109 | /* no need to reserve anything: we never prepost to anyone */ |
39037602 A |
1110 | |
1111 | if (!imq_valid(mqueue)) { | |
1112 | /* someone raced us to destroy this mqueue/port! */ | |
1113 | imq_unlock(mqueue); | |
1114 | /* | |
1115 | * ipc_mqueue_receive_results updates the thread's ith_state | |
1116 | * TODO: differentiate between rights being moved and | |
1117 | * rights/ports being destroyed (21885327) | |
1118 | */ | |
1119 | return THREAD_RESTART; | |
1120 | } | |
d9a64523 | 1121 | |
1c79356b | 1122 | if (imq_is_set(mqueue)) { |
3e170ce0 | 1123 | ipc_mqueue_t port_mq = IMQ_NULL; |
1c79356b | 1124 | |
3e170ce0 | 1125 | (void)waitq_set_iterate_preposts(&mqueue->imq_set_queue, |
0a7de745 A |
1126 | &port_mq, |
1127 | mqueue_process_prepost_receive); | |
1c79356b | 1128 | |
3e170ce0 | 1129 | if (port_mq != IMQ_NULL) { |
1c79356b | 1130 | /* |
3e170ce0 A |
1131 | * We get here if there is at least one message |
1132 | * waiting on port_mq. We have instructed the prepost | |
1133 | * iteration logic to leave both the port_mq and the | |
1134 | * set mqueue locked. | |
1135 | * | |
1136 | * TODO: previously, we would place this port at the | |
1137 | * back of the prepost list... | |
1c79356b | 1138 | */ |
3e170ce0 | 1139 | imq_unlock(mqueue); |
b0d623f7 | 1140 | |
b0d623f7 A |
1141 | /* |
1142 | * Continue on to handling the message with just | |
1143 | * the port mqueue locked. | |
1144 | */ | |
0a7de745 | 1145 | if (option & MACH_PEEK_MSG) { |
39037602 | 1146 | ipc_mqueue_peek_on_thread(port_mq, option, thread); |
0a7de745 | 1147 | } else { |
39037602 | 1148 | ipc_mqueue_select_on_thread(port_mq, mqueue, option, |
0a7de745 A |
1149 | max_size, thread); |
1150 | } | |
3e170ce0 | 1151 | |
1c79356b | 1152 | imq_unlock(port_mq); |
b0d623f7 | 1153 | return THREAD_NOT_WAITING; |
1c79356b | 1154 | } |
94ff46dc | 1155 | } else if (imq_is_queue(mqueue) || imq_is_turnstile_proxy(mqueue)) { |
3e170ce0 | 1156 | ipc_kmsg_queue_t kmsgs; |
1c79356b A |
1157 | |
1158 | /* | |
1159 | * Receive on a single port. Just try to get the messages. | |
1160 | */ | |
d9a64523 | 1161 | kmsgs = &mqueue->imq_messages; |
1c79356b | 1162 | if (ipc_kmsg_queue_first(kmsgs) != IKM_NULL) { |
0a7de745 | 1163 | if (option & MACH_PEEK_MSG) { |
39037602 | 1164 | ipc_mqueue_peek_on_thread(mqueue, option, thread); |
0a7de745 | 1165 | } else { |
39037602 | 1166 | ipc_mqueue_select_on_thread(mqueue, IMQ_NULL, option, |
0a7de745 A |
1167 | max_size, thread); |
1168 | } | |
1c79356b | 1169 | imq_unlock(mqueue); |
b0d623f7 | 1170 | return THREAD_NOT_WAITING; |
1c79356b | 1171 | } |
39037602 A |
1172 | } else { |
1173 | panic("Unknown mqueue type 0x%x: likely memory corruption!\n", | |
0a7de745 | 1174 | mqueue->imq_wait_queue.waitq_type); |
1c79356b | 1175 | } |
d9a64523 | 1176 | |
1c79356b A |
1177 | /* |
1178 | * Looks like we'll have to block. The mqueue we will | |
1179 | * block on (whether the set's or the local port's) is | |
1180 | * still locked. | |
1181 | */ | |
1c79356b | 1182 | if (option & MACH_RCV_TIMEOUT) { |
91447636 | 1183 | if (rcv_timeout == 0) { |
1c79356b | 1184 | imq_unlock(mqueue); |
b0d623f7 A |
1185 | thread->ith_state = MACH_RCV_TIMED_OUT; |
1186 | return THREAD_NOT_WAITING; | |
1c79356b A |
1187 | } |
1188 | } | |
1189 | ||
b0d623f7 | 1190 | thread->ith_option = option; |
39037602 A |
1191 | thread->ith_rsize = max_size; |
1192 | thread->ith_msize = 0; | |
1193 | ||
0a7de745 | 1194 | if (option & MACH_PEEK_MSG) { |
39037602 | 1195 | thread->ith_state = MACH_PEEK_IN_PROGRESS; |
0a7de745 | 1196 | } else { |
39037602 | 1197 | thread->ith_state = MACH_RCV_IN_PROGRESS; |
0a7de745 | 1198 | } |
55e303ae | 1199 | |
0a7de745 A |
1200 | if (option & MACH_RCV_TIMEOUT) { |
1201 | clock_interval_to_deadline(rcv_timeout, 1000 * NSEC_PER_USEC, &deadline); | |
1202 | } else { | |
91447636 | 1203 | deadline = 0; |
0a7de745 | 1204 | } |
91447636 | 1205 | |
d9a64523 | 1206 | /* |
94ff46dc | 1207 | * Threads waiting on a reply port (not portset) |
cb323159 A |
1208 | * will wait on its receive turnstile. |
1209 | * | |
d9a64523 A |
1210 | * Donate waiting thread's turnstile and |
1211 | * setup inheritor for special reply port. | |
1212 | * Based on the state of the special reply | |
1213 | * port, the inheritor would be the send | |
1214 | * turnstile of the connection port on which | |
1215 | * the send of sync ipc would happen or | |
1216 | * workloop's turnstile who would reply to | |
1217 | * the sync ipc message. | |
1218 | * | |
1219 | * Pass in mqueue wait in waitq_assert_wait to | |
1220 | * support port set wakeup. The mqueue waitq of port | |
1221 | * will be converted to to turnstile waitq | |
1222 | * in waitq_assert_wait instead of global waitqs. | |
1223 | */ | |
94ff46dc | 1224 | if (imq_is_turnstile_proxy(mqueue)) { |
d9a64523 A |
1225 | ipc_port_t port = ip_from_mq(mqueue); |
1226 | rcv_turnstile = turnstile_prepare((uintptr_t)port, | |
0a7de745 A |
1227 | port_rcv_turnstile_address(port), |
1228 | TURNSTILE_NULL, TURNSTILE_SYNC_IPC); | |
d9a64523 | 1229 | |
cb323159 A |
1230 | ipc_port_recv_update_inheritor(port, rcv_turnstile, |
1231 | TURNSTILE_DELAYED_UPDATE); | |
d9a64523 A |
1232 | } |
1233 | ||
813fb2f6 | 1234 | thread_set_pending_block_hint(thread, kThreadWaitPortReceive); |
3e170ce0 | 1235 | wresult = waitq_assert_wait64_locked(&mqueue->imq_wait_queue, |
0a7de745 A |
1236 | IPC_MQUEUE_RECEIVE, |
1237 | interruptible, | |
1238 | TIMEOUT_URGENCY_USER_NORMAL, | |
1239 | deadline, | |
1240 | TIMEOUT_NO_LEEWAY, | |
1241 | thread); | |
b0d623f7 | 1242 | /* preposts should be detected above, not here */ |
0a7de745 | 1243 | if (wresult == THREAD_AWAKENED) { |
b0d623f7 | 1244 | panic("ipc_mqueue_receive_on_thread: sleep walking"); |
0a7de745 | 1245 | } |
b0d623f7 | 1246 | |
55e303ae | 1247 | imq_unlock(mqueue); |
39037602 | 1248 | |
d9a64523 A |
1249 | /* Check if its a port mqueue and if it needs to call turnstile_update_inheritor_complete */ |
1250 | if (rcv_turnstile != TURNSTILE_NULL) { | |
1251 | turnstile_update_inheritor_complete(rcv_turnstile, TURNSTILE_INTERLOCK_NOT_HELD); | |
1252 | } | |
1253 | /* Its callers responsibility to call turnstile_complete to get the turnstile back */ | |
1254 | ||
b0d623f7 | 1255 | return wresult; |
1c79356b A |
1256 | } |


/*
 *	Routine:	ipc_mqueue_peek_on_thread
 *	Purpose:
 *		A receiver discovered that there was a message on the queue
 *		before it had to block.  Tell a thread about the message queue,
 *		but don't pick off any messages.
 *	Conditions:
 *		port_mq locked
 *		at least one message on port_mq's message queue
 *
 *	Returns: (on thread->ith_state)
 *		MACH_PEEK_READY		ith_peekq contains a message queue
 */
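/*
 * Peek protocol: rather than dequeueing a message, the woken thread is
 * pointed at the whole mqueue via ith_peekq, along with a port reference
 * taken here; the peeker must eventually drop that reference via
 * ipc_mqueue_release_peek_ref().
 */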
void
ipc_mqueue_peek_on_thread(
	ipc_mqueue_t        port_mq,
	mach_msg_option_t   option,
	thread_t            thread)
{
	(void)option;
	assert(option & MACH_PEEK_MSG);
	assert(ipc_kmsg_queue_first(&port_mq->imq_messages) != IKM_NULL);

	/*
	 * Take a reference on the mqueue's associated port:
	 * the peeking thread will be responsible to release this reference
	 * using ip_release_mq()
	 */
	ip_reference_mq(port_mq);
	thread->ith_peekq = port_mq;
	thread->ith_state = MACH_PEEK_READY;
}

/*
 *	Routine:	ipc_mqueue_select_on_thread
 *	Purpose:
 *		A receiver discovered that there was a message on the queue
 *		before it had to block.  Pick the message off the queue and
 *		"post" it to thread.
 *	Conditions:
 *		mqueue locked.
 *		thread not locked.
 *		There is a message.
 *		No need to reserve prepost objects - it will never prepost
 *
 *	Returns:
 *		MACH_MSG_SUCCESS	Actually selected a message for ourselves.
 *		MACH_RCV_TOO_LARGE	May or may not have pulled it, but it is large
 */
1308 | void | |
b0d623f7 | 1309 | ipc_mqueue_select_on_thread( |
0a7de745 A |
1310 | ipc_mqueue_t port_mq, |
1311 | ipc_mqueue_t set_mq, | |
1312 | mach_msg_option_t option, | |
1313 | mach_msg_size_t max_size, | |
b0d623f7 | 1314 | thread_t thread) |
1c79356b | 1315 | { |
1c79356b | 1316 | ipc_kmsg_t kmsg; |
b0d623f7 | 1317 | mach_msg_return_t mr = MACH_MSG_SUCCESS; |
39037602 | 1318 | mach_msg_size_t msize; |
1c79356b | 1319 | |
1c79356b A |
1320 | /* |
1321 | * Do some sanity checking of our ability to receive | |
1322 | * before pulling the message off the queue. | |
1323 | */ | |
3e170ce0 | 1324 | kmsg = ipc_kmsg_queue_first(&port_mq->imq_messages); |
1c79356b A |
1325 | assert(kmsg != IKM_NULL); |
1326 | ||
1c79356b A |
1327 | /* |
1328 | * If we really can't receive it, but we had the | |
1329 | * MACH_RCV_LARGE option set, then don't take it off | |
1330 | * the queue, instead return the appropriate error | |
1331 | * (and size needed). | |
1332 | */ | |
39037602 | 1333 | msize = ipc_kmsg_copyout_size(kmsg, thread->map); |
d9a64523 | 1334 | if (msize + REQUESTED_TRAILER_SIZE(thread_is_64bit_addr(thread), option) > max_size) { |
91447636 A |
1335 | mr = MACH_RCV_TOO_LARGE; |
1336 | if (option & MACH_RCV_LARGE) { | |
3e170ce0 | 1337 | thread->ith_receiver_name = port_mq->imq_receiver_name; |
b0d623f7 | 1338 | thread->ith_kmsg = IKM_NULL; |
39037602 | 1339 | thread->ith_msize = msize; |
b0d623f7 A |
1340 | thread->ith_seqno = 0; |
1341 | thread->ith_state = mr; | |
91447636 A |
1342 | return; |
1343 | } | |
1c79356b A |
1344 | } |
1345 | ||
39037602 A |
1346 | ipc_kmsg_rmqueue(&port_mq->imq_messages, kmsg); |
1347 | #if MACH_FLIPC | |
0a7de745 A |
1348 | if (MACH_NODE_VALID(kmsg->ikm_node) && FPORT_VALID(port_mq->imq_fport)) { |
1349 | flipc_msg_ack(kmsg->ikm_node, port_mq, TRUE); | |
1350 | } | |
39037602 | 1351 | #endif |
3e170ce0 A |
1352 | ipc_mqueue_release_msgcount(port_mq, set_mq); |
1353 | thread->ith_seqno = port_mq->imq_seqno++; | |
b0d623f7 A |
1354 | thread->ith_kmsg = kmsg; |
1355 | thread->ith_state = mr; | |
1c79356b A |
1356 | |
1357 | current_task()->messages_received++; | |
1358 | return; | |
1359 | } | |
1360 | ||
b0d623f7 | 1361 | /* |
39037602 | 1362 | * Routine: ipc_mqueue_peek_locked |
b0d623f7 | 1363 | * Purpose: |
39236c6e A |
1364 | * Peek at a (non-set) message queue to see if it has a message |
1365 | * matching the sequence number provided (if zero, then the | |
1366 | * first message in the queue) and return vital info about the | |
1367 | * message. | |
1368 | * | |
1369 | * Conditions: | |
39037602 A |
1370 | * The ipc_mqueue_t is locked by callers. |
1371 | * Other locks may be held by callers, so this routine cannot block. | |
39236c6e A |
1372 | * Caller holds reference on the message queue. |
1373 | */ | |
1374 | unsigned | |
39037602 | 1375 | ipc_mqueue_peek_locked(ipc_mqueue_t mq, |
0a7de745 A |
1376 | mach_port_seqno_t * seqnop, |
1377 | mach_msg_size_t * msg_sizep, | |
1378 | mach_msg_id_t * msg_idp, | |
1379 | mach_msg_max_trailer_t * msg_trailerp, | |
1380 | ipc_kmsg_t *kmsgp) | |
39236c6e A |
1381 | { |
1382 | ipc_kmsg_queue_t kmsgq; | |
3e170ce0 | 1383 | ipc_kmsg_t kmsg; |
39236c6e | 1384 | mach_port_seqno_t seqno, msgoff; |
39037602 | 1385 | unsigned res = 0; |
39236c6e A |
1386 | |
1387 | assert(!imq_is_set(mq)); | |
1388 | ||
3e170ce0 | 1389 | seqno = 0; |
0a7de745 | 1390 | if (seqnop != NULL) { |
3e170ce0 | 1391 | seqno = *seqnop; |
0a7de745 | 1392 | } |
39236c6e A |
1393 | |
1394 | if (seqno == 0) { | |
1395 | seqno = mq->imq_seqno; | |
1396 | msgoff = 0; | |
d9a64523 | 1397 | } else if (seqno >= mq->imq_seqno && |
0a7de745 | 1398 | seqno < mq->imq_seqno + mq->imq_msgcount) { |
39236c6e | 1399 | msgoff = seqno - mq->imq_seqno; |
0a7de745 | 1400 | } else { |
39236c6e | 1401 | goto out; |
0a7de745 | 1402 | } |
39236c6e A |
1403 | |
1404 | /* look for the message that would match that seqno */ | |
1405 | kmsgq = &mq->imq_messages; | |
1406 | kmsg = ipc_kmsg_queue_first(kmsgq); | |
1407 | while (msgoff-- && kmsg != IKM_NULL) { | |
1408 | kmsg = ipc_kmsg_queue_next(kmsgq, kmsg); | |
1409 | } | |
0a7de745 | 1410 | if (kmsg == IKM_NULL) { |
39236c6e | 1411 | goto out; |
0a7de745 | 1412 | } |
39236c6e A |
1413 | |
1414 | /* found one - return the requested info */ | |
0a7de745 | 1415 | if (seqnop != NULL) { |
39236c6e | 1416 | *seqnop = seqno; |
0a7de745 A |
1417 | } |
1418 | if (msg_sizep != NULL) { | |
39236c6e | 1419 | *msg_sizep = kmsg->ikm_header->msgh_size; |
0a7de745 A |
1420 | } |
1421 | if (msg_idp != NULL) { | |
39236c6e | 1422 | *msg_idp = kmsg->ikm_header->msgh_id; |
0a7de745 A |
1423 | } |
1424 | if (msg_trailerp != NULL) { | |
d9a64523 | 1425 | memcpy(msg_trailerp, |
0a7de745 A |
1426 | (mach_msg_max_trailer_t *)((vm_offset_t)kmsg->ikm_header + |
1427 | round_msg(kmsg->ikm_header->msgh_size)), | |
1428 | sizeof(mach_msg_max_trailer_t)); | |
1429 | } | |
1430 | if (kmsgp != NULL) { | |
39037602 | 1431 | *kmsgp = kmsg; |
0a7de745 | 1432 | } |
39037602 | 1433 | |
39236c6e A |
1434 | res = 1; |
1435 | ||
39037602 A |
1436 | out: |
1437 | return res; | |
1438 | } | |

/*
 *	Routine:	ipc_mqueue_peek
 *	Purpose:
 *		Peek at a (non-set) message queue to see if it has a message
 *		matching the sequence number provided (if zero, then the
 *		first message in the queue) and return vital info about the
 *		message.
 *
 *	Conditions:
 *		The ipc_mqueue_t is unlocked.
 *		Locks may be held by callers, so this routine cannot block.
 *		Caller holds reference on the message queue.
 */
unsigned
ipc_mqueue_peek(ipc_mqueue_t mq,
    mach_port_seqno_t * seqnop,
    mach_msg_size_t * msg_sizep,
    mach_msg_id_t * msg_idp,
    mach_msg_max_trailer_t * msg_trailerp,
    ipc_kmsg_t *kmsgp)
{
	unsigned res;

	imq_lock(mq);

	res = ipc_mqueue_peek_locked(mq, seqnop, msg_sizep, msg_idp,
	    msg_trailerp, kmsgp);

	imq_unlock(mq);
	return res;
}
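
/*
 * Illustrative sketch (editor's addition): the common "peek at the head"
 * case, passing a zero sequence number so the first queued message (if any)
 * is described. Compiled out; for exposition only.
 */
#if 0
static boolean_t
example_queue_has_message(ipc_mqueue_t mq, mach_msg_id_t *idp)
{
	mach_port_seqno_t seqno = 0;    /* 0 => first message in the queue */
	mach_msg_size_t size;

	/* nonzero iff a message was found; locks/unlocks internally */
	return ipc_mqueue_peek(mq, &seqno, &size, idp, NULL, NULL) != 0;
}
#endif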

/*
 *	Routine:	ipc_mqueue_release_peek_ref
 *	Purpose:
 *		Release the reference on an mqueue's associated port which was
 *		granted to a thread in ipc_mqueue_peek_on_thread (on the
 *		MACH_PEEK_MSG thread wakeup path).
 *
 *	Conditions:
 *		The ipc_mqueue_t should be locked on entry.
 *		The ipc_mqueue_t will be _unlocked_ on return
 *			(and potentially invalid!)
 */
void
ipc_mqueue_release_peek_ref(ipc_mqueue_t mq)
{
	assert(!imq_is_set(mq));
	assert(imq_held(mq));

	/*
	 * clear any preposts this mq may have generated
	 * (which would cause subsequent immediate wakeups)
	 */
	waitq_clear_prepost_locked(&mq->imq_wait_queue);

	imq_unlock(mq);

	/*
	 * release the port reference: we need to do this outside the lock
	 * because we might be holding the last port reference!
	 */
	ip_release_mq(mq);
}
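
/*
 * Illustrative sketch (editor's addition): a MACH_PEEK_MSG wakeup handler
 * enters with 'mq' locked and must treat the queue as gone once the ref is
 * dropped. Compiled out; for exposition only.
 */
#if 0
static void
example_peek_wakeup_done(ipc_mqueue_t mq)
{
	/* entered with 'mq' locked, as on the MACH_PEEK_MSG wakeup path */
	ipc_mqueue_release_peek_ref(mq);
	/* 'mq' is now unlocked and may already be invalid: do not touch it */
}
#endif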

/*
 * Peek at the contained port message queues: break the prepost iteration as
 * soon as we spot a message on one of the message queues referenced by the
 * set's prepost list. There is no need to lock each message queue, as only
 * the head of each queue is checked. If a message wasn't there before we
 * entered, we don't need to find it (if we do, great).
 */
static int
mqueue_peek_iterator(void *ctx, struct waitq *waitq,
    struct waitq_set *wqset)
{
	ipc_mqueue_t port_mq = (ipc_mqueue_t)waitq;
	ipc_kmsg_queue_t kmsgs = &port_mq->imq_messages;

	(void)ctx;
	(void)wqset;

	if (ipc_kmsg_queue_first(kmsgs) != IKM_NULL) {
		return WQ_ITERATE_BREAK; /* break out of the prepost iteration */
	}
	return WQ_ITERATE_CONTINUE;
}
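
/*
 * Illustrative sketch (editor's addition): the same callback shape can carry
 * state through the 'ctx' pointer, e.g. to count non-empty member queues
 * instead of stopping at the first one. Hypothetical; compiled out.
 */
#if 0
static int
example_count_iterator(void *ctx, struct waitq *waitq,
    struct waitq_set *wqset)
{
	unsigned *nonempty = (unsigned *)ctx;
	ipc_mqueue_t port_mq = (ipc_mqueue_t)waitq;

	(void)wqset;

	if (ipc_kmsg_queue_first(&port_mq->imq_messages) != IKM_NULL) {
		(*nonempty)++;
	}
	return WQ_ITERATE_CONTINUE;     /* visit every preposted queue */
}
#endif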

/*
 *	Routine:	ipc_mqueue_set_peek
 *	Purpose:
 *		Peek at a message queue set to see if it has any ports
 *		with messages.
 *
 *	Conditions:
 *		Locks may be held by callers, so this routine cannot block.
 *		Caller holds reference on the message queue.
 */
unsigned
ipc_mqueue_set_peek(ipc_mqueue_t mq)
{
	int ret;

	imq_lock(mq);

	/*
	 * We may have raced with port destruction where the mqueue is marked
	 * as invalid. In that case, even though we don't have messages, we
	 * have an end-of-life event to deliver.
	 */
	if (!imq_is_valid(mq)) {
		imq_unlock(mq); /* drop the lock taken above before returning */
		return 1;
	}

	ret = waitq_set_iterate_preposts(&mq->imq_set_queue, NULL,
	    mqueue_peek_iterator);

	imq_unlock(mq);

	return ret == WQ_ITERATE_BREAK;
}
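
/*
 * Illustrative sketch (editor's addition): a caller polling a port set
 * before committing to a receive. A nonzero result means either some member
 * port has a queued message or the set raced with destruction. Compiled out.
 */
#if 0
static boolean_t
example_pset_is_ready(ipc_pset_t pset)
{
	return ipc_mqueue_set_peek(&pset->ips_messages) != 0;
}
#endif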

/*
 *	Routine:	ipc_mqueue_set_gather_member_names
 *	Purpose:
 *		Discover all ports which are members of a given port set.
 *		Because the waitq linkage mechanism was redesigned to save
 *		significant amounts of memory, it no longer keeps back-pointers
 *		from a port set to a port. Therefore, we must iterate over all
 *		ports within a given IPC space and individually query them to
 *		see if they are members of the given set. Port names of ports
 *		found to be members of the given set will be gathered into the
 *		provided 'names' array. Actual returned names are limited to
 *		maxnames entries, but we keep counting the actual number of
 *		members to let the caller decide to retry if necessary.
 *
 *	Conditions:
 *		Locks may be held by callers, so this routine cannot block.
 *		Caller holds reference on the message queue (via port set).
 */
void
ipc_mqueue_set_gather_member_names(
	ipc_space_t space,
	ipc_mqueue_t set_mq,
	ipc_entry_num_t maxnames,
	mach_port_name_t *names,
	ipc_entry_num_t *actualp)
{
	ipc_entry_t table;
	ipc_entry_num_t tsize;
	struct waitq_set *wqset;
	ipc_entry_num_t actual = 0;

	assert(set_mq != IMQ_NULL);
	wqset = &set_mq->imq_set_queue;

	assert(space != IS_NULL);
	is_read_lock(space);
	if (!is_active(space)) {
		is_read_unlock(space);
		goto out;
	}

	if (!waitq_set_is_valid(wqset)) {
		is_read_unlock(space);
		goto out;
	}

	table = space->is_table;
	tsize = space->is_table_size;
	for (ipc_entry_num_t idx = 0; idx < tsize; idx++) {
		ipc_entry_t entry = &table[idx];

		/* only receive rights can be members of port sets */
		if ((entry->ie_bits & MACH_PORT_TYPE_RECEIVE) != MACH_PORT_TYPE_NONE) {
			ipc_port_t port = ip_object_to_port(entry->ie_object);
			ipc_mqueue_t mq = &port->ip_messages;

			assert(IP_VALID(port));
			if (ip_active(port) &&
			    waitq_member(&mq->imq_wait_queue, wqset)) {
				if (actual < maxnames) {
					names[actual] = mq->imq_receiver_name;
				}
				actual++;
			}
		}
	}

	is_read_unlock(space);

out:
	*actualp = actual;
}
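
/*
 * Illustrative sketch (editor's addition): the retry pattern the header
 * comment describes -- if more members exist than 'maxnames' allows,
 * 'actual' still reports the full count so the caller can grow the buffer
 * and call again. The buffer management is hypothetical. Compiled out.
 */
#if 0
static void
example_gather_with_retry(ipc_space_t space, ipc_mqueue_t set_mq)
{
	ipc_entry_num_t maxnames = 16;  /* initial guess */
	mach_port_name_t names[16];
	ipc_entry_num_t actual;

	ipc_mqueue_set_gather_member_names(space, set_mq, maxnames,
	    names, &actual);
	if (actual > maxnames) {
		/* too small: grow the buffer to 'actual' entries and retry */
	}
}
#endif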

/*
 *	Routine:	ipc_mqueue_destroy_locked
 *	Purpose:
 *		Destroy a (non-set) message queue.
 *		Set any blocked senders running.
 *		Destroy the kmsgs in the queue.
 *	Conditions:
 *		mqueue locked
 *		Receivers were removed when the receive right was "changed"
 */
boolean_t
ipc_mqueue_destroy_locked(ipc_mqueue_t mqueue)
{
	ipc_kmsg_queue_t kmqueue;
	ipc_kmsg_t kmsg;
	boolean_t reap = FALSE;
	struct turnstile *send_turnstile = port_send_turnstile(ip_from_mq(mqueue));

	assert(!imq_is_set(mqueue));

	/*
	 * rouse all blocked senders
	 * (don't boost anyone - we're tearing this queue down)
	 * (never preposts)
	 */
	mqueue->imq_fullwaiters = FALSE;

	if (send_turnstile != TURNSTILE_NULL) {
		waitq_wakeup64_all(&send_turnstile->ts_waitq,
		    IPC_MQUEUE_FULL,
		    THREAD_RESTART,
		    WAITQ_ALL_PRIORITIES);
	}

	/*
	 * Move messages from the specified queue to the per-thread
	 * clean/drain queue while we have the mqueue lock.
	 */
	kmqueue = &mqueue->imq_messages;
	while ((kmsg = ipc_kmsg_dequeue(kmqueue)) != IKM_NULL) {
#if MACH_FLIPC
		if (MACH_NODE_VALID(kmsg->ikm_node) && FPORT_VALID(mqueue->imq_fport)) {
			flipc_msg_ack(kmsg->ikm_node, mqueue, TRUE);
		}
#endif
		boolean_t first;
		first = ipc_kmsg_delayed_destroy(kmsg);
		if (first) {
			reap = first;
		}
	}

	/*
	 * Wipe out message count, both for messages about to be
	 * reaped and for reserved space for (previously) woken senders.
	 * This is the indication to them that their reserved space is gone
	 * (the mqueue was destroyed).
	 */
	mqueue->imq_msgcount = 0;

	/* invalidate the waitq for subsequent mqueue operations */
	waitq_invalidate_locked(&mqueue->imq_wait_queue);

	/* clear out any preposting we may have done */
	waitq_clear_prepost_locked(&mqueue->imq_wait_queue);

	/*
	 * assert that we are destroying / invalidating a queue that's
	 * not a member of any other queue.
	 */
	assert(mqueue->imq_preposts == 0);
	assert(mqueue->imq_in_pset == 0);

	return reap;
}
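
/*
 * Illustrative sketch (editor's addition): the return value tells the caller
 * whether this thread queued the first kmsg on its per-thread destroy list,
 * i.e. whether it now owes a reap pass once all locks are dropped.
 * 'ipc_kmsg_reap_delayed' is assumed from ipc_kmsg. Compiled out.
 */
#if 0
static void
example_destroy_and_reap(ipc_mqueue_t mqueue)
{
	boolean_t reap;

	imq_lock(mqueue);
	reap = ipc_mqueue_destroy_locked(mqueue);
	imq_unlock(mqueue);

	if (reap) {
		/* destroy the kmsgs moved to this thread's delayed list */
		ipc_kmsg_reap_delayed();
	}
}
#endif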

/*
 *	Routine:	ipc_mqueue_set_qlimit
 *	Purpose:
 *		Changes a message queue limit; the maximum number
 *		of messages which may be queued.
 *	Conditions:
 *		Nothing locked.
 */

void
ipc_mqueue_set_qlimit(
	ipc_mqueue_t mqueue,
	mach_port_msgcount_t qlimit)
{
	assert(qlimit <= MACH_PORT_QLIMIT_MAX);

	/* wake up senders allowed by the new qlimit */
	imq_lock(mqueue);
	if (qlimit > mqueue->imq_qlimit) {
		mach_port_msgcount_t i, wakeup;
		struct turnstile *send_turnstile = port_send_turnstile(ip_from_mq(mqueue));

		/* caution: wakeup, qlimit are unsigned */
		wakeup = qlimit - mqueue->imq_qlimit;

		for (i = 0; i < wakeup; i++) {
			/*
			 * boost the priority of the awoken thread
			 * (WAITQ_PROMOTE_PRIORITY) to ensure it uses
			 * the message queue slot we've just reserved.
			 *
			 * NOTE: this will never prepost
			 */
			if (send_turnstile == TURNSTILE_NULL ||
			    waitq_wakeup64_one(&send_turnstile->ts_waitq,
			    IPC_MQUEUE_FULL,
			    THREAD_AWAKENED,
			    WAITQ_PROMOTE_PRIORITY) == KERN_NOT_WAITING) {
				mqueue->imq_fullwaiters = FALSE;
				break;
			}
			mqueue->imq_msgcount++; /* give it to the awakened thread */
		}
	}
	mqueue->imq_qlimit = qlimit;
	imq_unlock(mqueue);
}
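
/*
 * Illustrative sketch (editor's addition): raising the limit on a port's
 * queue wakes as many blocked senders as there are newly legal slots, e.g.
 * when servicing a mach_port_set_attributes() style request. Compiled out.
 */
#if 0
static void
example_raise_qlimit(ipc_port_t port)
{
	/* MACH_PORT_QLIMIT_MAX bounds the value, per the assert above */
	ipc_mqueue_set_qlimit(&port->ip_messages, MACH_PORT_QLIMIT_MAX);
}
#endif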

/*
 *	Routine:	ipc_mqueue_set_seqno
 *	Purpose:
 *		Changes an mqueue's sequence number.
 *	Conditions:
 *		Caller holds a reference to the queue's containing object.
 */
void
ipc_mqueue_set_seqno(
	ipc_mqueue_t mqueue,
	mach_port_seqno_t seqno)
{
	imq_lock(mqueue);
	mqueue->imq_seqno = seqno;
	imq_unlock(mqueue);
}
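
/*
 * Illustrative sketch (editor's addition): resetting a port's sequence
 * counter to zero, as a mach_port_set_seqno() style request would.
 * Compiled out; for exposition only.
 */
#if 0
static void
example_reset_seqno(ipc_port_t port)
{
	ipc_mqueue_set_seqno(&port->ip_messages, 0);
}
#endif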

/*
 *	Routine:	ipc_mqueue_copyin
 *	Purpose:
 *		Convert a name in a space to a message queue.
 *	Conditions:
 *		Nothing locked. If successful, the caller gets a ref
 *		for the object. This ref ensures the continued existence
 *		of the queue.
 *	Returns:
 *		MACH_MSG_SUCCESS	Found a message queue.
 *		MACH_RCV_INVALID_NAME	The space is dead.
 *		MACH_RCV_INVALID_NAME	The name doesn't denote a right.
 *		MACH_RCV_INVALID_NAME
 *			The denoted right is not receive or port set.
 *		MACH_RCV_IN_SET		Receive right is a member of a set.
 */

mach_msg_return_t
ipc_mqueue_copyin(
	ipc_space_t space,
	mach_port_name_t name,
	ipc_mqueue_t *mqueuep,
	ipc_object_t *objectp)
{
	ipc_entry_t entry;
	ipc_entry_bits_t bits;
	ipc_object_t object;
	ipc_mqueue_t mqueue;

	is_read_lock(space);
	if (!is_active(space)) {
		is_read_unlock(space);
		return MACH_RCV_INVALID_NAME;
	}

	entry = ipc_entry_lookup(space, name);
	if (entry == IE_NULL) {
		is_read_unlock(space);
		return MACH_RCV_INVALID_NAME;
	}

	bits = entry->ie_bits;
	object = entry->ie_object;

	if (bits & MACH_PORT_TYPE_RECEIVE) {
		ipc_port_t port = ip_object_to_port(object);

		assert(port != IP_NULL);

		ip_lock(port);
		require_ip_active(port);
		assert(port->ip_receiver_name == name);
		assert(port->ip_receiver == space);
		is_read_unlock(space);
		mqueue = &port->ip_messages;
	} else if (bits & MACH_PORT_TYPE_PORT_SET) {
		ipc_pset_t pset = ips_object_to_pset(object);

		assert(pset != IPS_NULL);

		ips_lock(pset);
		assert(ips_active(pset));
		is_read_unlock(space);

		mqueue = &pset->ips_messages;
	} else {
		is_read_unlock(space);
		/* guard exception if we never held the receive right in this entry */
		if ((bits & MACH_PORT_TYPE_EX_RECEIVE) == 0) {
			mach_port_guard_exception(name, 0, 0, kGUARD_EXC_RCV_INVALID_NAME);
		}
		return MACH_RCV_INVALID_NAME;
	}

	/*
	 * At this point, the object is locked and active,
	 * the space is unlocked, and mqueue is initialized.
	 */

	io_reference(object);
	io_unlock(object);

	*objectp = object;
	*mqueuep = mqueue;
	return MACH_MSG_SUCCESS;
}
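
/*
 * Illustrative sketch (editor's addition): a receive-path caller translates
 * a port name to its mqueue, uses it, then drops the reference granted by
 * the copyin. 'io_release' is assumed from ipc_object. Compiled out.
 */
#if 0
static mach_msg_return_t
example_copyin_and_release(ipc_space_t space, mach_port_name_t name)
{
	ipc_mqueue_t mqueue;
	ipc_object_t object;
	mach_msg_return_t mr;

	mr = ipc_mqueue_copyin(space, name, &mqueue, &object);
	if (mr != MACH_MSG_SUCCESS) {
		return mr;              /* MACH_RCV_INVALID_NAME, etc. */
	}

	/* ... receive from 'mqueue' (object keeps it alive) ... */

	io_release(object);             /* drop the ref from copyin */
	return MACH_MSG_SUCCESS;
}
#endif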

void
imq_lock(ipc_mqueue_t mq)
{
	ipc_object_t object = imq_to_object(mq);
	ipc_object_validate(object);
	waitq_lock(&(mq)->imq_wait_queue);
}

unsigned int
imq_lock_try(ipc_mqueue_t mq)
{
	ipc_object_t object = imq_to_object(mq);
	ipc_object_validate(object);
	return waitq_lock_try(&(mq)->imq_wait_queue);
}
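
/*
 * Illustrative sketch (editor's addition): the try-lock variant lets a
 * caller that already holds other locks avoid a lock-ordering stall by
 * backing off instead of blocking. Compiled out; for exposition only.
 */
#if 0
static boolean_t
example_try_peek(ipc_mqueue_t mq)
{
	if (!imq_lock_try(mq)) {
		return FALSE;           /* contended: caller should retry later */
	}
	/* ... brief, non-blocking inspection of the queue ... */
	imq_unlock(mq);
	return TRUE;
}
#endif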