/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 * File:    ipc/ipc_pset.c
 * Author:  Rich Draves
 * Date:    1989
 *
 * Functions to manipulate IPC port sets.
 */

#include <mach/port.h>
#include <mach/kern_return.h>
#include <mach/message.h>
#include <ipc/ipc_mqueue.h>
#include <ipc/ipc_object.h>
#include <ipc/ipc_pset.h>
#include <ipc/ipc_right.h>
#include <ipc/ipc_space.h>
#include <ipc/ipc_port.h>

#include <kern/kern_types.h>

#include <vm/vm_map.h>
#include <libkern/section_keywords.h>

/*
 * Routine:     ipc_pset_alloc
 * Purpose:
 *      Allocate a port set.
 * Conditions:
 *      Nothing locked.  If successful, the port set is returned
 *      locked.  (The caller doesn't have a reference.)
 * Returns:
 *      KERN_SUCCESS            The port set is allocated.
 *      KERN_INVALID_TASK       The space is dead.
 *      KERN_NO_SPACE           No room for an entry in the space.
 *      KERN_RESOURCE_SHORTAGE  Couldn't allocate memory.
 */

kern_return_t
ipc_pset_alloc(
    ipc_space_t       space,
    mach_port_name_t  *namep,
    ipc_pset_t        *psetp)
{
    ipc_pset_t pset;
    mach_port_name_t name;
    kern_return_t kr;
    uint64_t reserved_link;

    reserved_link = waitq_link_reserve(NULL);

    kr = ipc_object_alloc(space, IOT_PORT_SET,
                          MACH_PORT_TYPE_PORT_SET, 0,
                          &name, (ipc_object_t *) &pset);
    if (kr != KERN_SUCCESS) {
        waitq_link_release(reserved_link);
        return kr;
    }
    /* pset and space are locked */

    ipc_mqueue_init(&pset->ips_messages, TRUE /* set */, &reserved_link);
    is_write_unlock(space);

    waitq_link_release(reserved_link);

    *namep = name;
    *psetp = pset;
    return KERN_SUCCESS;
}
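
/*
 * A sketch of typical user-space usage (illustrative only, not code in this
 * file): mach_port_allocate() with MACH_PORT_RIGHT_PORT_SET is expected to
 * reach ipc_pset_alloc() above, and mach_port_insert_member() is expected to
 * reach ipc_pset_add() below.
 *
 *     mach_port_t pset, port;
 *     mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_PORT_SET, &pset);
 *     mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_RECEIVE, &port);
 *     mach_port_insert_member(mach_task_self(), port, pset);
 */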

/*
 * Routine:     ipc_pset_alloc_name
 * Purpose:
 *      Allocate a port set, with a specific name.
 * Conditions:
 *      Nothing locked.  If successful, the port set is returned
 *      locked.  (The caller doesn't have a reference.)
 * Returns:
 *      KERN_SUCCESS            The port set is allocated.
 *      KERN_INVALID_TASK       The space is dead.
 *      KERN_NAME_EXISTS        The name already denotes a right.
 *      KERN_RESOURCE_SHORTAGE  Couldn't allocate memory.
 */

kern_return_t
ipc_pset_alloc_name(
    ipc_space_t       space,
    mach_port_name_t  name,
    ipc_pset_t        *psetp)
{
    ipc_pset_t pset;
    kern_return_t kr;
    uint64_t reserved_link;

    reserved_link = waitq_link_reserve(NULL);

    kr = ipc_object_alloc_name(space, IOT_PORT_SET,
                               MACH_PORT_TYPE_PORT_SET, 0,
                               name, (ipc_object_t *) &pset);
    if (kr != KERN_SUCCESS) {
        waitq_link_release(reserved_link);
        return kr;
    }
    /* pset is locked */

    ipc_mqueue_init(&pset->ips_messages, TRUE /* set */, &reserved_link);

    waitq_link_release(reserved_link);

    *psetp = pset;
    return KERN_SUCCESS;
}


/*
 * Routine:     ipc_pset_alloc_special
 * Purpose:
 *      Allocate a port set in a special space.
 *      The new port set is returned with one ref.
 *      If unsuccessful, IPS_NULL is returned.
 * Conditions:
 *      Nothing locked.
 */
ipc_pset_t
ipc_pset_alloc_special(
    __assert_only ipc_space_t space)
{
    ipc_pset_t pset;
    uint64_t reserved_link;

    assert(space != IS_NULL);
    assert(space->is_table == IE_NULL);
    assert(!is_active(space));

    reserved_link = waitq_link_reserve(NULL);

    __IGNORE_WCASTALIGN(pset = (ipc_pset_t)io_alloc(IOT_PORT_SET));
    if (pset == IPS_NULL) {
        waitq_link_release(reserved_link);
        return IPS_NULL;
    }

    bzero((char *)pset, sizeof(*pset));

    io_lock_init(&pset->ips_object);
    pset->ips_references = 1;
    pset->ips_object.io_bits = io_makebits(TRUE, IOT_PORT_SET, 0);

    ipc_mqueue_init(&pset->ips_messages, TRUE /* set */, &reserved_link);

    waitq_link_release(reserved_link);

    return pset;
}


/*
 * Routine:     ipc_pset_member
 * Purpose:
 *      Checks to see if a port is a member of a pset.
 * Conditions:
 *      Both port and port set are locked.
 *      The port must be active.
 */
boolean_t
ipc_pset_member(
    ipc_pset_t  pset,
    ipc_port_t  port)
{
    assert(ip_active(port));

    return (ipc_mqueue_member(&port->ip_messages, &pset->ips_messages));
}


/*
 * Routine:     ipc_pset_add
 * Purpose:
 *      Puts a port into a port set.
 * Conditions:
 *      Both port and port set are locked and active.
 *      The owner of the port set is also receiver for the port.
 */

kern_return_t
ipc_pset_add(
    ipc_pset_t  pset,
    ipc_port_t  port,
    uint64_t    *reserved_link,
    uint64_t    *reserved_prepost)
{
    kern_return_t kr;

    assert(ips_active(pset));
    assert(ip_active(port));

    kr = ipc_mqueue_add(&port->ip_messages, &pset->ips_messages,
                        reserved_link, reserved_prepost);

    return kr;
}
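
/*
 * A note on the calling convention, inferred from the callers (compare how
 * ipc_pset_alloc() reserves its own link above): reserved_link and
 * reserved_prepost are waitq link/prepost reservations the caller obtains
 * up front, presumably via waitq_link_reserve() and waitq_prepost_reserve(),
 * so that linking the port's mqueue into the set never has to allocate
 * memory while the port and pset locks are held.
 */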


/*
 * Routine:     ipc_pset_remove
 * Purpose:
 *      Removes a port from a port set.
 *      The port set loses a reference.
 * Conditions:
 *      Both port and port set are locked.
 *      The port must be active.
 */

kern_return_t
ipc_pset_remove(
    ipc_pset_t  pset,
    ipc_port_t  port)
{
    kern_return_t kr;

    assert(ip_active(port));

    if (port->ip_in_pset == 0)
        return KERN_NOT_IN_SET;

    kr = ipc_mqueue_remove(&port->ip_messages, &pset->ips_messages);

    return kr;
}

/*
 * Routine:     ipc_pset_remove_from_all
 * Purpose:
 *      Removes a port from all of its port sets.
 * Conditions:
 *      port is locked and active.
 */

kern_return_t
ipc_pset_remove_from_all(
    ipc_port_t  port)
{
    if (port->ip_in_pset == 0)
        return KERN_NOT_IN_SET;

    /*
     * Remove the port's mqueue from all sets
     */
    ipc_mqueue_remove_from_all(&port->ip_messages);
    return KERN_SUCCESS;
}


/*
 * Routine:     ipc_pset_destroy
 * Purpose:
 *      Destroys a port_set.
 * Conditions:
 *      The port_set is locked and alive.
 *      The caller has a reference, which is consumed.
 *      Afterwards, the port_set is unlocked and dead.
 */

void
ipc_pset_destroy(
    ipc_pset_t  pset)
{
    assert(ips_active(pset));

    pset->ips_object.io_bits &= ~IO_BITS_ACTIVE;

    /*
     * remove all the member message queues
     * AND remove this message queue from any containing sets
     */
    ipc_mqueue_remove_all(&pset->ips_messages);

    /*
     * Set all waiters on the portset running to
     * discover the change.
     */
    imq_lock(&pset->ips_messages);
    ipc_mqueue_changed(&pset->ips_messages);
    imq_unlock(&pset->ips_messages);

    ipc_mqueue_deinit(&pset->ips_messages);

    ips_unlock(pset);
    ips_release(pset);  /* consume the ref our caller gave us */
}
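
/*
 * Note that destroying a set does not destroy its member ports:
 * ipc_mqueue_remove_all() only unlinks their message queues from the set,
 * and ipc_mqueue_changed() wakes any threads blocked receiving on the set
 * so they can discover that it is gone.
 */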

/* Kqueue EVFILT_MACHPORT support */

#include <sys/event.h>
#include <sys/errno.h>

static int filt_machportattach(struct knote *kn, struct kevent_internal_s *kev);
static void filt_machportdetach(struct knote *kn);
static int filt_machport(struct knote *kn, long hint);
static int filt_machporttouch(struct knote *kn, struct kevent_internal_s *kev);
static int filt_machportprocess(struct knote *kn, struct filt_process_s *data, struct kevent_internal_s *kev);
static unsigned filt_machportpeek(struct knote *kn);

SECURITY_READ_ONLY_EARLY(struct filterops) machport_filtops = {
    .f_adjusts_qos = 1,
    .f_attach = filt_machportattach,
    .f_detach = filt_machportdetach,
    .f_event = filt_machport,
    .f_touch = filt_machporttouch,
    .f_process = filt_machportprocess,
    .f_peek = filt_machportpeek,
};
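
/*
 * Illustrative registration from user space (a sketch under assumptions,
 * not part of this file): EVFILT_MACHPORT takes the port or port-set name
 * as the identifier, the MACH_RCV_* options honored by filt_machportprocess()
 * in fflags, and an optional receive buffer address/size in ext[0]/ext[1]
 * (mirrored into kn_ext[0]/kn_ext[1] below).
 *
 *     struct kevent64_s kev;
 *     char buf[1024];
 *
 *     EV_SET64(&kev, pset_name, EVFILT_MACHPORT, EV_ADD | EV_ENABLE,
 *         MACH_RCV_MSG, 0, 0, (uint64_t)(uintptr_t)buf, sizeof(buf));
 *     kevent64(kq, &kev, 1, NULL, 0, 0, NULL);
 */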

static int
filt_machportattach(
    struct knote *kn,
    __unused struct kevent_internal_s *kev)
{
    mach_port_name_t name = (mach_port_name_t)kn->kn_kevent.ident;
    uint64_t wq_link_id = waitq_link_reserve(NULL);
    ipc_space_t space = current_space();
    ipc_kmsg_t first;

    int error;
    int result = 0;
    kern_return_t kr;
    ipc_entry_t entry;
    ipc_mqueue_t mqueue;

    kr = ipc_right_lookup_read(space, name, &entry);
    if (kr == KERN_SUCCESS) {
        /* space is read-locked and active */

        if (entry->ie_bits & MACH_PORT_TYPE_PORT_SET) {
            ipc_pset_t pset;

            __IGNORE_WCASTALIGN(pset = (ipc_pset_t)entry->ie_object);
            mqueue = &pset->ips_messages;
            ips_reference(pset);

            imq_lock(mqueue);
            kn->kn_ptr.p_mqueue = mqueue;

            /*
             * Bind the portset wait queue directly to knote/kqueue.
             * This allows us to just use wait_queue foo to effect a wakeup,
             * rather than having to call knote() from the Mach code on each
             * message.  We still attach the knote to the mqueue klist for
             * NOTE_REVOKE purposes only.
             */
            error = knote_link_waitq(kn, &mqueue->imq_wait_queue, &wq_link_id);
            if (!error) {
                KNOTE_ATTACH(&mqueue->imq_klist, kn);
                imq_unlock(mqueue);
            } else {
                kn->kn_ptr.p_mqueue = IMQ_NULL;
                imq_unlock(mqueue);
                ips_release(pset);
            }

            is_read_unlock(space);

            /*
             * linked knotes are marked stay-active and therefore don't
             * need an indication of their fired state to be returned
             * from the attach operation.
             */

        } else if (entry->ie_bits & MACH_PORT_TYPE_RECEIVE) {
            ipc_port_t port;

            __IGNORE_WCASTALIGN(port = (ipc_port_t)entry->ie_object);
            mqueue = &port->ip_messages;
            ip_reference(port);

            /*
             * attach knote to port and determine result
             * If the filter requested direct message receipt,
             * we may need to adjust the qos of the knote to
             * reflect the requested and override qos of the
             * first message in the queue.
             */
            imq_lock(mqueue);
            kn->kn_ptr.p_mqueue = mqueue;
            KNOTE_ATTACH(&mqueue->imq_klist, kn);
            if ((first = ipc_kmsg_queue_first(&mqueue->imq_messages)) != IKM_NULL) {
                int sync_qos_override_index = ipc_port_get_max_sync_qos_index(port);
                if (kn->kn_sfflags & MACH_RCV_MSG)
                    knote_adjust_qos(kn, first->ikm_qos, first->ikm_qos_override,
                        sync_qos_override_index);
                result = 1;
            }
            imq_unlock(mqueue);

            is_read_unlock(space);
            error = 0;
        } else {
            is_read_unlock(space);
            error = ENOTSUP;
        }
    } else {
        error = ENOENT;
    }

    waitq_link_release(wq_link_id);

    /* bail out on errors */
    if (error) {
        kn->kn_flags |= EV_ERROR;
        kn->kn_data = error;
        return 0;
    }

    return result;
}
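
/*
 * To summarize the two attach paths above: for a port set, the knote is
 * linked directly to the set's wait queue and left stay-active, so wakeups
 * come from the waitq code rather than per-message knote() calls; for a
 * receive right, the knote is attached to the port's klist and may fire
 * (and have its QoS adjusted) immediately if a message is already queued.
 */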

/* NOT proud of these - we should have a stricter relationship between mqueue and ipc object */
#define mqueue_to_pset(mq)   ((ipc_pset_t)((uintptr_t)mq-offsetof(struct ipc_pset, ips_messages)))
#define mqueue_to_port(mq)   ((ipc_port_t)((uintptr_t)mq-offsetof(struct ipc_port, ip_messages)))
#define mqueue_to_object(mq) (((ipc_object_t)(mq)) - 1)

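/*
 * These recover the owning ipc object from a pointer to its embedded
 * ipc_mqueue by undoing the field offset.  mqueue_to_object() relies on the
 * message queue immediately following the leading struct ipc_object in both
 * struct ipc_port and struct ipc_pset, so stepping back one ipc_object
 * lands on the object header.
 */
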
static void
filt_machportdetach(
    struct knote *kn)
{
    ipc_mqueue_t mqueue = kn->kn_ptr.p_mqueue;
    ipc_object_t object = mqueue_to_object(mqueue);

    imq_lock(mqueue);
    KNOTE_DETACH(&mqueue->imq_klist, kn);
    kn->kn_ptr.p_mqueue = IMQ_NULL;
    imq_unlock(mqueue);

    if (io_otype(object) == IOT_PORT_SET) {
        /*
         * Unlink the portset wait queue from knote/kqueue.
         * JMM - Does this need to be atomic under the mq lock?
         */
        (void)knote_unlink_waitq(kn, &mqueue->imq_wait_queue);
    }
    io_release(object);
}

/*
 * filt_machport - deliver events into the mach port filter
 *
 * Mach port message arrival events are currently only posted via the
 * kqueue filter routine for ports.  Port sets are marked stay-active
 * and the wait queue code will break any kqueue waiters out to go
 * poll the stay-queued knotes again.
 *
 * If there is a message at the head of the queue,
 * we indicate that the knote should go active.  If
 * the message is to be direct-received, we adjust the
 * QoS of the knote according to the requested and override
 * QoS of that first message.
 *
 * NOTE_REVOKE events are a legacy way to indicate that the port/portset
 * was deallocated or left the current Mach portspace (the modern technique
 * is the EV_VANISHED protocol).  If we see NOTE_REVOKE, deliver an
 * EV_EOF event for these changes (hopefully it will get delivered before
 * the port name recycles to the same generation count and someone tries
 * to re-register a kevent for it or the events are udata-specific -
 * avoiding a conflict).
 */
static int
filt_machport(
    struct knote *kn,
    long hint)
{
    ipc_mqueue_t mqueue = kn->kn_ptr.p_mqueue;
    ipc_kmsg_t first;
    int result = 0;

    /* mqueue locked by caller */
    assert(imq_held(mqueue));

    if (hint == NOTE_REVOKE) {
        kn->kn_flags |= EV_EOF | EV_ONESHOT;
        result = 1;
    } else if (imq_is_valid(mqueue)) {
        assert(!imq_is_set(mqueue));
        if ((first = ipc_kmsg_queue_first(&mqueue->imq_messages)) != IKM_NULL) {
            ipc_port_t port = ip_from_mq(mqueue);
            int sync_qos_override_index = ipc_port_get_max_sync_qos_index(port);

            if (kn->kn_sfflags & MACH_RCV_MSG)
                knote_adjust_qos(kn, first->ikm_qos, first->ikm_qos_override,
                    sync_qos_override_index);
            result = 1;
        }
    }

    return result;
}

static int
filt_machporttouch(
    struct knote *kn,
    struct kevent_internal_s *kev)
{
    ipc_mqueue_t mqueue = kn->kn_ptr.p_mqueue;
    ipc_kmsg_t first;
    int result = 0;

    imq_lock(mqueue);

    /* copy in new settings and save off new input fflags */
    kn->kn_sfflags = kev->fflags;
    kn->kn_ext[0] = kev->ext[0];
    kn->kn_ext[1] = kev->ext[1];
    if ((kn->kn_status & KN_UDATA_SPECIFIC) == 0)
        kn->kn_udata = kev->udata;

    /*
     * If the mqueue is a valid port and there is a message
     * that will be direct-received from the knote, update
     * the knote qos based on the first message and trigger
     * the event.  If there are no more messages, reset the
     * QoS to the value provided by the kevent.
     */
    if (imq_is_valid(mqueue) && !imq_is_set(mqueue) &&
        (first = ipc_kmsg_queue_first(&mqueue->imq_messages)) != IKM_NULL) {
        ipc_port_t port = ip_from_mq(mqueue);
        int sync_qos_override_index = ipc_port_get_max_sync_qos_index(port);

        if (kn->kn_sfflags & MACH_RCV_MSG)
            knote_adjust_qos(kn, first->ikm_qos, first->ikm_qos_override,
                sync_qos_override_index);
        result = 1;
    } else if (kn->kn_sfflags & MACH_RCV_MSG) {
        knote_adjust_qos(kn,
            MACH_MSG_PRIORITY_UNSPECIFIED,
            MACH_MSG_PRIORITY_UNSPECIFIED,
            THREAD_QOS_UNSPECIFIED);
    }
    imq_unlock(mqueue);

    return result;
}

static int
filt_machportprocess(
    struct knote *kn,
    struct filt_process_s *process_data,
    struct kevent_internal_s *kev)
{
    ipc_mqueue_t mqueue = kn->kn_ptr.p_mqueue;
    ipc_object_t object = mqueue_to_object(mqueue);
    thread_t self = current_thread();
    boolean_t used_filtprocess_data = FALSE;

    wait_result_t wresult;
    mach_msg_option_t option;
    mach_vm_address_t addr;
    mach_msg_size_t size;

    imq_lock(mqueue);

    /* Capture current state */
    *kev = kn->kn_kevent;

    /* If already deallocated/moved return one last EOF event */
    if (kev->flags & EV_EOF) {
        imq_unlock(mqueue);
        return 1;
    }

    /*
     * Only honor supported receive options.  If no options are
     * provided, just force a MACH_RCV_TOO_LARGE to detect the
     * name of the port and the size of the waiting message.
     */
    option = kn->kn_sfflags & (MACH_RCV_MSG|MACH_RCV_LARGE|MACH_RCV_LARGE_IDENTITY|
                               MACH_RCV_TRAILER_MASK|MACH_RCV_VOUCHER);

    if (option & MACH_RCV_MSG) {
        addr = (mach_vm_address_t) kn->kn_ext[0];
        size = (mach_msg_size_t) kn->kn_ext[1];

        /*
         * If the kevent didn't specify a buffer and length, carve a buffer
         * from the filter processing data according to the flags.
         */
        if (size == 0 && process_data != NULL) {
            used_filtprocess_data = TRUE;

            addr = (mach_vm_address_t)process_data->fp_data_out;
            size = (mach_msg_size_t)process_data->fp_data_resid;
            option |= (MACH_RCV_LARGE | MACH_RCV_LARGE_IDENTITY);
            if (process_data->fp_flags & KEVENT_FLAG_STACK_DATA)
                option |= MACH_RCV_STACK;
        }
    } else {
        /* just detect the port name (if a set) and size of the first message */
        option = MACH_RCV_LARGE;
        addr = 0;
        size = 0;
    }

    /* just use the reference from here on out */
    io_reference(object);

    /*
     * Set up to receive a message or the notification of a
     * too large message.  But never allow this call to wait.
     * If the user provided additional options, like trailer
     * options, pass those through here.  But we don't support
     * scatter lists through this interface.
     */
    self->ith_object = object;
    self->ith_msg_addr = addr;
    self->ith_rsize = size;
    self->ith_msize = 0;
    self->ith_option = option;
    self->ith_receiver_name = MACH_PORT_NULL;
    self->ith_continuation = NULL;
    option |= MACH_RCV_TIMEOUT; // never wait
    self->ith_state = MACH_RCV_IN_PROGRESS;
    self->ith_knote = kn;

    wresult = ipc_mqueue_receive_on_thread(
            mqueue,
            option,
            size, /* max_size */
            0,    /* immediate timeout */
            THREAD_INTERRUPTIBLE,
            self);
    /* mqueue unlocked */

    /*
     * If we timed out, or the process is exiting, just release the
     * reference on the ipc_object and return zero.
     */
    if (wresult == THREAD_RESTART || self->ith_state == MACH_RCV_TIMED_OUT) {
        io_release(object);
        return 0;
    }

    assert(wresult == THREAD_NOT_WAITING);
    assert(self->ith_state != MACH_RCV_IN_PROGRESS);

    /*
     * If we weren't attempting to receive a message
     * directly, we need to return the port name in
     * the kevent structure.
     */
    if ((option & MACH_RCV_MSG) != MACH_RCV_MSG) {
        assert(self->ith_state == MACH_RCV_TOO_LARGE);
        assert(self->ith_kmsg == IKM_NULL);
        kev->data = self->ith_receiver_name;
        io_release(object);
        return 1;
    }

    /*
     * Attempt to receive the message directly, returning
     * the results in the fflags field.
     */
    kev->fflags = mach_msg_receive_results(&size);

    /* kmsg and object reference consumed */

    /*
     * if the user asked for the identity of ports containing a
     * too-large message, return it in the data field (as we
     * do for messages we didn't try to receive).
     */
    if (kev->fflags == MACH_RCV_TOO_LARGE) {
        kev->ext[1] = self->ith_msize;
        if (option & MACH_RCV_LARGE_IDENTITY)
            kev->data = self->ith_receiver_name;
        else
            kev->data = MACH_PORT_NULL;
    } else {
        kev->ext[1] = size;
        kev->data = MACH_PORT_NULL;
    }

    /*
     * If we used a data buffer carved out from the filt_process data,
     * store the address used in the knote and adjust the residual and
     * other parameters for future use.
     */
    if (used_filtprocess_data) {
        assert(process_data->fp_data_resid >= size);
        process_data->fp_data_resid -= size;
        if ((process_data->fp_flags & KEVENT_FLAG_STACK_DATA) == 0) {
            kev->ext[0] = process_data->fp_data_out;
            process_data->fp_data_out += size;
        } else {
            assert(option & MACH_RCV_STACK);
            kev->ext[0] = process_data->fp_data_out +
                          process_data->fp_data_resid;
        }
    }

    /*
     * Apply message-based QoS values to output kevent as prescribed.
     * The kev->qos field gets max(msg-qos, kn->kn_qos).
     * The kev->ext[2] field gets (msg-qos << 32) | (override-qos).
     *
     * The mach_msg_receive_results() call saved off the message
     * QoS values in the continuation save area on successful receive.
     */
    if (kev->fflags == MACH_MSG_SUCCESS) {
        kev->qos = mach_msg_priority_combine(self->ith_qos, kn->kn_qos);
        kev->ext[2] = ((uint64_t)self->ith_qos << 32) |
                      (uint64_t)self->ith_qos_override;
    }

    return 1;
}

/*
 * Peek to see if the message queue associated with the knote has any
 * events.  This pre-hook is called when a filter uses the stay-
 * on-queue mechanism (as the knote_link_waitq mechanism does for
 * portsets) and someone calls select() against the containing kqueue.
 *
 * Just peek at the pre-post status of the portset's wait queue
 * to determine if it has anything interesting.  We can do it
 * without holding the lock, as it is just a snapshot in time
 * (if this is used as part of really waiting for events, we
 * will catch changes in this status when the event gets posted
 * up to the knote's kqueue).
 */
static unsigned
filt_machportpeek(struct knote *kn)
{
    ipc_mqueue_t mqueue = kn->kn_ptr.p_mqueue;

    return (ipc_mqueue_set_peek(mqueue));
}