/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	ipc/ipc_pset.c
 *	Author:	Rich Draves
 *	Date:	1989
 *
 *	Functions to manipulate IPC port sets.
 */

#include <mach/port.h>
#include <mach/kern_return.h>
#include <mach/message.h>
#include <ipc/ipc_mqueue.h>
#include <ipc/ipc_object.h>
#include <ipc/ipc_pset.h>
#include <ipc/ipc_right.h>
#include <ipc/ipc_space.h>
#include <ipc/ipc_port.h>

#include <kern/kern_types.h>

#include <vm/vm_map.h>
#include <libkern/section_keywords.h>

/*
 *	Routine:	ipc_pset_alloc
 *	Purpose:
 *		Allocate a port set.
 *	Conditions:
 *		Nothing locked.  If successful, the port set is returned
 *		locked.  (The caller doesn't have a reference.)
 *	Returns:
 *		KERN_SUCCESS		The port set is allocated.
 *		KERN_INVALID_TASK	The space is dead.
 *		KERN_NO_SPACE		No room for an entry in the space.
 *		KERN_RESOURCE_SHORTAGE	Couldn't allocate memory.
 */

kern_return_t
ipc_pset_alloc(
	ipc_space_t		space,
	mach_port_name_t	*namep,
	ipc_pset_t		*psetp)
{
	ipc_pset_t pset;
	mach_port_name_t name;
	kern_return_t kr;
	uint64_t reserved_link;

	reserved_link = waitq_link_reserve(NULL);

	kr = ipc_object_alloc(space, IOT_PORT_SET,
			      MACH_PORT_TYPE_PORT_SET, 0,
			      &name, (ipc_object_t *) &pset);
	if (kr != KERN_SUCCESS) {
		waitq_link_release(reserved_link);
		return kr;
	}
	/* pset and space are locked */

	ipc_mqueue_init(&pset->ips_messages, TRUE /* set */, &reserved_link);
	is_write_unlock(space);

	waitq_link_release(reserved_link);

	*namep = name;
	*psetp = pset;
	return KERN_SUCCESS;
}

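/*
 * Illustrative sketch (not code that appears elsewhere in this file): a
 * typical in-kernel caller, such as the port-set case of the
 * mach_port_allocate() path, uses ipc_pset_alloc() roughly as follows.
 * The pset comes back locked with no extra reference, so the caller only
 * has to unlock it once it has recorded the new name.
 *
 *	ipc_pset_t		pset;
 *	mach_port_name_t	name;
 *	kern_return_t		kr;
 *
 *	kr = ipc_pset_alloc(space, &name, &pset);
 *	if (kr == KERN_SUCCESS)
 *		ips_unlock(pset);
 */
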
/*
 *	Routine:	ipc_pset_alloc_name
 *	Purpose:
 *		Allocate a port set, with a specific name.
 *	Conditions:
 *		Nothing locked.  If successful, the port set is returned
 *		locked.  (The caller doesn't have a reference.)
 *	Returns:
 *		KERN_SUCCESS		The port set is allocated.
 *		KERN_INVALID_TASK	The space is dead.
 *		KERN_NAME_EXISTS	The name already denotes a right.
 *		KERN_RESOURCE_SHORTAGE	Couldn't allocate memory.
 */

kern_return_t
ipc_pset_alloc_name(
	ipc_space_t		space,
	mach_port_name_t	name,
	ipc_pset_t		*psetp)
{
	ipc_pset_t pset;
	kern_return_t kr;
	uint64_t reserved_link;

	reserved_link = waitq_link_reserve(NULL);

	kr = ipc_object_alloc_name(space, IOT_PORT_SET,
				   MACH_PORT_TYPE_PORT_SET, 0,
				   name, (ipc_object_t *) &pset);
	if (kr != KERN_SUCCESS) {
		waitq_link_release(reserved_link);
		return kr;
	}
	/* pset is locked */

	ipc_mqueue_init(&pset->ips_messages, TRUE /* set */, &reserved_link);

	waitq_link_release(reserved_link);

	*psetp = pset;
	return KERN_SUCCESS;
}


/*
 *	Routine:	ipc_pset_alloc_special
 *	Purpose:
 *		Allocate a port set in a special space.
 *		The new port set is returned with one ref.
 *		If unsuccessful, IPS_NULL is returned.
 *	Conditions:
 *		Nothing locked.
 */
ipc_pset_t
ipc_pset_alloc_special(
	__assert_only ipc_space_t space)
{
	ipc_pset_t pset;
	uint64_t reserved_link;

	assert(space != IS_NULL);
	assert(space->is_table == IE_NULL);
	assert(!is_active(space));

	reserved_link = waitq_link_reserve(NULL);

	__IGNORE_WCASTALIGN(pset = (ipc_pset_t)io_alloc(IOT_PORT_SET));
	if (pset == IPS_NULL)
		return IPS_NULL;

	bzero((char *)pset, sizeof(*pset));

	io_lock_init(&pset->ips_object);
	pset->ips_references = 1;
	pset->ips_object.io_bits = io_makebits(TRUE, IOT_PORT_SET, 0);

	ipc_mqueue_init(&pset->ips_messages, TRUE /* set */, &reserved_link);

	waitq_link_release(reserved_link);

	return pset;
}


/*
 *	Routine:	ipc_pset_member
 *	Purpose:
 *		Checks to see if a port is a member of a pset.
 *	Conditions:
 *		Both port and port set are locked.
 *		The port must be active.
 */
boolean_t
ipc_pset_member(
	ipc_pset_t	pset,
	ipc_port_t	port)
{
	assert(ip_active(port));

	return (ipc_mqueue_member(&port->ip_messages, &pset->ips_messages));
}

/*
 *	Routine:	ipc_pset_add
 *	Purpose:
 *		Puts a port into a port set.
 *	Conditions:
 *		Both port and port set are locked and active.
 *		The owner of the port set is also the receiver for the port.
 */
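/*
 * The reserved_link and reserved_prepost arguments are waitq link and
 * prepost reservations that the caller is expected to obtain before
 * taking the locks (e.g. via waitq_link_reserve() and
 * waitq_prepost_reserve(), as the mach_port_insert_member() path does),
 * so that ipc_mqueue_add() never has to allocate memory while both the
 * port and the port set are locked.
 */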

kern_return_t
ipc_pset_add(
	ipc_pset_t	  pset,
	ipc_port_t	  port,
	uint64_t	 *reserved_link,
	uint64_t	 *reserved_prepost)
{
	kern_return_t kr;

	assert(ips_active(pset));
	assert(ip_active(port));

	kr = ipc_mqueue_add(&port->ip_messages, &pset->ips_messages,
			    reserved_link, reserved_prepost);

	return kr;
}



/*
 *	Routine:	ipc_pset_remove
 *	Purpose:
 *		Removes a port from a port set.
 *		The port set loses a reference.
 *	Conditions:
 *		Both port and port set are locked.
 *		The port must be active.
 */

kern_return_t
ipc_pset_remove(
	ipc_pset_t	  pset,
	ipc_port_t	  port)
{
	kern_return_t kr;

	assert(ip_active(port));

	if (port->ip_in_pset == 0)
		return KERN_NOT_IN_SET;

	kr = ipc_mqueue_remove(&port->ip_messages, &pset->ips_messages);

	return kr;
}

/*
 *	Routine:	ipc_pset_remove_from_all
 *	Purpose:
 *		Removes a port from all its port sets.
 *	Conditions:
 *		port is locked and active.
 */

kern_return_t
ipc_pset_remove_from_all(
	ipc_port_t	port)
{
	if (port->ip_in_pset == 0)
		return KERN_NOT_IN_SET;

	/*
	 * Remove the port's mqueue from all sets
	 */
	ipc_mqueue_remove_from_all(&port->ip_messages);
	return KERN_SUCCESS;
}


/*
 *	Routine:	ipc_pset_destroy
 *	Purpose:
 *		Destroys a port_set.
 *	Conditions:
 *		The port_set is locked and alive.
 *		The caller has a reference, which is consumed.
 *		Afterwards, the port_set is unlocked and dead.
 */

void
ipc_pset_destroy(
	ipc_pset_t	pset)
{
	assert(ips_active(pset));

	pset->ips_object.io_bits &= ~IO_BITS_ACTIVE;

	/*
	 * remove all the member message queues
	 * AND remove this message queue from any containing sets
	 */
	ipc_mqueue_remove_all(&pset->ips_messages);

	/*
	 * Set all waiters on the portset running to
	 * discover the change.
	 */
	imq_lock(&pset->ips_messages);
	ipc_mqueue_changed(&pset->ips_messages);
	imq_unlock(&pset->ips_messages);

	ipc_mqueue_deinit(&pset->ips_messages);

	ips_unlock(pset);
	ips_release(pset);	/* consume the ref our caller gave us */
}

/* Kqueue EVFILT_MACHPORT support */

#include <sys/event.h>
#include <sys/errno.h>

static int	filt_machportattach(struct knote *kn, struct kevent_internal_s *kev);
static void	filt_machportdetach(struct knote *kn);
static int	filt_machport(struct knote *kn, long hint);
static int	filt_machporttouch(struct knote *kn, struct kevent_internal_s *kev);
static int	filt_machportprocess(struct knote *kn, struct filt_process_s *data, struct kevent_internal_s *kev);
static unsigned filt_machportpeek(struct knote *kn);

SECURITY_READ_ONLY_EARLY(struct filterops) machport_filtops = {
	.f_adjusts_qos = 1,
	.f_attach = filt_machportattach,
	.f_detach = filt_machportdetach,
	.f_event = filt_machport,
	.f_touch = filt_machporttouch,
	.f_process = filt_machportprocess,
	.f_peek = filt_machportpeek,
};

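/*
 * Illustrative userspace usage (a sketch, not part of this file): a process
 * can monitor a Mach port or port set through kqueue by registering an
 * EVFILT_MACHPORT knote whose ident is the port name.  With MACH_RCV_MSG in
 * fflags and a buffer described by ext[0]/ext[1], the filter receives the
 * message directly, as implemented by filt_machportprocess() below.  Names
 * such as "portset_name" and "buf" are placeholders; error handling omitted.
 *
 *	struct kevent64_s ev;
 *	char buf[1024];
 *	int kq = kqueue();
 *
 *	EV_SET64(&ev, portset_name, EVFILT_MACHPORT, EV_ADD | EV_ENABLE,
 *	    MACH_RCV_MSG, 0, 0, (uint64_t)(uintptr_t)buf, sizeof(buf));
 *	(void)kevent64(kq, &ev, 1, &ev, 1, 0, NULL);
 */
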
static int
filt_machportattach(
	struct knote *kn,
	__unused struct kevent_internal_s *kev)
{
	mach_port_name_t name = (mach_port_name_t)kn->kn_kevent.ident;
	uint64_t wq_link_id = waitq_link_reserve(NULL);
	ipc_space_t space = current_space();
	ipc_kmsg_t first;

	int error;
	int result = 0;
	kern_return_t kr;
	ipc_entry_t entry;
	ipc_mqueue_t mqueue;

	kr = ipc_right_lookup_read(space, name, &entry);
	if (kr == KERN_SUCCESS) {
		/* space is read-locked and active */

		if (entry->ie_bits & MACH_PORT_TYPE_PORT_SET) {
			ipc_pset_t pset;

			__IGNORE_WCASTALIGN(pset = (ipc_pset_t)entry->ie_object);
			mqueue = &pset->ips_messages;
			ips_reference(pset);

			imq_lock(mqueue);
			kn->kn_ptr.p_mqueue = mqueue;

			/*
			 * Bind the portset wait queue directly to knote/kqueue.
			 * This allows us to just use wait_queue foo to effect a wakeup,
			 * rather than having to call knote() from the Mach code on each
			 * message.  We still attach the knote to the mqueue klist for
			 * NOTE_REVOKE purposes only.
			 */
			error = knote_link_waitq(kn, &mqueue->imq_wait_queue, &wq_link_id);
			if (!error) {
				KNOTE_ATTACH(&mqueue->imq_klist, kn);
				imq_unlock(mqueue);
			} else {
				kn->kn_ptr.p_mqueue = IMQ_NULL;
				imq_unlock(mqueue);
				ips_release(pset);
			}

			is_read_unlock(space);

			/*
			 * linked knotes are marked stay-active and therefore don't
			 * need an indication of their fired state to be returned
			 * from the attach operation.
			 */

		} else if (entry->ie_bits & MACH_PORT_TYPE_RECEIVE) {
			ipc_port_t port;

			__IGNORE_WCASTALIGN(port = (ipc_port_t)entry->ie_object);
			mqueue = &port->ip_messages;
			ip_reference(port);

			/*
			 * Attach knote to port and determine result.
			 * If the filter requested direct message receipt,
			 * we may need to adjust the qos of the knote to
			 * reflect the requested and override qos of the
			 * first message in the queue.
			 */
			imq_lock(mqueue);
			kn->kn_ptr.p_mqueue = mqueue;
			KNOTE_ATTACH(&mqueue->imq_klist, kn);
			if ((first = ipc_kmsg_queue_first(&mqueue->imq_messages)) != IKM_NULL) {
				int sync_qos_override_index = ipc_port_get_max_sync_qos_index(port);
				if (kn->kn_sfflags & MACH_RCV_MSG)
					knote_adjust_qos(kn, first->ikm_qos, first->ikm_qos_override,
						sync_qos_override_index);
				result = 1;
			}
			imq_unlock(mqueue);

			is_read_unlock(space);
			error = 0;
		} else {
			is_read_unlock(space);
			error = ENOTSUP;
		}
	} else {
		error = ENOENT;
	}

	waitq_link_release(wq_link_id);

	/* bail out on errors */
	if (error) {
		kn->kn_flags |= EV_ERROR;
		kn->kn_data = error;
		return 0;
	}

	return result;
}

/* NOT proud of these - we should have a stricter relationship between mqueue and ipc object */
#define mqueue_to_pset(mq)	((ipc_pset_t)((uintptr_t)mq-offsetof(struct ipc_pset, ips_messages)))
#define mqueue_to_port(mq)	((ipc_port_t)((uintptr_t)mq-offsetof(struct ipc_port, ip_messages)))
#define mqueue_to_object(mq)	(((ipc_object_t)(mq)) - 1)
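/*
 * mqueue_to_object() relies on the ipc_mqueue being laid out immediately
 * after the ipc_object header in both struct ipc_port and struct ipc_pset;
 * backing up by one ipc_object therefore recovers the containing object.
 * The offsetof()-based macros above encode the same layout relationship
 * explicitly.
 */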

static void
filt_machportdetach(
	struct knote *kn)
{
	ipc_mqueue_t mqueue = kn->kn_ptr.p_mqueue;
	ipc_object_t object = mqueue_to_object(mqueue);

	imq_lock(mqueue);
	KNOTE_DETACH(&mqueue->imq_klist, kn);
	kn->kn_ptr.p_mqueue = IMQ_NULL;
	imq_unlock(mqueue);

	if (io_otype(object) == IOT_PORT_SET) {
		/*
		 * Unlink the portset wait queue from knote/kqueue.
		 * JMM - Does this need to be atomic under the mq lock?
		 */
		(void)knote_unlink_waitq(kn, &mqueue->imq_wait_queue);
	}
	io_release(object);
}

/*
 * filt_machport - deliver events into the mach port filter
 *
 * Mach port message arrival events are currently only posted via the
 * kqueue filter routine for ports.  Port sets are marked stay-active
 * and the wait queue code will break any kqueue waiters out to go
 * poll the stay-queued knotes again.
 *
 * If there is a message at the head of the queue,
 * we indicate that the knote should go active.  If
 * the message is to be direct-received, we adjust the
 * QoS of the knote according to the requested and override
 * QoS of that first message.
 *
 * NOTE_REVOKE events are a legacy way to indicate that the port/portset
 * was deallocated or left the current Mach portspace (the modern technique
 * is the EV_VANISHED protocol).  If we see NOTE_REVOKE, deliver an
 * EV_EOF event for these changes (hopefully it will get delivered before
 * the port name recycles to the same generation count and someone tries
 * to re-register a kevent for it or the events are udata-specific -
 * avoiding a conflict).
 */
524static int
525filt_machport(
39037602
A
526 struct knote *kn,
527 long hint)
528{
529 ipc_mqueue_t mqueue = kn->kn_ptr.p_mqueue;
530 ipc_kmsg_t first;
531 int result = 0;
532
533 /* mqueue locked by caller */
534 assert(imq_held(mqueue));
535
536 if (hint == NOTE_REVOKE) {
537 kn->kn_flags |= EV_EOF | EV_ONESHOT;
538 result = 1;
539 } else if (imq_is_valid(mqueue)) {
540 assert(!imq_is_set(mqueue));
541 if ((first = ipc_kmsg_queue_first(&mqueue->imq_messages)) != IKM_NULL) {
5ba3f43e
A
542 ipc_port_t port = ip_from_mq(mqueue);
543 int sync_qos_override_index = ipc_port_get_max_sync_qos_index(port);
544
39037602 545 if (kn->kn_sfflags & MACH_RCV_MSG)
5ba3f43e
A
546 knote_adjust_qos(kn, first->ikm_qos, first->ikm_qos_override,
547 sync_qos_override_index);
39037602
A
548 result = 1;
549 }
550 }
551
552 return result;
553}

static int
filt_machporttouch(
	struct knote *kn,
	struct kevent_internal_s *kev)
{
	ipc_mqueue_t mqueue = kn->kn_ptr.p_mqueue;
	ipc_kmsg_t first;
	int result = 0;

	imq_lock(mqueue);

	/* copy in new settings and save off new input fflags */
	kn->kn_sfflags = kev->fflags;
	kn->kn_ext[0] = kev->ext[0];
	kn->kn_ext[1] = kev->ext[1];
	if ((kn->kn_status & KN_UDATA_SPECIFIC) == 0)
		kn->kn_udata = kev->udata;

	/*
	 * If the mqueue is a valid port and there is a message
	 * that will be direct-received from the knote, update
	 * the knote qos based on the first message and trigger
	 * the event.  If there are no more messages, reset the
	 * QoS to the value provided by the kevent.
	 */
	if (imq_is_valid(mqueue) && !imq_is_set(mqueue) &&
	    (first = ipc_kmsg_queue_first(&mqueue->imq_messages)) != IKM_NULL) {
		ipc_port_t port = ip_from_mq(mqueue);
		int sync_qos_override_index = ipc_port_get_max_sync_qos_index(port);

		if (kn->kn_sfflags & MACH_RCV_MSG)
			knote_adjust_qos(kn, first->ikm_qos, first->ikm_qos_override,
				sync_qos_override_index);
		result = 1;
	} else if (kn->kn_sfflags & MACH_RCV_MSG) {
		knote_adjust_qos(kn,
				 MACH_MSG_PRIORITY_UNSPECIFIED,
				 MACH_MSG_PRIORITY_UNSPECIFIED,
				 THREAD_QOS_UNSPECIFIED);
	}
	imq_unlock(mqueue);

	return result;
}

static int
filt_machportprocess(
	struct knote *kn,
	struct filt_process_s *process_data,
	struct kevent_internal_s *kev)
{
	ipc_mqueue_t mqueue = kn->kn_ptr.p_mqueue;
	ipc_object_t object = mqueue_to_object(mqueue);
	thread_t self = current_thread();
	boolean_t used_filtprocess_data = FALSE;

	wait_result_t wresult;
	mach_msg_option_t option;
	mach_vm_address_t addr;
	mach_msg_size_t	size;

	imq_lock(mqueue);

	/* Capture current state */
	*kev = kn->kn_kevent;

	/* If already deallocated/moved return one last EOF event */
	if (kev->flags & EV_EOF) {
		imq_unlock(mqueue);
		return 1;
	}

	/*
	 * Only honor supported receive options.  If no options are
	 * provided, just force a MACH_RCV_TOO_LARGE to detect the
	 * name of the port and the size of the waiting message.
	 */
	option = kn->kn_sfflags & (MACH_RCV_MSG|MACH_RCV_LARGE|MACH_RCV_LARGE_IDENTITY|
				   MACH_RCV_TRAILER_MASK|MACH_RCV_VOUCHER);

	if (option & MACH_RCV_MSG) {
		addr = (mach_vm_address_t) kn->kn_ext[0];
		size = (mach_msg_size_t) kn->kn_ext[1];

		/*
		 * If the kevent didn't specify a buffer and length, carve a buffer
		 * from the filter processing data according to the flags.
		 */
		if (size == 0 && process_data != NULL) {
			used_filtprocess_data = TRUE;

			addr = (mach_vm_address_t)process_data->fp_data_out;
			size = (mach_msg_size_t)process_data->fp_data_resid;
			option |= (MACH_RCV_LARGE | MACH_RCV_LARGE_IDENTITY);
			if (process_data->fp_flags & KEVENT_FLAG_STACK_DATA)
				option |= MACH_RCV_STACK;
		}
	} else {
		/* just detect the port name (if a set) and size of the first message */
		option = MACH_RCV_LARGE;
		addr = 0;
		size = 0;
	}

	/* just use the reference from here on out */
	io_reference(object);

	/*
	 * Set up to receive a message or the notification of a
	 * too large message.  But never allow this call to wait.
	 * If the user provided additional options, like trailer
	 * options, pass those through here.  But we don't support
	 * scatter lists through this interface.
	 */
	self->ith_object = object;
	self->ith_msg_addr = addr;
	self->ith_rsize = size;
	self->ith_msize = 0;
	self->ith_option = option;
	self->ith_receiver_name = MACH_PORT_NULL;
	self->ith_continuation = NULL;
	option |= MACH_RCV_TIMEOUT; // never wait
	self->ith_state = MACH_RCV_IN_PROGRESS;
	self->ith_knote = kn;

	wresult = ipc_mqueue_receive_on_thread(
			mqueue,
			option,
			size, /* max_size */
			0, /* immediate timeout */
			THREAD_INTERRUPTIBLE,
			self);
	/* mqueue unlocked */

	/*
	 * If we timed out, or the process is exiting, just release the
	 * reference on the ipc_object and return zero.
	 */
	if (wresult == THREAD_RESTART || self->ith_state == MACH_RCV_TIMED_OUT) {
		io_release(object);
		return 0;
	}

	assert(wresult == THREAD_NOT_WAITING);
	assert(self->ith_state != MACH_RCV_IN_PROGRESS);

	/*
	 * If we weren't attempting to receive a message
	 * directly, we need to return the port name in
	 * the kevent structure.
	 */
	if ((option & MACH_RCV_MSG) != MACH_RCV_MSG) {
		assert(self->ith_state == MACH_RCV_TOO_LARGE);
		assert(self->ith_kmsg == IKM_NULL);
		kev->data = self->ith_receiver_name;
		io_release(object);
		return 1;
	}

	/*
	 * Attempt to receive the message directly, returning
	 * the results in the fflags field.
	 */
	kev->fflags = mach_msg_receive_results(&size);

	/* kmsg and object reference consumed */

	/*
	 * If the user asked for the identity of ports containing a
	 * too-large message, return it in the data field (as we
	 * do for messages we didn't try to receive).
	 */
	if (kev->fflags == MACH_RCV_TOO_LARGE) {
		kev->ext[1] = self->ith_msize;
		if (option & MACH_RCV_LARGE_IDENTITY)
			kev->data = self->ith_receiver_name;
		else
			kev->data = MACH_PORT_NULL;
	} else {
		kev->ext[1] = size;
		kev->data = MACH_PORT_NULL;
	}

	/*
	 * If we used a data buffer carved out from the filt_process data,
	 * store the address used in the knote and adjust the residual and
	 * other parameters for future use.
	 */
	if (used_filtprocess_data) {
		assert(process_data->fp_data_resid >= size);
		process_data->fp_data_resid -= size;
		if ((process_data->fp_flags & KEVENT_FLAG_STACK_DATA) == 0) {
			kev->ext[0] = process_data->fp_data_out;
			process_data->fp_data_out += size;
		} else {
			assert(option & MACH_RCV_STACK);
			kev->ext[0] = process_data->fp_data_out +
				      process_data->fp_data_resid;
		}
	}

	/*
	 * Apply message-based QoS values to output kevent as prescribed.
	 * The kev->qos field gets max(msg-qos, kn->kn_qos).
	 * The kev->ext[2] field gets (msg-qos << 32) | (override-qos).
	 *
	 * The mach_msg_receive_results() call saved off the message
	 * QoS values in the continuation save area on successful receive.
	 */
	if (kev->fflags == MACH_MSG_SUCCESS) {
		kev->qos = mach_msg_priority_combine(self->ith_qos, kn->kn_qos);
		kev->ext[2] = ((uint64_t)self->ith_qos << 32) |
			      (uint64_t)self->ith_qos_override;
	}

	return 1;
}

/*
 * Peek to see if the message queue associated with the knote has any
 * events.  This pre-hook is called when a filter uses the stay-
 * on-queue mechanism (as the knote_link_waitq mechanism does for
 * portsets) and someone calls select() against the containing kqueue.
 *
 * Just peek at the pre-post status of the portset's wait queue
 * to determine if it has anything interesting.  We can do it
 * without holding the lock, as it is just a snapshot in time
 * (if this is used as part of really waiting for events, we
 * will catch changes in this status when the event gets posted
 * up to the knote's kqueue).
 */
static unsigned
filt_machportpeek(struct knote *kn)
{
	ipc_mqueue_t mqueue = kn->kn_ptr.p_mqueue;

	return (ipc_mqueue_set_peek(mqueue));
}