/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	ipc/ipc_pset.c
 *	Author:	Rich Draves
 *	Date:	1989
 *
 *	Functions to manipulate IPC port sets.
 */

#include <mach/port.h>
#include <mach/kern_return.h>
#include <mach/message.h>
#include <ipc/ipc_mqueue.h>
#include <ipc/ipc_object.h>
#include <ipc/ipc_pset.h>
#include <ipc/ipc_right.h>
#include <ipc/ipc_space.h>
#include <ipc/ipc_port.h>

#include <kern/kern_types.h>

#include <vm/vm_map.h>
#include <libkern/section_keywords.h>

/*
 *	Routine:	ipc_pset_alloc
 *	Purpose:
 *		Allocate a port set.
 *	Conditions:
 *		Nothing locked.  If successful, the port set is returned
 *		locked.  (The caller doesn't have a reference.)
 *	Returns:
 *		KERN_SUCCESS		The port set is allocated.
 *		KERN_INVALID_TASK	The space is dead.
 *		KERN_NO_SPACE		No room for an entry in the space.
 *		KERN_RESOURCE_SHORTAGE	Couldn't allocate memory.
 */

kern_return_t
ipc_pset_alloc(
	ipc_space_t		space,
	mach_port_name_t	*namep,
	ipc_pset_t		*psetp)
{
	ipc_pset_t pset;
	mach_port_name_t name;
	kern_return_t kr;
	uint64_t reserved_link;

	reserved_link = waitq_link_reserve(NULL);

	kr = ipc_object_alloc(space, IOT_PORT_SET,
			      MACH_PORT_TYPE_PORT_SET, 0,
			      &name, (ipc_object_t *) &pset);
	if (kr != KERN_SUCCESS) {
		waitq_link_release(reserved_link);
		return kr;
	}
	/* pset and space are locked */

	ipc_mqueue_init(&pset->ips_messages, TRUE /* set */, &reserved_link);
	is_write_unlock(space);

	waitq_link_release(reserved_link);

	*namep = name;
	*psetp = pset;
	return KERN_SUCCESS;
}
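
/*
 * Illustrative sketch only (not part of the original source): how a
 * kernel-internal caller might use ipc_pset_alloc.  The "space"
 * argument stands in for an ipc_space_t the caller already holds;
 * note the port set comes back locked, so the caller must unlock it.
 */
#if 0
static kern_return_t
example_pset_alloc(ipc_space_t space)
{
	mach_port_name_t name;
	ipc_pset_t pset;
	kern_return_t kr;

	kr = ipc_pset_alloc(space, &name, &pset);
	if (kr != KERN_SUCCESS)
		return kr;	/* dead space, full table, or no memory */

	/* pset is locked; the space's entry holds the reference */
	ips_unlock(pset);
	return KERN_SUCCESS;
}
#endif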

/*
 *	Routine:	ipc_pset_alloc_name
 *	Purpose:
 *		Allocate a port set, with a specific name.
 *	Conditions:
 *		Nothing locked.  If successful, the port set is returned
 *		locked.  (The caller doesn't have a reference.)
 *	Returns:
 *		KERN_SUCCESS		The port set is allocated.
 *		KERN_INVALID_TASK	The space is dead.
 *		KERN_NAME_EXISTS	The name already denotes a right.
 *		KERN_RESOURCE_SHORTAGE	Couldn't allocate memory.
 */

kern_return_t
ipc_pset_alloc_name(
	ipc_space_t		space,
	mach_port_name_t	name,
	ipc_pset_t		*psetp)
{
	ipc_pset_t pset;
	kern_return_t kr;
	uint64_t reserved_link;

	reserved_link = waitq_link_reserve(NULL);

	kr = ipc_object_alloc_name(space, IOT_PORT_SET,
				   MACH_PORT_TYPE_PORT_SET, 0,
				   name, (ipc_object_t *) &pset);
	if (kr != KERN_SUCCESS) {
		waitq_link_release(reserved_link);
		return kr;
	}
	/* pset is locked */

	ipc_mqueue_init(&pset->ips_messages, TRUE /* set */, &reserved_link);

	waitq_link_release(reserved_link);

	*psetp = pset;
	return KERN_SUCCESS;
}
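
/*
 * Sketch (illustrative, hypothetical caller): ipc_pset_alloc_name lets
 * the caller choose the name, so a collision with an existing right
 * (KERN_NAME_EXISTS) replaces KERN_NO_SPACE as a failure mode.  The
 * "space" and "name" variables are assumed to be set up by the caller.
 */
#if 0
	ipc_pset_t pset;
	kern_return_t kr;

	kr = ipc_pset_alloc_name(space, name, &pset);
	if (kr == KERN_NAME_EXISTS) {
		/* the chosen name already denotes a right in this space */
	} else if (kr == KERN_SUCCESS) {
		ips_unlock(pset);	/* returned locked, as above */
	}
#endif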

/*
 *	Routine:	ipc_pset_alloc_special
 *	Purpose:
 *		Allocate a port set in a special space.
 *		The new port set is returned with one ref.
 *		If unsuccessful, IPS_NULL is returned.
 *	Conditions:
 *		Nothing locked.
 */
ipc_pset_t
ipc_pset_alloc_special(
	__assert_only ipc_space_t space)
{
	ipc_pset_t pset;
	uint64_t reserved_link;

	assert(space != IS_NULL);
	assert(space->is_table == IE_NULL);
	assert(!is_active(space));

	reserved_link = waitq_link_reserve(NULL);

	__IGNORE_WCASTALIGN(pset = (ipc_pset_t)io_alloc(IOT_PORT_SET));
	if (pset == IPS_NULL) {
		waitq_link_release(reserved_link);
		return IPS_NULL;
	}

	bzero((char *)pset, sizeof(*pset));

	io_lock_init(&pset->ips_object);
	pset->ips_references = 1;
	pset->ips_object.io_bits = io_makebits(TRUE, IOT_PORT_SET, 0);

	ipc_mqueue_init(&pset->ips_messages, TRUE /* set */, &reserved_link);

	waitq_link_release(reserved_link);

	return pset;
}


/*
 *	Routine:	ipc_pset_member
 *	Purpose:
 *		Checks to see if a port is a member of a pset
 *	Conditions:
 *		Both port and port set are locked.
 *		The port must be active.
 */
boolean_t
ipc_pset_member(
	ipc_pset_t	pset,
	ipc_port_t	port)
{
	assert(ip_active(port));

	return (ipc_mqueue_member(&port->ip_messages, &pset->ips_messages));
}


/*
 *	Routine:	ipc_pset_add
 *	Purpose:
 *		Puts a port into a port set.
 *	Conditions:
 *		Both port and port set are locked and active.
 *		The owner of the port set is also receiver for the port.
 */

kern_return_t
ipc_pset_add(
	ipc_pset_t	pset,
	ipc_port_t	port,
	uint64_t	*reserved_link,
	uint64_t	*reserved_prepost)
{
	kern_return_t kr;

	assert(ips_active(pset));
	assert(ip_active(port));

	kr = ipc_mqueue_add(&port->ip_messages, &pset->ips_messages,
			    reserved_link, reserved_prepost);

	return kr;
}
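
/*
 * Illustrative sketch: allocation is not permitted while the port and
 * pset locks are held, so callers reserve the waitq link and prepost
 * objects up front and pass them into ipc_pset_add, releasing any
 * unused portion afterwards.  The prepost-reserve call and its "10"
 * extra count are assumptions modeled on the mach_port_insert_member
 * path; "pset", "port", and "kr" are assumed set up by the caller.
 */
#if 0
	uint64_t wq_link_id = waitq_link_reserve(NULL);
	uint64_t wq_reserved_prepost =
		waitq_prepost_reserve(NULL, 10, WAITQ_DONT_LOCK);

	/* ... look up and lock both pset and port ... */
	kr = ipc_pset_add(pset, port, &wq_link_id, &wq_reserved_prepost);
	/* ... unlock both ... */

	/* release whatever part of the reservations went unused */
	waitq_link_release(wq_link_id);
	waitq_prepost_release_reserve(wq_reserved_prepost);
#endif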

/*
 *	Routine:	ipc_pset_remove
 *	Purpose:
 *		Removes a port from a port set.
 *		The port set loses a reference.
 *	Conditions:
 *		Both port and port set are locked.
 *		The port must be active.
 */

kern_return_t
ipc_pset_remove(
	ipc_pset_t	pset,
	ipc_port_t	port)
{
	kern_return_t kr;

	assert(ip_active(port));

	if (port->ip_in_pset == 0)
		return KERN_NOT_IN_SET;

	kr = ipc_mqueue_remove(&port->ip_messages, &pset->ips_messages);

	return kr;
}

/*
 *	Routine:	ipc_pset_remove_from_all
 *	Purpose:
 *		Removes a port from all of its port sets.
 *	Conditions:
 *		port is locked and active.
 */

kern_return_t
ipc_pset_remove_from_all(
	ipc_port_t	port)
{
	if (port->ip_in_pset == 0)
		return KERN_NOT_IN_SET;

	/*
	 * Remove the port's mqueue from all sets
	 */
	ipc_mqueue_remove_from_all(&port->ip_messages);
	return KERN_SUCCESS;
}

/*
 *	Routine:	ipc_pset_destroy
 *	Purpose:
 *		Destroys a port_set.
 *	Conditions:
 *		The port_set is locked and alive.
 *		The caller has a reference, which is consumed.
 *		Afterwards, the port_set is unlocked and dead.
 */

void
ipc_pset_destroy(
	ipc_pset_t	pset)
{
	assert(ips_active(pset));

	pset->ips_object.io_bits &= ~IO_BITS_ACTIVE;

	/*
	 * remove all the member message queues
	 * AND remove this message queue from any containing sets
	 */
	ipc_mqueue_remove_all(&pset->ips_messages);

	/*
	 * Set all waiters on the portset running to
	 * discover the change.
	 */
	imq_lock(&pset->ips_messages);
	ipc_mqueue_changed(&pset->ips_messages);
	imq_unlock(&pset->ips_messages);

	ipc_mqueue_deinit(&pset->ips_messages);

	ips_unlock(pset);
	ips_release(pset);	/* consume the ref our caller gave us */
}
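
/*
 * Illustrative sketch of the destroy protocol stated above: the caller
 * enters with the pset locked and holding a reference, and both the
 * lock and the reference are consumed by the call.
 */
#if 0
	ips_lock(pset);
	assert(ips_active(pset));
	ipc_pset_destroy(pset);	/* returns with pset unlocked, ref consumed */
#endif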

/* Kqueue EVFILT_MACHPORT support */

#include <sys/event.h>
#include <sys/errno.h>

static int	filt_machportattach(struct knote *kn, struct kevent_internal_s *kev);
static void	filt_machportdetach(struct knote *kn);
static int	filt_machport(struct knote *kn, long hint);
static int	filt_machporttouch(struct knote *kn, struct kevent_internal_s *kev);
static int	filt_machportprocess(struct knote *kn, struct filt_process_s *data, struct kevent_internal_s *kev);
static unsigned	filt_machportpeek(struct knote *kn);
SECURITY_READ_ONLY_EARLY(struct filterops) machport_filtops = {
	.f_adjusts_qos = 1,
	.f_attach = filt_machportattach,
	.f_detach = filt_machportdetach,
	.f_event = filt_machport,
	.f_touch = filt_machporttouch,
	.f_process = filt_machportprocess,
	.f_peek = filt_machportpeek,
};
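
/*
 * Userspace view (illustrative sketch, not part of this file): these
 * filter operations back EVFILT_MACHPORT.  A kevent64() registration
 * like the one below, with MACH_RCV_MSG in fflags and a buffer
 * described by ext[0]/ext[1], causes filt_machportprocess to receive
 * the message directly into that buffer; on MACH_MSG_SUCCESS the
 * returned kevent packs (msg-qos << 32) | override-qos into ext[2],
 * as computed at the end of filt_machportprocess below.  "port_name"
 * is assumed to name a receive right or port set in the caller's space.
 */
#if 0
	#include <sys/event.h>
	#include <mach/mach.h>

	int kq = kqueue();
	static char buf[1024];		/* receive buffer for direct receipt */
	struct kevent64_s ev;

	EV_SET64(&ev, port_name, EVFILT_MACHPORT, EV_ADD | EV_ENABLE,
		 MACH_RCV_MSG, 0, 0,
		 (uint64_t)(uintptr_t)buf,	/* ext[0]: buffer address */
		 sizeof(buf));			/* ext[1]: buffer size */
	(void)kevent64(kq, &ev, 1, NULL, 0, 0, NULL);
#endif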

static int
filt_machportattach(
	struct knote *kn,
	__unused struct kevent_internal_s *kev)
{
	mach_port_name_t name = (mach_port_name_t)kn->kn_kevent.ident;
	uint64_t wq_link_id = waitq_link_reserve(NULL);
	ipc_space_t space = current_space();
	ipc_kmsg_t first;

	int error;
	int result = 0;
	kern_return_t kr;
	ipc_entry_t entry;
	ipc_mqueue_t mqueue;

	kr = ipc_right_lookup_read(space, name, &entry);
	if (kr == KERN_SUCCESS) {
		/* space is read-locked and active */

		if (entry->ie_bits & MACH_PORT_TYPE_PORT_SET) {
			ipc_pset_t pset;

			__IGNORE_WCASTALIGN(pset = (ipc_pset_t)entry->ie_object);
			mqueue = &pset->ips_messages;
			ips_reference(pset);

			imq_lock(mqueue);
			kn->kn_ptr.p_mqueue = mqueue;

			/*
			 * Bind the portset wait queue directly to knote/kqueue.
			 * This allows us to just use wait_queue foo to effect a wakeup,
			 * rather than having to call knote() from the Mach code on each
			 * message.  We still attach the knote to the mqueue klist for
			 * NOTE_REVOKE purposes only.
			 */
			error = knote_link_waitq(kn, &mqueue->imq_wait_queue, &wq_link_id);
			if (!error) {
				KNOTE_ATTACH(&mqueue->imq_klist, kn);
				imq_unlock(mqueue);
			} else {
				kn->kn_ptr.p_mqueue = IMQ_NULL;
				imq_unlock(mqueue);
				ips_release(pset);
			}

			is_read_unlock(space);

			/*
			 * linked knotes are marked stay-active and therefore don't
			 * need an indication of their fired state to be returned
			 * from the attach operation.
			 */

		} else if (entry->ie_bits & MACH_PORT_TYPE_RECEIVE) {
			ipc_port_t port;

			__IGNORE_WCASTALIGN(port = (ipc_port_t)entry->ie_object);
			mqueue = &port->ip_messages;
			ip_reference(port);

			/*
			 * attach knote to port and determine result.
			 * If the filter requested direct message receipt,
			 * we may need to adjust the qos of the knote to
			 * reflect the requested and override qos of the
			 * first message in the queue.
			 */
			imq_lock(mqueue);
			kn->kn_ptr.p_mqueue = mqueue;
			KNOTE_ATTACH(&mqueue->imq_klist, kn);
			if ((first = ipc_kmsg_queue_first(&mqueue->imq_messages)) != IKM_NULL) {
				int sync_qos_override_index = ipc_port_get_max_sync_qos_index(port);
				if (kn->kn_sfflags & MACH_RCV_MSG)
					knote_adjust_qos(kn, first->ikm_qos, first->ikm_qos_override,
							 sync_qos_override_index);
				result = 1;
			}
			imq_unlock(mqueue);

			is_read_unlock(space);
			error = 0;
		} else {
			is_read_unlock(space);
			error = ENOTSUP;
		}
	} else {
		error = ENOENT;
	}

	waitq_link_release(wq_link_id);

	/* bail out on errors */
	if (error) {
		kn->kn_flags |= EV_ERROR;
		kn->kn_data = error;
		return 0;
	}

	return result;
}

/* NOT proud of these - we should have a stricter relationship between mqueue and ipc object */
#define mqueue_to_pset(mq)	((ipc_pset_t)((uintptr_t)mq-offsetof(struct ipc_pset, ips_messages)))
#define mqueue_to_port(mq)	((ipc_port_t)((uintptr_t)mq-offsetof(struct ipc_port, ip_messages)))
#define mqueue_to_object(mq)	(((ipc_object_t)(mq)) - 1)

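/*
 * These macros recover the owning object from a pointer to its embedded
 * message queue by subtracting the field offset (the classic
 * container-of idiom).  mqueue_to_object additionally relies on the
 * mqueue sitting immediately after the ipc_object header in both
 * struct ipc_port and struct ipc_pset, so decrementing an ipc_object
 * pointer lands on the header.  A generic form of the idiom, for
 * illustration only:
 */
#if 0
#define container_of(ptr, type, field) \
	((type *)((uintptr_t)(ptr) - offsetof(type, field)))

	/* equivalent to mqueue_to_pset(mq) */
	ipc_pset_t pset = container_of(mq, struct ipc_pset, ips_messages);
#endif
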
static void
filt_machportdetach(
	struct knote *kn)
{
	ipc_mqueue_t mqueue = kn->kn_ptr.p_mqueue;
	ipc_object_t object = mqueue_to_object(mqueue);

	imq_lock(mqueue);
	KNOTE_DETACH(&mqueue->imq_klist, kn);
	kn->kn_ptr.p_mqueue = IMQ_NULL;
	imq_unlock(mqueue);

	if (io_otype(object) == IOT_PORT_SET) {
		/*
		 * Unlink the portset wait queue from knote/kqueue.
		 * JMM - Does this need to be atomic under the mq lock?
		 */
		(void)knote_unlink_waitq(kn, &mqueue->imq_wait_queue);
	}
	io_release(object);
}

/*
 * filt_machport - deliver events into the mach port filter
 *
 * Mach port message arrival events are currently only posted via the
 * kqueue filter routine for ports.  Port sets are marked stay-active
 * and the wait queue code will break any kqueue waiters out to go
 * poll the stay-queued knotes again.
 *
 * If there is a message at the head of the queue, we indicate that
 * the knote should go active.  If the message is to be
 * direct-received, we adjust the QoS of the knote according to the
 * requested and override QoS of that first message.
 *
 * NOTE_REVOKE events are a legacy way to indicate that the port/portset
 * was deallocated or left the current Mach port space (the modern
 * technique is the EV_VANISHED protocol).  If we see NOTE_REVOKE,
 * deliver an EV_EOF event for these changes.  Hopefully it will be
 * delivered before the port name recycles to the same generation count
 * and someone tries to re-register a kevent for it (or the events are
 * udata-specific, avoiding a conflict).
 */
static int
filt_machport(
	struct knote *kn,
	long hint)
{
	ipc_mqueue_t mqueue = kn->kn_ptr.p_mqueue;
	ipc_kmsg_t first;
	int result = 0;

	/* mqueue locked by caller */
	assert(imq_held(mqueue));

	if (hint == NOTE_REVOKE) {
		kn->kn_flags |= EV_EOF | EV_ONESHOT;
		result = 1;
	} else if (imq_is_valid(mqueue)) {
		assert(!imq_is_set(mqueue));
		if ((first = ipc_kmsg_queue_first(&mqueue->imq_messages)) != IKM_NULL) {
			ipc_port_t port = ip_from_mq(mqueue);
			int sync_qos_override_index = ipc_port_get_max_sync_qos_index(port);

			if (kn->kn_sfflags & MACH_RCV_MSG)
				knote_adjust_qos(kn, first->ikm_qos, first->ikm_qos_override,
						 sync_qos_override_index);
			result = 1;
		}
	}

	return result;
}

static int
filt_machporttouch(
	struct knote *kn,
	struct kevent_internal_s *kev)
{
	ipc_mqueue_t mqueue = kn->kn_ptr.p_mqueue;
	ipc_kmsg_t first;
	int result = 0;

	imq_lock(mqueue);

	/* copy in new settings and save off new input fflags */
	kn->kn_sfflags = kev->fflags;
	kn->kn_ext[0] = kev->ext[0];
	kn->kn_ext[1] = kev->ext[1];
	if ((kn->kn_status & KN_UDATA_SPECIFIC) == 0)
		kn->kn_udata = kev->udata;

	/*
	 * If the mqueue is a valid port and there is a message
	 * that will be direct-received from the knote, update
	 * the knote qos based on the first message and trigger
	 * the event.  If there are no more messages, reset the
	 * QoS to the value provided by the kevent.
	 */
	if (imq_is_valid(mqueue) && !imq_is_set(mqueue) &&
	    (first = ipc_kmsg_queue_first(&mqueue->imq_messages)) != IKM_NULL) {
		ipc_port_t port = ip_from_mq(mqueue);
		int sync_qos_override_index = ipc_port_get_max_sync_qos_index(port);

		if (kn->kn_sfflags & MACH_RCV_MSG)
			knote_adjust_qos(kn, first->ikm_qos, first->ikm_qos_override,
					 sync_qos_override_index);
		result = 1;
	} else if (kn->kn_sfflags & MACH_RCV_MSG) {
		knote_adjust_qos(kn,
				 MACH_MSG_PRIORITY_UNSPECIFIED,
				 MACH_MSG_PRIORITY_UNSPECIFIED,
				 THREAD_QOS_UNSPECIFIED);
	}
	imq_unlock(mqueue);

	return result;
}

static int
filt_machportprocess(
	struct knote *kn,
	struct filt_process_s *process_data,
	struct kevent_internal_s *kev)
{
	ipc_mqueue_t mqueue = kn->kn_ptr.p_mqueue;
	ipc_object_t object = mqueue_to_object(mqueue);
	thread_t self = current_thread();
	boolean_t used_filtprocess_data = FALSE;

	wait_result_t wresult;
	mach_msg_option_t option;
	mach_vm_address_t addr;
	mach_msg_size_t size;

	imq_lock(mqueue);

	/* Capture current state */
	*kev = kn->kn_kevent;

	/* If already deallocated/moved return one last EOF event */
	if (kev->flags & EV_EOF) {
		imq_unlock(mqueue);
		return 1;
	}

	/*
	 * Only honor supported receive options.  If no options are
	 * provided, just force a MACH_RCV_TOO_LARGE to detect the
	 * name of the port and the size of the waiting message.
	 */
	option = kn->kn_sfflags & (MACH_RCV_MSG|MACH_RCV_LARGE|MACH_RCV_LARGE_IDENTITY|
				   MACH_RCV_TRAILER_MASK|MACH_RCV_VOUCHER);

	if (option & MACH_RCV_MSG) {
		addr = (mach_vm_address_t) kn->kn_ext[0];
		size = (mach_msg_size_t) kn->kn_ext[1];

		/*
		 * If the kevent didn't specify a buffer and length, carve a buffer
		 * from the filter processing data according to the flags.
		 */
		if (size == 0 && process_data != NULL) {
			used_filtprocess_data = TRUE;

			addr = (mach_vm_address_t)process_data->fp_data_out;
			size = (mach_msg_size_t)process_data->fp_data_resid;
			option |= (MACH_RCV_LARGE | MACH_RCV_LARGE_IDENTITY);
			if (process_data->fp_flags & KEVENT_FLAG_STACK_DATA)
				option |= MACH_RCV_STACK;
		}
	} else {
		/* just detect the port name (if a set) and size of the first message */
		option = MACH_RCV_LARGE;
		addr = 0;
		size = 0;
	}

	/* just use the reference from here on out */
	io_reference(object);

	/*
	 * Set up to receive a message or the notification of a
	 * too-large message.  But never allow this call to wait.
	 * If the user provided additional options, like trailer
	 * options, pass those through here.  But we don't support
	 * scatter lists through this interface.
	 */
	self->ith_object = object;
	self->ith_msg_addr = addr;
	self->ith_rsize = size;
	self->ith_msize = 0;
	self->ith_option = option;
	self->ith_receiver_name = MACH_PORT_NULL;
	self->ith_continuation = NULL;
	option |= MACH_RCV_TIMEOUT; // never wait
	self->ith_state = MACH_RCV_IN_PROGRESS;
	self->ith_knote = kn;

	wresult = ipc_mqueue_receive_on_thread(
			mqueue,
			option,
			size, /* max_size */
			0, /* immediate timeout */
			THREAD_INTERRUPTIBLE,
			self);
	/* mqueue unlocked */

	/*
	 * If we timed out, or the process is exiting, just release the
	 * reference on the ipc_object and return zero.
	 */
	if (wresult == THREAD_RESTART || self->ith_state == MACH_RCV_TIMED_OUT) {
		io_release(object);
		return 0;
	}

	assert(wresult == THREAD_NOT_WAITING);
	assert(self->ith_state != MACH_RCV_IN_PROGRESS);

	/*
	 * If we weren't attempting to receive a message
	 * directly, we need to return the port name in
	 * the kevent structure.
	 */
	if ((option & MACH_RCV_MSG) != MACH_RCV_MSG) {
		assert(self->ith_state == MACH_RCV_TOO_LARGE);
		assert(self->ith_kmsg == IKM_NULL);
		kev->data = self->ith_receiver_name;
		io_release(object);
		return 1;
	}

	/*
	 * Attempt to receive the message directly, returning
	 * the results in the fflags field.
	 */
	kev->fflags = mach_msg_receive_results(&size);

	/* kmsg and object reference consumed */

	/*
	 * If the user asked for the identity of the port containing a
	 * too-large message, return it in the data field (as we
	 * do for messages we didn't try to receive).
	 */
	if (kev->fflags == MACH_RCV_TOO_LARGE) {
		kev->ext[1] = self->ith_msize;
		if (option & MACH_RCV_LARGE_IDENTITY)
			kev->data = self->ith_receiver_name;
		else
			kev->data = MACH_PORT_NULL;
	} else {
		kev->ext[1] = size;
		kev->data = MACH_PORT_NULL;
	}

	/*
	 * If we used a data buffer carved out from the filt_process data,
	 * store the address used in the knote and adjust the residual and
	 * other parameters for future use.
	 */
	if (used_filtprocess_data) {
		assert(process_data->fp_data_resid >= size);
		process_data->fp_data_resid -= size;
		if ((process_data->fp_flags & KEVENT_FLAG_STACK_DATA) == 0) {
			kev->ext[0] = process_data->fp_data_out;
			process_data->fp_data_out += size;
		} else {
			assert(option & MACH_RCV_STACK);
			kev->ext[0] = process_data->fp_data_out +
				      process_data->fp_data_resid;
		}
	}

	/*
	 * Apply message-based QoS values to output kevent as prescribed.
	 * The kev->qos field gets max(msg-qos, kn->kn_qos).
	 * The kev->ext[2] field gets (msg-qos << 32) | (override-qos).
	 *
	 * The mach_msg_receive_results() call saved off the message
	 * QoS values in the continuation save area on successful receive.
	 */
	if (kev->fflags == MACH_MSG_SUCCESS) {
		kev->qos = mach_msg_priority_combine(self->ith_qos, kn->kn_qos);
		kev->ext[2] = ((uint64_t)self->ith_qos << 32) |
			      (uint64_t)self->ith_qos_override;
	}

	return 1;
}

/*
 * Peek to see if the message queue associated with the knote has any
 * events.  This pre-hook is called when a filter uses the stay-
 * on-queue mechanism (as the knote_link_waitq mechanism does for
 * portsets) and someone calls select() against the containing kqueue.
 *
 * Just peek at the pre-post status of the portset's wait queue
 * to determine if it has anything interesting.  We can do it
 * without holding the lock, as it is just a snapshot in time
 * (if this is used as part of really waiting for events, we
 * will catch changes in this status when the event gets posted
 * up to the knote's kqueue).
 */
static unsigned
filt_machportpeek(struct knote *kn)
{
	ipc_mqueue_t mqueue = kn->kn_ptr.p_mqueue;

	return (ipc_mqueue_set_peek(mqueue));
}