/*
 * Copyright (c) 2000-2004 Apple Computer, Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * @OSF_COPYRIGHT@
 */
/*
 * Mach Operating System
 * Copyright (c) 1991,1990,1989 Carnegie Mellon University
 * All Rights Reserved.
 *
 * Permission to use, copy, modify and distribute this software and its
 * documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND FOR
 * ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie Mellon
 * the rights to redistribute these changes.
 */
/*
 */
/*
 *	File:	ipc/ipc_pset.c
 *	Author:	Rich Draves
 *	Date:	1989
 *
 *	Functions to manipulate IPC port sets.
 */

#include <mach/port.h>
#include <mach/kern_return.h>
#include <mach/message.h>
#include <ipc/ipc_mqueue.h>
#include <ipc/ipc_object.h>
#include <ipc/ipc_pset.h>
#include <ipc/ipc_right.h>
#include <ipc/ipc_space.h>
#include <ipc/ipc_port.h>

#include <kern/kern_types.h>

#include <vm/vm_map.h>

/*
 *	Routine:	ipc_pset_alloc
 *	Purpose:
 *		Allocate a port set.
 *	Conditions:
 *		Nothing locked.  If successful, the port set is returned
 *		locked.  (The caller doesn't have a reference.)
 *	Returns:
 *		KERN_SUCCESS		The port set is allocated.
 *		KERN_INVALID_TASK	The space is dead.
 *		KERN_NO_SPACE		No room for an entry in the space.
 *		KERN_RESOURCE_SHORTAGE	Couldn't allocate memory.
 */

kern_return_t
ipc_pset_alloc(
	ipc_space_t		space,
	mach_port_name_t	*namep,
	ipc_pset_t		*psetp)
{
	ipc_pset_t pset;
	mach_port_name_t name;
	kern_return_t kr;
	uint64_t reserved_link;

	reserved_link = waitq_link_reserve(NULL);

	kr = ipc_object_alloc(space, IOT_PORT_SET,
			      MACH_PORT_TYPE_PORT_SET, 0,
			      &name, (ipc_object_t *) &pset);
	if (kr != KERN_SUCCESS) {
		waitq_link_release(reserved_link);
		return kr;
	}
	/* pset and space are locked */

	ipc_mqueue_init(&pset->ips_messages, TRUE /* set */, &reserved_link);
	is_write_unlock(space);

	waitq_link_release(reserved_link);

	*namep = name;
	*psetp = pset;
	return KERN_SUCCESS;
}
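
/*
 * Illustrative sketch (not part of the original source): a caller-side
 * view of ipc_pset_alloc() under the conventions documented above.
 * On success the pset comes back locked and the caller holds no
 * reference (the space's entry does), so the caller typically just
 * records the name and drops the lock.  Hypothetical helper, guarded
 * out with #if 0.
 */
#if 0
static kern_return_t
example_pset_alloc(ipc_space_t space, mach_port_name_t *namep)
{
	ipc_pset_t pset;
	kern_return_t kr;

	kr = ipc_pset_alloc(space, namep, &pset);
	if (kr != KERN_SUCCESS)
		return kr;	/* nothing left locked on failure */

	/* pset is locked, not referenced; unlock to publish it */
	ips_unlock(pset);
	return KERN_SUCCESS;
}
#endif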

/*
 *	Routine:	ipc_pset_alloc_name
 *	Purpose:
 *		Allocate a port set, with a specific name.
 *	Conditions:
 *		Nothing locked.  If successful, the port set is returned
 *		locked.  (The caller doesn't have a reference.)
 *	Returns:
 *		KERN_SUCCESS		The port set is allocated.
 *		KERN_INVALID_TASK	The space is dead.
 *		KERN_NAME_EXISTS	The name already denotes a right.
 *		KERN_RESOURCE_SHORTAGE	Couldn't allocate memory.
 */

kern_return_t
ipc_pset_alloc_name(
	ipc_space_t		space,
	mach_port_name_t	name,
	ipc_pset_t		*psetp)
{
	ipc_pset_t pset;
	kern_return_t kr;
	uint64_t reserved_link;

	reserved_link = waitq_link_reserve(NULL);

	kr = ipc_object_alloc_name(space, IOT_PORT_SET,
				   MACH_PORT_TYPE_PORT_SET, 0,
				   name, (ipc_object_t *) &pset);
	if (kr != KERN_SUCCESS) {
		waitq_link_release(reserved_link);
		return kr;
	}
	/* pset is locked */

	ipc_mqueue_init(&pset->ips_messages, TRUE /* set */, &reserved_link);

	waitq_link_release(reserved_link);

	*psetp = pset;
	return KERN_SUCCESS;
}


/*
 *	Routine:	ipc_pset_alloc_special
 *	Purpose:
 *		Allocate a port set in a special space.
 *		The new port set is returned with one ref.
 *		If unsuccessful, IPS_NULL is returned.
 *	Conditions:
 *		Nothing locked.
 */
ipc_pset_t
ipc_pset_alloc_special(
	__assert_only ipc_space_t space)
{
	ipc_pset_t pset;
	uint64_t reserved_link;

	assert(space != IS_NULL);
	assert(space->is_table == IE_NULL);
	assert(!is_active(space));

	reserved_link = waitq_link_reserve(NULL);

	__IGNORE_WCASTALIGN(pset = (ipc_pset_t)io_alloc(IOT_PORT_SET));
	if (pset == IPS_NULL)
		return IPS_NULL;

	bzero((char *)pset, sizeof(*pset));

	io_lock_init(&pset->ips_object);
	pset->ips_references = 1;
	pset->ips_object.io_bits = io_makebits(TRUE, IOT_PORT_SET, 0);

	ipc_mqueue_init(&pset->ips_messages, TRUE /* set */, &reserved_link);

	waitq_link_release(reserved_link);

	return pset;
}


/*
 *	Routine:	ipc_pset_member
 *	Purpose:
 *		Checks to see if a port is a member of a pset
 *	Conditions:
 *		Both port and port set are locked.
 *		The port must be active.
 */
boolean_t
ipc_pset_member(
	ipc_pset_t	pset,
	ipc_port_t	port)
{
	assert(ip_active(port));

	return (ipc_mqueue_member(&port->ip_messages, &pset->ips_messages));
}


/*
 *	Routine:	ipc_pset_add
 *	Purpose:
 *		Puts a port into a port set.
 *	Conditions:
 *		Both port and port set are locked and active.
 *		The owner of the port set is also receiver for the port.
 */

kern_return_t
ipc_pset_add(
	ipc_pset_t	pset,
	ipc_port_t	port,
	uint64_t	*reserved_link,
	uint64_t	*reserved_prepost)
{
	kern_return_t kr;

	assert(ips_active(pset));
	assert(ip_active(port));

	kr = ipc_mqueue_add(&port->ip_messages, &pset->ips_messages,
			    reserved_link, reserved_prepost);

	return kr;
}
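
/*
 * Illustrative calling pattern (a sketch, not from this file): how
 * mach_port_move_member()-style code might drive ipc_pset_add().
 * The link and prepost reservations are taken before any IPC locks,
 * because reserving may allocate; the waitq_prepost_reserve()/
 * waitq_prepost_release_reserve() names are assumed from the waitq
 * interfaces this file already uses for links.
 */
#if 0
	uint64_t link = waitq_link_reserve(NULL);
	uint64_t prepost = waitq_prepost_reserve(NULL, 10, WAITQ_DONT_LOCK);

	/* ... lock port and pset, check both are active ... */
	kr = ipc_pset_add(pset, port, &link, &prepost);
	/* ... unlock port and pset ... */

	/* release any unconsumed portion of the reservations */
	waitq_link_release(link);
	waitq_prepost_release_reserve(prepost);
#endif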


/*
 *	Routine:	ipc_pset_remove
 *	Purpose:
 *		Removes a port from a port set.
 *		The port set loses a reference.
 *	Conditions:
 *		Both port and port set are locked.
 *		The port must be active.
 */

kern_return_t
ipc_pset_remove(
	ipc_pset_t	pset,
	ipc_port_t	port)
{
	kern_return_t kr;

	assert(ip_active(port));

	if (port->ip_in_pset == 0)
		return KERN_NOT_IN_SET;

	kr = ipc_mqueue_remove(&port->ip_messages, &pset->ips_messages);

	return kr;
}

/*
 *	Routine:	ipc_pset_remove_from_all
 *	Purpose:
 *		Removes a port from all its port sets.
 *	Conditions:
 *		port is locked and active.
 */

kern_return_t
ipc_pset_remove_from_all(
	ipc_port_t	port)
{
	if (port->ip_in_pset == 0)
		return KERN_NOT_IN_SET;

	/*
	 * Remove the port's mqueue from all sets
	 */
	ipc_mqueue_remove_from_all(&port->ip_messages);
	return KERN_SUCCESS;
}


/*
 *	Routine:	ipc_pset_destroy
 *	Purpose:
 *		Destroys a port_set.
 *	Conditions:
 *		The port_set is locked and alive.
 *		The caller has a reference, which is consumed.
 *		Afterwards, the port_set is unlocked and dead.
 */

void
ipc_pset_destroy(
	ipc_pset_t	pset)
{
	assert(ips_active(pset));

	pset->ips_object.io_bits &= ~IO_BITS_ACTIVE;

	/*
	 * remove all the member message queues
	 * AND remove this message queue from any containing sets
	 */
	ipc_mqueue_remove_all(&pset->ips_messages);

	/*
	 * Set all waiters on the portset running to
	 * discover the change.
	 */
	imq_lock(&pset->ips_messages);
	ipc_mqueue_changed(&pset->ips_messages);
	imq_unlock(&pset->ips_messages);

	ipc_mqueue_deinit(&pset->ips_messages);

	ips_unlock(pset);
	ips_release(pset);	/* consume the ref our caller gave us */
}

/* Kqueue EVFILT_MACHPORT support */

#include <sys/event.h>
#include <sys/errno.h>

static int	filt_machportattach(struct knote *kn);
static void	filt_machportdetach(struct knote *kn);
static int	filt_machport(struct knote *kn, long hint);
static int	filt_machporttouch(struct knote *kn, struct kevent_internal_s *kev);
static int	filt_machportprocess(struct knote *kn, struct filt_process_s *data, struct kevent_internal_s *kev);
static unsigned	filt_machportpeek(struct knote *kn);
struct filterops machport_filtops = {
	.f_attach = filt_machportattach,
	.f_detach = filt_machportdetach,
	.f_event = filt_machport,
	.f_touch = filt_machporttouch,
	.f_process = filt_machportprocess,
	.f_peek = filt_machportpeek,
};

static int
filt_machportattach(
	struct knote *kn)
{
	mach_port_name_t name = (mach_port_name_t)kn->kn_kevent.ident;
	uint64_t wq_link_id = waitq_link_reserve(NULL);
	ipc_space_t space = current_space();
	ipc_kmsg_t first;

	int error;
	int result = 0;
	kern_return_t kr;
	ipc_entry_t entry;
	ipc_mqueue_t mqueue;

	kr = ipc_right_lookup_read(space, name, &entry);
	if (kr == KERN_SUCCESS) {
		/* space is read-locked and active */

		if (entry->ie_bits & MACH_PORT_TYPE_PORT_SET) {
			ipc_pset_t pset;

			__IGNORE_WCASTALIGN(pset = (ipc_pset_t)entry->ie_object);
			mqueue = &pset->ips_messages;

			imq_lock(mqueue);

			/*
			 * Bind the portset wait queue directly to knote/kqueue.
			 * This allows us to just use wait_queue foo to effect a wakeup,
			 * rather than having to call knote() from the Mach code on each
			 * message.  We still attach the knote to the mqueue klist for
			 * NOTE_REVOKE purposes only.
			 */
			error = knote_link_waitq(kn, &mqueue->imq_wait_queue, &wq_link_id);
			if (!error) {
				ips_reference(pset);
				kn->kn_ptr.p_mqueue = mqueue;
				KNOTE_ATTACH(&mqueue->imq_klist, kn);
			}
			imq_unlock(mqueue);

			is_read_unlock(space);

			/*
			 * linked knotes are marked stay-active and therefore don't
			 * need an indication of their fired state to be returned
			 * from the attach operation.
			 */

		} else if (entry->ie_bits & MACH_PORT_TYPE_RECEIVE) {
			ipc_port_t port;

			__IGNORE_WCASTALIGN(port = (ipc_port_t)entry->ie_object);
			mqueue = &port->ip_messages;
			ip_reference(port);

			/*
			 * attach knote to port and determine result
			 * If the filter requested direct message receipt,
			 * we may need to adjust the qos of the knote to
			 * reflect the requested and override qos of the
			 * first message in the queue.
			 */
			imq_lock(mqueue);
			kn->kn_ptr.p_mqueue = mqueue;
			KNOTE_ATTACH(&mqueue->imq_klist, kn);
			if ((first = ipc_kmsg_queue_first(&mqueue->imq_messages)) != IKM_NULL) {
				if (kn->kn_sfflags & MACH_RCV_MSG)
					knote_adjust_qos(kn, first->ikm_qos, first->ikm_qos_override);
				result = 1;
			}
			imq_unlock(mqueue);

			is_read_unlock(space);
			error = 0;
		} else {
			is_read_unlock(space);
			error = ENOTSUP;
		}
	} else {
		error = ENOENT;
	}

	waitq_link_release(wq_link_id);

	/* bail out on errors */
	if (error) {
		kn->kn_flags |= EV_ERROR;
		kn->kn_data = error;
		return 0;
	}

	return result;
}

/* NOT proud of these - we should have a stricter relationship between mqueue and ipc object */
#define mqueue_to_pset(mq)	((ipc_pset_t)((uintptr_t)mq-offsetof(struct ipc_pset, ips_messages)))
#define mqueue_to_port(mq)	((ipc_port_t)((uintptr_t)mq-offsetof(struct ipc_port, ip_messages)))
#define mqueue_to_object(mq)	(((ipc_object_t)(mq)) - 1)

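/*
 * Sketch (not in the original source) of the invariant the macros
 * above rely on: an ipc_mqueue is embedded in its owning pset or
 * port, so subtracting the field offset recovers the container.
 * mqueue_to_object() further assumes the ipc_object header sits
 * immediately before the embedded mqueue in both structures.
 */
#if 0
	ipc_mqueue_t mq = &pset->ips_messages;

	assert(mqueue_to_pset(mq) == pset);
	assert(mqueue_to_object(mq) == (ipc_object_t)pset);
#endif
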
static void
filt_machportdetach(
	struct knote *kn)
{
	ipc_mqueue_t mqueue = kn->kn_ptr.p_mqueue;
	ipc_object_t object = mqueue_to_object(mqueue);

	imq_lock(mqueue);
	KNOTE_DETACH(&mqueue->imq_klist, kn);
	kn->kn_ptr.p_mqueue = IMQ_NULL;
	imq_unlock(mqueue);

	if (io_otype(object) == IOT_PORT_SET) {
		/*
		 * Unlink the portset wait queue from knote/kqueue.
		 * JMM - Does this need to be atomic under the mq lock?
		 */
		(void)knote_unlink_waitq(kn, &mqueue->imq_wait_queue);
	}
	io_release(object);
}

/*
 * filt_machport - deliver events into the mach port filter
 *
 * Mach port message arrival events are currently only posted via the
 * kqueue filter routine for ports.  Port sets are marked stay-active
 * and the wait queue code will break any kqueue waiters out to go
 * poll the stay-queued knotes again.
 *
 * If there is a message at the head of the queue,
 * we indicate that the knote should go active.  If
 * the message is to be direct-received, we adjust the
 * QoS of the knote according to the requested and override
 * QoS of that first message.
 *
 * NOTE_REVOKE events are a legacy way to indicate that the port/portset
 * was deallocated or left the current Mach portspace (modern technique
 * is with an EV_VANISHED protocol).  If we see NOTE_REVOKE, deliver an
 * EV_EOF event for these changes (hopefully it will get delivered before
 * the port name recycles to the same generation count and someone tries
 * to re-register a kevent for it or the events are udata-specific -
 * avoiding a conflict).
 */
static int
filt_machport(
	struct knote *kn,
	long hint)
{
	ipc_mqueue_t mqueue = kn->kn_ptr.p_mqueue;
	ipc_kmsg_t first;
	int result = 0;

	/* mqueue locked by caller */
	assert(imq_held(mqueue));

	if (hint == NOTE_REVOKE) {
		kn->kn_flags |= EV_EOF | EV_ONESHOT;
		result = 1;
	} else if (imq_is_valid(mqueue)) {
		assert(!imq_is_set(mqueue));
		if ((first = ipc_kmsg_queue_first(&mqueue->imq_messages)) != IKM_NULL) {
			if (kn->kn_sfflags & MACH_RCV_MSG)
				knote_adjust_qos(kn, first->ikm_qos, first->ikm_qos_override);
			result = 1;
		}
	}

	return result;
}

static int
filt_machporttouch(
	struct knote *kn,
	struct kevent_internal_s *kev)
{
	ipc_mqueue_t mqueue = kn->kn_ptr.p_mqueue;
	ipc_kmsg_t first;
	int result = 0;

	imq_lock(mqueue);

	/* copy in new settings and save off new input fflags */
	kn->kn_sfflags = kev->fflags;
	kn->kn_ext[0] = kev->ext[0];
	kn->kn_ext[1] = kev->ext[1];
	if ((kn->kn_status & KN_UDATA_SPECIFIC) == 0)
		kn->kn_udata = kev->udata;

	/*
	 * If the mqueue is a valid port and there is a message
	 * that will be direct-received from the knote, update
	 * the knote qos based on the first message and trigger
	 * the event.  If there are no more messages, reset the
	 * QoS to the value provided by the kevent.
	 */
	if (imq_is_valid(mqueue) && !imq_is_set(mqueue) &&
	    (first = ipc_kmsg_queue_first(&mqueue->imq_messages)) != IKM_NULL) {
		if (kn->kn_sfflags & MACH_RCV_MSG)
			knote_adjust_qos(kn, first->ikm_qos, first->ikm_qos_override);
		result = 1;
	} else if (kn->kn_sfflags & MACH_RCV_MSG) {
		knote_adjust_qos(kn,
		                 MACH_MSG_PRIORITY_UNSPECIFIED,
		                 MACH_MSG_PRIORITY_UNSPECIFIED);
	}
	imq_unlock(mqueue);

	return result;
}

static int
filt_machportprocess(
	struct knote *kn,
	struct filt_process_s *process_data,
	struct kevent_internal_s *kev)
{
	ipc_mqueue_t mqueue = kn->kn_ptr.p_mqueue;
	ipc_object_t object = mqueue_to_object(mqueue);
	thread_t self = current_thread();
	boolean_t used_filtprocess_data = FALSE;

	wait_result_t wresult;
	mach_msg_option_t option;
	mach_vm_address_t addr;
	mach_msg_size_t size;

	imq_lock(mqueue);

	/* Capture current state */
	*kev = kn->kn_kevent;

	/* If already deallocated/moved return one last EOF event */
	if (kev->flags & EV_EOF) {
		imq_unlock(mqueue);
		return 1;
	}

	/*
	 * Only honor supported receive options.  If no options are
	 * provided, just force a MACH_RCV_TOO_LARGE to detect the
	 * name of the port and the size of the waiting message.
	 */
	option = kn->kn_sfflags & (MACH_RCV_MSG|MACH_RCV_LARGE|MACH_RCV_LARGE_IDENTITY|
	                           MACH_RCV_TRAILER_MASK|MACH_RCV_VOUCHER);

	if (option & MACH_RCV_MSG) {
		addr = (mach_vm_address_t) kn->kn_ext[0];
		size = (mach_msg_size_t) kn->kn_ext[1];

		/*
		 * If the kevent didn't specify a buffer and length, carve a buffer
		 * from the filter processing data according to the flags.
		 */
		if (size == 0 && process_data != NULL) {
			used_filtprocess_data = TRUE;

			addr = (mach_vm_address_t)process_data->fp_data_out;
			size = (mach_msg_size_t)process_data->fp_data_resid;
			option |= (MACH_RCV_LARGE | MACH_RCV_LARGE_IDENTITY);
			if (process_data->fp_flags & KEVENT_FLAG_STACK_DATA)
				option |= MACH_RCV_STACK;
		}
	} else {
		/* just detect the port name (if a set) and size of the first message */
		option = MACH_RCV_LARGE;
		addr = 0;
		size = 0;
	}

	/* just use the reference from here on out */
	io_reference(object);

	/*
	 * Set up to receive a message or the notification of a
	 * too large message.  But never allow this call to wait.
	 * If the user provided additional options, like trailer
	 * options, pass those through here.  But we don't support
	 * scatter lists through this interface.
	 */
	self->ith_object = object;
	self->ith_msg_addr = addr;
	self->ith_rsize = size;
	self->ith_msize = 0;
	self->ith_option = option;
	self->ith_receiver_name = MACH_PORT_NULL;
	self->ith_continuation = NULL;
	option |= MACH_RCV_TIMEOUT; // never wait
	self->ith_state = MACH_RCV_IN_PROGRESS;

	wresult = ipc_mqueue_receive_on_thread(
			mqueue,
			option,
			size, /* max_size */
			0, /* immediate timeout */
			THREAD_INTERRUPTIBLE,
			self);
	/* mqueue unlocked */

	/*
	 * If we timed out, or the process is exiting, just release the
	 * reference on the ipc_object and return zero.
	 */
	if (wresult == THREAD_RESTART || self->ith_state == MACH_RCV_TIMED_OUT) {
		io_release(object);
		return 0;
	}

	assert(wresult == THREAD_NOT_WAITING);
	assert(self->ith_state != MACH_RCV_IN_PROGRESS);

	/*
	 * If we weren't attempting to receive a message
	 * directly, we need to return the port name in
	 * the kevent structure.
	 */
	if ((option & MACH_RCV_MSG) != MACH_RCV_MSG) {
		assert(self->ith_state == MACH_RCV_TOO_LARGE);
		assert(self->ith_kmsg == IKM_NULL);
		kev->data = self->ith_receiver_name;
		io_release(object);
		return 1;
	}

	/*
	 * Attempt to receive the message directly, returning
	 * the results in the fflags field.
	 */
	kev->fflags = mach_msg_receive_results(&size);

	/* kmsg and object reference consumed */

	/*
	 * if the user asked for the identity of ports containing a
	 * too-large message, return it in the data field (as we
	 * do for messages we didn't try to receive).
	 */
	if (kev->fflags == MACH_RCV_TOO_LARGE) {
		kev->ext[1] = self->ith_msize;
		if (option & MACH_RCV_LARGE_IDENTITY)
			kev->data = self->ith_receiver_name;
		else
			kev->data = MACH_PORT_NULL;
	} else {
		kev->ext[1] = size;
		kev->data = MACH_PORT_NULL;
	}

	/*
	 * If we used a data buffer carved out from the filt_process data,
	 * store the address used in the knote and adjust the residual and
	 * other parameters for future use.
	 */
	if (used_filtprocess_data) {
		assert(process_data->fp_data_resid >= size);
		process_data->fp_data_resid -= size;
		if ((process_data->fp_flags & KEVENT_FLAG_STACK_DATA) == 0) {
			kev->ext[0] = process_data->fp_data_out;
			process_data->fp_data_out += size;
		} else {
			assert(option & MACH_RCV_STACK);
			kev->ext[0] = process_data->fp_data_out +
				      process_data->fp_data_resid;
		}
	}

	/*
	 * Apply message-based QoS values to output kevent as prescribed.
	 * The kev->qos field gets max(msg-qos, kn->kn_qos).
	 * The kev->ext[2] field gets (msg-qos << 32) | (override-qos).
	 *
	 * The mach_msg_receive_results() call saved off the message
	 * QoS values in the continuation save area on successful receive.
	 */
	if (kev->fflags == MACH_MSG_SUCCESS) {
		kev->qos = mach_msg_priority_combine(self->ith_qos, kn->kn_qos);
		kev->ext[2] = ((uint64_t)self->ith_qos << 32) |
			      (uint64_t)self->ith_qos_override;
	}

	return 1;
}

/*
 * Peek to see if the message queue associated with the knote has any
 * events.  This pre-hook is called when a filter uses the stay-
 * on-queue mechanism (as the knote_link_waitq mechanism does for
 * portsets) and someone calls select() against the containing kqueue.
 *
 * Just peek at the pre-post status of the portset's wait queue
 * to determine if it has anything interesting.  We can do it
 * without holding the lock, as it is just a snapshot in time
 * (if this is used as part of really waiting for events, we
 * will catch changes in this status when the event gets posted
 * up to the knote's kqueue).
 */
static unsigned
filt_machportpeek(struct knote *kn)
{
	ipc_mqueue_t mqueue = kn->kn_ptr.p_mqueue;

	return (ipc_mqueue_set_peek(mqueue));
}
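
/*
 * Illustrative userspace counterpart (a sketch, not part of this
 * file): registering EVFILT_MACHPORT against a port-set name and
 * waiting for message arrival, which exercises the attach/process
 * paths above.  Registering without a receive buffer forces the
 * MACH_RCV_TOO_LARGE path, so kev.data reports the name of the
 * member port with a waiting message.  Error handling is omitted.
 */
#if 0
#include <mach/mach.h>
#include <sys/event.h>

static mach_port_name_t
example_wait_for_message(int kq, mach_port_name_t pset_name)
{
	struct kevent64_s kev;

	EV_SET64(&kev, pset_name, EVFILT_MACHPORT, EV_ADD | EV_ENABLE,
	    0, 0, 0, 0, 0);

	/* one call both registers the knote and waits for an event */
	(void)kevent64(kq, &kev, 1, &kev, 1, 0, NULL);

	return (mach_port_name_t)kev.data;
}
#endif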