X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/91447636331957f3d9b5ca5b508f07c526b0074d..4452a7af2eac33dbad800bcc91f2399d62c18f53:/bsd/kern/kern_event.c

diff --git a/bsd/kern/kern_event.c b/bsd/kern/kern_event.c
index 1bf948822..e785269ad 100644
--- a/bsd/kern/kern_event.c
+++ b/bsd/kern/kern_event.c
@@ -1,23 +1,29 @@
 /*
  * Copyright (c) 2000-2005 Apple Computer, Inc. All rights reserved.
  *
- * @APPLE_LICENSE_HEADER_START@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  *
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License"). You may not use this file except in compliance with the
- * License. Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
  *
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
  * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
  * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
- * License for the specific language governing rights and limitations
- * under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
  *
- * @APPLE_LICENSE_HEADER_END@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
  *
  */
 /*-
@@ -73,6 +79,7 @@
 #include <...>
 #include <...>
 #include <...>
+#include <sys/proc_info.h>
 #include <...>
 #include <...>
@@ -411,6 +418,7 @@ filt_procattach(struct knote *kn)
 	}
 
 	kn->kn_flags |= EV_CLEAR;	/* automatically set */
+	kn->kn_hookid = 1;		/* mark exit not seen */
 
 	/*
 	 * internal flag indicating registration done by kernel
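For orientation, the user-visible contract that filt_procattach() and the routines changed below implement is the EVFILT_PROC filter of kevent(2). A minimal userland sketch, not part of this diff (watch_exit is a hypothetical helper name):

#include <sys/types.h>
#include <sys/event.h>
#include <stdio.h>
#include <unistd.h>

/*
 * Block until an existing pid exits. EV_CLEAR need not be passed:
 * filt_procattach() forces it on the knote.
 */
int
watch_exit(pid_t pid)
{
	struct kevent kev;
	int kq, nev;

	if ((kq = kqueue()) == -1)
		return (-1);

	EV_SET(&kev, pid, EVFILT_PROC, EV_ADD, NOTE_EXIT, 0, NULL);
	if (kevent(kq, &kev, 1, NULL, 0, NULL) == -1)	/* register only */
		return (-1);

	/* wakes up when filt_proc() sees the NOTE_EXIT hint */
	nev = kevent(kq, NULL, 0, &kev, 1, NULL);
	if (nev == 1 && (kev.fflags & NOTE_EXIT))
		printf("pid %d exited\n", (int)pid);
	close(kq);
	return (nev == 1 ? 0 : -1);
}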
@@ -431,11 +439,12 @@
 
 /*
  * The knote may be attached to a different process, which may exit,
- * leaving nothing for the knote to be attached to. So when the process
- * exits, the knote is marked as DETACHED and also flagged as ONESHOT so
- * it will be deleted when read out. However, as part of the knote deletion,
- * this routine is called, so a check is needed to avoid actually performing
- * a detach, because the original process does not exist any more.
+ * leaving nothing for the knote to be attached to. In that case,
+ * we won't be able to find the process from its pid. But the exit
+ * code may still be processing the knote list for the target process.
+ * We may have to wait for that processing to complete before we can
+ * return (and presumably free the knote) without actually removing
+ * it from the dead process' knote list.
  */
 static void
 filt_procdetach(struct knote *kn)
@@ -446,66 +455,81 @@ filt_procdetach(struct knote *kn)
 	funnel_state = thread_funnel_set(kernel_flock, TRUE);
 	p = pfind(kn->kn_id);
-	if (p != (struct proc *)NULL)
+	if (p != (struct proc *)NULL) {
 		KNOTE_DETACH(&p->p_klist, kn);
-
+	} else if (kn->kn_hookid != 0) {	/* if not NOTE_EXIT yet */
+		kn->kn_hookid = -1;		/* we are detaching but... */
+		assert_wait(&kn->kn_hook, THREAD_UNINT);	/* have to wait */
+		thread_block(THREAD_CONTINUE_NULL);
+	}
 	thread_funnel_set(kernel_flock, funnel_state);
 }
 
 static int
 filt_proc(struct knote *kn, long hint)
 {
-	u_int event;
-	int funnel_state;
-
-	funnel_state = thread_funnel_set(kernel_flock, TRUE);
+	if (hint != 0) {
+		u_int event;
 
-	/*
-	 * mask off extra data
-	 */
-	event = (u_int)hint & NOTE_PCTRLMASK;
+		/* must hold the funnel when coming from below */
+		assert(thread_funnel_get() != (funnel_t)0);
 
-	/*
-	 * if the user is interested in this event, record it.
-	 */
-	if (kn->kn_sfflags & event)
-		kn->kn_fflags |= event;
+		/*
+		 * mask off extra data
+		 */
+		event = (u_int)hint & NOTE_PCTRLMASK;
 
-	/*
-	 * process is gone, so flag the event as finished.
-	 */
-	if (event == NOTE_EXIT) {
-		kn->kn_flags |= (EV_EOF | EV_ONESHOT);
-		thread_funnel_set(kernel_flock, funnel_state);
-		return (1);
-	}
+		/*
+		 * if the user is interested in this event, record it.
+		 */
+		if (kn->kn_sfflags & event)
+			kn->kn_fflags |= event;
 
-	/*
-	 * process forked, and user wants to track the new process,
-	 * so attach a new knote to it, and immediately report an
-	 * event with the parent's pid.
-	 */
-	if ((event == NOTE_FORK) && (kn->kn_sfflags & NOTE_TRACK)) {
-		struct kevent kev;
-		int error;
+		/*
+		 * process is gone, so flag the event as finished.
+		 *
+		 * If someone was trying to detach, but couldn't
+		 * find the proc to complete the detach, wake them
+		 * up (nothing will ever need to walk the per-proc
+		 * knote list again - so it's safe for them to dump
+		 * the knote now).
+		 */
+		if (event == NOTE_EXIT) {
+			boolean_t detaching = (kn->kn_hookid == -1);
+
+			kn->kn_hookid = 0;
+			kn->kn_flags |= (EV_EOF | EV_ONESHOT);
+			if (detaching)
+				thread_wakeup(&kn->kn_hookid);
+			return (1);
+		}
 
 		/*
-		 * register knote with new process.
+		 * process forked, and user wants to track the new process,
+		 * so attach a new knote to it, and immediately report an
+		 * event with the parent's pid.
 		 */
-		kev.ident = hint & NOTE_PDATAMASK;	/* pid */
-		kev.filter = kn->kn_filter;
-		kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1;
-		kev.fflags = kn->kn_sfflags;
-		kev.data = kn->kn_id;			/* parent */
-		kev.udata = kn->kn_kevent.udata;	/* preserve udata */
-		error = kevent_register(kn->kn_kq, &kev, NULL);
-		if (error)
-			kn->kn_fflags |= NOTE_TRACKERR;
+		if ((event == NOTE_FORK) && (kn->kn_sfflags & NOTE_TRACK)) {
+			struct kevent kev;
+			int error;
+
+			/*
+			 * register knote with new process.
+			 */
+			kev.ident = hint & NOTE_PDATAMASK;	/* pid */
+			kev.filter = kn->kn_filter;
+			kev.flags = kn->kn_flags | EV_ADD | EV_ENABLE | EV_FLAG1;
+			kev.fflags = kn->kn_sfflags;
+			kev.data = kn->kn_id;			/* parent */
+			kev.udata = kn->kn_kevent.udata;	/* preserve udata */
+			error = kevent_register(kn->kn_kq, &kev, NULL);
+			if (error)
+				kn->kn_fflags |= NOTE_TRACKERR;
+		}
 	}
 
-	event = kn->kn_fflags;
-	thread_funnel_set(kernel_flock, funnel_state);
-	return (event != 0);
+	return (kn->kn_fflags != 0);	/* atomic check - no funnel needed from above */
 }
 
 /*
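The NOTE_TRACK path above is what lets a single registration follow an entire process tree: on each fork, filt_proc() re-registers the knote against the child via kevent_register(), and failures surface as NOTE_TRACKERR. A hedged userland sketch of consuming that behavior (track_forks and handle are hypothetical names; the NOTE_CHILD/parent-pid convention is standard kevent(2) behavior, with EV_FLAG1 surfacing to the user as NOTE_CHILD):

#include <sys/types.h>
#include <sys/event.h>
#include <stdio.h>

/* Follow a pid and every child it subsequently forks. */
int
track_forks(int kq, pid_t pid)
{
	struct kevent kev;

	EV_SET(&kev, pid, EVFILT_PROC, EV_ADD,
	    NOTE_EXIT | NOTE_FORK | NOTE_TRACK, 0, NULL);
	return (kevent(kq, &kev, 1, NULL, 0, NULL));
}

/* In the event loop: tracked children announce themselves automatically. */
void
handle(struct kevent *kev)
{
	if (kev->fflags & NOTE_CHILD)		/* new child: ident is its pid */
		printf("child %d forked by %d\n",
		    (int)kev->ident, (int)kev->data);	/* data = parent pid */
	if (kev->fflags & NOTE_TRACKERR)	/* kevent_register() failed */
		fprintf(stderr, "lost a child of %d\n", (int)kev->ident);
	if (kev->fflags & NOTE_EXIT)
		printf("pid %d exited\n", (int)kev->ident);
}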
@@ -1055,21 +1079,21 @@ kevent(struct proc *p, struct kevent_args *uap, register_t *retval)
 
 	/* register all the change requests the user provided... */
 	noutputs = 0;
-	while (nchanges > 0) {
+	while (nchanges > 0 && error == 0) {
 		error = kevent_copyin(&changelist, &kev, p);
 		if (error)
 			break;
 		kev.flags &= ~EV_SYSFLAGS;
 		error = kevent_register(kq, &kev, p);
-		if (error) {
-			if (nevents == 0)
-				break;
+		if (error && nevents > 0) {
 			kev.flags = EV_ERROR;
 			kev.data = error;
-			(void) kevent_copyout(&kev, &ueventlist, p);
-			nevents--;
-			noutputs++;
+			error = kevent_copyout(&kev, &ueventlist, p);
+			if (error == 0) {
+				nevents--;
+				noutputs++;
+			}
 		}
 		nchanges--;
 	}
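The effect of the reworked loop: a registration failure is counted against the output list only if the EV_ERROR kevent actually reaches the user, and a copyout failure now aborts the whole call instead of being ignored. From userland, per-event failures are detected like this (a sketch; register_batch is a hypothetical helper, and it assumes nchanges fits the local results array):

#include <sys/types.h>
#include <sys/event.h>
#include <stdio.h>
#include <string.h>
#include <time.h>

/*
 * Register changes and report any that came back flagged EV_ERROR;
 * the errno value rides in kev.data, as set in the loop above.
 */
int
register_batch(int kq, struct kevent *changes, int nchanges)
{
	struct kevent results[16];
	struct timespec ts = { 0, 0 };	/* poll only; don't wait for triggers */
	int i, nev, failures = 0;

	nev = kevent(kq, changes, nchanges, results, 16, &ts);
	if (nev == -1)
		return (-1);		/* whole call failed */

	for (i = 0; i < nev; i++) {
		if (results[i].flags & EV_ERROR) {
			fprintf(stderr, "ident %lu: %s\n",
			    (unsigned long)results[i].ident,
			    strerror((int)results[i].data));
			failures++;
		}
	}
	return (failures);
}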
@@ -1351,42 +1375,46 @@ kevent_process(struct kqueue *kq,
 	       (kn = TAILQ_FIRST(&kq->kq_head)) != NULL) {
 
 		/*
-		 * move knote to the processed queue.
-		 * this is also protected by the kq lock.
-		 */
-		assert(kn->kn_tq == &kq->kq_head);
-		TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
-		kn->kn_tq = &kq->kq_inprocess;
-		TAILQ_INSERT_TAIL(&kq->kq_inprocess, kn, kn_tqe);
-
-		/*
+		 * Take note off the active queue.
+		 *
 		 * Non-EV_ONESHOT events must be re-validated.
 		 *
 		 * Convert our lock to a use-count and call the event's
 		 * filter routine to update.
 		 *
-		 * If the event is dropping (or no longer valid), we
-		 * already have it off the active queue, so just
-		 * finish the job of deactivating it.
+		 * If the event is valid, or triggered while the kq
+		 * is unlocked, move to the inprocess queue for processing.
 		 */
+
 		if ((kn->kn_flags & EV_ONESHOT) == 0) {
 			int result;
+			knote_deactivate(kn);
 
 			if (kqlock2knoteuse(kq, kn)) {
 
 				/* call the filter with just a ref */
 				result = kn->kn_fop->f_event(kn, 0);
-				if (!knoteuse2kqlock(kq, kn) || result == 0) {
-					knote_deactivate(kn);
+
+				/* if it's still alive, make sure it's active */
+				if (knoteuse2kqlock(kq, kn) && result) {
+					/* may have been reactivated in filter */
+					if (!(kn->kn_status & KN_ACTIVE)) {
+						knote_activate(kn);
+					}
+				} else {
 					continue;
 				}
 			} else {
-				knote_deactivate(kn);
 				continue;
 			}
 		}
+
+		/* knote is active: move onto inprocess queue */
+		assert(kn->kn_tq == &kq->kq_head);
+		TAILQ_REMOVE(&kq->kq_head, kn, kn_tqe);
+		kn->kn_tq = &kq->kq_inprocess;
+		TAILQ_INSERT_TAIL(&kq->kq_inprocess, kn, kn_tqe);
+
 		/*
 		 * Got a valid triggered knote with the kqueue
 		 * still locked. Snapshot the data, and determine
@@ -1947,13 +1975,11 @@ knote_init(void)
 
 	/* allocate kq lock group attribute and group */
 	kq_lck_grp_attr= lck_grp_attr_alloc_init();
-	lck_grp_attr_setstat(kq_lck_grp_attr);
 
 	kq_lck_grp = lck_grp_alloc_init("kqueue", kq_lck_grp_attr);
 
 	/* Allocate kq lock attribute */
 	kq_lck_attr = lck_attr_alloc_init();
-	lck_attr_setdefault(kq_lck_attr);
 
 	/* Initialize the timer filter lock */
 	lck_mtx_init(&_filt_timerlock, kq_lck_grp, kq_lck_attr);
@@ -2047,7 +2073,6 @@ kern_event_init(void)
 	 * allocate the lock attribute for mutexes
 	 */
 	evt_mtx_attr = lck_attr_alloc_init();
-	lck_attr_setdefault(evt_mtx_attr);
 	evt_mutex = lck_mtx_alloc_init(evt_mtx_grp, evt_mtx_attr);
 	if (evt_mutex == NULL)
 		return (ENOMEM);
@@ -2272,4 +2297,23 @@ kev_control(struct socket *so,
 
+int
+fill_kqueueinfo(struct kqueue *kq, struct kqueue_info * kinfo)
+{
+	struct stat * st;
+
+	/* No need for the funnel as fd is kept alive */
+
+	st = &kinfo->kq_stat;
+
+	st->st_size = kq->kq_count;
+	st->st_blksize = sizeof(struct kevent);
+	st->st_mode = S_IFIFO;
+	if (kq->kq_state & KQ_SEL)
+		kinfo->kq_state |= PROC_KQUEUE_SELECT;
+	if (kq->kq_state & KQ_SLEEP)
+		kinfo->kq_state |= PROC_KQUEUE_SLEEP;
+
+	return(0);
+}
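fill_kqueueinfo() packages a stat-shaped snapshot of a kqueue for the proc_info interface: st_size counts the kevents currently queued, st_blksize gives the size of one record, and the KQ_SEL/KQ_SLEEP bits report whether anyone is waiting on the queue. A sketch of reading that back from userland, assuming the libproc wrapper plus the PROC_PIDFDKQUEUEINFO flavor and struct kqueue_fdinfo container from the same proc_info work (those names and field paths are assumptions, not confirmed by this diff):

#include <libproc.h>
#include <sys/proc_info.h>
#include <stdio.h>
#include <unistd.h>

/* Print a snapshot of one of our own kqueue descriptors. */
void
show_kqueue(int kqfd)
{
	struct kqueue_fdinfo kqi;	/* assumed container for kqueue_info */

	if (proc_pidfdinfo(getpid(), kqfd, PROC_PIDFDKQUEUEINFO,
	    &kqi, sizeof(kqi)) <= 0)
		return;

	/* st_size was loaded from kq_count by fill_kqueueinfo() */
	printf("kqueue fd %d: %lld pending event(s)%s\n", kqfd,
	    (long long)kqi.kqueueinfo.kq_stat.st_size,
	    (kqi.kqueueinfo.kq_state &
	     (PROC_KQUEUE_SELECT | PROC_KQUEUE_SLEEP)) ?
	        ", waiters present" : "");
}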