X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/b0d623f7f2ae71ed96e60569f61f9a9a27016e80..b226f5e54a60dc81db17b1260381d7dbfea3cdf1:/bsd/kern/tty.c diff --git a/bsd/kern/tty.c b/bsd/kern/tty.c index a03e1f437..ed7e79658 100644 --- a/bsd/kern/tty.c +++ b/bsd/kern/tty.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 1997-2007 Apple Inc. All rights reserved. + * Copyright (c) 1997-2017 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -113,24 +113,14 @@ #include #include #include /* averunnable */ - -/* - * Debugging assertions for tty locks - */ -#define TTY_DEBUG 1 -#if TTY_DEBUG -#define TTY_LOCK_OWNED(tp) do {lck_mtx_assert(&tp->t_lock, LCK_MTX_ASSERT_OWNED); } while (0) -#define TTY_LOCK_NOTOWNED(tp) do {lck_mtx_assert(&tp->t_lock, LCK_MTX_ASSERT_NOTOWNED); } while (0) -#else -#define TTY_LOCK_OWNED(tp) -#define TTY_LOCK_NOTOWNED(tp) -#endif +#include +#include static lck_grp_t *tty_lck_grp; static lck_grp_attr_t *tty_lck_grp_attr; static lck_attr_t *tty_lck_attr; -static int ttnread(struct tty *tp); +__private_extern__ int ttnread(struct tty *tp); static void ttyecho(int c, struct tty *tp); static int ttyoutput(int c, struct tty *tp); static void ttypend(struct tty *tp); @@ -142,6 +132,9 @@ static void ttyunblock(struct tty *tp); static int ttywflush(struct tty *tp); static int proc_compare(proc_t p1, proc_t p2); +void ttyhold(struct tty *tp); +static void ttydeallocate(struct tty *tp); + static int isctty(proc_t p, struct tty *tp); static int isctty_sp(proc_t p, struct tty *tp, struct session *sessp); @@ -327,7 +320,6 @@ tty_unlock(struct tty *tp) lck_mtx_unlock(&tp->t_lock); } - /* * ttyopen (LDISC) * @@ -338,10 +330,6 @@ tty_unlock(struct tty *tp) int ttyopen(dev_t device, struct tty *tp) { - proc_t p = current_proc(); - struct pgrp * pg, * oldpg; - struct session *sessp, *oldsess; - TTY_LOCK_OWNED(tp); /* debug assert */ tp->t_dev = device; @@ -353,55 +341,6 @@ ttyopen(dev_t device, struct tty *tp) bzero(&tp->t_winsize, sizeof(tp->t_winsize)); } - pg = proc_pgrp(p); - sessp = proc_session(p); - - /* - * First tty open affter setsid() call makes this tty its controlling - * tty, if the tty does not already have a session associated with it. 
- * Only do this if the process - */ - if (SESS_LEADER(p, sessp) && /* process is session leader */ - sessp->s_ttyvp == NULL && /* but has no controlling tty */ - tp->t_session == NULL ) { /* and tty not controlling */ - session_lock(sessp); - if ((sessp->s_flags & S_NOCTTY) == 0) { /* and no O_NOCTTY */ - /* Hold on to the reference */ - sessp->s_ttyp = tp; /* XXX NOT A REFERENCE */ - OSBitOrAtomic(P_CONTROLT, &p->p_flag); - session_unlock(sessp); - proc_list_lock(); - oldpg = tp->t_pgrp; - oldsess = tp->t_session; - if (oldsess != SESSION_NULL) - oldsess->s_ttypgrpid = NO_PID; - tp->t_session = sessp; - tp->t_pgrp = pg; - sessp->s_ttypgrpid = pg->pg_id; - proc_list_unlock(); - /* SAFE: All callers drop the lock on return */ - tty_unlock(tp); - if (oldpg != PGRP_NULL) - pg_rele(oldpg); - if (oldsess != SESSION_NULL) - session_rele(oldsess); - tty_lock(tp); - goto out; - } - session_unlock(sessp); - } - - /* SAFE: All callers drop the lock on return */ - tty_unlock(tp); - if (sessp != SESSION_NULL) - session_rele(sessp); - if (pg != PGRP_NULL) - pg_rele(pg); - tty_lock(tp); - -out: - - /* XXX may be an error code */ return (0); } @@ -422,6 +361,7 @@ ttyclose(struct tty *tp) { struct pgrp * oldpg; struct session * oldsessp; + struct knote *kn; TTY_LOCK_OWNED(tp); /* debug assert */ @@ -458,8 +398,15 @@ ttyclose(struct tty *tp) pg_rele(oldpg); tty_lock(tp); tp->t_state = 0; + SLIST_FOREACH(kn, &tp->t_wsel.si_note, kn_selnext) { + KNOTE_DETACH(&tp->t_wsel.si_note, kn); + } selthreadclear(&tp->t_wsel); + SLIST_FOREACH(kn, &tp->t_rsel.si_note, kn_selnext) { + KNOTE_DETACH(&tp->t_rsel.si_note, kn); + } selthreadclear(&tp->t_rsel); + return (0); } @@ -954,6 +901,43 @@ ttyoutput(int c, struct tty *tp) return (-1); } +/* + * Sets the tty state to not allow any more changes of foreground process + * group. This is required to be done so that a subsequent revoke on a vnode + * is able to always successfully complete. + * + * Locks : Assumes tty_lock held on entry + */ +void +ttysetpgrphup(struct tty *tp) +{ + TTY_LOCK_OWNED(tp); /* debug assert */ + SET(tp->t_state, TS_PGRPHUP); + /* + * Also wake up sleeping readers which may or may not belong to the + * current foreground process group. + * + * This forces any non-fg readers (which entered read when + * that process group was in the fg) to return with EIO (if they're + * catching SIGTTIN or with SIGTTIN). The ones which do belong to the fg + * process group will promptly go back to sleep and get a SIGHUP shortly + * This would normally happen as part of the close in revoke but if + * there is a sleeping reader from a non-fg process group we never get + * to the close because the sleeping reader holds an iocount on the + * vnode of the terminal which is going to get revoked->reclaimed. 
+ */ + wakeup(TSA_HUP_OR_INPUT(tp)); +} + +/* + * Locks : Assumes tty lock held on entry + */ +void +ttyclrpgrphup(struct tty *tp) +{ + TTY_LOCK_OWNED(tp); /* debug assert */ + CLR(tp->t_state, TS_PGRPHUP); +} /* * ttioctl @@ -1046,9 +1030,11 @@ int ttioctl_locked(struct tty *tp, u_long cmd, caddr_t data, int flag, proc_t p) { int error = 0; + int bogusData = 1; struct uthread *ut; - struct pgrp * pg, *oldpg; - struct session *sessp, * oldsessp; + struct pgrp *pg, *oldpg; + struct session *sessp, *oldsessp; + struct tty *oldtp; TTY_LOCK_OWNED(tp); /* debug assert */ @@ -1141,7 +1127,6 @@ ttioctl_locked(struct tty *tp, u_long cmd, caddr_t data, int flag, proc_t p) } case TIOCSCONS: { /* Set current console device to this line */ - int bogusData = 1; data = (caddr_t) &bogusData; /* No break - Fall through to BSD code */ @@ -1378,21 +1363,58 @@ ttioctl_locked(struct tty *tp, u_long cmd, caddr_t data, int flag, proc_t p) break; case TIOCSCTTY: /* become controlling tty */ /* Session ctty vnode pointer set in vnode layer. */ - pg = proc_pgrp(p); sessp = proc_session(p); - if (!SESS_LEADER(p, sessp) || - ((sessp->s_ttyvp || tp->t_session) && - (tp->t_session != sessp))) { + if (sessp == SESSION_NULL) { + error = EPERM; + goto out; + } + + /* + * This can only be done by a session leader. + */ + if (!SESS_LEADER(p, sessp)) { /* SAFE: All callers drop the lock on return */ tty_unlock(tp); - if (sessp != SESSION_NULL) - session_rele(sessp); - if (pg != PGRP_NULL) + session_rele(sessp); + tty_lock(tp); + error = EPERM; + goto out; + } + /* + * If this terminal is already the controlling terminal for the + * session, nothing to do here. + */ + if (tp->t_session == sessp) { + /* SAFE: All callers drop the lock on return */ + tty_unlock(tp); + session_rele(sessp); + tty_lock(tp); + error = 0; + goto out; + } + pg = proc_pgrp(p); + /* + * Deny if the terminal is already attached to another session or + * the session already has a terminal vnode. + */ + session_lock(sessp); + if (sessp->s_ttyvp || tp->t_session) { + session_unlock(sessp); + /* SAFE: All callers drop the lock on return */ + tty_unlock(tp); + if (pg != PGRP_NULL) { pg_rele(pg); + } + session_rele(sessp); tty_lock(tp); error = EPERM; goto out; } + sessp->s_ttypgrpid = pg->pg_id; + oldtp = sessp->s_ttyp; + ttyhold(tp); + sessp->s_ttyp = tp; + session_unlock(sessp); proc_list_lock(); oldsessp = tp->t_session; oldpg = tp->t_pgrp; @@ -1400,12 +1422,8 @@ ttioctl_locked(struct tty *tp, u_long cmd, caddr_t data, int flag, proc_t p) oldsessp->s_ttypgrpid = NO_PID; /* do not drop refs on sessp and pg as tp holds them */ tp->t_session = sessp; - sessp->s_ttypgrpid = pg->pg_id; tp->t_pgrp = pg; proc_list_unlock(); - session_lock(sessp); - sessp->s_ttyp = tp; /* XXX NOT A REFERENCE */ - session_unlock(sessp); OSBitOrAtomic(P_CONTROLT, &p->p_flag); /* SAFE: All callers drop the lock on return */ tty_unlock(tp); @@ -1414,6 +1432,8 @@ ttioctl_locked(struct tty *tp, u_long cmd, caddr_t data, int flag, proc_t p) session_rele(oldsessp); if (oldpg != PGRP_NULL) pg_rele(oldpg); + if (NULL != oldtp) + ttyfree(oldtp); tty_lock(tp); break; @@ -1442,11 +1462,40 @@ ttioctl_locked(struct tty *tp, u_long cmd, caddr_t data, int flag, proc_t p) error = EPERM; goto out; } + /* + * The session leader is going away and is possibly going to revoke + * the terminal, we can't change the process group when that is the + * case. 
+ */ + if (ISSET(tp->t_state, TS_PGRPHUP)) { + if (sessp != SESSION_NULL) + session_rele(sessp); + pg_rele(pgrp); + error = EPERM; + goto out; + } proc_list_lock(); oldpg = tp->t_pgrp; tp->t_pgrp = pgrp; sessp->s_ttypgrpid = pgrp->pg_id; proc_list_unlock(); + + /* + * Wakeup readers to recheck if they are still the foreground + * process group. + * + * ttwakeup() isn't called because the readers aren't getting + * woken up becuse there is something to read but to force + * the re-evaluation of their foreground process group status. + * + * Ordinarily leaving these readers waiting wouldn't be an issue + * as launchd would send them a termination signal eventually + * (if nobody else does). But if this terminal happens to be + * /dev/console, launchd itself could get blocked forever behind + * a revoke of /dev/console and leave the system deadlocked. + */ + wakeup(TSA_HUP_OR_INPUT(tp)); + /* SAFE: All callers drop the lock on return */ tty_unlock(tp); if (oldpg != PGRP_NULL) @@ -1498,27 +1547,59 @@ out: int ttyselect(struct tty *tp, int rw, void *wql, proc_t p) { - if (tp == NULL) - return (ENXIO); + int retval = 0; + /* + * Attaching knotes to TTYs needs to call selrecord in order to hook + * up the waitq to the selinfo, regardless of data being ready. See + * filt_ttyattach. + */ + bool needs_selrecord = rw & FMARK; + rw &= ~FMARK; - TTY_LOCK_OWNED(tp); /* debug assert */ + if (tp == NULL) { + return ENXIO; + } + + TTY_LOCK_OWNED(tp); + + if (tp->t_state & TS_ZOMBIE) { + retval = 1; + goto out; + } switch (rw) { case FREAD: - if (ttnread(tp) > 0 || ISSET(tp->t_state, TS_ZOMBIE)) - return(1); + retval = ttnread(tp); + if (retval > 0) { + break; + } + selrecord(p, &tp->t_rsel, wql); break; case FWRITE: - if ((tp->t_outq.c_cc <= tp->t_lowat && - ISSET(tp->t_state, TS_CONNECTED)) - || ISSET(tp->t_state, TS_ZOMBIE)) { - return (1); + if ((tp->t_outq.c_cc <= tp->t_lowat) && + (tp->t_state & TS_CONNECTED)) { + retval = tp->t_hiwat - tp->t_outq.c_cc; + break; } + selrecord(p, &tp->t_wsel, wql); break; } - return (0); + +out: + if (retval > 0 && needs_selrecord) { + switch (rw) { + case FREAD: + selrecord(p, &tp->t_rsel, wql); + break; + case FWRITE: + selrecord(p, &tp->t_wsel, wql); + break; + } + } + + return retval; } @@ -1545,7 +1626,7 @@ ttselect(dev_t dev, int rw, void *wql, proc_t p) /* * Locks: Assumes tp is locked on entry, remains locked on exit */ -static int +__private_extern__ int ttnread(struct tty *tp) { int nread; @@ -2120,7 +2201,7 @@ read: char ibuf[IBUFSIZ]; int icc; - icc = min(uio_resid(uio), IBUFSIZ); + icc = MIN(uio_resid(uio), IBUFSIZ); icc = q_to_b(qp, (u_char *)ibuf, icc); if (icc <= 0) { if (first) @@ -2161,8 +2242,8 @@ slowcase: tty_pgsignal(tp, SIGTSTP, 1); tty_lock(tp); if (first) { - error = ttysleep(tp, &lbolt, TTIPRI | PCATCH, - "ttybg3", 0); + error = ttysleep(tp, &ttread, TTIPRI | PCATCH, + "ttybg3", hz); if (error) break; goto loop; @@ -2341,7 +2422,7 @@ loop: * leftover from last time. 
*/ if (cc == 0) { - cc = min(uio_resid(uio), OBUFSIZ); + cc = MIN(uio_resid(uio), OBUFSIZ); cp = obuf; error = uiomove(cp, cc, uio); if (error) { @@ -2394,7 +2475,9 @@ loop: i = b_to_q((u_char *)cp, ce, &tp->t_outq); ce -= i; tp->t_column += ce; - cp += ce, cc -= ce, tk_nout += ce; + cp += ce; + cc -= ce; + tk_nout += ce; tp->t_outcc += ce; if (i > 0) { /* out of space */ @@ -3022,22 +3105,59 @@ ttymalloc(void) /* output queue doesn't need quoting */ clalloc(&tp->t_outq, TTYCLSIZE, 0); lck_mtx_init(&tp->t_lock, tty_lck_grp, tty_lck_attr); + klist_init(&tp->t_rsel.si_note); + klist_init(&tp->t_wsel.si_note); + tp->t_refcnt = 1; } - return(tp); + return (tp); } +/* + * Increment the reference count on a tty. + */ +void +ttyhold(struct tty *tp) +{ + TTY_LOCK_OWNED(tp); + tp->t_refcnt++; +} /* - * Free a tty structure and its buffers. - * - * Locks: The tty_lock() is assumed to not be held at the time of - * the free; this functions destroys the mutex. + * Drops a reference count on a tty structure; if the reference count reaches + * zero, then also frees the structure and associated buffers. */ void ttyfree(struct tty *tp) +{ + TTY_LOCK_NOTOWNED(tp); + + tty_lock(tp); + if (--tp->t_refcnt == 0) { + tty_unlock(tp); + ttydeallocate(tp); + } else if (tp->t_refcnt < 0) { + panic("%s: freeing free tty %p", __func__, tp); + } else + tty_unlock(tp); +} + +/* + * Deallocate a tty structure and its buffers. + * + * Locks: The tty_lock() is assumed to not be held at the time of + * the free; this function destroys the mutex. + */ +static void +ttydeallocate(struct tty *tp) { TTY_LOCK_NOTOWNED(tp); /* debug assert */ +#if DEBUG + if (!(SLIST_EMPTY(&tp->t_rsel.si_note) && SLIST_EMPTY(&tp->t_wsel.si_note))) { + panic("knotes hooked into a tty when the tty is freed.\n"); + } +#endif /* DEBUG */ + clfree(&tp->t_rawq); clfree(&tp->t_canq); clfree(&tp->t_outq); @@ -3076,3 +3196,390 @@ isctty_sp(proc_t p, struct tty *tp, struct session *sessp) } + +static int filt_ttyattach(struct knote *kn, struct kevent_internal_s *kev); +static void filt_ttydetach(struct knote *kn); +static int filt_ttyevent(struct knote *kn, long hint); +static int filt_ttytouch(struct knote *kn, struct kevent_internal_s *kev); +static int filt_ttyprocess(struct knote *kn, struct filt_process_s *data, struct kevent_internal_s *kev); + +SECURITY_READ_ONLY_EARLY(struct filterops) tty_filtops = { + .f_isfd = 1, + .f_attach = filt_ttyattach, + .f_detach = filt_ttydetach, + .f_event = filt_ttyevent, + .f_touch = filt_ttytouch, + .f_process = filt_ttyprocess +}; + +/* + * Called with struct tty locked. Returns non-zero if there is data to be read + * or written. + */ +static int +filt_tty_common(struct knote *kn, struct tty *tp) +{ + int retval = 0; + + TTY_LOCK_OWNED(tp); /* debug assert */ + + if (tp->t_state & TS_ZOMBIE) { + kn->kn_flags |= EV_EOF; + return 1; + } + + switch (knote_get_seltype(kn)) { + case FREAD: + retval = ttnread(tp); + break; + case FWRITE: + if ((tp->t_outq.c_cc <= tp->t_lowat) && + (tp->t_state & TS_CONNECTED)) { + retval = tp->t_hiwat - tp->t_outq.c_cc; + } + break; + } + + kn->kn_data = retval; + + /* + * TODO(mwidmann, jandrus): For native knote low watermark support, + * check the kn_sfflags for NOTE_LOWAT and check against kn_sdata. + * + * res = ((kn->kn_sfflags & NOTE_LOWAT) != 0) ? + * (kn->kn_data >= kn->kn_sdata) : kn->kn_data; + */ + + return retval; +} + +/* + * Find the struct tty from a waitq, which is a member of one of the two struct + * selinfos inside the struct tty. 
Use the seltype to determine which selinfo. + */ +static struct tty * +tty_from_waitq(struct waitq *wq, int seltype) +{ + struct selinfo *si; + struct tty *tp = NULL; + + /* + * The waitq is part of the selinfo structure managed by the driver. For + * certain drivers, we want to hook the knote into the selinfo + * structure's si_note field so selwakeup can call KNOTE. + * + * While 'wq' is not really a queue element, this macro only uses the + * pointer to calculate the offset into a structure given an element + * name. + */ + si = qe_element(wq, struct selinfo, si_waitq); + + /* + * For TTY drivers, the selinfo structure is somewhere in the struct + * tty. There are two different selinfo structures, and the one used + * corresponds to the type of filter requested. + * + * While 'si' is not really a queue element, this macro only uses the + * pointer to calculate the offset into a structure given an element + * name. + */ + switch (seltype) { + case FREAD: + tp = qe_element(si, struct tty, t_rsel); + break; + case FWRITE: + tp = qe_element(si, struct tty, t_wsel); + break; + } + + return tp; +} + +static struct tty * +tty_from_knote(struct knote *kn) +{ + return (struct tty *)kn->kn_hook; +} + +/* + * Try to lock the TTY structure associated with a knote. + * + * On success, this function returns a locked TTY structure. Otherwise, NULL is + * returned. + */ +__attribute__((warn_unused_result)) +static struct tty * +tty_lock_from_knote(struct knote *kn) +{ + struct tty *tp = tty_from_knote(kn); + if (tp) { + tty_lock(tp); + } + + return tp; +} + +/* + * Set the knote's struct tty to the kn_hook field. + * + * The idea is to fake a call to select with our own waitq set. If the driver + * calls selrecord, we'll get a link to their waitq and access to the tty + * structure. + * + * Returns -1 on failure, with the error set in the knote, or selres on success. + */ +static int +tty_set_knote_hook(struct knote *kn) +{ + uthread_t uth; + vfs_context_t ctx; + vnode_t vp; + kern_return_t kr; + struct waitq *wq = NULL; + struct waitq_set *old_wqs; + struct waitq_set tmp_wqs; + uint64_t rsvd, rsvd_arg; + uint64_t *rlptr = NULL; + int selres = -1; + struct tty *tp; + + uth = get_bsdthread_info(current_thread()); + + ctx = vfs_context_current(); + vp = (vnode_t)kn->kn_fp->f_fglob->fg_data; + + /* + * Reserve a link element to avoid potential allocation under + * a spinlock. + */ + rsvd = rsvd_arg = waitq_link_reserve(NULL); + rlptr = (void *)&rsvd_arg; + + /* + * Trick selrecord into hooking a known waitq set into the device's selinfo + * waitq. Once the link is in place, we can get back into the selinfo from + * the waitq and subsequently the tty (see tty_from_waitq). + * + * We can't use a real waitq set (such as the kqueue's) because wakeups + * might happen before we can unlink it. + */ + kr = waitq_set_init(&tmp_wqs, SYNC_POLICY_FIFO | SYNC_POLICY_PREPOST, NULL, + NULL); + assert(kr == KERN_SUCCESS); + + /* + * Lazy allocate the waitqset to avoid potential allocation under + * a spinlock; + */ + waitq_set_lazy_init_link(&tmp_wqs); + + old_wqs = uth->uu_wqset; + uth->uu_wqset = &tmp_wqs; + /* + * FMARK forces selects to always call selrecord, even if data is + * available. See ttselect, ptsselect, ptcselect. + * + * selres also contains the data currently available in the tty. 
+ */ + selres = VNOP_SELECT(vp, knote_get_seltype(kn) | FMARK, 0, rlptr, ctx); + uth->uu_wqset = old_wqs; + + /* + * Make sure to cleanup the reserved link - this guards against + * drivers that may not actually call selrecord(). + */ + waitq_link_release(rsvd); + if (rsvd == rsvd_arg) { + /* + * The driver didn't call selrecord -- there's no tty hooked up so we + * can't attach. + */ + knote_set_error(kn, ENOTTY); + selres = -1; + goto out; + } + + /* rlptr may not point to a properly aligned pointer */ + memcpy(&wq, rlptr, sizeof(void *)); + + tp = tty_from_waitq(wq, knote_get_seltype(kn)); + assert(tp != NULL); + + /* + * Take a reference and stash the tty in the knote. + */ + tty_lock(tp); + ttyhold(tp); + kn->kn_hook = tp; + tty_unlock(tp); + +out: + /* + * Cleaning up the wqset will unlink its waitq and clean up any preposts + * that occurred as a result of data coming in while the tty was attached. + */ + waitq_set_deinit(&tmp_wqs); + + return selres; +} + +static int +filt_ttyattach(struct knote *kn, __unused struct kevent_internal_s *kev) +{ + int selres = 0; + struct tty *tp; + + /* + * This function should be called from filt_specattach (spec_vnops.c), + * so most of the knote data structure should already be initialized. + */ + + /* don't support offsets in ttys or drivers that don't use struct tty */ + if (kn->kn_vnode_use_ofst || !kn->kn_vnode_kqok) { + knote_set_error(kn, ENOTSUP); + return 0; + } + + /* + * Connect the struct tty to the knote through the selinfo structure + * referenced by the waitq within the selinfo. + */ + selres = tty_set_knote_hook(kn); + if (selres < 0) { + return 0; + } + + /* + * Attach the knote to selinfo's klist. + */ + tp = tty_lock_from_knote(kn); + if (!tp) { + knote_set_error(kn, ENOENT); + return 0; + } + + switch (knote_get_seltype(kn)) { + case FREAD: + KNOTE_ATTACH(&tp->t_rsel.si_note, kn); + break; + case FWRITE: + KNOTE_ATTACH(&tp->t_wsel.si_note, kn); + break; + } + + tty_unlock(tp); + + return selres; +} + +static void +filt_ttydetach(struct knote *kn) +{ + struct tty *tp; + + tp = tty_lock_from_knote(kn); + if (!tp) { + knote_set_error(kn, ENOENT); + return; + } + + struct selinfo *si = NULL; + switch (knote_get_seltype(kn)) { + case FREAD: + si = &tp->t_rsel; + break; + case FWRITE: + si = &tp->t_wsel; + break; + /* knote_get_seltype will panic on default */ + } + + KNOTE_DETACH(&si->si_note, kn); + kn->kn_hook = NULL; + + tty_unlock(tp); + ttyfree(tp); +} + +static int +filt_ttyevent(struct knote *kn, long hint) +{ + int ret; + struct tty *tp; + bool revoked = hint & NOTE_REVOKE; + hint &= ~NOTE_REVOKE; + + tp = tty_from_knote(kn); + if (!tp) { + knote_set_error(kn, ENOENT); + return 0; + } + + if (!hint) { + tty_lock(tp); + } + + if (revoked) { + kn->kn_flags |= EV_EOF | EV_ONESHOT; + ret = 1; + } else { + ret = filt_tty_common(kn, tp); + } + + if (!hint) { + tty_unlock(tp); + } + + return ret; +} + +static int +filt_ttytouch(struct knote *kn, struct kevent_internal_s *kev) +{ + struct tty *tp; + int res = 0; + + tp = tty_lock_from_knote(kn); + if (!tp) { + knote_set_error(kn, ENOENT); + return 0; + } + + kn->kn_sdata = kev->data; + kn->kn_sfflags = kev->fflags; + + if (kn->kn_vnode_kqok) { + res = filt_tty_common(kn, tp); + } + + tty_unlock(tp); + + return res; +} + +static int +filt_ttyprocess(struct knote *kn, __unused struct filt_process_s *data, struct kevent_internal_s *kev) +{ + struct tty *tp; + int res; + + tp = tty_lock_from_knote(kn); + if (!tp) { + knote_set_error(kn, ENOENT); + return 0; + } + + res = 
filt_tty_common(kn, tp);
+
+	if (res) {
+		*kev = kn->kn_kevent;
+		if (kn->kn_flags & EV_CLEAR) {
+			kn->kn_fflags = 0;
+			kn->kn_data = 0;
+		}
+	}
+
+	tty_unlock(tp);
+
+	return res;
+}
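
For context, the kqueue filter routines added in this diff (tty_filtops, filt_ttyattach, filt_tty_common, filt_ttyprocess) are what service user-space kevent(2) registrations against terminal file descriptors. The following is a minimal user-space sketch of that usage, not part of the diff itself; the device path "/dev/ttys000" is a placeholder and should be replaced with whatever tty or pty node exists on the system.

/*
 * Sketch: register EVFILT_READ on a tty fd and wait for readability.
 * On a kernel with the tty_filtops support above, the registration is
 * handled by filt_ttyattach and readiness by filt_tty_common; EV_EOF is
 * reported when the terminal is revoked or goes zombie.
 */
#include <sys/event.h>
#include <sys/types.h>
#include <sys/time.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int
main(void)
{
	/* Placeholder path: any tty or pty device node will do. */
	int fd = open("/dev/ttys000", O_RDWR | O_NONBLOCK);
	if (fd < 0) {
		perror("open");
		return EXIT_FAILURE;
	}

	int kq = kqueue();
	if (kq < 0) {
		perror("kqueue");
		return EXIT_FAILURE;
	}

	/* Add a read filter for the terminal descriptor. */
	struct kevent ev;
	EV_SET(&ev, fd, EVFILT_READ, EV_ADD | EV_CLEAR, 0, 0, NULL);
	if (kevent(kq, &ev, 1, NULL, 0, NULL) < 0) {
		perror("kevent register");
		return EXIT_FAILURE;
	}

	/* Block until the tty has input; data carries the readable byte count. */
	struct kevent out;
	int n = kevent(kq, NULL, 0, &out, 1, NULL);
	if (n > 0) {
		if (out.flags & EV_EOF)
			printf("tty hung up or revoked\n");
		else
			printf("%ld byte(s) readable\n", (long)out.data);
	}

	close(kq);
	close(fd);
	return EXIT_SUCCESS;
}

The sketch only exercises the read side; an EVFILT_WRITE registration would follow the same pattern and, per filt_tty_common above, reports the remaining output-queue space when the line is connected and below its low-water mark.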