/*
- * Copyright (c) 1997-2007 Apple Inc. All rights reserved.
+ * Copyright (c) 1997-2017 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
#include <sys/file_internal.h>
#include <sys/conf.h>
#include <sys/dkstat.h>
-#include <sys/uio.h>
+#include <sys/uio_internal.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/syslog.h>
#include <dev/kmreg_com.h>
#include <machine/cons.h>
#include <sys/resource.h> /* averunnable */
+#include <kern/waitq.h>
+#include <libkern/section_keywords.h>
-static int ttnread(struct tty *tp);
+static lck_grp_t *tty_lck_grp;
+static lck_grp_attr_t *tty_lck_grp_attr;
+static lck_attr_t *tty_lck_attr;
+
+__private_extern__ int ttnread(struct tty *tp);
static void ttyecho(int c, struct tty *tp);
static int ttyoutput(int c, struct tty *tp);
static void ttypend(struct tty *tp);
static int ttywflush(struct tty *tp);
static int proc_compare(proc_t p1, proc_t p2);
+void ttyhold(struct tty *tp);
+static void ttydeallocate(struct tty *tp);
+
static int isctty(proc_t p, struct tty *tp);
static int isctty_sp(proc_t p, struct tty *tp, struct session *sessp);
#define I_LOW_WATER ((TTYHOG - 2 * 256) * 7 / 8) /* XXX */
static void
-termios32to64(struct termios *in, struct user_termios *out)
+termios32to64(struct termios32 *in, struct user_termios *out)
{
out->c_iflag = (user_tcflag_t)in->c_iflag;
out->c_oflag = (user_tcflag_t)in->c_oflag;
}
static void
-termios64to32(struct user_termios *in, struct termios *out)
+termios64to32(struct user_termios *in, struct termios32 *out)
{
out->c_iflag = (tcflag_t)in->c_iflag;
out->c_oflag = (tcflag_t)in->c_oflag;
/*
+ * tty_init
+ *
+ * Initialize the tty line discipline subsystem.
+ *
+ * Parameters: void
+ *
+ * Returns: void
+ *
+ * Locks: No ttys can be allocated and no tty locks can be used
+ * until after this function is called
+ *
+ * Notes: The intent of this is to set up a lock group attribute,
+ * lock group, and lock attribute for subsequent per-tty locks.
+ * This function is called early in bsd_init(), prior to the
+ * console device initialization.
+ */
+void
+tty_init(void)
+{
+ /* Allocated once at boot; shared by every per-tty mutex thereafter */
+ tty_lck_grp_attr = lck_grp_attr_alloc_init();
+ tty_lck_grp = lck_grp_alloc_init("tty", tty_lck_grp_attr);
+ tty_lck_attr = lck_attr_alloc_init();
+}
+
+
+/*
+ * tty_lock
+ *
+ * Lock the requested tty structure.
+ *
+ * Parameters: tp The tty we want to lock
+ *
+ * Returns: void
+ *
+ * Locks: On return, tp is locked
+ */
+void
+tty_lock(struct tty *tp)
+{
+ /* Asserting non-ownership first catches recursive-lock bugs early */
+ TTY_LOCK_NOTOWNED(tp); /* debug assert */
+ lck_mtx_lock(&tp->t_lock);
+}
+
+
+/*
+ * tty_unlock
+ *
+ * Unlock the requested tty structure.
+ *
+ * Parameters: tp The tty we want to unlock
+ *
+ * Returns: void
+ *
+ * Locks: On return, tp is unlocked
+ */
+void
+tty_unlock(struct tty *tp)
+{
+ /* Caller must hold the lock it is about to release */
+ TTY_LOCK_OWNED(tp); /* debug assert */
+ lck_mtx_unlock(&tp->t_lock);
+}
+
+/*
+ * ttyopen (LDISC)
+ *
* Initial open of tty, or (re)entry to standard tty line discipline.
+ *
+ * Locks: Assumes tty_lock() is held prior to calling.
*/
int
ttyopen(dev_t device, struct tty *tp)
{
- boolean_t funnel_state;
- proc_t p = current_proc();
- struct pgrp * pg, * oldpg;
- struct session *sessp, *oldsess;
-
- funnel_state = thread_funnel_set(kernel_flock, TRUE);
+ TTY_LOCK_OWNED(tp); /* debug assert */
tp->t_dev = device;
bzero(&tp->t_winsize, sizeof(tp->t_winsize));
}
- pg = proc_pgrp(p);
- sessp = proc_session(p);
-
- /*
- * First tty open affter setsid() call makes this tty its controlling
- * tty, if the tty does not already have a session associated with it.
- * Only do this if the process
- */
- if (SESS_LEADER(p, sessp) && /* process is session leader */
- sessp->s_ttyvp == NULL && /* but has no controlling tty */
- tp->t_session == NULL ) { /* and tty not controlling */
- session_lock(sessp);
- if ((sessp->s_flags & S_NOCTTY) == 0) { /* and no O_NOCTTY */
- /* Hold on to the reference */
- sessp->s_ttyp = tp;
- OSBitOrAtomic(P_CONTROLT, (UInt32 *)&p->p_flag);
- session_unlock(sessp);
- proc_list_lock();
- oldpg = tp->t_pgrp;
- oldsess = tp->t_session;
- if (oldsess != SESSION_NULL)
- oldsess->s_ttypgrpid = NO_PID;
- tp->t_session = sessp;
- tp->t_pgrp = pg;
- sessp->s_ttypgrpid = pg->pg_id;
- proc_list_unlock();
- if (oldpg != PGRP_NULL)
- pg_rele(oldpg);
- if (oldsess != SESSION_NULL)
- session_rele(oldsess);
- goto out;
- }
- session_unlock(sessp);
- }
-
- if (sessp != SESSION_NULL)
- session_rele(sessp);
- if (pg != PGRP_NULL)
- pg_rele(pg);
-
-out:
- thread_funnel_set(kernel_flock, funnel_state);
-
return (0);
}
/*
+ * ttyclose
+ *
* Handle close() on a tty line: flush and set to initial state,
* bumping generation number so that pending read/write calls
* can detect recycling of the tty.
* XXX our caller should have done `spltty(); l_close(); ttyclose();'
* and l_close() should have flushed, but we repeat the spltty() and
* the flush in case there are buggy callers.
+ *
+ * Locks: Assumes tty_lock() is held prior to calling.
*/
int
ttyclose(struct tty *tp)
{
struct pgrp * oldpg;
struct session * oldsessp;
+ struct knote *kn;
+
+ TTY_LOCK_OWNED(tp); /* debug assert */
if (constty == tp) {
constty = NULL;
oldsessp->s_ttypgrpid = NO_PID;
proc_list_unlock();
/* drop the reference on prev session and pgrp */
+ /* SAFE: All callers drop the lock on return */
+ tty_unlock(tp);
if (oldsessp != SESSION_NULL)
session_rele(oldsessp);
if (oldpg != PGRP_NULL)
pg_rele(oldpg);
+ tty_lock(tp);
tp->t_state = 0;
+ SLIST_FOREACH(kn, &tp->t_wsel.si_note, kn_selnext) {
+ KNOTE_DETACH(&tp->t_wsel.si_note, kn);
+ }
selthreadclear(&tp->t_wsel);
+ SLIST_FOREACH(kn, &tp->t_rsel.si_note, kn_selnext) {
+ KNOTE_DETACH(&tp->t_rsel.si_note, kn);
+ }
selthreadclear(&tp->t_rsel);
+
return (0);
}
(c) != _POSIX_VDISABLE))
/*
+ * ttyinput (LDISC)
+ *
* Process input of a single character received on a tty.
+ *
+ * Parameters: c The character received
+ * tp The tty on which it was received
+ *
+ * Returns: 0 Always (the default, or the result of ttstart())
+ *
+ * Locks: Assumes tty_lock() is held prior to calling.
*/
int
ttyinput(int c, struct tty *tp)
{
tcflag_t iflag, lflag;
cc_t *cc;
- int i, err, retval;
- boolean_t funnel_state;
+ int i, err;
+ int retval = 0; /* default return value */
- funnel_state = thread_funnel_set(kernel_flock, TRUE);
+ TTY_LOCK_OWNED(tp); /* debug assert */
/*
* If input is pending take it first.
CLR(c, TTY_ERRORMASK);
if (ISSET(err, TTY_BI)) {
if (ISSET(iflag, IGNBRK)) {
- thread_funnel_set(kernel_flock, funnel_state);
- return (0);
+ goto out;
}
if (ISSET(iflag, BRKINT)) {
ttyflush(tp, FREAD | FWRITE);
+ /* SAFE: All callers drop the lock on return */
+ tty_unlock(tp);
tty_pgsignal(tp, SIGINT, 1);
+ tty_lock(tp);
goto endcase;
}
if (ISSET(iflag, PARMRK))
} else if ((ISSET(err, TTY_PE) && ISSET(iflag, INPCK))
|| ISSET(err, TTY_FE)) {
if (ISSET(iflag, IGNPAR)) {
- thread_funnel_set(kernel_flock, funnel_state);
- return (0);
+ goto out;
}
else if (ISSET(iflag, PARMRK)) {
parmrk:
if (!ISSET(lflag, NOFLSH))
ttyflush(tp, FREAD | FWRITE);
ttyecho(c, tp);
+ /*
+ * SAFE: All callers drop the lock on return;
+ * SAFE: if we lose a threaded race on change
+ * SAFE: of the interrupt character, we could
+ * SAFE: have lost that race anyway due to the
+ * SAFE: scheduler executing threads in
+ * SAFE: priority order rather than "last
+ * SAFE: active thread" order (FEATURE).
+ */
+ tty_unlock(tp);
tty_pgsignal(tp,
CCEQ(cc[VINTR], c) ? SIGINT : SIGQUIT, 1);
+ tty_lock(tp);
goto endcase;
}
if (CCEQ(cc[VSUSP], c)) {
if (!ISSET(lflag, NOFLSH))
ttyflush(tp, FREAD);
ttyecho(c, tp);
+ /* SAFE: All callers drop the lock on return */
+ tty_unlock(tp);
tty_pgsignal(tp, SIGTSTP, 1);
+ tty_lock(tp);
goto endcase;
}
}
if (!ISSET(tp->t_state, TS_TTSTOP)) {
SET(tp->t_state, TS_TTSTOP);
ttystop(tp, 0);
- thread_funnel_set(kernel_flock, funnel_state);
- return (0);
+ goto out;
}
if (!CCEQ(cc[VSTART], c)) {
- thread_funnel_set(kernel_flock, funnel_state);
- return (0);
+ goto out;
}
/*
* if VSTART == VSTOP then toggle
*/
if (c == '\r') {
if (ISSET(iflag, IGNCR)) {
- thread_funnel_set(kernel_flock, funnel_state);
- return (0);
+ goto out;
}
else if (ISSET(iflag, ICRNL))
c = '\n';
* ^T - kernel info and generate SIGINFO
*/
if (CCEQ(cc[VSTATUS], c) && ISSET(lflag, IEXTEN)) {
- if (ISSET(lflag, ISIG))
+ if (ISSET(lflag, ISIG)) {
+ /* SAFE: All callers drop the lock on return */
+ tty_unlock(tp);
tty_pgsignal(tp, SIGINFO, 1);
+ tty_lock(tp);
+ }
if (!ISSET(lflag, NOKERNINFO))
- ttyinfo(tp);
+ ttyinfo_locked(tp);
goto endcase;
}
}
}
}
}
+
endcase:
/*
* IXANY means allow any character to restart output.
*/
if (ISSET(tp->t_state, TS_TTSTOP) &&
!ISSET(iflag, IXANY) && cc[VSTART] != cc[VSTOP]) {
- thread_funnel_set(kernel_flock, funnel_state);
- return (0);
+ goto out;
}
+
restartoutput:
CLR(tp->t_lflag, FLUSHO);
CLR(tp->t_state, TS_TTSTOP);
+
startoutput:
- retval = ttstart(tp);
- thread_funnel_set(kernel_flock, funnel_state);
+ /* Start the output */
+ retval = ttstart(tp);
+
+out:
return (retval);
}
+
/*
+ * ttyoutput
+ *
* Output a single character on a tty, doing output processing
* as needed (expanding tabs, newline processing, etc.).
- * Returns < 0 if succeeds, otherwise returns char to resend.
- * Must be recursive.
+ *
+ * Parameters: c The character to output
+ * tp The tty on which to output on the tty
+ *
+ * Returns: < 0 Success
+ * >= 0 Character to resend (failure)
+ *
+ * Locks: Assumes tp is locked on entry, remains locked on exit
+ *
+ * Notes: Must be recursive.
*/
static int
ttyoutput(int c, struct tty *tp)
tcflag_t oflag;
int col;
+ TTY_LOCK_OWNED(tp); /* debug assert */
+
oflag = tp->t_oflag;
if (!ISSET(oflag, OPOST)) {
if (ISSET(tp->t_lflag, FLUSHO))
}
/*
- * Ioctls for all tty devices. Called after line-discipline specific ioctl
- * has been called to do discipline-specific functions and/or reject any
- * of these ioctl commands.
+ * Sets the tty state to not allow any more changes of foreground process
+ * group. This is required to be done so that a subsequent revoke on a vnode
+ * is able to always successfully complete.
+ *
+ * Locks : Assumes tty_lock held on entry
+ */
+void
+ttysetpgrphup(struct tty *tp)
+{
+ TTY_LOCK_OWNED(tp); /* debug assert */
+ /* Once set, TIOCSPGRP-style pgrp changes on this tty are refused */
+ SET(tp->t_state, TS_PGRPHUP);
+ /*
+ * Also wake up sleeping readers which may or may not belong to the
+ * current foreground process group.
+ *
+ * This forces any non-fg readers (which entered read when
+ * that process group was in the fg) to return with EIO (if they're
+ * catching SIGTTIN or with SIGTTIN). The ones which do belong to the fg
+ * process group will promptly go back to sleep and get a SIGHUP shortly.
+ * This would normally happen as part of the close in revoke but if
+ * there is a sleeping reader from a non-fg process group we never get
+ * to the close because the sleeping reader holds an iocount on the
+ * vnode of the terminal which is going to get revoked->reclaimed.
+ */
+ wakeup(TSA_HUP_OR_INPUT(tp));
+}
+
+/*
+ * Locks : Assumes tty lock held on entry
+ */
+void
+ttyclrpgrphup(struct tty *tp)
+{
+ TTY_LOCK_OWNED(tp); /* debug assert */
+ /* Re-allow foreground process group changes (undoes ttysetpgrphup()) */
+ CLR(tp->t_state, TS_PGRPHUP);
+}
+
+/*
+ * ttioctl
+ *
+ * Identical to ttioctl_locked, only the lock is not held
+ *
+ * Parameters: <See ttioctl_locked()>
+ *
+ * Returns: <See ttioctl_locked()>
+ *
+ * Locks: This function assumes the tty_lock() is not held on entry;
+ * it takes the lock, and releases it before returning.
+ *
+ * Notes: This is supported to ensure the line discipline interfaces
+ * all have the same locking semantics.
+ *
+ * This function is called from callers that do not already
+ * hold the tty lock; see ttioctl_locked() for lock-held use.
+ */
+int
+ttioctl(struct tty *tp, u_long cmd, caddr_t data, int flag, proc_t p)
+{
+ int retval;
+
+ /* Take the per-tty lock, proxy to the lock-held implementation, drop it */
+ tty_lock(tp);
+ retval = ttioctl_locked(tp, cmd, data, flag, p);
+ tty_unlock(tp);
+
+ return (retval);
+}
+
+
+/*
+ * ttioctl_locked
+ *
+ * Ioctls for all tty devices.
+ *
+ * Parameters: tp Tty on which ioctl() is being called
+ * cmd ioctl() command parameter
+ * data ioctl() data argument (if any)
+ * flag fileglob open modes from fcntl.h;
+ * if called internally, this is usually
+ * set to 0, rather than something useful
+ * p Process context for the call; if the
+ * call is proxied to a worker thread,
+ * this will not be the current process!!!
+ *
+ * Returns: 0 Success
+ * EIO I/O error (no process group, job
+ * control, etc.)
+ * EINTR Interrupted by signal
+ * EBUSY Attempt to become the console while
+ * the console is busy
+ * ENOTTY TIOCGPGRP on a non-controlling tty
+ * EINVAL Invalid baud rate
+ * ENXIO TIOCSETD of invalid line discipline
+ * EPERM TIOCSTI, not root, not open for read
+ * EACCES TIOCSTI, not root, not your controlling
+ * tty
+ * EPERM TIOCSCTTY failed
+ * ENOTTY/EINVAL/EPERM TIOCSPGRP failed
+ * EPERM TIOCSDRAINWAIT as non-root user
+ * suser:EPERM Console control denied
+ * ttywait:EIO t_timeout too small/expired
+ * ttywait:ERESTART Upper layer must redrive the call;
+ * this is usually done by the Libc
+ * stub in user space
+ * ttywait:EINTR Interrupted (usually a signal)
+ * ttcompat:EINVAL
+ * ttcompat:ENOTTY
+ * ttcompat:EIOCTL
+ * ttcompat:ENOTTY TIOCGSID, if no session or session
+ * leader
+ * ttcompat:ENOTTY All unrecognized ioctls
+ * *tp->t_param:? TIOCSETA* underlying function
+ * *linesw[t].l_open:? TIOCSETD line discipline open failure
+ *
+ *
+ * Locks: This function assumes that the tty_lock() is held for the
+ * tp at the time of the call. The lock remains held on return.
+ *
+ * Notes: This function is called after line-discipline specific ioctl
+ * has been called to do discipline-specific functions and/or
+ * reject any of these ioctl() commands.
+ *
+ * This function calls ttcompat(), which can re-call ttioctl()
+ * to a depth of one (FORTRAN style mutual recursion); at some
+ * point, we should just in-line ttcompat() here.
*/
int
-ttioctl(struct tty *tp,
- u_long cmd, caddr_t data, int flag,
- proc_t p)
+ttioctl_locked(struct tty *tp, u_long cmd, caddr_t data, int flag, proc_t p)
{
int error = 0;
+ int bogusData = 1;
struct uthread *ut;
- struct pgrp * pg, *oldpg;
- struct session *sessp, * oldsessp;
+ struct pgrp *pg, *oldpg;
+ struct session *sessp, *oldsessp;
+ struct tty *oldtp;
+
+ TTY_LOCK_OWNED(tp); /* debug assert */
ut = (struct uthread *)get_bsdthread_info(current_thread());
/* If the ioctl involves modification, signal if in the background. */
case TIOCFLUSH:
case TIOCSTOP:
case TIOCSTART:
- case TIOCSETA:
+ case TIOCSETA_32:
case TIOCSETA_64:
case TIOCSETD:
- case TIOCSETAF:
+ case TIOCSETAF_32:
case TIOCSETAF_64:
- case TIOCSETAW:
+ case TIOCSETAW_32:
case TIOCSETAW_64:
case TIOCSPGRP:
case TIOCSTAT:
case TIOCSTI:
case TIOCSWINSZ:
-#if COMPAT_43_TTY || defined(COMPAT_SUNOS)
case TIOCLBIC:
case TIOCLBIS:
case TIOCLSET:
case TIOCSETN:
case TIOCSETP:
case TIOCSLTC:
-#endif
while (isbackground(p, tp) &&
(p->p_lflag & P_LPPWAIT) == 0 &&
(p->p_sigignore & sigmask(SIGTTOU)) == 0 &&
error = EIO;
goto out;
}
+ /* SAFE: All callers drop the lock on return */
+ tty_unlock(tp);
if (pg->pg_jobc == 0) {
pg_rele(pg);
+ tty_lock(tp);
error = EIO;
goto out;
}
pgsignal(pg, SIGTTOU, 1);
pg_rele(pg);
+ tty_lock(tp);
/*
}
case TIOCSCONS: {
/* Set current console device to this line */
- int bogusData = 1;
data = (caddr_t) &bogusData;
/* No break - Fall through to BSD code */
constty = NULL;
}
if (constty) {
- (*cdevsw[major(cons.t_dev)].d_ioctl)
- (cons.t_dev, KMIOCDISABLCONS, NULL, 0, p);
+ (*cdevsw[major(constty->t_dev)].d_ioctl)
+ (constty->t_dev, KMIOCDISABLCONS, NULL, 0, p);
} else {
(*cdevsw[major(tp->t_dev)].d_ioctl)
(tp->t_dev, KMIOCDISABLCONS, NULL, 0, p);
if (error)
goto out;
break;
- case TIOCGETA: /* get termios struct */
- case TIOCGETA_64: { /* get termios struct */
- if (IS_64BIT_PROCESS(p)) {
- termios32to64(&tp->t_termios, (struct user_termios *)data);
- } else {
- bcopy(&tp->t_termios, data, sizeof(struct termios));
- }
+ case TIOCGETA_32: /* get termios struct */
+#ifdef __LP64__
+ termios64to32((struct user_termios *)&tp->t_termios, (struct termios32 *)data);
+#else
+ bcopy(&tp->t_termios, data, sizeof(struct termios));
+#endif
+ break;
+ case TIOCGETA_64: /* get termios struct */
+#ifdef __LP64__
+ bcopy(&tp->t_termios, data, sizeof(struct termios));
+#else
+ termios32to64((struct termios32 *)&tp->t_termios, (struct user_termios *)data);
+#endif
break;
- }
case TIOCGETD: /* get line discipline */
*(int *)data = tp->t_line;
break;
case TIOCOUTQ: /* output queue size */
*(int *)data = tp->t_outq.c_cc;
break;
- case TIOCSETA: /* set termios struct */
+ case TIOCSETA_32: /* set termios struct */
case TIOCSETA_64:
- case TIOCSETAW: /* drain output, set */
+ case TIOCSETAW_32: /* drain output, set */
case TIOCSETAW_64:
- case TIOCSETAF: /* drn out, fls in, set */
- case TIOCSETAF_64: { /* drn out, fls in, set */
+ case TIOCSETAF_32: /* drn out, fls in, set */
+ case TIOCSETAF_64:
+ { /* drn out, fls in, set */
struct termios *t = (struct termios *)data;
struct termios lcl_termios;
- if (IS_64BIT_PROCESS(p)) {
- termios64to32((struct user_termios *)data, &lcl_termios);
+#ifdef __LP64__
+ if (cmd==TIOCSETA_32 || cmd==TIOCSETAW_32 || cmd==TIOCSETAF_32) {
+ termios32to64((struct termios32 *)data, (struct user_termios *)&lcl_termios);
+ t = &lcl_termios;
+ }
+#else
+ if (cmd==TIOCSETA_64 || cmd==TIOCSETAW_64 || cmd==TIOCSETAF_64) {
+ termios64to32((struct user_termios *)data, (struct termios32 *)&lcl_termios);
t = &lcl_termios;
}
+#endif
#if 0
/* XXX bogus test; always false */
if (t->c_ispeed < 0 || t->c_ospeed < 0) {
#endif /* 0 - leave in; may end up being a conformance issue */
if (t->c_ispeed == 0)
t->c_ispeed = t->c_ospeed;
- if (cmd == TIOCSETAW || cmd == TIOCSETAF ||
+ if (cmd == TIOCSETAW_32 || cmd == TIOCSETAF_32 ||
cmd == TIOCSETAW_64 || cmd == TIOCSETAF_64) {
error = ttywait(tp);
if (error) {
goto out;
}
- if (cmd == TIOCSETAF || cmd == TIOCSETAF_64)
+ if (cmd == TIOCSETAF_32 || cmd == TIOCSETAF_64)
ttyflush(tp, FREAD);
}
if (!ISSET(t->c_cflag, CIGNORE)) {
ttsetwater(tp);
}
if (ISSET(t->c_lflag, ICANON) != ISSET(tp->t_lflag, ICANON) &&
- cmd != TIOCSETAF && cmd != TIOCSETAF_64) {
+ cmd != TIOCSETAF_32 && cmd != TIOCSETAF_64) {
if (ISSET(t->c_lflag, ICANON))
SET(tp->t_lflag, PENDIN);
else {
error = ENXIO;
goto out;
}
+ /*
+ * If the new line discipline is not equal to the old one,
+ * close the old one and open the new one.
+ */
if (t != tp->t_line) {
(*linesw[tp->t_line].l_close)(tp, flag);
error = (*linesw[t].l_open)(device, tp);
if (error) {
+ /* This is racy; it's possible to lose both */
(void)(*linesw[tp->t_line].l_open)(device, tp);
goto out;
}
break;
case TIOCSCTTY: /* become controlling tty */
/* Session ctty vnode pointer set in vnode layer. */
- pg = proc_pgrp(p);
sessp = proc_session(p);
- if (!SESS_LEADER(p, sessp) ||
- ((sessp->s_ttyvp || tp->t_session) &&
- (tp->t_session != sessp))) {
- if (sessp != SESSION_NULL)
- session_rele(sessp);
- if (pg != PGRP_NULL)
+ if (sessp == SESSION_NULL) {
+ error = EPERM;
+ goto out;
+ }
+
+ /*
+ * This can only be done by a session leader.
+ */
+ if (!SESS_LEADER(p, sessp)) {
+ /* SAFE: All callers drop the lock on return */
+ tty_unlock(tp);
+ session_rele(sessp);
+ tty_lock(tp);
+ error = EPERM;
+ goto out;
+ }
+ /*
+ * If this terminal is already the controlling terminal for the
+ * session, nothing to do here.
+ */
+ if (tp->t_session == sessp) {
+ /* SAFE: All callers drop the lock on return */
+ tty_unlock(tp);
+ session_rele(sessp);
+ tty_lock(tp);
+ error = 0;
+ goto out;
+ }
+ pg = proc_pgrp(p);
+ /*
+ * Deny if the terminal is already attached to another session or
+ * the session already has a terminal vnode.
+ */
+ session_lock(sessp);
+ if (sessp->s_ttyvp || tp->t_session) {
+ session_unlock(sessp);
+ /* SAFE: All callers drop the lock on return */
+ tty_unlock(tp);
+ if (pg != PGRP_NULL) {
pg_rele(pg);
+ }
+ session_rele(sessp);
+ tty_lock(tp);
error = EPERM;
goto out;
}
+ sessp->s_ttypgrpid = pg->pg_id;
+ oldtp = sessp->s_ttyp;
+ ttyhold(tp);
+ sessp->s_ttyp = tp;
+ session_unlock(sessp);
proc_list_lock();
oldsessp = tp->t_session;
oldpg = tp->t_pgrp;
oldsessp->s_ttypgrpid = NO_PID;
/* do not drop refs on sessp and pg as tp holds them */
tp->t_session = sessp;
- sessp->s_ttypgrpid = pg->pg_id;
tp->t_pgrp = pg;
proc_list_unlock();
- session_lock(sessp);
- sessp->s_ttyp = tp;
- session_unlock(sessp);
- OSBitOrAtomic(P_CONTROLT, (UInt32 *)&p->p_flag);
+ OSBitOrAtomic(P_CONTROLT, &p->p_flag);
+ /* SAFE: All callers drop the lock on return */
+ tty_unlock(tp);
/* drop the reference on prev session and pgrp */
if (oldsessp != SESSION_NULL)
session_rele(oldsessp);
if (oldpg != PGRP_NULL)
pg_rele(oldpg);
+ if (NULL != oldtp)
+ ttyfree(oldtp);
+ tty_lock(tp);
break;
case TIOCSPGRP: { /* set pgrp of tty */
error = EINVAL;
goto out;
} else if (pgrp->pg_session != sessp) {
+ /* SAFE: All callers drop the lock on return */
+ tty_unlock(tp);
+ if (sessp != SESSION_NULL)
+ session_rele(sessp);
+ pg_rele(pgrp);
+ tty_lock(tp);
+ error = EPERM;
+ goto out;
+ }
+ /*
+ * The session leader is going away and is possibly going to revoke
+ * the terminal, we can't change the process group when that is the
+ * case.
+ */
+ if (ISSET(tp->t_state, TS_PGRPHUP)) {
if (sessp != SESSION_NULL)
session_rele(sessp);
pg_rele(pgrp);
tp->t_pgrp = pgrp;
sessp->s_ttypgrpid = pgrp->pg_id;
proc_list_unlock();
+
+ /*
+ * Wakeup readers to recheck if they are still the foreground
+ * process group.
+ *
+ * ttwakeup() isn't called because the readers aren't getting
+ * woken up because there is something to read but to force
+ * the re-evaluation of their foreground process group status.
+ *
+ * Ordinarily leaving these readers waiting wouldn't be an issue
+ * as launchd would send them a termination signal eventually
+ * (if nobody else does). But if this terminal happens to be
+ * /dev/console, launchd itself could get blocked forever behind
+ * a revoke of /dev/console and leave the system deadlocked.
+ */
+ wakeup(TSA_HUP_OR_INPUT(tp));
+
+ /* SAFE: All callers drop the lock on return */
+ tty_unlock(tp);
if (oldpg != PGRP_NULL)
pg_rele(oldpg);
if (sessp != SESSION_NULL)
session_rele(sessp);
+ tty_lock(tp);
break;
}
case TIOCSTAT: /* simulate control-T */
- ttyinfo(tp);
+ ttyinfo_locked(tp);
break;
case TIOCSWINSZ: /* set window size */
if (bcmp((caddr_t)&tp->t_winsize, data,
sizeof (struct winsize))) {
tp->t_winsize = *(struct winsize *)data;
+ /* SAFE: All callers drop the lock on return */
+ tty_unlock(tp);
tty_pgsignal(tp, SIGWINCH, 1);
+ tty_lock(tp);
}
break;
case TIOCSDRAINWAIT:
*(int *)data = tp->t_timeout / hz;
break;
default:
-#if COMPAT_43_TTY || defined(COMPAT_SUNOS)
error = ttcompat(tp, cmd, data, flag, p);
-#else
- error = ENOTTY;
-#endif
goto out;
}
return(error);
}
+
+/*
+ * Locks: Assumes tp is locked on entry, remains locked on exit
+ */
int
ttyselect(struct tty *tp, int rw, void *wql, proc_t p)
{
- if (tp == NULL)
- return (ENXIO);
+ int retval = 0;
+ /*
+ * Attaching knotes to TTYs needs to call selrecord in order to hook
+ * up the waitq to the selinfo, regardless of data being ready. See
+ * filt_ttyattach.
+ */
+ bool needs_selrecord = rw & FMARK;
+ rw &= ~FMARK;
+
+ if (tp == NULL) {
+ return ENXIO;
+ }
+
+ TTY_LOCK_OWNED(tp);
+
+ if (tp->t_state & TS_ZOMBIE) {
+ retval = 1;
+ goto out;
+ }
switch (rw) {
case FREAD:
- if (ttnread(tp) > 0 || ISSET(tp->t_state, TS_ZOMBIE))
- return(1);
+ retval = ttnread(tp);
+ if (retval > 0) {
+ break;
+ }
+
selrecord(p, &tp->t_rsel, wql);
break;
case FWRITE:
- if ((tp->t_outq.c_cc <= tp->t_lowat &&
- ISSET(tp->t_state, TS_CONNECTED))
- || ISSET(tp->t_state, TS_ZOMBIE)) {
- return (1);
+ if ((tp->t_outq.c_cc <= tp->t_lowat) &&
+ (tp->t_state & TS_CONNECTED)) {
+ retval = tp->t_hiwat - tp->t_outq.c_cc;
+ break;
}
+
selrecord(p, &tp->t_wsel, wql);
break;
}
- return (0);
+
+out:
+ if (retval > 0 && needs_selrecord) {
+ switch (rw) {
+ case FREAD:
+ selrecord(p, &tp->t_rsel, wql);
+ break;
+ case FWRITE:
+ selrecord(p, &tp->t_wsel, wql);
+ break;
+ }
+ }
+
+ return retval;
}
+
/*
* This is a wrapper for compatibility with the select vector used by
* cdevsw. It relies on a proper xxxdevtotty routine.
+ *
+ * Locks: Assumes tty_lock() is not held prior to calling.
*/
int
ttselect(dev_t dev, int rw, void *wql, proc_t p)
{
+ int rv;
+ struct tty *tp = cdevsw[major(dev)].d_ttys[minor(dev)];
+
+ /*
+ * NOTE(review): tp is dereferenced by tty_lock() before ttyselect()
+ * performs its own NULL check; confirm d_ttys[minor(dev)] cannot be
+ * NULL on any path that reaches this wrapper.
+ */
+ tty_lock(tp);
+ rv = ttyselect(tp, rw, wql, p);
+ tty_unlock(tp);
+
+ return (rv);
}
+
/*
- * Must be called at spltty().
+ * Locks: Assumes tp is locked on entry, remains locked on exit
*/
-static int
+__private_extern__ int
ttnread(struct tty *tp)
{
int nread;
+ TTY_LOCK_OWNED(tp); /* debug assert */
+
if (ISSET(tp->t_lflag, PENDIN))
ttypend(tp);
nread = tp->t_canq.c_cc;
return (nread);
}
+
/*
+ * ttywait
+ *
* Wait for output to drain.
+ *
+ * Parameters: tp Tty on which to wait for output to drain
+ *
+ * Returns: 0 Success
+ * EIO t_timeout too small/expired
+ * ttysleep:ERESTART Upper layer must redrive the call;
+ * this is usually done by the Libc
+ * stub in user space
+ * ttysleep:EINTR Interrupted (usually a signal)
+ *
+ * Notes: Called from proc_exit() and vproc_exit().
+ *
+ * Locks: Assumes tp is locked on entry, remains locked on exit
*/
int
ttywait(struct tty *tp)
{
int error;
+ TTY_LOCK_OWNED(tp); /* debug assert */
+
error = 0;
while ((tp->t_outq.c_cc || ISSET(tp->t_state, TS_BUSY)) &&
ISSET(tp->t_state, TS_CONNECTED) && tp->t_oproc) {
return (error);
}
+/*
+ * Stop the underlying device driver.
+ *
+ * Locks: Assumes tty_lock() is held prior to calling.
+ */
static void
ttystop(struct tty *tp, int rw)
{
+ TTY_LOCK_OWNED(tp); /* debug assert */
+
(*cdevsw[major(tp->t_dev)].d_stop)(tp, rw);
}
/*
* Flush if successfully wait.
+ *
+ * Locks: Assumes tty_lock() is held prior to calling.
*/
static int
ttywflush(struct tty *tp)
{
int error;
+ TTY_LOCK_OWNED(tp); /* debug assert */
+
if ((error = ttywait(tp)) == 0)
ttyflush(tp, FREAD);
return (error);
/*
* Flush tty read and/or write queues, notifying anyone waiting.
+ *
+ * Locks: Assumes tty_lock() is held prior to calling.
*/
void
ttyflush(struct tty *tp, int rw)
{
+ TTY_LOCK_OWNED(tp); /* debug assert */
+
#if 0
again:
#endif
/*
* Copy in the default termios characters.
+ *
+ * Locks: Assumes tty_lock() is held prior to calling.
+ *
+ * Notes: No assertion; tp is not in scope.
*/
void
termioschars(struct termios *t)
bcopy(ttydefchars, t->c_cc, sizeof t->c_cc);
}
-/*
- * Old interface.
- */
-void
-ttychars(struct tty *tp)
-{
- termioschars(&tp->t_termios);
-}
/*
* Handle input high water. Send stop character for the IXOFF case. Turn
* on our input flow control bit and propagate the changes to the driver.
* XXX the stop character should be put in a special high priority queue.
+ *
+ * Locks: Assumes tty_lock() is held for the call.
*/
void
ttyblock(struct tty *tp)
{
+ TTY_LOCK_OWNED(tp); /* debug assert */
+
SET(tp->t_state, TS_TBLOCK);
if (ISSET(tp->t_iflag, IXOFF) && tp->t_cc[VSTOP] != _POSIX_VDISABLE &&
putc(tp->t_cc[VSTOP], &tp->t_outq) != 0)
ttstart(tp);
}
+
/*
* Handle input low water. Send start character for the IXOFF case. Turn
* off our input flow control bit and propagate the changes to the driver.
* XXX the start character should be put in a special high priority queue.
+ *
+ * Locks: Assumes tty_lock() is held for the call.
*/
static void
ttyunblock(struct tty *tp)
{
+ TTY_LOCK_OWNED(tp); /* debug assert */
+
CLR(tp->t_state, TS_TBLOCK);
if (ISSET(tp->t_iflag, IXOFF) && tp->t_cc[VSTART] != _POSIX_VDISABLE &&
putc(tp->t_cc[VSTART], &tp->t_outq) != 0)
ttstart(tp);
}
-/* FreeBSD: Not used by any current (i386) drivers. */
+
/*
- * Restart after an inter-char delay.
+ * ttstart
+ *
+ * Start tty output
+ *
+ * Parameters: tp tty on which to start output
+ *
+ * Returns: 0 Success
+ *
+ * Locks: Assumes tty_lock() is held for the call.
+ *
+ * Notes: This function might as well be void; it always returns success
+ *
+ * Called from ttioctl_locked(), LDISC routines, and
+ * ttycheckoutq(), ttyblock(), ttyunblock(), and tputchar()
*/
-void
-ttrstrt(void *tp_arg)
-{
- struct tty *tp;
-
-#if DIAGNOSTIC
- if (tp_arg == NULL)
- panic("ttrstrt");
-#endif
- tp = tp_arg;
-
- CLR(tp->t_state, TS_TIMEOUT);
- ttstart(tp);
-
-}
-
int
ttstart(struct tty *tp)
{
- boolean_t funnel_state;
-
- funnel_state = thread_funnel_set(kernel_flock, TRUE);
+ TTY_LOCK_OWNED(tp); /* debug assert */
+ /* t_oproc is the driver's start-output callback; may be NULL for ptys */
 if (tp->t_oproc != NULL) /* XXX: Kludge for pty. */
 (*tp->t_oproc)(tp);
- thread_funnel_set(kernel_flock, funnel_state);
+
 return (0);
}
+
/*
+ * ttylclose (LDISC)
+ *
* "close" a line discipline
+ *
+ * Locks: Assumes tty_lock() is held prior to calling.
*/
int
ttylclose(struct tty *tp, int flag)
{
- boolean_t funnel_state;
+ TTY_LOCK_OWNED(tp); /* debug assert */
+ /* Non-blocking close, or a failed drain, discards both queues */
- funnel_state = thread_funnel_set(kernel_flock, TRUE);
 if ( (flag & FNONBLOCK) || ttywflush(tp))
 ttyflush(tp, FREAD | FWRITE);
- thread_funnel_set(kernel_flock, funnel_state);
+
 return (0);
}
+
/*
+ * ttymodem (LDISC)
+ *
* Handle modem control transition on a tty.
* Flag indicates new state of carrier.
* Returns 0 if the line should be turned off, otherwise 1.
+ *
+ * Locks: Assumes tty_lock() is held prior to calling.
*/
int
ttymodem(struct tty *tp, int flag)
{
- boolean_t funnel_state;
+ int rval = 1; /* default return value */
- funnel_state = thread_funnel_set(kernel_flock, TRUE);
+ TTY_LOCK_OWNED(tp); /* debug assert */
if (ISSET(tp->t_state, TS_CARR_ON) && ISSET(tp->t_cflag, MDMBUF)) {
/*
if (tp->t_session && tp->t_session->s_leader)
psignal(tp->t_session->s_leader, SIGHUP);
ttyflush(tp, FREAD | FWRITE);
- thread_funnel_set(kernel_flock, funnel_state);
- return (0);
+ rval = 0;
+ goto out;
}
} else {
/*
ttwakeup(tp);
ttwwakeup(tp);
}
- thread_funnel_set(kernel_flock, funnel_state);
- return (1);
+
+out:
+ return (rval);
}
+
/*
* Reinput pending characters after state switch
* call at spltty().
+ *
+ * Locks: Assumes tty_lock() is held for the call.
*/
static void
ttypend(struct tty *tp)
struct clist tq;
int c;
+ TTY_LOCK_OWNED(tp); /* debug assert */
+
CLR(tp->t_lflag, PENDIN);
SET(tp->t_state, TS_TYPEN);
tq = tp->t_rawq;
CLR(tp->t_state, TS_TYPEN);
}
+
/*
+ * ttread (LDISC)
+ *
* Process a read call on a tty device.
+ *
+ * Locks: Assumes tty_lock() is held prior to calling.
*/
int
ttread(struct tty *tp, struct uio *uio, int flag)
int first, error = 0;
int has_etime = 0, last_cc = 0;
long slp = 0; /* XXX this should be renamed `timo'. */
- boolean_t funnel_state;
struct uthread *ut;
struct pgrp * pg;
- funnel_state = thread_funnel_set(kernel_flock, TRUE);
+ TTY_LOCK_OWNED(tp); /* debug assert */
ut = (struct uthread *)get_bsdthread_info(current_thread());
if ((p->p_sigignore & sigmask(SIGTTIN)) ||
(ut->uu_sigmask & sigmask(SIGTTIN)) ||
p->p_lflag & P_LPPWAIT) {
- thread_funnel_set(kernel_flock, funnel_state);
- return (EIO);
+ error = EIO;
+ goto err;
}
pg = proc_pgrp(p);
if (pg == PGRP_NULL) {
- thread_funnel_set(kernel_flock, funnel_state);
- return (EIO);
+ error = EIO;
+ goto err;
}
if (pg->pg_jobc == 0) {
+ /* SAFE: All callers drop the lock on return */
+ tty_unlock(tp);
pg_rele(pg);
- thread_funnel_set(kernel_flock, funnel_state);
- return (EIO);
+ tty_lock(tp);
+ error = EIO;
+ goto err;
}
+ /* SAFE: All callers drop the lock on return */
+ tty_unlock(tp);
pgsignal(pg, SIGTTIN, 1);
pg_rele(pg);
+ tty_lock(tp);
/*
* We signalled ourself, so we need to act as if we
* the signal. If it's a signal that stops the
* process, that's handled in the signal sending code.
*/
- thread_funnel_set(kernel_flock, funnel_state);
- return (EINTR);
+ error = EINTR;
+ goto err;
}
if (ISSET(tp->t_state, TS_ZOMBIE)) {
- thread_funnel_set(kernel_flock, funnel_state);
- return (0); /* EOF */
+ /* EOF - returning 0 */
+ goto err;
}
/*
if (flag & IO_NDELAY) {
if (qp->c_cc > 0)
goto read;
- if (!ISSET(lflag, ICANON) && cc[VMIN] == 0) {
- thread_funnel_set(kernel_flock, funnel_state);
- return (0);
+ if (ISSET(lflag, ICANON) || cc[VMIN] != 0) {
+ error = EWOULDBLOCK;
}
- thread_funnel_set(kernel_flock, funnel_state);
- return (EWOULDBLOCK);
+ /* else polling - returning 0 */
+ goto err;
}
if (!ISSET(lflag, ICANON)) {
int m = cc[VMIN];
goto read;
/* m, t and qp->c_cc are all 0. 0 is enough input. */
- thread_funnel_set(kernel_flock, funnel_state);
- return (0);
+ goto err;
}
t *= 100000; /* time in us */
#define diff(t1, t2) (((t1).tv_sec - (t2).tv_sec) * 1000000 + \
} else {
if (timercmp(&etime, &timecopy, <=)) {
/* Timed out, but 0 is enough input. */
- thread_funnel_set(kernel_flock, funnel_state);
- return (0);
+ goto err;
}
slp = diff(etime, timecopy);
}
* is large (divide by `tick' and/or arrange to
* use hzto() if hz is large).
*/
- slp = (long) (((u_long)slp * hz) + 999999) / 1000000;
+ slp = (long) (((u_int32_t)slp * hz) + 999999) / 1000000;
goto sleep;
}
if (qp->c_cc <= 0) {
if (error == EWOULDBLOCK)
error = 0;
else if (error) {
- thread_funnel_set(kernel_flock, funnel_state);
- return (error);
+ goto err;
}
/*
* XXX what happens if another process eats some input
char ibuf[IBUFSIZ];
int icc;
- icc = min(uio_resid(uio), IBUFSIZ);
+ icc = MIN(uio_resid(uio), IBUFSIZ);
icc = q_to_b(qp, (u_char *)ibuf, icc);
if (icc <= 0) {
if (first)
*/
if (CCEQ(cc[VDSUSP], c) &&
ISSET(lflag, IEXTEN | ISIG) == (IEXTEN | ISIG)) {
+ /*
+ * SAFE: All callers drop the lock on return and
+ * SAFE: current thread will not change out from
+ * SAFE: under us in the "goto loop" case.
+ */
+ tty_unlock(tp);
tty_pgsignal(tp, SIGTSTP, 1);
+ tty_lock(tp);
if (first) {
- error = ttysleep(tp, &lbolt, TTIPRI | PCATCH,
- "ttybg3", 0);
+ error = ttysleep(tp, &ttread, TTIPRI | PCATCH,
+ "ttybg3", hz);
if (error)
break;
goto loop;
tp->t_rawq.c_cc + tp->t_canq.c_cc <= I_LOW_WATER)
ttyunblock(tp);
- thread_funnel_set(kernel_flock, funnel_state);
+err:
return (error);
}
+
/*
* Check the output queue on tp for space for a kernel message (from uprintf
* or tprintf). Allow some space over the normal hiwater mark so we don't
* lose messages due to normal flow control, but don't let the tty run amok.
* Sleeps here are not interruptible, but we return prematurely if new signals
* arrive.
+ *
+ * Locks: Assumes tty_lock() is held before calling
+ *
+ * Notes: This function is called from tprintf() in subr_prf.c
*/
int
ttycheckoutq(struct tty *tp, int wait)
sigset_t oldsig;
struct uthread *ut;
+ TTY_LOCK_OWNED(tp); /* debug assert */
+
ut = (struct uthread *)get_bsdthread_info(current_thread());
hiwat = tp->t_hiwat;
return (0);
}
SET(tp->t_state, TS_SO_OLOWAT);
- tsleep(TSA_OLOWAT(tp), PZERO - 1, "ttoutq", hz);
+ ttysleep(tp, TSA_OLOWAT(tp), PZERO - 1, "ttoutq", hz);
}
return (1);
}
+
/*
+ * ttwrite (LDISC)
+ *
* Process a write call on a tty device.
+ *
+ * Locks: Assumes tty_lock() is held prior to calling.
*/
int
ttwrite(struct tty *tp, struct uio *uio, int flag)
char *cp = NULL;
int cc, ce;
proc_t p;
- int i, hiwat, count, error;
+ int i, hiwat, error;
+ user_ssize_t count;
char obuf[OBUFSIZ];
- boolean_t funnel_state;
struct uthread *ut;
struct pgrp * pg;
- funnel_state = thread_funnel_set(kernel_flock, TRUE);
+ TTY_LOCK_OWNED(tp); /* debug assert */
ut = (struct uthread *)get_bsdthread_info(current_thread());
hiwat = tp->t_hiwat;
- // LP64todo - fix this!
count = uio_resid(uio);
error = 0;
cc = 0;
goto out;
}
if (pg->pg_jobc == 0) {
+ /* SAFE: All callers drop the lock on return */
+ tty_unlock(tp);
pg_rele(pg);
+ tty_lock(tp);
error = EIO;
goto out;
}
+ /* SAFE: All callers drop the lock on return */
+ tty_unlock(tp);
pgsignal(pg, SIGTTOU, 1);
pg_rele(pg);
+ tty_lock(tp);
/*
* We signalled ourself, so we need to act as if we
* have been "interrupted" from a "sleep" to act on
while (uio_resid(uio) > 0 || cc > 0) {
if (ISSET(tp->t_lflag, FLUSHO)) {
uio_setresid(uio, 0);
- thread_funnel_set(kernel_flock, funnel_state);
return (0);
}
if (tp->t_outq.c_cc > hiwat)
* leftover from last time.
*/
if (cc == 0) {
- cc = min(uio_resid(uio), OBUFSIZ);
+ cc = MIN(uio_resid(uio), OBUFSIZ);
cp = obuf;
error = uiomove(cp, cc, uio);
if (error) {
i = b_to_q((u_char *)cp, ce, &tp->t_outq);
ce -= i;
tp->t_column += ce;
- cp += ce, cc -= ce, tk_nout += ce;
+ cp += ce;
+ cc -= ce;
+ tk_nout += ce;
tp->t_outcc += ce;
if (i > 0) {
/* out of space */
* (the call will either return short or restart with a new uio).
*/
uio_setresid(uio, (uio_resid(uio) + cc));
- thread_funnel_set(kernel_flock, funnel_state);
return (error);
overfull:
}
if (flag & IO_NDELAY) {
uio_setresid(uio, (uio_resid(uio) + cc));
- thread_funnel_set(kernel_flock, funnel_state);
return (uio_resid(uio) == count ? EWOULDBLOCK : 0);
}
SET(tp->t_state, TS_SO_OLOWAT);
goto loop;
}
+
/*
* Rubout one character from the rawq of tp
* as cleanly as possible.
+ *
+ * Locks: Assumes tty_lock() is held prior to calling.
*/
static void
ttyrub(int c, struct tty *tp)
int savecol;
int tabc;
+ TTY_LOCK_OWNED(tp); /* debug assert */
+
if (!ISSET(tp->t_lflag, ECHO) || ISSET(tp->t_lflag, EXTPROC))
return;
CLR(tp->t_lflag, FLUSHO);
--tp->t_rocount;
}
+
/*
* Back over count characters, erasing them.
+ *
+ * Locks: Assumes tty_lock() is held prior to calling.
*/
static void
ttyrubo(struct tty *tp, int count)
{
+	TTY_LOCK_OWNED(tp);	/* debug assert */
+
+	/* Back up over each of the `count' characters and overwrite it. */
while (count-- > 0) {
(void)ttyoutput('\b', tp);
(void)ttyoutput(' ', tp);
}
}
+
/*
* ttyretype --
* Reprint the rawq line. Note, it is assumed that c_cc has already
* been checked.
+ *
+ * Locks: Assumes tty_lock() is held prior to calling.
*/
static void
ttyretype(struct tty *tp)
u_char *cp;
int c;
+ TTY_LOCK_OWNED(tp); /* debug assert */
+
/* Echo the reprint character. */
if (tp->t_cc[VREPRINT] != _POSIX_VDISABLE)
ttyecho(tp->t_cc[VREPRINT], tp);
tp->t_rocol = 0;
}
+
/*
* Echo a typed character to the terminal.
+ *
+ * Locks: Assumes tty_lock() is held prior to calling.
*/
static void
ttyecho(int c, struct tty *tp)
{
+ TTY_LOCK_OWNED(tp); /* debug assert */
+
if (!ISSET(tp->t_state, TS_CNTTB))
CLR(tp->t_lflag, FLUSHO);
if ((!ISSET(tp->t_lflag, ECHO) &&
(void)ttyoutput(c, tp);
}
+
/*
* Wake up any readers on a tty.
+ *
+ * Locks: Assumes tty_lock() is held for the call.
*/
void
ttwakeup(struct tty *tp)
{
+	TTY_LOCK_OWNED(tp);	/* debug assert */
+
+	/* Wake select() waiters and deliver kevents to attached read knotes. */
selwakeup(&tp->t_rsel);
-	if (ISSET(tp->t_state, TS_ASYNC))
+	KNOTE(&tp->t_rsel.si_note, 1);
+	if (ISSET(tp->t_state, TS_ASYNC)) {
+		/*
+		 * XXX: Callers may not revalidate if the tty is closed
+		 * XXX: out from under them by another thread, but we do
+		 * XXX: not support queued signals. This should be safe,
+		 * XXX: since the process we intend to wakeup is in the
+		 * XXX: process group, and will wake up because of the
+		 * XXX: signal anyway.
+		 */
+		tty_unlock(tp);
tty_pgsignal(tp, SIGIO, 1);
+	tty_lock(tp);
+	}
wakeup(TSA_HUP_OR_INPUT(tp));
}
+
/*
+ * ttwwakeup (LDISC)
+ *
* Wake up any writers on a tty.
+ *
+ * Locks: Assumes tty_lock() is held prior to calling.
*/
void
ttwwakeup(struct tty *tp)
{
-	if (tp->t_outq.c_cc <= tp->t_lowat)
+	TTY_LOCK_OWNED(tp);	/* debug assert */
+
+	/* Only wake writers once output has drained to the low-water mark. */
+	if (tp->t_outq.c_cc <= tp->t_lowat) {
selwakeup(&tp->t_wsel);
+		KNOTE(&tp->t_wsel.si_note, 1);
+	}
if (ISSET(tp->t_state, TS_BUSY | TS_SO_OCOMPLETE) ==
TS_SO_OCOMPLETE && tp->t_outq.c_cc == 0) {
CLR(tp->t_state, TS_SO_OCOMPLETE);
}
}
+
/*
* Look up a code for a specified speed in a conversion table;
* used by drivers to map software speed values to hardware parameters.
+ *
+ * Notes: No locks are assumed for this function; it does not
+ * directly access struct tty.
*/
int
ttspeedtab(int speed, struct speedtab *table)
return (-1);
}
+
/*
* Set tty hi and low water marks.
*
* Try to arrange the dynamics so there's about one second
* from hi to low water.
*
+ * Locks: Assumes tty_lock() is held prior to calling.
*/
void
ttsetwater(struct tty *tp)
int cps;
unsigned int x;
+ TTY_LOCK_OWNED(tp); /* debug assert */
+
#define CLAMP(x, h, l) ((x) > h ? h : ((x) < l) ? l : (x))
cps = tp->t_ospeed / 10;
thread_info_t thread_info_out,
mach_msg_type_number_t *thread_info_count);
+
/*
* Report on state of foreground process group.
+ *
+ * Locks: Assumes tty_lock() is held prior to calling.
*/
void
-ttyinfo(struct tty *tp)
+ttyinfo_locked(struct tty *tp)
{
int load;
thread_t thread;
mach_msg_type_number_t mmtn = THREAD_BASIC_INFO_COUNT;
struct pgrp * pg;
+ TTY_LOCK_OWNED(tp); /* debug assert */
if (ttycheckoutq(tp,0) == 0)
return;
}
}
pgrp_unlock(pg);
+ /* SAFE: All callers drop the lock on return */
+ tty_unlock(tp);
pg_rele(pg);
+ tty_lock(tp);
pick = proc_find(pickpid);
if (pick == PROC_NULL)
proc_rele(pick);
/* Print command, pid, state, utime, and stime */
- ttyprintf(tp, " cmd: %s %d %s %ld.%02ldu %ld.%02lds\n",
+ ttyprintf(tp, " cmd: %s %d %s %ld.%02du %ld.%02ds\n",
pick->p_comm,
pick->p_pid,
state,
tp->t_rocount = 0;
}
+
/*
* Returns 1 if p2 is "better" than p1
*
#define ONLYB 1
#define BOTH 3
+/*
+ * Locks: pgrp_lock(p2) held on call to this function
+ * tty_lock(tp) for p2's tty, for which p2 is the foreground
+ * process, held on call to this function
+ */
static int
proc_compare(proc_t p1, proc_t p2)
{
return (p2->p_pid > p1->p_pid); /* tie - return highest pid */
}
+
/*
* Output char to tty; console putchar style.
+ *
+ * Locks: Assumes tty_lock() is held prior to calling.
+ *
+ * Notes: Only ever called from putchar() in subr_prf.c
*/
int
tputchar(int c, struct tty *tp)
{
+ TTY_LOCK_OWNED(tp); /* debug assert */
+
if (!ISSET(tp->t_state, TS_CONNECTED)) {
return (-1);
}
return (0);
}
+
/*
+ * ttysleep
+ *
+ * Sleep on a wait channel waiting for an interrupt or a condition to come
+ * true so that we are woken up.
+ *
+ * Parameters: tp Tty going to sleep
+ * chan The sleep channel (usually an address
+ * of a structure member)
+ * pri priority and flags
+ * wmesg Wait message; shows up in debugger,
+ * should show up in "ps", but doesn't
+ * timo Timeout for the sleep
+ *
+ * Returns: 0 Condition came true
+ * ERESTART Upper layer must redrive the call;
+ * this is usually done by the Libc
+ * stub in user space
+ * msleep0:EINTR Interrupted (usually a signal)
+ * msleep0:ERESTART Interrupted (usually a masked signal)
+ * msleep0:EWOULDBLOCK Timeout (timo) already expired
+ *
+ * Locks: Assumes tty_lock() is held prior to calling.
+ *
* Sleep on chan, returning ERESTART if tty changed while we napped and
- * returning any errors (e.g. EINTR/EWOULDBLOCK) reported by tsleep. If
+ * returning any errors (e.g. EINTR/EWOULDBLOCK) reported by msleep0. If
* the tty is revoked, restarting a pending call will redo validation done
* at the start of the call.
*/
int error;
int gen;
+ TTY_LOCK_OWNED(tp);
+
gen = tp->t_gen;
- error = tsleep(chan, pri, wmesg, timo);
+ /* Use of msleep0() avoids conversion timo/timespec/timo */
+ error = msleep0(chan, &tp->t_lock, pri, wmesg, timo, (int (*)(int))0);
if (error)
return (error);
return (tp->t_gen == gen ? 0 : ERESTART);
}
+
/*
* Allocate a tty structure and its associated buffers.
*
*
* Returns: !NULL Address of new struct tty
* NULL Error ("ENOMEM")
+ *
+ * Locks: The tty_lock() of the returned tty is not held when it
+ * is returned.
*/
struct tty *
ttymalloc(void)
clalloc(&tp->t_canq, TTYCLSIZE, 1);
/* output queue doesn't need quoting */
clalloc(&tp->t_outq, TTYCLSIZE, 0);
+ lck_mtx_init(&tp->t_lock, tty_lck_grp, tty_lck_attr);
+ klist_init(&tp->t_rsel.si_note);
+ klist_init(&tp->t_wsel.si_note);
+ tp->t_refcnt = 1;
}
- return(tp);
+ return (tp);
+}
+
+/*
+ * Increment the reference count on a tty.
+ *
+ * Locks:	Assumes tty_lock() is held prior to calling.
+ */
+void
+ttyhold(struct tty *tp)
+{
+	TTY_LOCK_OWNED(tp);
+	tp->t_refcnt++;
+}
/*
- * Free a tty structure and its buffers.
+ * Drops a reference count on a tty structure; if the reference count reaches
+ * zero, then also frees the structure and associated buffers.
*/
void
ttyfree(struct tty *tp)
{
+	TTY_LOCK_NOTOWNED(tp);
+
+	tty_lock(tp);
+	if (--tp->t_refcnt == 0) {
+		tty_unlock(tp);
+		ttydeallocate(tp);
+	} else if (tp->t_refcnt < 0) {
+		/* A negative count means this tty was already freed once. */
+		panic("%s: freeing free tty %p", __func__, tp);
+	} else
+		tty_unlock(tp);
+}
+
+/*
+ * Deallocate a tty structure and its buffers.
+ *
+ * Locks: The tty_lock() is assumed to not be held at the time of
+ * the free; this function destroys the mutex.
+ */
+static void
+ttydeallocate(struct tty *tp)
+{
+	TTY_LOCK_NOTOWNED(tp);	/* debug assert */
+
+#if DEBUG
+	if (!(SLIST_EMPTY(&tp->t_rsel.si_note) && SLIST_EMPTY(&tp->t_wsel.si_note))) {
+		panic("knotes hooked into a tty when the tty is freed.\n");
+	}
+#endif /* DEBUG */
+
+	/* Free the clist buffers, then the per-tty lock, then the tty itself. */
clfree(&tp->t_rawq);
clfree(&tp->t_canq);
clfree(&tp->t_outq);
+	lck_mtx_destroy(&tp->t_lock, tty_lck_grp);
FREE(tp, M_TTYS);
}
+
+/*
+ * Locks: Assumes tty_lock() is held prior to calling.
+ */
int
isbackground(proc_t p, struct tty *tp)
{
-	return (isctty(p, tp) && (p->p_pgrp != tp->t_pgrp));
+	TTY_LOCK_OWNED(tp);
+
+	/*
+	 * p is "background" when the tty is the controlling terminal of p's
+	 * session (per isctty_sp) but p's process group is not the tty's
+	 * foreground process group.
+	 */
+	return (tp->t_session != NULL && p->p_pgrp != NULL && (p->p_pgrp != tp->t_pgrp) && isctty_sp(p, tp, p->p_pgrp->pg_session));
}
static int
}
+
+static int filt_ttyattach(struct knote *kn, struct kevent_internal_s *kev);
+static void filt_ttydetach(struct knote *kn);
+static int filt_ttyevent(struct knote *kn, long hint);
+static int filt_ttytouch(struct knote *kn, struct kevent_internal_s *kev);
+static int filt_ttyprocess(struct knote *kn, struct filt_process_s *data, struct kevent_internal_s *kev);
+
+/*
+ * Filter operations vector for tty knotes; used for both read and write
+ * filters (the seltype is dispatched inside each filt_tty* routine).
+ */
+SECURITY_READ_ONLY_EARLY(struct filterops) tty_filtops = {
+	.f_isfd = 1,
+	.f_attach = filt_ttyattach,
+	.f_detach = filt_ttydetach,
+	.f_event = filt_ttyevent,
+	.f_touch = filt_ttytouch,
+	.f_process = filt_ttyprocess
+};
+
+/*
+ * Called with struct tty locked. Returns non-zero if there is data to be read
+ * or written.
+ */
+static int
+filt_tty_common(struct knote *kn, struct tty *tp)
+{
+	int retval = 0;
+
+	TTY_LOCK_OWNED(tp);	/* debug assert */
+
+	/* A zombie tty can never produce more data: report EOF. */
+	if (tp->t_state & TS_ZOMBIE) {
+		kn->kn_flags |= EV_EOF;
+		return 1;
+	}
+
+	switch (knote_get_seltype(kn)) {
+	case FREAD:
+		/* characters currently available to read, via ttnread() */
+		retval = ttnread(tp);
+		break;
+	case FWRITE:
+		/* writable only when drained to low water and still connected;
+		 * report the space remaining below the high-water mark */
+		if ((tp->t_outq.c_cc <= tp->t_lowat) &&
+		    (tp->t_state & TS_CONNECTED)) {
+			retval = tp->t_hiwat - tp->t_outq.c_cc;
+		}
+		break;
+	}
+
+	kn->kn_data = retval;
+
+	/*
+	 * TODO(mwidmann, jandrus): For native knote low watermark support,
+	 * check the kn_sfflags for NOTE_LOWAT and check against kn_sdata.
+	 *
+	 * res = ((kn->kn_sfflags & NOTE_LOWAT) != 0) ?
+	 *	(kn->kn_data >= kn->kn_sdata) : kn->kn_data;
+	 */
+
+	return retval;
+}
+
+/*
+ * Find the struct tty from a waitq, which is a member of one of the two struct
+ * selinfos inside the struct tty. Use the seltype to determine which selinfo.
+ */
+static struct tty *
+tty_from_waitq(struct waitq *wq, int seltype)
+{
+	struct selinfo *si;
+	struct tty *tp = NULL;
+
+	/*
+	 * The waitq is part of the selinfo structure managed by the driver. For
+	 * certain drivers, we want to hook the knote into the selinfo
+	 * structure's si_note field so selwakeup can call KNOTE.
+	 *
+	 * While 'wq' is not really a queue element, this macro only uses the
+	 * pointer to calculate the offset into a structure given an element
+	 * name.
+	 */
+	si = qe_element(wq, struct selinfo, si_waitq);
+
+	/*
+	 * For TTY drivers, the selinfo structure is somewhere in the struct
+	 * tty. There are two different selinfo structures, and the one used
+	 * corresponds to the type of filter requested.
+	 *
+	 * While 'si' is not really a queue element, this macro only uses the
+	 * pointer to calculate the offset into a structure given an element
+	 * name.
+	 */
+	switch (seltype) {
+	case FREAD:
+		tp = qe_element(si, struct tty, t_rsel);
+		break;
+	case FWRITE:
+		tp = qe_element(si, struct tty, t_wsel);
+		break;
+	}
+
+	/* tp remains NULL for any seltype other than FREAD or FWRITE */
+	return tp;
+}
+
+/* Return the tty stashed in kn_hook at attach time; may be NULL. */
+static struct tty *
+tty_from_knote(struct knote *kn)
+{
+	return (struct tty *)kn->kn_hook;
+}
+
+/*
+ * Try to lock the TTY structure associated with a knote.
+ *
+ * On success, this function returns a locked TTY structure. Otherwise, NULL is
+ * returned.
+ */
+__attribute__((warn_unused_result))
+static struct tty *
+tty_lock_from_knote(struct knote *kn)
+{
+	struct tty *tp = tty_from_knote(kn);
+	if (tp) {
+		/* caller is responsible for tty_unlock() on the returned tty */
+		tty_lock(tp);
+	}
+
+	return tp;
+}
+
+/*
+ * Set the knote's struct tty to the kn_hook field.
+ *
+ * The idea is to fake a call to select with our own waitq set. If the driver
+ * calls selrecord, we'll get a link to their waitq and access to the tty
+ * structure.
+ *
+ * Returns -1 on failure, with the error set in the knote, or selres on success.
+ */
+static int
+tty_set_knote_hook(struct knote *kn)
+{
+	uthread_t uth;
+	vfs_context_t ctx;
+	vnode_t vp;
+	kern_return_t kr;
+	struct waitq *wq = NULL;
+	struct waitq_set *old_wqs;
+	struct waitq_set tmp_wqs;	/* on-stack set, torn down via waitq_set_deinit() below */
+	uint64_t rsvd, rsvd_arg;
+	uint64_t *rlptr = NULL;
+	int selres = -1;
+	struct tty *tp;
+
+	uth = get_bsdthread_info(current_thread());
+
+	ctx = vfs_context_current();
+	/* NOTE(review): assumes fg_data is a vnode — confirm against filt_specattach */
+	vp = (vnode_t)kn->kn_fp->f_fglob->fg_data;
+
+	/*
+	 * Reserve a link element to avoid potential allocation under
+	 * a spinlock.
+	 */
+	rsvd = rsvd_arg = waitq_link_reserve(NULL);
+	rlptr = (void *)&rsvd_arg;
+
+	/*
+	 * Trick selrecord into hooking a known waitq set into the device's selinfo
+	 * waitq. Once the link is in place, we can get back into the selinfo from
+	 * the waitq and subsequently the tty (see tty_from_waitq).
+	 *
+	 * We can't use a real waitq set (such as the kqueue's) because wakeups
+	 * might happen before we can unlink it.
+	 */
+	kr = waitq_set_init(&tmp_wqs, SYNC_POLICY_FIFO | SYNC_POLICY_PREPOST, NULL,
+	    NULL);
+	assert(kr == KERN_SUCCESS);
+
+	/*
+	 * Lazy allocate the waitqset to avoid potential allocation under
+	 * a spinlock;
+	 */
+	waitq_set_lazy_init_link(&tmp_wqs);
+
+	old_wqs = uth->uu_wqset;
+	uth->uu_wqset = &tmp_wqs;
+	/*
+	 * FMARK forces selects to always call selrecord, even if data is
+	 * available. See ttselect, ptsselect, ptcselect.
+	 *
+	 * selres also contains the data currently available in the tty.
+	 */
+	selres = VNOP_SELECT(vp, knote_get_seltype(kn) | FMARK, 0, rlptr, ctx);
+	uth->uu_wqset = old_wqs;
+
+	/*
+	 * Make sure to cleanup the reserved link - this guards against
+	 * drivers that may not actually call selrecord().
+	 */
+	waitq_link_release(rsvd);
+	if (rsvd == rsvd_arg) {
+		/*
+		 * The driver didn't call selrecord -- there's no tty hooked up so we
+		 * can't attach.
+		 */
+		knote_set_error(kn, ENOTTY);
+		selres = -1;
+		goto out;
+	}
+
+	/* rlptr may not point to a properly aligned pointer */
+	memcpy(&wq, rlptr, sizeof(void *));
+
+	tp = tty_from_waitq(wq, knote_get_seltype(kn));
+	assert(tp != NULL);
+
+	/*
+	 * Take a reference and stash the tty in the knote.
+	 */
+	tty_lock(tp);
+	ttyhold(tp);
+	kn->kn_hook = tp;
+	tty_unlock(tp);
+
+out:
+	/*
+	 * Cleaning up the wqset will unlink its waitq and clean up any preposts
+	 * that occurred as a result of data coming in while the tty was attached.
+	 */
+	waitq_set_deinit(&tmp_wqs);
+
+	return selres;
+}
+
+static int
+filt_ttyattach(struct knote *kn, __unused struct kevent_internal_s *kev)
+{
+	int selres = 0;
+	struct tty *tp;
+
+	/*
+	 * This function should be called from filt_specattach (spec_vnops.c),
+	 * so most of the knote data structure should already be initialized.
+	 */
+
+	/* don't support offsets in ttys or drivers that don't use struct tty */
+	if (kn->kn_vnode_use_ofst || !kn->kn_vnode_kqok) {
+		knote_set_error(kn, ENOTSUP);
+		return 0;
+	}
+
+	/*
+	 * Connect the struct tty to the knote through the selinfo structure
+	 * referenced by the waitq within the selinfo.
+	 */
+	selres = tty_set_knote_hook(kn);
+	if (selres < 0) {
+		/* hook failed; the error is already set in the knote */
+		return 0;
+	}
+
+	/*
+	 * Attach the knote to selinfo's klist.
+	 */
+	tp = tty_lock_from_knote(kn);
+	if (!tp) {
+		knote_set_error(kn, ENOENT);
+		return 0;
+	}
+
+	switch (knote_get_seltype(kn)) {
+	case FREAD:
+		KNOTE_ATTACH(&tp->t_rsel.si_note, kn);
+		break;
+	case FWRITE:
+		KNOTE_ATTACH(&tp->t_wsel.si_note, kn);
+		break;
+	}
+
+	tty_unlock(tp);
+
+	/* non-zero selres means the tty already has data/space available */
+	return selres;
+}
+
+static void
+filt_ttydetach(struct knote *kn)
+{
+	struct tty *tp;
+
+	tp = tty_lock_from_knote(kn);
+	if (!tp) {
+		knote_set_error(kn, ENOENT);
+		return;
+	}
+
+	struct selinfo *si = NULL;
+	switch (knote_get_seltype(kn)) {
+	case FREAD:
+		si = &tp->t_rsel;
+		break;
+	case FWRITE:
+		si = &tp->t_wsel;
+		break;
+	/* knote_get_seltype will panic on default */
+	}
+
+	KNOTE_DETACH(&si->si_note, kn);
+	kn->kn_hook = NULL;
+
+	tty_unlock(tp);
+	/* drop the reference taken by tty_set_knote_hook() */
+	ttyfree(tp);
+}
+
+static int
+filt_ttyevent(struct knote *kn, long hint)
+{
+	int ret;
+	struct tty *tp;
+	bool revoked = hint & NOTE_REVOKE;
+	hint &= ~NOTE_REVOKE;
+
+	tp = tty_from_knote(kn);
+	if (!tp) {
+		knote_set_error(kn, ENOENT);
+		return 0;
+	}
+
+	/*
+	 * A non-zero hint means we were called via KNOTE() (e.g. from
+	 * ttwakeup/ttwwakeup) with the tty lock already held; otherwise
+	 * take the lock ourselves.
+	 */
+	if (!hint) {
+		tty_lock(tp);
+	}
+
+	if (revoked) {
+		/* terminal revoked: report EOF and fire only once */
+		kn->kn_flags |= EV_EOF | EV_ONESHOT;
+		ret = 1;
+	} else {
+		ret = filt_tty_common(kn, tp);
+	}
+
+	if (!hint) {
+		tty_unlock(tp);
+	}
+
+	return ret;
+}
+
+static int
+filt_ttytouch(struct knote *kn, struct kevent_internal_s *kev)
+{
+	struct tty *tp;
+	int res = 0;
+
+	tp = tty_lock_from_knote(kn);
+	if (!tp) {
+		knote_set_error(kn, ENOENT);
+		return 0;
+	}
+
+	/* accept the updated filter parameters from userspace */
+	kn->kn_sdata = kev->data;
+	kn->kn_sfflags = kev->fflags;
+
+	/* only poll tty state for drivers that support kqueue (checked at attach) */
+	if (kn->kn_vnode_kqok) {
+		res = filt_tty_common(kn, tp);
+	}
+
+	tty_unlock(tp);
+
+	return res;
+}
+
+static int
+filt_ttyprocess(struct knote *kn, __unused struct filt_process_s *data, struct kevent_internal_s *kev)
+{
+	struct tty *tp;
+	int res;
+
+	tp = tty_lock_from_knote(kn);
+	if (!tp) {
+		knote_set_error(kn, ENOENT);
+		return 0;
+	}
+
+	res = filt_tty_common(kn, tp);
+
+	if (res) {
+		*kev = kn->kn_kevent;
+		if (kn->kn_flags & EV_CLEAR) {
+			/* EV_CLEAR: reset latched state so only new activity fires */
+			kn->kn_fflags = 0;
+			kn->kn_data = 0;
+		}
+	}
+
+	tty_unlock(tp);
+
+	return res;
+}