/*
- * Copyright (c) 1997-2006 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 1997-2010 Apple Computer, Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
#include <sys/tty.h>
#include <sys/conf.h>
#include <sys/file_internal.h>
-#include <sys/uio.h>
+#include <sys/uio_internal.h>
#include <sys/kernel.h>
#include <sys/vnode.h>
#include <sys/vnode_internal.h> /* _devfs_setattr() */
char name[128];
snprintf(name, sizeof(name), "/dev/%s", direntp->de_name);
- NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, CAST_USER_ADDR_T(name), ctx);
+ NDINIT(&nd, LOOKUP, OP_SETATTR, FOLLOW, UIO_SYSSPACE, CAST_USER_ADDR_T(name), ctx);
error = namei(&nd);
if (error)
goto out;
SYSCTL_NODE(_kern, KERN_TTY, tty, CTLFLAG_RW|CTLFLAG_LOCKED, 0, "TTY");
SYSCTL_PROC(_kern_tty, OID_AUTO, ptmx_max,
- CTLTYPE_INT | CTLFLAG_RW,
+ CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
&ptmx_max, 0, &sysctl_ptmx_max, "I", "ptmx_max");
static int ptmx_clone(dev_t dev, int minor);
+/*
+ * Set of locks to keep the interaction between kevents and revoke
+ * from causing havoc.
+ */
+
+#define LOG2_PTSD_KE_NLCK 2
+#define PTSD_KE_NLCK (1l << LOG2_PTSD_KE_NLCK)
+#define PTSD_KE_LOCK_INDEX(x) ((x) & (PTSD_KE_NLCK - 1))
+
+static lck_mtx_t ptsd_kevent_lock[PTSD_KE_NLCK];
+
+/*
+ * One-time setup: allocate a lock group and initialize the
+ * PTSD_KE_NLCK mutexes that guard the interaction between
+ * kevents and revoke (see comment above ptsd_kevent_lock[]).
+ */
+static void
+ptsd_kevent_lock_init(void)
+{
+ int i;
+ lck_grp_t *lgrp = lck_grp_alloc_init("ptsd kevent", LCK_GRP_ATTR_NULL);
+
+ for (i = 0; i < PTSD_KE_NLCK; i++)
+ lck_mtx_init(&ptsd_kevent_lock[i], lgrp, LCK_ATTR_NULL);
+}
+
+/*
+ * Locks: Takes the kevent interlock assigned to this pty minor;
+ * minors are hashed onto the PTSD_KE_NLCK mutexes by
+ * PTSD_KE_LOCK_INDEX(), so distinct minors may share a lock.
+ */
+static void
+ptsd_kevent_mtx_lock(int minor)
+{
+ lck_mtx_lock(&ptsd_kevent_lock[PTSD_KE_LOCK_INDEX(minor)]);
+}
+
+/*
+ * Locks: Releases the kevent interlock taken by ptsd_kevent_mtx_lock()
+ * for the same minor.
+ */
+static void
+ptsd_kevent_mtx_unlock(int minor)
+{
+ lck_mtx_unlock(&ptsd_kevent_lock[PTSD_KE_LOCK_INDEX(minor)]);
+}
+
int
ptmx_init( __unused int config_count)
{
return (ENOENT);
}
+ if (cdevsw_setkqueueok(ptmx_major, &ptmx_cdev, 0) == -1) {
+ panic("Failed to set flags on ptmx cdevsw entry.");
+ }
+
/* Get a major number for /dev/pts/nnn */
if ((ptsd_major = cdevsw_add(-15, &ptsd_cdev)) == -1) {
(void)cdevsw_remove(ptmx_major, &ptmx_cdev);
printf("ptmx_init: failed to obtain /dev/ptmx major number\n");
return (ENOENT);
}
+
+ if (cdevsw_setkqueueok(ptsd_major, &ptsd_cdev, 0) == -1) {
+ panic("Failed to set flags on ptmx cdevsw entry.");
+ }
+
+ /*
+ * Locks to guard against races between revoke and kevents
+ */
+ ptsd_kevent_lock_init();
/* Create the /dev/ptmx device {<major>,0} */
(void)devfs_make_node_clone(makedev(ptmx_major, 0),
*
* Returns: NULL Did not exist/could not create
* !NULL structure corresponding minor number
+ *
+ * Locks: tty_lock() on ptmx_ioctl->pt_tty NOT held on entry or exit.
*/
static struct ptmx_ioctl *
ptmx_get_ioctl(int minor, int open_flag)
_state.pis_total += PTMX_GROW_VECTOR;
if (old_pis_ioctl_list)
FREE(old_pis_ioctl_list, M_TTYS);
+ }
+
+ if (_state.pis_ioctl_list[minor] != NULL) {
+ ttyfree(new_ptmx_ioctl->pt_tty);
+ DEVFS_UNLOCK();
+ FREE(new_ptmx_ioctl, M_TTYS);
+
+ /* Special error value so we know to redrive the open, we've been raced */
+ return (struct ptmx_ioctl*)-1;
+
}
/* Vector is large enough; grab a new ptmx_ioctl */
makedev(ptsd_major, minor),
DEVFS_CHAR, UID_ROOT, GID_TTY, 0620,
PTSD_TEMPLATE, minor);
+ if (_state.pis_ioctl_list[minor]->pt_devhandle == NULL) {
+ printf("devfs_make_node() call failed for ptmx_get_ioctl()!!!!\n");
+ }
} else if (open_flag & PF_OPEN_S) {
DEVFS_LOCK();
_state.pis_ioctl_list[minor]->pt_flags |= PF_OPEN_S;
return (_state.pis_ioctl_list[minor]);
}
+/*
+ * Locks: tty_lock() of old_ptmx_ioctl->pt_tty NOT held for this call.
+ */
static int
ptmx_free_ioctl(int minor, int open_flag)
{
struct ptmx_ioctl *old_ptmx_ioctl = NULL;
DEVFS_LOCK();
-#if 5161374
- /*
- * We have to check after taking the DEVFS_LOCK, since the pointer
- * is protected by the lock
- */
- if (_state.pis_ioctl_list[minor] == NULL) {
- DEVFS_UNLOCK();
- return (ENXIO);
- }
-#endif /* 5161374 */
_state.pis_ioctl_list[minor]->pt_flags &= ~(open_flag);
/*
if (!(_state.pis_ioctl_list[minor]->pt_flags & (PF_OPEN_M|PF_OPEN_S))) {
/* Mark as free so it can be reallocated later */
old_ptmx_ioctl = _state.pis_ioctl_list[ minor];
- _state.pis_ioctl_list[ minor] = NULL;
- _state.pis_free++;
}
DEVFS_UNLOCK();
devfs_remove(old_ptmx_ioctl->pt_devhandle);
ttyfree(old_ptmx_ioctl->pt_tty);
FREE(old_ptmx_ioctl, M_TTYS);
+
+ /* Don't remove the entry until the devfs slot is free */
+ DEVFS_LOCK();
+ _state.pis_ioctl_list[ minor] = NULL;
+ _state.pis_free++;
+ DEVFS_UNLOCK();
}
return (0); /* Success */
struct tty *tp;
struct ptmx_ioctl *pti;
int error;
- boolean_t funnel_state;
if ((pti = ptmx_get_ioctl(minor(dev), 0)) == NULL) {
return (ENXIO);
}
- tp = pti->pt_tty;
if (!(pti->pt_flags & PF_UNLOCKED)) {
return (EAGAIN);
}
- funnel_state = thread_funnel_set(kernel_flock, TRUE);
+ tp = pti->pt_tty;
+ tty_lock(tp);
if ((tp->t_state & TS_ISOPEN) == 0) {
- ttychars(tp); /* Set up default chars */
+ termioschars(&tp->t_termios); /* Set up default chars */
tp->t_iflag = TTYDEF_IFLAG;
tp->t_oflag = TTYDEF_OFLAG;
tp->t_lflag = TTYDEF_LFLAG;
error = (*linesw[tp->t_line].l_open)(dev, tp);
/* Successful open; mark as open by the slave */
pti->pt_flags |= PF_OPEN_S;
+ CLR(tp->t_state, TS_IOCTL_NOT_OK);
if (error == 0)
ptmx_wakeup(tp, FREAD|FWRITE);
out:
- (void) thread_funnel_set(kernel_flock, funnel_state);
+ tty_unlock(tp);
return (error);
}
+static void ptsd_revoke_knotes(dev_t, struct tty *);
+
FREE_BSDSTATIC int
ptsd_close(dev_t dev, int flag, __unused int mode, __unused proc_t p)
{
struct tty *tp;
struct ptmx_ioctl *pti;
int err;
- boolean_t funnel_state;
/*
* This is temporary until the VSX conformance tests
int save_timeout;
#endif
pti = ptmx_get_ioctl(minor(dev), 0);
-#if 5161374
- if (pti == NULL || pti->pt_tty == NULL)
- return(ENXIO);
-#endif /* 5161374 */
- tp = pti->pt_tty;
- funnel_state = thread_funnel_set(kernel_flock, TRUE);
+ tp = pti->pt_tty;
+ tty_lock(tp);
#ifdef FIX_VSX_HANG
save_timeout = tp->t_timeout;
#ifdef FIX_VSX_HANG
tp->t_timeout = save_timeout;
#endif
- (void) thread_funnel_set(kernel_flock, funnel_state);
+ tty_unlock(tp);
+
+ if ((flag & IO_REVOKE) == IO_REVOKE)
+ ptsd_revoke_knotes(dev, tp);
/* unconditional, just like ttyclose() */
ptmx_free_ioctl(minor(dev), PF_OPEN_S);
struct ptmx_ioctl *pti;
int error = 0;
struct uthread *ut;
- boolean_t funnel_state;
struct pgrp * pg;
pti = ptmx_get_ioctl(minor(dev), 0);
-#if 5161374
- if (pti == NULL || pti->pt_tty == NULL)
- return(ENXIO);
-#endif /* 5161374 */
- tp = pti->pt_tty;
-
- funnel_state = thread_funnel_set(kernel_flock, TRUE);
+ tp = pti->pt_tty;
+ tty_lock(tp);
ut = (struct uthread *)get_bsdthread_info(current_thread());
again:
error = EIO;
goto out;
}
+ /*
+ * SAFE: We about to drop the lock ourselves by
+ * SAFE: erroring out or sleeping anyway.
+ */
+ tty_unlock(tp);
if (pg->pg_jobc == 0) {
pg_rele(pg);
+ tty_lock(tp);
error = EIO;
goto out;
}
pgsignal(pg, SIGTTIN, 1);
pg_rele(pg);
+ tty_lock(tp);
error = ttysleep(tp, &lbolt, TTIPRI | PCATCH | PTTYBLOCK, "ptsd_bg",
0);
goto out;
}
if (tp->t_canq.c_cc == 0) {
- if (flag & IO_NDELAY)
- return (EWOULDBLOCK);
+ if (flag & IO_NDELAY) {
+ error = EWOULDBLOCK;
+ goto out;
+ }
error = ttysleep(tp, TSA_PTS_READ(tp), TTIPRI | PCATCH,
"ptsd_in", 0);
if (error)
error = (*linesw[tp->t_line].l_read)(tp, uio, flag);
ptmx_wakeup(tp, FWRITE);
out:
- (void) thread_funnel_set(kernel_flock, funnel_state);
+ tty_unlock(tp);
return (error);
}
struct tty *tp;
struct ptmx_ioctl *pti;
int error;
- boolean_t funnel_state;
-
- funnel_state = thread_funnel_set(kernel_flock, TRUE);
pti = ptmx_get_ioctl(minor(dev), 0);
-#if 5161374
- if (pti == NULL || pti->pt_tty == NULL)
- return(ENXIO);
-#endif /* 5161374 */
+
tp = pti->pt_tty;
+ tty_lock(tp);
if (tp->t_oproc == 0)
error = EIO;
else
error = (*linesw[tp->t_line].l_write)(tp, uio, flag);
- (void) thread_funnel_set(kernel_flock, funnel_state);
+ tty_unlock(tp);
return (error);
}
/*
* Start output on pseudo-tty.
* Wake up process selecting or sleeping for input from controlling tty.
+ *
+ * t_oproc for this driver; called from within the line discipline
+ *
+ * Locks: Assumes tp is locked on entry, remains locked on exit
*/
static void
ptsd_start(struct tty *tp)
{
struct ptmx_ioctl *pti;
- boolean_t funnel_state;
pti = ptmx_get_ioctl(minor(tp->t_dev), 0);
-#if 5161374
- if (pti == NULL)
- return; /* XXX ENXIO, but this function is void! */
-#endif /* 5161374 */
-
- funnel_state = thread_funnel_set(kernel_flock, TRUE);
if (tp->t_state & TS_TTSTOP)
goto out;
}
ptmx_wakeup(tp, FREAD);
out:
- (void) thread_funnel_set(kernel_flock, funnel_state);
return;
}
+/*
+ * Locks: Assumes tty_lock() is held over this call.
+ */
static void
ptmx_wakeup(struct tty *tp, int flag)
{
struct ptmx_ioctl *pti;
- boolean_t funnel_state;
pti = ptmx_get_ioctl(minor(tp->t_dev), 0);
-#if 5161374
- if (pti == NULL)
- return; /* XXX ENXIO, but this function is void! */
-#endif /* 5161374 */
-
- funnel_state = thread_funnel_set(kernel_flock, TRUE);
if (flag & FREAD) {
selwakeup(&pti->pt_selr);
selwakeup(&pti->pt_selw);
wakeup(TSA_PTC_WRITE(tp));
}
- (void) thread_funnel_set(kernel_flock, funnel_state);
}
FREE_BSDSTATIC int
struct tty *tp;
struct ptmx_ioctl *pti;
int error = 0;
- boolean_t funnel_state;
-
- if ((pti = ptmx_get_ioctl(minor(dev), PF_OPEN_M)) == NULL) {
+ pti = ptmx_get_ioctl(minor(dev), PF_OPEN_M);
+ if (pti == NULL) {
return (ENXIO);
+ } else if (pti == (struct ptmx_ioctl*)-1) {
+ return (EREDRIVEOPEN);
}
- tp = pti->pt_tty;
- funnel_state = thread_funnel_set(kernel_flock, TRUE);
+ tp = pti->pt_tty;
+ tty_lock(tp);
/* If master is open OR slave is still draining, pty is still busy */
if (tp->t_oproc || (tp->t_state & TS_ISOPEN)) {
+ tty_unlock(tp);
/*
* If master is closed, we are the only reference, so we
* need to clear the master open bit
if (!tp->t_oproc)
ptmx_free_ioctl(minor(dev), PF_OPEN_M);
error = EBUSY;
- goto out;
+ goto err;
}
tp->t_oproc = ptsd_start;
CLR(tp->t_state, TS_ZOMBIE);
+ SET(tp->t_state, TS_IOCTL_NOT_OK);
#ifdef sun4c
tp->t_stop = ptsd_stop;
#endif
(void)(*linesw[tp->t_line].l_modem)(tp, 1);
tp->t_lflag &= ~EXTPROC;
-out:
- (void) thread_funnel_set(kernel_flock, funnel_state);
+ tty_unlock(tp);
+err:
return (error);
}
{
struct tty *tp;
struct ptmx_ioctl *pti;
- boolean_t funnel_state;
pti = ptmx_get_ioctl(minor(dev), 0);
-#if 5161374
- if (pti == NULL || pti->pt_tty == NULL)
- return(ENXIO);
-#endif /* 5161374 */
- tp = pti->pt_tty;
- funnel_state = thread_funnel_set(kernel_flock, TRUE);
+ tp = pti->pt_tty;
+ tty_lock(tp);
(void)(*linesw[tp->t_line].l_modem)(tp, 0);
tp->t_oproc = 0; /* mark closed */
- (void) thread_funnel_set(kernel_flock, funnel_state);
+ tty_unlock(tp);
ptmx_free_ioctl(minor(dev), PF_OPEN_M);
struct ptmx_ioctl *pti;
char buf[BUFSIZ];
int error = 0, cc;
- boolean_t funnel_state;
pti = ptmx_get_ioctl(minor(dev), 0);
-#if 5161374
- if (pti == NULL || pti->pt_tty == NULL)
- return(ENXIO);
-#endif /* 5161374 */
- tp = pti->pt_tty;
- funnel_state = thread_funnel_set(kernel_flock, TRUE);
+ tp = pti->pt_tty;
+ tty_lock(tp);
/*
* We want to block until the slave
error = EWOULDBLOCK;
goto out;
}
- error = tsleep(TSA_PTC_READ(tp), TTIPRI | PCATCH, "ptmx_in", 0);
+ error = ttysleep(tp, TSA_PTC_READ(tp), TTIPRI | PCATCH, "ptmx_in", 0);
if (error)
goto out;
}
(*linesw[tp->t_line].l_start)(tp);
out:
- (void) thread_funnel_set(kernel_flock, funnel_state);
+ tty_unlock(tp);
return (error);
}
+/*
+ * Line discipline callback
+ *
+ * Locks: tty_lock() is assumed held on entry and exit.
+ */
FREE_BSDSTATIC int
ptsd_stop(struct tty *tp, int flush)
{
struct ptmx_ioctl *pti;
int flag;
- boolean_t funnel_state;
pti = ptmx_get_ioctl(minor(tp->t_dev), 0);
-#if 5161374
- if (pti == NULL)
- return(ENXIO);
-#endif /* 5161374 */
-
- funnel_state = thread_funnel_set(kernel_flock, TRUE);
/* note: FLUSHREAD and FLUSHWRITE already ok */
if (flush == 0) {
flag |= FREAD;
ptmx_wakeup(tp, flag);
- (void) thread_funnel_set(kernel_flock, funnel_state);
-
return (0);
}
{
struct ptmx_ioctl *pti;
struct tty *tp;
+ int retval = 0;
pti = ptmx_get_ioctl(minor(dev), 0);
-#if 5161374
- if (pti == NULL || pti->pt_tty == NULL)
- return(ENXIO);
-#endif /* 5161374 */
+
tp = pti->pt_tty;
if (tp == NULL)
return (ENXIO);
+ tty_lock(tp);
+
switch (rw) {
case FREAD:
- if (ttnread(tp) > 0 || ISSET(tp->t_state, TS_ZOMBIE))
- return(1);
+ if (ISSET(tp->t_state, TS_ZOMBIE)) {
+ retval = 1;
+ break;
+ }
+
+ retval = ttnread(tp);
+ if (retval > 0) {
+ break;
+ }
+
selrecord(p, &tp->t_rsel, wql);
break;
case FWRITE:
- if ((tp->t_outq.c_cc <= tp->t_lowat &&
- ISSET(tp->t_state, TS_CONNECTED))
- || ISSET(tp->t_state, TS_ZOMBIE)) {
- return (1);
+ if (ISSET(tp->t_state, TS_ZOMBIE)) {
+ retval = 1;
+ break;
+ }
+
+ if ((tp->t_outq.c_cc <= tp->t_lowat) &&
+ ISSET(tp->t_state, TS_CONNECTED)) {
+ retval = tp->t_hiwat - tp->t_outq.c_cc;
+ break;
}
+
selrecord(p, &tp->t_wsel, wql);
break;
}
- return (0);
+
+ tty_unlock(tp);
+ return (retval);
}
FREE_BSDSTATIC int
struct tty *tp;
struct ptmx_ioctl *pti;
int retval = 0;
- boolean_t funnel_state;
pti = ptmx_get_ioctl(minor(dev), 0);
-#if 5161374
- if (pti == NULL || pti->pt_tty == NULL)
- return(ENXIO);
-#endif /* 5161374 */
- tp = pti->pt_tty;
- funnel_state = thread_funnel_set(kernel_flock, TRUE);
+ tp = pti->pt_tty;
+ tty_lock(tp);
if ((tp->t_state & TS_CONNECTED) == 0) {
retval = 1;
goto out;
}
switch (rw) {
-
case FREAD:
/*
* Need to block timeouts (ttrstart).
*/
if ((tp->t_state&TS_ISOPEN) &&
tp->t_outq.c_cc && (tp->t_state&TS_TTSTOP) == 0) {
- retval = 1;
- goto out;
+ retval = tp->t_outq.c_cc;
+ break;
}
/* FALLTHROUGH */
((pti->pt_flags & PF_PKT && pti->pt_send) ||
(pti->pt_flags & PF_UCNTL && pti->pt_ucntl))) {
retval = 1;
- goto out;
+ break;
}
selrecord(p, &pti->pt_selr, wql);
break;
-
case FWRITE:
if (tp->t_state&TS_ISOPEN) {
if (pti->pt_flags & PF_REMOTE) {
if (tp->t_canq.c_cc == 0) {
- retval = 1;
- goto out;
+ retval = (TTYHOG -1) ;
+ break;
}
} else {
- if (tp->t_rawq.c_cc + tp->t_canq.c_cc < TTYHOG-2) {
- retval = 1;
- goto out;
+ retval = (TTYHOG - 2) - (tp->t_rawq.c_cc + tp->t_canq.c_cc);
+ if (retval > 0) {
+ break;
}
if (tp->t_canq.c_cc == 0 && (tp->t_lflag&ICANON)) {
retval = 1;
- goto out;
+ break;
}
+ retval = 0;
}
}
selrecord(p, &pti->pt_selw, wql);
}
out:
- (void) thread_funnel_set(kernel_flock, funnel_state);
+ tty_unlock(tp);
return (retval);
}
u_char locbuf[BUFSIZ];
int wcnt = 0;
int error = 0;
- boolean_t funnel_state;
pti = ptmx_get_ioctl(minor(dev), 0);
-#if 5161374
- if (pti == NULL || pti->pt_tty == NULL)
- return(ENXIO);
-#endif /* 5161374 */
- tp = pti->pt_tty;
- funnel_state = thread_funnel_set(kernel_flock, TRUE);
+ tp = pti->pt_tty;
+ tty_lock(tp);
again:
if ((tp->t_state&TS_ISOPEN) == 0)
}
cc = 0;
}
+
out:
- (void) thread_funnel_set(kernel_flock, funnel_state);
+ tty_unlock(tp);
return (error);
+
block:
/*
* Come here to wait for slave to open, for space
error = EWOULDBLOCK;
goto out;
}
- error = tsleep(TSA_PTC_WRITE(tp), TTOPRI | PCATCH, "ptmx_out", 0);
+ error = ttysleep(tp, TSA_PTC_WRITE(tp), TTOPRI | PCATCH, "ptmx_out", 0);
if (error) {
/* adjust for data copied in but not written */
uio_setresid(uio, (uio_resid(uio) + cc));
struct ptmx_ioctl *pti;
u_char *cc;
int stop, error = 0;
- boolean_t funnel_state;
+ int allow_ext_ioctl = 1;
pti = ptmx_get_ioctl(minor(dev), 0);
-#if 5161374
- if (pti == NULL || pti->pt_tty == NULL)
- return(ENXIO);
-#endif /* 5161374 */
+
tp = pti->pt_tty;
+ tty_lock(tp);
+
cc = tp->t_cc;
- funnel_state = thread_funnel_set(kernel_flock, TRUE);
+ /*
+ * Do not permit extended ioctls on the master side of the pty unless
+ * the slave side has been successfully opened and initialized.
+ */
+ if (cdevsw[major(dev)].d_open == ptmx_open && ISSET(tp->t_state, TS_IOCTL_NOT_OK))
+ allow_ext_ioctl = 0;
/*
* IF CONTROLLER STTY THEN MUST FLUSH TO PREVENT A HANG.
* ttywflush(tp) will hang if there are characters in the outq.
*/
- if (cmd == TIOCEXT) {
+ if (cmd == TIOCEXT && allow_ext_ioctl) {
/*
* When the EXTPROC bit is being toggled, we need
* to send an TIOCPKT_IOCTL if the packet driver
}
goto out;
} else
- if (cdevsw[major(dev)].d_open == ptmx_open)
+ if (cdevsw[major(dev)].d_open == ptmx_open) {
switch (cmd) {
case TIOCGPGRP:
ttyflush(tp, FREAD|FWRITE);
goto out;
-#if COMPAT_43_TTY
case TIOCSETP:
case TIOCSETN:
-#endif
case TIOCSETD:
- case TIOCSETA:
- case TIOCSETAW:
- case TIOCSETAF:
+ case TIOCSETA_32:
+ case TIOCSETAW_32:
+ case TIOCSETAF_32:
+ case TIOCSETA_64:
+ case TIOCSETAW_64:
+ case TIOCSETAF_64:
ndflush(&tp->t_outq, tp->t_outq.c_cc);
break;
}
if ((tp->t_lflag&NOFLSH) == 0)
ttyflush(tp, FREAD|FWRITE);
- tty_pgsignal(tp, *(unsigned int *)data, 1);
if ((*(unsigned int *)data == SIGINFO) &&
((tp->t_lflag&NOKERNINFO) == 0))
- ttyinfo(tp);
+ ttyinfo_locked(tp);
+ /*
+ * SAFE: All callers drop the lock on return and
+ * SAFE: the linesw[] will short circut this call
+ * SAFE: if the ioctl() is eaten before the lower
+ * SAFE: level code gets to see it.
+ */
+ tty_unlock(tp);
+ tty_pgsignal(tp, *(unsigned int *)data, 1);
+ tty_lock(tp);
goto out;
case TIOCPTYGRANT: /* grantpt(3) */
error = 0;
goto out;
}
+
+ /*
+ * Fail all other calls; pty masters are not serial devices;
+ * we only pretend they are when the slave side of the pty is
+ * already open.
+ */
+ if (!allow_ext_ioctl) {
+ error = ENOTTY;
+ goto out;
+ }
+ }
error = (*linesw[tp->t_line].l_ioctl)(tp, cmd, data, flag, p);
if (error == ENOTTY) {
- error = ttioctl(tp, cmd, data, flag, p);
+ error = ttioctl_locked(tp, cmd, data, flag, p);
if (error == ENOTTY) {
if (pti->pt_flags & PF_UCNTL && (cmd & ~0xff) == UIOCCMD(0)) {
/* Process the UIOCMD ioctl group */
*/
if ((tp->t_lflag&EXTPROC) && (pti->pt_flags & PF_PKT)) {
switch(cmd) {
- case TIOCSETA:
- case TIOCSETAW:
- case TIOCSETAF:
-#if COMPAT_43_TTY
+ case TIOCSETA_32:
+ case TIOCSETAW_32:
+ case TIOCSETAF_32:
+ case TIOCSETA_64:
+ case TIOCSETAW_64:
+ case TIOCSETAF_64:
case TIOCSETP:
case TIOCSETN:
-#endif
-#if COMPAT_43_TTY || defined(COMPAT_SUNOS)
case TIOCSETC:
case TIOCSLTC:
case TIOCLBIS:
case TIOCLBIC:
case TIOCLSET:
-#endif
pti->pt_send |= TIOCPKT_IOCTL;
ptmx_wakeup(tp, FREAD);
default:
}
}
out:
- (void) thread_funnel_set(kernel_flock, funnel_state);
+ tty_unlock(tp);
return (error);
}
+
+/*
+ * kqueue support.
+ */
+int ptsd_kqfilter(dev_t, struct knote *);
+static void ptsd_kqops_detach(struct knote *);
+static int ptsd_kqops_event(struct knote *, long);
+
+/* Filter ops installed on slave-side knotes by ptsd_kqfilter(). */
+static struct filterops ptsd_kqops = {
+	.f_isfd = 1,
+	.f_detach = ptsd_kqops_detach,
+	.f_event = ptsd_kqops_event,
+};
+
+/*
+ * kn_hook sentinel values: NULL while the knote is live; set to
+ * PTSD_KNOTE_REVOKED by revoke processing so f_event reports EOF
+ * without touching the (possibly recycled) ptmx_ioctl structure.
+ */
+#define PTSD_KNOTE_VALID	NULL
+#define PTSD_KNOTE_REVOKED	((void *)-911l)
+
+/*
+ * In the normal case, by the time the driver_close() routine is called
+ * on the slave, all knotes have been detached. However in the revoke(2)
+ * case, the driver's close routine is called while there are knotes active
+ * that reference the handlers below. And we have no obvious means to
+ * reach from the driver out to the kqueue's that reference them to get
+ * them to stop.
+ */
+
+/*
+ * Detach a knote from the slave tty's read or write selinfo list.
+ *
+ * kn_hookid carries the dev_t; it is zeroed once the knote has been
+ * removed, so a detach that races with (or follows) revoke processing
+ * is a passive no-op.  The per-minor kevent mutex serializes this
+ * against ptsd_revoke_knotes().
+ */
+static void
+ptsd_kqops_detach(struct knote *kn)
+{
+	struct ptmx_ioctl *pti;
+	struct tty *tp;
+	dev_t dev, lockdev = (dev_t)kn->kn_hookid;
+
+	/* lockdev snapshots the hookid so the same mutex is used for
+	 * lock and unlock even though kn_hookid may be cleared below */
+	ptsd_kevent_mtx_lock(minor(lockdev));
+
+	if ((dev = (dev_t)kn->kn_hookid) != 0) {
+		pti = ptmx_get_ioctl(minor(dev), 0);
+		if (pti != NULL && (tp = pti->pt_tty) != NULL) {
+			tty_lock(tp);
+			if (kn->kn_filter == EVFILT_READ)
+				KNOTE_DETACH(&tp->t_rsel.si_note, kn);
+			else
+				KNOTE_DETACH(&tp->t_wsel.si_note, kn);
+			tty_unlock(tp);
+			kn->kn_hookid = 0;
+		}
+	}
+
+	ptsd_kevent_mtx_unlock(minor(lockdev));
+}
+
+/*
+ * Evaluate a slave-side knote: returns nonzero if the event should
+ * fire (data readable, output space available, error, or EOF).
+ *
+ * The tty lock is only taken here when hint == 0 (call from kqueue);
+ * a nonzero hint presumably means the caller already holds the tty
+ * lock (delivery via KNOTE from tty code) — NOTE(review): confirm
+ * against the hint conventions of the callers.
+ */
+static int
+ptsd_kqops_event(struct knote *kn, long hint)
+{
+	struct ptmx_ioctl *pti;
+	struct tty *tp;
+	dev_t dev = (dev_t)kn->kn_hookid;
+	int retval = 0;
+
+	ptsd_kevent_mtx_lock(minor(dev));
+
+	do {
+		if (kn->kn_hook != PTSD_KNOTE_VALID ) {
+			/* We were revoked */
+			kn->kn_data = 0;
+			kn->kn_flags |= EV_EOF;
+			retval = 1;
+			break;
+		}
+
+		/* Device vanished underneath us: report ENXIO via EV_ERROR. */
+		pti = ptmx_get_ioctl(minor(dev), 0);
+		if (pti == NULL || (tp = pti->pt_tty) == NULL) {
+			kn->kn_data = ENXIO;
+			kn->kn_flags |= EV_ERROR;
+			retval = 1;
+			break;
+		}
+
+		if (hint == 0)
+			tty_lock(tp);
+
+		if (kn->kn_filter == EVFILT_READ) {
+			kn->kn_data = ttnread(tp);
+			if (kn->kn_data > 0)
+				retval = 1;
+			if (ISSET(tp->t_state, TS_ZOMBIE)) {
+				kn->kn_flags |= EV_EOF;
+				retval = 1;
+			}
+		} else {	/* EVFILT_WRITE */
+			if ((tp->t_outq.c_cc <= tp->t_lowat) &&
+			    ISSET(tp->t_state, TS_CONNECTED)) {
+				/* report remaining space in the output queue */
+				kn->kn_data = tp->t_outq.c_cn - tp->t_outq.c_cc;
+				retval = 1;
+			}
+			if (ISSET(tp->t_state, TS_ZOMBIE)) {
+				kn->kn_flags |= EV_EOF;
+				retval = 1;
+			}
+		}
+
+		if (hint == 0)
+			tty_unlock(tp);
+	} while (0);
+
+	ptsd_kevent_mtx_unlock(minor(dev));
+
+	return (retval);
+}
+/*
+ * Attach a knote to the slave side of a pty.
+ *
+ * Returns:	0		Success
+ *		EINVAL		Not a ptsd device, or unsupported filter
+ *		ENXIO		No ioctl structure for this minor
+ *
+ * kn_hookid remembers the dev_t so detach/event can find the tty;
+ * kn_hook is set to PTSD_KNOTE_VALID (cleared on revoke).
+ */
+int
+ptsd_kqfilter(dev_t dev, struct knote *kn)
+{
+	struct tty *tp = NULL;
+	struct ptmx_ioctl *pti = NULL;
+	int retval = 0;
+
+	/* make sure we're talking about the right device type */
+	if (cdevsw[major(dev)].d_open != ptsd_open) {
+		return (EINVAL);
+	}
+
+	if ((pti = ptmx_get_ioctl(minor(dev), 0)) == NULL) {
+		return (ENXIO);
+	}
+
+	tp = pti->pt_tty;
+	tty_lock(tp);
+
+	kn->kn_hookid = dev;
+	kn->kn_hook = PTSD_KNOTE_VALID;
+	kn->kn_fop = &ptsd_kqops;
+
+	switch (kn->kn_filter) {
+	case EVFILT_READ:
+		KNOTE_ATTACH(&tp->t_rsel.si_note, kn);
+		break;
+	case EVFILT_WRITE:
+		KNOTE_ATTACH(&tp->t_wsel.si_note, kn);
+		break;
+	default:
+		retval = EINVAL;
+		break;
+	}
+
+	tty_unlock(tp);
+	return (retval);
+}
+
+/*
+ * Support for revoke(2).
+ *
+ * Mark all the kn_hook fields so that future invocations of the
+ * f_event op will just say "EOF" *without* looking at the
+ * ptmx_ioctl structure (which may disappear or be recycled at
+ * the end of ptsd_close). Issue wakeups to post that EOF to
+ * anyone listening. And finally remove the knotes from the
+ * tty's klists to keep ttyclose() happy, and set the hookid to
+ * zero to make the final detach passively successful.
+ */
+static void
+ptsd_revoke_knotes(dev_t dev, struct tty *tp)
+{
+	struct klist *list;
+	struct knote *kn, *tkn;
+
+	/* (Hold and drop the right locks in the right order.) */
+
+	/* Phase 1: mark every attached knote revoked, under both the
+	 * kevent interlock and the tty lock. */
+	ptsd_kevent_mtx_lock(minor(dev));
+	tty_lock(tp);
+
+	list = &tp->t_rsel.si_note;
+	SLIST_FOREACH(kn, list, kn_selnext)
+		kn->kn_hook = PTSD_KNOTE_REVOKED;
+
+	list = &tp->t_wsel.si_note;
+	SLIST_FOREACH(kn, list, kn_selnext)
+		kn->kn_hook = PTSD_KNOTE_REVOKED;
+
+	tty_unlock(tp);
+	ptsd_kevent_mtx_unlock(minor(dev));
+
+	/* Phase 2: wake readers and writers so listeners observe the EOF
+	 * (dropping the kevent mutex first — the wakeups can reach
+	 * f_event, which takes it). */
+	tty_lock(tp);
+	ttwakeup(tp);
+	ttwwakeup(tp);
+	tty_unlock(tp);
+
+	/* Phase 3: detach the knotes from the tty's klists (keeps
+	 * ttyclose() happy) and zero kn_hookid so the eventual
+	 * ptsd_kqops_detach() is a passive no-op. */
+	ptsd_kevent_mtx_lock(minor(dev));
+	tty_lock(tp);
+
+	list = &tp->t_rsel.si_note;
+	SLIST_FOREACH_SAFE(kn, list, kn_selnext, tkn) {
+		(void) KNOTE_DETACH(list, kn);
+		kn->kn_hookid = 0;
+	}
+
+	list = &tp->t_wsel.si_note;
+	SLIST_FOREACH_SAFE(kn, list, kn_selnext, tkn) {
+		(void) KNOTE_DETACH(list, kn);
+		kn->kn_hookid = 0;
+	}
+
+	tty_unlock(tp);
+	ptsd_kevent_mtx_unlock(minor(dev));
+}