X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/3e170ce000f1506b7b5d2c5c7faec85ceabb573d..c6bf4f310a33a9262d455ea4d3f0630b1255e3fe:/bsd/kern/kern_control.c diff --git a/bsd/kern/kern_control.c b/bsd/kern/kern_control.c index ebda4203d..3142cbda6 100644 --- a/bsd/kern/kern_control.c +++ b/bsd/kern/kern_control.c @@ -1,5 +1,5 @@ /* - * Copyright (c) 1999-2015 Apple Inc. All rights reserved. + * Copyright (c) 1999-2017 Apple Inc. All rights reserved. * * @APPLE_OSREFERENCE_LICENSE_HEADER_START@ * @@ -58,81 +58,95 @@ #include struct kctl { - TAILQ_ENTRY(kctl) next; /* controller chain */ - kern_ctl_ref kctlref; + TAILQ_ENTRY(kctl) next; /* controller chain */ + kern_ctl_ref kctlref; /* controller information provided when registering */ - char name[MAX_KCTL_NAME]; /* unique identifier */ - u_int32_t id; - u_int32_t reg_unit; + char name[MAX_KCTL_NAME]; /* unique identifier */ + u_int32_t id; + u_int32_t reg_unit; /* misc communication information */ - u_int32_t flags; /* support flags */ - u_int32_t recvbufsize; /* request more than the default buffer size */ - u_int32_t sendbufsize; /* request more than the default buffer size */ + u_int32_t flags; /* support flags */ + u_int32_t recvbufsize; /* request more than the default buffer size */ + u_int32_t sendbufsize; /* request more than the default buffer size */ /* Dispatch functions */ - ctl_connect_func connect; /* Make contact */ - ctl_disconnect_func disconnect; /* Break contact */ - ctl_send_func send; /* Send data to nke */ - ctl_send_list_func send_list; /* Send list of packets */ - ctl_setopt_func setopt; /* set kctl configuration */ - ctl_getopt_func getopt; /* get kctl configuration */ - ctl_rcvd_func rcvd; /* Notify nke when client reads data */ - - TAILQ_HEAD(, ctl_cb) kcb_head; - u_int32_t lastunit; + ctl_bind_func bind; /* Prepare contact */ + ctl_connect_func connect; /* Make contact */ + ctl_disconnect_func disconnect; /* Break contact */ + ctl_send_func send; /* Send data to nke */ + ctl_send_list_func send_list; /* Send list of packets */ + ctl_setopt_func setopt; /* set kctl configuration */ + ctl_getopt_func getopt; /* get kctl configuration */ + ctl_rcvd_func rcvd; /* Notify nke when client reads data */ + + TAILQ_HEAD(, ctl_cb) kcb_head; + u_int32_t lastunit; }; +#if DEVELOPMENT || DEBUG +enum ctl_status { + KCTL_DISCONNECTED = 0, + KCTL_CONNECTING = 1, + KCTL_CONNECTED = 2 +}; +#endif /* DEVELOPMENT || DEBUG */ + struct ctl_cb { - TAILQ_ENTRY(ctl_cb) next; /* controller chain */ - lck_mtx_t *mtx; - struct socket *so; /* controlling socket */ - struct kctl *kctl; /* back pointer to controller */ - void *userdata; - u_int32_t unit; - u_int32_t usecount; + TAILQ_ENTRY(ctl_cb) next; /* controller chain */ + lck_mtx_t *mtx; + struct socket *so; /* controlling socket */ + struct kctl *kctl; /* back pointer to controller */ + void *userdata; + struct sockaddr_ctl sac; + u_int32_t usecount; + u_int32_t kcb_usecount; +#if DEVELOPMENT || DEBUG + enum ctl_status status; +#endif /* DEVELOPMENT || DEBUG */ }; #ifndef ROUNDUP64 -#define ROUNDUP64(x) P2ROUNDUP((x), sizeof (u_int64_t)) +#define ROUNDUP64(x) P2ROUNDUP((x), sizeof (u_int64_t)) #endif #ifndef ADVANCE64 -#define ADVANCE64(p, n) (void*)((char *)(p) + ROUNDUP64(n)) +#define ADVANCE64(p, n) (void*)((char *)(p) + ROUNDUP64(n)) #endif /* * Definitions and vars for we support */ -#define CTL_SENDSIZE (2 * 1024) /* default buffer size */ -#define CTL_RECVSIZE (8 * 1024) /* default buffer size */ +#define CTL_SENDSIZE (2 * 1024) /* default buffer size */ +#define 
CTL_RECVSIZE (8 * 1024) /* default buffer size */ /* * Definitions and vars for we support */ -static u_int32_t ctl_maxunit = 65536; -static lck_grp_attr_t *ctl_lck_grp_attr = 0; -static lck_attr_t *ctl_lck_attr = 0; -static lck_grp_t *ctl_lck_grp = 0; -static lck_mtx_t *ctl_mtx; +static u_int32_t ctl_maxunit = 65536; +static lck_grp_attr_t *ctl_lck_grp_attr = 0; +static lck_attr_t *ctl_lck_attr = 0; +static lck_grp_t *ctl_lck_grp = 0; +static lck_mtx_t *ctl_mtx; /* all the controllers are chained */ -TAILQ_HEAD(kctl_list, kctl) ctl_head; +TAILQ_HEAD(kctl_list, kctl) ctl_head; static int ctl_attach(struct socket *, int, struct proc *); static int ctl_detach(struct socket *); static int ctl_sofreelastref(struct socket *so); +static int ctl_bind(struct socket *, struct sockaddr *, struct proc *); static int ctl_connect(struct socket *, struct sockaddr *, struct proc *); static int ctl_disconnect(struct socket *); static int ctl_ioctl(struct socket *so, u_long cmd, caddr_t data, - struct ifnet *ifp, struct proc *p); + struct ifnet *ifp, struct proc *p); static int ctl_send(struct socket *, int, struct mbuf *, - struct sockaddr *, struct mbuf *, struct proc *); + struct sockaddr *, struct mbuf *, struct proc *); static int ctl_send_list(struct socket *, int, struct mbuf *, - struct sockaddr *, struct mbuf *, struct proc *); + struct sockaddr *, struct mbuf *, struct proc *); static int ctl_ctloutput(struct socket *, struct sockopt *); static int ctl_peeraddr(struct socket *so, struct sockaddr **nam); static int ctl_usr_rcvd(struct socket *so, int flags); @@ -141,7 +155,7 @@ static struct kctl *ctl_find_by_name(const char *); static struct kctl *ctl_find_by_id_unit(u_int32_t id, u_int32_t unit); static struct socket *kcb_find_socket(kern_ctl_ref kctlref, u_int32_t unit, - u_int32_t *); + u_int32_t *); static struct ctl_cb *kcb_find(struct kctl *, u_int32_t unit); static void ctl_post_msg(u_int32_t event_code, u_int32_t id); @@ -150,42 +164,43 @@ static int ctl_unlock(struct socket *, int, void *); static lck_mtx_t * ctl_getlock(struct socket *, int); static struct pr_usrreqs ctl_usrreqs = { - .pru_attach = ctl_attach, - .pru_connect = ctl_connect, - .pru_control = ctl_ioctl, - .pru_detach = ctl_detach, - .pru_disconnect = ctl_disconnect, - .pru_peeraddr = ctl_peeraddr, - .pru_rcvd = ctl_usr_rcvd, - .pru_send = ctl_send, - .pru_send_list = ctl_send_list, - .pru_sosend = sosend, - .pru_sosend_list = sosend_list, - .pru_soreceive = soreceive, - .pru_soreceive_list = soreceive_list, + .pru_attach = ctl_attach, + .pru_bind = ctl_bind, + .pru_connect = ctl_connect, + .pru_control = ctl_ioctl, + .pru_detach = ctl_detach, + .pru_disconnect = ctl_disconnect, + .pru_peeraddr = ctl_peeraddr, + .pru_rcvd = ctl_usr_rcvd, + .pru_send = ctl_send, + .pru_send_list = ctl_send_list, + .pru_sosend = sosend, + .pru_sosend_list = sosend_list, + .pru_soreceive = soreceive, + .pru_soreceive_list = soreceive_list, }; static struct protosw kctlsw[] = { -{ - .pr_type = SOCK_DGRAM, - .pr_protocol = SYSPROTO_CONTROL, - .pr_flags = PR_ATOMIC|PR_CONNREQUIRED|PR_PCBLOCK|PR_WANTRCVD, - .pr_ctloutput = ctl_ctloutput, - .pr_usrreqs = &ctl_usrreqs, - .pr_lock = ctl_lock, - .pr_unlock = ctl_unlock, - .pr_getlock = ctl_getlock, -}, -{ - .pr_type = SOCK_STREAM, - .pr_protocol = SYSPROTO_CONTROL, - .pr_flags = PR_CONNREQUIRED|PR_PCBLOCK|PR_WANTRCVD, - .pr_ctloutput = ctl_ctloutput, - .pr_usrreqs = &ctl_usrreqs, - .pr_lock = ctl_lock, - .pr_unlock = ctl_unlock, - .pr_getlock = ctl_getlock, -} + { + .pr_type = SOCK_DGRAM, + 
.pr_protocol = SYSPROTO_CONTROL, + .pr_flags = PR_ATOMIC | PR_CONNREQUIRED | PR_PCBLOCK | PR_WANTRCVD, + .pr_ctloutput = ctl_ctloutput, + .pr_usrreqs = &ctl_usrreqs, + .pr_lock = ctl_lock, + .pr_unlock = ctl_unlock, + .pr_getlock = ctl_getlock, + }, + { + .pr_type = SOCK_STREAM, + .pr_protocol = SYSPROTO_CONTROL, + .pr_flags = PR_CONNREQUIRED | PR_PCBLOCK | PR_WANTRCVD, + .pr_ctloutput = ctl_ctloutput, + .pr_usrreqs = &ctl_usrreqs, + .pr_lock = ctl_lock, + .pr_unlock = ctl_unlock, + .pr_getlock = ctl_getlock, + } }; __private_extern__ int kctl_reg_list SYSCTL_HANDLER_ARGS; @@ -194,7 +209,7 @@ __private_extern__ int kctl_getstat SYSCTL_HANDLER_ARGS; SYSCTL_NODE(_net_systm, OID_AUTO, kctl, - CTLFLAG_RW|CTLFLAG_LOCKED, 0, "Kernel control family"); + CTLFLAG_RW | CTLFLAG_LOCKED, 0, "Kernel control family"); struct kctlstat kctlstat; SYSCTL_PROC(_net_systm_kctl, OID_AUTO, stats, @@ -202,29 +217,36 @@ SYSCTL_PROC(_net_systm_kctl, OID_AUTO, stats, kctl_getstat, "S,kctlstat", ""); SYSCTL_PROC(_net_systm_kctl, OID_AUTO, reg_list, - CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, - kctl_reg_list, "S,xkctl_reg", ""); + CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, + kctl_reg_list, "S,xkctl_reg", ""); SYSCTL_PROC(_net_systm_kctl, OID_AUTO, pcblist, - CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, - kctl_pcblist, "S,xkctlpcb", ""); + CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED, 0, 0, + kctl_pcblist, "S,xkctlpcb", ""); u_int32_t ctl_autorcvbuf_max = 256 * 1024; SYSCTL_INT(_net_systm_kctl, OID_AUTO, autorcvbufmax, - CTLFLAG_RW | CTLFLAG_LOCKED, &ctl_autorcvbuf_max, 0, ""); + CTLFLAG_RW | CTLFLAG_LOCKED, &ctl_autorcvbuf_max, 0, ""); u_int32_t ctl_autorcvbuf_high = 0; SYSCTL_INT(_net_systm_kctl, OID_AUTO, autorcvbufhigh, - CTLFLAG_RD | CTLFLAG_LOCKED, &ctl_autorcvbuf_high, 0, ""); + CTLFLAG_RD | CTLFLAG_LOCKED, &ctl_autorcvbuf_high, 0, ""); u_int32_t ctl_debug = 0; SYSCTL_INT(_net_systm_kctl, OID_AUTO, debug, - CTLFLAG_RW | CTLFLAG_LOCKED, &ctl_debug, 0, ""); + CTLFLAG_RW | CTLFLAG_LOCKED, &ctl_debug, 0, ""); -#define KCTL_TBL_INC 16 +#if DEVELOPMENT || DEBUG +u_int32_t ctl_panic_debug = 0; +SYSCTL_INT(_net_systm_kctl, OID_AUTO, panicdebug, + CTLFLAG_RW | CTLFLAG_LOCKED, &ctl_panic_debug, 0, ""); +#endif /* DEVELOPMENT || DEBUG */ + +#define KCTL_TBL_INC 16 static uintptr_t kctl_tbl_size = 0; static u_int32_t kctl_tbl_growing = 0; +static u_int32_t kctl_tbl_growing_waiting = 0; static uintptr_t kctl_tbl_count = 0; static struct kctl **kctl_table = NULL; static uintptr_t kctl_ref_gencnt = 0; @@ -242,7 +264,7 @@ kern_control_init(struct domain *dp) { struct protosw *pr; int i; - int kctl_proto_count = (sizeof (kctlsw) / sizeof (struct protosw)); + int kctl_proto_count = (sizeof(kctlsw) / sizeof(struct protosw)); VERIFY(!(dp->dom_flags & DOM_INITIALIZED)); VERIFY(dp == systemdomain); @@ -273,16 +295,18 @@ kern_control_init(struct domain *dp) } TAILQ_INIT(&ctl_head); - for (i = 0, pr = &kctlsw[0]; i < kctl_proto_count; i++, pr++) + for (i = 0, pr = &kctlsw[0]; i < kctl_proto_count; i++, pr++) { net_add_proto(pr, dp, 1); + } } static void kcb_delete(struct ctl_cb *kcb) { if (kcb != 0) { - if (kcb->mtx != 0) + if (kcb->mtx != 0) { lck_mtx_free(kcb->mtx, ctl_lck_grp); + } FREE(kcb, M_TEMP); } } @@ -298,7 +322,7 @@ ctl_attach(struct socket *so, int proto, struct proc *p) { #pragma unused(proto, p) int error = 0; - struct ctl_cb *kcb = 0; + struct ctl_cb *kcb = 0; MALLOC(kcb, struct ctl_cb *, sizeof(struct ctl_cb), M_TEMP, M_WAITOK); if (kcb == NULL) { @@ -320,18 +344,18 @@ quit: kcb_delete(kcb); 
kcb = 0; } - return (error); + return error; } static int ctl_sofreelastref(struct socket *so) { - struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb; + struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb; so->so_pcb = 0; if (kcb != 0) { - struct kctl *kctl; + struct kctl *kctl; if ((kctl = kcb->kctl) != 0) { lck_mtx_lock(ctl_mtx); TAILQ_REMOVE(&kctl->kcb_head, kcb, next); @@ -342,39 +366,86 @@ ctl_sofreelastref(struct socket *so) kcb_delete(kcb); } sofreelastref(so, 1); - return (0); + return 0; +} + +/* + * Use this function to serialize calls into the kctl subsystem + */ +static void +ctl_kcb_increment_use_count(struct ctl_cb *kcb, lck_mtx_t *mutex_held) +{ + LCK_MTX_ASSERT(mutex_held, LCK_MTX_ASSERT_OWNED); + while (kcb->kcb_usecount > 0) { + msleep(&kcb->kcb_usecount, mutex_held, PSOCK | PCATCH, "kcb_usecount", NULL); + } + kcb->kcb_usecount++; +} + +static void +clt_kcb_decrement_use_count(struct ctl_cb *kcb) +{ + assert(kcb->kcb_usecount != 0); + kcb->kcb_usecount--; + wakeup_one((caddr_t)&kcb->kcb_usecount); } static int ctl_detach(struct socket *so) { - struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb; + struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb; + + if (kcb == 0) { + return 0; + } + + lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK); + ctl_kcb_increment_use_count(kcb, mtx_held); - if (kcb == 0) - return (0); + if (kcb->kctl != NULL && kcb->kctl->bind != NULL && + kcb->userdata != NULL && !(so->so_state & SS_ISCONNECTED)) { + // The unit was bound, but not connected + // Invoke the disconnected call to cleanup + if (kcb->kctl->disconnect != NULL) { + socket_unlock(so, 0); + (*kcb->kctl->disconnect)(kcb->kctl->kctlref, + kcb->sac.sc_unit, kcb->userdata); + socket_lock(so, 0); + } + } soisdisconnected(so); +#if DEVELOPMENT || DEBUG + kcb->status = KCTL_DISCONNECTED; +#endif /* DEVELOPMENT || DEBUG */ so->so_flags |= SOF_PCBCLEARING; - return (0); + clt_kcb_decrement_use_count(kcb); + return 0; } static int -ctl_connect(struct socket *so, struct sockaddr *nam, struct proc *p) +ctl_setup_kctl(struct socket *so, struct sockaddr *nam, struct proc *p) { -#pragma unused(p) - struct kctl *kctl; - int error = 0; - struct sockaddr_ctl sa; - struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb; - struct ctl_cb *kcb_next = NULL; - u_quad_t sbmaxsize; - u_int32_t recvbufsize, sendbufsize; - - if (kcb == 0) - panic("ctl_connect so_pcb null\n"); + struct kctl *kctl = NULL; + int error = 0; + struct sockaddr_ctl sa; + struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb; + struct ctl_cb *kcb_next = NULL; + u_quad_t sbmaxsize; + u_int32_t recvbufsize, sendbufsize; - if (nam->sa_len != sizeof(struct sockaddr_ctl)) - return (EINVAL); + if (kcb == 0) { + panic("ctl_setup_kctl so_pcb null\n"); + } + + if (kcb->kctl != NULL) { + // Already set up, skip + return 0; + } + + if (nam->sa_len != sizeof(struct sockaddr_ctl)) { + return EINVAL; + } bcopy(nam, &sa, sizeof(struct sockaddr_ctl)); @@ -382,56 +453,57 @@ ctl_connect(struct socket *so, struct sockaddr *nam, struct proc *p) kctl = ctl_find_by_id_unit(sa.sc_id, sa.sc_unit); if (kctl == NULL) { lck_mtx_unlock(ctl_mtx); - return (ENOENT); + return ENOENT; } if (((kctl->flags & CTL_FLAG_REG_SOCK_STREAM) && - (so->so_type != SOCK_STREAM)) || - (!(kctl->flags & CTL_FLAG_REG_SOCK_STREAM) && - (so->so_type != SOCK_DGRAM))) { + (so->so_type != SOCK_STREAM)) || + (!(kctl->flags & CTL_FLAG_REG_SOCK_STREAM) && + (so->so_type != SOCK_DGRAM))) { lck_mtx_unlock(ctl_mtx); - return (EPROTOTYPE); + return EPROTOTYPE; } if (kctl->flags & CTL_FLAG_PRIVILEGED) { if (p 
== 0) { lck_mtx_unlock(ctl_mtx); - return (EINVAL); + return EINVAL; } if (kauth_cred_issuser(kauth_cred_get()) == 0) { lck_mtx_unlock(ctl_mtx); - return (EPERM); + return EPERM; } } if ((kctl->flags & CTL_FLAG_REG_ID_UNIT) || sa.sc_unit != 0) { if (kcb_find(kctl, sa.sc_unit) != NULL) { lck_mtx_unlock(ctl_mtx); - return (EBUSY); + return EBUSY; } } else { /* Find an unused ID, assumes control IDs are in order */ - u_int32_t unit = 1; + u_int32_t unit = 1; TAILQ_FOREACH(kcb_next, &kctl->kcb_head, next) { - if (kcb_next->unit > unit) { + if (kcb_next->sac.sc_unit > unit) { /* Found a gap, lets fill it in */ break; } - unit = kcb_next->unit + 1; - if (unit == ctl_maxunit) + unit = kcb_next->sac.sc_unit + 1; + if (unit == ctl_maxunit) { break; + } } if (unit == ctl_maxunit) { lck_mtx_unlock(ctl_mtx); - return (EBUSY); + return EBUSY; } sa.sc_unit = unit; } - kcb->unit = sa.sc_unit; + bcopy(&sa, &kcb->sac, sizeof(struct sockaddr_ctl)); kcb->kctl = kctl; if (kcb_next != NULL) { TAILQ_INSERT_BEFORE(kcb_next, kcb, next); @@ -449,75 +521,183 @@ ctl_connect(struct socket *so, struct sockaddr *nam, struct proc *p) */ sbmaxsize = (u_quad_t)sb_max * MCLBYTES / (MSIZE + MCLBYTES); - if (kctl->sendbufsize > sbmaxsize) + if (kctl->sendbufsize > sbmaxsize) { sendbufsize = sbmaxsize; - else + } else { sendbufsize = kctl->sendbufsize; + } - if (kctl->recvbufsize > sbmaxsize) + if (kctl->recvbufsize > sbmaxsize) { recvbufsize = sbmaxsize; - else + } else { recvbufsize = kctl->recvbufsize; + } error = soreserve(so, sendbufsize, recvbufsize); if (error) { - printf("%s - soreserve(%llx, %u, %u) error %d\n", __func__, - (uint64_t)VM_KERNEL_ADDRPERM(so), - sendbufsize, recvbufsize, error); + if (ctl_debug) { + printf("%s - soreserve(%llx, %u, %u) error %d\n", + __func__, (uint64_t)VM_KERNEL_ADDRPERM(so), + sendbufsize, recvbufsize, error); + } goto done; } - soisconnecting(so); + +done: + if (error) { + soisdisconnected(so); +#if DEVELOPMENT || DEBUG + kcb->status = KCTL_DISCONNECTED; +#endif /* DEVELOPMENT || DEBUG */ + lck_mtx_lock(ctl_mtx); + TAILQ_REMOVE(&kctl->kcb_head, kcb, next); + kcb->kctl = NULL; + kcb->sac.sc_unit = 0; + kctlstat.kcs_pcbcount--; + kctlstat.kcs_gencnt++; + kctlstat.kcs_conn_fail++; + lck_mtx_unlock(ctl_mtx); + } + return error; +} + +static int +ctl_bind(struct socket *so, struct sockaddr *nam, struct proc *p) +{ + int error = 0; + struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb; + + if (kcb == NULL) { + panic("ctl_bind so_pcb null\n"); + } + + lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK); + ctl_kcb_increment_use_count(kcb, mtx_held); + + error = ctl_setup_kctl(so, nam, p); + if (error) { + goto out; + } + + if (kcb->kctl == NULL) { + panic("ctl_bind kctl null\n"); + } + + if (kcb->kctl->bind == NULL) { + error = EINVAL; + goto out; + } socket_unlock(so, 0); - error = (*kctl->connect)(kctl->kctlref, &sa, &kcb->userdata); + error = (*kcb->kctl->bind)(kcb->kctl->kctlref, &kcb->sac, &kcb->userdata); socket_lock(so, 0); - if (error) - goto end; +out: + clt_kcb_decrement_use_count(kcb); + return error; +} + +static int +ctl_connect(struct socket *so, struct sockaddr *nam, struct proc *p) +{ + int error = 0; + struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb; + + if (kcb == NULL) { + panic("ctl_connect so_pcb null\n"); + } + + lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK); + ctl_kcb_increment_use_count(kcb, mtx_held); + +#if DEVELOPMENT || DEBUG + if (kcb->status != KCTL_DISCONNECTED && ctl_panic_debug) { + panic("kctl already connecting/connected"); + } + kcb->status 
= KCTL_CONNECTING; +#endif /* DEVELOPMENT || DEBUG */ + + error = ctl_setup_kctl(so, nam, p); + if (error) { + goto out; + } + + if (kcb->kctl == NULL) { + panic("ctl_connect kctl null\n"); + } + + soisconnecting(so); + socket_unlock(so, 0); + error = (*kcb->kctl->connect)(kcb->kctl->kctlref, &kcb->sac, &kcb->userdata); + socket_lock(so, 0); + if (error) { + goto end; + } soisconnected(so); +#if DEVELOPMENT || DEBUG + kcb->status = KCTL_CONNECTED; +#endif /* DEVELOPMENT || DEBUG */ end: - if (error && kctl->disconnect) { + if (error && kcb->kctl->disconnect) { + /* + * XXX Make sure we Don't check the return value + * of disconnect here. + * ipsec/utun_ctl_disconnect will return error when + * disconnect gets called after connect failure. + * However if we decide to check for disconnect return + * value here. Please make sure to revisit + * ipsec/utun_ctl_disconnect. + */ socket_unlock(so, 0); - (*kctl->disconnect)(kctl->kctlref, kcb->unit, kcb->userdata); + (*kcb->kctl->disconnect)(kcb->kctl->kctlref, kcb->sac.sc_unit, kcb->userdata); socket_lock(so, 0); } -done: if (error) { soisdisconnected(so); +#if DEVELOPMENT || DEBUG + kcb->status = KCTL_DISCONNECTED; +#endif /* DEVELOPMENT || DEBUG */ lck_mtx_lock(ctl_mtx); - kcb->kctl = 0; - kcb->unit = 0; - TAILQ_REMOVE(&kctl->kcb_head, kcb, next); + TAILQ_REMOVE(&kcb->kctl->kcb_head, kcb, next); + kcb->kctl = NULL; + kcb->sac.sc_unit = 0; kctlstat.kcs_pcbcount--; kctlstat.kcs_gencnt++; kctlstat.kcs_conn_fail++; lck_mtx_unlock(ctl_mtx); } - return (error); +out: + clt_kcb_decrement_use_count(kcb); + return error; } static int ctl_disconnect(struct socket *so) { - struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb; + struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb; if ((kcb = (struct ctl_cb *)so->so_pcb)) { - struct kctl *kctl = kcb->kctl; + lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK); + ctl_kcb_increment_use_count(kcb, mtx_held); + struct kctl *kctl = kcb->kctl; if (kctl && kctl->disconnect) { socket_unlock(so, 0); - (*kctl->disconnect)(kctl->kctlref, kcb->unit, + (*kctl->disconnect)(kctl->kctlref, kcb->sac.sc_unit, kcb->userdata); socket_lock(so, 0); } soisdisconnected(so); +#if DEVELOPMENT || DEBUG + kcb->status = KCTL_DISCONNECTED; +#endif /* DEVELOPMENT || DEBUG */ socket_unlock(so, 0); lck_mtx_lock(ctl_mtx); kcb->kctl = 0; - kcb->unit = 0; + kcb->sac.sc_unit = 0; while (kcb->usecount != 0) { msleep(&kcb->usecount, ctl_mtx, 0, "kcb->usecount", 0); } @@ -526,33 +706,36 @@ ctl_disconnect(struct socket *so) kctlstat.kcs_gencnt++; lck_mtx_unlock(ctl_mtx); socket_lock(so, 0); + clt_kcb_decrement_use_count(kcb); } - return (0); + return 0; } static int ctl_peeraddr(struct socket *so, struct sockaddr **nam) { - struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb; - struct kctl *kctl; - struct sockaddr_ctl sc; + struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb; + struct kctl *kctl; + struct sockaddr_ctl sc; - if (kcb == NULL) /* sanity check */ - return (ENOTCONN); + if (kcb == NULL) { /* sanity check */ + return ENOTCONN; + } - if ((kctl = kcb->kctl) == NULL) - return (EINVAL); + if ((kctl = kcb->kctl) == NULL) { + return EINVAL; + } bzero(&sc, sizeof(struct sockaddr_ctl)); sc.sc_len = sizeof(struct sockaddr_ctl); sc.sc_family = AF_SYSTEM; sc.ss_sysaddr = AF_SYS_CONTROL; sc.sc_id = kctl->id; - sc.sc_unit = kcb->unit; + sc.sc_unit = kcb->sac.sc_unit; *nam = dup_sockaddr((struct sockaddr *)&sc, 1); - return (0); + return 0; } static void @@ -579,9 +762,10 @@ ctl_sbrcv_trim(struct socket *so) if (trim > 0) { sbreserve(sb, (sb->sb_hiwat - 
trim)); - if (ctl_debug) + if (ctl_debug) { printf("%s - shrunk to %d\n", __func__, sb->sb_hiwat); + } } } } @@ -589,85 +773,113 @@ ctl_sbrcv_trim(struct socket *so) static int ctl_usr_rcvd(struct socket *so, int flags) { - struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb; - struct kctl *kctl; + int error = 0; + struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb; + struct kctl *kctl; + + if (kcb == NULL) { + return ENOTCONN; + } + + lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK); + ctl_kcb_increment_use_count(kcb, mtx_held); if ((kctl = kcb->kctl) == NULL) { - return (EINVAL); + error = EINVAL; + goto out; } if (kctl->rcvd) { socket_unlock(so, 0); - (*kctl->rcvd)(kctl->kctlref, kcb->unit, kcb->userdata, flags); + (*kctl->rcvd)(kctl->kctlref, kcb->sac.sc_unit, kcb->userdata, flags); socket_lock(so, 0); } ctl_sbrcv_trim(so); - return (0); +out: + clt_kcb_decrement_use_count(kcb); + return error; } static int ctl_send(struct socket *so, int flags, struct mbuf *m, - struct sockaddr *addr, struct mbuf *control, - struct proc *p) + struct sockaddr *addr, struct mbuf *control, + struct proc *p) { #pragma unused(addr, p) - int error = 0; - struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb; - struct kctl *kctl; + int error = 0; + struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb; + struct kctl *kctl; - if (control) + if (control) { m_freem(control); + } - if (kcb == NULL) /* sanity check */ + if (kcb == NULL) { /* sanity check */ error = ENOTCONN; + } + + lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK); + ctl_kcb_increment_use_count(kcb, mtx_held); - if (error == 0 && (kctl = kcb->kctl) == NULL) + if (error == 0 && (kctl = kcb->kctl) == NULL) { error = EINVAL; + } if (error == 0 && kctl->send) { so_tc_update_stats(m, so, m_get_service_class(m)); socket_unlock(so, 0); - error = (*kctl->send)(kctl->kctlref, kcb->unit, kcb->userdata, + error = (*kctl->send)(kctl->kctlref, kcb->sac.sc_unit, kcb->userdata, m, flags); socket_lock(so, 0); } else { m_freem(m); - if (error == 0) + if (error == 0) { error = ENOTSUP; + } } - if (error != 0) + if (error != 0) { OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_send_fail); - return (error); + } + clt_kcb_decrement_use_count(kcb); + + return error; } static int ctl_send_list(struct socket *so, int flags, struct mbuf *m, - __unused struct sockaddr *addr, struct mbuf *control, - __unused struct proc *p) + __unused struct sockaddr *addr, struct mbuf *control, + __unused struct proc *p) { - int error = 0; - struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb; - struct kctl *kctl; + int error = 0; + struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb; + struct kctl *kctl; - if (control) + if (control) { m_freem_list(control); + } - if (kcb == NULL) /* sanity check */ + if (kcb == NULL) { /* sanity check */ error = ENOTCONN; + } + + lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK); + ctl_kcb_increment_use_count(kcb, mtx_held); - if (error == 0 && (kctl = kcb->kctl) == NULL) + if (error == 0 && (kctl = kcb->kctl) == NULL) { error = EINVAL; + } if (error == 0 && kctl->send_list) { struct mbuf *nxt; - for (nxt = m; nxt != NULL; nxt = nxt->m_nextpkt) + for (nxt = m; nxt != NULL; nxt = nxt->m_nextpkt) { so_tc_update_stats(nxt, so, m_get_service_class(nxt)); + } socket_unlock(so, 0); - error = (*kctl->send_list)(kctl->kctlref, kcb->unit, + error = (*kctl->send_list)(kctl->kctlref, kcb->sac.sc_unit, kcb->userdata, m, flags); socket_lock(so, 0); } else if (error == 0 && kctl->send) { @@ -677,45 +889,52 @@ ctl_send_list(struct socket *so, int flags, struct mbuf 
*m, m->m_nextpkt = NULL; so_tc_update_stats(m, so, m_get_service_class(m)); socket_unlock(so, 0); - error = (*kctl->send)(kctl->kctlref, kcb->unit, + error = (*kctl->send)(kctl->kctlref, kcb->sac.sc_unit, kcb->userdata, m, flags); socket_lock(so, 0); m = nextpkt; } - if (m != NULL) + if (m != NULL) { m_freem_list(m); + } } else { m_freem_list(m); - if (error == 0) + if (error == 0) { error = ENOTSUP; + } } - if (error != 0) + if (error != 0) { OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_send_list_fail); - return (error); + } + clt_kcb_decrement_use_count(kcb); + + return error; } static errno_t ctl_rcvbspace(struct socket *so, u_int32_t datasize, - u_int32_t kctlflags, u_int32_t flags) + u_int32_t kctlflags, u_int32_t flags) { struct sockbuf *sb = &so->so_rcv; u_int32_t space = sbspace(sb); errno_t error; if ((kctlflags & CTL_FLAG_REG_CRIT) == 0) { - if ((u_int32_t) space >= datasize) + if ((u_int32_t) space >= datasize) { error = 0; - else + } else { error = ENOBUFS; + } } else if ((flags & CTL_DATA_CRIT) == 0) { /* * Reserve 25% for critical messages */ if (space < (sb->sb_hiwat >> 2) || - space < datasize) + space < datasize) { error = ENOBUFS; - else + } else { error = 0; + } } else { u_int32_t autorcvbuf_max; @@ -723,7 +942,7 @@ ctl_rcvbspace(struct socket *so, u_int32_t datasize, * Allow overcommit of 25% */ autorcvbuf_max = min(sb->sb_idealsize + (sb->sb_idealsize >> 2), - ctl_autorcvbuf_max); + ctl_autorcvbuf_max); if ((u_int32_t) space >= datasize) { error = 0; @@ -736,9 +955,9 @@ ctl_rcvbspace(struct socket *so, u_int32_t datasize, if (sbreserve(sb, min((sb->sb_hiwat + grow), autorcvbuf_max)) == 1) { - - if (sb->sb_hiwat > ctl_autorcvbuf_high) + if (sb->sb_hiwat > ctl_autorcvbuf_high) { ctl_autorcvbuf_high = sb->sb_hiwat; + } /* * A final check @@ -749,9 +968,10 @@ ctl_rcvbspace(struct socket *so, u_int32_t datasize, error = ENOBUFS; } - if (ctl_debug) + if (ctl_debug) { printf("%s - grown to %d error %d\n", __func__, sb->sb_hiwat, error); + } } else { error = ENOBUFS; } @@ -759,21 +979,21 @@ ctl_rcvbspace(struct socket *so, u_int32_t datasize, error = ENOBUFS; } } - return (error); + return error; } errno_t ctl_enqueuembuf(kern_ctl_ref kctlref, u_int32_t unit, struct mbuf *m, u_int32_t flags) { - struct socket *so; - errno_t error = 0; - int len = m->m_pkthdr.len; - u_int32_t kctlflags; + struct socket *so; + errno_t error = 0; + int len = m->m_pkthdr.len; + u_int32_t kctlflags; so = kcb_find_socket(kctlref, unit, &kctlflags); if (so == NULL) { - return (EINVAL); + return EINVAL; } if (ctl_rcvbspace(so, len, kctlflags, flags) != 0) { @@ -781,28 +1001,32 @@ ctl_enqueuembuf(kern_ctl_ref kctlref, u_int32_t unit, struct mbuf *m, OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fullsock); goto bye; } - if ((flags & CTL_DATA_EOR)) + if ((flags & CTL_DATA_EOR)) { m->m_flags |= M_EOR; + } so_recv_data_stat(so, m, 0); if (sbappend(&so->so_rcv, m) != 0) { - if ((flags & CTL_DATA_NOWAKEUP) == 0) + if ((flags & CTL_DATA_NOWAKEUP) == 0) { sorwakeup(so); + } } else { error = ENOBUFS; OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fullsock); } bye: - if (ctl_debug && error != 0 && (flags & CTL_DATA_CRIT)) + if (ctl_debug && error != 0 && (flags & CTL_DATA_CRIT)) { printf("%s - crit data err %d len %d hiwat %d cc: %d\n", - __func__, error, len, - so->so_rcv.sb_hiwat, so->so_rcv.sb_cc); + __func__, error, len, + so->so_rcv.sb_hiwat, so->so_rcv.sb_cc); + } socket_unlock(so, 1); - if (error != 0) + if (error != 0) { OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fail); + } - return 
(error); + return error; } /* @@ -814,21 +1038,22 @@ m_space(struct mbuf *m) int space = 0; struct mbuf *nxt; - for (nxt = m; nxt != NULL; nxt = nxt->m_next) + for (nxt = m; nxt != NULL; nxt = nxt->m_next) { space += nxt->m_len; + } - return (space); + return space; } errno_t ctl_enqueuembuf_list(void *kctlref, u_int32_t unit, struct mbuf *m_list, - u_int32_t flags, struct mbuf **m_remain) + u_int32_t flags, struct mbuf **m_remain) { struct socket *so = NULL; errno_t error = 0; struct mbuf *m, *nextpkt; int needwakeup = 0; - int len; + int len = 0; u_int32_t kctlflags; /* @@ -857,9 +1082,10 @@ ctl_enqueuembuf_list(void *kctlref, u_int32_t unit, struct mbuf *m_list, for (m = m_list; m != NULL; m = nextpkt) { nextpkt = m->m_nextpkt; - if (m->m_pkthdr.len == 0) + if (m->m_pkthdr.len == 0 && ctl_debug) { printf("%s: %llx m_pkthdr.len is 0", - __func__, (uint64_t)VM_KERNEL_ADDRPERM(m)); + __func__, (uint64_t)VM_KERNEL_ADDRPERM(m)); + } /* * The mbuf is either appended or freed by sbappendrecord() @@ -869,7 +1095,7 @@ ctl_enqueuembuf_list(void *kctlref, u_int32_t unit, struct mbuf *m_list, if (ctl_rcvbspace(so, len, kctlflags, flags) != 0) { error = ENOBUFS; OSIncrementAtomic64( - (SInt64 *)&kctlstat.kcs_enqueue_fullsock); + (SInt64 *)&kctlstat.kcs_enqueue_fullsock); break; } else { /* @@ -887,20 +1113,22 @@ ctl_enqueuembuf_list(void *kctlref, u_int32_t unit, struct mbuf *m_list, m = nextpkt; error = ENOBUFS; OSIncrementAtomic64( - (SInt64 *)&kctlstat.kcs_enqueue_fullsock); + (SInt64 *)&kctlstat.kcs_enqueue_fullsock); break; } } } - if (needwakeup && (flags & CTL_DATA_NOWAKEUP) == 0) + if (needwakeup && (flags & CTL_DATA_NOWAKEUP) == 0) { sorwakeup(so); + } done: if (so != NULL) { - if (ctl_debug && error != 0 && (flags & CTL_DATA_CRIT)) + if (ctl_debug && error != 0 && (flags & CTL_DATA_CRIT)) { printf("%s - crit data err %d len %d hiwat %d cc: %d\n", - __func__, error, len, - so->so_rcv.sb_hiwat, so->so_rcv.sb_cc); + __func__, error, len, + so->so_rcv.sb_hiwat, so->so_rcv.sb_cc); + } socket_unlock(so, 1); } @@ -913,35 +1141,38 @@ done: printf("%s m_list %llx\n", __func__, (uint64_t) VM_KERNEL_ADDRPERM(m_list)); - for (n = m; n != NULL; n = n->m_nextpkt) + for (n = m; n != NULL; n = n->m_nextpkt) { printf(" remain %llx m_next %llx\n", (uint64_t) VM_KERNEL_ADDRPERM(n), (uint64_t) VM_KERNEL_ADDRPERM(n->m_next)); + } } } else { - if (m != NULL) + if (m != NULL) { m_freem_list(m); + } } - if (error != 0) + if (error != 0) { OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fail); - return (error); + } + return error; } errno_t ctl_enqueuedata(void *kctlref, u_int32_t unit, void *data, size_t len, u_int32_t flags) { - struct socket *so; - struct mbuf *m; - errno_t error = 0; - unsigned int num_needed; - struct mbuf *n; - size_t curlen = 0; - u_int32_t kctlflags; + struct socket *so; + struct mbuf *m; + errno_t error = 0; + unsigned int num_needed; + struct mbuf *n; + size_t curlen = 0; + u_int32_t kctlflags; so = kcb_find_socket(kctlref, unit, &kctlflags); if (so == NULL) { - return (EINVAL); + return EINVAL; } if (ctl_rcvbspace(so, len, kctlflags, flags) != 0) { @@ -953,8 +1184,11 @@ ctl_enqueuedata(void *kctlref, u_int32_t unit, void *data, size_t len, num_needed = 1; m = m_allocpacket_internal(&num_needed, len, NULL, M_NOWAIT, 1, 0); if (m == NULL) { - printf("ctl_enqueuedata: m_allocpacket_internal(%lu) failed\n", - len); + kctlstat.kcs_enqdata_mb_alloc_fail++; + if (ctl_debug) { + printf("%s: m_allocpacket_internal(%lu) failed\n", + __func__, len); + } error = ENOMEM; goto bye; } @@ -962,50 
+1196,57 @@ ctl_enqueuedata(void *kctlref, u_int32_t unit, void *data, size_t len, for (n = m; n != NULL; n = n->m_next) { size_t mlen = mbuf_maxlen(n); - if (mlen + curlen > len) + if (mlen + curlen > len) { mlen = len - curlen; + } n->m_len = mlen; bcopy((char *)data + curlen, n->m_data, mlen); curlen += mlen; } mbuf_pkthdr_setlen(m, curlen); - if ((flags & CTL_DATA_EOR)) + if ((flags & CTL_DATA_EOR)) { m->m_flags |= M_EOR; + } so_recv_data_stat(so, m, 0); if (sbappend(&so->so_rcv, m) != 0) { - if ((flags & CTL_DATA_NOWAKEUP) == 0) + if ((flags & CTL_DATA_NOWAKEUP) == 0) { sorwakeup(so); + } } else { + kctlstat.kcs_enqdata_sbappend_fail++; error = ENOBUFS; OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fullsock); } bye: - if (ctl_debug && error != 0 && (flags & CTL_DATA_CRIT)) + if (ctl_debug && error != 0 && (flags & CTL_DATA_CRIT)) { printf("%s - crit data err %d len %d hiwat %d cc: %d\n", - __func__, error, (int)len, - so->so_rcv.sb_hiwat, so->so_rcv.sb_cc); + __func__, error, (int)len, + so->so_rcv.sb_hiwat, so->so_rcv.sb_cc); + } socket_unlock(so, 1); - if (error != 0) + if (error != 0) { OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fail); - return (error); + } + return error; } errno_t ctl_getenqueuepacketcount(kern_ctl_ref kctlref, u_int32_t unit, u_int32_t *pcnt) { - struct socket *so; + struct socket *so; u_int32_t cnt; struct mbuf *m1; - if (pcnt == NULL) - return (EINVAL); + if (pcnt == NULL) { + return EINVAL; + } so = kcb_find_socket(kctlref, unit, NULL); if (so == NULL) { - return (EINVAL); + return EINVAL; } cnt = 0; @@ -1013,50 +1254,53 @@ ctl_getenqueuepacketcount(kern_ctl_ref kctlref, u_int32_t unit, u_int32_t *pcnt) while (m1 != NULL) { if (m1->m_type == MT_DATA || m1->m_type == MT_HEADER || - m1->m_type == MT_OOBDATA) + m1->m_type == MT_OOBDATA) { cnt += 1; + } m1 = m1->m_nextpkt; } *pcnt = cnt; socket_unlock(so, 1); - return (0); + return 0; } errno_t ctl_getenqueuespace(kern_ctl_ref kctlref, u_int32_t unit, size_t *space) { - struct socket *so; + struct socket *so; long avail; - if (space == NULL) - return (EINVAL); + if (space == NULL) { + return EINVAL; + } so = kcb_find_socket(kctlref, unit, NULL); if (so == NULL) { - return (EINVAL); + return EINVAL; } avail = sbspace(&so->so_rcv); *space = (avail < 0) ? 
0 : avail; socket_unlock(so, 1); - return (0); + return 0; } errno_t ctl_getenqueuereadable(kern_ctl_ref kctlref, u_int32_t unit, u_int32_t *difference) { - struct socket *so; + struct socket *so; - if (difference == NULL) - return (EINVAL); + if (difference == NULL) { + return EINVAL; + } so = kcb_find_socket(kctlref, unit, NULL); if (so == NULL) { - return (EINVAL); + return EINVAL; } if (so->so_rcv.sb_cc >= so->so_rcv.sb_lowat) { @@ -1066,144 +1310,166 @@ ctl_getenqueuereadable(kern_ctl_ref kctlref, u_int32_t unit, } socket_unlock(so, 1); - return (0); + return 0; } static int ctl_ctloutput(struct socket *so, struct sockopt *sopt) { - struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb; - struct kctl *kctl; - int error = 0; - void *data; - size_t len; + struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb; + struct kctl *kctl; + int error = 0; + void *data = NULL; + size_t len; if (sopt->sopt_level != SYSPROTO_CONTROL) { - return (EINVAL); + return EINVAL; } - if (kcb == NULL) /* sanity check */ - return (ENOTCONN); + if (kcb == NULL) { /* sanity check */ + return ENOTCONN; + } - if ((kctl = kcb->kctl) == NULL) - return (EINVAL); + if ((kctl = kcb->kctl) == NULL) { + return EINVAL; + } + + lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK); + ctl_kcb_increment_use_count(kcb, mtx_held); switch (sopt->sopt_dir) { - case SOPT_SET: - if (kctl->setopt == NULL) - return (ENOTSUP); - if (sopt->sopt_valsize == 0) { - data = NULL; - } else { - MALLOC(data, void *, sopt->sopt_valsize, M_TEMP, - M_WAITOK); - if (data == NULL) - return (ENOMEM); - error = sooptcopyin(sopt, data, - sopt->sopt_valsize, sopt->sopt_valsize); - } - if (error == 0) { - socket_unlock(so, 0); - error = (*kctl->setopt)(kctl->kctlref, - kcb->unit, kcb->userdata, sopt->sopt_name, - data, sopt->sopt_valsize); - socket_lock(so, 0); + case SOPT_SET: + if (kctl->setopt == NULL) { + error = ENOTSUP; + goto out; + } + if (sopt->sopt_valsize != 0) { + MALLOC(data, void *, sopt->sopt_valsize, M_TEMP, + M_WAITOK | M_ZERO); + if (data == NULL) { + error = ENOMEM; + goto out; } + error = sooptcopyin(sopt, data, + sopt->sopt_valsize, sopt->sopt_valsize); + } + if (error == 0) { + socket_unlock(so, 0); + error = (*kctl->setopt)(kctl->kctlref, + kcb->sac.sc_unit, kcb->userdata, sopt->sopt_name, + data, sopt->sopt_valsize); + socket_lock(so, 0); + } + + if (data != NULL) { FREE(data, M_TEMP); - break; + } + break; - case SOPT_GET: - if (kctl->getopt == NULL) - return (ENOTSUP); - data = NULL; - if (sopt->sopt_valsize && sopt->sopt_val) { - MALLOC(data, void *, sopt->sopt_valsize, M_TEMP, - M_WAITOK); - if (data == NULL) - return (ENOMEM); - /* - * 4108337 - copy user data in case the - * kernel control needs it - */ - error = sooptcopyin(sopt, data, - sopt->sopt_valsize, sopt->sopt_valsize); + case SOPT_GET: + if (kctl->getopt == NULL) { + error = ENOTSUP; + goto out; + } + + if (sopt->sopt_valsize && sopt->sopt_val) { + MALLOC(data, void *, sopt->sopt_valsize, M_TEMP, + M_WAITOK | M_ZERO); + if (data == NULL) { + error = ENOMEM; + goto out; } + /* + * 4108337 - copy user data in case the + * kernel control needs it + */ + error = sooptcopyin(sopt, data, + sopt->sopt_valsize, sopt->sopt_valsize); + } + + if (error == 0) { len = sopt->sopt_valsize; socket_unlock(so, 0); - error = (*kctl->getopt)(kctl->kctlref, kcb->unit, - kcb->userdata, sopt->sopt_name, - data, &len); - if (data != NULL && len > sopt->sopt_valsize) + error = (*kctl->getopt)(kctl->kctlref, kcb->sac.sc_unit, + kcb->userdata, sopt->sopt_name, + data, &len); + if (data != NULL 
&& len > sopt->sopt_valsize) { panic_plain("ctl_ctloutput: ctl %s returned " - "len (%lu) > sopt_valsize (%lu)\n", - kcb->kctl->name, len, - sopt->sopt_valsize); + "len (%lu) > sopt_valsize (%lu)\n", + kcb->kctl->name, len, + sopt->sopt_valsize); + } socket_lock(so, 0); if (error == 0) { - if (data != NULL) + if (data != NULL) { error = sooptcopyout(sopt, data, len); - else + } else { sopt->sopt_valsize = len; + } } - if (data != NULL) - FREE(data, M_TEMP); - break; + } + if (data != NULL) { + FREE(data, M_TEMP); + } + break; } - return (error); + +out: + clt_kcb_decrement_use_count(kcb); + return error; } static int ctl_ioctl(struct socket *so, u_long cmd, caddr_t data, - struct ifnet *ifp, struct proc *p) + struct ifnet *ifp, struct proc *p) { #pragma unused(so, ifp, p) - int error = ENOTSUP; + int error = ENOTSUP; switch (cmd) { - /* get the number of controllers */ - case CTLIOCGCOUNT: { - struct kctl *kctl; - u_int32_t n = 0; + /* get the number of controllers */ + case CTLIOCGCOUNT: { + struct kctl *kctl; + u_int32_t n = 0; - lck_mtx_lock(ctl_mtx); - TAILQ_FOREACH(kctl, &ctl_head, next) - n++; - lck_mtx_unlock(ctl_mtx); + lck_mtx_lock(ctl_mtx); + TAILQ_FOREACH(kctl, &ctl_head, next) + n++; + lck_mtx_unlock(ctl_mtx); - bcopy(&n, data, sizeof (n)); - error = 0; - break; - } - case CTLIOCGINFO: { - struct ctl_info ctl_info; - struct kctl *kctl = 0; - size_t name_len; + bcopy(&n, data, sizeof(n)); + error = 0; + break; + } + case CTLIOCGINFO: { + struct ctl_info ctl_info; + struct kctl *kctl = 0; + size_t name_len; - bcopy(data, &ctl_info, sizeof (ctl_info)); - name_len = strnlen(ctl_info.ctl_name, MAX_KCTL_NAME); + bcopy(data, &ctl_info, sizeof(ctl_info)); + name_len = strnlen(ctl_info.ctl_name, MAX_KCTL_NAME); - if (name_len == 0 || name_len + 1 > MAX_KCTL_NAME) { - error = EINVAL; - break; - } - lck_mtx_lock(ctl_mtx); - kctl = ctl_find_by_name(ctl_info.ctl_name); - lck_mtx_unlock(ctl_mtx); - if (kctl == 0) { - error = ENOENT; - break; - } - ctl_info.ctl_id = kctl->id; - bcopy(&ctl_info, data, sizeof (ctl_info)); - error = 0; + if (name_len == 0 || name_len + 1 > MAX_KCTL_NAME) { + error = EINVAL; break; } + lck_mtx_lock(ctl_mtx); + kctl = ctl_find_by_name(ctl_info.ctl_name); + lck_mtx_unlock(ctl_mtx); + if (kctl == 0) { + error = ENOENT; + break; + } + ctl_info.ctl_id = kctl->id; + bcopy(&ctl_info, data, sizeof(ctl_info)); + error = 0; + break; + } /* add controls to get list of NKEs */ - } - return (error); + return error; } static void @@ -1214,19 +1480,28 @@ kctl_tbl_grow() lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED); - while (kctl_tbl_growing) { + if (kctl_tbl_growing) { /* Another thread is allocating */ - (void) msleep((caddr_t) &kctl_tbl_growing, ctl_mtx, - PSOCK | PCATCH, "kctl_tbl_growing", 0); + kctl_tbl_growing_waiting++; + + do { + (void) msleep((caddr_t) &kctl_tbl_growing, ctl_mtx, + PSOCK | PCATCH, "kctl_tbl_growing", 0); + } while (kctl_tbl_growing); + kctl_tbl_growing_waiting--; } /* Another thread grew the table */ - if (kctl_table != NULL && kctl_tbl_count < kctl_tbl_size) + if (kctl_table != NULL && kctl_tbl_count < kctl_tbl_size) { return; + } /* Verify we have a sane size */ if (kctl_tbl_size + KCTL_TBL_INC >= UINT16_MAX) { - printf("%s kctl_tbl_size %lu too big\n", - __func__, kctl_tbl_size); + kctlstat.kcs_tbl_size_too_big++; + if (ctl_debug) { + printf("%s kctl_tbl_size %lu too big\n", + __func__, kctl_tbl_size); + } return; } kctl_tbl_growing = 1; @@ -1250,6 +1525,10 @@ kctl_tbl_grow() } kctl_tbl_growing = 0; + + if (kctl_tbl_growing_waiting) { + 
wakeup(&kctl_tbl_growing); + } } #define KCTLREF_INDEX_MASK 0x0000FFFF @@ -1263,8 +1542,9 @@ kctl_make_ref(struct kctl *kctl) lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED); - if (kctl_tbl_count >= kctl_tbl_size) + if (kctl_tbl_count >= kctl_tbl_size) { kctl_tbl_grow(); + } kctl->kctlref = NULL; for (i = 0; i < kctl_tbl_size; i++) { @@ -1280,7 +1560,7 @@ kctl_make_ref(struct kctl *kctl) * Add generation count as salt to reference to prevent * use after deregister */ - ref = ((kctl_ref_gencnt << KCTLREF_GENCNT_SHIFT) & + ref = ((kctl_ref_gencnt << KCTLREF_GENCNT_SHIFT) & KCTLREF_GENCNT_MASK) + ((i + 1) & KCTLREF_INDEX_MASK); @@ -1291,14 +1571,16 @@ kctl_make_ref(struct kctl *kctl) } } - if (kctl->kctlref == NULL) + if (kctl->kctlref == NULL) { panic("%s no space in table", __func__); + } - if (ctl_debug > 0) + if (ctl_debug > 0) { printf("%s %p for %p\n", - __func__, kctl->kctlref, kctl); + __func__, kctl->kctlref, kctl); + } - return (kctl->kctlref); + return kctl->kctlref; } static void @@ -1338,14 +1620,14 @@ kctl_from_ref(kern_ctl_ref kctlref) if (i >= kctl_tbl_size) { kctlstat.kcs_bad_kctlref++; - return (NULL); + return NULL; } kctl = kctl_table[i]; if (kctl->kctlref != kctlref) { kctlstat.kcs_bad_kctlref++; - return (NULL); + return NULL; } - return (kctl); + return kctl; } /* @@ -1354,23 +1636,27 @@ kctl_from_ref(kern_ctl_ref kctlref) errno_t ctl_register(struct kern_ctl_reg *userkctl, kern_ctl_ref *kctlref) { - struct kctl *kctl = NULL; - struct kctl *kctl_next = NULL; - u_int32_t id = 1; - size_t name_len; - int is_extended = 0; - - if (userkctl == NULL) /* sanity check */ - return (EINVAL); - if (userkctl->ctl_connect == NULL) - return (EINVAL); + struct kctl *kctl = NULL; + struct kctl *kctl_next = NULL; + u_int32_t id = 1; + size_t name_len; + int is_extended = 0; + + if (userkctl == NULL) { /* sanity check */ + return EINVAL; + } + if (userkctl->ctl_connect == NULL) { + return EINVAL; + } name_len = strlen(userkctl->ctl_name); - if (name_len == 0 || name_len + 1 > MAX_KCTL_NAME) - return (EINVAL); + if (name_len == 0 || name_len + 1 > MAX_KCTL_NAME) { + return EINVAL; + } MALLOC(kctl, struct kctl *, sizeof(*kctl), M_TEMP, M_WAITOK); - if (kctl == NULL) - return (ENOMEM); + if (kctl == NULL) { + return ENOMEM; + } bzero((char *)kctl, sizeof(*kctl)); lck_mtx_lock(ctl_mtx); @@ -1378,7 +1664,7 @@ ctl_register(struct kern_ctl_reg *userkctl, kern_ctl_ref *kctlref) if (kctl_make_ref(kctl) == NULL) { lck_mtx_unlock(ctl_mtx); FREE(kctl, M_TEMP); - return (ENOMEM); + return ENOMEM; } /* @@ -1400,7 +1686,7 @@ ctl_register(struct kern_ctl_reg *userkctl, kern_ctl_ref *kctlref) kctl_delete_ref(kctl->kctlref); lck_mtx_unlock(ctl_mtx); FREE(kctl, M_TEMP); - return (EEXIST); + return EEXIST; } /* Start with 1 in case the list is empty */ @@ -1436,15 +1722,16 @@ ctl_register(struct kern_ctl_reg *userkctl, kern_ctl_ref *kctlref) kctl->reg_unit = -1; } else { TAILQ_FOREACH(kctl_next, &ctl_head, next) { - if (kctl_next->id > userkctl->ctl_id) + if (kctl_next->id > userkctl->ctl_id) { break; + } } if (ctl_find_by_id_unit(userkctl->ctl_id, userkctl->ctl_unit)) { kctl_delete_ref(kctl->kctlref); lck_mtx_unlock(ctl_mtx); FREE(kctl, M_TEMP); - return (EEXIST); + return EEXIST; } kctl->id = userkctl->ctl_id; kctl->reg_unit = userkctl->ctl_unit; @@ -1471,6 +1758,7 @@ ctl_register(struct kern_ctl_reg *userkctl, kern_ctl_ref *kctlref) kctl->recvbufsize = userkctl->ctl_recvsize; } + kctl->bind = userkctl->ctl_bind; kctl->connect = userkctl->ctl_connect; kctl->disconnect = userkctl->ctl_disconnect; kctl->send 
= userkctl->ctl_send; @@ -1483,10 +1771,11 @@ ctl_register(struct kern_ctl_reg *userkctl, kern_ctl_ref *kctlref) TAILQ_INIT(&kctl->kcb_head); - if (kctl_next) + if (kctl_next) { TAILQ_INSERT_BEFORE(kctl_next, kctl, next); - else + } else { TAILQ_INSERT_TAIL(&ctl_head, kctl, next); + } kctlstat.kcs_reg_count++; kctlstat.kcs_gencnt++; @@ -1496,27 +1785,28 @@ ctl_register(struct kern_ctl_reg *userkctl, kern_ctl_ref *kctlref) *kctlref = kctl->kctlref; ctl_post_msg(KEV_CTL_REGISTERED, kctl->id); - return (0); + return 0; } errno_t ctl_deregister(void *kctlref) { - struct kctl *kctl; + struct kctl *kctl; lck_mtx_lock(ctl_mtx); if ((kctl = kctl_from_ref(kctlref)) == NULL) { kctlstat.kcs_bad_kctlref++; lck_mtx_unlock(ctl_mtx); - if (ctl_debug != 0) + if (ctl_debug != 0) { printf("%s invalid kctlref %p\n", - __func__, kctlref); - return (EINVAL); + __func__, kctlref); + } + return EINVAL; } if (!TAILQ_EMPTY(&kctl->kcb_head)) { lck_mtx_unlock(ctl_mtx); - return (EBUSY); + return EBUSY; } TAILQ_REMOVE(&ctl_head, kctl, next); @@ -1529,7 +1819,7 @@ ctl_deregister(void *kctlref) ctl_post_msg(KEV_CTL_DEREGISTERED, kctl->id); FREE(kctl, M_TEMP); - return (0); + return 0; } /* @@ -1538,53 +1828,57 @@ ctl_deregister(void *kctlref) static struct kctl * ctl_find_by_name(const char *name) { - struct kctl *kctl; + struct kctl *kctl; lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED); TAILQ_FOREACH(kctl, &ctl_head, next) - if (strncmp(kctl->name, name, sizeof(kctl->name)) == 0) - return (kctl); + if (strncmp(kctl->name, name, sizeof(kctl->name)) == 0) { + return kctl; + } - return (NULL); + return NULL; } u_int32_t ctl_id_by_name(const char *name) { - u_int32_t ctl_id = 0; - struct kctl *kctl; + u_int32_t ctl_id = 0; + struct kctl *kctl; lck_mtx_lock(ctl_mtx); kctl = ctl_find_by_name(name); - if (kctl) + if (kctl) { ctl_id = kctl->id; + } lck_mtx_unlock(ctl_mtx); - return (ctl_id); + return ctl_id; } errno_t ctl_name_by_id(u_int32_t id, char *out_name, size_t maxsize) { - int found = 0; + int found = 0; struct kctl *kctl; lck_mtx_lock(ctl_mtx); TAILQ_FOREACH(kctl, &ctl_head, next) { - if (kctl->id == id) + if (kctl->id == id) { break; + } } if (kctl) { - if (maxsize > MAX_KCTL_NAME) + if (maxsize > MAX_KCTL_NAME) { maxsize = MAX_KCTL_NAME; + } strlcpy(out_name, kctl->name, maxsize); found = 1; } lck_mtx_unlock(ctl_mtx); - return (found ? 0 : ENOENT); + return found ? 
0 : ENOENT; } /* @@ -1594,17 +1888,18 @@ ctl_name_by_id(u_int32_t id, char *out_name, size_t maxsize) static struct kctl * ctl_find_by_id_unit(u_int32_t id, u_int32_t unit) { - struct kctl *kctl; + struct kctl *kctl; lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED); TAILQ_FOREACH(kctl, &ctl_head, next) { - if (kctl->id == id && (kctl->flags & CTL_FLAG_REG_ID_UNIT) == 0) - return (kctl); - else if (kctl->id == id && kctl->reg_unit == unit) - return (kctl); + if (kctl->id == id && (kctl->flags & CTL_FLAG_REG_ID_UNIT) == 0) { + return kctl; + } else if (kctl->id == id && kctl->reg_unit == unit) { + return kctl; + } } - return (NULL); + return NULL; } /* @@ -1613,22 +1908,23 @@ ctl_find_by_id_unit(u_int32_t id, u_int32_t unit) static struct ctl_cb * kcb_find(struct kctl *kctl, u_int32_t unit) { - struct ctl_cb *kcb; + struct ctl_cb *kcb; lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED); TAILQ_FOREACH(kcb, &kctl->kcb_head, next) - if (kcb->unit == unit) - return (kcb); + if (kcb->sac.sc_unit == unit) { + return kcb; + } - return (NULL); + return NULL; } static struct socket * kcb_find_socket(kern_ctl_ref kctlref, u_int32_t unit, u_int32_t *kctlflags) { struct socket *so = NULL; - struct ctl_cb *kcb; + struct ctl_cb *kcb; void *lr_saved; struct kctl *kctl; int i; @@ -1642,16 +1938,17 @@ kcb_find_socket(kern_ctl_ref kctlref, u_int32_t unit, u_int32_t *kctlflags) if ((kctl = kctl_from_ref(kctlref)) == NULL) { kctlstat.kcs_bad_kctlref++; lck_mtx_unlock(ctl_mtx); - if (ctl_debug != 0) + if (ctl_debug != 0) { printf("%s invalid kctlref %p\n", - __func__, kctlref); - return (NULL); + __func__, kctlref); + } + return NULL; } kcb = kcb_find(kctl, unit); if (kcb == NULL || kcb->kctl != kctl || (so = kcb->so) == NULL) { lck_mtx_unlock(ctl_mtx); - return (NULL); + return NULL; } /* * This prevents the socket from being closed @@ -1682,19 +1979,20 @@ kcb_find_socket(kern_ctl_ref kctlref, u_int32_t unit, u_int32_t *kctlflags) } kcb->usecount--; - if (kcb->usecount == 0) + if (kcb->usecount == 0) { wakeup((event_t)&kcb->usecount); + } lck_mtx_unlock(ctl_mtx); - return (so); + return so; } static void ctl_post_msg(u_int32_t event_code, u_int32_t id) { - struct ctl_event_data ctl_ev_data; - struct kev_msg ev_msg; + struct ctl_event_data ctl_ev_data; + struct kev_msg ev_msg; lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_NOTOWNED); @@ -1721,14 +2019,15 @@ ctl_lock(struct socket *so, int refcount, void *lr) { void *lr_saved; - if (lr == NULL) + if (lr == NULL) { lr_saved = __builtin_return_address(0); - else + } else { lr_saved = lr; + } if (so->so_pcb != NULL) { lck_mtx_lock(((struct ctl_cb *)so->so_pcb)->mtx); - } else { + } else { panic("ctl_lock: so=%p NO PCB! 
lr=%p lrh= %s\n", so, lr_saved, solockhistory_nr(so)); /* NOTREACHED */ @@ -1736,17 +2035,18 @@ ctl_lock(struct socket *so, int refcount, void *lr) if (so->so_usecount < 0) { panic("ctl_lock: so=%p so_pcb=%p lr=%p ref=%x lrh= %s\n", - so, so->so_pcb, lr_saved, so->so_usecount, - solockhistory_nr(so)); + so, so->so_pcb, lr_saved, so->so_usecount, + solockhistory_nr(so)); /* NOTREACHED */ } - if (refcount) + if (refcount) { so->so_usecount++; + } so->lock_lr[so->next_lock_lr] = lr_saved; - so->next_lock_lr = (so->next_lock_lr+1) % SO_LCKDBG_MAX; - return (0); + so->next_lock_lr = (so->next_lock_lr + 1) % SO_LCKDBG_MAX; + return 0; } static int @@ -1755,20 +2055,22 @@ ctl_unlock(struct socket *so, int refcount, void *lr) void *lr_saved; lck_mtx_t *mutex_held; - if (lr == NULL) + if (lr == NULL) { lr_saved = __builtin_return_address(0); - else + } else { lr_saved = lr; + } -#ifdef MORE_KCTLLOCK_DEBUG +#if (MORE_KCTLLOCK_DEBUG && (DEVELOPMENT || DEBUG)) printf("ctl_unlock: so=%llx sopcb=%x lock=%llx ref=%u lr=%llx\n", (uint64_t)VM_KERNEL_ADDRPERM(so), (uint64_t)VM_KERNEL_ADDRPERM(so->so_pcb, (uint64_t)VM_KERNEL_ADDRPERM(((struct ctl_cb *)so->so_pcb)->mtx), so->so_usecount, (uint64_t)VM_KERNEL_ADDRPERM(lr_saved)); -#endif - if (refcount) +#endif /* (MORE_KCTLLOCK_DEBUG && (DEVELOPMENT || DEBUG)) */ + if (refcount) { so->so_usecount--; + } if (so->so_usecount < 0) { panic("ctl_unlock: so=%p usecount=%x lrh= %s\n", @@ -1777,38 +2079,40 @@ ctl_unlock(struct socket *so, int refcount, void *lr) } if (so->so_pcb == NULL) { panic("ctl_unlock: so=%p NO PCB usecount=%x lr=%p lrh= %s\n", - so, so->so_usecount, (void *)lr_saved, - solockhistory_nr(so)); + so, so->so_usecount, (void *)lr_saved, + solockhistory_nr(so)); /* NOTREACHED */ } mutex_held = ((struct ctl_cb *)so->so_pcb)->mtx; - lck_mtx_assert(mutex_held, LCK_MTX_ASSERT_OWNED); - so->unlock_lr[so->next_unlock_lr] = lr_saved; - so->next_unlock_lr = (so->next_unlock_lr+1) % SO_LCKDBG_MAX; - lck_mtx_unlock(mutex_held); + lck_mtx_assert(mutex_held, LCK_MTX_ASSERT_OWNED); + so->unlock_lr[so->next_unlock_lr] = lr_saved; + so->next_unlock_lr = (so->next_unlock_lr + 1) % SO_LCKDBG_MAX; + lck_mtx_unlock(mutex_held); - if (so->so_usecount == 0) + if (so->so_usecount == 0) { ctl_sofreelastref(so); + } - return (0); + return 0; } static lck_mtx_t * -ctl_getlock(struct socket *so, int locktype) +ctl_getlock(struct socket *so, int flags) { -#pragma unused(locktype) - struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb; +#pragma unused(flags) + struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb; - if (so->so_pcb) { - if (so->so_usecount < 0) - panic("ctl_getlock: so=%p usecount=%x lrh= %s\n", - so, so->so_usecount, solockhistory_nr(so)); - return (kcb->mtx); + if (so->so_pcb) { + if (so->so_usecount < 0) { + panic("ctl_getlock: so=%p usecount=%x lrh= %s\n", + so, so->so_usecount, solockhistory_nr(so)); + } + return kcb->mtx; } else { - panic("ctl_getlock: so=%p NULL NO so_pcb %s\n", - so, solockhistory_nr(so)); - return (so->so_proto->pr_domain->dom_mtx); + panic("ctl_getlock: so=%p NULL NO so_pcb %s\n", + so, solockhistory_nr(so)); + return so->so_proto->pr_domain->dom_mtx; } } @@ -1816,264 +2120,268 @@ __private_extern__ int kctl_reg_list SYSCTL_HANDLER_ARGS { #pragma unused(oidp, arg1, arg2) - int error = 0; - int n, i; - struct xsystmgen xsg; - void *buf = NULL; - struct kctl *kctl; - size_t item_size = ROUNDUP64(sizeof (struct xkctl_reg)); - - buf = _MALLOC(item_size, M_TEMP, M_WAITOK | M_ZERO); - if (buf == NULL) - return (ENOMEM); - - lck_mtx_lock(ctl_mtx); - - 
-	n = kctlstat.kcs_reg_count;
-
-	if (req->oldptr == USER_ADDR_NULL) {
-		req->oldidx = (n + n/8) * sizeof(struct xkctl_reg);
-		goto done;
-	}
-	if (req->newptr != USER_ADDR_NULL) {
-		error = EPERM;
-		goto done;
-	}
-	bzero(&xsg, sizeof (xsg));
-	xsg.xg_len = sizeof (xsg);
-	xsg.xg_count = n;
-	xsg.xg_gen = kctlstat.kcs_gencnt;
-	xsg.xg_sogen = so_gencnt;
-	error = SYSCTL_OUT(req, &xsg, sizeof (xsg));
-	if (error) {
-		goto done;
-	}
-	/*
-	 * We are done if there is no pcb
-	 */
-	if (n == 0) {
-		goto done;
-	}
-
-	i = 0;
-	for (i = 0, kctl = TAILQ_FIRST(&ctl_head);
-	    i < n && kctl != NULL;
-	    i++, kctl = TAILQ_NEXT(kctl, next)) {
-		struct xkctl_reg *xkr = (struct xkctl_reg *)buf;
-		struct ctl_cb *kcb;
-		u_int32_t pcbcount = 0;
-
-		TAILQ_FOREACH(kcb, &kctl->kcb_head, next)
-			pcbcount++;
-
-		bzero(buf, item_size);
-
-		xkr->xkr_len = sizeof(struct xkctl_reg);
-		xkr->xkr_kind = XSO_KCREG;
-		xkr->xkr_id = kctl->id;
-		xkr->xkr_reg_unit = kctl->reg_unit;
-		xkr->xkr_flags = kctl->flags;
-		xkr->xkr_kctlref = (uint64_t)(kctl->kctlref);
-		xkr->xkr_recvbufsize = kctl->recvbufsize;
-		xkr->xkr_sendbufsize = kctl->sendbufsize;
-		xkr->xkr_lastunit = kctl->lastunit;
-		xkr->xkr_pcbcount = pcbcount;
-		xkr->xkr_connect = (uint64_t)VM_KERNEL_ADDRPERM(kctl->connect);
-		xkr->xkr_disconnect =
-		    (uint64_t)VM_KERNEL_ADDRPERM(kctl->disconnect);
-		xkr->xkr_send = (uint64_t)VM_KERNEL_ADDRPERM(kctl->send);
-		xkr->xkr_send_list =
-		    (uint64_t)VM_KERNEL_ADDRPERM(kctl->send_list);
-		xkr->xkr_setopt = (uint64_t)VM_KERNEL_ADDRPERM(kctl->setopt);
-		xkr->xkr_getopt = (uint64_t)VM_KERNEL_ADDRPERM(kctl->getopt);
-		xkr->xkr_rcvd = (uint64_t)VM_KERNEL_ADDRPERM(kctl->rcvd);
-		strlcpy(xkr->xkr_name, kctl->name, sizeof(xkr->xkr_name));
-
-		error = SYSCTL_OUT(req, buf, item_size);
-	}
-
-	if (error == 0) {
-		/*
-		 * Give the user an updated idea of our state.
-		 * If the generation differs from what we told
-		 * her before, she knows that something happened
-		 * while we were processing this request, and it
-		 * might be necessary to retry.
-		 */
-		bzero(&xsg, sizeof (xsg));
-		xsg.xg_len = sizeof (xsg);
-		xsg.xg_count = n;
-		xsg.xg_gen = kctlstat.kcs_gencnt;
-		xsg.xg_sogen = so_gencnt;
-		error = SYSCTL_OUT(req, &xsg, sizeof (xsg));
-		if (error) {
-			goto done;
+	int error = 0;
+	int n, i;
+	struct xsystmgen xsg;
+	void *buf = NULL;
+	struct kctl *kctl;
+	size_t item_size = ROUNDUP64(sizeof(struct xkctl_reg));
+
+	buf = _MALLOC(item_size, M_TEMP, M_WAITOK | M_ZERO);
+	if (buf == NULL) {
+		return ENOMEM;
+	}
+
+	lck_mtx_lock(ctl_mtx);
+
+	n = kctlstat.kcs_reg_count;
+
+	if (req->oldptr == USER_ADDR_NULL) {
+		req->oldidx = (n + n / 8) * sizeof(struct xkctl_reg);
+		goto done;
+	}
+	if (req->newptr != USER_ADDR_NULL) {
+		error = EPERM;
+		goto done;
+	}
+	bzero(&xsg, sizeof(xsg));
+	xsg.xg_len = sizeof(xsg);
+	xsg.xg_count = n;
+	xsg.xg_gen = kctlstat.kcs_gencnt;
+	xsg.xg_sogen = so_gencnt;
+	error = SYSCTL_OUT(req, &xsg, sizeof(xsg));
+	if (error) {
+		goto done;
+	}
+	/*
+	 * We are done if there is no pcb
+	 */
+	if (n == 0) {
+		goto done;
+	}
+
+	i = 0;
+	for (i = 0, kctl = TAILQ_FIRST(&ctl_head);
+	    i < n && kctl != NULL;
+	    i++, kctl = TAILQ_NEXT(kctl, next)) {
+		struct xkctl_reg *xkr = (struct xkctl_reg *)buf;
+		struct ctl_cb *kcb;
+		u_int32_t pcbcount = 0;
+
+		TAILQ_FOREACH(kcb, &kctl->kcb_head, next)
+			pcbcount++;
+
+		bzero(buf, item_size);
+
+		xkr->xkr_len = sizeof(struct xkctl_reg);
+		xkr->xkr_kind = XSO_KCREG;
+		xkr->xkr_id = kctl->id;
+		xkr->xkr_reg_unit = kctl->reg_unit;
+		xkr->xkr_flags = kctl->flags;
+		xkr->xkr_kctlref = (uint64_t)(kctl->kctlref);
+		xkr->xkr_recvbufsize = kctl->recvbufsize;
+		xkr->xkr_sendbufsize = kctl->sendbufsize;
+		xkr->xkr_lastunit = kctl->lastunit;
+		xkr->xkr_pcbcount = pcbcount;
+		xkr->xkr_connect = (uint64_t)VM_KERNEL_UNSLIDE(kctl->connect);
+		xkr->xkr_disconnect =
+		    (uint64_t)VM_KERNEL_UNSLIDE(kctl->disconnect);
+		xkr->xkr_send = (uint64_t)VM_KERNEL_UNSLIDE(kctl->send);
+		xkr->xkr_send_list =
+		    (uint64_t)VM_KERNEL_UNSLIDE(kctl->send_list);
+		xkr->xkr_setopt = (uint64_t)VM_KERNEL_UNSLIDE(kctl->setopt);
+		xkr->xkr_getopt = (uint64_t)VM_KERNEL_UNSLIDE(kctl->getopt);
+		xkr->xkr_rcvd = (uint64_t)VM_KERNEL_UNSLIDE(kctl->rcvd);
+		strlcpy(xkr->xkr_name, kctl->name, sizeof(xkr->xkr_name));
+
+		error = SYSCTL_OUT(req, buf, item_size);
+	}
+
+	if (error == 0) {
+		/*
+		 * Give the user an updated idea of our state.
+		 * If the generation differs from what we told
+		 * her before, she knows that something happened
+		 * while we were processing this request, and it
+		 * might be necessary to retry.
+		 */
+		bzero(&xsg, sizeof(xsg));
+		xsg.xg_len = sizeof(xsg);
+		xsg.xg_count = n;
+		xsg.xg_gen = kctlstat.kcs_gencnt;
+		xsg.xg_sogen = so_gencnt;
+		error = SYSCTL_OUT(req, &xsg, sizeof(xsg));
+		if (error) {
+			goto done;
 		}
 	}
 done:
-	lck_mtx_unlock(ctl_mtx);
+	lck_mtx_unlock(ctl_mtx);
 
-	if (buf != NULL)
-		FREE(buf, M_TEMP);
+	if (buf != NULL) {
+		FREE(buf, M_TEMP);
+	}
 
-	return (error);
+	return error;
 }
 
 __private_extern__ int
 kctl_pcblist SYSCTL_HANDLER_ARGS
 {
 #pragma unused(oidp, arg1, arg2)
-	int error = 0;
-	int n, i;
-	struct xsystmgen xsg;
-	void *buf = NULL;
-	struct kctl *kctl;
-	size_t item_size = ROUNDUP64(sizeof (struct xkctlpcb)) +
-	    ROUNDUP64(sizeof (struct xsocket_n)) +
-	    2 * ROUNDUP64(sizeof (struct xsockbuf_n)) +
-	    ROUNDUP64(sizeof (struct xsockstat_n));
-
-	buf = _MALLOC(item_size, M_TEMP, M_WAITOK | M_ZERO);
-	if (buf == NULL)
-		return (ENOMEM);
-
-	lck_mtx_lock(ctl_mtx);
-
-	n = kctlstat.kcs_pcbcount;
-
-	if (req->oldptr == USER_ADDR_NULL) {
-		req->oldidx = (n + n/8) * item_size;
-		goto done;
-	}
-	if (req->newptr != USER_ADDR_NULL) {
-		error = EPERM;
-		goto done;
-	}
-	bzero(&xsg, sizeof (xsg));
-	xsg.xg_len = sizeof (xsg);
-	xsg.xg_count = n;
-	xsg.xg_gen = kctlstat.kcs_gencnt;
-	xsg.xg_sogen = so_gencnt;
-	error = SYSCTL_OUT(req, &xsg, sizeof (xsg));
-	if (error) {
-		goto done;
-	}
-	/*
-	 * We are done if there is no pcb
-	 */
-	if (n == 0) {
-		goto done;
-	}
-
-	i = 0;
-	for (i = 0, kctl = TAILQ_FIRST(&ctl_head);
-	    i < n && kctl != NULL;
-	    kctl = TAILQ_NEXT(kctl, next)) {
-		struct ctl_cb *kcb;
-
-		for (kcb = TAILQ_FIRST(&kctl->kcb_head);
-		    i < n && kcb != NULL;
-		    i++, kcb = TAILQ_NEXT(kcb, next)) {
-			struct xkctlpcb *xk = (struct xkctlpcb *)buf;
-			struct xsocket_n *xso = (struct xsocket_n *)
-			    ADVANCE64(xk, sizeof (*xk));
-			struct xsockbuf_n *xsbrcv = (struct xsockbuf_n *)
-			    ADVANCE64(xso, sizeof (*xso));
-			struct xsockbuf_n *xsbsnd = (struct xsockbuf_n *)
-			    ADVANCE64(xsbrcv, sizeof (*xsbrcv));
-			struct xsockstat_n *xsostats = (struct xsockstat_n *)
-			    ADVANCE64(xsbsnd, sizeof (*xsbsnd));
-
-			bzero(buf, item_size);
-
-			xk->xkp_len = sizeof(struct xkctlpcb);
-			xk->xkp_kind = XSO_KCB;
-			xk->xkp_unit = kcb->unit;
-			xk->xkp_kctpcb = (uint64_t)VM_KERNEL_ADDRPERM(kcb);
-			xk->xkp_kctlref = (uint64_t)VM_KERNEL_ADDRPERM(kctl);
-			xk->xkp_kctlid = kctl->id;
-			strlcpy(xk->xkp_kctlname, kctl->name,
-			    sizeof(xk->xkp_kctlname));
-
-			sotoxsocket_n(kcb->so, xso);
-			sbtoxsockbuf_n(kcb->so ?
-			    &kcb->so->so_rcv : NULL, xsbrcv);
-			sbtoxsockbuf_n(kcb->so ?
-			    &kcb->so->so_snd : NULL, xsbsnd);
-			sbtoxsockstat_n(kcb->so, xsostats);
-
-			error = SYSCTL_OUT(req, buf, item_size);
+	int error = 0;
+	int n, i;
+	struct xsystmgen xsg;
+	void *buf = NULL;
+	struct kctl *kctl;
+	size_t item_size = ROUNDUP64(sizeof(struct xkctlpcb)) +
+	    ROUNDUP64(sizeof(struct xsocket_n)) +
+	    2 * ROUNDUP64(sizeof(struct xsockbuf_n)) +
+	    ROUNDUP64(sizeof(struct xsockstat_n));
+
+	buf = _MALLOC(item_size, M_TEMP, M_WAITOK | M_ZERO);
+	if (buf == NULL) {
+		return ENOMEM;
+	}
+
+	lck_mtx_lock(ctl_mtx);
+
+	n = kctlstat.kcs_pcbcount;
+
+	if (req->oldptr == USER_ADDR_NULL) {
+		req->oldidx = (n + n / 8) * item_size;
+		goto done;
+	}
+	if (req->newptr != USER_ADDR_NULL) {
+		error = EPERM;
+		goto done;
+	}
+	bzero(&xsg, sizeof(xsg));
+	xsg.xg_len = sizeof(xsg);
+	xsg.xg_count = n;
+	xsg.xg_gen = kctlstat.kcs_gencnt;
+	xsg.xg_sogen = so_gencnt;
+	error = SYSCTL_OUT(req, &xsg, sizeof(xsg));
+	if (error) {
+		goto done;
+	}
+	/*
+	 * We are done if there is no pcb
+	 */
+	if (n == 0) {
+		goto done;
+	}
+
+	i = 0;
+	for (i = 0, kctl = TAILQ_FIRST(&ctl_head);
+	    i < n && kctl != NULL;
+	    kctl = TAILQ_NEXT(kctl, next)) {
+		struct ctl_cb *kcb;
+
+		for (kcb = TAILQ_FIRST(&kctl->kcb_head);
+		    i < n && kcb != NULL;
+		    i++, kcb = TAILQ_NEXT(kcb, next)) {
+			struct xkctlpcb *xk = (struct xkctlpcb *)buf;
+			struct xsocket_n *xso = (struct xsocket_n *)
+			    ADVANCE64(xk, sizeof(*xk));
+			struct xsockbuf_n *xsbrcv = (struct xsockbuf_n *)
+			    ADVANCE64(xso, sizeof(*xso));
+			struct xsockbuf_n *xsbsnd = (struct xsockbuf_n *)
+			    ADVANCE64(xsbrcv, sizeof(*xsbrcv));
+			struct xsockstat_n *xsostats = (struct xsockstat_n *)
+			    ADVANCE64(xsbsnd, sizeof(*xsbsnd));
+
+			bzero(buf, item_size);
+
+			xk->xkp_len = sizeof(struct xkctlpcb);
+			xk->xkp_kind = XSO_KCB;
+			xk->xkp_unit = kcb->sac.sc_unit;
+			xk->xkp_kctpcb = (uint64_t)VM_KERNEL_ADDRPERM(kcb);
+			xk->xkp_kctlref = (uint64_t)VM_KERNEL_ADDRPERM(kctl);
+			xk->xkp_kctlid = kctl->id;
+			strlcpy(xk->xkp_kctlname, kctl->name,
+			    sizeof(xk->xkp_kctlname));
+
+			sotoxsocket_n(kcb->so, xso);
+			sbtoxsockbuf_n(kcb->so ?
+			    &kcb->so->so_rcv : NULL, xsbrcv);
+			sbtoxsockbuf_n(kcb->so ?
+			    &kcb->so->so_snd : NULL, xsbsnd);
+			sbtoxsockstat_n(kcb->so, xsostats);
+
+			error = SYSCTL_OUT(req, buf, item_size);
 		}
 	}
-	if (error == 0) {
-		/*
-		 * Give the user an updated idea of our state.
-		 * If the generation differs from what we told
-		 * her before, she knows that something happened
-		 * while we were processing this request, and it
-		 * might be necessary to retry.
-		 */
-		bzero(&xsg, sizeof (xsg));
-		xsg.xg_len = sizeof (xsg);
-		xsg.xg_count = n;
-		xsg.xg_gen = kctlstat.kcs_gencnt;
-		xsg.xg_sogen = so_gencnt;
-		error = SYSCTL_OUT(req, &xsg, sizeof (xsg));
-		if (error) {
-			goto done;
+	if (error == 0) {
+		/*
+		 * Give the user an updated idea of our state.
+		 * If the generation differs from what we told
+		 * her before, she knows that something happened
+		 * while we were processing this request, and it
+		 * might be necessary to retry.
+		 */
+		bzero(&xsg, sizeof(xsg));
+		xsg.xg_len = sizeof(xsg);
+		xsg.xg_count = n;
+		xsg.xg_gen = kctlstat.kcs_gencnt;
+		xsg.xg_sogen = so_gencnt;
+		error = SYSCTL_OUT(req, &xsg, sizeof(xsg));
+		if (error) {
+			goto done;
 		}
 	}
 done:
-	lck_mtx_unlock(ctl_mtx);
+	lck_mtx_unlock(ctl_mtx);
 
-	return (error);
+	return error;
 }
 
 int
 kctl_getstat SYSCTL_HANDLER_ARGS
 {
 #pragma unused(oidp, arg1, arg2)
-	int error = 0;
+	int error = 0;
 
-	lck_mtx_lock(ctl_mtx);
+	lck_mtx_lock(ctl_mtx);
 
-	if (req->newptr != USER_ADDR_NULL) {
-		error = EPERM;
-		goto done;
+	if (req->newptr != USER_ADDR_NULL) {
+		error = EPERM;
+		goto done;
 	}
-	if (req->oldptr == USER_ADDR_NULL) {
-		req->oldidx = sizeof(struct kctlstat);
-		goto done;
+	if (req->oldptr == USER_ADDR_NULL) {
+		req->oldidx = sizeof(struct kctlstat);
+		goto done;
 	}
 
-	error = SYSCTL_OUT(req, &kctlstat,
-	    MIN(sizeof(struct kctlstat), req->oldlen));
+	error = SYSCTL_OUT(req, &kctlstat,
+	    MIN(sizeof(struct kctlstat), req->oldlen));
 done:
-	lck_mtx_unlock(ctl_mtx);
-	return (error);
+	lck_mtx_unlock(ctl_mtx);
+	return error;
 }
 
 void
 kctl_fill_socketinfo(struct socket *so, struct socket_info *si)
 {
-	struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
-	struct kern_ctl_info *kcsi =
-	    &si->soi_proto.pri_kern_ctl;
-	struct kctl *kctl = kcb->kctl;
+	struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
+	struct kern_ctl_info *kcsi =
+	    &si->soi_proto.pri_kern_ctl;
+	struct kctl *kctl = kcb->kctl;
 
-	si->soi_kind = SOCKINFO_KERN_CTL;
+	si->soi_kind = SOCKINFO_KERN_CTL;
 
-	if (kctl == 0)
-		return;
+	if (kctl == 0) {
+		return;
+	}
 
-	kcsi->kcsi_id = kctl->id;
-	kcsi->kcsi_reg_unit = kctl->reg_unit;
-	kcsi->kcsi_flags = kctl->flags;
-	kcsi->kcsi_recvbufsize = kctl->recvbufsize;
-	kcsi->kcsi_sendbufsize = kctl->sendbufsize;
-	kcsi->kcsi_unit = kcb->unit;
-	strlcpy(kcsi->kcsi_name, kctl->name, MAX_KCTL_NAME);
+	kcsi->kcsi_id = kctl->id;
+	kcsi->kcsi_reg_unit = kctl->reg_unit;
+	kcsi->kcsi_flags = kctl->flags;
+	kcsi->kcsi_recvbufsize = kctl->recvbufsize;
+	kcsi->kcsi_sendbufsize = kctl->sendbufsize;
+	kcsi->kcsi_unit = kcb->sac.sc_unit;
+	strlcpy(kcsi->kcsi_name, kctl->name, MAX_KCTL_NAME);
 }