/*
- * Copyright (c) 2000 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 1999-2011 Apple Inc. All rights reserved.
*
- * @APPLE_LICENSE_HEADER_START@
- *
- * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
* This file contains Original Code and/or Modifications of Original Code
* as defined in and that are subject to the Apple Public Source License
* Version 2.0 (the 'License'). You may not use this file except in
- * compliance with the License. Please obtain a copy of the License at
- * http://www.opensource.apple.com/apsl/ and read it before using this
- * file.
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
+ *
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
*
* The Original Code and all software distributed under the License are
* distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION, ANY WARRANTY OF
* MERCHANTABILITY, QUIET ENJOYMENT OR NON-INFRINGEMENT, AND APPLE HEREBY
* DISCLAIMS ALL SUCH WARRANTIES, INCLUDING WITHOUT LIMITATION, ANY
* WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE,
* QUIET ENJOYMENT OR NON-INFRINGEMENT OF THIRD PARTY RIGHTS.
* Please see the License for the specific language governing rights and
* limitations under the License.
*
- * @APPLE_LICENSE_HEADER_END@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
-/* Copyright (C) 1999 Apple Computer, Inc. */
/*
- * NKE management domain - allows control connections to
- * an NKE and to read/write data.
+ * Kernel Control domain - allows control connections to a kernel control
+ * and to read/write data.
*
+ * Vincent Lubet, 040506
* Christophe Allie, 010928
* Justin C. Walker, 990319
*/
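+
+/*
+ * Illustrative userland sketch of reaching a registered kernel control:
+ * look up the control id by name with CTLIOCGINFO, then connect with a
+ * sockaddr_ctl.  "com.example.mykctl" is a placeholder name and error
+ * checking is omitted.
+ *
+ *     int fd = socket(PF_SYSTEM, SOCK_DGRAM, SYSPROTO_CONTROL);
+ *     struct ctl_info info;
+ *     bzero(&info, sizeof(info));
+ *     strlcpy(info.ctl_name, "com.example.mykctl", sizeof(info.ctl_name));
+ *     ioctl(fd, CTLIOCGINFO, &info);
+ *
+ *     struct sockaddr_ctl sc;
+ *     bzero(&sc, sizeof(sc));
+ *     sc.sc_len = sizeof(sc);
+ *     sc.sc_family = AF_SYSTEM;
+ *     sc.ss_sysaddr = AF_SYS_CONTROL;
+ *     sc.sc_id = info.ctl_id;
+ *     sc.sc_unit = 0;          (0 lets ctl_connect() pick a free unit)
+ *     connect(fd, (struct sockaddr *)&sc, sizeof(sc));
+ */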
#include <sys/domain.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
-#include <net/kext_net.h>
#include <sys/sys_domain.h>
#include <sys/kern_event.h>
#include <sys/kern_control.h>
+#include <sys/kauth.h>
#include <net/if_var.h>
#include <mach/vm_types.h>
-#include <mach/kmod.h>
#include <kern/thread.h>
-
/*
* Definitions and variables for the controls we support
*/
#define CTL_SENDSIZE (2 * 1024) /* default buffer size */
#define CTL_RECVSIZE (8 * 1024) /* default buffer size */
/*
- internal structure maintained for each registered controller
-*/
-struct ctl
-{
- TAILQ_ENTRY(ctl) next; /* controller chain */
- struct socket *skt; /* current controlling socket */
+ * Definitions and variables for the controls we support
+ */
- /* controller information provided when registering */
- u_int32_t id; /* unique nke identifier, provided by DTS */
- u_int32_t unit; /* unit number for use by the nke */
- void *userdata; /* for private use by nke */
-
- /* misc communication information */
- u_int32_t flags; /* support flags */
- u_int32_t recvbufsize; /* request more than the default buffer size */
- u_int32_t sendbufsize; /* request more than the default buffer size */
-
- /* Dispatch functions */
- int (*connect)(kern_ctl_ref, void *); /* Make contact */
- void (*disconnect)(kern_ctl_ref, void *); /* Break contact */
- int (*write) (kern_ctl_ref, void *, struct mbuf *); /* Send data to nke */
- int (*set)(kern_ctl_ref, void *, int, void *, size_t ); /* set ctl configuration */
- int (*get)(kern_ctl_ref, void *, int, void *, size_t *); /* get ctl configuration */
-};
+static u_int32_t ctl_maxunit = 65536;
+static lck_grp_attr_t *ctl_lck_grp_attr = 0;
+static lck_attr_t *ctl_lck_attr = 0;
+static lck_grp_t *ctl_lck_grp = 0;
+static lck_mtx_t *ctl_mtx;
/* all the controllers are chained */
-TAILQ_HEAD(, ctl) ctl_head;
-
-int ctl_attach(struct socket *, int, struct proc *);
-int ctl_connect(struct socket *, struct sockaddr *, struct proc *);
-int ctl_disconnect(struct socket *);
-int ctl_ioctl(struct socket *so, u_long cmd, caddr_t data,
+TAILQ_HEAD(kctl_list, kctl) ctl_head;
+
+static int ctl_attach(struct socket *, int, struct proc *);
+static int ctl_detach(struct socket *);
+static int ctl_sofreelastref(struct socket *so);
+static int ctl_connect(struct socket *, struct sockaddr *, struct proc *);
+static int ctl_disconnect(struct socket *);
+static int ctl_ioctl(struct socket *so, u_long cmd, caddr_t data,
struct ifnet *ifp, struct proc *p);
-int ctl_send(struct socket *, int, struct mbuf *,
+static int ctl_send(struct socket *, int, struct mbuf *,
struct sockaddr *, struct mbuf *, struct proc *);
-int ctl_ctloutput(struct socket *, struct sockopt *);
+static int ctl_ctloutput(struct socket *, struct sockopt *);
+static int ctl_peeraddr(struct socket *so, struct sockaddr **nam);
-struct ctl *ctl_find(u_int32_t, u_int32_t unit);
-void ctl_post_msg(u_long event_code, u_int32_t id, u_int32_t unit);
+static struct kctl *ctl_find_by_name(const char *);
+static struct kctl *ctl_find_by_id_unit(u_int32_t id, u_int32_t unit);
+static struct socket *kcb_find_socket(struct kctl *, u_int32_t unit);
+static struct ctl_cb *kcb_find(struct kctl *, u_int32_t unit);
+static void ctl_post_msg(u_int32_t event_code, u_int32_t id);
-struct pr_usrreqs ctl_usrreqs =
+static int ctl_lock(struct socket *, int, void *);
+static int ctl_unlock(struct socket *, int, void *);
+static lck_mtx_t * ctl_getlock(struct socket *, int);
+
+static struct pr_usrreqs ctl_usrreqs =
{
pru_abort_notsupp, pru_accept_notsupp, ctl_attach, pru_bind_notsupp,
- ctl_connect, pru_connect2_notsupp, ctl_ioctl, pru_detach_notsupp,
- ctl_disconnect, pru_listen_notsupp, pru_peeraddr_notsupp,
+ ctl_connect, pru_connect2_notsupp, ctl_ioctl, ctl_detach,
+ ctl_disconnect, pru_listen_notsupp, ctl_peeraddr,
pru_rcvd_notsupp, pru_rcvoob_notsupp, ctl_send,
pru_sense_null, pru_shutdown_notsupp, pru_sockaddr_notsupp,
- sosend, soreceive, sopoll
+ sosend, soreceive, pru_sopoll_notsupp
};
-struct protosw ctlsw =
+static struct protosw kctlswk_dgram =
{
- SOCK_DGRAM, &systemdomain, SYSPROTO_CONTROL, PR_ATOMIC|PR_CONNREQUIRED,
+ SOCK_DGRAM, &systemdomain, SYSPROTO_CONTROL,
+ PR_ATOMIC|PR_CONNREQUIRED|PR_PCBLOCK,
NULL, NULL, NULL, ctl_ctloutput,
NULL, NULL,
- NULL, NULL, NULL, NULL, &ctl_usrreqs
+ NULL, NULL, NULL, NULL, &ctl_usrreqs,
+ ctl_lock, ctl_unlock, ctl_getlock, { 0, 0 } , 0, { 0 }
};
+static struct protosw kctlswk_stream =
+{
+ SOCK_STREAM, &systemdomain, SYSPROTO_CONTROL,
+ PR_CONNREQUIRED|PR_PCBLOCK,
+ NULL, NULL, NULL, ctl_ctloutput,
+ NULL, NULL,
+ NULL, NULL, NULL, NULL, &ctl_usrreqs,
+ ctl_lock, ctl_unlock, ctl_getlock, { 0, 0 } , 0, { 0 }
+};
+
+
/*
- * Install the protosw's for the NKE manager.
+ * Install the protosw entries for the Kernel Control manager.
*/
-int
+__private_extern__ int
kern_control_init(void)
{
- int retval;
-
- retval = net_add_proto(&ctlsw, &systemdomain);
- if (retval) {
- log(LOG_WARNING, "Can't install Kernel Controller Manager (%d)\n", retval);
- return retval;
- }
+ int error = 0;
+
+ ctl_lck_grp_attr = lck_grp_attr_alloc_init();
+ if (ctl_lck_grp_attr == 0) {
+ printf(": lck_grp_attr_alloc_init failed\n");
+ error = ENOMEM;
+ goto done;
+ }
+
+ ctl_lck_grp = lck_grp_alloc_init("Kernel Control Protocol", ctl_lck_grp_attr);
+ if (ctl_lck_grp == 0) {
+ printf("kern_control_init: lck_grp_alloc_init failed\n");
+ error = ENOMEM;
+ goto done;
+ }
+
+ ctl_lck_attr = lck_attr_alloc_init();
+ if (ctl_lck_attr == 0) {
+ printf("kern_control_init: lck_attr_alloc_init failed\n");
+ error = ENOMEM;
+ goto done;
+ }
+
+ ctl_mtx = lck_mtx_alloc_init(ctl_lck_grp, ctl_lck_attr);
+ if (ctl_mtx == 0) {
+ printf("kern_control_init: lck_mtx_alloc_init failed\n");
+ error = ENOMEM;
+ goto done;
+ }
+ TAILQ_INIT(&ctl_head);
+
+ error = net_add_proto(&kctlswk_dgram, &systemdomain);
+ if (error) {
+ log(LOG_WARNING, "kern_control_init: net_add_proto dgram failed (%d)\n", error);
+ }
+ error = net_add_proto(&kctlswk_stream, &systemdomain);
+ if (error) {
+ log(LOG_WARNING, "kern_control_init: net_add_proto stream failed (%d)\n", error);
+ }
+
+ done:
+ if (error != 0) {
+ if (ctl_mtx) {
+ lck_mtx_free(ctl_mtx, ctl_lck_grp);
+ ctl_mtx = 0;
+ }
+ if (ctl_lck_grp) {
+ lck_grp_free(ctl_lck_grp);
+ ctl_lck_grp = 0;
+ }
+ if (ctl_lck_grp_attr) {
+ lck_grp_attr_free(ctl_lck_grp_attr);
+ ctl_lck_grp_attr = 0;
+ }
+ if (ctl_lck_attr) {
+ lck_attr_free(ctl_lck_attr);
+ ctl_lck_attr = 0;
+ }
+ }
+ return error;
+}
- TAILQ_INIT(&ctl_head);
-
- return(KERN_SUCCESS);
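+/*
+ * Free a per-socket control block and its mutex, if allocated.
+ */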
+static void
+kcb_delete(struct ctl_cb *kcb)
+{
+ if (kcb != 0) {
+ if (kcb->mtx != 0)
+ lck_mtx_free(kcb->mtx, ctl_lck_grp);
+ FREE(kcb, M_TEMP);
+ }
}
/*
* Kernel Controller user-request functions
+ * The attach function must exist and succeed; a detach function is not
+ * required.  A pcb is always allocated to hold the per-socket mutex.
*/
-int
-ctl_attach (struct socket *so, int proto, struct proc *p)
+static int
+ctl_attach(__unused struct socket *so, __unused int proto, __unused struct proc *p)
{
- /*
- * attach function must exist and succeed
- * detach not necessary since we use
- * connect/disconnect to handle so_pcb
- */
+ int error = 0;
+ struct ctl_cb *kcb = 0;
+
+ MALLOC(kcb, struct ctl_cb *, sizeof(struct ctl_cb), M_TEMP, M_WAITOK);
+ if (kcb == NULL) {
+ error = ENOMEM;
+ goto quit;
+ }
+ bzero(kcb, sizeof(struct ctl_cb));
+
+ kcb->mtx = lck_mtx_alloc_init(ctl_lck_grp, ctl_lck_attr);
+ if (kcb->mtx == NULL) {
+ error = ENOMEM;
+ goto quit;
+ }
+ kcb->so = so;
+ so->so_pcb = (caddr_t)kcb;
+quit:
+ if (error != 0) {
+ kcb_delete(kcb);
+ kcb = 0;
+ }
+ return error;
+}
+
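+/*
+ * Called when the last use count on the socket is released (see ctl_unlock()):
+ * unlink the control block from its kctl, free it, and release the socket.
+ */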
+static int
+ctl_sofreelastref(struct socket *so)
+{
+ struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
+
+ so->so_pcb = 0;
+
+ if (kcb != 0) {
+ struct kctl *kctl;
+ if ((kctl = kcb->kctl) != 0) {
+ lck_mtx_lock(ctl_mtx);
+ TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
+ lck_mtx_unlock(ctl_mtx);
+ }
+ kcb_delete(kcb);
+ }
+ sofreelastref(so, 1);
return 0;
}
-int
-ctl_connect(struct socket *so, struct sockaddr *nam, struct proc *p)
-{
- struct ctl *ctl;
- int error = 0;
- struct sockaddr_ctl *sa = (struct sockaddr_ctl *)nam;
+static int
+ctl_detach(struct socket *so)
+{
+ struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
+
+ if (kcb == 0)
+ return 0;
- ctl = ctl_find(sa->sc_id, sa->sc_unit);
- if (ctl == NULL)
- return(EADDRNOTAVAIL);
+ soisdisconnected(so);
+ so->so_flags |= SOF_PCBCLEARING;
+ return 0;
+}
- if (ctl->skt != NULL)
- return(EBUSY);
- error = soreserve(so,
- ctl->sendbufsize ? ctl->sendbufsize : CTL_SENDSIZE,
- ctl->recvbufsize ? ctl->recvbufsize : CTL_RECVSIZE);
- if (error)
- return error;
+static int
+ctl_connect(struct socket *so, struct sockaddr *nam, struct proc *p)
+{
+ struct kctl *kctl;
+ int error = 0;
+ struct sockaddr_ctl sa;
+ struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
+ struct ctl_cb *kcb_next = NULL;
- ctl->skt = so;
+ if (kcb == 0)
+ panic("ctl_connect so_pcb null\n");
- if (ctl->flags & CTL_FLAG_PRIVILEGED) {
- if (p == 0)
- return(EPERM);
- if (error = suser(p->p_ucred, &p->p_acflag))
- return error;
- }
+ if (nam->sa_len != sizeof(struct sockaddr_ctl))
+ return(EINVAL);
- if (ctl->connect)
- error = (*ctl->connect)(ctl, ctl->userdata);
- if (error) {
- ctl->skt = NULL;
- return error;
+ bcopy(nam, &sa, sizeof(struct sockaddr_ctl));
+
+ lck_mtx_lock(ctl_mtx);
+ kctl = ctl_find_by_id_unit(sa.sc_id, sa.sc_unit);
+ if (kctl == NULL) {
+ lck_mtx_unlock(ctl_mtx);
+ return ENOENT;
+ }
+
+ if (((kctl->flags & CTL_FLAG_REG_SOCK_STREAM) && (so->so_type != SOCK_STREAM)) ||
+ (!(kctl->flags & CTL_FLAG_REG_SOCK_STREAM) && (so->so_type != SOCK_DGRAM))) {
+ lck_mtx_unlock(ctl_mtx);
+ return EPROTOTYPE;
+ }
+
+ if (kctl->flags & CTL_FLAG_PRIVILEGED) {
+ if (p == 0) {
+ lck_mtx_unlock(ctl_mtx);
+ return(EINVAL);
+ }
+ if (kauth_cred_issuser(kauth_cred_get()) == 0) {
+ lck_mtx_unlock(ctl_mtx);
+ return EPERM;
+ }
+ }
+
+ if ((kctl->flags & CTL_FLAG_REG_ID_UNIT) || sa.sc_unit != 0) {
+ if (kcb_find(kctl, sa.sc_unit) != NULL) {
+ lck_mtx_unlock(ctl_mtx);
+ return EBUSY;
+ }
+ } else {
+ /* Find an unused ID, assumes control IDs are listed in order */
+ u_int32_t unit = 1;
+
+ TAILQ_FOREACH(kcb_next, &kctl->kcb_head, next) {
+ if (kcb_next->unit > unit) {
+ /* Found a gap, lets fill it in */
+ break;
+ }
+ unit = kcb_next->unit + 1;
+ if (unit == ctl_maxunit)
+ break;
+ }
+
+ if (unit == ctl_maxunit) {
+ lck_mtx_unlock(ctl_mtx);
+ return EBUSY;
+ }
+
+ sa.sc_unit = unit;
+ }
+
+ kcb->unit = sa.sc_unit;
+ kcb->kctl = kctl;
+ if (kcb_next != NULL) {
+ TAILQ_INSERT_BEFORE(kcb_next, kcb, next);
}
+ else {
+ TAILQ_INSERT_TAIL(&kctl->kcb_head, kcb, next);
+ }
+ lck_mtx_unlock(ctl_mtx);
+
+ error = soreserve(so, kctl->sendbufsize, kctl->recvbufsize);
+ if (error)
+ goto done;
+ soisconnecting(so);
- so->so_pcb = (caddr_t)ctl;
- soisconnected(so);
+ socket_unlock(so, 0);
+ error = (*kctl->connect)(kctl, &sa, &kcb->userdata);
+ socket_lock(so, 0);
+ if (error)
+ goto end;
+ soisconnected(so);
+
+end:
+ if (error && kctl->disconnect) {
+ socket_unlock(so, 0);
+ (*kctl->disconnect)(kctl, kcb->unit, kcb->userdata);
+ socket_lock(so, 0);
+ }
+done:
+ if (error) {
+ soisdisconnected(so);
+ lck_mtx_lock(ctl_mtx);
+ kcb->kctl = 0;
+ kcb->unit = 0;
+ TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
+ lck_mtx_unlock(ctl_mtx);
+ }
return error;
}
-int
+static int
ctl_disconnect(struct socket *so)
{
- struct ctl *ctl;
+ struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
- if ((ctl = (struct ctl *)so->so_pcb))
- {
- if (ctl->disconnect)
- (*ctl->disconnect)(ctl, ctl->userdata);
- ctl->skt = NULL;
- so->so_pcb = NULL;
+ if ((kcb = (struct ctl_cb *)so->so_pcb)) {
+ struct kctl *kctl = kcb->kctl;
+
+ if (kctl && kctl->disconnect) {
+ socket_unlock(so, 0);
+ (*kctl->disconnect)(kctl, kcb->unit, kcb->userdata);
+ socket_lock(so, 0);
+ }
+
soisdisconnected(so);
+
+ socket_unlock(so, 0);
+ lck_mtx_lock(ctl_mtx);
+ kcb->kctl = 0;
+ kcb->unit = 0;
+ while (kcb->usecount != 0) {
+ msleep(&kcb->usecount, ctl_mtx, 0, "kcb->usecount", 0);
+ }
+ TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
+ lck_mtx_unlock(ctl_mtx);
+ socket_lock(so, 0);
}
return 0;
}
-int
-ctl_send(struct socket *so, int flags, struct mbuf *m,
- struct sockaddr *addr, struct mbuf *control,
- struct proc *p)
+static int
+ctl_peeraddr(struct socket *so, struct sockaddr **nam)
{
- struct ctl *ctl = (struct ctl *)so->so_pcb;
- int error = 0;
+ struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
+ struct kctl *kctl;
+ struct sockaddr_ctl sc;
+
+ if (kcb == NULL) /* sanity check */
+ return(ENOTCONN);
+
+ if ((kctl = kcb->kctl) == NULL)
+ return(EINVAL);
+
+ bzero(&sc, sizeof(struct sockaddr_ctl));
+ sc.sc_len = sizeof(struct sockaddr_ctl);
+ sc.sc_family = AF_SYSTEM;
+ sc.ss_sysaddr = AF_SYS_CONTROL;
+ sc.sc_id = kctl->id;
+ sc.sc_unit = kcb->unit;
+
+ *nam = dup_sockaddr((struct sockaddr *)&sc, 1);
+
+ return 0;
+}
- if (ctl == NULL)
- return(ENOTCONN);
-
- if (ctl->write)
- error = (*ctl->write)(ctl, ctl->userdata, m);
-
- return error;
+static int
+ctl_send(struct socket *so, int flags, struct mbuf *m,
+ __unused struct sockaddr *addr, struct mbuf *control,
+ __unused struct proc *p)
+{
+ int error = 0;
+ struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
+ struct kctl *kctl;
+
+ if (control) m_freem(control);
+
+ if (kcb == NULL) /* sanity check */
+ error = ENOTCONN;
+
+ if (error == 0 && (kctl = kcb->kctl) == NULL)
+ error = EINVAL;
+
+ if (error == 0 && kctl->send) {
+ socket_unlock(so, 0);
+ error = (*kctl->send)(kctl, kcb->unit, kcb->userdata, m, flags);
+ socket_lock(so, 0);
+ } else {
+ m_freem(m);
+ if (error == 0)
+ error = ENOTSUP;
+ }
+ return error;
}
-int
-ctl_enqueuembuf(void *ctlref, struct mbuf *m, u_int32_t flags)
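+/*
+ * Append an mbuf chain to the receive buffer of the socket attached to the
+ * given control unit, waking the reader unless CTL_DATA_NOWAKEUP is set.
+ */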
+errno_t
+ctl_enqueuembuf(void *kctlref, u_int32_t unit, struct mbuf *m, u_int32_t flags)
{
- struct ctl *ctl = (struct ctl *)ctlref;
- struct socket *so = (struct socket *)ctl->skt;
+ struct socket *so;
+ errno_t error = 0;
+ struct kctl *kctl = (struct kctl *)kctlref;
+
+ if (kctl == NULL)
+ return EINVAL;
+
+ so = kcb_find_socket(kctl, unit);
+
+ if (so == NULL)
+ return EINVAL;
+
+ if (sbspace(&so->so_rcv) < m->m_pkthdr.len) {
+ error = ENOBUFS;
+ goto bye;
+ }
+ if ((flags & CTL_DATA_EOR))
+ m->m_flags |= M_EOR;
+ if (sbappend(&so->so_rcv, m) && (flags & CTL_DATA_NOWAKEUP) == 0)
+ sorwakeup(so);
+bye:
+ socket_unlock(so, 1);
+ return error;
+}
- if (ctl == NULL) /* sanity check */
- return(EINVAL);
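+/*
+ * Like ctl_enqueuembuf(), but first copies the caller's buffer into a newly
+ * allocated mbuf chain.
+ */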
+errno_t
+ctl_enqueuedata(void *kctlref, u_int32_t unit, void *data, size_t len, u_int32_t flags)
+{
+ struct socket *so;
+ struct mbuf *m;
+ errno_t error = 0;
+ struct kctl *kctl = (struct kctl *)kctlref;
+ unsigned int num_needed;
+ struct mbuf *n;
+ size_t curlen = 0;
+
+ if (kctlref == NULL)
+ return EINVAL;
+
+ so = kcb_find_socket(kctl, unit);
+ if (so == NULL)
+ return EINVAL;
+
+ if (sbspace(&so->so_rcv) < (int)len) {
+ error = ENOBUFS;
+ goto bye;
+ }
+
+ num_needed = 1;
+ m = m_allocpacket_internal(&num_needed, len, NULL, M_NOWAIT, 1, 0);
+ if (m == NULL) {
+ printf("ctl_enqueuedata: m_allocpacket_internal(%lu) failed\n", len);
+ error = ENOBUFS;
+ goto bye;
+ }
+
+ for (n = m; n != NULL; n = n->m_next) {
+ size_t mlen = mbuf_maxlen(n);
+
+ if (mlen + curlen > len)
+ mlen = len - curlen;
+ n->m_len = mlen;
+ bcopy((char *)data + curlen, n->m_data, mlen);
+ curlen += mlen;
+ }
+ mbuf_pkthdr_setlen(m, curlen);
+
+ if ((flags & CTL_DATA_EOR))
+ m->m_flags |= M_EOR;
+ if (sbappend(&so->so_rcv, m) && (flags & CTL_DATA_NOWAKEUP) == 0)
+ sorwakeup(so);
+bye:
+ socket_unlock(so, 1);
+ return error;
+}
- if (so == NULL)
- return(ENOTCONN);
- if (sbspace(&so->so_rcv) < m->m_pkthdr.len)
- return(ENOBUFS);
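+/*
+ * Report how much room is left in the receive buffer of the socket attached
+ * to the given control unit.
+ */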
+errno_t
+ctl_getenqueuespace(kern_ctl_ref kctlref, u_int32_t unit, size_t *space)
+{
+ struct kctl *kctl = (struct kctl *)kctlref;
+ struct socket *so;
+ long avail;
+
+ if (kctlref == NULL || space == NULL)
+ return EINVAL;
+
+ so = kcb_find_socket(kctl, unit);
+ if (so == NULL)
+ return EINVAL;
+
+ avail = sbspace(&so->so_rcv);
+ *space = (avail < 0) ? 0 : avail;
+ socket_unlock(so, 1);
+
+ return 0;
+}
- sbappend(&so->so_rcv, m);
- if ((flags & CTL_DATA_NOWAKEUP) == 0)
- sorwakeup(so);
- return 0;
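+/*
+ * Dispatch SYSPROTO_CONTROL socket options to the control's setopt/getopt
+ * callbacks; the socket lock is dropped while the callbacks run.
+ */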
+static int
+ctl_ctloutput(struct socket *so, struct sockopt *sopt)
+{
+ struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
+ struct kctl *kctl;
+ int error = 0;
+ void *data;
+ size_t len;
+
+ if (sopt->sopt_level != SYSPROTO_CONTROL) {
+ return(EINVAL);
+ }
+
+ if (kcb == NULL) /* sanity check */
+ return(ENOTCONN);
+
+ if ((kctl = kcb->kctl) == NULL)
+ return(EINVAL);
+
+ switch (sopt->sopt_dir) {
+ case SOPT_SET:
+ if (kctl->setopt == NULL)
+ return(ENOTSUP);
+ if (sopt->sopt_valsize == 0) {
+ data = NULL;
+ } else {
+ MALLOC(data, void *, sopt->sopt_valsize, M_TEMP, M_WAITOK);
+ if (data == NULL)
+ return(ENOMEM);
+ error = sooptcopyin(sopt, data, sopt->sopt_valsize, sopt->sopt_valsize);
+ }
+ if (error == 0) {
+ socket_unlock(so, 0);
+ error = (*kctl->setopt)(kcb->kctl, kcb->unit, kcb->userdata, sopt->sopt_name,
+ data, sopt->sopt_valsize);
+ socket_lock(so, 0);
+ }
+ FREE(data, M_TEMP);
+ break;
+
+ case SOPT_GET:
+ if (kctl->getopt == NULL)
+ return(ENOTSUP);
+ data = NULL;
+ if (sopt->sopt_valsize && sopt->sopt_val) {
+ MALLOC(data, void *, sopt->sopt_valsize, M_TEMP, M_WAITOK);
+ if (data == NULL)
+ return(ENOMEM);
+ /* 4108337 - copy in data for get socket option */
+ error = sooptcopyin(sopt, data, sopt->sopt_valsize, sopt->sopt_valsize);
+ }
+ len = sopt->sopt_valsize;
+ socket_unlock(so, 0);
+ error = (*kctl->getopt)(kcb->kctl, kcb->unit, kcb->userdata, sopt->sopt_name,
+ data, &len);
+ if (data != NULL && len > sopt->sopt_valsize)
+ panic_plain("ctl_ctloutput: ctl %s returned len (%lu) > sopt_valsize (%lu)\n",
+ kcb->kctl->name, len, sopt->sopt_valsize);
+ socket_lock(so, 0);
+ if (error == 0) {
+ if (data != NULL)
+ error = sooptcopyout(sopt, data, len);
+ else
+ sopt->sopt_valsize = len;
+ }
+ if (data != NULL)
+ FREE(data, M_TEMP);
+ break;
+ }
+ return error;
}
-int
-ctl_enqueuedata(void *ctlref, void *data, size_t len, u_int32_t flags)
+static int
+ctl_ioctl(__unused struct socket *so, u_long cmd, caddr_t data,
+ __unused struct ifnet *ifp, __unused struct proc *p)
{
- struct ctl *ctl = (struct ctl *)ctlref;
- struct socket *so = (struct socket *)ctl->skt;
- struct mbuf *m;
+ int error = ENOTSUP;
+
+ switch (cmd) {
+ /* get the number of controllers */
+ case CTLIOCGCOUNT: {
+ struct kctl *kctl;
+ u_int32_t n = 0;
+
+ lck_mtx_lock(ctl_mtx);
+ TAILQ_FOREACH(kctl, &ctl_head, next)
+ n++;
+ lck_mtx_unlock(ctl_mtx);
+
+ bcopy(&n, data, sizeof (n));
+ error = 0;
+ break;
+ }
+ case CTLIOCGINFO: {
+ struct ctl_info ctl_info;
+ struct kctl *kctl = 0;
+ size_t name_len;
+
+ bcopy(data, &ctl_info, sizeof (ctl_info));
+ name_len = strnlen(ctl_info.ctl_name, MAX_KCTL_NAME);
+
+ if (name_len == 0 || name_len + 1 > MAX_KCTL_NAME) {
+ error = EINVAL;
+ break;
+ }
+ lck_mtx_lock(ctl_mtx);
+ kctl = ctl_find_by_name(ctl_info.ctl_name);
+ lck_mtx_unlock(ctl_mtx);
+ if (kctl == 0) {
+ error = ENOENT;
+ break;
+ }
+ ctl_info.ctl_id = kctl->id;
+ bcopy(&ctl_info, data, sizeof (ctl_info));
+ error = 0;
+ break;
+ }
+
+ /* add controls to get list of NKEs */
+
+ }
+
+ return error;
+}
- if (ctl == NULL) /* sanity check */
- return(EINVAL);
+/*
+ * Register/unregister a kernel control
+ */
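+/*
+ * Illustrative kernel-side sketch of registering a control; the name and the
+ * callback identifiers are placeholders and error handling is omitted:
+ *
+ *     struct kern_ctl_reg reg;
+ *     kern_ctl_ref ref;
+ *
+ *     bzero(&reg, sizeof(reg));
+ *     strlcpy(reg.ctl_name, "com.example.mykctl", sizeof(reg.ctl_name));
+ *     reg.ctl_connect = my_connect;        (required, see below)
+ *     reg.ctl_disconnect = my_disconnect;
+ *     reg.ctl_send = my_send;
+ *     (void) ctl_register(&reg, &ref);
+ */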
+errno_t
+ctl_register(struct kern_ctl_reg *userkctl, kern_ctl_ref *kctlref)
+{
+ struct kctl *kctl = NULL;
+ struct kctl *kctl_next = NULL;
+ u_int32_t id = 1;
+ size_t name_len;
+
+ if (userkctl == NULL) /* sanity check */
+ return(EINVAL);
+ if (userkctl->ctl_connect == NULL)
+ return(EINVAL);
+ name_len = strlen(userkctl->ctl_name);
+ if (name_len == 0 || name_len + 1 > MAX_KCTL_NAME)
+ return(EINVAL);
+
+ MALLOC(kctl, struct kctl *, sizeof(*kctl), M_TEMP, M_WAITOK);
+ if (kctl == NULL)
+ return(ENOMEM);
+ bzero((char *)kctl, sizeof(*kctl));
+
+ lck_mtx_lock(ctl_mtx);
+
+ /*
+ * Kernel Control IDs
+ *
+ * CTL_FLAG_REG_ID_UNIT indicates the control ID and unit number are
+ * static. If they do not exist, add them to the list in order. If the
+ * flag is not set, we must find a new unique value. We assume the
+ * list is in order. We find the last item in the list and add one. If
+ * this leads to wrapping the id around, we start at the front of the
+ * list and look for a gap.
+ */
+
+ if ((userkctl->ctl_flags & CTL_FLAG_REG_ID_UNIT) == 0) {
+ /* Must dynamically assign an unused ID */
+
+ /* Verify the same name isn't already registered */
+ if (ctl_find_by_name(userkctl->ctl_name) != NULL) {
+ lck_mtx_unlock(ctl_mtx);
+ FREE(kctl, M_TEMP);
+ return(EEXIST);
+ }
+
+ /* Start with 1 in case the list is empty */
+ id = 1;
+ kctl_next = TAILQ_LAST(&ctl_head, kctl_list);
+
+ if (kctl_next != NULL) {
+ /* List was not empty, add one to the last item in the list */
+ id = kctl_next->id + 1;
+ kctl_next = NULL;
+
+ /*
+ * If this wrapped the id number, start looking at the front
+ * of the list for an unused id.
+ */
+ if (id == 0) {
+ /* Find the next unused ID */
+ id = 1;
+
+ TAILQ_FOREACH(kctl_next, &ctl_head, next) {
+ if (kctl_next->id > id) {
+ /* We found a gap */
+ break;
+ }
+
+ id = kctl_next->id + 1;
+ }
+ }
+ }
+
+ userkctl->ctl_id = id;
+ kctl->id = id;
+ kctl->reg_unit = -1;
+ } else {
+ TAILQ_FOREACH(kctl_next, &ctl_head, next) {
+ if (kctl_next->id > userkctl->ctl_id)
+ break;
+ }
+
+ if (ctl_find_by_id_unit(userkctl->ctl_id, userkctl->ctl_unit) != NULL) {
+ lck_mtx_unlock(ctl_mtx);
+ FREE(kctl, M_TEMP);
+ return(EEXIST);
+ }
+ kctl->id = userkctl->ctl_id;
+ kctl->reg_unit = userkctl->ctl_unit;
+ }
+ strlcpy(kctl->name, userkctl->ctl_name, MAX_KCTL_NAME);
+ kctl->flags = userkctl->ctl_flags;
+
+ /* Let the caller know the default send and receive sizes */
+ if (userkctl->ctl_sendsize == 0)
+ userkctl->ctl_sendsize = CTL_SENDSIZE;
+ kctl->sendbufsize = userkctl->ctl_sendsize;
+
+ if (userkctl->ctl_recvsize == 0)
+ userkctl->ctl_recvsize = CTL_RECVSIZE;
+ kctl->recvbufsize = userkctl->ctl_recvsize;
+
+ kctl->connect = userkctl->ctl_connect;
+ kctl->disconnect = userkctl->ctl_disconnect;
+ kctl->send = userkctl->ctl_send;
+ kctl->setopt = userkctl->ctl_setopt;
+ kctl->getopt = userkctl->ctl_getopt;
+
+ TAILQ_INIT(&kctl->kcb_head);
+
+ if (kctl_next)
+ TAILQ_INSERT_BEFORE(kctl_next, kctl, next);
+ else
+ TAILQ_INSERT_TAIL(&ctl_head, kctl, next);
+
+ lck_mtx_unlock(ctl_mtx);
+
+ *kctlref = kctl;
+
+ ctl_post_msg(KEV_CTL_REGISTERED, kctl->id);
+ return(0);
+}
- if (so == NULL)
- return(ENOTCONN);
+errno_t
+ctl_deregister(void *kctlref)
+{
+ struct kctl *kctl;
- if (len > MCLBYTES)
- return(EMSGSIZE);
+ if (kctlref == NULL) /* sanity check */
+ return(EINVAL);
- if (sbspace(&so->so_rcv) < len)
- return(ENOBUFS);
-
- if ((m = m_gethdr(M_NOWAIT, MT_DATA)) == NULL)
- return (ENOBUFS);
-
- if (len > MHLEN) {
- MCLGET(m, M_NOWAIT);
- if (!(m->m_flags & M_EXT)) {
- m_freem(m);
- return(ENOBUFS);
- }
+ lck_mtx_lock(ctl_mtx);
+ TAILQ_FOREACH(kctl, &ctl_head, next) {
+ if (kctl == (struct kctl *)kctlref)
+ break;
}
+ if (kctl != (struct kctl *)kctlref) {
+ lck_mtx_unlock(ctl_mtx);
+ return EINVAL;
+ }
+ if (!TAILQ_EMPTY(&kctl->kcb_head)) {
+ lck_mtx_unlock(ctl_mtx);
+ return EBUSY;
+ }
+
+ TAILQ_REMOVE(&ctl_head, kctl, next);
- bcopy(data, mtod(m, void *), len);
+ lck_mtx_unlock(ctl_mtx);
- sbappend(&so->so_rcv, m);
- if ((flags & CTL_DATA_NOWAKEUP) == 0)
- sorwakeup(so);
- return 0;
+ ctl_post_msg(KEV_CTL_DEREGISTERED, kctl->id);
+ FREE(kctl, M_TEMP);
+ return(0);
}
-int
-ctl_ctloutput(struct socket *so, struct sockopt *sopt)
-{
- struct ctl *ctl = (struct ctl *)so->so_pcb;
- int error = 0, s;
- void *data;
- size_t len;
+/*
+ * Must be called with the global ctl_mtx lock held
+ */
+static struct kctl *
+ctl_find_by_name(const char *name)
+{
+ struct kctl *kctl;
- if (sopt->sopt_level != SYSPROTO_CONTROL) {
- return(EINVAL);
- }
+ TAILQ_FOREACH(kctl, &ctl_head, next)
+ if (strncmp(kctl->name, name, sizeof(kctl->name)) == 0)
+ return kctl;
- if (ctl == NULL)
- return(ENOTCONN);
-
- switch (sopt->sopt_dir) {
- case SOPT_SET:
- if (ctl->set == NULL)
- return(ENOTSUP);
- MALLOC(data, void *, sopt->sopt_valsize, M_TEMP, M_WAITOK);
- if (data == NULL)
- return(ENOMEM);
- error = sooptcopyin(sopt, data, sopt->sopt_valsize, sopt->sopt_valsize);
- if (error == 0)
- error = (*ctl->set)(ctl, ctl->userdata, sopt->sopt_name, data, sopt->sopt_valsize);
- FREE(data, M_TEMP);
- break;
+ return NULL;
+}
- case SOPT_GET:
- if (ctl->get == NULL)
- return(ENOTSUP);
- data = NULL;
- if (sopt->sopt_valsize && sopt->sopt_val) {
- MALLOC(data, void *, sopt->sopt_valsize, M_TEMP, M_WAITOK);
- if (data == NULL)
- return(ENOMEM);
- }
- len = sopt->sopt_valsize;
- error = (*ctl->get)(ctl, ctl->userdata, sopt->sopt_name, data, &len);
- if (error == 0) {
- if (data != NULL)
- error = sooptcopyout(sopt, data, len);
- else
- sopt->sopt_valsize = len;
- }
- if (data != NULL)
- FREE(data, M_TEMP);
- break;
- }
- return error;
+u_int32_t
+ctl_id_by_name(const char *name)
+{
+ u_int32_t ctl_id = 0;
+
+ lck_mtx_lock(ctl_mtx);
+ struct kctl *kctl = ctl_find_by_name(name);
+ if (kctl) ctl_id = kctl->id;
+ lck_mtx_unlock(ctl_mtx);
+
+ return ctl_id;
}
-int ctl_ioctl(struct socket *so, u_long cmd, caddr_t data,
- struct ifnet *ifp, struct proc *p)
+errno_t
+ctl_name_by_id(
+ u_int32_t id,
+ char *out_name,
+ size_t maxsize)
{
- int error = ENOTSUP, s, n;
- struct ctl *ctl = (struct ctl *)so->so_pcb;
-
- switch (cmd) {
- /* get the number of controllers */
- case CTLIOCGCOUNT:
- n = 0;
- TAILQ_FOREACH(ctl, &ctl_head, next)
- n++;
- *(u_int32_t *)data = n;
- error = 0;
+ int found = 0;
+
+ lck_mtx_lock(ctl_mtx);
+ struct kctl *kctl;
+ TAILQ_FOREACH(kctl, &ctl_head, next) {
+ if (kctl->id == id)
break;
-
-
- /* add controls to get list of NKEs */
-
}
- return error;
+ if (kctl && kctl->name)
+ {
+ if (maxsize > MAX_KCTL_NAME)
+ maxsize = MAX_KCTL_NAME;
+ strlcpy(out_name, kctl->name, maxsize);
+ found = 1;
+ }
+ lck_mtx_unlock(ctl_mtx);
+
+ return found ? 0 : ENOENT;
}
/*
- * Register/unregister a NKE
+ * Must be called with the global ctl_mtx lock held.  For controls registered
+ * without CTL_FLAG_REG_ID_UNIT the unit number is ignored.
*/
-int
-ctl_register(struct kern_ctl_reg *userctl, void *userdata, kern_ctl_ref *ctlref)
-{
- struct ctl *ctl;
-
- if (userctl == NULL) /* sanity check */
- return(EINVAL);
-
- ctl = ctl_find(userctl->ctl_id, userctl->ctl_unit);
- if (ctl != NULL)
- return(EEXIST);
-
- MALLOC(ctl, struct ctl *, sizeof(*ctl), M_TEMP, M_WAITOK);
- if (ctl == NULL)
- return(ENOMEM);
-
- bzero((char *)ctl, sizeof(*ctl));
-
- ctl->id = userctl->ctl_id;
- ctl->unit = userctl->ctl_unit;
- ctl->flags = userctl->ctl_flags;
- ctl->sendbufsize = userctl->ctl_sendsize;
- ctl->recvbufsize = userctl->ctl_recvsize;
- ctl->userdata = userdata;
- ctl->connect = userctl->ctl_connect;
- ctl->disconnect = userctl->ctl_disconnect;
- ctl->write = userctl->ctl_write;
- ctl->set = userctl->ctl_set;
- ctl->get = userctl->ctl_get;
-
- TAILQ_INSERT_TAIL(&ctl_head, ctl, next);
-
- *ctlref = ctl;
-
- ctl_post_msg(KEV_CTL_REGISTERED, ctl->id, ctl->unit);
- return(0);
-}
-
-int
-ctl_deregister(void *ctlref)
+static struct kctl *
+ctl_find_by_id_unit(u_int32_t id, u_int32_t unit)
{
- struct ctl *ctl = (struct ctl *)ctlref;
- struct socket *so;
-
- if (ctl == NULL) /* sanity check */
- return(EINVAL);
+ struct kctl *kctl;
- TAILQ_REMOVE(&ctl_head, ctl, next);
-
- if (ctl->skt) {
- ctl->skt->so_pcb = 0;
- soisdisconnected(ctl->skt);
+ TAILQ_FOREACH(kctl, &ctl_head, next) {
+ if (kctl->id == id && (kctl->flags & CTL_FLAG_REG_ID_UNIT) == 0)
+ return kctl;
+ else if (kctl->id == id && kctl->reg_unit == unit)
+ return kctl;
}
-
- ctl_post_msg(KEV_CTL_DEREGISTERED, ctl->id, ctl->unit);
- FREE(ctl, M_TEMP);
- return(0);
+ return NULL;
}
/*
- * Locate a NKE
+ * Must be called with kernel controller lock taken
*/
-struct ctl *
-ctl_find(u_int32_t id, u_int32_t unit)
+static struct ctl_cb *
+kcb_find(struct kctl *kctl, u_int32_t unit)
{
- struct ctl *ctl;
+ struct ctl_cb *kcb;
- TAILQ_FOREACH(ctl, &ctl_head, next)
- if ((ctl->id == id) && (ctl->unit == unit))
- return ctl;
+ TAILQ_FOREACH(kcb, &kctl->kcb_head, next)
+ if (kcb->unit == unit)
+ return kcb;
return NULL;
}
-void ctl_post_msg(u_long event_code, u_int32_t id, u_int32_t unit)
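+/*
+ * Return the locked socket for the given control unit, or NULL.  The kcb
+ * use count is held while the socket lock is acquired so that
+ * ctl_disconnect() waits for us before tearing down the control block.
+ */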
+static struct socket *
+kcb_find_socket(struct kctl *kctl, u_int32_t unit)
+{
+ struct socket *so = NULL;
+
+ lck_mtx_lock(ctl_mtx);
+ struct ctl_cb *kcb = kcb_find(kctl, unit);
+ if (kcb && kcb->kctl == kctl) {
+ so = kcb->so;
+ if (so) {
+ kcb->usecount++;
+ }
+ }
+ lck_mtx_unlock(ctl_mtx);
+
+ if (so == NULL) {
+ return NULL;
+ }
+
+ socket_lock(so, 1);
+
+ lck_mtx_lock(ctl_mtx);
+ if (kcb->kctl == NULL)
+ {
+ lck_mtx_unlock(ctl_mtx);
+ socket_unlock(so, 1);
+ so = NULL;
+ lck_mtx_lock(ctl_mtx);
+ }
+ kcb->usecount--;
+ if (kcb->usecount == 0)
+ wakeup((event_t)&kcb->usecount);
+ lck_mtx_unlock(ctl_mtx);
+
+ return so;
+}
+
+static void
+ctl_post_msg(u_int32_t event_code, u_int32_t id)
{
struct ctl_event_data ctl_ev_data;
struct kev_msg ev_msg;
+ lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_NOTOWNED);
+
+ bzero(&ev_msg, sizeof(struct kev_msg));
ev_msg.vendor_code = KEV_VENDOR_APPLE;
ev_msg.kev_class = KEV_SYSTEM_CLASS;
ev_msg.kev_subclass = KEV_CTL_SUBCLASS;
ev_msg.event_code = event_code;
/* common nke subclass data */
bzero(&ctl_ev_data, sizeof(ctl_ev_data));
ctl_ev_data.ctl_id = id;
- ctl_ev_data.ctl_unit = unit;
ev_msg.dv[0].data_ptr = &ctl_ev_data;
ev_msg.dv[0].data_length = sizeof(ctl_ev_data);
kev_post_msg(&ev_msg);
}
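+
+/*
+ * Per-socket locking hooks for the PR_PCBLOCK protosw entries: each kernel
+ * control socket is protected by the mutex stored in its ctl_cb, which
+ * ctl_getlock() hands back to the socket layer.
+ */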
+static int
+ctl_lock(struct socket *so, int refcount, void *lr)
+{
+ void *lr_saved;
+
+ if (lr == NULL)
+ lr_saved = __builtin_return_address(0);
+ else
+ lr_saved = lr;
+
+ if (so->so_pcb != NULL) {
+ lck_mtx_lock(((struct ctl_cb *)so->so_pcb)->mtx);
+ } else {
+ panic("ctl_lock: so=%p NO PCB! lr=%p lrh= %s\n",
+ so, lr_saved, solockhistory_nr(so));
+ /* NOTREACHED */
+ }
+
+ if (so->so_usecount < 0) {
+ panic("ctl_lock: so=%p so_pcb=%p lr=%p ref=%x lrh= %s\n",
+ so, so->so_pcb, lr_saved, so->so_usecount, solockhistory_nr(so));
+ /* NOTREACHED */
+ }
+
+ if (refcount)
+ so->so_usecount++;
+
+ so->lock_lr[so->next_lock_lr] = lr_saved;
+ so->next_lock_lr = (so->next_lock_lr+1) % SO_LCKDBG_MAX;
+ return (0);
+}
+
+static int
+ctl_unlock(struct socket *so, int refcount, void *lr)
+{
+ void *lr_saved;
+ lck_mtx_t *mutex_held;
+
+ if (lr == NULL)
+ lr_saved = __builtin_return_address(0);
+ else
+ lr_saved = lr;
+
+#ifdef MORE_KCTLLOCK_DEBUG
+ printf("ctl_unlock: so=%x sopcb=%x lock=%x ref=%x lr=%p\n",
+ so, so->so_pcb, ((struct ctl_cb *)so->so_pcb)->mtx,
+ so->so_usecount, lr_saved);
+#endif
+ if (refcount)
+ so->so_usecount--;
+
+ if (so->so_usecount < 0) {
+ panic("ctl_unlock: so=%p usecount=%x lrh= %s\n",
+ so, so->so_usecount, solockhistory_nr(so));
+ /* NOTREACHED */
+ }
+ if (so->so_pcb == NULL) {
+ panic("ctl_unlock: so=%p NO PCB usecount=%x lr=%p lrh= %s\n",
+ so, so->so_usecount, (void *)lr_saved, solockhistory_nr(so));
+ /* NOTREACHED */
+ }
+ mutex_held = ((struct ctl_cb *)so->so_pcb)->mtx;
+
+ lck_mtx_assert(mutex_held, LCK_MTX_ASSERT_OWNED);
+ so->unlock_lr[so->next_unlock_lr] = lr_saved;
+ so->next_unlock_lr = (so->next_unlock_lr+1) % SO_LCKDBG_MAX;
+ lck_mtx_unlock(mutex_held);
+
+ if (so->so_usecount == 0)
+ ctl_sofreelastref(so);
+
+ return (0);
+}
+
+static lck_mtx_t *
+ctl_getlock(struct socket *so, __unused int locktype)
+{
+ struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
+
+ if (so->so_pcb) {
+ if (so->so_usecount < 0)
+ panic("ctl_getlock: so=%p usecount=%x lrh= %s\n",
+ so, so->so_usecount, solockhistory_nr(so));
+ return(kcb->mtx);
+ } else {
+ panic("ctl_getlock: so=%p NULL NO so_pcb %s\n",
+ so, solockhistory_nr(so));
+ return (so->so_proto->pr_domain->dom_mtx);
+ }
+}