/*
- * Copyright (c) 1999-2004 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 1999-2006 Apple Computer, Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
#include <sys/sys_domain.h>
#include <sys/kern_event.h>
#include <sys/kern_control.h>
+#include <sys/kauth.h>
#include <net/if_var.h>
#include <mach/vm_types.h>
* Definitions and vars for what we support
*/
-static u_int32_t ctl_last_id = 0;
-static u_int32_t ctl_max = 256;
static u_int32_t ctl_maxunit = 65536;
static lck_grp_attr_t *ctl_lck_grp_attr = 0;
static lck_attr_t *ctl_lck_attr = 0;
static lck_grp_t *ctl_lck_grp = 0;
static lck_mtx_t *ctl_mtx;
-/*
- * internal structure maintained for each register controller
- */
-
-struct ctl_cb;
-
-struct kctl
-{
- TAILQ_ENTRY(kctl) next; /* controller chain */
-
- /* controller information provided when registering */
- char name[MAX_KCTL_NAME]; /* unique nke identifier, provided by DTS */
- u_int32_t id;
- u_int32_t reg_unit;
-
- /* misc communication information */
- u_int32_t flags; /* support flags */
- u_int32_t recvbufsize; /* request more than the default buffer size */
- u_int32_t sendbufsize; /* request more than the default buffer size */
-
- /* Dispatch functions */
- ctl_connect_func connect; /* Make contact */
- ctl_disconnect_func disconnect; /* Break contact */
- ctl_send_func send; /* Send data to nke */
- ctl_setopt_func setopt; /* set kctl configuration */
- ctl_getopt_func getopt; /* get kctl configuration */
-
- TAILQ_HEAD(, ctl_cb) kcb_head;
- u_int32_t lastunit;
-};
-
-struct ctl_cb {
- TAILQ_ENTRY(ctl_cb) next; /* controller chain */
- lck_mtx_t *mtx;
- struct socket *so; /* controlling socket */
- struct kctl *kctl; /* back pointer to controller */
- u_int32_t unit;
- void *userdata;
-};
/* all the controllers are chained */
-TAILQ_HEAD(, kctl) ctl_head;
+TAILQ_HEAD(kctl_list, kctl) ctl_head;
static int ctl_attach(struct socket *, int, struct proc *);
static int ctl_detach(struct socket *);
static int ctl_ctloutput(struct socket *, struct sockopt *);
static int ctl_peeraddr(struct socket *so, struct sockaddr **nam);
-static struct kctl *ctl_find_by_id(u_int32_t);
static struct kctl *ctl_find_by_name(const char *);
static struct kctl *ctl_find_by_id_unit(u_int32_t id, u_int32_t unit);
error = ENOMEM;
goto done;
}
- lck_grp_attr_setdefault(ctl_lck_grp_attr);
ctl_lck_grp = lck_grp_alloc_init("Kernel Control Protocol", ctl_lck_grp_attr);
if (ctl_lck_grp == 0) {
error = ENOMEM;
goto done;
}
- lck_attr_setdefault(ctl_lck_attr);
ctl_mtx = lck_mtx_alloc_init(ctl_lck_grp, ctl_lck_attr);
if (ctl_mtx == 0) {
}
kcb_delete(kcb);
}
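+	/* Free the socket now that its control block has been released */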
+ sofreelastref(so, 1);
return 0;
}
int error = 0;
struct sockaddr_ctl sa;
struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
+ struct ctl_cb *kcb_next = NULL;
if (kcb == 0)
panic("ctl_connect so_pcb null\n");
lck_mtx_unlock(ctl_mtx);
return(EINVAL);
}
- if ((error = proc_suser(p))) {
+ if (kauth_cred_issuser(kauth_cred_get()) == 0) {
lck_mtx_unlock(ctl_mtx);
- return error;
+ return EPERM;
}
}
return EBUSY;
}
} else {
- u_int32_t unit = kctl->lastunit + 1;
+ /* Find an unused unit number; assumes the kcb list is sorted by unit */
+ u_int32_t unit = 1;
+
+ TAILQ_FOREACH(kcb_next, &kctl->kcb_head, next) {
+ if (kcb_next->unit > unit) {
+ /* Found a gap, let's fill it in */
+ break;
+ }
+ unit = kcb_next->unit + 1;
+ if (unit == ctl_maxunit)
+ break;
+ }
- while (1) {
- if (unit == ctl_maxunit)
- unit = 1;
- if (kcb_find(kctl, unit) == NULL) {
- kctl->lastunit = sa.sc_unit = unit;
- break;
- }
- if (unit++ == kctl->lastunit) {
- lck_mtx_unlock(ctl_mtx);
- return EBUSY;
- }
- }
+ if (unit == ctl_maxunit) {
+ lck_mtx_unlock(ctl_mtx);
+ return EBUSY;
+ }
+
+ sa.sc_unit = unit;
}
kcb->unit = sa.sc_unit;
kcb->kctl = kctl;
- TAILQ_INSERT_TAIL(&kctl->kcb_head, kcb, next);
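+ /* Keep kcb_head ordered by unit number so the gap search above stays valid */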
+ if (kcb_next != NULL) {
+ TAILQ_INSERT_BEFORE(kcb_next, kcb, next);
+ } else {
+ TAILQ_INSERT_TAIL(&kctl->kcb_head, kcb, next);
+ }
lck_mtx_unlock(ctl_mtx);
error = soreserve(so, kctl->sendbufsize, kctl->recvbufsize);
return EINVAL;
socket_lock(so, 1);
- if ((size_t)sbspace(&so->so_rcv) < len) {
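+ /* sbspace() can be negative; compare signed so a full buffer fails with ENOBUFS */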
+ if (sbspace(&so->so_rcv) < (long)len) {
error = ENOBUFS;
goto bye;
}
struct ctl_cb *kcb;
struct kctl *kctl = (struct kctl *)kctlref;
struct socket *so;
+ long avail;
if (kctlref == NULL || space == NULL)
return EINVAL;
return EINVAL;
socket_lock(so, 1);
- *space = sbspace(&so->so_rcv);
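+ /* sbspace() can return a negative value; report that as zero free space */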
+ avail = sbspace(&so->so_rcv);
+ *space = (avail < 0) ? 0 : avail;
socket_unlock(so, 1);
return 0;
case SOPT_SET:
if (kctl->setopt == NULL)
return(ENOTSUP);
- MALLOC(data, void *, sopt->sopt_valsize, M_TEMP, M_WAITOK);
- if (data == NULL)
- return(ENOMEM);
- error = sooptcopyin(sopt, data, sopt->sopt_valsize, sopt->sopt_valsize);
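+ /* A zero-length set request has no data to copy in; skip the allocation */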
+ if (sopt->sopt_valsize == 0) {
+ data = NULL;
+ } else {
+ MALLOC(data, void *, sopt->sopt_valsize, M_TEMP, M_WAITOK);
+ if (data == NULL)
+ return(ENOMEM);
+ error = sooptcopyin(sopt, data, sopt->sopt_valsize, sopt->sopt_valsize);
+ }
if (error == 0) {
socket_unlock(so, 0);
error = (*kctl->setopt)(kcb->kctl, kcb->unit, kcb->userdata, sopt->sopt_name,
*/
errno_t
ctl_register(struct kern_ctl_reg *userkctl, kern_ctl_ref *kctlref)
-{
- struct kctl *kctl = 0;
- u_int32_t id = -1;
- u_int32_t n;
+{
+ struct kctl *kctl = NULL;
+ struct kctl *kctl_next = NULL;
+ u_int32_t id = 1;
size_t name_len;
if (userkctl == NULL) /* sanity check */
lck_mtx_lock(ctl_mtx);
- if ((userkctl->ctl_flags & CTL_FLAG_REG_ID_UNIT) == 0) {
+ /*
+ * Kernel Control IDs
+ *
+ * CTL_FLAG_REG_ID_UNIT indicates the control ID and unit number are
+ * static. If that ID and unit pair is not already registered, the
+ * control is added to the list in ID order. If the flag is not set,
+ * we must find a new unique ID. We assume the list is kept in order:
+ * we take the last item in the list and add one to its ID, and if
+ * that wraps the ID around, we start at the front of the list and
+ * look for a gap.
+ */
+
+ if ((userkctl->ctl_flags & CTL_FLAG_REG_ID_UNIT) == 0) {
+ /* Must dynamically assign an unused ID */
+
+ /* Verify the same name isn't already registered */
if (ctl_find_by_name(userkctl->ctl_name) != NULL) {
lck_mtx_unlock(ctl_mtx);
FREE(kctl, M_TEMP);
return(EEXIST);
}
- for (n = 0, id = ctl_last_id + 1; n < ctl_max; id++, n++) {
+
+ /* Start with 1 in case the list is empty */
+ id = 1;
+ kctl_next = TAILQ_LAST(&ctl_head, kctl_list);
+
+ if (kctl_next != NULL) {
+ /* List was not empty; take the ID of the last item and add one */
+ id = kctl_next->id + 1;
+ kctl_next = NULL;
+
+ /*
+ * If this wrapped the id number, start looking at the front
+ * of the list for an unused id.
+ */
if (id == 0) {
- n--;
- continue;
+ /* Find the next unused ID */
+ id = 1;
+
+ TAILQ_FOREACH(kctl_next, &ctl_head, next) {
+ if (kctl_next->id > id) {
+ /* We found a gap */
+ break;
+ }
+
+ id = kctl_next->id + 1;
+ }
}
- if (ctl_find_by_id(id) == 0)
- break;
}
- if (id == ctl_max) {
- lck_mtx_unlock(ctl_mtx);
- FREE(kctl, M_TEMP);
- return(ENOBUFS);
- }
- userkctl->ctl_id =id;
+
+ userkctl->ctl_id = id;
kctl->id = id;
kctl->reg_unit = -1;
} else {
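+ /* Static ID: find the insertion point that keeps ctl_head sorted by ID */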
+ TAILQ_FOREACH(kctl_next, &ctl_head, next) {
+ if (kctl_next->id > userkctl->ctl_id)
+ break;
+ }
+
if (ctl_find_by_id_unit(userkctl->ctl_id, userkctl->ctl_unit) != NULL) {
lck_mtx_unlock(ctl_mtx);
FREE(kctl, M_TEMP);
kctl->id = userkctl->ctl_id;
kctl->reg_unit = userkctl->ctl_unit;
}
- strcpy(kctl->name, userkctl->ctl_name);
+ strlcpy(kctl->name, userkctl->ctl_name, MAX_KCTL_NAME);
kctl->flags = userkctl->ctl_flags;
/* Let the caller know the default send and receive sizes */
TAILQ_INIT(&kctl->kcb_head);
- TAILQ_INSERT_TAIL(&ctl_head, kctl, next);
- ctl_max++;
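+ /* Insert in ID order; dynamic ID assignment relies on the list being sorted */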
+ if (kctl_next)
+ TAILQ_INSERT_BEFORE(kctl_next, kctl, next);
+ else
+ TAILQ_INSERT_TAIL(&ctl_head, kctl, next);
lck_mtx_unlock(ctl_mtx);
}
TAILQ_REMOVE(&ctl_head, kctl, next);
- ctl_max--;
lck_mtx_unlock(ctl_mtx);
return(0);
}
-/*
- * Must be called with global lock taked
- */
-static struct kctl *
-ctl_find_by_id(u_int32_t id)
-{
- struct kctl *kctl;
-
- TAILQ_FOREACH(kctl, &ctl_head, next)
- if (kctl->id == id)
- return kctl;
-
- return NULL;
-}
-
/*
 * Must be called with the global ctl_mtx lock taken
*/
struct kctl *kctl;
TAILQ_FOREACH(kctl, &ctl_head, next)
- if (strcmp(kctl->name, name) == 0)
+ if (strncmp(kctl->name, name, sizeof(kctl->name)) == 0)
return kctl;
return NULL;
static int
ctl_lock(struct socket *so, int refcount, int lr)
{
- int lr_saved;
-#ifdef __ppc__
- if (lr == 0) {
- __asm__ volatile("mflr %0" : "=r" (lr_saved));
- }
+ uint32_t lr_saved;
+ if (lr == 0)
+ lr_saved = (unsigned int) __builtin_return_address(0);
else lr_saved = lr;
-#endif
if (so->so_pcb) {
lck_mtx_lock(((struct ctl_cb *)so->so_pcb)->mtx);
} else {
- panic("ctl_lock: so=%x NO PCB! lr=%x\n", so, lr_saved);
+ panic("ctl_lock: so=%p NO PCB! lr=%x\n", so, lr_saved);
lck_mtx_lock(so->so_proto->pr_domain->dom_mtx);
}
if (so->so_usecount < 0)
- panic("ctl_lock: so=%x so_pcb=%x lr=%x ref=%x\n",
+ panic("ctl_lock: so=%p so_pcb=%p lr=%x ref=%x\n",
so, so->so_pcb, lr_saved, so->so_usecount);
if (refcount)
so->so_usecount++;
- so->reserved3 = (void *)lr_saved;
+
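+ /* Record the locking caller in the socket's lock debugging history */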
+ so->lock_lr[so->next_lock_lr] = lr_saved;
+ so->next_lock_lr = (so->next_lock_lr+1) % SO_LCKDBG_MAX;
return (0);
}
static int
ctl_unlock(struct socket *so, int refcount, int lr)
{
- int lr_saved;
+ uint32_t lr_saved;
lck_mtx_t * mutex_held;
-#ifdef __ppc__
- if (lr == 0) {
- __asm__ volatile("mflr %0" : "=r" (lr_saved));
- }
+ if (lr == 0)
+ lr_saved = (unsigned int) __builtin_return_address(0);
else lr_saved = lr;
-#endif
#ifdef MORE_KCTLLOCK_DEBUG
printf("ctl_unlock: so=%x sopcb=%x lock=%x ref=%x lr=%x\n",
so->so_usecount--;
if (so->so_usecount < 0)
- panic("ctl_unlock: so=%x usecount=%x\n", so, so->so_usecount);
+ panic("ctl_unlock: so=%p usecount=%x\n", so, so->so_usecount);
if (so->so_pcb == NULL) {
- panic("ctl_unlock: so=%x NO PCB usecount=%x lr=%x\n", so, so->so_usecount, lr_saved);
+ panic("ctl_unlock: so=%p NO PCB usecount=%x lr=%x\n", so, so->so_usecount, lr_saved);
mutex_held = so->so_proto->pr_domain->dom_mtx;
} else {
mutex_held = ((struct ctl_cb *)so->so_pcb)->mtx;
}
lck_mtx_assert(mutex_held, LCK_MTX_ASSERT_OWNED);
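+ /* Record the unlocking caller before releasing the mutex */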
+ so->unlock_lr[so->next_unlock_lr] = lr_saved;
+ so->next_unlock_lr = (so->next_unlock_lr+1) % SO_LCKDBG_MAX;
lck_mtx_unlock(mutex_held);
- so->reserved4 = (void *)lr_saved;
if (so->so_usecount == 0)
ctl_sofreelastref(so);
if (so->so_pcb) {
if (so->so_usecount < 0)
- panic("ctl_getlock: so=%x usecount=%x\n", so, so->so_usecount);
+ panic("ctl_getlock: so=%p usecount=%x\n", so, so->so_usecount);
return(kcb->mtx);
} else {
- panic("ctl_getlock: so=%x NULL so_pcb\n", so);
+ panic("ctl_getlock: so=%p NULL so_pcb\n", so);
return (so->so_proto->pr_domain->dom_mtx);
}
}