/*
- * Copyright (c) 1999-2004 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 1999-2011 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
#include <sys/sys_domain.h>
#include <sys/kern_event.h>
#include <sys/kern_control.h>
+#include <sys/kauth.h>
#include <net/if_var.h>
#include <mach/vm_types.h>
-#include <mach/kmod.h>
#include <kern/thread.h>
* Definitions and vars for we support
*/
-static u_int32_t ctl_last_id = 0;
-static u_int32_t ctl_max = 256;
static u_int32_t ctl_maxunit = 65536;
static lck_grp_attr_t *ctl_lck_grp_attr = 0;
static lck_attr_t *ctl_lck_attr = 0;
static lck_grp_t *ctl_lck_grp = 0;
static lck_mtx_t *ctl_mtx;
-/*
- * internal structure maintained for each register controller
- */
-
-struct ctl_cb;
-
-struct kctl
-{
- TAILQ_ENTRY(kctl) next; /* controller chain */
-
- /* controller information provided when registering */
- char name[MAX_KCTL_NAME]; /* unique nke identifier, provided by DTS */
- u_int32_t id;
- u_int32_t reg_unit;
-
- /* misc communication information */
- u_int32_t flags; /* support flags */
- u_int32_t recvbufsize; /* request more than the default buffer size */
- u_int32_t sendbufsize; /* request more than the default buffer size */
-
- /* Dispatch functions */
- ctl_connect_func connect; /* Make contact */
- ctl_disconnect_func disconnect; /* Break contact */
- ctl_send_func send; /* Send data to nke */
- ctl_setopt_func setopt; /* set kctl configuration */
- ctl_getopt_func getopt; /* get kctl configuration */
-
- TAILQ_HEAD(, ctl_cb) kcb_head;
- u_int32_t lastunit;
-};
-
-struct ctl_cb {
- TAILQ_ENTRY(ctl_cb) next; /* controller chain */
- lck_mtx_t *mtx;
- struct socket *so; /* controlling socket */
- struct kctl *kctl; /* back pointer to controller */
- u_int32_t unit;
- void *userdata;
-};
/* all the controllers are chained */
-TAILQ_HEAD(, kctl) ctl_head;
+TAILQ_HEAD(kctl_list, kctl) ctl_head;
static int ctl_attach(struct socket *, int, struct proc *);
static int ctl_detach(struct socket *);
static int ctl_ctloutput(struct socket *, struct sockopt *);
static int ctl_peeraddr(struct socket *so, struct sockaddr **nam);
-static struct kctl *ctl_find_by_id(u_int32_t);
static struct kctl *ctl_find_by_name(const char *);
static struct kctl *ctl_find_by_id_unit(u_int32_t id, u_int32_t unit);
+static struct socket *kcb_find_socket(struct kctl *, u_int32_t unit);
static struct ctl_cb *kcb_find(struct kctl *, u_int32_t unit);
-static void ctl_post_msg(u_long event_code, u_int32_t id);
+static void ctl_post_msg(u_int32_t event_code, u_int32_t id);
-static int ctl_lock(struct socket *, int, int);
-static int ctl_unlock(struct socket *, int, int);
+static int ctl_lock(struct socket *, int, void *);
+static int ctl_unlock(struct socket *, int, void *);
static lck_mtx_t * ctl_getlock(struct socket *, int);
static struct pr_usrreqs ctl_usrreqs =
error = ENOMEM;
goto done;
}
- lck_grp_attr_setdefault(ctl_lck_grp_attr);
ctl_lck_grp = lck_grp_alloc_init("Kernel Control Protocol", ctl_lck_grp_attr);
if (ctl_lck_grp == 0) {
error = ENOMEM;
goto done;
}
- lck_attr_setdefault(ctl_lck_attr);
ctl_mtx = lck_mtx_alloc_init(ctl_lck_grp, ctl_lck_attr);
if (ctl_mtx == 0) {
if ((kctl = kcb->kctl) != 0) {
lck_mtx_lock(ctl_mtx);
TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
- lck_mtx_lock(ctl_mtx);
+ lck_mtx_unlock(ctl_mtx);
}
kcb_delete(kcb);
}
+ sofreelastref(so, 1);
return 0;
}
int error = 0;
struct sockaddr_ctl sa;
struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
+ struct ctl_cb *kcb_next = NULL;
if (kcb == 0)
panic("ctl_connect so_pcb null\n");
lck_mtx_unlock(ctl_mtx);
return(EINVAL);
}
- if ((error = proc_suser(p))) {
+ if (kauth_cred_issuser(kauth_cred_get()) == 0) {
lck_mtx_unlock(ctl_mtx);
- return error;
+ return EPERM;
}
}
return EBUSY;
}
} else {
- u_int32_t unit = kctl->lastunit + 1;
+	/* Find an unused unit number, assumes units are listed in ascending order */
+ u_int32_t unit = 1;
- while (1) {
- if (unit == ctl_maxunit)
- unit = 1;
- if (kcb_find(kctl, unit) == NULL) {
- kctl->lastunit = sa.sc_unit = unit;
- break;
- }
- if (unit++ == kctl->lastunit) {
- lck_mtx_unlock(ctl_mtx);
- return EBUSY;
- }
- }
+ TAILQ_FOREACH(kcb_next, &kctl->kcb_head, next) {
+ if (kcb_next->unit > unit) {
+			/* Found a gap, let's fill it in */
+ break;
+ }
+ unit = kcb_next->unit + 1;
+ if (unit == ctl_maxunit)
+ break;
+ }
+
+ if (unit == ctl_maxunit) {
+ lck_mtx_unlock(ctl_mtx);
+ return EBUSY;
+ }
+
+ sa.sc_unit = unit;
}
kcb->unit = sa.sc_unit;
kcb->kctl = kctl;
- TAILQ_INSERT_TAIL(&kctl->kcb_head, kcb, next);
+ if (kcb_next != NULL) {
+ TAILQ_INSERT_BEFORE(kcb_next, kcb, next);
+ }
+ else {
+ TAILQ_INSERT_TAIL(&kctl->kcb_head, kcb, next);
+ }
lck_mtx_unlock(ctl_mtx);
error = soreserve(so, kctl->sendbufsize, kctl->recvbufsize);
error = (*kctl->connect)(kctl, &sa, &kcb->userdata);
socket_lock(so, 0);
if (error)
- goto done;
+ goto end;
soisconnected(so);
+end:
+ if (error && kctl->disconnect) {
+ socket_unlock(so, 0);
+ (*kctl->disconnect)(kctl, kcb->unit, kcb->userdata);
+ socket_lock(so, 0);
+ }
done:
if (error) {
soisdisconnected(so);
(*kctl->disconnect)(kctl, kcb->unit, kcb->userdata);
socket_lock(so, 0);
}
+
+ soisdisconnected(so);
+
+ socket_unlock(so, 0);
lck_mtx_lock(ctl_mtx);
kcb->kctl = 0;
kcb->unit = 0;
+ while (kcb->usecount != 0) {
+ msleep(&kcb->usecount, ctl_mtx, 0, "kcb->usecount", 0);
+ }
TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
- soisdisconnected(so);
lck_mtx_unlock(ctl_mtx);
+ socket_lock(so, 0);
}
return 0;
}
static int
ctl_send(struct socket *so, int flags, struct mbuf *m,
- __unused struct sockaddr *addr, __unused struct mbuf *control,
+ __unused struct sockaddr *addr, struct mbuf *control,
__unused struct proc *p)
{
int error = 0;
struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
struct kctl *kctl;
+ if (control) m_freem(control);
+
if (kcb == NULL) /* sanity check */
- return(ENOTCONN);
+ error = ENOTCONN;
- if ((kctl = kcb->kctl) == NULL)
- return(EINVAL);
+ if (error == 0 && (kctl = kcb->kctl) == NULL)
+ error = EINVAL;
- if (kctl->send) {
+ if (error == 0 && kctl->send) {
socket_unlock(so, 0);
error = (*kctl->send)(kctl, kcb->unit, kcb->userdata, m, flags);
socket_lock(so, 0);
+ } else {
+ m_freem(m);
+ if (error == 0)
+ error = ENOTSUP;
}
return error;
}
errno_t
ctl_enqueuembuf(void *kctlref, u_int32_t unit, struct mbuf *m, u_int32_t flags)
{
- struct ctl_cb *kcb;
struct socket *so;
errno_t error = 0;
struct kctl *kctl = (struct kctl *)kctlref;
if (kctl == NULL)
return EINVAL;
-
- kcb = kcb_find(kctl, unit);
- if (kcb == NULL)
- return EINVAL;
- so = (struct socket *)kcb->so;
- if (so == NULL)
+ so = kcb_find_socket(kctl, unit);
+
+ if (so == NULL)
return EINVAL;
- socket_lock(so, 1);
if (sbspace(&so->so_rcv) < m->m_pkthdr.len) {
error = ENOBUFS;
goto bye;
errno_t
ctl_enqueuedata(void *kctlref, u_int32_t unit, void *data, size_t len, u_int32_t flags)
{
- struct ctl_cb *kcb;
struct socket *so;
struct mbuf *m;
errno_t error = 0;
if (kctlref == NULL)
return EINVAL;
- kcb = kcb_find(kctl, unit);
- if (kcb == NULL)
- return EINVAL;
-
- so = (struct socket *)kcb->so;
- if (so == NULL)
+ so = kcb_find_socket(kctl, unit);
+ if (so == NULL)
return EINVAL;
- socket_lock(so, 1);
- if ((size_t)sbspace(&so->so_rcv) < len) {
+ if (sbspace(&so->so_rcv) < (int)len) {
error = ENOBUFS;
goto bye;
}
errno_t
ctl_getenqueuespace(kern_ctl_ref kctlref, u_int32_t unit, size_t *space)
{
- struct ctl_cb *kcb;
struct kctl *kctl = (struct kctl *)kctlref;
struct socket *so;
+ long avail;
if (kctlref == NULL || space == NULL)
return EINVAL;
-
- kcb = kcb_find(kctl, unit);
- if (kcb == NULL)
- return EINVAL;
- so = (struct socket *)kcb->so;
- if (so == NULL)
+ so = kcb_find_socket(kctl, unit);
+ if (so == NULL)
return EINVAL;
- socket_lock(so, 1);
- *space = sbspace(&so->so_rcv);
+ avail = sbspace(&so->so_rcv);
+ *space = (avail < 0) ? 0 : avail;
socket_unlock(so, 1);
-
+
return 0;
}
case SOPT_SET:
if (kctl->setopt == NULL)
return(ENOTSUP);
- MALLOC(data, void *, sopt->sopt_valsize, M_TEMP, M_WAITOK);
- if (data == NULL)
- return(ENOMEM);
- error = sooptcopyin(sopt, data, sopt->sopt_valsize, sopt->sopt_valsize);
+ if (sopt->sopt_valsize == 0) {
+ data = NULL;
+ } else {
+ MALLOC(data, void *, sopt->sopt_valsize, M_TEMP, M_WAITOK);
+ if (data == NULL)
+ return(ENOMEM);
+ error = sooptcopyin(sopt, data, sopt->sopt_valsize, sopt->sopt_valsize);
+ }
if (error == 0) {
socket_unlock(so, 0);
error = (*kctl->setopt)(kcb->kctl, kcb->unit, kcb->userdata, sopt->sopt_name,
socket_unlock(so, 0);
error = (*kctl->getopt)(kcb->kctl, kcb->unit, kcb->userdata, sopt->sopt_name,
data, &len);
+ if (data != NULL && len > sopt->sopt_valsize)
+ panic_plain("ctl_ctloutput: ctl %s returned len (%lu) > sopt_valsize (%lu)\n",
+ kcb->kctl->name, len, sopt->sopt_valsize);
socket_lock(so, 0);
if (error == 0) {
if (data != NULL)
/* get the number of controllers */
case CTLIOCGCOUNT: {
struct kctl *kctl;
- int n = 0;
+ u_int32_t n = 0;
lck_mtx_lock(ctl_mtx);
TAILQ_FOREACH(kctl, &ctl_head, next)
n++;
lck_mtx_unlock(ctl_mtx);
-
- *(u_int32_t *)data = n;
+
+ bcopy(&n, data, sizeof (n));
error = 0;
break;
}
case CTLIOCGINFO: {
- struct ctl_info *ctl_info = (struct ctl_info *)data;
+ struct ctl_info ctl_info;
struct kctl *kctl = 0;
- size_t name_len = strlen(ctl_info->ctl_name);
-
+ size_t name_len;
+
+ bcopy(data, &ctl_info, sizeof (ctl_info));
+ name_len = strnlen(ctl_info.ctl_name, MAX_KCTL_NAME);
+
if (name_len == 0 || name_len + 1 > MAX_KCTL_NAME) {
error = EINVAL;
break;
}
lck_mtx_lock(ctl_mtx);
- kctl = ctl_find_by_name(ctl_info->ctl_name);
+ kctl = ctl_find_by_name(ctl_info.ctl_name);
lck_mtx_unlock(ctl_mtx);
if (kctl == 0) {
error = ENOENT;
break;
}
- ctl_info->ctl_id = kctl->id;
+ ctl_info.ctl_id = kctl->id;
+ bcopy(&ctl_info, data, sizeof (ctl_info));
error = 0;
break;
}
*/
errno_t
ctl_register(struct kern_ctl_reg *userkctl, kern_ctl_ref *kctlref)
-{
- struct kctl *kctl = 0;
- u_int32_t id = -1;
- u_int32_t n;
+{
+ struct kctl *kctl = NULL;
+ struct kctl *kctl_next = NULL;
+ u_int32_t id = 1;
size_t name_len;
if (userkctl == NULL) /* sanity check */
lck_mtx_lock(ctl_mtx);
- if ((userkctl->ctl_flags & CTL_FLAG_REG_ID_UNIT) == 0) {
+ /*
+ * Kernel Control IDs
+ *
+ * CTL_FLAG_REG_ID_UNIT indicates the control ID and unit number are
+ * static. If they do not exist, add them to the list in order. If the
+ * flag is not set, we must find a new unique value. We assume the
+ * list is in order. We find the last item in the list and add one. If
+ * this leads to wrapping the id around, we start at the front of the
+ * list and look for a gap.
+ */
+
+ if ((userkctl->ctl_flags & CTL_FLAG_REG_ID_UNIT) == 0) {
+ /* Must dynamically assign an unused ID */
+
+ /* Verify the same name isn't already registered */
if (ctl_find_by_name(userkctl->ctl_name) != NULL) {
lck_mtx_unlock(ctl_mtx);
FREE(kctl, M_TEMP);
return(EEXIST);
}
- for (n = 0, id = ctl_last_id + 1; n < ctl_max; id++, n++) {
+
+ /* Start with 1 in case the list is empty */
+ id = 1;
+ kctl_next = TAILQ_LAST(&ctl_head, kctl_list);
+
+ if (kctl_next != NULL) {
+ /* List was not empty, add one to the last item in the list */
+ id = kctl_next->id + 1;
+ kctl_next = NULL;
+
+ /*
+ * If this wrapped the id number, start looking at the front
+ * of the list for an unused id.
+ */
if (id == 0) {
- n--;
- continue;
+ /* Find the next unused ID */
+ id = 1;
+
+ TAILQ_FOREACH(kctl_next, &ctl_head, next) {
+ if (kctl_next->id > id) {
+ /* We found a gap */
+ break;
+ }
+
+ id = kctl_next->id + 1;
+ }
}
- if (ctl_find_by_id(id) == 0)
- break;
- }
- if (id == ctl_max) {
- lck_mtx_unlock(ctl_mtx);
- FREE(kctl, M_TEMP);
- return(ENOBUFS);
}
- userkctl->ctl_id =id;
+
+ userkctl->ctl_id = id;
kctl->id = id;
kctl->reg_unit = -1;
} else {
+ TAILQ_FOREACH(kctl_next, &ctl_head, next) {
+ if (kctl_next->id > userkctl->ctl_id)
+ break;
+ }
+
if (ctl_find_by_id_unit(userkctl->ctl_id, userkctl->ctl_unit) != NULL) {
lck_mtx_unlock(ctl_mtx);
FREE(kctl, M_TEMP);
kctl->id = userkctl->ctl_id;
kctl->reg_unit = userkctl->ctl_unit;
}
- strcpy(kctl->name, userkctl->ctl_name);
+ strlcpy(kctl->name, userkctl->ctl_name, MAX_KCTL_NAME);
kctl->flags = userkctl->ctl_flags;
/* Let the caller know the default send and receive sizes */
TAILQ_INIT(&kctl->kcb_head);
- TAILQ_INSERT_TAIL(&ctl_head, kctl, next);
- ctl_max++;
+ if (kctl_next)
+ TAILQ_INSERT_BEFORE(kctl_next, kctl, next);
+ else
+ TAILQ_INSERT_TAIL(&ctl_head, kctl, next);
lck_mtx_unlock(ctl_mtx);
}
TAILQ_REMOVE(&ctl_head, kctl, next);
- ctl_max--;
lck_mtx_unlock(ctl_mtx);
}
/*
- * Must be called with global lock taked
+ * Must be called with global ctl_mtx lock taken
*/
static struct kctl *
-ctl_find_by_id(u_int32_t id)
+ctl_find_by_name(const char *name)
{
struct kctl *kctl;
TAILQ_FOREACH(kctl, &ctl_head, next)
- if (kctl->id == id)
+ if (strncmp(kctl->name, name, sizeof(kctl->name)) == 0)
return kctl;
return NULL;
}
-/*
- * Must be called with global ctl_mtx lock taked
- */
-static struct kctl *
-ctl_find_by_name(const char *name)
-{
- struct kctl *kctl;
-
- TAILQ_FOREACH(kctl, &ctl_head, next)
- if (strcmp(kctl->name, name) == 0)
- return kctl;
+u_int32_t
+ctl_id_by_name(const char *name)
+{
+ u_int32_t ctl_id = 0;
+
+ lck_mtx_lock(ctl_mtx);
+ struct kctl *kctl = ctl_find_by_name(name);
+ if (kctl) ctl_id = kctl->id;
+ lck_mtx_unlock(ctl_mtx);
+
+ return ctl_id;
+}
- return NULL;
+errno_t
+ctl_name_by_id(
+ u_int32_t id,
+ char *out_name,
+ size_t maxsize)
+{
+ int found = 0;
+
+ lck_mtx_lock(ctl_mtx);
+ struct kctl *kctl;
+ TAILQ_FOREACH(kctl, &ctl_head, next) {
+ if (kctl->id == id)
+ break;
+ }
+
+ if (kctl && kctl->name)
+ {
+ if (maxsize > MAX_KCTL_NAME)
+ maxsize = MAX_KCTL_NAME;
+ strlcpy(out_name, kctl->name, maxsize);
+ found = 1;
+ }
+ lck_mtx_unlock(ctl_mtx);
+
+ return found ? 0 : ENOENT;
}
/*
struct ctl_cb *kcb;
TAILQ_FOREACH(kcb, &kctl->kcb_head, next)
- if ((kcb->unit == unit))
+ if (kcb->unit == unit)
return kcb;
return NULL;
}
-/*
- * Must be called witout lock
- */
+static struct socket *
+kcb_find_socket(struct kctl *kctl, u_int32_t unit)
+{
+ struct socket *so = NULL;
+
+ lck_mtx_lock(ctl_mtx);
+ struct ctl_cb *kcb = kcb_find(kctl, unit);
+ if (kcb && kcb->kctl == kctl) {
+ so = kcb->so;
+ if (so) {
+ kcb->usecount++;
+ }
+ }
+ lck_mtx_unlock(ctl_mtx);
+
+ if (so == NULL) {
+ return NULL;
+ }
+
+ socket_lock(so, 1);
+
+ lck_mtx_lock(ctl_mtx);
+ if (kcb->kctl == NULL)
+ {
+ lck_mtx_unlock(ctl_mtx);
+ socket_unlock(so, 1);
+ so = NULL;
+ lck_mtx_lock(ctl_mtx);
+ }
+ kcb->usecount--;
+ if (kcb->usecount == 0)
+ wakeup((event_t)&kcb->usecount);
+ lck_mtx_unlock(ctl_mtx);
+
+ return so;
+}
+
static void
-ctl_post_msg(u_long event_code, u_int32_t id)
+ctl_post_msg(u_int32_t event_code, u_int32_t id)
{
struct ctl_event_data ctl_ev_data;
struct kev_msg ev_msg;
+ lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_NOTOWNED);
+
+ bzero(&ev_msg, sizeof(struct kev_msg));
ev_msg.vendor_code = KEV_VENDOR_APPLE;
ev_msg.kev_class = KEV_SYSTEM_CLASS;
}
static int
-ctl_lock(struct socket *so, int refcount, int lr)
- {
- int lr_saved;
-#ifdef __ppc__
- if (lr == 0) {
- __asm__ volatile("mflr %0" : "=r" (lr_saved));
- }
- else lr_saved = lr;
-#endif
-
- if (so->so_pcb) {
+ctl_lock(struct socket *so, int refcount, void *lr)
+{
+ void *lr_saved;
+
+ if (lr == NULL)
+ lr_saved = __builtin_return_address(0);
+ else
+ lr_saved = lr;
+
+ if (so->so_pcb != NULL) {
lck_mtx_lock(((struct ctl_cb *)so->so_pcb)->mtx);
} else {
- panic("ctl_lock: so=%x NO PCB! lr=%x\n", so, lr_saved);
- lck_mtx_lock(so->so_proto->pr_domain->dom_mtx);
+ panic("ctl_lock: so=%p NO PCB! lr=%p lrh= %s\n",
+ so, lr_saved, solockhistory_nr(so));
+ /* NOTREACHED */
}
-
- if (so->so_usecount < 0)
- panic("ctl_lock: so=%x so_pcb=%x lr=%x ref=%x\n",
- so, so->so_pcb, lr_saved, so->so_usecount);
-
+
+ if (so->so_usecount < 0) {
+ panic("ctl_lock: so=%p so_pcb=%p lr=%p ref=%x lrh= %s\n",
+ so, so->so_pcb, lr_saved, so->so_usecount, solockhistory_nr(so));
+ /* NOTREACHED */
+ }
+
if (refcount)
so->so_usecount++;
- so->reserved3 = (void *)lr_saved;
+
+ so->lock_lr[so->next_lock_lr] = lr_saved;
+ so->next_lock_lr = (so->next_lock_lr+1) % SO_LCKDBG_MAX;
return (0);
}
static int
-ctl_unlock(struct socket *so, int refcount, int lr)
+ctl_unlock(struct socket *so, int refcount, void *lr)
{
- int lr_saved;
- lck_mtx_t * mutex_held;
-
-#ifdef __ppc__
- if (lr == 0) {
- __asm__ volatile("mflr %0" : "=r" (lr_saved));
- }
- else lr_saved = lr;
-#endif
-
+ void *lr_saved;
+ lck_mtx_t *mutex_held;
+
+ if (lr == NULL)
+ lr_saved = __builtin_return_address(0);
+ else
+ lr_saved = lr;
+
#ifdef MORE_KCTLLOCK_DEBUG
- printf("ctl_unlock: so=%x sopcb=%x lock=%x ref=%x lr=%x\n",
- so, so->so_pcb, ((struct ctl_cb *)so->so_pcb)->mtx, so->so_usecount, lr_saved);
+ printf("ctl_unlock: so=%x sopcb=%x lock=%x ref=%x lr=%p\n",
+ so, so->so_pcb, ((struct ctl_cb *)so->so_pcb)->mtx,
+ so->so_usecount, lr_saved);
#endif
if (refcount)
so->so_usecount--;
-
- if (so->so_usecount < 0)
- panic("ctl_unlock: so=%x usecount=%x\n", so, so->so_usecount);
+
+ if (so->so_usecount < 0) {
+ panic("ctl_unlock: so=%p usecount=%x lrh= %s\n",
+ so, so->so_usecount, solockhistory_nr(so));
+ /* NOTREACHED */
+ }
if (so->so_pcb == NULL) {
- panic("ctl_unlock: so=%x NO PCB usecount=%x lr=%x\n", so, so->so_usecount, lr_saved);
- mutex_held = so->so_proto->pr_domain->dom_mtx;
- } else {
- mutex_held = ((struct ctl_cb *)so->so_pcb)->mtx;
+ panic("ctl_unlock: so=%p NO PCB usecount=%x lr=%p lrh= %s\n",
+ so, so->so_usecount, (void *)lr_saved, solockhistory_nr(so));
+ /* NOTREACHED */
}
+ mutex_held = ((struct ctl_cb *)so->so_pcb)->mtx;
+
lck_mtx_assert(mutex_held, LCK_MTX_ASSERT_OWNED);
+ so->unlock_lr[so->next_unlock_lr] = lr_saved;
+ so->next_unlock_lr = (so->next_unlock_lr+1) % SO_LCKDBG_MAX;
lck_mtx_unlock(mutex_held);
- so->reserved4 = (void *)lr_saved;
-
+
if (so->so_usecount == 0)
ctl_sofreelastref(so);
-
+
return (0);
}
if (so->so_pcb) {
if (so->so_usecount < 0)
- panic("ctl_getlock: so=%x usecount=%x\n", so, so->so_usecount);
+ panic("ctl_getlock: so=%p usecount=%x lrh= %s\n",
+ so, so->so_usecount, solockhistory_nr(so));
return(kcb->mtx);
} else {
- panic("ctl_getlock: so=%x NULL so_pcb\n", so);
+ panic("ctl_getlock: so=%p NULL NO so_pcb %s\n",
+ so, solockhistory_nr(so));
return (so->so_proto->pr_domain->dom_mtx);
}
}