/*
- * Copyright (c) 1999-2017 Apple Inc. All rights reserved.
+ * Copyright (c) 1999-2020 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
u_int32_t sendbufsize; /* request more than the default buffer size */
/* Dispatch functions */
+ ctl_setup_func setup; /* Setup contact */
ctl_bind_func bind; /* Prepare contact */
ctl_connect_func connect; /* Make contact */
ctl_disconnect_func disconnect; /* Break contact */
u_int32_t lastunit;
};
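+/*
+ * Sketch of a setup callback (illustrative only; names are hypothetical).
+ * When a controller is registered with CTL_FLAG_REG_SETUP, ctl_setup_kctl()
+ * calls it for a connect or bind with sc_unit == 0, so the controller can
+ * pick the unit and seed the per-connection userdata:
+ *
+ *	static errno_t
+ *	my_setup(u_int32_t *unit, void **unitinfo)
+ *	{
+ *		*unit = my_pick_unit();   // hypothetical unit allocator
+ *		*unitinfo = NULL;         // may also point at private state
+ *		return 0;                 // nonzero errno aborts the attempt
+ *	}
+ */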
+#if DEVELOPMENT || DEBUG
+enum ctl_status {
+ KCTL_DISCONNECTED = 0,
+ KCTL_CONNECTING = 1,
+ KCTL_CONNECTED = 2
+};
+#endif /* DEVELOPMENT || DEBUG */
+
struct ctl_cb {
TAILQ_ENTRY(ctl_cb) next; /* controller chain */
- lck_mtx_t *mtx;
+ lck_mtx_t mtx;
struct socket *so; /* controlling socket */
struct kctl *kctl; /* back pointer to controller */
void *userdata;
struct sockaddr_ctl sac;
u_int32_t usecount;
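+ /* serialization counters, see ctl_kcb_increment_use_count() et al. */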
+ u_int32_t kcb_usecount;
+ u_int32_t require_clearing_count;
+#if DEVELOPMENT || DEBUG
+ enum ctl_status status;
+#endif /* DEVELOPMENT || DEBUG */
};
#ifndef ROUNDUP64
* Definitions and vars for the controls we support
*/
-static u_int32_t ctl_maxunit = 65536;
-static lck_grp_attr_t *ctl_lck_grp_attr = 0;
-static lck_attr_t *ctl_lck_attr = 0;
-static lck_grp_t *ctl_lck_grp = 0;
-static lck_mtx_t *ctl_mtx;
+const u_int32_t ctl_maxunit = 65536;
+static LCK_ATTR_DECLARE(ctl_lck_attr, 0, 0);
+static LCK_GRP_DECLARE(ctl_lck_grp, "Kernel Control Protocol");
+static LCK_MTX_DECLARE_ATTR(ctl_mtx, &ctl_lck_grp, &ctl_lck_attr);
/* all the controllers are chained */
-TAILQ_HEAD(kctl_list, kctl) ctl_head;
+TAILQ_HEAD(kctl_list, kctl) ctl_head = TAILQ_HEAD_INITIALIZER(ctl_head);
static int ctl_attach(struct socket *, int, struct proc *);
static int ctl_detach(struct socket *);
SYSCTL_INT(_net_systm_kctl, OID_AUTO, debug,
CTLFLAG_RW | CTLFLAG_LOCKED, &ctl_debug, 0, "");
+#if DEVELOPMENT || DEBUG
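+/*
+ * When net.systm.kctl.panicdebug is nonzero, panic on a connect attempt
+ * against a kcb that is already connecting or connected (see ctl_connect()).
+ */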
+u_int32_t ctl_panic_debug = 0;
+SYSCTL_INT(_net_systm_kctl, OID_AUTO, panicdebug,
+ CTLFLAG_RW | CTLFLAG_LOCKED, &ctl_panic_debug, 0, "");
+#endif /* DEVELOPMENT || DEBUG */
+
#define KCTL_TBL_INC 16
static uintptr_t kctl_tbl_size = 0;
VERIFY(!(dp->dom_flags & DOM_INITIALIZED));
VERIFY(dp == systemdomain);
- ctl_lck_grp_attr = lck_grp_attr_alloc_init();
- if (ctl_lck_grp_attr == NULL) {
- panic("%s: lck_grp_attr_alloc_init failed\n", __func__);
- /* NOTREACHED */
- }
-
- ctl_lck_grp = lck_grp_alloc_init("Kernel Control Protocol",
- ctl_lck_grp_attr);
- if (ctl_lck_grp == NULL) {
- panic("%s: lck_grp_alloc_init failed\n", __func__);
- /* NOTREACHED */
- }
-
- ctl_lck_attr = lck_attr_alloc_init();
- if (ctl_lck_attr == NULL) {
- panic("%s: lck_attr_alloc_init failed\n", __func__);
- /* NOTREACHED */
- }
-
- ctl_mtx = lck_mtx_alloc_init(ctl_lck_grp, ctl_lck_attr);
- if (ctl_mtx == NULL) {
- panic("%s: lck_mtx_alloc_init failed\n", __func__);
- /* NOTREACHED */
- }
- TAILQ_INIT(&ctl_head);
-
for (i = 0, pr = &kctlsw[0]; i < kctl_proto_count; i++, pr++) {
net_add_proto(pr, dp, 1);
}
kcb_delete(struct ctl_cb *kcb)
{
if (kcb != 0) {
- if (kcb->mtx != 0) {
- lck_mtx_free(kcb->mtx, ctl_lck_grp);
- }
- FREE(kcb, M_TEMP);
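+ /*
+ * The mutex is embedded in the kcb: destroy it in place, then
+ * release the sized allocation made in ctl_attach().
+ */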
+ lck_mtx_destroy(&kcb->mtx, &ctl_lck_grp);
+ kheap_free(KHEAP_DEFAULT, kcb, sizeof(struct ctl_cb));
}
}
int error = 0;
struct ctl_cb *kcb = 0;
- MALLOC(kcb, struct ctl_cb *, sizeof(struct ctl_cb), M_TEMP, M_WAITOK);
+ kcb = kheap_alloc(KHEAP_DEFAULT, sizeof(struct ctl_cb), Z_WAITOK | Z_ZERO);
if (kcb == NULL) {
error = ENOMEM;
goto quit;
}
- bzero(kcb, sizeof(struct ctl_cb));
- kcb->mtx = lck_mtx_alloc_init(ctl_lck_grp, ctl_lck_attr);
- if (kcb->mtx == NULL) {
- error = ENOMEM;
- goto quit;
- }
+ lck_mtx_init(&kcb->mtx, &ctl_lck_grp, &ctl_lck_attr);
kcb->so = so;
so->so_pcb = (caddr_t)kcb;
if (kcb != 0) {
struct kctl *kctl;
if ((kctl = kcb->kctl) != 0) {
- lck_mtx_lock(ctl_mtx);
+ lck_mtx_lock(&ctl_mtx);
TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
kctlstat.kcs_pcbcount--;
kctlstat.kcs_gencnt++;
- lck_mtx_unlock(ctl_mtx);
+ lck_mtx_unlock(&ctl_mtx);
}
kcb_delete(kcb);
}
return 0;
}
+/*
+ * Use this function and ctl_kcb_require_clearing to serialize
+ * critical calls into the kctl subsystem
+ */
+static void
+ctl_kcb_increment_use_count(struct ctl_cb *kcb, lck_mtx_t *mutex_held)
+{
+ LCK_MTX_ASSERT(mutex_held, LCK_MTX_ASSERT_OWNED);
+ while (kcb->require_clearing_count > 0) {
+ msleep(&kcb->require_clearing_count, mutex_held, PSOCK | PCATCH, "kcb_require_clearing", NULL);
+ }
+ kcb->kcb_usecount++;
+}
+
+static void
+ctl_kcb_require_clearing(struct ctl_cb *kcb, lck_mtx_t *mutex_held)
+{
+ assert(kcb->kcb_usecount != 0);
+ kcb->require_clearing_count++;
+ kcb->kcb_usecount--;
+ while (kcb->kcb_usecount > 0) { // we need to wait until no one else is running
+ msleep(&kcb->kcb_usecount, mutex_held, PSOCK | PCATCH, "kcb_usecount", NULL);
+ }
+ kcb->kcb_usecount++;
+}
+
+static void
+ctl_kcb_done_clearing(struct ctl_cb *kcb)
+{
+ assert(kcb->require_clearing_count != 0);
+ kcb->require_clearing_count--;
+ wakeup((caddr_t)&kcb->require_clearing_count);
+}
+
+static void
+ctl_kcb_decrement_use_count(struct ctl_cb *kcb)
+{
+ assert(kcb->kcb_usecount != 0);
+ kcb->kcb_usecount--;
+ wakeup((caddr_t)&kcb->kcb_usecount);
+}
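+
+/*
+ * Usage pattern for the counters above (a sketch; mirrors what
+ * ctl_bind, ctl_connect and ctl_disconnect do below):
+ *
+ *	lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
+ *	ctl_kcb_increment_use_count(kcb, mtx_held);
+ *	ctl_kcb_require_clearing(kcb, mtx_held);  // exclusive callers only
+ *	... critical kctl work ...
+ *	ctl_kcb_done_clearing(kcb);
+ *	ctl_kcb_decrement_use_count(kcb);
+ *
+ * Shared callers such as ctl_send and ctl_ctloutput take only the
+ * increment/decrement pair.
+ */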
+
static int
ctl_detach(struct socket *so)
{
return 0;
}
+ lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
+ ctl_kcb_increment_use_count(kcb, mtx_held);
+ ctl_kcb_require_clearing(kcb, mtx_held);
+
if (kcb->kctl != NULL && kcb->kctl->bind != NULL &&
kcb->userdata != NULL && !(so->so_state & SS_ISCONNECTED)) {
// The unit was bound, but not connected
}
soisdisconnected(so);
+#if DEVELOPMENT || DEBUG
+ kcb->status = KCTL_DISCONNECTED;
+#endif /* DEVELOPMENT || DEBUG */
so->so_flags |= SOF_PCBCLEARING;
+ ctl_kcb_done_clearing(kcb);
+ ctl_kcb_decrement_use_count(kcb);
return 0;
}
bcopy(nam, &sa, sizeof(struct sockaddr_ctl));
- lck_mtx_lock(ctl_mtx);
+ lck_mtx_lock(&ctl_mtx);
kctl = ctl_find_by_id_unit(sa.sc_id, sa.sc_unit);
if (kctl == NULL) {
- lck_mtx_unlock(ctl_mtx);
+ lck_mtx_unlock(&ctl_mtx);
return ENOENT;
}
(so->so_type != SOCK_STREAM)) ||
(!(kctl->flags & CTL_FLAG_REG_SOCK_STREAM) &&
(so->so_type != SOCK_DGRAM))) {
- lck_mtx_unlock(ctl_mtx);
+ lck_mtx_unlock(&ctl_mtx);
return EPROTOTYPE;
}
if (kctl->flags & CTL_FLAG_PRIVILEGED) {
if (p == 0) {
- lck_mtx_unlock(ctl_mtx);
+ lck_mtx_unlock(&ctl_mtx);
return EINVAL;
}
if (kauth_cred_issuser(kauth_cred_get()) == 0) {
- lck_mtx_unlock(ctl_mtx);
+ lck_mtx_unlock(&ctl_mtx);
return EPERM;
}
}
if ((kctl->flags & CTL_FLAG_REG_ID_UNIT) || sa.sc_unit != 0) {
if (kcb_find(kctl, sa.sc_unit) != NULL) {
- lck_mtx_unlock(ctl_mtx);
+ lck_mtx_unlock(&ctl_mtx);
return EBUSY;
}
+ } else if (kctl->setup != NULL) {
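+ /*
+ * No pre-assigned unit: let the controller's setup callback
+ * choose sc_unit and optionally seed the per-connection
+ * userdata before the kcb is published.
+ */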
+ error = (*kctl->setup)(&sa.sc_unit, &kcb->userdata);
+ if (error != 0) {
+ lck_mtx_unlock(&ctl_mtx);
+ return error;
+ }
} else {
/* Find an unused ID, assumes control IDs are in order */
u_int32_t unit = 1;
}
if (unit == ctl_maxunit) {
- lck_mtx_unlock(ctl_mtx);
+ lck_mtx_unlock(&ctl_mtx);
return EBUSY;
}
kctlstat.kcs_pcbcount++;
kctlstat.kcs_gencnt++;
kctlstat.kcs_connections++;
- lck_mtx_unlock(ctl_mtx);
+ lck_mtx_unlock(&ctl_mtx);
/*
* rdar://15526688: Limit the send and receive sizes to sb_max
sbmaxsize = (u_quad_t)sb_max * MCLBYTES / (MSIZE + MCLBYTES);
if (kctl->sendbufsize > sbmaxsize) {
- sendbufsize = sbmaxsize;
+ sendbufsize = (u_int32_t)sbmaxsize;
} else {
sendbufsize = kctl->sendbufsize;
}
if (kctl->recvbufsize > sbmaxsize) {
- recvbufsize = sbmaxsize;
+ recvbufsize = (u_int32_t)sbmaxsize;
} else {
recvbufsize = kctl->recvbufsize;
}
done:
if (error) {
soisdisconnected(so);
- lck_mtx_lock(ctl_mtx);
+#if DEVELOPMENT || DEBUG
+ kcb->status = KCTL_DISCONNECTED;
+#endif /* DEVELOPMENT || DEBUG */
+ lck_mtx_lock(&ctl_mtx);
TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
kcb->kctl = NULL;
kcb->sac.sc_unit = 0;
kctlstat.kcs_pcbcount--;
kctlstat.kcs_gencnt++;
kctlstat.kcs_conn_fail++;
- lck_mtx_unlock(ctl_mtx);
+ lck_mtx_unlock(&ctl_mtx);
}
return error;
}
panic("ctl_bind so_pcb null\n");
}
+ lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
+ ctl_kcb_increment_use_count(kcb, mtx_held);
+ ctl_kcb_require_clearing(kcb, mtx_held);
+
error = ctl_setup_kctl(so, nam, p);
if (error) {
- return error;
+ goto out;
}
if (kcb->kctl == NULL) {
}
if (kcb->kctl->bind == NULL) {
- return EINVAL;
+ error = EINVAL;
+ goto out;
}
socket_unlock(so, 0);
error = (*kcb->kctl->bind)(kcb->kctl->kctlref, &kcb->sac, &kcb->userdata);
socket_lock(so, 0);
+out:
+ ctl_kcb_done_clearing(kcb);
+ ctl_kcb_decrement_use_count(kcb);
return error;
}
panic("ctl_connect so_pcb null\n");
}
+ lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
+ ctl_kcb_increment_use_count(kcb, mtx_held);
+ ctl_kcb_require_clearing(kcb, mtx_held);
+
+#if DEVELOPMENT || DEBUG
+ if (kcb->status != KCTL_DISCONNECTED && ctl_panic_debug) {
+ panic("kctl already connecting/connected");
+ }
+ kcb->status = KCTL_CONNECTING;
+#endif /* DEVELOPMENT || DEBUG */
+
error = ctl_setup_kctl(so, nam, p);
if (error) {
- return error;
+ goto out;
}
if (kcb->kctl == NULL) {
goto end;
}
soisconnected(so);
+#if DEVELOPMENT || DEBUG
+ kcb->status = KCTL_CONNECTED;
+#endif /* DEVELOPMENT || DEBUG */
end:
if (error && kcb->kctl->disconnect) {
}
if (error) {
soisdisconnected(so);
- lck_mtx_lock(ctl_mtx);
+#if DEVELOPMENT || DEBUG
+ kcb->status = KCTL_DISCONNECTED;
+#endif /* DEVELOPMENT || DEBUG */
+ lck_mtx_lock(&ctl_mtx);
TAILQ_REMOVE(&kcb->kctl->kcb_head, kcb, next);
kcb->kctl = NULL;
kcb->sac.sc_unit = 0;
kctlstat.kcs_pcbcount--;
kctlstat.kcs_gencnt++;
kctlstat.kcs_conn_fail++;
- lck_mtx_unlock(ctl_mtx);
+ lck_mtx_unlock(&ctl_mtx);
}
+out:
+ ctl_kcb_done_clearing(kcb);
+ ctl_kcb_decrement_use_count(kcb);
return error;
}
struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
if ((kcb = (struct ctl_cb *)so->so_pcb)) {
+ lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
+ ctl_kcb_increment_use_count(kcb, mtx_held);
+ ctl_kcb_require_clearing(kcb, mtx_held);
struct kctl *kctl = kcb->kctl;
if (kctl && kctl->disconnect) {
}
soisdisconnected(so);
+#if DEVELOPMENT || DEBUG
+ kcb->status = KCTL_DISCONNECTED;
+#endif /* DEVELOPMENT || DEBUG */
socket_unlock(so, 0);
- lck_mtx_lock(ctl_mtx);
+ lck_mtx_lock(&ctl_mtx);
kcb->kctl = 0;
kcb->sac.sc_unit = 0;
while (kcb->usecount != 0) {
- msleep(&kcb->usecount, ctl_mtx, 0, "kcb->usecount", 0);
+ msleep(&kcb->usecount, &ctl_mtx, 0, "kcb->usecount", 0);
}
TAILQ_REMOVE(&kctl->kcb_head, kcb, next);
kctlstat.kcs_pcbcount--;
kctlstat.kcs_gencnt++;
- lck_mtx_unlock(ctl_mtx);
+ lck_mtx_unlock(&ctl_mtx);
socket_lock(so, 0);
+ ctl_kcb_done_clearing(kcb);
+ ctl_kcb_decrement_use_count(kcb);
}
return 0;
}
static int
ctl_usr_rcvd(struct socket *so, int flags)
{
+ int error = 0;
struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
struct kctl *kctl;
+ if (kcb == NULL) {
+ return ENOTCONN;
+ }
+
+ lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
+ ctl_kcb_increment_use_count(kcb, mtx_held);
+
if ((kctl = kcb->kctl) == NULL) {
- return EINVAL;
+ error = EINVAL;
+ goto out;
}
if (kctl->rcvd) {
ctl_sbrcv_trim(so);
- return 0;
+out:
+ ctl_kcb_decrement_use_count(kcb);
+ return error;
}
static int
error = ENOTCONN;
}
+ lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
+ ctl_kcb_increment_use_count(kcb, mtx_held);
+
if (error == 0 && (kctl = kcb->kctl) == NULL) {
error = EINVAL;
}
if (error != 0) {
OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_send_fail);
}
+ ctl_kcb_decrement_use_count(kcb);
+
return error;
}
error = ENOTCONN;
}
+ lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
+ ctl_kcb_increment_use_count(kcb, mtx_held);
+
if (error == 0 && (kctl = kcb->kctl) == NULL) {
error = EINVAL;
}
if (error != 0) {
OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_send_list_fail);
}
+ ctl_kcb_decrement_use_count(kcb);
+
return error;
}
static errno_t
-ctl_rcvbspace(struct socket *so, u_int32_t datasize,
+ctl_rcvbspace(struct socket *so, size_t datasize,
u_int32_t kctlflags, u_int32_t flags)
{
struct sockbuf *sb = &so->so_rcv;
error = 0;
}
} else {
- u_int32_t autorcvbuf_max;
+ size_t autorcvbuf_max;
/*
* Allow overcommit of 25%
/*
* Grow with a little bit of leeway
*/
- u_int32_t grow = datasize - space + MSIZE;
+ size_t grow = datasize - space + MSIZE;
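+ /*
+ * sbreserve() takes a u_int32_t, so clamp the size_t
+ * arithmetic to UINT32_MAX before narrowing.
+ */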
+ u_int32_t cc = (u_int32_t)MIN(MIN((sb->sb_hiwat + grow), autorcvbuf_max), UINT32_MAX);
- if (sbreserve(sb,
- min((sb->sb_hiwat + grow), autorcvbuf_max)) == 1) {
+ if (sbreserve(sb, cc) == 1) {
if (sb->sb_hiwat > ctl_autorcvbuf_high) {
ctl_autorcvbuf_high = sb->sb_hiwat;
}
}
so_recv_data_stat(so, m, 0);
- if (sbappend(&so->so_rcv, m) != 0) {
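+ /*
+ * The mbuf chain is owned by the caller, so use the "nodrop"
+ * variant of sbappend and let the caller dispose of it on failure.
+ */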
+ if (sbappend_nodrop(&so->so_rcv, m) != 0) {
if ((flags & CTL_DATA_NOWAKEUP) == 0) {
sorwakeup(so);
}
*/
m->m_nextpkt = NULL;
so_recv_data_stat(so, m, 0);
- if (sbappendrecord(&so->so_rcv, m) != 0) {
+ if (sbappendrecord_nodrop(&so->so_rcv, m) != 0) {
needwakeup = 1;
} else {
/*
if (mlen + curlen > len) {
mlen = len - curlen;
}
- n->m_len = mlen;
+ n->m_len = (int32_t)mlen;
bcopy((char *)data + curlen, n->m_data, mlen);
curlen += mlen;
}
m->m_flags |= M_EOR;
}
so_recv_data_stat(so, m, 0);
+ /*
+ * No need to call the "nodrop" variant of sbappend
+ * because the mbuf is local to the scope of the function
+ */
if (sbappend(&so->so_rcv, m) != 0) {
if ((flags & CTL_DATA_NOWAKEUP) == 0) {
sorwakeup(so);
struct kctl *kctl;
int error = 0;
void *data = NULL;
+ size_t data_len = 0;
size_t len;
if (sopt->sopt_level != SYSPROTO_CONTROL) {
return EINVAL;
}
+ lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
+ ctl_kcb_increment_use_count(kcb, mtx_held);
+
switch (sopt->sopt_dir) {
case SOPT_SET:
if (kctl->setopt == NULL) {
- return ENOTSUP;
+ error = ENOTSUP;
+ goto out;
}
if (sopt->sopt_valsize != 0) {
- MALLOC(data, void *, sopt->sopt_valsize, M_TEMP,
- M_WAITOK | M_ZERO);
+ data_len = sopt->sopt_valsize;
+ data = kheap_alloc(KHEAP_TEMP, data_len, Z_WAITOK | Z_ZERO);
if (data == NULL) {
- return ENOMEM;
+ data_len = 0;
+ error = ENOMEM;
+ goto out;
}
error = sooptcopyin(sopt, data,
sopt->sopt_valsize, sopt->sopt_valsize);
socket_lock(so, 0);
}
- if (data != NULL) {
- FREE(data, M_TEMP);
- }
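+ /*
+ * kheap_free() is NULL-safe and needs the allocation size,
+ * hence data_len is tracked alongside data.
+ */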
+ kheap_free(KHEAP_TEMP, data, data_len);
break;
case SOPT_GET:
if (kctl->getopt == NULL) {
- return ENOTSUP;
+ error = ENOTSUP;
+ goto out;
}
if (sopt->sopt_valsize && sopt->sopt_val) {
- MALLOC(data, void *, sopt->sopt_valsize, M_TEMP,
- M_WAITOK | M_ZERO);
+ data_len = sopt->sopt_valsize;
+ data = kheap_alloc(KHEAP_TEMP, data_len, Z_WAITOK | Z_ZERO);
if (data == NULL) {
- return ENOMEM;
+ data_len = 0;
+ error = ENOMEM;
+ goto out;
}
/*
* 4108337 - copy user data in case the
}
}
}
- if (data != NULL) {
- FREE(data, M_TEMP);
- }
+
+ kheap_free(KHEAP_TEMP, data, data_len);
break;
}
+
+out:
+ ctl_kcb_decrement_use_count(kcb);
return error;
}
struct kctl *kctl;
u_int32_t n = 0;
- lck_mtx_lock(ctl_mtx);
+ lck_mtx_lock(&ctl_mtx);
TAILQ_FOREACH(kctl, &ctl_head, next)
n++;
- lck_mtx_unlock(ctl_mtx);
+ lck_mtx_unlock(&ctl_mtx);
bcopy(&n, data, sizeof(n));
error = 0;
error = EINVAL;
break;
}
- lck_mtx_lock(ctl_mtx);
+ lck_mtx_lock(&ctl_mtx);
kctl = ctl_find_by_name(ctl_info.ctl_name);
- lck_mtx_unlock(ctl_mtx);
+ lck_mtx_unlock(&ctl_mtx);
if (kctl == 0) {
error = ENOENT;
break;
}
static void
-kctl_tbl_grow()
+kctl_tbl_grow(void)
{
struct kctl **new_table;
uintptr_t new_size;
- lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED);
+ lck_mtx_assert(&ctl_mtx, LCK_MTX_ASSERT_OWNED);
if (kctl_tbl_growing) {
/* Another thread is allocating */
kctl_tbl_growing_waiting++;
do {
- (void) msleep((caddr_t) &kctl_tbl_growing, ctl_mtx,
+ (void) msleep((caddr_t) &kctl_tbl_growing, &ctl_mtx,
PSOCK | PCATCH, "kctl_tbl_growing", 0);
} while (kctl_tbl_growing);
kctl_tbl_growing_waiting--;
new_size = kctl_tbl_size + KCTL_TBL_INC;
- lck_mtx_unlock(ctl_mtx);
- new_table = _MALLOC(sizeof(struct kctl *) * new_size,
- M_TEMP, M_WAIT | M_ZERO);
- lck_mtx_lock(ctl_mtx);
+ lck_mtx_unlock(&ctl_mtx);
+ new_table = kheap_alloc(KHEAP_DEFAULT, sizeof(struct kctl *) * new_size,
+ Z_WAITOK | Z_ZERO);
+ lck_mtx_lock(&ctl_mtx);
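+ /*
+ * ctl_mtx was dropped across the blocking allocation;
+ * kctl_tbl_growing keeps concurrent growers out, so the
+ * table pointer and size are still consistent here.
+ */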
if (new_table != NULL) {
if (kctl_table != NULL) {
bcopy(kctl_table, new_table,
kctl_tbl_size * sizeof(struct kctl *));
- _FREE(kctl_table, M_TEMP);
+ kheap_free(KHEAP_DEFAULT, kctl_table,
+ sizeof(struct kctl *) * kctl_tbl_size);
}
kctl_table = new_table;
kctl_tbl_size = new_size;
{
uintptr_t i;
- lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED);
+ lck_mtx_assert(&ctl_mtx, LCK_MTX_ASSERT_OWNED);
if (kctl_tbl_count >= kctl_tbl_size) {
kctl_tbl_grow();
*/
uintptr_t i = (((uintptr_t)kctlref) & KCTLREF_INDEX_MASK) - 1;
- lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED);
+ lck_mtx_assert(&ctl_mtx, LCK_MTX_ASSERT_OWNED);
if (i < kctl_tbl_size) {
struct kctl *kctl = kctl_table[i];
uintptr_t i = (((uintptr_t)kctlref) & KCTLREF_INDEX_MASK) - 1;
struct kctl *kctl = NULL;
- lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED);
+ lck_mtx_assert(&ctl_mtx, LCK_MTX_ASSERT_OWNED);
if (i >= kctl_tbl_size) {
kctlstat.kcs_bad_kctlref++;
u_int32_t id = 1;
size_t name_len;
int is_extended = 0;
+ int is_setup = 0;
if (userkctl == NULL) { /* sanity check */
return EINVAL;
return EINVAL;
}
- MALLOC(kctl, struct kctl *, sizeof(*kctl), M_TEMP, M_WAITOK);
+ kctl = kheap_alloc(KHEAP_DEFAULT, sizeof(struct kctl), Z_WAITOK | Z_ZERO);
if (kctl == NULL) {
return ENOMEM;
}
- bzero((char *)kctl, sizeof(*kctl));
- lck_mtx_lock(ctl_mtx);
+ lck_mtx_lock(&ctl_mtx);
if (kctl_make_ref(kctl) == NULL) {
- lck_mtx_unlock(ctl_mtx);
- FREE(kctl, M_TEMP);
+ lck_mtx_unlock(&ctl_mtx);
+ kheap_free(KHEAP_DEFAULT, kctl, sizeof(struct kctl));
return ENOMEM;
}
/* Verify the same name isn't already registered */
if (ctl_find_by_name(userkctl->ctl_name) != NULL) {
kctl_delete_ref(kctl->kctlref);
- lck_mtx_unlock(ctl_mtx);
- FREE(kctl, M_TEMP);
+ lck_mtx_unlock(&ctl_mtx);
+ kheap_free(KHEAP_DEFAULT, kctl, sizeof(struct kctl));
return EEXIST;
}
if (ctl_find_by_id_unit(userkctl->ctl_id, userkctl->ctl_unit)) {
kctl_delete_ref(kctl->kctlref);
- lck_mtx_unlock(ctl_mtx);
- FREE(kctl, M_TEMP);
+ lck_mtx_unlock(&ctl_mtx);
+ kheap_free(KHEAP_DEFAULT, kctl, sizeof(struct kctl));
return EEXIST;
}
kctl->id = userkctl->ctl_id;
}
is_extended = (userkctl->ctl_flags & CTL_FLAG_REG_EXTENDED);
+ is_setup = (userkctl->ctl_flags & CTL_FLAG_REG_SETUP);
strlcpy(kctl->name, userkctl->ctl_name, MAX_KCTL_NAME);
kctl->flags = userkctl->ctl_flags;
kctl->recvbufsize = userkctl->ctl_recvsize;
}
+ if (is_setup) {
+ kctl->setup = userkctl->ctl_setup;
+ }
kctl->bind = userkctl->ctl_bind;
kctl->connect = userkctl->ctl_connect;
kctl->disconnect = userkctl->ctl_disconnect;
kctlstat.kcs_reg_count++;
kctlstat.kcs_gencnt++;
- lck_mtx_unlock(ctl_mtx);
+ lck_mtx_unlock(&ctl_mtx);
*kctlref = kctl->kctlref;
{
struct kctl *kctl;
- lck_mtx_lock(ctl_mtx);
+ lck_mtx_lock(&ctl_mtx);
if ((kctl = kctl_from_ref(kctlref)) == NULL) {
kctlstat.kcs_bad_kctlref++;
- lck_mtx_unlock(ctl_mtx);
+ lck_mtx_unlock(&ctl_mtx);
if (ctl_debug != 0) {
printf("%s invalid kctlref %p\n",
__func__, kctlref);
}
if (!TAILQ_EMPTY(&kctl->kcb_head)) {
- lck_mtx_unlock(ctl_mtx);
+ lck_mtx_unlock(&ctl_mtx);
return EBUSY;
}
kctlstat.kcs_gencnt++;
kctl_delete_ref(kctl->kctlref);
- lck_mtx_unlock(ctl_mtx);
+ lck_mtx_unlock(&ctl_mtx);
ctl_post_msg(KEV_CTL_DEREGISTERED, kctl->id);
- FREE(kctl, M_TEMP);
+ kheap_free(KHEAP_DEFAULT, kctl, sizeof(struct kctl));
return 0;
}
{
struct kctl *kctl;
- lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED);
+ lck_mtx_assert(&ctl_mtx, LCK_MTX_ASSERT_OWNED);
TAILQ_FOREACH(kctl, &ctl_head, next)
if (strncmp(kctl->name, name, sizeof(kctl->name)) == 0) {
u_int32_t ctl_id = 0;
struct kctl *kctl;
- lck_mtx_lock(ctl_mtx);
+ lck_mtx_lock(&ctl_mtx);
kctl = ctl_find_by_name(name);
if (kctl) {
ctl_id = kctl->id;
}
- lck_mtx_unlock(ctl_mtx);
+ lck_mtx_unlock(&ctl_mtx);
return ctl_id;
}
int found = 0;
struct kctl *kctl;
- lck_mtx_lock(ctl_mtx);
+ lck_mtx_lock(&ctl_mtx);
TAILQ_FOREACH(kctl, &ctl_head, next) {
if (kctl->id == id) {
break;
strlcpy(out_name, kctl->name, maxsize);
found = 1;
}
- lck_mtx_unlock(ctl_mtx);
+ lck_mtx_unlock(&ctl_mtx);
return found ? 0 : ENOENT;
}
{
struct kctl *kctl;
- lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED);
+ lck_mtx_assert(&ctl_mtx, LCK_MTX_ASSERT_OWNED);
TAILQ_FOREACH(kctl, &ctl_head, next) {
if (kctl->id == id && (kctl->flags & CTL_FLAG_REG_ID_UNIT) == 0) {
{
struct ctl_cb *kcb;
- lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_OWNED);
+ lck_mtx_assert(&ctl_mtx, LCK_MTX_ASSERT_OWNED);
TAILQ_FOREACH(kcb, &kctl->kcb_head, next)
if (kcb->sac.sc_unit == unit) {
lr_saved = __builtin_return_address(0);
- lck_mtx_lock(ctl_mtx);
+ lck_mtx_lock(&ctl_mtx);
/*
* First validate the kctlref
*/
if ((kctl = kctl_from_ref(kctlref)) == NULL) {
kctlstat.kcs_bad_kctlref++;
- lck_mtx_unlock(ctl_mtx);
+ lck_mtx_unlock(&ctl_mtx);
if (ctl_debug != 0) {
printf("%s invalid kctlref %p\n",
__func__, kctlref);
kcb = kcb_find(kctl, unit);
if (kcb == NULL || kcb->kctl != kctl || (so = kcb->so) == NULL) {
- lck_mtx_unlock(ctl_mtx);
+ lck_mtx_unlock(&ctl_mtx);
return NULL;
}
/*
/*
* Respect lock ordering: socket before ctl_mtx
*/
- lck_mtx_unlock(ctl_mtx);
+ lck_mtx_unlock(&ctl_mtx);
socket_lock(so, 1);
/*
i = (so->next_lock_lr + SO_LCKDBG_MAX - 1) % SO_LCKDBG_MAX;
so->lock_lr[i] = lr_saved;
- lck_mtx_lock(ctl_mtx);
+ lck_mtx_lock(&ctl_mtx);
if ((kctl = kctl_from_ref(kctlref)) == NULL || kcb->kctl == NULL) {
- lck_mtx_unlock(ctl_mtx);
+ lck_mtx_unlock(&ctl_mtx);
socket_unlock(so, 1);
so = NULL;
- lck_mtx_lock(ctl_mtx);
+ lck_mtx_lock(&ctl_mtx);
} else if (kctlflags != NULL) {
*kctlflags = kctl->flags;
}
wakeup((event_t)&kcb->usecount);
}
- lck_mtx_unlock(ctl_mtx);
+ lck_mtx_unlock(&ctl_mtx);
return so;
}
struct ctl_event_data ctl_ev_data;
struct kev_msg ev_msg;
- lck_mtx_assert(ctl_mtx, LCK_MTX_ASSERT_NOTOWNED);
+ lck_mtx_assert(&ctl_mtx, LCK_MTX_ASSERT_NOTOWNED);
bzero(&ev_msg, sizeof(struct kev_msg));
ev_msg.vendor_code = KEV_VENDOR_APPLE;
}
if (so->so_pcb != NULL) {
- lck_mtx_lock(((struct ctl_cb *)so->so_pcb)->mtx);
+ lck_mtx_lock(&((struct ctl_cb *)so->so_pcb)->mtx);
} else {
panic("ctl_lock: so=%p NO PCB! lr=%p lrh= %s\n",
so, lr_saved, solockhistory_nr(so));
printf("ctl_unlock: so=%llx sopcb=%x lock=%llx ref=%u lr=%llx\n",
(uint64_t)VM_KERNEL_ADDRPERM(so),
(uint64_t)VM_KERNEL_ADDRPERM(so->so_pcb),
- (uint64_t)VM_KERNEL_ADDRPERM(((struct ctl_cb *)so->so_pcb)->mtx),
+ (uint64_t)VM_KERNEL_ADDRPERM(&((struct ctl_cb *)so->so_pcb)->mtx),
so->so_usecount, (uint64_t)VM_KERNEL_ADDRPERM(lr_saved));
#endif /* (MORE_KCTLLOCK_DEBUG && (DEVELOPMENT || DEBUG)) */
if (refcount) {
solockhistory_nr(so));
/* NOTREACHED */
}
- mutex_held = ((struct ctl_cb *)so->so_pcb)->mtx;
+ mutex_held = &((struct ctl_cb *)so->so_pcb)->mtx;
lck_mtx_assert(mutex_held, LCK_MTX_ASSERT_OWNED);
so->unlock_lr[so->next_unlock_lr] = lr_saved;
panic("ctl_getlock: so=%p usecount=%x lrh= %s\n",
so, so->so_usecount, solockhistory_nr(so));
}
- return kcb->mtx;
+ return &kcb->mtx;
} else {
panic("ctl_getlock: so=%p NULL NO so_pcb %s\n",
so, solockhistory_nr(so));
{
#pragma unused(oidp, arg1, arg2)
int error = 0;
- int n, i;
+ u_int64_t i, n;
struct xsystmgen xsg;
void *buf = NULL;
struct kctl *kctl;
size_t item_size = ROUNDUP64(sizeof(struct xkctl_reg));
- buf = _MALLOC(item_size, M_TEMP, M_WAITOK | M_ZERO);
+ buf = kheap_alloc(KHEAP_TEMP, item_size, Z_WAITOK | Z_ZERO);
if (buf == NULL) {
return ENOMEM;
}
- lck_mtx_lock(ctl_mtx);
+ lck_mtx_lock(&ctl_mtx);
n = kctlstat.kcs_reg_count;
if (req->oldptr == USER_ADDR_NULL) {
- req->oldidx = (n + n / 8) * sizeof(struct xkctl_reg);
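+ /* n + n / 8 leaves headroom for controls registered before the copyout */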
+ req->oldidx = (size_t)(n + n / 8) * sizeof(struct xkctl_reg);
goto done;
}
if (req->newptr != USER_ADDR_NULL) {
goto done;
}
- i = 0;
for (i = 0, kctl = TAILQ_FIRST(&ctl_head);
i < n && kctl != NULL;
i++, kctl = TAILQ_NEXT(kctl, next)) {
}
done:
- lck_mtx_unlock(ctl_mtx);
+ lck_mtx_unlock(&ctl_mtx);
- if (buf != NULL) {
- FREE(buf, M_TEMP);
- }
+ kheap_free(KHEAP_TEMP, buf, item_size);
return error;
}
{
#pragma unused(oidp, arg1, arg2)
int error = 0;
- int n, i;
+ u_int64_t n, i;
struct xsystmgen xsg;
void *buf = NULL;
struct kctl *kctl;
2 * ROUNDUP64(sizeof(struct xsockbuf_n)) +
ROUNDUP64(sizeof(struct xsockstat_n));
- buf = _MALLOC(item_size, M_TEMP, M_WAITOK | M_ZERO);
+ buf = kheap_alloc(KHEAP_TEMP, item_size, Z_WAITOK | Z_ZERO);
if (buf == NULL) {
return ENOMEM;
}
- lck_mtx_lock(ctl_mtx);
+ lck_mtx_lock(&ctl_mtx);
n = kctlstat.kcs_pcbcount;
if (req->oldptr == USER_ADDR_NULL) {
- req->oldidx = (n + n / 8) * item_size;
+ req->oldidx = (size_t)(n + n / 8) * item_size;
goto done;
}
if (req->newptr != USER_ADDR_NULL) {
goto done;
}
- i = 0;
for (i = 0, kctl = TAILQ_FIRST(&ctl_head);
i < n && kctl != NULL;
kctl = TAILQ_NEXT(kctl, next)) {
}
done:
- lck_mtx_unlock(ctl_mtx);
+ lck_mtx_unlock(&ctl_mtx);
+ kheap_free(KHEAP_TEMP, buf, item_size);
return error;
}
#pragma unused(oidp, arg1, arg2)
int error = 0;
- lck_mtx_lock(ctl_mtx);
+ lck_mtx_lock(&ctl_mtx);
if (req->newptr != USER_ADDR_NULL) {
error = EPERM;
error = SYSCTL_OUT(req, &kctlstat,
MIN(sizeof(struct kctlstat), req->oldlen));
done:
- lck_mtx_unlock(ctl_mtx);
+ lck_mtx_unlock(&ctl_mtx);
return error;
}