+
+ lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
+ ctl_kcb_increment_use_count(kcb, mtx_held);
+
+ if (error == 0 && (kctl = kcb->kctl) == NULL) {
+ error = EINVAL;
+ }
+
+ if (error == 0 && kctl->send) {
+ so_tc_update_stats(m, so, m_get_service_class(m));
+ socket_unlock(so, 0);
+ error = (*kctl->send)(kctl->kctlref, kcb->sac.sc_unit, kcb->userdata,
+ m, flags);
+ socket_lock(so, 0);
+ } else {
+ m_freem(m);
+ if (error == 0) {
+ error = ENOTSUP;
+ }
+ }
+ if (error != 0) {
+ OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_send_fail);
+ }
+ clt_kcb_decrement_use_count(kcb);
+
+ return error;
+}
+
+/*
+ * ctl_send_list: hand a list of packets (linked through m_nextpkt) to
+ * the kernel control attached to this socket.  Prefers the controller's
+ * send_list callback and falls back to per-packet send.  Any control
+ * mbufs are not used and are freed immediately.
+ *
+ * Returns 0 on success, ENOTCONN when the socket has no control block,
+ * EINVAL when the control block has lost its kctl, ENOTSUP when the
+ * controller registered neither send_list nor send.  On every failure
+ * path the data mbufs are freed and kcs_send_list_fail is bumped.
+ */
+static int
+ctl_send_list(struct socket *so, int flags, struct mbuf *m,
+ __unused struct sockaddr *addr, struct mbuf *control,
+ __unused struct proc *p)
+{
+ int error = 0;
+ struct ctl_cb *kcb = (struct ctl_cb *)so->so_pcb;
+ struct kctl *kctl;
+
+ if (control) {
+ m_freem_list(control);
+ }
+
+ /*
+ * Sanity check.  Bail out before ctl_kcb_increment_use_count()
+ * touches kcb: the previous code only recorded ENOTCONN here and
+ * then passed the NULL kcb down, a NULL pointer dereference.
+ */
+ if (kcb == NULL) {
+ m_freem_list(m);
+ OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_send_list_fail);
+ return ENOTCONN;
+ }
+
+ lck_mtx_t *mtx_held = socket_getlock(so, PR_F_WILLUNLOCK);
+ ctl_kcb_increment_use_count(kcb, mtx_held);
+
+ /* error is necessarily 0 here, so test the kctl directly */
+ if ((kctl = kcb->kctl) == NULL) {
+ error = EINVAL;
+ }
+
+ if (error == 0 && kctl->send_list) {
+ struct mbuf *nxt;
+
+ for (nxt = m; nxt != NULL; nxt = nxt->m_nextpkt) {
+ so_tc_update_stats(nxt, so, m_get_service_class(nxt));
+ }
+
+ /* Drop the socket lock across the controller callback */
+ socket_unlock(so, 0);
+ error = (*kctl->send_list)(kctl->kctlref, kcb->sac.sc_unit,
+ kcb->userdata, m, flags);
+ socket_lock(so, 0);
+ } else if (error == 0 && kctl->send) {
+ /* No send_list callback: feed the packets one at a time */
+ while (m != NULL && error == 0) {
+ struct mbuf *nextpkt = m->m_nextpkt;
+
+ m->m_nextpkt = NULL;
+ so_tc_update_stats(m, so, m_get_service_class(m));
+ socket_unlock(so, 0);
+ error = (*kctl->send)(kctl->kctlref, kcb->sac.sc_unit,
+ kcb->userdata, m, flags);
+ socket_lock(so, 0);
+ m = nextpkt;
+ }
+ /* On mid-list failure, free the unsent remainder */
+ if (m != NULL) {
+ m_freem_list(m);
+ }
+ } else {
+ m_freem_list(m);
+ if (error == 0) {
+ error = ENOTSUP;
+ }
+ }
+ if (error != 0) {
+ OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_send_list_fail);
+ }
+ clt_kcb_decrement_use_count(kcb);
+
+ return error;
+}
+
+/*
+ * ctl_rcvbspace: decide whether the receive socket buffer can accept
+ * datasize bytes.  Controllers registered with CTL_FLAG_REG_CRIT keep
+ * 25% of the buffer reserved for CTL_DATA_CRIT messages, and critical
+ * data may additionally grow the buffer (25% overcommit of the ideal
+ * size) up to ctl_autorcvbuf_max.
+ *
+ * Returns 0 when the data fits, ENOBUFS otherwise.
+ *
+ * NOTE(review): space is held as u_int32_t; if sbspace() can return a
+ * negative value the assignment wraps to a huge unsigned — confirm
+ * against sbspace()'s contract.
+ */
+static errno_t
+ctl_rcvbspace(struct socket *so, u_int32_t datasize,
+ u_int32_t kctlflags, u_int32_t flags)
+{
+ struct sockbuf *sb = &so->so_rcv;
+ u_int32_t space = sbspace(sb);
+ errno_t error;
+
+ if ((kctlflags & CTL_FLAG_REG_CRIT) == 0) {
+ /* No critical reservation: a plain fit test */
+ if ((u_int32_t) space >= datasize) {
+ error = 0;
+ } else {
+ error = ENOBUFS;
+ }
+ } else if ((flags & CTL_DATA_CRIT) == 0) {
+ /*
+ * Reserve 25% for critical messages
+ */
+ if (space < (sb->sb_hiwat >> 2) ||
+ space < datasize) {
+ error = ENOBUFS;
+ } else {
+ error = 0;
+ }
+ } else {
+ u_int32_t autorcvbuf_max;
+
+ /*
+ * Allow overcommit of 25%
+ */
+ autorcvbuf_max = min(sb->sb_idealsize + (sb->sb_idealsize >> 2),
+ ctl_autorcvbuf_max);
+
+ if ((u_int32_t) space >= datasize) {
+ error = 0;
+ } else if (tcp_cansbgrow(sb) &&
+ sb->sb_hiwat < autorcvbuf_max) {
+ /*
+ * Grow with a little bit of leeway
+ */
+ u_int32_t grow = datasize - space + MSIZE;
+
+ if (sbreserve(sb,
+ min((sb->sb_hiwat + grow), autorcvbuf_max)) == 1) {
+ if (sb->sb_hiwat > ctl_autorcvbuf_high) {
+ ctl_autorcvbuf_high = sb->sb_hiwat;
+ }
+
+ /*
+ * A final check
+ */
+ if ((u_int32_t) sbspace(sb) >= datasize) {
+ error = 0;
+ } else {
+ error = ENOBUFS;
+ }
+
+ if (ctl_debug) {
+ /* sb_hiwat is unsigned: %u (was a mismatched %d) */
+ printf("%s - grown to %u error %d\n",
+ __func__, sb->sb_hiwat, error);
+ }
+ } else {
+ error = ENOBUFS;
+ }
+ } else {
+ error = ENOBUFS;
+ }
+ }
+ return error;
+}
+
+/*
+ * ctl_enqueuembuf: append one mbuf chain to the receive buffer of the
+ * kernel control socket identified by (kctlref, unit) and wake the
+ * reader unless CTL_DATA_NOWAKEUP is set.
+ *
+ * Returns 0 on success, EINVAL when no socket matches, ENOBUFS when
+ * the receive buffer has no room for the data.
+ */
+errno_t
+ctl_enqueuembuf(kern_ctl_ref kctlref, u_int32_t unit, struct mbuf *m,
+ u_int32_t flags)
+{
+ struct socket *so;
+ errno_t error = 0;
+ int len = m->m_pkthdr.len;
+ u_int32_t kctlflags;
+
+ /* kcb_find_socket() takes the socket lock with a reference */
+ so = kcb_find_socket(kctlref, unit, &kctlflags);
+ if (so == NULL) {
+ return EINVAL;
+ }
+
+ /* Honor the critical-data reservation before appending */
+ if (ctl_rcvbspace(so, len, kctlflags, flags) != 0) {
+ error = ENOBUFS;
+ OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fullsock);
+ goto bye;
+ }
+ if ((flags & CTL_DATA_EOR)) {
+ m->m_flags |= M_EOR;
+ }
+
+ so_recv_data_stat(so, m, 0);
+ /* NOTE(review): assumes sbappend() consumes m even on failure — confirm */
+ if (sbappend(&so->so_rcv, m) != 0) {
+ if ((flags & CTL_DATA_NOWAKEUP) == 0) {
+ sorwakeup(so);
+ }
+ } else {
+ error = ENOBUFS;
+ OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fullsock);
+ }
+bye:
+ if (ctl_debug && error != 0 && (flags & CTL_DATA_CRIT)) {
+ printf("%s - crit data err %d len %d hiwat %d cc: %d\n",
+ __func__, error, len,
+ so->so_rcv.sb_hiwat, so->so_rcv.sb_cc);
+ }
+
+ /* Drops the lock and the reference taken by kcb_find_socket() */
+ socket_unlock(so, 1);
+ if (error != 0) {
+ OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fail);
+ }
+
+ return error;
+}
+
+/*
+ * Compute space occupied by mbuf like sbappendrecord
+ */
+/*
+ * Total bytes held in an mbuf chain, mirroring the accounting that
+ * sbappendrecord() performs.
+ */
+static int
+m_space(struct mbuf *m)
+{
+ struct mbuf *cur = m;
+ int total = 0;
+
+ while (cur != NULL) {
+ total += cur->m_len;
+ cur = cur->m_next;
+ }
+
+ return total;
+}
+
+/*
+ * ctl_enqueuembuf_list: append a list of packets (linked through
+ * m_nextpkt) as individual records on the receive buffer of the
+ * control socket bound to (kctlref, unit).  Not supported for
+ * SOCK_STREAM controls and CTL_DATA_EOR is rejected.
+ *
+ * On failure part-way through, the unconsumed remainder of the list is
+ * returned through *m_remain when the caller supplied it, otherwise it
+ * is freed.  Returns 0 on success, EINVAL/EOPNOTSUPP on bad arguments,
+ * ENOBUFS when the receive buffer fills up.
+ */
+errno_t
+ctl_enqueuembuf_list(void *kctlref, u_int32_t unit, struct mbuf *m_list,
+ u_int32_t flags, struct mbuf **m_remain)
+{
+ struct socket *so = NULL;
+ errno_t error = 0;
+ struct mbuf *m, *nextpkt;
+ int needwakeup = 0;
+ int len = 0;
+ u_int32_t kctlflags;
+
+ /*
+ * Need to point the beginning of the list in case of early exit
+ */
+ m = m_list;
+
+ /*
+ * kcb_find_socket takes the socket lock with a reference
+ */
+ so = kcb_find_socket(kctlref, unit, &kctlflags);
+ if (so == NULL) {
+ error = EINVAL;
+ goto done;
+ }
+
+ if (kctlflags & CTL_FLAG_REG_SOCK_STREAM) {
+ error = EOPNOTSUPP;
+ goto done;
+ }
+ if (flags & CTL_DATA_EOR) {
+ error = EINVAL;
+ goto done;
+ }
+
+ for (m = m_list; m != NULL; m = nextpkt) {
+ nextpkt = m->m_nextpkt;
+
+ if (m->m_pkthdr.len == 0 && ctl_debug) {
+ printf("%s: %llx m_pkthdr.len is 0",
+ __func__, (uint64_t)VM_KERNEL_ADDRPERM(m));
+ }
+
+ /*
+ * The mbuf is either appended or freed by sbappendrecord()
+ * so it's not reliable from a data standpoint
+ */
+ len = m_space(m);
+ if (ctl_rcvbspace(so, len, kctlflags, flags) != 0) {
+ error = ENOBUFS;
+ OSIncrementAtomic64(
+ (SInt64 *)&kctlstat.kcs_enqueue_fullsock);
+ break;
+ } else {
+ /*
+ * Unlink from the list, m is on its own
+ */
+ m->m_nextpkt = NULL;
+ so_recv_data_stat(so, m, 0);
+ if (sbappendrecord(&so->so_rcv, m) != 0) {
+ needwakeup = 1;
+ } else {
+ /*
+ * We free or return the remaining
+ * mbufs in the list
+ */
+ m = nextpkt;
+ error = ENOBUFS;
+ OSIncrementAtomic64(
+ (SInt64 *)&kctlstat.kcs_enqueue_fullsock);
+ break;
+ }
+ }
+ }
+ /* Wake the reader once for the whole batch */
+ if (needwakeup && (flags & CTL_DATA_NOWAKEUP) == 0) {
+ sorwakeup(so);
+ }
+
+done:
+ if (so != NULL) {
+ if (ctl_debug && error != 0 && (flags & CTL_DATA_CRIT)) {
+ printf("%s - crit data err %d len %d hiwat %d cc: %d\n",
+ __func__, error, len,
+ so->so_rcv.sb_hiwat, so->so_rcv.sb_cc);
+ }
+
+ /* Drops the lock and the reference from kcb_find_socket() */
+ socket_unlock(so, 1);
+ }
+ /* Hand back (or free) whatever was not consumed */
+ if (m_remain) {
+ *m_remain = m;
+
+ if (m != NULL && socket_debug && so != NULL &&
+ (so->so_options & SO_DEBUG)) {
+ struct mbuf *n;
+
+ printf("%s m_list %llx\n", __func__,
+ (uint64_t) VM_KERNEL_ADDRPERM(m_list));
+ for (n = m; n != NULL; n = n->m_nextpkt) {
+ printf(" remain %llx m_next %llx\n",
+ (uint64_t) VM_KERNEL_ADDRPERM(n),
+ (uint64_t) VM_KERNEL_ADDRPERM(n->m_next));
+ }
+ }
+ } else {
+ if (m != NULL) {
+ m_freem_list(m);
+ }
+ }
+ if (error != 0) {
+ OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fail);
+ }
+ return error;
+}
+
+errno_t
+ctl_enqueuedata(void *kctlref, u_int32_t unit, void *data, size_t len,
+ u_int32_t flags)
+{
+ struct socket *so;
+ struct mbuf *m;
+ errno_t error = 0;
+ unsigned int num_needed;
+ struct mbuf *n;
+ size_t curlen = 0;
+ u_int32_t kctlflags;
+
+ so = kcb_find_socket(kctlref, unit, &kctlflags);
+ if (so == NULL) {
+ return EINVAL;
+ }
+
+ if (ctl_rcvbspace(so, len, kctlflags, flags) != 0) {
+ error = ENOBUFS;
+ OSIncrementAtomic64((SInt64 *)&kctlstat.kcs_enqueue_fullsock);
+ goto bye;
+ }
+
+ num_needed = 1;
+ m = m_allocpacket_internal(&num_needed, len, NULL, M_NOWAIT, 1, 0);
+ if (m == NULL) {
+ kctlstat.kcs_enqdata_mb_alloc_fail++;
+ if (ctl_debug) {
+ printf("%s: m_allocpacket_internal(%lu) failed\n",
+ __func__, len);
+ }
+ error = ENOMEM;
+ goto bye;
+ }
+