/*
- * Copyright (c) 1998-2015 Apple Inc. All rights reserved.
+ * Copyright (c) 1998-2017 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
#define DBG_FNC_SBDROP NETDBG_CODE(DBG_NETSOCK, 4)
#define DBG_FNC_SBAPPEND NETDBG_CODE(DBG_NETSOCK, 5)
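+/* Best available name for a process, used in the defunct/throttle log messages below */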
+extern char *proc_best_name(proc_t p);
+
SYSCTL_DECL(_kern_ipc);
__private_extern__ u_int32_t net_io_policy_throttle_best_effort = 0;
static u_int32_t sb_efficiency = 8; /* parameter for sbreserve() */
int32_t total_sbmb_cnt __attribute__((aligned(8))) = 0;
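+/* Low-water mark of total_sbmb_cnt, the counterpart to total_sbmb_cnt_peak */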
+int32_t total_sbmb_cnt_floor __attribute__((aligned(8))) = 0;
int32_t total_sbmb_cnt_peak __attribute__((aligned(8))) = 0;
-int32_t total_snd_byte_count __attribute__((aligned(8))) = 0;
int64_t sbmb_limreached __attribute__((aligned(8))) = 0;
/* Control whether to throttle sockets eligible to be throttled */
void
soisconnecting(struct socket *so)
{
-
so->so_state &= ~(SS_ISCONNECTED|SS_ISDISCONNECTING);
so->so_state |= SS_ISCONNECTING;
void
soisconnected(struct socket *so)
{
- struct socket *head = so->so_head;
-
so->so_state &= ~(SS_ISCONNECTING|SS_ISDISCONNECTING|SS_ISCONFIRMING);
so->so_state |= SS_ISCONNECTED;
sflt_notify(so, sock_evt_connected, NULL);
- if (head && (so->so_state & SS_INCOMP)) {
- so->so_state &= ~SS_INCOMP;
- so->so_state |= SS_COMP;
+ if (so->so_head != NULL && (so->so_state & SS_INCOMP)) {
+ struct socket *head = so->so_head;
+ int locked = 0;
+
+ /*
+ * Enforce lock ordering when the protocol has per-socket locks
+ */
if (head->so_proto->pr_getlock != NULL) {
- socket_unlock(so, 0);
socket_lock(head, 1);
+ so_acquire_accept_list(head, so);
+ locked = 1;
}
- postevent(head, 0, EV_RCONN);
- TAILQ_REMOVE(&head->so_incomp, so, so_list);
- head->so_incqlen--;
- TAILQ_INSERT_TAIL(&head->so_comp, so, so_list);
- sorwakeup(head);
- wakeup_one((caddr_t)&head->so_timeo);
- if (head->so_proto->pr_getlock != NULL) {
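+ /*
+ * Re-check under the head's lock: the socket may have left the
+ * incomplete queue while we were acquiring the accept list
+ */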
+ if (so->so_head == head && (so->so_state & SS_INCOMP)) {
+ so->so_state &= ~SS_INCOMP;
+ so->so_state |= SS_COMP;
+ TAILQ_REMOVE(&head->so_incomp, so, so_list);
+ TAILQ_INSERT_TAIL(&head->so_comp, so, so_list);
+ head->so_incqlen--;
+
+ /*
+ * Release the accept list (and drop so's lock) before the
+ * wakeups below, in case a socket callback calls sock_accept()
+ */
+ if (locked != 0) {
+ so_release_accept_list(head);
+ socket_unlock(so, 0);
+ }
+ postevent(head, 0, EV_RCONN);
+ sorwakeup(head);
+ wakeup_one((caddr_t)&head->so_timeo);
+
+ if (locked != 0) {
+ socket_unlock(head, 1);
+ socket_lock(so, 0);
+ }
+ } else if (locked != 0) {
+ so_release_accept_list(head);
socket_unlock(head, 1);
- socket_lock(so, 0);
}
} else {
postevent(so, 0, EV_WCONN);
return ((so->so_state & SS_ISCONNECTED) ||
!(so->so_proto->pr_flags & PR_CONNREQUIRED) ||
(so->so_flags1 & SOF1_PRECONNECT_DATA));
-
}
void
#endif
/* inherit traffic management properties of listener */
- so->so_traffic_mgt_flags =
- head->so_traffic_mgt_flags & (TRAFFIC_MGT_SO_BACKGROUND);
+ so->so_flags1 |=
+ head->so_flags1 & (SOF1_TRAFFIC_MGT_SO_BACKGROUND);
so->so_background_thread = head->so_background_thread;
so->so_traffic_class = head->so_traffic_class;
atomic_add_32(&so->so_proto->pr_domain->dom_refs, 1);
/* Insert in head appropriate lists */
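+ /* The accept list lock guards head->so_incomp and head->so_comp */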
+ so_acquire_accept_list(head, NULL);
+
so->so_head = head;
/*
}
head->so_qlen++;
+ so_release_accept_list(head);
+
/* Attach socket filters for this protocol */
sflt_initsock(so);
/* NOTREACHED */
}
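+ /*
+ * Bail out early: on a draining or defunct socket the wait can
+ * never be satisfied, so do not bother sleeping on it
+ */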
+ if ((so->so_state & SS_DRAINING) || (so->so_flags & SOF_DEFUNCT)) {
+ error = EBADF;
+ if (so->so_flags & SOF_DEFUNCT) {
+ SODEFUNCTLOG("%s[%d, %s]: defunct so 0x%llx [%d,%d] "
+ "(%d)\n", __func__, proc_selfpid(),
+ proc_best_name(current_proc()),
+ (uint64_t)VM_KERNEL_ADDRPERM(so),
+ SOCK_DOM(so), SOCK_TYPE(so), error);
+ }
+ return (error);
+ }
+
if (so->so_proto->pr_getlock != NULL)
mutex_held = (*so->so_proto->pr_getlock)(so, 0);
else
if ((so->so_state & SS_DRAINING) || (so->so_flags & SOF_DEFUNCT)) {
error = EBADF;
if (so->so_flags & SOF_DEFUNCT) {
- SODEFUNCTLOG(("%s[%d]: defunct so 0x%llx [%d,%d] "
+ SODEFUNCTLOG("%s[%d, %s]: defunct so 0x%llx [%d,%d] "
"(%d)\n", __func__, proc_selfpid(),
+ proc_best_name(current_proc()),
(uint64_t)VM_KERNEL_ADDRPERM(so),
- SOCK_DOM(so), SOCK_TYPE(so), error));
+ SOCK_DOM(so), SOCK_TYPE(so), error);
}
}
sowakeup(struct socket *so, struct sockbuf *sb)
{
if (so->so_flags & SOF_DEFUNCT) {
- SODEFUNCTLOG(("%s[%d]: defunct so 0x%llx [%d,%d] si 0x%x, "
+ SODEFUNCTLOG("%s[%d, %s]: defunct so 0x%llx [%d,%d] si 0x%x, "
"fl 0x%x [%s]\n", __func__, proc_selfpid(),
+ proc_best_name(current_proc()),
(uint64_t)VM_KERNEL_ADDRPERM(so), SOCK_DOM(so),
SOCK_TYPE(so), (uint32_t)sb->sb_sel.si_flags, sb->sb_flags,
- (sb->sb_flags & SB_RECV) ? "rcv" : "snd"));
+ (sb->sb_flags & SB_RECV) ? "rcv" : "snd");
}
sb->sb_flags &= ~SB_SEL;
int
soreserve(struct socket *so, u_int32_t sndcc, u_int32_t rcvcc)
{
-
if (sbreserve(&so->so_snd, sndcc) == 0)
goto bad;
else
/* XXX: Probably don't need */
sb->sb_ctl += m->m_len;
}
+
+ /* update send byte count: appended data is both total and unsent */
+ if (sb->sb_flags & SB_SNDBYTE_CNT) {
+ inp_incr_sndbytes_total(sb->sb_so,
+ m->m_len);
+ inp_incr_sndbytes_unsent(sb->sb_so,
+ m->m_len);
+ }
m = m_free(m);
continue;
}
{
void *lr_saved = __builtin_return_address(0);
struct socket *so = sb->sb_so;
-#ifdef notyet
- lck_mtx_t *mutex_held;
-#endif
u_int32_t i;
/* so_usecount may be 0 if we get here from sofreelastref() */
so->so_usecount, lr_saved, solockhistory_nr(so));
/* NOTREACHED */
}
-#ifdef notyet
- /*
- * XXX: This code is currently commented out, because we may get here
- * as part of sofreelastref(), and at that time, pr_getlock() may no
- * longer be able to return us the lock; this will be fixed in future.
- */
- if (so->so_proto->pr_getlock != NULL)
- mutex_held = (*so->so_proto->pr_getlock)(so, 0);
- else
- mutex_held = so->so_proto->pr_domain->dom_mtx;
-
- lck_mtx_assert(mutex_held, LCK_MTX_ASSERT_OWNED);
-#endif
/*
* Obtain lock on the socket buffer (SB_LOCK). This is required
ml = (struct mbuf *)0;
while (len > 0) {
- if (m == 0) {
- if (next == 0) {
+ if (m == NULL) {
+ if (next == NULL) {
/*
* temporarily replacing this panic with printf
* because it occurs occasionally when closing
m->m_len -= len;
m->m_data += len;
sb->sb_cc -= len;
+ /* update the send byte count */
+ if (sb->sb_flags & SB_SNDBYTE_CNT)
+ inp_decr_sndbytes_total(sb->sb_so, len);
if (m->m_type != MT_DATA && m->m_type != MT_HEADER &&
m->m_type != MT_OOBDATA)
sb->sb_ctl -= len;
}
int
-pru_connectx_notsupp(struct socket *so, struct sockaddr_list **src_sl,
- struct sockaddr_list **dst_sl, struct proc *p, uint32_t ifscope,
+pru_connectx_notsupp(struct socket *so, struct sockaddr *src,
+ struct sockaddr *dst, struct proc *p, uint32_t ifscope,
sae_associd_t aid, sae_connid_t *pcid, uint32_t flags, void *arg,
uint32_t arglen, struct uio *uio, user_ssize_t *bytes_written)
{
-#pragma unused(so, src_sl, dst_sl, p, ifscope, aid, pcid, flags, arg, arglen, uio, bytes_written)
+#pragma unused(so, src, dst, p, ifscope, aid, pcid, flags, arg, arglen, uio, bytes_written)
return (EOPNOTSUPP);
}
}
int
-pru_soreceive_list_notsupp(struct socket *so,
+pru_soreceive_list_notsupp(struct socket *so,
struct recv_msg_elem *recv_msg_array, u_int uiocnt, int *flagsp)
{
#pragma unused(so, recv_msg_array, uiocnt, flagsp)
total_sbmb_cnt_peak = total_sbmb_cnt;
/*
- * If data is being appended to the send socket buffer,
+ * If data is being added to the send socket buffer,
* update the send byte count
*/
- if (!(sb->sb_flags & SB_RECV))
- OSAddAtomic(cnt, &total_snd_byte_count);
+ if (sb->sb_flags & SB_SNDBYTE_CNT) {
+ inp_incr_sndbytes_total(sb->sb_so, m->m_len);
+ inp_incr_sndbytes_unsent(sb->sb_so, m->m_len);
+ }
}
/* adjust counters in sb reflecting freeing of m */
}
OSAddAtomic(cnt, &total_sbmb_cnt);
VERIFY(total_sbmb_cnt >= 0);
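+ /* track the low-water mark, mirroring the peak tracking in sballoc() */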
+ if (total_sbmb_cnt < total_sbmb_cnt_floor)
+ total_sbmb_cnt_floor = total_sbmb_cnt;
/*
* If data is being removed from the send socket buffer,
* update the send byte count
*/
- if (!(sb->sb_flags & SB_RECV)) {
- OSAddAtomic(cnt, &total_snd_byte_count);
- }
+ if (sb->sb_flags & SB_SNDBYTE_CNT)
+ inp_decr_sndbytes_total(sb->sb_so, m->m_len);
}
/*
if (error == 0 && (so->so_flags & SOF_DEFUNCT) &&
!(flags & SBL_IGNDEFUNCT)) {
error = EBADF;
- SODEFUNCTLOG(("%s[%d]: defunct so 0x%llx [%d,%d] "
+ SODEFUNCTLOG("%s[%d, %s]: defunct so 0x%llx [%d,%d] "
"(%d)\n", __func__, proc_selfpid(),
+ proc_best_name(current_proc()),
(uint64_t)VM_KERNEL_ADDRPERM(so),
- SOCK_DOM(so), SOCK_TYPE(so), error));
+ SOCK_DOM(so), SOCK_TYPE(so), error);
}
if (error != 0)
lck_mtx_assert(mutex_held, LCK_MTX_ASSERT_OWNED);
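+ /* so_usecount is signed; strictly positive also catches underflow */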
- VERIFY(so->so_usecount != 0);
+ VERIFY(so->so_usecount > 0);
so->so_usecount--;
so->unlock_lr[so->next_unlock_lr] = lr_saved;
so->next_unlock_lr = (so->next_unlock_lr + 1) % SO_LCKDBG_MAX;
* application, as we're missing the system wide "decision maker"
*/
return (
- (so->so_traffic_mgt_flags & TRAFFIC_MGT_SO_BACKGROUND));
+ (so->so_flags1 & SOF1_TRAFFIC_MGT_SO_BACKGROUND));
}
inline int
soissrcbackground(struct socket *so)
{
- return ((so->so_traffic_mgt_flags & TRAFFIC_MGT_SO_BACKGROUND) ||
+ return ((so->so_flags1 & SOF1_TRAFFIC_MGT_SO_BACKGROUND) ||
IS_SO_TC_BACKGROUND(so->so_traffic_class));
}
return (err);
if (i != net_io_policy_throttled)
- SOTHROTTLELOG(("throttle: network IO policy throttling is "
- "now %s\n", i ? "ON" : "OFF"));
+ SOTHROTTLELOG("throttle: network IO policy throttling is "
+ "now %s\n", i ? "ON" : "OFF");
net_io_policy_throttled = i;
SYSCTL_INT(_kern_ipc, OID_AUTO, soqlencomp, CTLFLAG_RW | CTLFLAG_LOCKED,
&soqlencomp, 0, "Listen backlog represents only complete queue");
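+/*
+ * Socket-buffer mbuf accounting, readable from user space, e.g.:
+ *	sysctl kern.ipc.sbmb_cnt kern.ipc.sbmb_cnt_peak kern.ipc.sbmb_cnt_floor
+ */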
+SYSCTL_INT(_kern_ipc, OID_AUTO, sbmb_cnt, CTLFLAG_RD | CTLFLAG_LOCKED,
+ &total_sbmb_cnt, 0, "");
+SYSCTL_INT(_kern_ipc, OID_AUTO, sbmb_cnt_peak, CTLFLAG_RD | CTLFLAG_LOCKED,
+ &total_sbmb_cnt_peak, 0, "");
+SYSCTL_INT(_kern_ipc, OID_AUTO, sbmb_cnt_floor, CTLFLAG_RD | CTLFLAG_LOCKED,
+ &total_sbmb_cnt_floor, 0, "");
+SYSCTL_QUAD(_kern_ipc, OID_AUTO, sbmb_limreached, CTLFLAG_RD | CTLFLAG_LOCKED,
+ &sbmb_limreached, "");
+
SYSCTL_NODE(_kern_ipc, OID_AUTO, io_policy, CTLFLAG_RW, 0, "network IO policy");
SYSCTL_PROC(_kern_ipc_io_policy, OID_AUTO, throttled,