/*
- * Copyright (c) 1998-2007 Apple Inc. All rights reserved.
+ * Copyright (c) 1998-2012 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
#include <sys/kauth.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
+#include <sys/mcache.h>
#include <sys/protosw.h>
#include <sys/stat.h>
#include <sys/socket.h>
#include <security/mac_framework.h>
#endif
+#include <mach/vm_param.h>
+
/* TODO: this should be in a header file somewhere */
extern void postevent(struct socket *, struct sockbuf *, int);
static int soqlimitcompat = 1;
static int soqlencomp = 0;
-u_long sb_max = SB_MAX; /* XXX should be static */
+/* Based on the number of mbuf clusters configured, high_sb_max and sb_max can be
+ * scaled up or down to suit that memory configuration. high_sb_max is the upper
+ * limit enforced on sb_max when sb_max is set through sysctl.
+ */
+
+u_int32_t sb_max = SB_MAX; /* XXX should be static */
+u_int32_t high_sb_max = SB_MAX;
+
+static u_int32_t sb_efficiency = 8; /* parameter for sbreserve() */
+__private_extern__ int32_t total_sbmb_cnt = 0;
-static u_long sb_efficiency = 8; /* parameter for sbreserve() */
-__private_extern__ unsigned int total_mb_cnt = 0;
-__private_extern__ unsigned int total_cl_cnt = 0;
-__private_extern__ int sbspace_factor = 8;
+/* Control whether to throttle sockets eligible to be throttled */
+__private_extern__ u_int32_t net_io_policy_throttled = 0;
+static int sysctl_io_policy_throttled SYSCTL_HANDLER_ARGS;
/*
* Procedures to manipulate state flags of socket
wakeup((caddr_t)&so->so_timeo);
sorwakeup(so);
sowwakeup(so);
+ soevent(so, SO_FILT_HINT_LOCKED);
}
}
{
so->so_state &= ~SS_ISCONNECTING;
so->so_state |= (SS_ISDISCONNECTING|SS_CANTRCVMORE|SS_CANTSENDMORE);
+ soevent(so, SO_FILT_HINT_LOCKED);
sflt_notify(so, sock_evt_disconnecting, NULL);
wakeup((caddr_t)&so->so_timeo);
sowwakeup(so);
{
so->so_state &= ~(SS_ISCONNECTING|SS_ISCONNECTED|SS_ISDISCONNECTING);
so->so_state |= (SS_CANTRCVMORE|SS_CANTSENDMORE|SS_ISDISCONNECTED);
+ soevent(so, SO_FILT_HINT_LOCKED);
sflt_notify(so, sock_evt_disconnected, NULL);
wakeup((caddr_t)&so->so_timeo);
sowwakeup(so);
sorwakeup(so);
}
+/* This function will issue a wakeup like soisdisconnected but it will not
+ * notify the socket filters. This will avoid unlocking the socket
+ * in the midst of closing it.
+ */
+void
+sodisconnectwakeup(struct socket *so)
+{
+ so->so_state &= ~(SS_ISCONNECTING|SS_ISCONNECTED|SS_ISDISCONNECTING);
+ so->so_state |= (SS_CANTRCVMORE|SS_CANTSENDMORE|SS_ISDISCONNECTED);
+ soevent(so, SO_FILT_HINT_LOCKED);
+ wakeup((caddr_t)&so->so_timeo);
+ sowwakeup(so);
+ sorwakeup(so);
+}
+
/*
* When an attempt at a new connection is noted on a socket
* which accepts connections, sonewconn is called. If the
if (so_qlen >=
(soqlimitcompat ? head->so_qlimit : (3 * head->so_qlimit / 2)))
return ((struct socket *)0);
- so = soalloc(M_NOWAIT, head->so_proto->pr_domain->dom_family,
+ so = soalloc(1, head->so_proto->pr_domain->dom_family,
head->so_type);
if (so == NULL)
return ((struct socket *)0);
return ((struct socket *)0);
}
- so->so_head = head;
so->so_type = head->so_type;
so->so_options = head->so_options &~ SO_ACCEPTCONN;
so->so_linger = head->so_linger;
so->so_proto = head->so_proto;
so->so_timeo = head->so_timeo;
so->so_pgid = head->so_pgid;
- so->so_uid = head->so_uid;
- so->so_flags = head->so_flags & (SOF_REUSESHAREUID|SOF_NOTIFYCONFLICT); /* inherit SO_REUSESHAREUID and SO_NOTIFYCONFLICT ocket options */
+ kauth_cred_ref(head->so_cred);
+ so->so_cred = head->so_cred;
+ so->last_pid = head->last_pid;
+ so->last_upid = head->last_upid;
+ /* inherit socket options stored in so_flags */
+ so->so_flags = head->so_flags & (SOF_NOSIGPIPE |
+ SOF_NOADDRAVAIL |
+ SOF_REUSESHAREUID |
+ SOF_NOTIFYCONFLICT |
+ SOF_BINDRANDOMPORT |
+ SOF_NPX_SETOPTSHUT |
+ SOF_NODEFUNCT |
+ SOF_PRIVILEGED_TRAFFIC_CLASS|
+ SOF_NOTSENT_LOWAT |
+ SOF_USELRO);
so->so_usecount = 1;
so->next_lock_lr = 0;
so->next_unlock_lr = 0;
mac_socket_label_associate_accept(head, so);
#endif
+ /* inherit traffic management properties of listener */
+ so->so_traffic_mgt_flags = head->so_traffic_mgt_flags & (TRAFFIC_MGT_SO_BACKGROUND);
+ so->so_background_thread = head->so_background_thread;
+ so->so_traffic_class = head->so_traffic_class;
+
if (soreserve(so, head->so_snd.sb_hiwat, head->so_rcv.sb_hiwat)) {
- sflt_termsock(so);
sodealloc(so);
return ((struct socket *)0);
}
+ so->so_rcv.sb_flags |= (head->so_rcv.sb_flags & SB_USRSIZE);
+ so->so_snd.sb_flags |= (head->so_snd.sb_flags & SB_USRSIZE);
/*
* Must be done with head unlocked to avoid deadlock
socket_unlock(head, 0);
if (((*so->so_proto->pr_usrreqs->pru_attach)(so, 0, NULL) != 0) ||
error) {
- sflt_termsock(so);
sodealloc(so);
if (head->so_proto->pr_unlock)
socket_lock(head, 0);
return ((struct socket *)0);
}
- if (head->so_proto->pr_unlock)
+ if (head->so_proto->pr_unlock) {
socket_lock(head, 0);
+		/* Radar 7385998: recheck that the head is still accepting
+		 * connections, to avoid a race while the head is being closed.
+		 */
+ if ((head->so_options & SO_ACCEPTCONN) == 0) {
+ so->so_state &= ~SS_NOFDREF;
+ soclose(so);
+ return ((struct socket *)0);
+ }
+ }
+
#ifdef __APPLE__
so->so_proto->pr_domain->dom_refs++;
#endif
+	/* Insert into the head's appropriate lists */
+ so->so_head = head;
+
+	/* Since this socket is going to be inserted into the incomp
+	 * queue, it can be picked up by another thread in
+	 * tcp_dropdropablreq and dropped before it is fully set up.
+	 * To prevent this race, set the in-progress flag, which is
+	 * cleared later.
+	 */
+ so->so_flags |= SOF_INCOMP_INPROGRESS;
if (connstatus) {
TAILQ_INSERT_TAIL(&head->so_comp, so, so_list);
struct socket *
sonewconn(struct socket *head, int connstatus, const struct sockaddr *from)
{
- int error = 0;
- struct socket_filter_entry *filter;
- int filtered = 0;
-
- for (filter = head->so_filt; filter && (error == 0);
- filter = filter->sfe_next_onsocket) {
- if (filter->sfe_filter->sf_filter.sf_connect_in) {
- if (filtered == 0) {
- filtered = 1;
- sflt_use(head);
- socket_unlock(head, 0);
- }
- error = filter->sfe_filter->sf_filter.
- sf_connect_in(filter->sfe_cookie, head, from);
- }
- }
- if (filtered != 0) {
- socket_lock(head, 0);
- sflt_unuse(head);
- }
-
+ int error = sflt_connectin(head, from);
if (error) {
return (NULL);
}
socantsendmore(struct socket *so)
{
so->so_state |= SS_CANTSENDMORE;
+ soevent(so, SO_FILT_HINT_LOCKED);
sflt_notify(so, sock_evt_cantsendmore, NULL);
sowwakeup(so);
}
socantrcvmore(struct socket *so)
{
so->so_state |= SS_CANTRCVMORE;
+ soevent(so, SO_FILT_HINT_LOCKED);
sflt_notify(so, sock_evt_cantrecvmore, NULL);
sorwakeup(so);
}
int
sbwait(struct sockbuf *sb)
{
- int error = 0, lr_saved;
+ int error = 0;
+ uintptr_t lr_saved;
struct socket *so = sb->sb_so;
lck_mtx_t *mutex_held;
struct timespec ts;
- lr_saved = (unsigned int) __builtin_return_address(0);
+ lr_saved = (uintptr_t) __builtin_return_address(0);
if (so->so_proto->pr_getlock != NULL)
mutex_held = (*so->so_proto->pr_getlock)(so, 0);
else
mutex_held = so->so_proto->pr_domain->dom_mtx;
+ lck_mtx_assert(mutex_held, LCK_MTX_ASSERT_OWNED);
sb->sb_flags |= SB_WAIT;
if (so->so_usecount < 1)
panic("sbwait: so=%p refcount=%d\n", so, so->so_usecount);
- if ((so->so_state & SS_DRAINING)) {
+ if ((so->so_state & SS_DRAINING) || (so->so_flags & SOF_DEFUNCT)) {
error = EBADF;
+ if (so->so_flags & SOF_DEFUNCT) {
+ SODEFUNCTLOG(("%s[%d]: defunct so %p [%d,%d] (%d)\n",
+ __func__, proc_selfpid(), so, INP_SOCKAF(so),
+ INP_SOCKTYPE(so), error));
+ }
}
return (error);
while (sb->sb_flags & SB_LOCK) {
sb->sb_flags |= SB_WANT;
+
if (so->so_proto->pr_getlock != NULL)
mutex_held = (*so->so_proto->pr_getlock)(so, 0);
else
mutex_held = so->so_proto->pr_domain->dom_mtx;
+ lck_mtx_assert(mutex_held, LCK_MTX_ASSERT_OWNED);
+
if (so->so_usecount < 1)
panic("sb_lock: so=%p refcount=%d\n", so,
so->so_usecount);
if (so->so_usecount < 1)
panic("sb_lock: 2 so=%p refcount=%d\n", so,
so->so_usecount);
+
+ if (error == 0 && (so->so_flags & SOF_DEFUNCT)) {
+ error = EBADF;
+ SODEFUNCTLOG(("%s[%d]: defunct so %p [%d,%d] (%d)\n",
+ __func__, proc_selfpid(), so, INP_SOCKAF(so),
+ INP_SOCKTYPE(so), error));
+ }
+
if (error)
return (error);
}
return (0);
}
+void
+sbwakeup(struct sockbuf *sb)
+{
+ if (sb->sb_flags & SB_WAIT) {
+ sb->sb_flags &= ~SB_WAIT;
+ wakeup((caddr_t)&sb->sb_cc);
+ }
+}
+
/*
* Wakeup processes waiting on a socket buffer.
* Do asynchronous notification via SIGIO
void
sowakeup(struct socket *so, struct sockbuf *sb)
{
+ if (so->so_flags & SOF_DEFUNCT) {
+ SODEFUNCTLOG(("%s[%d]: defunct so %p [%d,%d] si 0x%x, "
+ "fl 0x%x [%s]\n", __func__, proc_selfpid(), so,
+ INP_SOCKAF(so), INP_SOCKTYPE(so),
+ (uint32_t)sb->sb_sel.si_flags, (uint16_t)sb->sb_flags,
+ (sb->sb_flags & SB_RECV) ? "rcv" : "snd"));
+ }
+
sb->sb_flags &= ~SB_SEL;
selwakeup(&sb->sb_sel);
- if (sb->sb_flags & SB_WAIT) {
- sb->sb_flags &= ~SB_WAIT;
- wakeup((caddr_t)&sb->sb_cc);
- }
+ sbwakeup(sb);
if (so->so_state & SS_ASYNC) {
if (so->so_pgid < 0)
gsignal(-so->so_pgid, SIGIO);
so_upcall = so->so_upcall;
so_upcallarg = so->so_upcallarg;
/* Let close know that we're about to do an upcall */
- so->so_flags |= SOF_UPCALLINUSE;
+ so->so_upcallusecount++;
socket_unlock(so, 0);
(*so_upcall)(so, so_upcallarg, M_DONTWAIT);
socket_lock(so, 0);
- so->so_flags &= ~SOF_UPCALLINUSE;
+ so->so_upcallusecount--;
/* Tell close that it's safe to proceed */
- if (so->so_flags & SOF_CLOSEWAIT)
+	if ((so->so_flags & SOF_CLOSEWAIT) && so->so_upcallusecount == 0)
wakeup((caddr_t)&so->so_upcall);
}
}
* ENOBUFS
*/
int
-soreserve(struct socket *so, u_long sndcc, u_long rcvcc)
+soreserve(struct socket *so, u_int32_t sndcc, u_int32_t rcvcc)
{
if (sbreserve(&so->so_snd, sndcc) == 0)
goto bad;
+ else
+ so->so_snd.sb_idealsize = sndcc;
+
if (sbreserve(&so->so_rcv, rcvcc) == 0)
goto bad2;
+ else
+ so->so_rcv.sb_idealsize = rcvcc;
+
if (so->so_rcv.sb_lowat == 0)
so->so_rcv.sb_lowat = 1;
if (so->so_snd.sb_lowat == 0)
* if buffering efficiency is near the normal case.
*/
int
-sbreserve(struct sockbuf *sb, u_long cc)
+sbreserve(struct sockbuf *sb, u_int32_t cc)
{
if ((u_quad_t)cc > (u_quad_t)sb_max * MCLBYTES / (MSIZE + MCLBYTES))
return (0);
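	/*
	 * Worked example of the cap above (values are build-dependent and
	 * given only for illustration): with 256-byte mbufs (MSIZE) and
	 * 2 KB clusters (MCLBYTES), the reservation is limited to
	 * sb_max * 2048 / 2304, i.e. roughly 8/9 of sb_max, leaving the
	 * remainder to account for mbuf bookkeeping overhead.
	 */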
return (sbappendrecord(sb, m));
if (sb->sb_flags & SB_RECV) {
- int error = sflt_data_in(so, NULL, &m, NULL, 0, NULL);
+ int error = sflt_data_in(so, NULL, &m, NULL, 0);
SBLASTRECORDCHK(sb, "sbappend 2");
if (error != 0) {
if (error != EJUSTRETURN)
}
if (sb->sb_flags & SB_RECV) {
- int error = sflt_data_in(so, NULL, &m, NULL, 0, NULL);
+ int error = sflt_data_in(so, NULL, &m, NULL, 0);
SBLASTRECORDCHK(sb, "sbappendstream 1");
if (error != 0) {
if (error != EJUSTRETURN)
{
struct mbuf *m;
struct mbuf *n = 0;
- u_long len = 0, mbcnt = 0;
+ u_int32_t len = 0, mbcnt = 0;
lck_mtx_t *mutex_held;
if (sb->sb_so->so_proto->pr_getlock != NULL)
if (sb->sb_flags & SB_RECV) {
int error = sflt_data_in(sb->sb_so, NULL, &m0, NULL,
- sock_data_filt_flag_record, NULL);
+ sock_data_filt_flag_record);
if (error != 0) {
SBLASTRECORDCHK(sb, "sbappendrecord 1");
if (error != EJUSTRETURN)
if ((sb->sb_flags & SB_RECV) != 0) {
int error = sflt_data_in(sb->sb_so, NULL, &m0, NULL,
- sock_data_filt_flag_oob, NULL);
+ sock_data_filt_flag_oob);
SBLASTRECORDCHK(sb, "sbinsertoob 2");
if (error) {
/* Call socket data in filters */
if ((sb->sb_flags & SB_RECV) != 0) {
int error;
- error = sflt_data_in(sb->sb_so, asa, &m0, &control, 0, NULL);
+ error = sflt_data_in(sb->sb_so, asa, &m0, &control, 0);
SBLASTRECORDCHK(sb, __func__);
if (error) {
if (error != EJUSTRETURN) {
if (sb->sb_flags & SB_RECV) {
int error;
- error = sflt_data_in(sb->sb_so, NULL, &m0, &control, 0, NULL);
+ error = sflt_data_in(sb->sb_so, NULL, &m0, &control, 0);
SBLASTRECORDCHK(sb, __func__);
if (error) {
if (error != EJUSTRETURN) {
{
if (!(sb->sb_cc == 0 && sb->sb_mb == NULL && sb->sb_mbcnt == 0 &&
sb->sb_mbtail == NULL && sb->sb_lastrecord == NULL)) {
- panic("%s: sb %p so %p cc %ld mbcnt %ld mb %p mbtail %p "
+ panic("%s: sb %p so %p cc %d mbcnt %d mb %p mbtail %p "
"lastrecord %p\n", where, sb, sb->sb_so, sb->sb_cc,
sb->sb_mbcnt, sb->sb_mb, sb->sb_mbtail, sb->sb_lastrecord);
/* NOTREACHED */
if ((m = m_get(M_DONTWAIT, MT_CONTROL)) == NULL)
return ((struct mbuf *)NULL);
cp = mtod(m, struct cmsghdr *);
+ VERIFY(IS_P2ALIGNED(cp, sizeof (u_int32_t)));
/* XXX check size? */
(void) memcpy(CMSG_DATA(cp), p, size);
m->m_len = CMSG_SPACE(size);
return (m);
}
+struct mbuf **
+sbcreatecontrol_mbuf(caddr_t p, int size, int type, int level, struct mbuf **mp)
+{
+	struct mbuf *m;
+	struct cmsghdr *cp;
+
+	if (*mp == NULL) {
+		*mp = sbcreatecontrol(p, size, type, level);
+		return (mp);
+	}
+
+	if (CMSG_SPACE((u_int)size) + (*mp)->m_len > MLEN) {
+		/* No room left in the current mbuf; chain a new one */
+		mp = &(*mp)->m_next;
+		*mp = sbcreatecontrol(p, size, type, level);
+		return (mp);
+	}
+
+	m = *mp;
+
+	cp = (struct cmsghdr *)(void *)(mtod(m, char *) + m->m_len);
+	/* CMSG_SPACE ensures 32-bit alignment */
+	VERIFY(IS_P2ALIGNED(cp, sizeof (u_int32_t)));
+	m->m_len += CMSG_SPACE(size);
+
+	/* XXX check size? */
+	(void) memcpy(CMSG_DATA(cp), p, size);
+	cp->cmsg_len = CMSG_LEN(size);
+	cp->cmsg_level = level;
+	cp->cmsg_type = type;
+
+	return (mp);
+}
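+
+/*
+ * Illustrative (hypothetical) caller sketch: a protocol that wants to
+ * deliver several control messages in one chain can feed the same slot
+ * pointer back in, e.g.
+ *
+ *	struct mbuf *control = NULL, **pmb = &control;
+ *	pmb = sbcreatecontrol_mbuf((caddr_t)&tv, sizeof (tv),
+ *	    SCM_TIMESTAMP, SOL_SOCKET, pmb);
+ *	if (*pmb != NULL)
+ *		pmb = sbcreatecontrol_mbuf((caddr_t)&tc, sizeof (tc),
+ *		    SO_TRAFFIC_CLASS, SOL_SOCKET, pmb);
+ *
+ * (tv and tc are placeholder variables.)  Each call appends into the
+ * current mbuf as long as the new cmsg, together with what is already
+ * there, fits within MLEN; otherwise it chains a fresh mbuf via m_next.
+ */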
+
+
/*
* Some routines that return EOPNOTSUPP for entry points that are not
* supported by a protocol. Fill in as needed.
}
int
-pru_control_notsupp(__unused struct socket *so, __unused u_long cmd,
+pru_control_notsupp(__unused struct socket *so, __unused u_long cmd,
__unused caddr_t data, __unused struct ifnet *ifp, __unused struct proc *p)
{
return (EOPNOTSUPP);
* How much space is there in a socket buffer (so->so_snd or so->so_rcv)?
* This is problematical if the fields are unsigned, as the space might
* still be negative (cc > hiwat or mbcnt > mbmax). Should detect
- * overflow and return 0. Should use "lmin" but it doesn't exist now.
+ * overflow and return 0.
*/
-long
+int
sbspace(struct sockbuf *sb)
{
- return ((long)imin((int)(sb->sb_hiwat - sb->sb_cc),
- (int)(sb->sb_mbmax - sb->sb_mbcnt)));
+ int space =
+ imin((int)(sb->sb_hiwat - sb->sb_cc),
+ (int)(sb->sb_mbmax - sb->sb_mbcnt));
+ if (space < 0)
+ space = 0;
+
+ return space;
}
/* do we have to send all at once on a socket? */
int
sowriteable(struct socket *so)
{
- return ((sbspace(&(so)->so_snd) >= (long)(so)->so_snd.sb_lowat &&
- ((so->so_state&SS_ISCONNECTED) ||
- (so->so_proto->pr_flags&PR_CONNREQUIRED) == 0)) ||
+ return ((!so_wait_for_if_feedback(so) &&
+ sbspace(&(so)->so_snd) >= (so)->so_snd.sb_lowat &&
+ ((so->so_state & SS_ISCONNECTED) ||
+ (so->so_proto->pr_flags & PR_CONNREQUIRED) == 0)) ||
(so->so_state & SS_CANTSENDMORE) ||
so->so_error);
}
void
sballoc(struct sockbuf *sb, struct mbuf *m)
{
- int cnt = 1;
+ u_int32_t cnt = 1;
sb->sb_cc += m->m_len;
if (m->m_type != MT_DATA && m->m_type != MT_HEADER &&
m->m_type != MT_OOBDATA)
if (m->m_flags & M_EXT) {
sb->sb_mbcnt += m->m_ext.ext_size;
- cnt += m->m_ext.ext_size / MSIZE ;
+		cnt += (m->m_ext.ext_size >> MSIZESHIFT);
}
- OSAddAtomic(cnt, (SInt32*)&total_mb_cnt);
+ OSAddAtomic(cnt, &total_sbmb_cnt);
+ VERIFY(total_sbmb_cnt > 0);
}
/* adjust counters in sb reflecting freeing of m */
sbfree(struct sockbuf *sb, struct mbuf *m)
{
int cnt = -1;
+
sb->sb_cc -= m->m_len;
if (m->m_type != MT_DATA && m->m_type != MT_HEADER &&
m->m_type != MT_OOBDATA)
sb->sb_mbcnt -= MSIZE;
if (m->m_flags & M_EXT) {
sb->sb_mbcnt -= m->m_ext.ext_size;
- cnt -= m->m_ext.ext_size / MSIZE ;
+		cnt -= (m->m_ext.ext_size >> MSIZESHIFT);
}
- OSAddAtomic(cnt, (SInt32*)&total_mb_cnt);
+ OSAddAtomic(cnt, &total_sbmb_cnt);
+ VERIFY(total_sbmb_cnt >= 0);
}
/*
sbunlock(struct sockbuf *sb, int keeplocked)
{
struct socket *so = sb->sb_so;
- int lr_saved;
+ void *lr_saved;
lck_mtx_t *mutex_held;
- lr_saved = (unsigned int) __builtin_return_address(0);
+ lr_saved = __builtin_return_address(0);
sb->sb_flags &= ~SB_LOCK;
if (sb->sb_flags & SB_WANT) {
sb->sb_flags &= ~SB_WANT;
- if (so->so_usecount < 0)
- panic("sbunlock: b4 wakeup so=%p ref=%d lr=%x "
- "sb_flags=%x\n", sb->sb_so, so->so_usecount,
- lr_saved, sb->sb_flags);
-
+ if (so->so_usecount < 0) {
+ panic("sbunlock: b4 wakeup so=%p ref=%d lr=%p "
+ "sb_flags=%x lrh= %s\n", sb->sb_so, so->so_usecount,
+ lr_saved, sb->sb_flags, solockhistory_nr(so));
+ /* NOTREACHED */
+ }
wakeup((caddr_t)&(sb)->sb_flags);
}
if (keeplocked == 0) { /* unlock on exit */
- if (so->so_proto->pr_getlock != NULL)
+ if (so->so_proto->pr_getlock != NULL)
mutex_held = (*so->so_proto->pr_getlock)(so, 0);
- else
+ else
mutex_held = so->so_proto->pr_domain->dom_mtx;
-
+
lck_mtx_assert(mutex_held, LCK_MTX_ASSERT_OWNED);
so->so_usecount--;
if (so->so_usecount < 0)
- panic("sbunlock: unlock on exit so=%p ref=%d lr=%x "
- "sb_flags=%x\n", so, so->so_usecount, lr_saved,
- sb->sb_flags);
- so->unlock_lr[so->next_unlock_lr] = (u_int32_t)lr_saved;
+ panic("sbunlock: unlock on exit so=%p ref=%d lr=%p "
+ "sb_flags=%x lrh= %s\n", so, so->so_usecount, lr_saved,
+ sb->sb_flags, solockhistory_nr(so));
+ so->unlock_lr[so->next_unlock_lr] = lr_saved;
so->next_unlock_lr = (so->next_unlock_lr+1) % SO_LCKDBG_MAX;
lck_mtx_unlock(mutex_held);
}
if (sb_notify(&so->so_snd))
sowakeup(so, &so->so_snd);
}
+
+void
+soevent(struct socket *so, long hint)
+{
+ if (so->so_flags & SOF_KNOTE)
+ KNOTE(&so->so_klist, hint);
+}
+
#endif /* __APPLE__ */
/*
sotoxsocket(struct socket *so, struct xsocket *xso)
{
xso->xso_len = sizeof (*xso);
- xso->xso_so = so;
+ xso->xso_so = (_XSOCKET_PTR(struct socket *))VM_KERNEL_ADDRPERM(so);
xso->so_type = so->so_type;
- xso->so_options = so->so_options;
+ xso->so_options = (short)(so->so_options & 0xffff);
xso->so_linger = so->so_linger;
xso->so_state = so->so_state;
- xso->so_pcb = so->so_pcb;
+ xso->so_pcb = (_XSOCKET_PTR(caddr_t))VM_KERNEL_ADDRPERM(so->so_pcb);
if (so->so_proto) {
xso->xso_protocol = so->so_proto->pr_protocol;
xso->xso_family = so->so_proto->pr_domain->dom_family;
xso->so_oobmark = so->so_oobmark;
sbtoxsockbuf(&so->so_snd, &xso->so_snd);
sbtoxsockbuf(&so->so_rcv, &xso->so_rcv);
- xso->so_uid = so->so_uid;
+ xso->so_uid = kauth_cred_getuid(so->so_cred);
}
+
+#if !CONFIG_EMBEDDED
+
+void
+sotoxsocket64(struct socket *so, struct xsocket64 *xso)
+{
+ xso->xso_len = sizeof (*xso);
+ xso->xso_so = (u_int64_t)VM_KERNEL_ADDRPERM(so);
+ xso->so_type = so->so_type;
+ xso->so_options = (short)(so->so_options & 0xffff);
+ xso->so_linger = so->so_linger;
+ xso->so_state = so->so_state;
+ xso->so_pcb = (u_int64_t)VM_KERNEL_ADDRPERM(so->so_pcb);
+ if (so->so_proto) {
+ xso->xso_protocol = so->so_proto->pr_protocol;
+ xso->xso_family = so->so_proto->pr_domain->dom_family;
+ } else {
+ xso->xso_protocol = xso->xso_family = 0;
+ }
+ xso->so_qlen = so->so_qlen;
+ xso->so_incqlen = so->so_incqlen;
+ xso->so_qlimit = so->so_qlimit;
+ xso->so_timeo = so->so_timeo;
+ xso->so_error = so->so_error;
+ xso->so_pgid = so->so_pgid;
+ xso->so_oobmark = so->so_oobmark;
+ sbtoxsockbuf(&so->so_snd, &xso->so_snd);
+ sbtoxsockbuf(&so->so_rcv, &xso->so_rcv);
+ xso->so_uid = kauth_cred_getuid(so->so_cred);
+}
+
+#endif /* !CONFIG_EMBEDDED */
+
/*
* This does the same for sockbufs. Note that the xsockbuf structure,
* since it is always embedded in a socket, does not include a self
xsb->sb_mbmax = sb->sb_mbmax;
xsb->sb_lowat = sb->sb_lowat;
xsb->sb_flags = sb->sb_flags;
- xsb->sb_timeo = (u_long)
+ xsb->sb_timeo = (short)
(sb->sb_timeo.tv_sec * hz) + sb->sb_timeo.tv_usec / tick;
if (xsb->sb_timeo == 0 && sb->sb_timeo.tv_usec != 0)
xsb->sb_timeo = 1;
}
+/*
+ * Based on the policy set by an all-knowing decision maker, throttle sockets
+ * that have been marked as belonging to a "background" process.
+ */
+int
+soisthrottled(struct socket *so)
+{
+ /*
+ * On non-embedded, we rely on implicit throttling by the application,
+ * as we're missing the system-wide "decision maker".
+ */
+ return (
+#if CONFIG_EMBEDDED
+ net_io_policy_throttled &&
+#endif /* CONFIG_EMBEDDED */
+ (so->so_traffic_mgt_flags & TRAFFIC_MGT_SO_BACKGROUND));
+}
+
+int
+soisprivilegedtraffic(struct socket *so)
+{
+ return (so->so_flags & SOF_PRIVILEGED_TRAFFIC_CLASS);
+}
+
/*
* Here is the definition of some of the basic objects in the kern.ipc
* branch of the MIB.
*/
-SYSCTL_NODE(_kern, KERN_IPC, ipc, CTLFLAG_RW|CTLFLAG_LOCKED, 0, "IPC");
+SYSCTL_NODE(_kern, KERN_IPC, ipc, CTLFLAG_RW|CTLFLAG_LOCKED|CTLFLAG_ANYBODY, 0, "IPC");
-/* This takes the place of kern.maxsockbuf, which moved to kern.ipc. */
-static int dummy;
-SYSCTL_INT(_kern, KERN_DUMMY, dummy, CTLFLAG_RW, &dummy, 0, "");
+/* Check that the maximum socket buffer size is within the allowed range */
-SYSCTL_INT(_kern_ipc, KIPC_MAXSOCKBUF, maxsockbuf, CTLFLAG_RW,
- &sb_max, 0, "Maximum socket buffer size");
-SYSCTL_INT(_kern_ipc, OID_AUTO, maxsockets, CTLFLAG_RD,
+static int
+sysctl_sb_max(__unused struct sysctl_oid *oidp, __unused void *arg1,
+ __unused int arg2, struct sysctl_req *req)
+{
+ u_int32_t new_value;
+ int changed = 0;
+ int error = sysctl_io_number(req, sb_max, sizeof(u_int32_t), &new_value,
+ &changed);
+	if (!error && changed) {
+		if (new_value > LOW_SB_MAX && new_value <= high_sb_max) {
+			sb_max = new_value;
+		} else {
+			error = ERANGE;
+		}
+	}
+	return (error);
+}
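+
+/*
+ * The handler above backs kern.ipc.maxsockbuf (wired up below); for
+ * example, "sysctl -w kern.ipc.maxsockbuf=4194304" (an illustrative
+ * value) succeeds only if the new value lies in (LOW_SB_MAX, high_sb_max].
+ */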
+
+static int
+sysctl_io_policy_throttled SYSCTL_HANDLER_ARGS
+{
+#pragma unused(arg1, arg2)
+ int i, err;
+
+ i = net_io_policy_throttled;
+
+ err = sysctl_handle_int(oidp, &i, 0, req);
+ if (err != 0 || req->newptr == USER_ADDR_NULL)
+ return (err);
+
+ if (i != net_io_policy_throttled)
+ SOTHROTTLELOG(("throttle: network IO policy throttling is "
+ "now %s\n", i ? "ON" : "OFF"));
+
+ net_io_policy_throttled = i;
+
+ return (err);
+}
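+
+/*
+ * The handler above backs kern.ipc.io_policy.throttled (defined below);
+ * e.g. "sysctl -w kern.ipc.io_policy.throttled=1" enables throttling and
+ * logs the transition via SOTHROTTLELOG when the value actually changes.
+ */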
+
+SYSCTL_PROC(_kern_ipc, KIPC_MAXSOCKBUF, maxsockbuf, CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
+ &sb_max, 0, &sysctl_sb_max, "IU", "Maximum socket buffer size");
+
+SYSCTL_INT(_kern_ipc, OID_AUTO, maxsockets, CTLFLAG_RD | CTLFLAG_LOCKED,
&maxsockets, 0, "Maximum number of sockets avaliable");
-SYSCTL_INT(_kern_ipc, KIPC_SOCKBUF_WASTE, sockbuf_waste_factor, CTLFLAG_RW,
+SYSCTL_INT(_kern_ipc, KIPC_SOCKBUF_WASTE, sockbuf_waste_factor, CTLFLAG_RW | CTLFLAG_LOCKED,
&sb_efficiency, 0, "");
-SYSCTL_INT(_kern_ipc, OID_AUTO, sbspace_factor, CTLFLAG_RW,
- &sbspace_factor, 0, "Ratio of mbuf/cluster use for socket layers");
-SYSCTL_INT(_kern_ipc, KIPC_NMBCLUSTERS, nmbclusters, CTLFLAG_RD,
+SYSCTL_INT(_kern_ipc, KIPC_NMBCLUSTERS, nmbclusters, CTLFLAG_RD | CTLFLAG_LOCKED,
&nmbclusters, 0, "");
-SYSCTL_INT(_kern_ipc, OID_AUTO, njcl, CTLFLAG_RD, &njcl, 0, "");
-SYSCTL_INT(_kern_ipc, OID_AUTO, njclbytes, CTLFLAG_RD, &njclbytes, 0, "");
-SYSCTL_INT(_kern_ipc, KIPC_SOQLIMITCOMPAT, soqlimitcompat, CTLFLAG_RW,
+SYSCTL_INT(_kern_ipc, OID_AUTO, njcl, CTLFLAG_RD | CTLFLAG_LOCKED, &njcl, 0, "");
+SYSCTL_INT(_kern_ipc, OID_AUTO, njclbytes, CTLFLAG_RD | CTLFLAG_LOCKED, &njclbytes, 0, "");
+SYSCTL_INT(_kern_ipc, KIPC_SOQLIMITCOMPAT, soqlimitcompat, CTLFLAG_RW | CTLFLAG_LOCKED,
&soqlimitcompat, 1, "Enable socket queue limit compatibility");
-SYSCTL_INT(_kern_ipc, OID_AUTO, soqlencomp, CTLFLAG_RW,
+SYSCTL_INT(_kern_ipc, OID_AUTO, soqlencomp, CTLFLAG_RW | CTLFLAG_LOCKED,
&soqlencomp, 0, "Listen backlog represents only complete queue");
+
+SYSCTL_NODE(_kern_ipc, OID_AUTO, io_policy, CTLFLAG_RW, 0, "network IO policy");
+
+SYSCTL_PROC(_kern_ipc_io_policy, OID_AUTO, throttled,
+ CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &net_io_policy_throttled, 0,
+ sysctl_io_policy_throttled, "I", "");