* SUCH DAMAGE.
*
* @(#)uipc_socket2.c 8.1 (Berkeley) 6/10/93
+ * $FreeBSD: src/sys/kern/uipc_socket2.c,v 1.55.2.9 2001/07/26 18:53:02 peter Exp $
*/
#include <sys/param.h>
#include <sys/sysctl.h>
#include <sys/ev.h>
+#include <sys/kdebug.h>
+
+#define DBG_FNC_SBDROP NETDBG_CODE(DBG_NETSOCK, 4)
+#define DBG_FNC_SBAPPEND NETDBG_CODE(DBG_NETSOCK, 5)
+
+
/*
* Primitive routines for operating on sockets and socket buffers
*/
static u_long sb_efficiency = 8; /* parameter for sbreserve() */
-char netcon[] = "netcon";
-
/*
* Procedures to manipulate state flags of socket
* and do appropriate wakeups. Normal sequence from the
* called during processing of connect() call,
* resulting in an eventual call to soisconnected() if/when the
* connection is established. When the connection is torn down
- * soisdisconnecting() is called during processing of disconnect() call,
+ * soisdisconnecting() is called during processing of disconnect() call,
* and soisdisconnected() is called when the connection to the peer
* is totally severed. The semantics of these routines are such that
* connectionless protocols can call soisconnected() and soisdisconnected()
* From the passive side, a socket is created with
* two queues of sockets: so_incomp for connections in progress
* and so_comp for connections already made and awaiting user acceptance.
- * As a protocol is preparing incoming connections, it creates a socket
+ * As a protocol is preparing incoming connections, it creates a socket
* structure queued on so_incomp by calling sonewconn(). When the connection
* is established, soisconnected() is called, and transfers the
* socket structure to so_comp, making it available to accept().
*
- * If a socket is closed with sockets on either
+ * If a socket is closed with sockets on either
* so_incomp or so_comp, these sockets are dropped.
- *
+ *
* If higher level protocols are implemented in
* the kernel, the wakeups done here will sometimes
* cause software-interrupt process scheduling.
void
soisconnected(so)
- register struct socket *so;
-{ register struct kextcb *kp;
- register struct socket *head = so->so_head;
+ struct socket *so;
+{
+ struct socket *head = so->so_head;
+ struct kextcb *kp;
kp = sotokextcb(so);
- while (kp)
- { if (kp->e_soif && kp->e_soif->sf_soisconnected)
- { if ((*kp->e_soif->sf_soisconnected)(so, kp))
+ while (kp) {
+ if (kp->e_soif && kp->e_soif->sf_soisconnected) {
+ if ((*kp->e_soif->sf_soisconnected)(so, kp))
return;
}
kp = kp->e_next;
TAILQ_INSERT_TAIL(&head->so_comp, so, so_list);
so->so_state |= SS_COMP;
sorwakeup(head);
- wakeup((caddr_t)&head->so_timeo);
+ wakeup_one(&head->so_timeo);
} else {
postevent(so,0,EV_WCONN);
wakeup((caddr_t)&so->so_timeo);
void
soisdisconnecting(so)
register struct socket *so;
-{ register struct kextcb *kp;
+{
+ register struct kextcb *kp;
kp = sotokextcb(so);
- while (kp)
- { if (kp->e_soif && kp->e_soif->sf_soisdisconnecting)
- { if ((*kp->e_soif->sf_soisdisconnecting)(so, kp))
+ while (kp) {
+ if (kp->e_soif && kp->e_soif->sf_soisdisconnecting) {
+ if ((*kp->e_soif->sf_soisdisconnecting)(so, kp))
return;
}
kp = kp->e_next;
void
soisdisconnected(so)
register struct socket *so;
-{ register struct kextcb *kp;
+{
+ register struct kextcb *kp;
kp = sotokextcb(so);
- while (kp)
- { if (kp->e_soif && kp->e_soif->sf_soisdisconnected)
- { if ((*kp->e_soif->sf_soisdisconnected)(so, kp))
+ while (kp) {
+ if (kp->e_soif && kp->e_soif->sf_soisdisconnected) {
+ if ((*kp->e_soif->sf_soisdisconnected)(so, kp))
return;
}
kp = kp->e_next;
}
so->so_state &= ~(SS_ISCONNECTING|SS_ISCONNECTED|SS_ISDISCONNECTING);
- so->so_state |= (SS_CANTRCVMORE|SS_CANTSENDMORE);
+ so->so_state |= (SS_CANTRCVMORE|SS_CANTSENDMORE|SS_ISDISCONNECTED);
wakeup((caddr_t)&so->so_timeo);
sowwakeup(so);
sorwakeup(so);
sonewconn(head, connstatus)
register struct socket *head;
int connstatus;
-{ int error = 0;
+{
+ int error = 0;
register struct socket *so;
register struct kextcb *kp;
if (head->so_qlen > 3 * head->so_qlimit / 2)
return ((struct socket *)0);
- so = soalloc(0, head->so_proto->pr_domain->dom_family, head->so_type);
+ so = soalloc(1, head->so_proto->pr_domain->dom_family, head->so_type);
if (so == NULL)
return ((struct socket *)0);
-
- kp = sotokextcb(so);
- while (kp)
- { if (kp->e_soif && kp->e_soif->sf_sonewconn1)
- { if ((*kp->e_soif->sf_sonewconn1)(so, connstatus, kp))
- return;
- }
- kp = kp->e_next;
+ /* check if head was closed during the soalloc */
+ if (head->so_proto == NULL) {
+ sodealloc(so);
+ return ((struct socket *)0);
}
so->so_head = head;
so->so_timeo = head->so_timeo;
so->so_pgid = head->so_pgid;
so->so_uid = head->so_uid;
- so->so_rcv.sb_flags |= SB_RECV; /* XXX */
- (void) soreserve(so, head->so_snd.sb_hiwat, head->so_rcv.sb_hiwat);
+ /* Attach socket filters for this protocol */
if (so->so_proto->pr_sfilter.tqh_first)
error = sfilter_init(so);
- if (error == 0 && (*so->so_proto->pr_usrreqs->pru_attach)(so, 0, NULL)) {
+ if (error != 0) {
+ sodealloc(so);
+ return ((struct socket *)0);
+ }
+
+	/* Call the socket filters' sonewconn entry point, if set */
+ kp = sotokextcb(so);
+ while (kp) {
+ if (kp->e_soif && kp->e_soif->sf_sonewconn) {
+ error = (int)(*kp->e_soif->sf_sonewconn)(so, connstatus, kp);
+ if (error == EJUSTRETURN) {
+ return so;
+ } else if (error != 0) {
+ sodealloc(so);
+ return NULL;
+ }
+ }
+ kp = kp->e_next;
+ }
+
+ if (soreserve(so, head->so_snd.sb_hiwat, head->so_rcv.sb_hiwat) ||
+ (*so->so_proto->pr_usrreqs->pru_attach)(so, 0, NULL)) {
sfilter_term(so);
sodealloc(so);
return ((struct socket *)0);
}
+#ifdef __APPLE__
so->so_proto->pr_domain->dom_refs++;
+#endif
if (connstatus) {
TAILQ_INSERT_TAIL(&head->so_comp, so, so_list);
wakeup((caddr_t)&head->so_timeo);
so->so_state |= connstatus;
}
+#ifdef __APPLE__
so->so_rcv.sb_so = so->so_snd.sb_so = so;
TAILQ_INIT(&so->so_evlist);
+#endif
return (so);
}
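/*
 * Illustrative sketch (editorial, not part of this change): the
 * so_incomp/so_comp handoff above is what stands behind an ordinary
 * passive open from userland.  listen()'s backlog becomes so_qlimit,
 * sonewconn() refuses new connections once so_qlen exceeds
 * 3 * so_qlimit / 2, and accept() dequeues sockets that
 * soisconnected() has moved onto so_comp.  Assuming a POSIX
 * environment (PORT is a placeholder):
 *
 *	int s = socket(AF_INET, SOCK_STREAM, 0);
 *	struct sockaddr_in sin = { 0 };
 *	sin.sin_family = AF_INET;
 *	sin.sin_port = htons(PORT);
 *	bind(s, (struct sockaddr *)&sin, sizeof (sin));
 *	listen(s, 5);
 *	int c = accept(s, NULL, NULL);
 */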
void
socantsendmore(so)
struct socket *so;
-{ register struct kextcb *kp;
-
+{
+ register struct kextcb *kp;
+
kp = sotokextcb(so);
- while (kp)
- { if (kp->e_soif && kp->e_soif->sf_socantsendmore)
- { if ((*kp->e_soif->sf_socantsendmore)(so, kp))
+ while (kp) {
+ if (kp->e_soif && kp->e_soif->sf_socantsendmore) {
+ if ((*kp->e_soif->sf_socantsendmore)(so, kp))
return;
}
kp = kp->e_next;
void
socantrcvmore(so)
struct socket *so;
-{ register struct kextcb *kp;
+{
+ register struct kextcb *kp;
kp = sotokextcb(so);
- while (kp)
- { if (kp->e_soif && kp->e_soif->sf_socantrcvmore)
- { if ((*kp->e_soif->sf_socantrcvmore)(so, kp))
+ while (kp) {
+ if (kp->e_soif && kp->e_soif->sf_socantrcvmore) {
+ if ((*kp->e_soif->sf_socantrcvmore)(so, kp))
return;
}
kp = kp->e_next;
register struct sockbuf *sb;
{
struct proc *p = current_proc();
-
-
-
- thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
- sb->sb_sel.si_flags &= ~SI_SBSEL;
+	/*
+	 * We clear the SB_SEL flag before calling selwakeup;
+	 * BSD calls selwakeup first and then clears the flag.
+	 */
+ sb->sb_flags &= ~SB_SEL;
selwakeup(&sb->sb_sel);
- thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);
-
if (sb->sb_flags & SB_WAIT) {
sb->sb_flags &= ~SB_WAIT;
wakeup((caddr_t)&sb->sb_cc);
else if (so->so_pgid > 0 && (p = pfind(so->so_pgid)) != 0)
psignal(p, SIGIO);
}
-
if (sb->sb_flags & SB_UPCALL)
(*so->so_upcall)(so, so->so_upcallarg, M_DONTWAIT);
}
register struct kextcb *kp;
kp = sotokextcb(so);
- while (kp)
- { if (kp->e_soif && kp->e_soif->sf_soreserve)
- { if ((*kp->e_soif->sf_soreserve)(so, sndcc, rcvcc, kp))
+ while (kp) {
+ if (kp->e_soif && kp->e_soif->sf_soreserve) {
+ if ((*kp->e_soif->sf_soreserve)(so, sndcc, rcvcc, kp))
return;
}
kp = kp->e_next;
so->so_snd.sb_lowat = so->so_snd.sb_hiwat;
return (0);
bad2:
+#ifdef __APPLE__
+ selthreadclear(&so->so_snd.sb_sel);
+#endif
sbrelease(&so->so_snd);
bad:
return (ENOBUFS);
/*
* Free mbufs held by a socket, and reserved mbuf space.
*/
+ /* WARNING: the caller must do selthreadclear() before calling this. */
void
sbrelease(sb)
struct sockbuf *sb;
{
sbflush(sb);
- sb->sb_hiwat = sb->sb_mbmax = 0;
-
- {
- int oldpri = splimp();
- selthreadclear(&sb->sb_sel);
- splx(oldpri);
- }
+ sb->sb_hiwat = 0;
+ sb->sb_mbmax = 0;
+
}
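/*
 * Editorial note with the expected call pattern: since sbrelease() no
 * longer calls selthreadclear() itself, callers must clear any select()
 * records first, as the bad2: path in soreserve() above now does:
 *
 *	selthreadclear(&so->so_snd.sb_sel);
 *	sbrelease(&so->so_snd);
 */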
/*
sbappend(sb, m)
struct sockbuf *sb;
struct mbuf *m;
-{ register struct kextcb *kp;
+{
+ struct kextcb *kp;
register struct mbuf *n;
+
+	KERNEL_DEBUG((DBG_FNC_SBAPPEND | DBG_FUNC_START), sb, m ? m->m_len : 0, 0, 0, 0);
+
if (m == 0)
return;
kp = sotokextcb(sbtoso(sb));
- while (kp)
- { if (kp->e_sout && kp->e_sout->su_sbappend)
- { if ((*kp->e_sout->su_sbappend)(sb, m, kp))
+ while (kp) {
+ if (kp->e_sout && kp->e_sout->su_sbappend) {
+ if ((*kp->e_sout->su_sbappend)(sb, m, kp))
return;
}
kp = kp->e_next;
}
-
- if (n = sb->sb_mb) {
+ n = sb->sb_mb;
+ if (n) {
while (n->m_nextpkt)
n = n->m_nextpkt;
do {
} while (n->m_next && (n = n->m_next));
}
sbcompress(sb, m, n);
+
+ KERNEL_DEBUG((DBG_FNC_SBAPPEND | DBG_FUNC_END), sb, sb->sb_cc, 0, 0, 0);
}
#ifdef SOCKBUF_DEBUG
for (m = sb->sb_mb; m; m = n) {
n = m->m_nextpkt;
for (; m; m = m->m_next) {
- len += m->m_len;
- mbcnt += MSIZE;
- if (m->m_flags & M_EXT) /*XXX*/ /* pretty sure this is bogus */
- mbcnt += m->m_ext.ext_size;
- if (m->m_nextpkt)
- panic("sbcheck nextpkt");
- }
- if (len != sb->sb_cc || mbcnt != sb->sb_mbcnt) {
- printf("cc %ld != %ld || mbcnt %ld != %ld\n", len, sb->sb_cc,
- mbcnt, sb->sb_mbcnt);
- panic("sbcheck");
+ len += m->m_len;
+ mbcnt += MSIZE;
+ if (m->m_flags & M_EXT) /*XXX*/ /* pretty sure this is bogus */
+ mbcnt += m->m_ext.ext_size;
+ }
}
+#ifndef __APPLE__
+ if (len != sb->sb_cc || mbcnt != sb->sb_mbcnt) {
+ printf("cc %ld != %ld || mbcnt %ld != %ld\n", len, sb->sb_cc,
+ mbcnt, sb->sb_mbcnt);
+ panic("sbcheck");
+ }
+#else
+ if (len != sb->sb_cc)
+ printf("sbcheck len %ld != sb_cc %ld\n", len, sb->sb_cc);
+ if (mbcnt != sb->sb_mbcnt)
+ printf("sbcheck mbcnt %ld != sb_mbcnt %ld\n", mbcnt, sb->sb_mbcnt);
+#endif
}
#endif
{
register struct mbuf *m;
register struct kextcb *kp;
-
+
if (m0 == 0)
return;
m = m_free(m);
continue;
}
- if (n && (n->m_flags & (M_EXT | M_EOR)) == 0 &&
- (n->m_data + n->m_len + m->m_len) < &n->m_dat[MLEN] &&
+ if (n && (n->m_flags & M_EOR) == 0 &&
+#ifndef __APPLE__
+ M_WRITABLE(n) &&
+#endif
+ m->m_len <= MCLBYTES / 4 && /* XXX: Don't copy too much */
+ m->m_len <= M_TRAILINGSPACE(n) &&
n->m_type == m->m_type) {
bcopy(mtod(m, caddr_t), mtod(n, caddr_t) + n->m_len,
(unsigned)m->m_len);
register struct kextcb *kp;
kp = sotokextcb(sbtoso(sb));
- while (kp)
- { if (kp->e_sout && kp->e_sout->su_sbflush)
- { if ((*kp->e_sout->su_sbflush)(sb, kp))
+ while (kp) {
+ if (kp->e_sout && kp->e_sout->su_sbflush) {
+ if ((*kp->e_sout->su_sbflush)(sb, kp))
return;
}
kp = kp->e_next;
}
if (sb->sb_flags & SB_LOCK)
- panic("sbflush: locked");
- while (sb->sb_mbcnt && sb->sb_cc)
+ sb_lock(sb);
+ while (sb->sb_mbcnt) {
+ /*
+ * Don't call sbdrop(sb, 0) if the leading mbuf is non-empty:
+ * we would loop forever. Panic instead.
+ */
+ if (!sb->sb_cc && (sb->sb_mb == NULL || sb->sb_mb->m_len))
+ break;
sbdrop(sb, (int)sb->sb_cc);
+ }
if (sb->sb_cc || sb->sb_mb || sb->sb_mbcnt)
panic("sbflush: cc %ld || mb %p || mbcnt %ld", sb->sb_cc, (void *)sb->sb_mb, sb->sb_mbcnt);
postevent(0, sb, EV_RWBYTES);
/*
* Drop data from (the front of) a sockbuf.
+ * Use m_freem_list to free the mbuf structures under a single lock.
+ * The dropped mbufs are pruned from the front of the chain by
+ * remembering how far we got and then zeroing the two pertinent
+ * pointers, m_nextpkt and m_next, at that point.  The socket buffer
+ * is then updated to point at the new front of the chain, and the
+ * pruned mbufs are released with one call to m_freem_list.
*/
void
sbdrop(sb, len)
register struct sockbuf *sb;
register int len;
{
- register struct mbuf *m, *mn;
- struct mbuf *next;
+ register struct mbuf *m, *free_list, *ml;
+ struct mbuf *next, *last;
register struct kextcb *kp;
+ KERNEL_DEBUG((DBG_FNC_SBDROP | DBG_FUNC_START), sb, len, 0, 0, 0);
+
kp = sotokextcb(sbtoso(sb));
- while (kp)
- { if (kp->e_sout && kp->e_sout->su_sbdrop)
- { if ((*kp->e_sout->su_sbdrop)(sb, len, kp))
+ while (kp) {
+ if (kp->e_sout && kp->e_sout->su_sbdrop) {
+ if ((*kp->e_sout->su_sbdrop)(sb, len, kp))
return;
}
kp = kp->e_next;
}
-
next = (m = sb->sb_mb) ? m->m_nextpkt : 0;
+ free_list = last = m;
+ ml = (struct mbuf *)0;
+
while (len > 0) {
if (m == 0) {
- if (next == 0)
- panic("sbdrop");
- m = next;
- next = m->m_nextpkt;
- continue;
+ if (next == 0) {
+				/*
+				 * Temporarily replace this panic with a
+				 * printf: the condition occurs occasionally
+				 * when closing a socket, where there is no
+				 * harm in ignoring it.  This problem will
+				 * be investigated further.
+				 */
+				/* panic("sbdrop"); */
+				printf("sbdrop - count not zero\n");
+				len = 0;
+				/*
+				 * Zero the counts: if we have no mbufs,
+				 * we have no data (PR-2986815).
+				 */
+ sb->sb_cc = 0;
+ sb->sb_mbcnt = 0;
+ break;
+ }
+ m = last = next;
+ next = m->m_nextpkt;
+ continue;
}
if (m->m_len > len) {
m->m_len -= len;
}
len -= m->m_len;
sbfree(sb, m);
- MFREE(m, mn);
- m = mn;
+
+ ml = m;
+ m = m->m_next;
}
while (m && m->m_len == 0) {
sbfree(sb, m);
- MFREE(m, mn);
- m = mn;
+
+ ml = m;
+ m = m->m_next;
+ }
+ if (ml) {
+ ml->m_next = (struct mbuf *)0;
+ last->m_nextpkt = (struct mbuf *)0;
+ m_freem_list(free_list);
}
if (m) {
sb->sb_mb = m;
m->m_nextpkt = next;
} else
sb->sb_mb = next;
+
postevent(0, sb, EV_RWBYTES);
+
+ KERNEL_DEBUG((DBG_FNC_SBDROP | DBG_FUNC_END), sb, 0, 0, 0, 0);
}
/*
register struct kextcb *kp;
kp = sotokextcb(sbtoso(sb));
- while (kp)
- { if (kp->e_sout && kp->e_sout->su_sbdroprecord)
- { if ((*kp->e_sout->su_sbdroprecord)(sb, kp))
+ while (kp) {
+ if (kp->e_sout && kp->e_sout->su_sbdroprecord) {
+ if ((*kp->e_sout->su_sbdroprecord)(sb, kp))
return;
}
kp = kp->e_next;
do {
sbfree(sb, m);
MFREE(m, mn);
- } while (m = mn);
+ m = mn;
+ } while (m);
}
postevent(0, sb, EV_RWBYTES);
}
register struct cmsghdr *cp;
struct mbuf *m;
+ if (CMSG_SPACE((u_int)size) > MLEN)
+ return ((struct mbuf *) NULL);
if ((m = m_get(M_DONTWAIT, MT_CONTROL)) == NULL)
return ((struct mbuf *) NULL);
cp = mtod(m, struct cmsghdr *);
(void)memcpy(CMSG_DATA(cp), p, size);
- size += sizeof(*cp);
- m->m_len = size;
- cp->cmsg_len = size;
+ m->m_len = CMSG_SPACE(size);
+ cp->cmsg_len = CMSG_LEN(size);
cp->cmsg_level = level;
cp->cmsg_type = type;
return (m);
}
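/*
 * For reference (editorial sketch): the two sizes set above differ only
 * by alignment padding.  For a payload of sizeof(int) bytes:
 *
 *	CMSG_LEN(sizeof(int))	header plus data; stored in cmsg_len.
 *	CMSG_SPACE(sizeof(int))	header plus data, padded so a following
 *				cmsghdr would be aligned; stored in m_len
 *				and checked against MLEN at the top of
 *				the function.
 *
 * so CMSG_SPACE(x) >= CMSG_LEN(x) always holds.
 */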
+#ifdef __APPLE__
+/*
+ * The following are macros on BSD and functions on Darwin
+ */
+
+/*
+ * Do we need to notify the other side when I/O is possible?
+ */
+
+int
+sb_notify(struct sockbuf *sb)
+{
+ return ((sb->sb_flags & (SB_WAIT|SB_SEL|SB_ASYNC|SB_UPCALL)) != 0);
+}
+
+/*
+ * How much space is there in a socket buffer (so->so_snd or so->so_rcv)?
+ * This is problematical if the fields are unsigned, as the space might
+ * still be negative (cc > hiwat or mbcnt > mbmax). Should detect
+ * overflow and return 0. Should use "lmin" but it doesn't exist now.
+ */
+long
+sbspace(struct sockbuf *sb)
+{
+ return ((long) imin((int)(sb->sb_hiwat - sb->sb_cc),
+ (int)(sb->sb_mbmax - sb->sb_mbcnt)));
+}
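/*
 * Editorial sketch of the overflow check the comment above asks for
 * (assumed code, not part of this change; the name sbspace_checked is
 * hypothetical): return 0 whenever either count has already overrun
 * its limit, so the subtractions cannot go "negative" in unsigned
 * arithmetic.
 *
 *	long
 *	sbspace_checked(struct sockbuf *sb)
 *	{
 *		if (sb->sb_cc > sb->sb_hiwat ||
 *		    sb->sb_mbcnt > sb->sb_mbmax)
 *			return (0);
 *		return ((long) imin((int)(sb->sb_hiwat - sb->sb_cc),
 *		    (int)(sb->sb_mbmax - sb->sb_mbcnt)));
 *	}
 */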
+
+/* do we have to send all at once on a socket? */
+int
+sosendallatonce(struct socket *so)
+{
+ return (so->so_proto->pr_flags & PR_ATOMIC);
+}
+
+/* can we read something from so? */
+int
+soreadable(struct socket *so)
+{
+ return (so->so_rcv.sb_cc >= so->so_rcv.sb_lowat ||
+ (so->so_state & SS_CANTRCVMORE) ||
+ so->so_comp.tqh_first || so->so_error);
+}
+
+/* can we write something to so? */
+
+int
+sowriteable(struct socket *so)
+{
+	return ((sbspace(&so->so_snd) >= so->so_snd.sb_lowat &&
+	    ((so->so_state & SS_ISCONNECTED) ||
+	    (so->so_proto->pr_flags & PR_CONNREQUIRED) == 0)) ||
+	    (so->so_state & SS_CANTSENDMORE) ||
+	    so->so_error);
+}
+
+/* adjust counters in sb reflecting allocation of m */
+
+void
+sballoc(struct sockbuf *sb, struct mbuf *m)
+{
+ sb->sb_cc += m->m_len;
+ sb->sb_mbcnt += MSIZE;
+ if (m->m_flags & M_EXT)
+ sb->sb_mbcnt += m->m_ext.ext_size;
+}
+
+/* adjust counters in sb reflecting freeing of m */
+void
+sbfree(struct sockbuf *sb, struct mbuf *m)
+{
+ sb->sb_cc -= m->m_len;
+ sb->sb_mbcnt -= MSIZE;
+ if (m->m_flags & M_EXT)
+ sb->sb_mbcnt -= m->m_ext.ext_size;
+}
+
+/*
+ * Set lock on sockbuf sb; sleep if lock is already held.
+ * Unless SB_NOINTR is set on sockbuf, sleep is interruptible.
+ * Returns error without lock if sleep is interrupted.
+ */
+int
+sblock(struct sockbuf *sb, int wf)
+{
+	return (sb->sb_flags & SB_LOCK ?
+	    ((wf == M_WAIT) ? sb_lock(sb) : EWOULDBLOCK) :
+	    (sb->sb_flags |= SB_LOCK, 0));
+}
+
+/* release lock on sockbuf sb */
+void
+sbunlock(struct sockbuf *sb)
+{
+ sb->sb_flags &= ~SB_LOCK;
+ if (sb->sb_flags & SB_WANT) {
+ sb->sb_flags &= ~SB_WANT;
+		wakeup((caddr_t)&sb->sb_flags);
+ }
+}
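/*
 * Typical usage (editorial sketch): sblock()/sbunlock() bracket
 * operations that may sleep while manipulating a socket buffer.
 *
 *	int error;
 *
 *	if ((error = sblock(&so->so_rcv, M_WAIT)) != 0)
 *		return (error);
 *	... operate on so->so_rcv ...
 *	sbunlock(&so->so_rcv);
 */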
+
+void
+sorwakeup(struct socket * so)
+{
+ if (sb_notify(&so->so_rcv))
+ sowakeup(so, &so->so_rcv);
+}
+
+void
+sowwakeup(struct socket * so)
+{
+ if (sb_notify(&so->so_snd))
+ sowakeup(so, &so->so_snd);
+}
+#endif /* __APPLE__ */
/*
* Make a copy of a sockaddr in a malloced buffer of type M_SONAME.
* Here is the definition of some of the basic objects in the kern.ipc
* branch of the MIB.
*/
-
-
SYSCTL_NODE(_kern, KERN_IPC, ipc, CTLFLAG_RW, 0, "IPC");
/* This takes the place of kern.maxsockbuf, which moved to kern.ipc. */
static int dummy;
SYSCTL_INT(_kern, KERN_DUMMY, dummy, CTLFLAG_RW, &dummy, 0, "");
-SYSCTL_INT(_kern_ipc, KIPC_MAXSOCKBUF, maxsockbuf, CTLFLAG_RW, &sb_max, 0, "");
-SYSCTL_INT(_kern_ipc, OID_AUTO, maxsockets, CTLFLAG_RD, &maxsockets, 0, "");
+SYSCTL_INT(_kern_ipc, KIPC_MAXSOCKBUF, maxsockbuf, CTLFLAG_RW,
+ &sb_max, 0, "Maximum socket buffer size");
+SYSCTL_INT(_kern_ipc, OID_AUTO, maxsockets, CTLFLAG_RD,
+	&maxsockets, 0, "Maximum number of sockets available");
SYSCTL_INT(_kern_ipc, KIPC_SOCKBUF_WASTE, sockbuf_waste_factor, CTLFLAG_RW,
&sb_efficiency, 0, "");
SYSCTL_INT(_kern_ipc, KIPC_NMBCLUSTERS, nmbclusters, CTLFLAG_RD, &nmbclusters, 0, "");
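/*
 * Editorial example: these MIB entries are readable from userland with
 * sysctl(3), e.g.
 *
 *	#include <sys/types.h>
 *	#include <sys/sysctl.h>
 *	#include <stdio.h>
 *
 *	int max;
 *	size_t len = sizeof(max);
 *	if (sysctlbyname("kern.ipc.maxsockbuf", &max, &len, NULL, 0) == 0)
 *		printf("maxsockbuf = %d\n", max);
 */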