*
* @APPLE_LICENSE_HEADER_START@
*
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License"). You may not use this file except in compliance with the
- * License. Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
+ * Copyright (c) 1999-2003 Apple Computer, Inc. All Rights Reserved.
*
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this
+ * file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
- * License for the specific language governing rights and limitations
- * under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
*
* @APPLE_LICENSE_HEADER_END@
*/
#include <net/netisr.h>
#include <kern/queue.h>
+#include <kern/kern_types.h>
+#include <kern/sched_prim.h>
+
+#include <IOKit/IOMapper.h>
+
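+/*
+ * Reference-count helpers: _MCLREF bumps the mclrefcnt entry for the mbuf
+ * or cluster containing p, while _MCLUNREF drops it and evaluates true
+ * once the count reaches zero (i.e. the storage may be freed).
+ */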
+#define _MCLREF(p) (++mclrefcnt[mtocl(p)])
+#define _MCLUNREF(p) (--mclrefcnt[mtocl(p)] == 0)
-extern kernel_pmap; /* The kernel's pmap */
+extern pmap_t kernel_pmap; /* The kernel's pmap */
+/* kernel translator */
+extern ppnum_t pmap_find_phys(pmap_t pmap, addr64_t va);
decl_simple_lock_data(, mbuf_slock);
struct mbuf *mfree; /* mbuf free list */
extern int nmbclusters; /* max number of mapped clusters */
short *mclrefcnt; /* mapped cluster reference counts */
int *mcl_paddr;
+static ppnum_t mcl_paddr_base; /* Handle returned by IOMapper::iovmAlloc() */
union mcluster *mclfree; /* mapped cluster free list */
int max_linkhdr; /* largest link-level header */
int max_protohdr; /* largest protocol header */
extern int dlil_expand_mcl;
extern int dlil_initialized;
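+
+/*
+ * Optional (currently compiled-out) debug aid: when enabled, _MFREE_MUNGE
+ * fills an mbuf with 0xdeadbeef as it is freed, making use-after-free of
+ * mbuf contents easier to spot.
+ */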
+#if 0
+static int mfree_munge = 0;
+#if 0
+#define _MFREE_MUNGE(m) { \
+ if (mfree_munge) \
+ { int i; \
+ vm_offset_t *element = (vm_offset_t *)(m); \
+ for (i = 0; \
+ i < sizeof(struct mbuf)/sizeof(vm_offset_t); \
+ i++) \
+ (element)[i] = 0xdeadbeef; \
+ } \
+}
+#else
+void
+munge_mbuf(struct mbuf *m)
+{
+ int i;
+ vm_offset_t *element = (vm_offset_t *)(m);
+ for (i = 0;
+ i < sizeof(struct mbuf)/sizeof(vm_offset_t);
+ i++)
+ (element)[i] = 0xdeadbeef;
+}
+#define _MFREE_MUNGE(m) { \
+ if (mfree_munge) \
+ munge_mbuf(m); \
+}
+#endif
+#else
+#define _MFREE_MUNGE(m)
+#endif
+
+
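+/*
+ * _MINTGET(m, type): take the mbuf lock, pop an mbuf off the free list
+ * (if one is available), bump its reference count and charge it to the
+ * requested type in mbstat, then drop the lock.  The caller initializes
+ * the remaining fields (m_next, m_data, m_flags, ...) afterwards.
+ */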
+#define _MINTGET(m, type) { \
+ MBUF_LOCK(); \
+ if (((m) = mfree) != 0) { \
+ MCHECK(m); \
+ ++mclrefcnt[mtocl(m)]; \
+ mbstat.m_mtypes[MT_FREE]--; \
+ mbstat.m_mtypes[(type)]++; \
+ mfree = (m)->m_next; \
+ } \
+ MBUF_UNLOCK(); \
+}
+
void
mbinit()
{
int s,m;
int initmcl = 32;
+ int mcl_pages;
if (nclpp)
return;
- nclpp = round_page(MCLBYTES) / MCLBYTES; /* see mbufgc() */
+ nclpp = round_page_32(MCLBYTES) / MCLBYTES; /* see mbufgc() */
if (nclpp < 1) nclpp = 1;
MBUF_LOCKINIT();
// NETISR_LOCKINIT();
+
+ mbstat.m_msize = MSIZE;
+ mbstat.m_mclbytes = MCLBYTES;
+ mbstat.m_minclsize = MINCLSIZE;
+ mbstat.m_mlen = MLEN;
+ mbstat.m_mhlen = MHLEN;
+
if (nmbclusters == 0)
nmbclusters = NMBCLUSTERS;
MALLOC(mclrefcnt, short *, nmbclusters * sizeof (short),
for (m = 0; m < nmbclusters; m++)
mclrefcnt[m] = -1;
- MALLOC(mcl_paddr, int *, (nmbclusters/(PAGE_SIZE/CLBYTES)) * sizeof (int),
- M_TEMP, M_WAITOK);
+ /* Calculate the number of pages assigned to the cluster pool */
+ mcl_pages = nmbclusters/(PAGE_SIZE/CLBYTES);
+ MALLOC(mcl_paddr, int *, mcl_pages * sizeof(int), M_TEMP, M_WAITOK);
if (mcl_paddr == 0)
panic("mbinit1");
- bzero((char *)mcl_paddr, (nmbclusters/(PAGE_SIZE/CLBYTES)) * sizeof (int));
+ /* Register with the I/O Bus mapper */
+ mcl_paddr_base = IOMapperIOVMAlloc(mcl_pages);
+ bzero((char *)mcl_paddr, mcl_pages * sizeof(int));
embutl = (union mcluster *)((unsigned char *)mbutl + (nmbclusters * MCLBYTES));
if (ncl < i)
ncl = i;
- size = round_page(ncl * MCLBYTES);
+ size = round_page_32(ncl * MCLBYTES);
mcl = (union mcluster *)kmem_mb_alloc(mb_map, size);
if (mcl == 0 && ncl > 1) {
- size = round_page(MCLBYTES); /* Try for 1 if failed */
+ size = round_page_32(MCLBYTES); /* Try for 1 if failed */
mcl = (union mcluster *)kmem_mb_alloc(mb_map, size);
}
for (i = 0; i < ncl; i++) {
if (++mclrefcnt[mtocl(mcl)] != 0)
panic("m_clalloc already there");
- if (((int)mcl & PAGE_MASK) == 0)
- mcl_paddr[((char *)mcl - (char *)mbutl)/PAGE_SIZE] = pmap_extract(kernel_pmap, (char *)mcl);
+ if (((int)mcl & PAGE_MASK) == 0) {
+ ppnum_t offset = ((char *)mcl - (char *)mbutl)/PAGE_SIZE;
+ ppnum_t new_page = pmap_find_phys(kernel_pmap, (vm_address_t) mcl);
+
+			/*
+			 * If no mapper is available, the following call is a
+			 * no-op and returns the input page; if a mapper is
+			 * present, the appropriate I/O page is returned.
+			 */
+ new_page = IOMapperInsertPage(mcl_paddr_base, offset, new_page);
+			/* convert the page number to a physical address (4K pages) */
+			mcl_paddr[offset] = new_page << 12;
+ }
mcl->mcl_next = mclfree;
mclfree = mcl++;
mbstat.m_mtypes[MT_FREE] += i;
mbstat.m_mbufs += i;
while (i--) {
+ _MFREE_MUNGE(m);
m->m_type = MT_FREE;
m->m_next = mfree;
mfree = m++;
m_retry(canwait, type)
int canwait, type;
{
-#define m_retry(h, t) 0
register struct mbuf *m;
int wait, s;
funnel_t * fnl;
for (;;) {
(void) m_expand(canwait);
- MGET(m, XXX, type);
+ _MINTGET(m, type);
+ if (m) {
+ (m)->m_next = (m)->m_nextpkt = 0;
+ (m)->m_type = (type);
+ (m)->m_data = (m)->m_dat;
+ (m)->m_flags = 0;
+ }
if (m || canwait == M_DONTWAIT)
break;
MBUF_LOCK();
wait = m_want++;
-
dlil_expand_mcl = 1;
+ if (wait == 0)
+ mbstat.m_drain++;
+ else
+ mbstat.m_wait++;
MBUF_UNLOCK();
if (dlil_initialized)
wakeup((caddr_t)&dlil_input_thread_wakeup);
- if (wait == 0) {
- mbstat.m_drain++;
- }
- else {
- assert_wait((caddr_t)&mfree, THREAD_UNINT);
- mbstat.m_wait++;
- }
-
/*
* Grab network funnel because m_reclaim calls into the
 * socket domains and tsleep ends up calling splhigh
*/
fnl = thread_funnel_get();
- if (fnl && (fnl == kernel_flock)) {
- fnl_switch = 1;
- thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);
- } else
- funnel_state = thread_funnel_set(network_flock, TRUE);
+ if (fnl && (fnl == kernel_flock)) {
+ fnl_switch = 1;
+ thread_funnel_switch(KERNEL_FUNNEL, NETWORK_FUNNEL);
+ } else
+ funnel_state = thread_funnel_set(network_flock, TRUE);
if (wait == 0) {
m_reclaim();
} else {
/* Sleep with a small timeout as insurance */
- (void) tsleep((caddr_t)0, PZERO-1, "m_retry", hz);
+ (void) tsleep((caddr_t)&mfree, PZERO-1, "m_retry", hz);
}
if (fnl_switch)
thread_funnel_switch(NETWORK_FUNNEL, KERNEL_FUNNEL);
thread_funnel_set(network_flock, funnel_state);
}
return (m);
-#undef m_retry
}
/*
if (m = m_retry(canwait, type)) {
m->m_flags |= M_PKTHDR;
m->m_data = m->m_pktdat;
+ m->m_pkthdr.rcvif = NULL;
+ m->m_pkthdr.len = 0;
+ m->m_pkthdr.header = NULL;
+ m->m_pkthdr.csum_flags = 0;
+ m->m_pkthdr.csum_data = 0;
+ m->m_pkthdr.aux = (struct mbuf *)NULL;
+ m->m_pkthdr.reserved1 = NULL;
+ m->m_pkthdr.reserved2 = NULL;
}
return (m);
}
{
register struct mbuf *m;
- MGET(m, nowait, type);
+ _MINTGET(m, type);
+ if (m) {
+ m->m_next = m->m_nextpkt = 0;
+ m->m_type = type;
+ m->m_data = m->m_dat;
+ m->m_flags = 0;
+ } else
+ (m) = m_retry(nowait, type);
+
return (m);
}
{
register struct mbuf *m;
- MGETHDR(m, nowait, type);
- return (m);
+ _MINTGET(m, type);
+ if (m) {
+ m->m_next = m->m_nextpkt = 0;
+ m->m_type = type;
+ m->m_data = m->m_pktdat;
+ m->m_flags = M_PKTHDR;
+ m->m_pkthdr.rcvif = NULL;
+ m->m_pkthdr.header = NULL;
+ m->m_pkthdr.csum_flags = 0;
+ m->m_pkthdr.csum_data = 0;
+ m->m_pkthdr.aux = (struct mbuf *)NULL;
+ m->m_pkthdr.reserved1 = NULL;
+ m->m_pkthdr.reserved2 = NULL;
+ } else
+ m = m_retryhdr(nowait, type);
+
+ return m;
}
struct mbuf *
if (m->m_type == MT_FREE)
panic("freeing free mbuf");
+ /* Free the aux data if there is any */
+ if ((m->m_flags & M_PKTHDR) && m->m_pkthdr.aux)
+ {
+ m_freem(m->m_pkthdr.aux);
+ }
+
MBUF_LOCK();
- if (m->m_flags & M_EXT) {
+ if ((m->m_flags & M_EXT))
+ {
if (MCLHASREFERENCE(m)) {
remque((queue_t)&m->m_ext.ext_refs);
} else if (m->m_ext.ext_free == NULL) {
union mcluster *mcl= (union mcluster *)m->m_ext.ext_buf;
- if (MCLUNREF(mcl)) {
+ if (_MCLUNREF(mcl)) {
mcl->mcl_next = mclfree;
mclfree = mcl;
++mbstat.m_clfree;
}
}
mbstat.m_mtypes[m->m_type]--;
- (void) MCLUNREF(m);
+ (void) _MCLUNREF(m);
+ _MFREE_MUNGE(m);
m->m_type = MT_FREE;
mbstat.m_mtypes[m->m_type]++;
m->m_flags = 0;
return (n);
}
+/* m_mclget() adds an mbuf cluster to a normal mbuf */
+struct mbuf *
+m_mclget(m, nowait)
+ struct mbuf *m;
+ int nowait;
+{
+ MCLALLOC(m->m_ext.ext_buf, nowait);
+ if (m->m_ext.ext_buf) {
+ m->m_data = m->m_ext.ext_buf;
+ m->m_flags |= M_EXT;
+ m->m_ext.ext_size = MCLBYTES;
+ m->m_ext.ext_free = 0;
+ m->m_ext.ext_refs.forward = m->m_ext.ext_refs.backward =
+ &m->m_ext.ext_refs;
+ }
+
+ return m;
+}
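+
+/*
+ * Minimal usage sketch (illustrative only, not code from this file): a
+ * caller typically obtains an mbuf first and then attaches a cluster with
+ * m_mclget(), checking M_EXT to see whether the cluster was attached:
+ *
+ *	struct mbuf *m;
+ *
+ *	MGETHDR(m, M_DONTWAIT, MT_DATA);
+ *	if (m != NULL) {
+ *		m = m_mclget(m, M_DONTWAIT);
+ *		if ((m->m_flags & M_EXT) == 0) {
+ *			m_free(m);
+ *			m = NULL;
+ *		}
+ *	}
+ */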
+
+/* m_mclalloc() allocates an mbuf cluster */
+caddr_t
+m_mclalloc(nowait)
+ int nowait;
+{
+ caddr_t p;
+
+ (void)m_clalloc(1, nowait);
+ if ((p = (caddr_t)mclfree)) {
+ ++mclrefcnt[mtocl(p)];
+ mbstat.m_clfree--;
+ mclfree = ((union mcluster *)p)->mcl_next;
+ }
+ MBUF_UNLOCK();
+
+ return p;
+}
+
+/* m_mclfree() releases a reference to a cluster allocated by MCLALLOC,
+ * freeing the cluster if the reference count has reached 0. */
+void
+m_mclfree(p)
+ caddr_t p;
+{
+ MBUF_LOCK();
+ if (--mclrefcnt[mtocl(p)] == 0) {
+ ((union mcluster *)(p))->mcl_next = mclfree;
+ mclfree = (union mcluster *)(p);
+ mbstat.m_clfree++;
+ }
+ MBUF_UNLOCK();
+}
+
+/* m_mclhasreference() checks whether an mbuf's cluster is referenced by another mbuf */
+int
+m_mclhasreference(m)
+ struct mbuf *m;
+{
+ return (m->m_ext.ext_refs.forward != &(m->m_ext.ext_refs));
+}
+
+/* m_copy_pkthdr() copies the packet header (and the M_COPYFLAGS flags) from one mbuf to another */
+void
+m_copy_pkthdr(to, from)
+ struct mbuf *to, *from;
+{
+ to->m_pkthdr = from->m_pkthdr;
+ from->m_pkthdr.aux = (struct mbuf *)NULL;
+ to->m_flags = from->m_flags & M_COPYFLAGS;
+ to->m_data = (to)->m_pktdat;
+}
+
/* Best effort to get an mbuf cluster + pkthdr under one lock.
 * If we don't have them available, just bail out and use the regular
* path.
mclfree = ((union mcluster *)(m->m_ext.ext_buf))->mcl_next;
m->m_next = m->m_nextpkt = 0;
- m->m_ext.ext_free = 0;
m->m_type = MT_DATA;
m->m_data = m->m_ext.ext_buf;
m->m_flags = M_PKTHDR | M_EXT;
- m->m_pkthdr.aux = (struct mbuf *)NULL;
+ m->m_pkthdr.len = 0;
+ m->m_pkthdr.rcvif = NULL;
+ m->m_pkthdr.header = NULL;
m->m_pkthdr.csum_data = 0;
m->m_pkthdr.csum_flags = 0;
+ m->m_pkthdr.aux = (struct mbuf *)NULL;
+ m->m_pkthdr.reserved1 = 0;
+ m->m_pkthdr.reserved2 = 0;
+ m->m_ext.ext_free = 0;
m->m_ext.ext_size = MCLBYTES;
m->m_ext.ext_refs.forward = m->m_ext.ext_refs.backward =
&m->m_ext.ext_refs;
return (m);
}
+
+/*
+ * Return a list of mbuf hdrs that point to clusters.  Try for num_needed;
+ * if this can't be met, return whatever number are available.  Set up the
+ * first num_with_pkthdrs with mbuf hdrs configured as packet headers; these
+ * are chained on the m_nextpkt field.  Any mbufs requested beyond this are
+ * chained onto the last packet header's m_next field.
+ */
+struct mbuf *
+m_getpackets(int num_needed, int num_with_pkthdrs, int how)
+{
+ struct mbuf *m;
+ struct mbuf **np, *top;
+
+ top = NULL;
+ np = ⊤
+
+ m_clalloc(num_needed, how); /* takes the MBUF_LOCK, but doesn't release it... */
+
+ while (num_needed--) {
+ if (mfree && mclfree) { /* mbuf + cluster are available */
+ m = mfree;
+ MCHECK(m);
+ mfree = m->m_next;
+ ++mclrefcnt[mtocl(m)];
+ mbstat.m_mtypes[MT_FREE]--;
+ mbstat.m_mtypes[MT_DATA]++;
+ m->m_ext.ext_buf = (caddr_t)mclfree; /* get the cluster */
+ ++mclrefcnt[mtocl(m->m_ext.ext_buf)];
+ mbstat.m_clfree--;
+ mclfree = ((union mcluster *)(m->m_ext.ext_buf))->mcl_next;
+
+ m->m_next = m->m_nextpkt = 0;
+ m->m_type = MT_DATA;
+ m->m_data = m->m_ext.ext_buf;
+ m->m_ext.ext_free = 0;
+ m->m_ext.ext_size = MCLBYTES;
+ m->m_ext.ext_refs.forward = m->m_ext.ext_refs.backward = &m->m_ext.ext_refs;
+
+ if (num_with_pkthdrs == 0)
+ m->m_flags = M_EXT;
+ else {
+ m->m_flags = M_PKTHDR | M_EXT;
+ m->m_pkthdr.len = 0;
+ m->m_pkthdr.rcvif = NULL;
+ m->m_pkthdr.header = NULL;
+ m->m_pkthdr.csum_flags = 0;
+ m->m_pkthdr.csum_data = 0;
+ m->m_pkthdr.aux = (struct mbuf *)NULL;
+ m->m_pkthdr.reserved1 = NULL;
+ m->m_pkthdr.reserved2 = NULL;
+
+ num_with_pkthdrs--;
+ }
+
+ } else {
+
+ MBUF_UNLOCK();
+
+ if (num_with_pkthdrs == 0) {
+ MGET(m, how, MT_DATA );
+ } else {
+ MGETHDR(m, how, MT_DATA);
+
+ num_with_pkthdrs--;
+ }
+ if (m == 0)
+ return(top);
+
+ MCLGET(m, how);
+ if ((m->m_flags & M_EXT) == 0) {
+ m_free(m);
+ return(top);
+ }
+ MBUF_LOCK();
+ }
+ *np = m;
+
+ if (num_with_pkthdrs)
+ np = &m->m_nextpkt;
+ else
+ np = &m->m_next;
+ }
+ MBUF_UNLOCK();
+
+ return (top);
+}
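+
+/*
+ * Usage sketch (illustrative only): a receive path might batch-allocate
+ * cluster-backed packet headers and walk the resulting m_nextpkt chain;
+ * the names below are placeholders:
+ *
+ *	struct mbuf *pkts, *m;
+ *
+ *	pkts = m_getpackets(32, 32, M_DONTWAIT);
+ *	for (m = pkts; m != NULL; m = m->m_nextpkt) {
+ *		... fill mtod(m, caddr_t) with up to MCLBYTES of data ...
+ *	}
+ */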
+
+
+/*
+ * return a list of mbuf hdrs set up as packet hdrs
+ * chained together on the m_nextpkt field
+ */
+struct mbuf *
+m_getpackethdrs(int num_needed, int how)
+{
+ struct mbuf *m;
+ struct mbuf **np, *top;
+
+ top = NULL;
+ np = ⊤
+
+ MBUF_LOCK();
+
+ while (num_needed--) {
+ if (m = mfree) { /* mbufs are available */
+ MCHECK(m);
+ mfree = m->m_next;
+ ++mclrefcnt[mtocl(m)];
+ mbstat.m_mtypes[MT_FREE]--;
+ mbstat.m_mtypes[MT_DATA]++;
+
+ m->m_next = m->m_nextpkt = 0;
+ m->m_type = MT_DATA;
+ m->m_flags = M_PKTHDR;
+ m->m_data = m->m_pktdat;
+ m->m_pkthdr.len = 0;
+ m->m_pkthdr.rcvif = NULL;
+ m->m_pkthdr.header = NULL;
+ m->m_pkthdr.csum_flags = 0;
+ m->m_pkthdr.csum_data = 0;
+ m->m_pkthdr.aux = (struct mbuf *)NULL;
+ m->m_pkthdr.reserved1 = NULL;
+ m->m_pkthdr.reserved2 = NULL;
+
+ } else {
+
+ MBUF_UNLOCK();
+
+ m = m_retryhdr(how, MT_DATA);
+
+ if (m == 0)
+ return(top);
+
+ MBUF_LOCK();
+ }
+ *np = m;
+ np = &m->m_nextpkt;
+ }
+ MBUF_UNLOCK();
+
+ return (top);
+}
+
+
/* Free an mbuf list (chained via m_nextpkt) while following m_next under one lock.
 * Returns the count of packets freed.  Used by the drivers.
*/
struct mbuf *m;
{
struct mbuf *nextpkt;
- int i, s, count=0;
+ int i, count=0;
-// s = splimp();
MBUF_LOCK();
+
while (m) {
if (m)
- nextpkt = m->m_nextpkt; /* chain of linked mbufs from driver */
+ nextpkt = m->m_nextpkt; /* chain of linked mbufs from driver */
else
- nextpkt = 0;
+ nextpkt = 0;
+
count++;
+
while (m) { /* free the mbuf chain (like mfreem) */
- struct mbuf *n = m->m_next;
+
+ struct mbuf *n;
+
+ /* Free the aux data if there is any */
+ if ((m->m_flags & M_PKTHDR) && m->m_pkthdr.aux) {
+ /*
+ * Treat the current m as the nextpkt and set m
+ * to the aux data. This lets us free the aux
+ * data in this loop without having to call
+ * m_freem recursively, which wouldn't work
+ * because we've still got the lock.
+ */
+ nextpkt = m;
+ m = nextpkt->m_pkthdr.aux;
+ nextpkt->m_pkthdr.aux = NULL;
+ }
+
+ n = m->m_next;
+
if (n && n->m_nextpkt)
panic("m_freem_list: m_nextpkt of m_next != NULL");
if (m->m_type == MT_FREE)
panic("freeing free mbuf");
+
if (m->m_flags & M_EXT) {
if (MCLHASREFERENCE(m)) {
remque((queue_t)&m->m_ext.ext_refs);
} else if (m->m_ext.ext_free == NULL) {
union mcluster *mcl= (union mcluster *)m->m_ext.ext_buf;
- if (MCLUNREF(mcl)) {
+ if (_MCLUNREF(mcl)) {
mcl->mcl_next = mclfree;
mclfree = mcl;
++mbstat.m_clfree;
}
}
mbstat.m_mtypes[m->m_type]--;
- (void) MCLUNREF(m);
+ (void) _MCLUNREF(m);
+ _MFREE_MUNGE(m);
+ mbstat.m_mtypes[MT_FREE]++;
m->m_type = MT_FREE;
- mbstat.m_mtypes[m->m_type]++;
m->m_flags = 0;
m->m_len = 0;
m->m_next = mfree;
}
m = nextpkt; /* bump m with saved nextpkt if any */
}
- i = m_want;
- m_want = 0;
+ if (i = m_want)
+ m_want = 0;
+
MBUF_UNLOCK();
- if (i) wakeup((caddr_t)&mfree);
+
+ if (i)
+ wakeup((caddr_t)&mfree);
+
return (count);
}
* Lesser-used path for M_PREPEND:
* allocate new mbuf to prepend to chain,
* copy junk along.
+ * Does not adjust packet header length.
*/
struct mbuf *
m_prepend(m, len, how)
return (m);
}
+/*
+ * Replacement for old M_PREPEND macro:
+ * allocate new mbuf to prepend to chain,
+ * copy junk along, and adjust length.
+ */
+struct mbuf *
+m_prepend_2(m, len, how)
+ register struct mbuf *m;
+ int len, how;
+{
+ if (M_LEADINGSPACE(m) >= len) {
+ m->m_data -= len;
+ m->m_len += len;
+ } else {
+ m = m_prepend(m, len, how);
+ }
+ if ((m) && (m->m_flags & M_PKTHDR))
+ m->m_pkthdr.len += len;
+ return (m);
+}
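+
+/*
+ * Usage sketch (illustrative only): prepending room for a 14-byte
+ * link-layer header while letting m_prepend_2 keep m_pkthdr.len in sync:
+ *
+ *	m = m_prepend_2(m, 14, M_DONTWAIT);
+ *	if (m == NULL)
+ *		... handle the allocation failure ...
+ */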
+
/*
* Make a copy of an mbuf chain starting "off0" bytes from the beginning,
* continuing for "len" bytes. If len is M_COPYALL, copy to end of mbuf.
panic("m_copym");
if (off == 0 && m->m_flags & M_PKTHDR)
copyhdr = 1;
- while (off > 0) {
+
+ while (off >= m->m_len) {
if (m == 0)
panic("m_copym");
- if (off < m->m_len)
- break;
off -= m->m_len;
m = m->m_next;
}
np = ⊤
top = 0;
+
+ MBUF_LOCK();
+
while (len > 0) {
if (m == 0) {
if (len != M_COPYALL)
panic("m_copym");
break;
}
- MGET(n, wait, m->m_type);
+ if (n = mfree) {
+ MCHECK(n);
+ ++mclrefcnt[mtocl(n)];
+ mbstat.m_mtypes[MT_FREE]--;
+ mbstat.m_mtypes[m->m_type]++;
+ mfree = n->m_next;
+ n->m_next = n->m_nextpkt = 0;
+ n->m_type = m->m_type;
+ n->m_data = n->m_dat;
+ n->m_flags = 0;
+ } else {
+ MBUF_UNLOCK();
+ n = m_retry(wait, m->m_type);
+ MBUF_LOCK();
+ }
*np = n;
+
if (n == 0)
goto nospace;
if (copyhdr) {
n->m_len = MHLEN;
}
if (m->m_flags & M_EXT) {
- MBUF_LOCK();
n->m_ext = m->m_ext;
insque((queue_t)&n->m_ext.ext_refs, (queue_t)&m->m_ext.ext_refs);
- MBUF_UNLOCK();
n->m_data = m->m_data + off;
n->m_flags |= M_EXT;
- } else
+ } else {
bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
(unsigned)n->m_len);
+ }
if (len != M_COPYALL)
len -= n->m_len;
off = 0;
m = m->m_next;
np = &n->m_next;
}
+ MBUF_UNLOCK();
+
if (top == 0)
MCFail++;
+
return (top);
nospace:
+ MBUF_UNLOCK();
+
m_freem(top);
MCFail++;
return (0);
}
+
+/*
+ * Equivalent to m_copym except that all necessary mbuf hdrs are allocated
+ * within this routine.  Also, the last mbuf and offset accessed are passed
+ * out and can be passed back in to avoid having to rescan the entire mbuf
+ * list (normally hung off of the socket).
+ */
+struct mbuf *
+m_copym_with_hdrs(m, off0, len, wait, m_last, m_off)
+ register struct mbuf *m;
+ int off0, wait;
+ register int len;
+ struct mbuf **m_last;
+ int *m_off;
+{
+ register struct mbuf *n, **np;
+ register int off = off0;
+ struct mbuf *top = 0;
+ int copyhdr = 0;
+ int type;
+
+ if (off == 0 && m->m_flags & M_PKTHDR)
+ copyhdr = 1;
+
+ if (*m_last) {
+ m = *m_last;
+ off = *m_off;
+ } else {
+ while (off >= m->m_len) {
+ off -= m->m_len;
+ m = m->m_next;
+ }
+ }
+ MBUF_LOCK();
+
+ while (len > 0) {
+ if (top == 0)
+ type = MT_HEADER;
+ else {
+ if (m == 0)
+			panic("m_copym_with_hdrs");
+ type = m->m_type;
+ }
+ if (n = mfree) {
+ MCHECK(n);
+ ++mclrefcnt[mtocl(n)];
+ mbstat.m_mtypes[MT_FREE]--;
+ mbstat.m_mtypes[type]++;
+ mfree = n->m_next;
+ n->m_next = n->m_nextpkt = 0;
+ n->m_type = type;
+
+ if (top) {
+ n->m_data = n->m_dat;
+ n->m_flags = 0;
+ } else {
+ n->m_data = n->m_pktdat;
+ n->m_flags = M_PKTHDR;
+ n->m_pkthdr.len = 0;
+ n->m_pkthdr.rcvif = NULL;
+ n->m_pkthdr.header = NULL;
+ n->m_pkthdr.csum_flags = 0;
+ n->m_pkthdr.csum_data = 0;
+ n->m_pkthdr.aux = (struct mbuf *)NULL;
+ n->m_pkthdr.reserved1 = NULL;
+ n->m_pkthdr.reserved2 = NULL;
+ }
+ } else {
+ MBUF_UNLOCK();
+ if (top)
+ n = m_retry(wait, type);
+ else
+ n = m_retryhdr(wait, type);
+ MBUF_LOCK();
+ }
+ if (n == 0)
+ goto nospace;
+ if (top == 0) {
+ top = n;
+ np = &top->m_next;
+ continue;
+ } else
+ *np = n;
+
+ if (copyhdr) {
+ M_COPY_PKTHDR(n, m);
+ n->m_pkthdr.len = len;
+ copyhdr = 0;
+ }
+ n->m_len = min(len, (m->m_len - off));
+
+ if (m->m_flags & M_EXT) {
+ n->m_ext = m->m_ext;
+ insque((queue_t)&n->m_ext.ext_refs, (queue_t)&m->m_ext.ext_refs);
+ n->m_data = m->m_data + off;
+ n->m_flags |= M_EXT;
+ } else {
+ bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
+ (unsigned)n->m_len);
+ }
+ len -= n->m_len;
+
+ if (len == 0) {
+ if ((off + n->m_len) == m->m_len) {
+ *m_last = m->m_next;
+ *m_off = 0;
+ } else {
+ *m_last = m;
+ *m_off = off + n->m_len;
+ }
+ break;
+ }
+ off = 0;
+ m = m->m_next;
+ np = &n->m_next;
+ }
+ MBUF_UNLOCK();
+
+ return (top);
+nospace:
+ MBUF_UNLOCK();
+
+ if (top)
+ m_freem(top);
+ MCFail++;
+ return (0);
+}
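+
+/*
+ * Usage sketch (illustrative only): copying a long chain in successive
+ * chunks without rescanning it from the start each time; m_last/m_off
+ * carry the resume position between calls, and "chain" is a placeholder:
+ *
+ *	struct mbuf *m_last = NULL, *chunk;
+ *	int m_off = 0;
+ *
+ *	chunk = m_copym_with_hdrs(chain, 0, MCLBYTES, M_WAIT, &m_last, &m_off);
+ *	... hand off chunk ...
+ *	chunk = m_copym_with_hdrs(chain, MCLBYTES, MCLBYTES, M_WAIT, &m_last, &m_off);
+ */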
+
+
/*
* Copy data from an mbuf chain starting "off" bytes from the beginning,
* continuing for "len" bytes, into the indicated buffer.
n->m_pkthdr.len = m->m_pkthdr.len;
n->m_pkthdr.rcvif = m->m_pkthdr.rcvif;
n->m_pkthdr.header = NULL;
+ n->m_pkthdr.csum_flags = 0;
+ n->m_pkthdr.csum_data = 0;
n->m_pkthdr.aux = NULL;
+ n->m_pkthdr.reserved1 = 0;
+ n->m_pkthdr.reserved2 = 0;
bcopy(m->m_data, n->m_data, m->m_pkthdr.len);
return(n);
}
return (0);
}
+int
+m_mclref(struct mbuf *p)
+{
+ return (_MCLREF(p));
+}
+
+int
+m_mclunref(struct mbuf *p)
+{
+ return (_MCLUNREF(p));
+}
+
+/* change mbuf to new type */
+void
+m_mchtype(struct mbuf *m, int t)
+{
+ MBUF_LOCK();
+ mbstat.m_mtypes[(m)->m_type]--;
+ mbstat.m_mtypes[t]++;
+ (m)->m_type = t;
+ MBUF_UNLOCK();
+}
+
+void *m_mtod(struct mbuf *m)
+{
+ return ((m)->m_data);
+}
+
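+/*
+ * Address-arithmetic accessors: m_dtom() masks a data pointer down to the
+ * MSIZE-aligned mbuf containing it, while m_mtocl()/m_cltom() convert
+ * between pointers within the mbuf submap (based at mbutl) and cluster
+ * indices.
+ */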
+struct mbuf *m_dtom(void *x)
+{
+ return ((struct mbuf *)((u_long)(x) & ~(MSIZE-1)));
+}
+
+int m_mtocl(void *x)
+{
+ return (((char *)(x) - (char *)mbutl) / sizeof(union mcluster));
+}
+
+union mcluster *m_cltom(int x)
+{
+ return ((union mcluster *)(mbutl + (x)));
+}
+
+
+void m_mcheck(struct mbuf *m)
+{
+ if (m->m_type != MT_FREE)
+ panic("mget MCHECK: m_type=%x m=%x", m->m_type, m);
+}
+
#if 0
#include <sys/sysctl.h>