X-Git-Url: https://git.saurik.com/apple/xnu.git/blobdiff_plain/4d15aeb193b2c68f1d38666c317f8d3734f5f083..5ba3f43ea354af8ad55bea84372a2bc834d8757c:/bsd/kern/uipc_mbuf.c

diff --git a/bsd/kern/uipc_mbuf.c b/bsd/kern/uipc_mbuf.c
index cee3242d1..04df5a515 100644
--- a/bsd/kern/uipc_mbuf.c
+++ b/bsd/kern/uipc_mbuf.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998-2016 Apple Inc. All rights reserved.
+ * Copyright (c) 1998-2017 Apple Inc. All rights reserved.
  *
  * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
  *
@@ -317,9 +317,6 @@ static lck_grp_attr_t *mbuf_mlock_grp_attr;
 /* Back-end (common) layer */
 static boolean_t mbuf_worker_needs_wakeup; /* wait channel for mbuf worker */
 static int mbuf_worker_ready;	/* worker thread is runnable */
-static int mbuf_expand_mcl;	/* number of cluster creation requets */
-static int mbuf_expand_big;	/* number of big cluster creation requests */
-static int mbuf_expand_16k;	/* number of 16KB cluster creation requests */
 static int ncpu;		/* number of CPUs */
 static ppnum_t *mcl_paddr;	/* Array of cluster physical addresses */
 static ppnum_t mcl_pages;	/* Size of array (# physical pages) */
@@ -496,7 +493,7 @@ static struct mleak_table mleak_table;
 static mleak_stat_t *mleak_stat;
 
 #define	MLEAK_STAT_SIZE(n) \
-	((size_t)(&((mleak_stat_t *)0)->ml_trace[n]))
+	__builtin_offsetof(mleak_stat_t, ml_trace[n])
 
 struct mallocation {
 	mcache_obj_t *element;	/* the alloc'ed element, NULL if unused */
@@ -588,6 +585,7 @@ typedef struct {
 	int		mtbl_maxlimit;	/* maximum allowed */
 	u_int32_t	mtbl_wantpurge;	/* purge during next reclaim */
 	uint32_t	mtbl_avgtotal;  /* average total on iOS */
+	u_int32_t	mtbl_expand;	/* worker should expand the class */
 } mbuf_table_t;
 
 #define	m_class(c)	mbuf_table[c].mtbl_class
@@ -613,6 +611,7 @@ typedef struct {
 #define	m_ctotal(c)	mbuf_table[c].mtbl_stats->mbcl_ctotal
 #define	m_peak(c)	mbuf_table[c].mtbl_stats->mbcl_peak_reported
 #define	m_release_cnt(c) mbuf_table[c].mtbl_stats->mbcl_release_cnt
+#define	m_region_expand(c)	mbuf_table[c].mtbl_expand
 
 static mbuf_table_t mbuf_table[] = {
 	/*
@@ -621,13 +620,13 @@ static mbuf_table_t mbuf_table[] = {
 	 * usage patterns on iOS.
 	 */
 	{ MC_MBUF, NULL, TAILQ_HEAD_INITIALIZER(m_slablist(MC_MBUF)),
-	    NULL, NULL, 0, 0, 0, 0, 3000 },
+	    NULL, NULL, 0, 0, 0, 0, 3000, 0 },
 	{ MC_CL, NULL, TAILQ_HEAD_INITIALIZER(m_slablist(MC_CL)),
-	    NULL, NULL, 0, 0, 0, 0, 2000 },
+	    NULL, NULL, 0, 0, 0, 0, 2000, 0 },
 	{ MC_BIGCL, NULL, TAILQ_HEAD_INITIALIZER(m_slablist(MC_BIGCL)),
-	    NULL, NULL, 0, 0, 0, 0, 1000 },
+	    NULL, NULL, 0, 0, 0, 0, 1000, 0 },
 	{ MC_16KCL, NULL, TAILQ_HEAD_INITIALIZER(m_slablist(MC_16KCL)),
-	    NULL, NULL, 0, 0, 0, 0, 1000 },
+	    NULL, NULL, 0, 0, 0, 0, 200, 0 },
 	/*
 	 * The following are special caches; they serve as intermediate
 	 * caches backed by the above rudimentary caches. Each object
@@ -636,9 +635,9 @@ static mbuf_table_t mbuf_table[] = {
 	 * deal with the slab structures; instead, the constructed
 	 * cached elements are simply stored in the freelists.
 	 */
-	{ MC_MBUF_CL, NULL, { NULL, NULL }, NULL, NULL, 0, 0, 0, 0, 2000 },
-	{ MC_MBUF_BIGCL, NULL, { NULL, NULL }, NULL, NULL, 0, 0, 0, 0, 1000 },
-	{ MC_MBUF_16KCL, NULL, { NULL, NULL }, NULL, NULL, 0, 0, 0, 0, 1000 },
+	{ MC_MBUF_CL, NULL, { NULL, NULL }, NULL, NULL, 0, 0, 0, 0, 2000, 0 },
+	{ MC_MBUF_BIGCL, NULL, { NULL, NULL }, NULL, NULL, 0, 0, 0, 0, 1000, 0 },
+	{ MC_MBUF_16KCL, NULL, { NULL, NULL }, NULL, NULL, 0, 0, 0, 0, 200, 0 },
 };
 
 #define	NELEM(a)	(sizeof (a) / sizeof ((a)[0]))
@@ -665,8 +664,13 @@ static char *mbuf_dump_buf;
  * mb_drain_maxint controls the amount of time to wait (in seconds) before
  * consecutive calls to m_drain().
  */
+#if CONFIG_EMBEDDED
+static unsigned int mb_watchdog = 1;
+static unsigned int mb_drain_maxint = 60;
+#else
 static unsigned int mb_watchdog = 0;
 static unsigned int mb_drain_maxint = 0;
+#endif /* CONFIG_EMBEDDED */
 
 uintptr_t mb_obscure_extfree __attribute__((visibility("hidden")));
 uintptr_t mb_obscure_extref __attribute__((visibility("hidden")));
@@ -932,7 +936,7 @@ struct mb_stat *mb_stat;
 struct omb_stat *omb_stat;	/* For backwards compatibility */
 
 #define	MB_STAT_SIZE(n) \
-	((size_t)(&((mb_stat_t *)0)->mbs_class[n]))
+	__builtin_offsetof(mb_stat_t, mbs_class[n])
 #define	OMB_STAT_SIZE(n) \
 	((size_t)(&((struct omb_stat *)0)->mbs_class[n]))
 
@@ -999,7 +1003,7 @@ mbuf_mtypes_sync(boolean_t locked)
 	mtypes_cpu_t mtc;
 
 	if (locked)
-		lck_mtx_assert(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
+		LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
 
 	bzero(&mtc, sizeof (mtc));
 	for (m = 0; m < ncpu; m++) {
@@ -1037,7 +1041,7 @@ mbuf_stat_sync(void)
 	mcache_t *cp;
 	int k, m, bktsize;
 
-	lck_mtx_assert(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
+	LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
 
 	for (k = 0; k < NELEM(mbuf_table); k++) {
 		cp = m_cache(k);
@@ -1455,6 +1459,7 @@ mbinit(void)
 	_CASSERT(MBUF_TSO_IPV6 == CSUM_TSO_IPV6);
 	_CASSERT(MBUF_CSUM_REQ_SUM16 == CSUM_PARTIAL);
 	_CASSERT(MBUF_CSUM_TCP_SUM16 == MBUF_CSUM_REQ_SUM16);
+	_CASSERT(MBUF_CSUM_REQ_ZERO_INVERT == CSUM_ZERO_INVERT);
 	_CASSERT(MBUF_CSUM_REQ_IP == CSUM_IP);
 	_CASSERT(MBUF_CSUM_REQ_TCP == CSUM_TCP);
 	_CASSERT(MBUF_CSUM_REQ_UDP == CSUM_UDP);
@@ -1571,6 +1576,21 @@ mbinit(void)
 
 	mleak_activate();
 
+	/*
+	 * Allocate structure for per-CPU statistics that's aligned
+	 * on the CPU cache boundary; this code assumes that we never
+	 * uninitialize this framework, since the original address
+	 * before alignment is not saved.
+	 */
+	ncpu = ml_get_max_cpus();
+	MALLOC(buf, void *, MBUF_MTYPES_SIZE(ncpu) + CPU_CACHE_LINE_SIZE,
+	    M_TEMP, M_WAITOK);
+	VERIFY(buf != NULL);
+
+	mbuf_mtypes = (mbuf_mtypes_t *)P2ROUNDUP((intptr_t)buf,
+	    CPU_CACHE_LINE_SIZE);
+	bzero(mbuf_mtypes, MBUF_MTYPES_SIZE(ncpu));
+
 	/* Calculate the number of pages assigned to the cluster pool */
 	mcl_pages = (nmbclusters << MCLSHIFT) / PAGE_SIZE;
 	MALLOC(mcl_paddr, ppnum_t *, mcl_pages * sizeof (ppnum_t),
@@ -1661,21 +1681,6 @@ mbinit(void)
 		    (void *)(uintptr_t)m, flags, MCR_SLEEP);
 	}
 
-	/*
-	 * Allocate structure for per-CPU statistics that's aligned
-	 * on the CPU cache boundary; this code assumes that we never
-	 * uninitialize this framework, since the original address
-	 * before alignment is not saved.
-	 */
-	ncpu = ml_get_max_cpus();
-	MALLOC(buf, void *, MBUF_MTYPES_SIZE(ncpu) + CPU_CACHE_LINE_SIZE,
-	    M_TEMP, M_WAITOK);
-	VERIFY(buf != NULL);
-
-	mbuf_mtypes = (mbuf_mtypes_t *)P2ROUNDUP((intptr_t)buf,
-	    CPU_CACHE_LINE_SIZE);
-	bzero(mbuf_mtypes, MBUF_MTYPES_SIZE(ncpu));
-
 	/*
 	 * Set the max limit on sb_max to be 1/16 th of the size of
 	 * memory allocated for mbuf clusters.
@@ -1742,7 +1747,7 @@ slab_alloc(mbuf_class_t class, int wait)
 	mcl_slab_t *sp;
 	mcache_obj_t *buf;
 
-	lck_mtx_assert(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
+	LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
 
 	/* This should always be NULL for us */
 	VERIFY(m_cobjlist(class) == NULL);
@@ -1876,7 +1881,7 @@ slab_free(mbuf_class_t class, mcache_obj_t *buf)
 	boolean_t reinit_supercl = false;
 	mbuf_class_t super_class;
 
-	lck_mtx_assert(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
+	LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
 
 	VERIFY(class != MC_16KCL || njcl > 0);
 	VERIFY(buf->obj_next == NULL);
@@ -2147,7 +2152,7 @@ mbuf_slab_alloc(void *arg, mcache_obj_t ***plist, unsigned int num, int wait)
 		    mbuf_sleep(class, need, wait))
 			break;
 
-		lck_mtx_assert(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
+		LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
 	}
 }
 
@@ -2280,7 +2285,7 @@ cslab_alloc(mbuf_class_t class, mcache_obj_t ***plist, unsigned int num)
 	VERIFY(need > 0);
 	VERIFY(class != MC_MBUF_16KCL || njcl > 0);
 
-	lck_mtx_assert(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
+	LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
 
 	/* Get what we can from the freelist */
 	while ((*list = m_cobjlist(class)) != NULL) {
@@ -2344,7 +2349,7 @@ cslab_free(mbuf_class_t class, mcache_obj_t *list, int purged)
 	ASSERT(MBUF_CLASS_VALID(class) && MBUF_CLASS_COMPOSITE(class));
 	VERIFY(class != MC_MBUF_16KCL || njcl > 0);
 
-	lck_mtx_assert(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
+	LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
 
 	if (class == MC_MBUF_CL) {
 		cl_class = MC_CL;
@@ -2805,7 +2810,7 @@ m_clalloc(const u_int32_t num, const int wait, const u_int32_t bufsize)
 	else
 		class = MC_16KCL;
 
-	lck_mtx_assert(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
+	LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
 
 	/*
	 * Multiple threads may attempt to populate the cluster map one
@@ -2819,7 +2824,7 @@ m_clalloc(const u_int32_t num, const int wait, const u_int32_t bufsize)
 		mb_clalloc_waiters++;
 		(void) msleep(mb_clalloc_waitchan, mbuf_mlock,
 		    (PZERO-1), "m_clalloc", NULL);
-		lck_mtx_assert(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
+		LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
 	}
 
 	/* We are busy now; tell everyone else to go away */
@@ -2830,7 +2835,7 @@ m_clalloc(const u_int32_t num, const int wait, const u_int32_t bufsize)
 	 * to grow the pool asynchronously using the mbuf worker thread.
 	 */
 	i = m_howmany(num, bufsize);
-	if (i == 0 || (wait & M_DONTWAIT))
+	if (i <= 0 || (wait & M_DONTWAIT))
 		goto out;
 
 	lck_mtx_unlock(mbuf_mlock);
@@ -3000,7 +3005,7 @@ m_clalloc(const u_int32_t num, const int wait, const u_int32_t bufsize)
 	return (count);
 
 out:
-	lck_mtx_assert(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
+	LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
 
 	/* We're done; let others enter */
 	mb_clalloc_busy = FALSE;
@@ -3024,8 +3029,8 @@ out:
 		 * at this time.
 		 */
 		i += m_total(MC_BIGCL);
-		if (i > mbuf_expand_big) {
-			mbuf_expand_big = i;
+		if (i > m_region_expand(MC_BIGCL)) {
+			m_region_expand(MC_BIGCL) = i;
 		}
 	}
 	if (m_infree(MC_BIGCL) >= num)
@@ -3037,8 +3042,8 @@ out:
 		 * at this time.
 		 */
 		i += m_total(MC_16KCL);
-		if (i > mbuf_expand_16k) {
-			mbuf_expand_16k = i;
+		if (i > m_region_expand(MC_16KCL)) {
+			m_region_expand(MC_16KCL) = i;
 		}
 	}
 	if (m_infree(MC_16KCL) >= num)
@@ -3060,7 +3065,7 @@ freelist_populate(mbuf_class_t class, unsigned int num, int wait)
 	VERIFY(class == MC_MBUF || class == MC_CL ||
 	    class == MC_BIGCL || class == MC_16KCL);
 
-	lck_mtx_assert(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
+	LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
 
 	VERIFY(PAGE_SIZE == m_maxsize(MC_BIGCL) ||
 	    PAGE_SIZE == m_maxsize(MC_16KCL));
@@ -3083,12 +3088,6 @@ freelist_populate(mbuf_class_t class, unsigned int num, int wait)
 
 	i = m_clalloc(numpages, wait, m_maxsize(super_class));
 
-	/* Respect the minimum limit of super class */
-	if (m_total(super_class) == m_maxlimit(super_class) &&
-	    m_infree(super_class) <= m_minlimit(super_class))
-		if (wait & MCR_COMP)
-			return (0);
-
 	/* how many objects will we cut the page into? */
 	int numobj = PAGE_SIZE / m_maxsize(class);
 
@@ -3138,6 +3137,7 @@ freelist_populate(mbuf_class_t class, unsigned int num, int wait)
 		mbstat.m_bigclusters = m_total(MC_BIGCL);
 
 	m_total(class) += numobj;
+	VERIFY(m_total(class) <= m_maxlimit(class));
 	m_infree(class) += numobj;
 
 	if (!mb_peak_newreport && mbuf_report_usage(class))
@@ -3204,7 +3204,7 @@ freelist_populate(mbuf_class_t class, unsigned int num, int wait)
 static void
 freelist_init(mbuf_class_t class)
 {
-	lck_mtx_assert(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
+	LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
 
 	VERIFY(class == MC_CL || class == MC_BIGCL);
 	VERIFY(m_total(class) == 0);
@@ -3269,7 +3269,7 @@ mbuf_steal(mbuf_class_t class, unsigned int num)
 	mcache_obj_t **list = &top;
 	unsigned int tot = 0;
 
-	lck_mtx_assert(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
+	LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
 
 	switch (class) {
 	case MC_MBUF:
@@ -3305,7 +3305,7 @@ m_reclaim(mbuf_class_t class, unsigned int num, boolean_t comp)
 {
 	int m, bmap = 0;
 
-	lck_mtx_assert(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
+	LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
 
 	VERIFY(m_total(MC_CL) <= m_maxlimit(MC_CL));
 	VERIFY(m_total(MC_BIGCL) <= m_maxlimit(MC_BIGCL));
@@ -3962,10 +3962,11 @@ m_classifier_init(struct mbuf *m, uint32_t pktf_mask)
 		(void) m_set_service_class(m, MBUF_SC_BE);
 	if (!(m->m_pkthdr.pkt_flags & PKTF_IFAINFO))
 		m->m_pkthdr.pkt_ifainfo = 0;
-#if MEASURE_BW
-	m->m_pkthdr.pkt_bwseq = 0;
-#endif /* MEASURE_BW */
-	m->m_pkthdr.pkt_timestamp = 0;
+	/*
+	 * Preserve timestamp if requested
+	 */
+	if (!(m->m_pkthdr.pkt_flags & PKTF_TS_VALID))
+		m->m_pkthdr.pkt_timestamp = 0;
 }
 
 void
@@ -3980,9 +3981,6 @@ m_copy_classifier(struct mbuf *to, struct mbuf *from)
 	to->m_pkthdr.pkt_flags = from->m_pkthdr.pkt_flags;
 	(void) m_set_service_class(to, from->m_pkthdr.pkt_svc);
 	to->m_pkthdr.pkt_ifainfo = from->m_pkthdr.pkt_ifainfo;
-#if MEASURE_BW
-	to->m_pkthdr.pkt_bwseq = from->m_pkthdr.pkt_bwseq;
-#endif /* MEASURE_BW */
 }
 
 /*
@@ -5039,23 +5037,33 @@ nospace:
 void
 m_copydata(struct mbuf *m, int off, int len, void *vp)
 {
+	int off0 = off, len0 = len;
+	struct mbuf *m0 = m;
 	unsigned count;
 	char *cp = vp;
 
-	if (off < 0 || len < 0)
-		panic("m_copydata: invalid offset %d or len %d", off, len);
+	if (__improbable(off < 0 || len < 0)) {
+		panic("%s: invalid offset %d or len %d", __func__, off, len);
+		/* NOTREACHED */
+	}
 
 	while (off > 0) {
-		if (m == NULL)
-			panic("m_copydata: invalid mbuf chain");
+		if (__improbable(m == NULL)) {
+			panic("%s: invalid mbuf chain %p [off %d, len %d]",
+			    __func__, m0, off0, len0);
+			/* NOTREACHED */
+		}
 		if (off < m->m_len)
 			break;
 		off -= m->m_len;
 		m = m->m_next;
 	}
 	while (len > 0) {
-		if (m == NULL)
-			panic("m_copydata: invalid mbuf chain");
+		if (__improbable(m == NULL)) {
+			panic("%s: invalid mbuf chain %p [off %d, len %d]",
+			    __func__, m0, off0, len0);
+			/* NOTREACHED */
+		}
 		count = MIN(m->m_len - off, len);
 		bcopy(MTOD(m, caddr_t) + off, cp, count);
 		len -= count;
@@ -5445,7 +5453,7 @@ m_howmany(int num, size_t bufsize)
 	VERIFY(bufsize == m_maxsize(MC_BIGCL) ||
 	    bufsize == m_maxsize(MC_16KCL));
 
-	lck_mtx_assert(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
+	LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
 
 	/* Numbers in 2K cluster units */
 	m_mbclusters = m_total(MC_MBUF) >> NMBPCLSHIFT;
@@ -5535,9 +5543,8 @@ m_howmany(int num, size_t bufsize)
 
 		i = MAX(i, j);
 		/* Check to ensure we don't go over limit */
-		if (i + m_16kclusters >= m_maxlimit(MC_16KCL))
-			i = m_maxlimit(MC_16KCL) - m_16kclusters;
-		VERIFY((m_total(MC_16KCL) + i) <= m_maxlimit(MC_16KCL));
+		if ((i + m_total(MC_16KCL)) >= m_maxlimit(MC_16KCL))
+			i = m_maxlimit(MC_16KCL) - m_total(MC_16KCL);
 	}
 	return (i);
 }
@@ -6196,6 +6203,8 @@ m_defrag_offset(struct mbuf *m0, u_int32_t off, int how)
 		if (length > MCLBYTES)
 			length = MCLBYTES;
 		length -= ((m_new == m_final) ? off : 0);
+		if (length < 0)
+			goto nospace;
 
 		if (m_new == NULL) {
 			if (length > MLEN)
@@ -6357,7 +6366,7 @@ mbuf_sleep(mbuf_class_t class, unsigned int num, int wait)
 {
 	boolean_t mcache_retry = FALSE;
 
-	lck_mtx_assert(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
+	LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
 
 	/* Check if there's anything at the cache layer */
 	if (mbuf_cached_above(class, wait)) {
@@ -6399,6 +6408,14 @@ mbuf_sleep(mbuf_class_t class, unsigned int num, int wait)
 		mbuf_watchdog();
 
 	mb_waiters++;
+	m_region_expand(class) += num;
+	/* wake up the worker thread */
+	if (class > MC_MBUF && mbuf_worker_ready &&
+	    mbuf_worker_needs_wakeup) {
+		wakeup((caddr_t)&mbuf_worker_needs_wakeup);
+		mbuf_worker_needs_wakeup = FALSE;
+	}
+
 	(void) msleep(mb_waitchan, mbuf_mlock, (PZERO-1), m_cname(class), NULL);
 
 	/* We are now up; stop getting notified until next round */
@@ -6425,41 +6442,41 @@ mbuf_worker_thread(void)
 	while (1) {
 		lck_mtx_lock(mbuf_mlock);
 		mbuf_expand = 0;
-		if (mbuf_expand_mcl) {
+		if (m_region_expand(MC_CL) > 0) {
 			int n;
 
 			/* Adjust to current number of cluster in use */
-			n = mbuf_expand_mcl -
+			n = m_region_expand(MC_CL) -
 			    (m_total(MC_CL) - m_infree(MC_CL));
 			if ((n + m_total(MC_CL)) > m_maxlimit(MC_CL))
 				n = m_maxlimit(MC_CL) - m_total(MC_CL);
-			mbuf_expand_mcl = 0;
+			m_region_expand(MC_CL) = 0;
 			if (n > 0 && freelist_populate(MC_CL, n, M_WAIT) > 0)
 				mbuf_expand++;
 		}
-		if (mbuf_expand_big) {
+		if (m_region_expand(MC_BIGCL) > 0) {
 			int n;
 
 			/* Adjust to current number of 4 KB cluster in use */
-			n = mbuf_expand_big -
+			n = m_region_expand(MC_BIGCL) -
 			    (m_total(MC_BIGCL) - m_infree(MC_BIGCL));
 			if ((n + m_total(MC_BIGCL)) > m_maxlimit(MC_BIGCL))
 				n = m_maxlimit(MC_BIGCL) - m_total(MC_BIGCL);
-			mbuf_expand_big = 0;
+			m_region_expand(MC_BIGCL) = 0;
 			if (n > 0 && freelist_populate(MC_BIGCL, n, M_WAIT) > 0)
 				mbuf_expand++;
 		}
-		if (mbuf_expand_16k) {
+		if (m_region_expand(MC_16KCL) > 0) {
 			int n;
 
 			/* Adjust to current number of 16 KB cluster in use */
-			n = mbuf_expand_16k -
+			n = m_region_expand(MC_16KCL) -
 			    (m_total(MC_16KCL) - m_infree(MC_16KCL));
 			if ((n + m_total(MC_16KCL)) > m_maxlimit(MC_16KCL))
 				n = m_maxlimit(MC_16KCL) - m_total(MC_16KCL);
-			mbuf_expand_16k = 0;
+			m_region_expand(MC_16KCL) = 0;
 
 			if (n > 0)
 				(void) freelist_populate(MC_16KCL, n, M_WAIT);
@@ -6501,7 +6518,7 @@ slab_get(void *buf)
 	mcl_slabg_t *slg;
 	unsigned int ix, k;
 
-	lck_mtx_assert(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
+	LCK_MTX_ASSERT(mbuf_mlock, LCK_MTX_ASSERT_OWNED);
 
 	VERIFY(MBUF_IN_MAP(buf));
 	ix = ((unsigned char *)buf - mbutl) >> MBSHIFT;
@@ -6686,7 +6703,7 @@ mcl_audit_init(void *buf, mcache_audit_t **mca_list,
 	VERIFY(ix < maxclaudit);
 
 	/* Make sure we haven't been here before */
-	for (i = 0; i < NMBPG; i++)
+	for (i = 0; i < num; i++)
 		VERIFY(mclaudit[ix].cl_audit[i] == NULL);
 
 	mca = mca_tail = *mca_list;
@@ -7616,8 +7633,8 @@ m_set_ext(struct mbuf *m, struct ext_ref *rfa, m_ext_free_func_t ext_free,
 			m->m_ext.ext_free = (m_ext_free_func_t)
 			    (((uintptr_t)ext_free) ^ rfa->ext_token);
 			if (ext_arg != NULL) {
-				m->m_ext.ext_arg = (((uintptr_t)ext_arg) ^
-				    rfa->ext_token);
+				m->m_ext.ext_arg =
+				    (caddr_t)(((uintptr_t)ext_arg) ^ rfa->ext_token);
 			} else {
 				m->m_ext.ext_arg = NULL;
 			}
@@ -7633,10 +7650,12 @@ m_set_ext(struct mbuf *m, struct ext_ref *rfa, m_ext_free_func_t ext_free,
 		 * to obscure the ext_free and ext_arg pointers.
 		 */
 		if (ext_free != NULL) {
-			m->m_ext.ext_free = ((uintptr_t)ext_free ^
+			m->m_ext.ext_free =
+			    (m_ext_free_func_t)((uintptr_t)ext_free ^
 			    mb_obscure_extfree);
 			if (ext_arg != NULL) {
-				m->m_ext.ext_arg = ((uintptr_t)ext_arg ^
+				m->m_ext.ext_arg =
+				    (caddr_t)((uintptr_t)ext_arg ^
 				    mb_obscure_extfree);
 			} else {
 				m->m_ext.ext_arg = NULL;
@@ -7667,7 +7686,7 @@ m_get_ext_free(struct mbuf *m)
 
 	rfa = m_get_rfa(m);
 	if (rfa == NULL)
-		return ((uintptr_t)m->m_ext.ext_free ^ mb_obscure_extfree);
+		return ((m_ext_free_func_t)((uintptr_t)m->m_ext.ext_free ^ mb_obscure_extfree));
 	else
 		return ((m_ext_free_func_t)(((uintptr_t)m->m_ext.ext_free) ^
 		    rfa->ext_token));
@@ -7682,7 +7701,7 @@ m_get_ext_arg(struct mbuf *m)
 
 	rfa = m_get_rfa(m);
 	if (rfa == NULL) {
-		return ((uintptr_t)m->m_ext.ext_arg ^ mb_obscure_extfree);
+		return ((caddr_t)((uintptr_t)m->m_ext.ext_arg ^ mb_obscure_extfree));
 	} else {
 		return ((caddr_t)(((uintptr_t)m->m_ext.ext_arg) ^
 		    rfa->ext_token));
@@ -7919,8 +7938,14 @@ m_drain(void)
 				    0);
 				nsp->sl_flags = 0;
 			}
-			if (mclaudit != NULL)
-				mcl_audit_free(sp->sl_base, 1);
+			if (mclaudit != NULL) {
+				if (sp->sl_len == PAGE_SIZE) {
+					mcl_audit_free(sp->sl_base,
+					    NMBPG);
+				} else {
+					mcl_audit_free(sp->sl_base, 1);
+				}
+			}
 			break;
 		default:
 			/*