/*
- * Copyright (c) 2004 Apple Computer, Inc. All rights reserved.
+ * Copyright (c) 2004-2014 Apple Inc. All rights reserved.
*
- * @APPLE_LICENSE_HEADER_START@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
- * The contents of this file constitute Original Code as defined in and
- * are subject to the Apple Public Source License Version 1.1 (the
- * "License"). You may not use this file except in compliance with the
- * License. Please obtain a copy of the License at
- * http://www.apple.com/publicsource and read it before using this file.
+ * This file contains Original Code and/or Modifications of Original Code
+ * as defined in and that are subject to the Apple Public Source License
+ * Version 2.0 (the 'License'). You may not use this file except in
+ * compliance with the License. The rights granted to you under the License
+ * may not be used to create, or enable the creation or redistribution of,
+ * unlawful or unlicensed copies of an Apple operating system, or to
+ * circumvent, violate, or enable the circumvention or violation of, any
+ * terms of an Apple operating system software license agreement.
*
- * This Original Code and all software distributed under the License are
- * distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY KIND, EITHER
+ * Please obtain a copy of the License at
+ * http://www.opensource.apple.com/apsl/ and read it before using this file.
+ *
+ * The Original Code and all software distributed under the License are
+ * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
* EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
* INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT. Please see the
- * License for the specific language governing rights and limitations
- * under the License.
+ * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
+ * Please see the License for the specific language governing rights and
+ * limitations under the License.
*
- * @APPLE_LICENSE_HEADER_END@
+ * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
*/
#define __KPI__
#include <sys/param.h>
#include <sys/mbuf.h>
+#include <sys/mcache.h>
#include <sys/socket.h>
#include <kern/debug.h>
#include <libkern/OSAtomic.h>
#include <kern/kalloc.h>
#include <string.h>
+#include <netinet/in.h>
+#include <netinet/ip_var.h>
+
+#include "net/net_str_id.h"
-void mbuf_tag_id_first_last(u_long *first, u_long *last);
-errno_t mbuf_tag_id_find_internal(const char *string, u_long *out_id, int create);
+/* mbuf flags visible to KPI clients; do not add private flags here */
+static const mbuf_flags_t mbuf_flags_mask = (MBUF_EXT | MBUF_PKTHDR | MBUF_EOR |
+ MBUF_LOOP | MBUF_BCAST | MBUF_MCAST | MBUF_FRAG | MBUF_FIRSTFRAG |
+ MBUF_LASTFRAG | MBUF_PROMISC | MBUF_HASFCS);
-static const mbuf_flags_t mbuf_flags_mask = MBUF_EXT | MBUF_PKTHDR | MBUF_EOR |
- MBUF_BCAST | MBUF_MCAST | MBUF_FRAG | MBUF_FIRSTFRAG |
- MBUF_LASTFRAG | MBUF_PROMISC;
+/* Unalterable mbuf flags */
+static const mbuf_flags_t mbuf_cflags_mask = (MBUF_EXT);
void* mbuf_data(mbuf_t mbuf)
{
- return m_mtod(mbuf);
+ return mbuf->m_data;
}
void* mbuf_datastart(mbuf_t mbuf)
return 0;
}
+/* This function is used to provide mcl_to_paddr via symbol indirection,
+ * please avoid any change in behavior or remove the indirection in
+ * config/Unsupported*
+ */
addr64_t mbuf_data_to_physical(void* ptr)
{
- return (addr64_t)mcl_to_paddr(ptr);
+ return ((addr64_t)mcl_to_paddr(ptr));
}
errno_t mbuf_get(mbuf_how_t how, mbuf_type_t type, mbuf_t *mbuf)
return (*mbuf == NULL) ? ENOMEM : 0;
}
-extern struct mbuf * m_mbigget(struct mbuf *m, int nowait);
+errno_t
+mbuf_attachcluster(mbuf_how_t how, mbuf_type_t type, mbuf_t *mbuf,
+ caddr_t extbuf, void (*extfree)(caddr_t , u_int, caddr_t),
+ size_t extsize, caddr_t extarg)
+{
+ if (mbuf == NULL || extbuf == NULL || extfree == NULL || extsize == 0)
+ return (EINVAL);
+
+ if ((*mbuf = m_clattach(*mbuf, type, extbuf,
+ extfree, extsize, extarg, how)) == NULL)
+ return (ENOMEM);
+
+ return (0);
+}
-errno_t mbuf_getcluster(mbuf_how_t how, mbuf_type_t type, size_t size, mbuf_t* mbuf)
+errno_t
+mbuf_alloccluster(mbuf_how_t how, size_t *size, caddr_t *addr)
+{
+ if (size == NULL || *size == 0 || addr == NULL)
+ return (EINVAL);
+
+ *addr = NULL;
+
+ /* Jumbo cluster pool not available? */
+ if (*size > MBIGCLBYTES && njcl == 0)
+ return (ENOTSUP);
+
+ if (*size <= MCLBYTES && (*addr = m_mclalloc(how)) != NULL)
+ *size = MCLBYTES;
+ else if (*size > MCLBYTES && *size <= MBIGCLBYTES &&
+ (*addr = m_bigalloc(how)) != NULL)
+ *size = MBIGCLBYTES;
+ else if (*size > MBIGCLBYTES && *size <= M16KCLBYTES &&
+ (*addr = m_16kalloc(how)) != NULL)
+ *size = M16KCLBYTES;
+ else
+ *size = 0;
+
+ if (*addr == NULL)
+ return (ENOMEM);
+
+ return (0);
+}
+
+void
+mbuf_freecluster(caddr_t addr, size_t size)
+{
+ if (size != MCLBYTES && size != MBIGCLBYTES && size != M16KCLBYTES)
+ panic("%s: invalid size (%ld) for cluster %p", __func__,
+ size, (void *)addr);
+
+ if (size == MCLBYTES)
+ m_mclfree(addr);
+ else if (size == MBIGCLBYTES)
+ m_bigfree(addr, MBIGCLBYTES, NULL);
+ else if (njcl > 0)
+ m_16kfree(addr, M16KCLBYTES, NULL);
+ else
+ panic("%s: freeing jumbo cluster to an empty pool", __func__);
+}
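/*
 * Illustrative usage sketch (editorial note, not part of this change):
 * pairing mbuf_alloccluster() with mbuf_attachcluster() and
 * mbuf_freecluster(). The free routine, the 2 KB size and the NULL
 * starting mbuf are assumptions made for the example.
 *
 *    static void
 *    my_ext_free(caddr_t buf, u_int size, __unused caddr_t arg)
 *    {
 *        mbuf_freecluster(buf, size);
 *    }
 *
 *    size_t size = MCLBYTES;
 *    caddr_t buf;
 *    mbuf_t m = NULL;    // assumed: a NULL mbuf lets m_clattach()
 *                        // allocate the header as well
 *
 *    if (mbuf_alloccluster(MBUF_WAITOK, &size, &buf) != 0)
 *        return;
 *    if (mbuf_attachcluster(MBUF_WAITOK, MBUF_TYPE_DATA, &m, buf,
 *        my_ext_free, size, NULL) != 0)
 *        mbuf_freecluster(buf, size);
 */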
+
+errno_t
+mbuf_getcluster(mbuf_how_t how, mbuf_type_t type, size_t size, mbuf_t* mbuf)
{
/* Must set *mbuf to NULL in failure case */
errno_t error = 0;
- int created = 0;
+ int created = 0;
if (mbuf == NULL)
return EINVAL;
created = 1;
}
/*
- * At the time this code was written, m_mclget and m_mbigget would always
- * return the same value that was passed in to it.
+ * At the time this code was written, m_{mclget,mbigget,m16kget}
+ * would always return the same value that was passed in to it.
*/
if (size == MCLBYTES) {
*mbuf = m_mclget(*mbuf, how);
- } else if (size == NBPG) {
+ } else if (size == MBIGCLBYTES) {
*mbuf = m_mbigget(*mbuf, how);
+ } else if (size == M16KCLBYTES) {
+ if (njcl > 0) {
+ *mbuf = m_m16kget(*mbuf, how);
+ } else {
+ /* Jumbo cluster pool not available? */
+ error = ENOTSUP;
+ goto out;
+ }
} else {
error = EINVAL;
goto out;
error = ENOMEM;
out:
if (created && error != 0) {
- error = ENOMEM;
mbuf_free(*mbuf);
*mbuf = NULL;
}
return error;
}
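/*
 * Illustrative usage sketch (editorial note, not part of this change):
 * asking for a 16 KB jumbo cluster and falling back to a 4 KB cluster
 * when the jumbo pool is not configured, the case in which
 * mbuf_getcluster() now returns ENOTSUP.
 *
 *    mbuf_t m = NULL;
 *    errno_t err;
 *
 *    err = mbuf_getcluster(MBUF_WAITOK, MBUF_TYPE_DATA, M16KCLBYTES, &m);
 *    if (err == ENOTSUP)
 *        err = mbuf_getcluster(MBUF_WAITOK, MBUF_TYPE_DATA,
 *            MBIGCLBYTES, &m);
 */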
+/* This function is used to provide m_free via symbol indirection, please avoid
+ * any change in behavior or remove the indirection in config/Unsupported*
+ */
mbuf_t mbuf_free(mbuf_t mbuf)
{
return m_free(mbuf);
}
+/* This function is used to provide m_freem via symbol indirection, please avoid
+ * any change in behavior or remove the indirection in config/Unsupported*
+ */
void mbuf_freem(mbuf_t mbuf)
{
m_freem(mbuf);
return m_freem_list(mbuf);
}
-size_t mbuf_leadingspace(mbuf_t mbuf)
+size_t mbuf_leadingspace(const mbuf_t mbuf)
{
return m_leadingspace(mbuf);
}
-size_t mbuf_trailingspace(mbuf_t mbuf)
+/* This function is used to provide m_trailingspace via symbol indirection,
+ * please avoid any change in behavior or remove the indirection in
+ * config/Unsupported*
+ */
+size_t mbuf_trailingspace(const mbuf_t mbuf)
{
return m_trailingspace(mbuf);
}
/* Manipulation */
-errno_t mbuf_copym(mbuf_t src, size_t offset, size_t len,
+errno_t mbuf_copym(const mbuf_t src, size_t offset, size_t len,
mbuf_how_t how, mbuf_t *new_mbuf)
{
/* Must set *mbuf to NULL in failure case */
return (*new_mbuf == NULL) ? ENOMEM : 0;
}
-errno_t mbuf_dup(mbuf_t src, mbuf_how_t how, mbuf_t *new_mbuf)
+errno_t mbuf_dup(const mbuf_t src, mbuf_how_t how, mbuf_t *new_mbuf)
{
/* Must set *new_mbuf to NULL in failure case */
*new_mbuf = m_dup(src, how);
return (*location == NULL) ? ENOMEM : 0;
}
+/* This function is used to provide m_adj via symbol indirection, please avoid
+ * any change in behavior or remove the indirection in config/Unsupported*
+ */
void mbuf_adj(mbuf_t mbuf, int len)
{
m_adj(mbuf, len);
}
-errno_t mbuf_copydata(mbuf_t m, size_t off, size_t len, void* out_data)
+errno_t mbuf_adjustlen(mbuf_t m, int amount)
+{
+ /* Verify m_len will be valid after adding amount */
+ if (amount > 0) {
+ int used = (size_t)mbuf_data(m) - (size_t)mbuf_datastart(m) +
+ m->m_len;
+
+ if ((size_t)(amount + used) > mbuf_maxlen(m))
+ return EINVAL;
+ }
+ else if (-amount > m->m_len) {
+ return EINVAL;
+ }
+
+ m->m_len += amount;
+ return 0;
+}
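/*
 * Illustrative usage sketch (editorial note, not part of this change):
 * appending "n" bytes from "src" (both assumed) into the unused trailing
 * space of an mbuf and then growing m_len with mbuf_adjustlen(), which
 * rejects growth past maxlen and shrinkage below zero. A packet header
 * mbuf would also need mbuf_pkthdr_adjustlen().
 *
 *    if (mbuf_trailingspace(m) >= n) {
 *        bcopy(src, (u_int8_t *)mbuf_data(m) + mbuf_len(m), n);
 *        (void) mbuf_adjustlen(m, (int)n);
 *    }
 */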
+
+mbuf_t
+mbuf_concatenate(mbuf_t dst, mbuf_t src)
+{
+ if (dst == NULL)
+ return (NULL);
+
+ m_cat(dst, src);
+
+ /* return dst as is in the current implementation */
+ return (dst);
+}
+errno_t mbuf_copydata(const mbuf_t m0, size_t off, size_t len, void* out_data)
{
/* Copied m_copydata, added error handling (don't just panic) */
int count;
+ mbuf_t m = m0;
while (off > 0) {
if (m == 0)
return 0;
}
-int mbuf_mclref(mbuf_t mbuf)
-{
- return m_mclref(mbuf);
-}
-
-int mbuf_mclunref(mbuf_t mbuf)
-{
- return m_mclunref(mbuf);
-}
-
int mbuf_mclhasreference(mbuf_t mbuf)
{
if ((mbuf->m_flags & M_EXT))
/* mbuf header */
-mbuf_t mbuf_next(mbuf_t mbuf)
+mbuf_t mbuf_next(const mbuf_t mbuf)
{
return mbuf->m_next;
}
return 0;
}
-mbuf_t mbuf_nextpkt(mbuf_t mbuf)
+mbuf_t mbuf_nextpkt(const mbuf_t mbuf)
{
return mbuf->m_nextpkt;
}
mbuf->m_nextpkt = nextpkt;
}
-size_t mbuf_len(mbuf_t mbuf)
+size_t mbuf_len(const mbuf_t mbuf)
{
return mbuf->m_len;
}
mbuf->m_len = len;
}
-size_t mbuf_maxlen(mbuf_t mbuf)
+size_t mbuf_maxlen(const mbuf_t mbuf)
{
if (mbuf->m_flags & M_EXT)
return mbuf->m_ext.ext_size;
return &mbuf->m_dat[MLEN] - ((char*)mbuf_datastart(mbuf));
}
-mbuf_type_t mbuf_type(mbuf_t mbuf)
+mbuf_type_t mbuf_type(const mbuf_t mbuf)
{
return mbuf->m_type;
}
return 0;
}
-mbuf_flags_t mbuf_flags(mbuf_t mbuf)
+mbuf_flags_t
+mbuf_flags(const mbuf_t mbuf)
{
- return mbuf->m_flags & mbuf_flags_mask;
+ return (mbuf->m_flags & mbuf_flags_mask);
}
-errno_t mbuf_setflags(mbuf_t mbuf, mbuf_flags_t flags)
+errno_t
+mbuf_setflags(mbuf_t mbuf, mbuf_flags_t flags)
{
- if ((flags & ~mbuf_flags_mask) != 0) return EINVAL;
- mbuf->m_flags = flags |
- (mbuf->m_flags & ~mbuf_flags_mask);
-
- return 0;
+ errno_t ret = 0;
+ mbuf_flags_t oflags = mbuf->m_flags;
+
+ /*
+ * 1. Return error if public but un-alterable flags are changed
+ * in flags argument.
+ * 2. Return error if bits other than public flags are set in passed
+ * flags argument.
+ * Note that kexts must pass private flag bits as cleared; they are
+ * expected to read the current flags with the mbuf_flags KPI, which
+ * does not expose private flags.
+ */
+ if ((flags ^ oflags) & mbuf_cflags_mask) {
+ ret = EINVAL;
+ } else if (flags & ~mbuf_flags_mask) {
+ ret = EINVAL;
+ } else {
+ mbuf->m_flags = flags | (mbuf->m_flags & ~mbuf_flags_mask);
+ /*
+ * If M_PKTHDR bit has changed, we have work to do;
+ * m_reinit() will take care of setting/clearing the
+ * bit, as well as the rest of bookkeeping.
+ */
+ if ((oflags ^ mbuf->m_flags) & M_PKTHDR) {
+ mbuf->m_flags ^= M_PKTHDR; /* restore */
+ ret = m_reinit(mbuf,
+ (mbuf->m_flags & M_PKTHDR) ? 0 : 1);
+ }
+ }
+
+ return (ret);
}
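/*
 * Illustrative usage sketch (editorial note, not part of this change):
 * the read-modify-write pattern the comment above expects from kexts,
 * starting from mbuf_flags() so that private bits are never passed back.
 *
 *    mbuf_flags_t fl = mbuf_flags(m);
 *    errno_t err = mbuf_setflags(m, fl | MBUF_MCAST);
 *    // err is EINVAL if the update would change MBUF_EXT or set a
 *    // bit outside mbuf_flags_mask
 */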
-errno_t mbuf_setflags_mask(mbuf_t mbuf, mbuf_flags_t flags, mbuf_flags_t mask)
+errno_t
+mbuf_setflags_mask(mbuf_t mbuf, mbuf_flags_t flags, mbuf_flags_t mask)
{
- if (((flags | mask) & ~mbuf_flags_mask) != 0) return EINVAL;
-
- mbuf->m_flags = (flags & mask) | (mbuf->m_flags & ~mask);
-
- return 0;
+ errno_t ret = 0;
+
+ if (mask & (~mbuf_flags_mask | mbuf_cflags_mask)) {
+ ret = EINVAL;
+ } else {
+ mbuf_flags_t oflags = mbuf->m_flags;
+ mbuf->m_flags = (flags & mask) | (mbuf->m_flags & ~mask);
+ /*
+ * If M_PKTHDR bit has changed, we have work to do;
+ * m_reinit() will take care of setting/clearing the
+ * bit, as well as the rest of bookkeeping.
+ */
+ if ((oflags ^ mbuf->m_flags) & M_PKTHDR) {
+ mbuf->m_flags ^= M_PKTHDR; /* restore */
+ ret = m_reinit(mbuf,
+ (mbuf->m_flags & M_PKTHDR) ? 0 : 1);
+ }
+ }
+
+ return (ret);
}
-errno_t mbuf_copy_pkthdr(mbuf_t dest, mbuf_t src)
+errno_t mbuf_copy_pkthdr(mbuf_t dest, const mbuf_t src)
{
if (((src)->m_flags & M_PKTHDR) == 0)
return EINVAL;
return 0;
}
-size_t mbuf_pkthdr_len(mbuf_t mbuf)
+size_t mbuf_pkthdr_len(const mbuf_t mbuf)
{
return mbuf->m_pkthdr.len;
}
+__private_extern__ size_t mbuf_pkthdr_maxlen(mbuf_t m)
+{
+ size_t maxlen = 0;
+ mbuf_t n = m;
+
+ while (n) {
+ maxlen += mbuf_maxlen(n);
+ n = mbuf_next(n);
+ }
+ return (maxlen);
+}
+
void mbuf_pkthdr_setlen(mbuf_t mbuf, size_t len)
{
mbuf->m_pkthdr.len = len;
}
-ifnet_t mbuf_pkthdr_rcvif(mbuf_t mbuf)
+void mbuf_pkthdr_adjustlen(mbuf_t mbuf, int amount)
+{
+ mbuf->m_pkthdr.len += amount;
+}
+
+ifnet_t mbuf_pkthdr_rcvif(const mbuf_t mbuf)
{
// If we reference count ifnets, we should take a reference here before returning
return mbuf->m_pkthdr.rcvif;
return 0;
}
-void* mbuf_pkthdr_header(mbuf_t mbuf)
+void* mbuf_pkthdr_header(const mbuf_t mbuf)
{
- return mbuf->m_pkthdr.header;
+ return mbuf->m_pkthdr.pkt_hdr;
}
void mbuf_pkthdr_setheader(mbuf_t mbuf, void *header)
{
- mbuf->m_pkthdr.header = (void*)header;
-}
-
-/* mbuf aux data */
-errno_t mbuf_aux_add(mbuf_t mbuf, int family, mbuf_type_t type, mbuf_t *aux_mbuf)
-{
- *aux_mbuf = m_aux_add(mbuf, family, type);
- return (*aux_mbuf == NULL) ? ENOMEM : 0;
-}
-
-mbuf_t mbuf_aux_find(mbuf_t mbuf, int family, mbuf_type_t type)
-{
- return m_aux_find(mbuf, family, type);
-}
-
-void mbuf_aux_delete(mbuf_t mbuf, mbuf_t aux)
-{
- m_aux_delete(mbuf, aux);
+ mbuf->m_pkthdr.pkt_hdr = (void*)header;
}
void
mbuf->m_pkthdr.csum_flags = 0;
}
-extern void in_cksum_offset(struct mbuf* m, size_t ip_offset);
-extern void in_delayed_cksum_offset(struct mbuf *m, int ip_offset);
-
void
-mbuf_outbound_finalize(mbuf_t mbuf, u_long protocol_family, size_t protocol_offset)
+mbuf_outbound_finalize(struct mbuf *m, u_int32_t pf, size_t o)
{
- if ((mbuf->m_pkthdr.csum_flags &
- (CSUM_DELAY_DATA | CSUM_DELAY_IP | CSUM_TCP_SUM16)) == 0)
- return;
-
/* Generate the packet in software, client needs it */
- switch (protocol_family) {
- case PF_INET:
- if (mbuf->m_pkthdr.csum_flags & CSUM_TCP_SUM16) {
- /*
- * If you're wondering where this lovely code comes
- * from, we're trying to undo what happens in ip_output.
- * Look for CSUM_TCP_SUM16 in ip_output.
- */
- u_int16_t first, second;
- mbuf->m_pkthdr.csum_flags &= ~CSUM_TCP_SUM16;
- mbuf->m_pkthdr.csum_flags |= CSUM_TCP;
- first = mbuf->m_pkthdr.csum_data >> 16;
- second = mbuf->m_pkthdr.csum_data & 0xffff;
- mbuf->m_pkthdr.csum_data = first - second;
- }
- if (mbuf->m_pkthdr.csum_flags & CSUM_DELAY_DATA) {
- in_delayed_cksum_offset(mbuf, protocol_offset);
- }
-
- if (mbuf->m_pkthdr.csum_flags & CSUM_DELAY_IP) {
- in_cksum_offset(mbuf, protocol_offset);
- }
-
- mbuf->m_pkthdr.csum_flags &= ~(CSUM_DELAY_DATA | CSUM_DELAY_IP);
- break;
-
- default:
- /*
- * Not sure what to do here if anything.
- * Hardware checksum code looked pretty IPv4 specific.
- */
- if ((mbuf->m_pkthdr.csum_flags & (CSUM_DELAY_DATA | CSUM_DELAY_IP)) != 0)
- panic("mbuf_outbound_finalize - CSUM flags set for non-IPv4 packet (%d)!\n", protocol_family);
+ switch (pf) {
+ case PF_INET:
+ (void) in_finalize_cksum(m, o, m->m_pkthdr.csum_flags);
+ break;
+
+ case PF_INET6:
+#if INET6
+ /*
+ * Checksum offload should not have been enabled when
+ * extension headers exist; indicate that the callee
+ * should skip such case by setting optlen to -1.
+ */
+ (void) in6_finalize_cksum(m, o, -1, -1, m->m_pkthdr.csum_flags);
+#endif /* INET6 */
+ break;
+
+ default:
+ break;
}
}
}
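/*
 * Illustrative usage sketch (editorial note, not part of this change):
 * a driver or interface filter without checksum offload finalizing an
 * outbound IPv4 packet; the offset names where the IP header starts, and
 * the Ethernet framing is an assumption made for the example.
 *
 *    mbuf_outbound_finalize(m, PF_INET, sizeof (struct ether_header));
 */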
static const mbuf_csum_request_flags_t mbuf_valid_csum_request_flags =
- MBUF_CSUM_REQ_IP | MBUF_CSUM_REQ_TCP | MBUF_CSUM_REQ_UDP | MBUF_CSUM_REQ_SUM16;
+ MBUF_CSUM_REQ_IP | MBUF_CSUM_REQ_TCP | MBUF_CSUM_REQ_UDP |
+ MBUF_CSUM_PARTIAL | MBUF_CSUM_REQ_TCPIPV6 | MBUF_CSUM_REQ_UDPIPV6;
errno_t
mbuf_set_csum_requested(
return 0;
}
+static const mbuf_tso_request_flags_t mbuf_valid_tso_request_flags =
+ MBUF_TSO_IPV4 | MBUF_TSO_IPV6;
+
+errno_t
+mbuf_get_tso_requested(
+ mbuf_t mbuf,
+ mbuf_tso_request_flags_t *request,
+ u_int32_t *value)
+{
+ if (mbuf == NULL || (mbuf->m_flags & M_PKTHDR) == 0 ||
+ request == NULL || value == NULL)
+ return EINVAL;
+
+ *request = mbuf->m_pkthdr.csum_flags;
+ *request &= mbuf_valid_tso_request_flags;
+ if (*request && value != NULL)
+ *value = mbuf->m_pkthdr.tso_segsz;
+
+ return 0;
+}
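/*
 * Illustrative usage sketch (editorial note, not part of this change):
 * a driver checking whether the stack requested TCP segmentation offload
 * and reading the advertised segment size.
 *
 *    mbuf_tso_request_flags_t req;
 *    u_int32_t mss;
 *
 *    if (mbuf_get_tso_requested(m, &req, &mss) == 0 &&
 *        (req & MBUF_TSO_IPV4)) {
 *        // hand the packet to hardware with segment size "mss"
 *    }
 */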
+
errno_t
mbuf_get_csum_requested(
mbuf_t mbuf,
static const mbuf_csum_performed_flags_t mbuf_valid_csum_performed_flags =
MBUF_CSUM_DID_IP | MBUF_CSUM_IP_GOOD | MBUF_CSUM_DID_DATA |
- MBUF_CSUM_PSEUDO_HDR | MBUF_CSUM_TCP_SUM16;
+ MBUF_CSUM_PSEUDO_HDR | MBUF_CSUM_PARTIAL;
errno_t
mbuf_set_csum_performed(
return 0;
}
-/*
- * Mbuf tag KPIs
- */
+errno_t
+mbuf_inet_cksum(mbuf_t mbuf, int protocol, u_int32_t offset, u_int32_t length,
+ u_int16_t *csum)
+{
+ if (mbuf == NULL || length == 0 || csum == NULL ||
+ (u_int32_t)mbuf->m_pkthdr.len < (offset + length))
+ return (EINVAL);
-struct mbuf_tag_id_entry {
- SLIST_ENTRY(mbuf_tag_id_entry) next;
- mbuf_tag_id_t id;
- char string[];
-};
+ *csum = inet_cksum(mbuf, protocol, offset, length);
+ return (0);
+}
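/*
 * Illustrative usage sketch (editorial note, not part of this change):
 * verifying a received IPv4 header checksum. Protocol 0 is assumed to
 * request a plain one's-complement sum with no pseudo-header, so a valid
 * header folds to zero; "ip_hlen" is the header length in bytes.
 *
 *    u_int16_t sum;
 *
 *    if (mbuf_inet_cksum(m, 0, 0, ip_hlen, &sum) == 0 && sum == 0) {
 *        // header checksum verifies
 *    }
 */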
-#define MBUF_TAG_ID_ENTRY_SIZE(__str) \
- ((size_t)&(((struct mbuf_tag_id_entry*)0)->string[0]) + \
- strlen(__str) + 1)
+#if INET6
+errno_t
+mbuf_inet6_cksum(mbuf_t mbuf, int protocol, u_int32_t offset, u_int32_t length,
+ u_int16_t *csum)
+{
+ if (mbuf == NULL || length == 0 || csum == NULL ||
+ (u_int32_t)mbuf->m_pkthdr.len < (offset + length))
+ return (EINVAL);
-#define MTAG_FIRST_ID 1000
-static u_long mtag_id_next = MTAG_FIRST_ID;
-static SLIST_HEAD(,mbuf_tag_id_entry) mtag_id_list = {NULL};
-static lck_mtx_t *mtag_id_lock = NULL;
+ *csum = inet6_cksum(mbuf, protocol, offset, length);
+ return (0);
+}
+#else /* INET6 */
+errno_t
+mbuf_inet6_cksum(__unused mbuf_t mbuf, __unused int protocol,
+ __unused u_int32_t offset, __unused u_int32_t length,
+ __unused u_int16_t *csum)
+{
+ panic("mbuf_inet6_cksum() doesn't exist on this platform\n");
+ return (0);
+}
-__private_extern__ void
-mbuf_tag_id_first_last(
- u_long *first,
- u_long *last)
+u_int16_t
+inet6_cksum(__unused struct mbuf *m, __unused unsigned int nxt,
+ __unused unsigned int off, __unused unsigned int len)
{
- *first = MTAG_FIRST_ID;
- *last = mtag_id_next - 1;
+ panic("inet6_cksum() doesn't exist on this platform\n");
+ return (0);
}
-__private_extern__ errno_t
-mbuf_tag_id_find_internal(
- const char *string,
- u_long *out_id,
- int create)
+void nd6_lookup_ipv6(void);
+void
+nd6_lookup_ipv6(void)
{
- struct mbuf_tag_id_entry *entry = NULL;
-
-
- *out_id = 0;
-
- if (string == NULL || out_id == NULL) {
- return EINVAL;
- }
-
- /* Don't bother allocating the lock if we're only doing a lookup */
- if (create == 0 && mtag_id_lock == NULL)
- return ENOENT;
-
- /* Allocate lock if necessary */
- if (mtag_id_lock == NULL) {
- lck_grp_attr_t *grp_attrib = NULL;
- lck_attr_t *lck_attrb = NULL;
- lck_grp_t *lck_group = NULL;
- lck_mtx_t *new_lock = NULL;
-
- grp_attrib = lck_grp_attr_alloc_init();
- lck_group = lck_grp_alloc_init("mbuf_tag_allocate_id", grp_attrib);
- lck_grp_attr_free(grp_attrib);
- lck_attrb = lck_attr_alloc_init();
-
- new_lock = lck_mtx_alloc_init(lck_group, lck_attrb);
- if (!OSCompareAndSwap((UInt32)0, (UInt32)new_lock, (UInt32*)&mtag_id_lock)) {
- /*
- * If the atomic swap fails, someone else has already
- * done this work. We can free the stuff we allocated.
- */
- lck_mtx_free(new_lock, lck_group);
- lck_grp_free(lck_group);
- }
- lck_attr_free(lck_attrb);
- }
-
- /* Look for an existing entry */
- lck_mtx_lock(mtag_id_lock);
- SLIST_FOREACH(entry, &mtag_id_list, next) {
- if (strcmp(string, entry->string) == 0) {
- break;
- }
- }
-
- if (entry == NULL) {
- if (create == 0) {
- lck_mtx_unlock(mtag_id_lock);
- return ENOENT;
- }
-
- entry = kalloc(MBUF_TAG_ID_ENTRY_SIZE(string));
- if (entry == NULL) {
- lck_mtx_unlock(mtag_id_lock);
- return ENOMEM;
- }
-
- strcpy(entry->string, string);
- entry->id = mtag_id_next;
- mtag_id_next++;
- SLIST_INSERT_HEAD(&mtag_id_list, entry, next);
- }
- lck_mtx_unlock(mtag_id_lock);
-
- *out_id = entry->id;
-
- return 0;
+ panic("nd6_lookup_ipv6() doesn't exist on this platform\n");
+}
+
+int
+in6addr_local(__unused struct in6_addr *a)
+{
+ panic("in6addr_local() doesn't exist on this platform\n");
+ return (0);
}
+void nd6_storelladdr(void);
+void
+nd6_storelladdr(void)
+{
+ panic("nd6_storelladdr() doesn't exist on this platform\n");
+}
+#endif /* INET6 */
+
+/*
+ * Mbuf tag KPIs
+ */
+
+#define MTAG_FIRST_ID FIRST_KPI_STR_ID
+
errno_t
mbuf_tag_id_find(
const char *string,
mbuf_tag_id_t *out_id)
{
- return mbuf_tag_id_find_internal(string, (u_long*)out_id, 1);
+ return net_str_id_find_internal(string, out_id, NSI_MBUF_TAG, 1);
}
errno_t
void** data_p)
{
struct m_tag *tag;
+ u_int32_t mtag_id_first, mtag_id_last;
if (data_p != NULL)
*data_p = NULL;
/* Sanity check parameters */
- if (mbuf == NULL || (mbuf->m_flags & M_PKTHDR) == 0 || id < MTAG_FIRST_ID ||
- id >= mtag_id_next || length < 1 || (length & 0xffff0000) != 0 ||
+ (void) net_str_id_first_last(&mtag_id_first, &mtag_id_last, NSI_MBUF_TAG);
+ if (mbuf == NULL || (mbuf->m_flags & M_PKTHDR) == 0 || id < mtag_id_first ||
+ id > mtag_id_last || length < 1 || (length & 0xffff0000) != 0 ||
data_p == NULL) {
return EINVAL;
}
}
/* Allocate an mtag */
- tag = m_tag_alloc(id, type, length, how);
+ tag = m_tag_create(id, type, length, how, mbuf);
if (tag == NULL) {
return how == M_WAITOK ? ENOMEM : EWOULDBLOCK;
}
void** data_p)
{
struct m_tag *tag;
+ u_int32_t mtag_id_first, mtag_id_last;
if (length != NULL)
*length = 0;
*data_p = NULL;
/* Sanity check parameters */
- if (mbuf == NULL || (mbuf->m_flags & M_PKTHDR) == 0 || id < MTAG_FIRST_ID ||
- id >= mtag_id_next || length == NULL || data_p == NULL) {
+ (void) net_str_id_first_last(&mtag_id_first, &mtag_id_last, NSI_MBUF_TAG);
+ if (mbuf == NULL || (mbuf->m_flags & M_PKTHDR) == 0 || id < mtag_id_first ||
+ id > mtag_id_last || length == NULL || data_p == NULL) {
return EINVAL;
}
mbuf_tag_type_t type)
{
struct m_tag *tag;
+ u_int32_t mtag_id_first, mtag_id_last;
- if (mbuf == NULL || (mbuf->m_flags & M_PKTHDR) == 0 || id < MTAG_FIRST_ID ||
- id >= mtag_id_next)
+ /* Sanity check parameters */
+ (void) net_str_id_first_last(&mtag_id_first, &mtag_id_last, NSI_MBUF_TAG);
+ if (mbuf == NULL || (mbuf->m_flags & M_PKTHDR) == 0 || id < mtag_id_first ||
+ id > mtag_id_last)
return;
tag = m_tag_locate(mbuf, id, type, NULL);
return;
}
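/*
 * Illustrative usage sketch (editorial note, not part of this change):
 * typical mbuf tag usage from a kext. The reverse-DNS id string, tag
 * type 0 and the u_int32_t payload are assumptions made for the example.
 *
 *    mbuf_tag_id_t my_id;
 *    u_int32_t *state;
 *    size_t len;
 *
 *    if (mbuf_tag_id_find("com.example.myfilter", &my_id) != 0)
 *        return;
 *    if (mbuf_tag_allocate(m, my_id, 0, sizeof (*state),
 *        MBUF_WAITOK, (void **)&state) == 0)
 *        *state = 1;
 *    // later, possibly in another hook
 *    if (mbuf_tag_find(m, my_id, 0, &len, (void **)&state) == 0 &&
 *        len >= sizeof (*state))
 *        mbuf_tag_free(m, my_id, 0);
 */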
+/*
+ * Maximum length of driver auxiliary data; keep this small to
+ * fit in a single mbuf to avoid wasting memory, rounded down to
+ * the nearest 64-bit boundary. This takes into account mbuf
+ * tag-related (m_taghdr + m_tag) as well as m_drvaux_tag structs.
+ */
+#define MBUF_DRVAUX_MAXLEN \
+ P2ROUNDDOWN(MLEN - sizeof (struct m_taghdr) - \
+ M_TAG_ALIGN(sizeof (struct m_drvaux_tag)), sizeof (uint64_t))
+
+errno_t
+mbuf_add_drvaux(mbuf_t mbuf, mbuf_how_t how, u_int32_t family,
+ u_int32_t subfamily, size_t length, void **data_p)
+{
+ struct m_drvaux_tag *p;
+ struct m_tag *tag;
+
+ if (mbuf == NULL || !(mbuf->m_flags & M_PKTHDR) ||
+ length == 0 || length > MBUF_DRVAUX_MAXLEN)
+ return (EINVAL);
+
+ if (data_p != NULL)
+ *data_p = NULL;
+
+ /* Check if one is already associated */
+ if ((tag = m_tag_locate(mbuf, KERNEL_MODULE_TAG_ID,
+ KERNEL_TAG_TYPE_DRVAUX, NULL)) != NULL)
+ return (EEXIST);
+
+ /* Tag is (m_drvaux_tag + module specific data) */
+ if ((tag = m_tag_create(KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_DRVAUX,
+ sizeof (*p) + length, how, mbuf)) == NULL)
+ return ((how == MBUF_WAITOK) ? ENOMEM : EWOULDBLOCK);
+
+ p = (struct m_drvaux_tag *)(tag + 1);
+ p->da_family = family;
+ p->da_subfamily = subfamily;
+ p->da_length = length;
+
+ /* Associate the tag */
+ m_tag_prepend(mbuf, tag);
+
+ if (data_p != NULL)
+ *data_p = (p + 1);
+
+ return (0);
+}
+
+errno_t
+mbuf_find_drvaux(mbuf_t mbuf, u_int32_t *family_p, u_int32_t *subfamily_p,
+ u_int32_t *length_p, void **data_p)
+{
+ struct m_drvaux_tag *p;
+ struct m_tag *tag;
+
+ if (mbuf == NULL || !(mbuf->m_flags & M_PKTHDR) || data_p == NULL)
+ return (EINVAL);
+
+ *data_p = NULL;
+
+ if ((tag = m_tag_locate(mbuf, KERNEL_MODULE_TAG_ID,
+ KERNEL_TAG_TYPE_DRVAUX, NULL)) == NULL)
+ return (ENOENT);
+
+ /* Must be at least size of m_drvaux_tag */
+ VERIFY(tag->m_tag_len >= sizeof (*p));
+
+ p = (struct m_drvaux_tag *)(tag + 1);
+ VERIFY(p->da_length > 0 && p->da_length <= MBUF_DRVAUX_MAXLEN);
+
+ if (family_p != NULL)
+ *family_p = p->da_family;
+ if (subfamily_p != NULL)
+ *subfamily_p = p->da_subfamily;
+ if (length_p != NULL)
+ *length_p = p->da_length;
+
+ *data_p = (p + 1);
+
+ return (0);
+}
+
+void
+mbuf_del_drvaux(mbuf_t mbuf)
+{
+ struct m_tag *tag;
+
+ if (mbuf == NULL || !(mbuf->m_flags & M_PKTHDR))
+ return;
+
+ if ((tag = m_tag_locate(mbuf, KERNEL_MODULE_TAG_ID,
+ KERNEL_TAG_TYPE_DRVAUX, NULL)) != NULL)
+ m_tag_delete(mbuf, tag);
+}
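/*
 * Illustrative usage sketch (editorial note, not part of this change):
 * attaching a small piece of driver auxiliary data and reading it back;
 * the structure and the zero family/subfamily values are assumptions
 * made for the example.
 *
 *    struct my_aux { u_int32_t seq; } *aux;
 *    u_int32_t fam, subfam, len;
 *
 *    if (mbuf_add_drvaux(m, MBUF_WAITOK, 0, 0, sizeof (*aux),
 *        (void **)&aux) == 0)
 *        aux->seq = 42;
 *    if (mbuf_find_drvaux(m, &fam, &subfam, &len, (void **)&aux) == 0) {
 *        // consume aux->seq, then drop the data if it is no
 *        // longer needed
 *        mbuf_del_drvaux(m);
 *    }
 */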
+
/* mbuf stats */
void mbuf_stats(struct mbuf_stat *stats)
{
else
error = ENOMEM;
} else {
+ if (maxchunks)
+ *maxchunks = numchunks;
error = 0;
*mbuf = m;
}
return error;
}
+errno_t
+mbuf_allocpacket_list(unsigned int numpkts, mbuf_how_t how, size_t packetlen, unsigned int *maxchunks, mbuf_t *mbuf)
+{
+ errno_t error;
+ struct mbuf *m;
+ unsigned int numchunks = maxchunks ? *maxchunks : 0;
+
+ if (numpkts == 0) {
+ error = EINVAL;
+ goto out;
+ }
+ if (packetlen == 0) {
+ error = EINVAL;
+ goto out;
+ }
+ m = m_allocpacket_internal(&numpkts, packetlen, maxchunks ? &numchunks : NULL, how, 1, 0);
+ if (m == 0) {
+ if (maxchunks && *maxchunks && numchunks > *maxchunks)
+ error = ENOBUFS;
+ else
+ error = ENOMEM;
+ } else {
+ if (maxchunks)
+ *maxchunks = numchunks;
+ error = 0;
+ *mbuf = m;
+ }
+out:
+ return error;
+}
+
+__private_extern__ size_t
+mbuf_pkt_list_len(mbuf_t m)
+{
+ size_t len = 0;
+ mbuf_t n = m;
+
+ while (n) {
+ len += mbuf_pkthdr_len(n);
+ n = mbuf_nextpkt(n);
+ }
+ return (len);
+}
+
+__private_extern__ size_t
+mbuf_pkt_list_maxlen(mbuf_t m)
+{
+ size_t maxlen = 0;
+ mbuf_t n = m;
+
+ while (n) {
+ maxlen += mbuf_pkthdr_maxlen(n);
+ n = mbuf_nextpkt(n);
+ }
+ return (maxlen);
+}
/*
* mbuf_copyback differs from m_copyback in a few ways:
return result;
}
+
+u_int32_t
+mbuf_get_mlen(void)
+{
+ return (_MLEN);
+}
+
+u_int32_t
+mbuf_get_mhlen(void)
+{
+ return (_MHLEN);
+}
+
+u_int32_t
+mbuf_get_minclsize(void)
+{
+ return (MHLEN + MLEN);
+}
+
+u_int32_t
+mbuf_get_traffic_class_max_count(void)
+{
+ return (MBUF_TC_MAX);
+}
+
+errno_t
+mbuf_get_traffic_class_index(mbuf_traffic_class_t tc, u_int32_t *index)
+{
+ if (index == NULL || (u_int32_t)tc >= MBUF_TC_MAX)
+ return (EINVAL);
+
+ *index = MBUF_SCIDX(m_service_class_from_val(MBUF_TC2SCVAL(tc)));
+ return (0);
+}
+
+mbuf_traffic_class_t
+mbuf_get_traffic_class(mbuf_t m)
+{
+ if (m == NULL || !(m->m_flags & M_PKTHDR))
+ return (MBUF_TC_BE);
+
+ return (m_get_traffic_class(m));
+}
+
+errno_t
+mbuf_set_traffic_class(mbuf_t m, mbuf_traffic_class_t tc)
+{
+ if (m == NULL || !(m->m_flags & M_PKTHDR) ||
+ ((u_int32_t)tc >= MBUF_TC_MAX))
+ return (EINVAL);
+
+ return (m_set_traffic_class(m, tc));
+}
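/*
 * Illustrative usage sketch (editorial note, not part of this change):
 * classifying an outbound packet as interactive video before it is
 * handed to the stack, and reading the class back elsewhere.
 *
 *    if (mbuf_set_traffic_class(m, MBUF_TC_VI) == 0) {
 *        // the packet scheduler may now prioritize this packet
 *    }
 *
 *    mbuf_traffic_class_t tc = mbuf_get_traffic_class(m);
 */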
+
+int
+mbuf_is_traffic_class_privileged(mbuf_t m)
+{
+ if (m == NULL || !(m->m_flags & M_PKTHDR) ||
+ !MBUF_VALID_SC(m->m_pkthdr.pkt_svc))
+ return (0);
+
+ return ((m->m_pkthdr.pkt_flags & PKTF_PRIO_PRIVILEGED) ? 1 : 0);
+}
+
+u_int32_t
+mbuf_get_service_class_max_count(void)
+{
+ return (MBUF_SC_MAX_CLASSES);
+}
+
+errno_t
+mbuf_get_service_class_index(mbuf_svc_class_t sc, u_int32_t *index)
+{
+ if (index == NULL || !MBUF_VALID_SC(sc))
+ return (EINVAL);
+
+ *index = MBUF_SCIDX(sc);
+ return (0);
+}
+
+mbuf_svc_class_t
+mbuf_get_service_class(mbuf_t m)
+{
+ if (m == NULL || !(m->m_flags & M_PKTHDR))
+ return (MBUF_SC_BE);
+
+ return (m_get_service_class(m));
+}
+
+errno_t
+mbuf_set_service_class(mbuf_t m, mbuf_svc_class_t sc)
+{
+ if (m == NULL || !(m->m_flags & M_PKTHDR))
+ return (EINVAL);
+
+ return (m_set_service_class(m, sc));
+}
+
+errno_t
+mbuf_pkthdr_aux_flags(mbuf_t m, mbuf_pkthdr_aux_flags_t *flagsp)
+{
+ u_int32_t flags;
+
+ if (m == NULL || !(m->m_flags & M_PKTHDR) || flagsp == NULL)
+ return (EINVAL);
+
+ *flagsp = 0;
+ flags = m->m_pkthdr.pkt_flags;
+ if ((flags & (PKTF_INET_RESOLVE|PKTF_RESOLVE_RTR)) ==
+ (PKTF_INET_RESOLVE|PKTF_RESOLVE_RTR))
+ *flagsp |= MBUF_PKTAUXF_INET_RESOLVE_RTR;
+ if ((flags & (PKTF_INET6_RESOLVE|PKTF_RESOLVE_RTR)) ==
+ (PKTF_INET6_RESOLVE|PKTF_RESOLVE_RTR))
+ *flagsp |= MBUF_PKTAUXF_INET6_RESOLVE_RTR;
+
+ /* These 2 flags are mutually exclusive */
+ VERIFY((*flagsp &
+ (MBUF_PKTAUXF_INET_RESOLVE_RTR | MBUF_PKTAUXF_INET6_RESOLVE_RTR)) !=
+ (MBUF_PKTAUXF_INET_RESOLVE_RTR | MBUF_PKTAUXF_INET6_RESOLVE_RTR));
+
+ return (0);
+}
+
+errno_t
+mbuf_get_driver_scratch(mbuf_t m, u_int8_t **area, size_t *area_len)
+{
+ if (m == NULL || area == NULL || area_len == NULL ||
+ !(m->m_flags & M_PKTHDR))
+ return (EINVAL);
+
+ *area_len = m_scratch_get(m, area);
+ return (0);
+}
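/*
 * Illustrative usage sketch (editorial note, not part of this change):
 * a driver borrowing the per-packet scratch area for its own bookkeeping
 * while it owns the mbuf.
 *
 *    u_int8_t *scratch;
 *    size_t scratch_len;
 *
 *    if (mbuf_get_driver_scratch(m, &scratch, &scratch_len) == 0 &&
 *        scratch_len >= sizeof (u_int64_t))
 *        bzero(scratch, sizeof (u_int64_t));
 */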