+
/*
 * Handle the SIOCSIFLOG and SIOCGIFLOG interface ioctls: set or
 * retrieve the per-interface logging parameters (level, facility
 * flags, category and subcategory) carried in ifr->ifr_log.
 *
 * Setting (SIOCSIFLOG) requires the PRIV_NET_INTERFACE_CONTROL
 * privilege.  Returns 0 on success; EINVAL if the requested level is
 * out of range or no valid facility flag was supplied; otherwise the
 * error from priv_check_cred()/ifnet_set_log()/ifnet_get_log().
 */
errno_t
ifnet_getset_log(ifnet_t ifp, u_long cmd, struct ifreq *ifr,
    struct proc *p)
{
#pragma unused(p)
	errno_t result = 0;
	uint32_t flags;
	int level, category, subcategory;

	VERIFY(cmd == SIOCSIFLOG || cmd == SIOCGIFLOG);

	if (cmd == SIOCSIFLOG) {
		/* Only privileged callers may change logging parameters */
		if ((result = priv_check_cred(kauth_cred_get(),
		    PRIV_NET_INTERFACE_CONTROL, 0)) != 0)
			return (result);

		level = ifr->ifr_log.ifl_level;
		if (level < IFNET_LOG_MIN || level > IFNET_LOG_MAX)
			result = EINVAL;

		/* Strip unknown bits; at least one known flag must remain */
		flags = ifr->ifr_log.ifl_flags;
		if ((flags &= IFNET_LOGF_MASK) == 0)
			result = EINVAL;

		category = ifr->ifr_log.ifl_category;
		subcategory = ifr->ifr_log.ifl_subcategory;

		/* Apply only if both level and flags validated above */
		if (result == 0)
			result = ifnet_set_log(ifp, level, flags,
			    category, subcategory);
	} else {
		result = ifnet_get_log(ifp, &level, &flags, &category,
		    &subcategory);
		if (result == 0) {
			/* Copy current parameters back to the caller */
			ifr->ifr_log.ifl_level = level;
			ifr->ifr_log.ifl_flags = flags;
			ifr->ifr_log.ifl_category = category;
			ifr->ifr_log.ifl_subcategory = subcategory;
		}
	}

	return (result);
}
+
/*
 * Set the logging level and facility flags on an interface.  The
 * request is also forwarded to the driver via its output control
 * callback (if registered), minus the DLIL-only facility bit; when
 * no callback exists, facilities other than DLIL's own are silently
 * dropped.  Returns 0 on success or the driver callback's error.
 */
int
ifnet_set_log(struct ifnet *ifp, int32_t level, uint32_t flags,
    int32_t category, int32_t subcategory)
{
	int err = 0;

	VERIFY(level >= IFNET_LOG_MIN && level <= IFNET_LOG_MAX);
	VERIFY(flags & IFNET_LOGF_MASK);

	/*
	 * The logging level applies to all facilities; make sure to
	 * update them all with the most current level.
	 */
	flags |= ifp->if_log.flags;

	if (ifp->if_output_ctl != NULL) {
		struct ifnet_log_params l;

		bzero(&l, sizeof (l));
		l.level = level;
		l.flags = flags;
		/* DLIL facility is handled here, not by the driver */
		l.flags &= ~IFNET_LOGF_DLIL;
		l.category = category;
		l.subcategory = subcategory;

		/* Send this request to lower layers */
		if (l.flags != 0) {
			err = ifp->if_output_ctl(ifp, IFNET_CTL_SET_LOG,
			    sizeof (l), &l);
		}
	} else if ((flags & ~IFNET_LOGF_DLIL) && ifp->if_output_ctl == NULL) {
		/*
		 * If targeted to the lower layers without an output
		 * control callback registered on the interface, just
		 * silently ignore facilities other than ours.
		 */
		flags &= IFNET_LOGF_DLIL;
		/* nothing left to log at all: reset level too */
		if (flags == 0 && (!(ifp->if_log.flags & IFNET_LOGF_DLIL)))
			level = 0;
	}

	if (err == 0) {
		/* note: assignment inside condition is intentional */
		if ((ifp->if_log.level = level) == IFNET_LOG_DEFAULT)
			ifp->if_log.flags = 0;
		else
			ifp->if_log.flags |= flags;

		log(LOG_INFO, "%s: logging level set to %d flags=%b "
		    "arg=%b, category=%d subcategory=%d\n", if_name(ifp),
		    ifp->if_log.level, ifp->if_log.flags,
		    IFNET_LOGF_BITS, flags, IFNET_LOGF_BITS,
		    category, subcategory);
	}

	return (err);
}
+
+int
+ifnet_get_log(struct ifnet *ifp, int32_t *level, uint32_t *flags,
+ int32_t *category, int32_t *subcategory)
+{
+ if (level != NULL)
+ *level = ifp->if_log.level;
+ if (flags != NULL)
+ *flags = ifp->if_log.flags;
+ if (category != NULL)
+ *category = ifp->if_log.category;
+ if (subcategory != NULL)
+ *subcategory = ifp->if_log.subcategory;
+
+ return (0);
+}
+
/*
 * Notify the driver that an address of family 'af' changed on the
 * interface.  The packet filter hook is invoked unconditionally
 * (when PF is compiled in) before the driver notification; returns
 * EOPNOTSUPP if the driver registered no output control callback.
 */
int
ifnet_notify_address(struct ifnet *ifp, int af)
{
	struct ifnet_notify_address_params na;

#if PF
	/* Let the packet filter observe the address change first */
	(void) pf_ifaddr_hook(ifp);
#endif /* PF */

	if (ifp->if_output_ctl == NULL)
		return (EOPNOTSUPP);

	bzero(&na, sizeof (na));
	na.address_family = af;

	return (ifp->if_output_ctl(ifp, IFNET_CTL_NOTIFY_ADDRESS,
	    sizeof (na), &na));
}
+
+errno_t
+ifnet_flowid(struct ifnet *ifp, uint32_t *flowid)
+{
+ if (ifp == NULL || flowid == NULL) {
+ return (EINVAL);
+ } else if (!(ifp->if_eflags & IFEF_TXSTART) ||
+ !(ifp->if_refflags & IFRF_ATTACHED)) {
+ return (ENXIO);
+ }
+
+ *flowid = ifp->if_flowhash;
+
+ return (0);
+}
+
+errno_t
+ifnet_disable_output(struct ifnet *ifp)
+{
+ int err;
+
+ if (ifp == NULL) {
+ return (EINVAL);
+ } else if (!(ifp->if_eflags & IFEF_TXSTART) ||
+ !(ifp->if_refflags & IFRF_ATTACHED)) {
+ return (ENXIO);
+ }
+
+ if ((err = ifnet_fc_add(ifp)) == 0) {
+ lck_mtx_lock_spin(&ifp->if_start_lock);
+ ifp->if_start_flags |= IFSF_FLOW_CONTROLLED;
+ lck_mtx_unlock(&ifp->if_start_lock);
+ }
+ return (err);
+}
+
+errno_t
+ifnet_enable_output(struct ifnet *ifp)
+{
+ if (ifp == NULL) {
+ return (EINVAL);
+ } else if (!(ifp->if_eflags & IFEF_TXSTART) ||
+ !(ifp->if_refflags & IFRF_ATTACHED)) {
+ return (ENXIO);
+ }
+
+ ifnet_start_common(ifp, 1);
+ return (0);
+}
+
/*
 * Flow advisory: called when the flow identified by 'flowhash' may
 * transmit again.  Looks up (and removes) the matching flow-control
 * entry and re-enables output on its interface, provided the
 * interface is still attached and its flow hash still matches.
 */
void
ifnet_flowadv(uint32_t flowhash)
{
	struct ifnet_fc_entry *ifce;
	struct ifnet *ifp;

	/* ifnet_fc_get() removes the entry from the tree on success */
	ifce = ifnet_fc_get(flowhash);
	if (ifce == NULL)
		return;

	VERIFY(ifce->ifce_ifp != NULL);
	ifp = ifce->ifce_ifp;

	/* flow hash gets recalculated per attach, so check */
	if (ifnet_is_attached(ifp, 1)) {
		if (ifp->if_flowhash == flowhash)
			(void) ifnet_enable_output(ifp);
		/* drop the I/O reference taken by ifnet_is_attached() */
		ifnet_decr_iorefcnt(ifp);
	}
	ifnet_fc_entry_free(ifce);
}
+
+/*
+ * Function to compare ifnet_fc_entries in ifnet flow control tree
+ */
+static inline int
+ifce_cmp(const struct ifnet_fc_entry *fc1, const struct ifnet_fc_entry *fc2)
+{
+ return (fc1->ifce_flowhash - fc2->ifce_flowhash);
+}
+
/*
 * Insert an entry for 'ifp' (keyed by its flow hash) into the
 * flow-control tree.  Returns 0 if the entry was inserted or already
 * present for this interface, EAGAIN on a flow-hash collision with a
 * different interface, or ENOMEM if allocation fails.
 */
static int
ifnet_fc_add(struct ifnet *ifp)
{
	struct ifnet_fc_entry keyfc, *ifce;
	uint32_t flowhash;

	VERIFY(ifp != NULL && (ifp->if_eflags & IFEF_TXSTART));
	VERIFY(ifp->if_flowhash != 0);
	flowhash = ifp->if_flowhash;

	/* search key: only the flow hash matters for the lookup */
	bzero(&keyfc, sizeof (keyfc));
	keyfc.ifce_flowhash = flowhash;

	lck_mtx_lock_spin(&ifnet_fc_lock);
	ifce = RB_FIND(ifnet_fc_tree, &ifnet_fc_tree, &keyfc);
	if (ifce != NULL && ifce->ifce_ifp == ifp) {
		/* Entry is already in ifnet_fc_tree, return */
		lck_mtx_unlock(&ifnet_fc_lock);
		return (0);
	}

	if (ifce != NULL) {
		/*
		 * There is a different fc entry with the same flow hash
		 * but different ifp pointer.  There can be a collision
		 * on flow hash but the probability is low.  Let's just
		 * avoid adding a second one when there is a collision.
		 */
		lck_mtx_unlock(&ifnet_fc_lock);
		return (EAGAIN);
	}

	/* become regular mutex; zalloc_noblock may not run under spin */
	lck_mtx_convert_spin(&ifnet_fc_lock);

	ifce = zalloc_noblock(ifnet_fc_zone);
	if (ifce == NULL) {
		/* memory allocation failed */
		lck_mtx_unlock(&ifnet_fc_lock);
		return (ENOMEM);
	}
	bzero(ifce, ifnet_fc_zone_size);

	ifce->ifce_flowhash = flowhash;
	ifce->ifce_ifp = ifp;

	RB_INSERT(ifnet_fc_tree, &ifnet_fc_tree, ifce);
	lck_mtx_unlock(&ifnet_fc_lock);
	return (0);
}
+
/*
 * Look up and remove the flow-control entry keyed by 'flowhash'.
 * Returns the detached entry (ownership transfers to the caller,
 * who must free it with ifnet_fc_entry_free()), or NULL if there is
 * no such entry or its interface is no longer attached.
 */
static struct ifnet_fc_entry *
ifnet_fc_get(uint32_t flowhash)
{
	struct ifnet_fc_entry keyfc, *ifce;
	struct ifnet *ifp;

	/* search key: only the flow hash matters for the lookup */
	bzero(&keyfc, sizeof (keyfc));
	keyfc.ifce_flowhash = flowhash;

	lck_mtx_lock_spin(&ifnet_fc_lock);
	ifce = RB_FIND(ifnet_fc_tree, &ifnet_fc_tree, &keyfc);
	if (ifce == NULL) {
		/* Entry is not present in ifnet_fc_tree, return */
		lck_mtx_unlock(&ifnet_fc_lock);
		return (NULL);
	}

	/* detach the entry; it is never returned while still in the tree */
	RB_REMOVE(ifnet_fc_tree, &ifnet_fc_tree, ifce);

	VERIFY(ifce->ifce_ifp != NULL);
	ifp = ifce->ifce_ifp;

	/* become regular mutex */
	lck_mtx_convert_spin(&ifnet_fc_lock);

	if (!ifnet_is_attached(ifp, 0)) {
		/*
		 * This ifp is not attached or in the process of being
		 * detached; just don't process it.
		 */
		ifnet_fc_entry_free(ifce);
		ifce = NULL;
	}
	lck_mtx_unlock(&ifnet_fc_lock);

	return (ifce);
}
+
/*
 * Return a flow-control entry to its zone.  Entries handed out by
 * ifnet_fc_get() have already been removed from ifnet_fc_tree.
 */
static void
ifnet_fc_entry_free(struct ifnet_fc_entry *ifce)
{
	zfree(ifnet_fc_zone, ifce);
}
+
+static uint32_t
+ifnet_calc_flowhash(struct ifnet *ifp)
+{
+ struct ifnet_flowhash_key fh __attribute__((aligned(8)));
+ uint32_t flowhash = 0;
+
+ if (ifnet_flowhash_seed == 0)
+ ifnet_flowhash_seed = RandomULong();
+
+ bzero(&fh, sizeof (fh));
+
+ (void) snprintf(fh.ifk_name, sizeof (fh.ifk_name), "%s", ifp->if_name);
+ fh.ifk_unit = ifp->if_unit;
+ fh.ifk_flags = ifp->if_flags;
+ fh.ifk_eflags = ifp->if_eflags;
+ fh.ifk_capabilities = ifp->if_capabilities;
+ fh.ifk_capenable = ifp->if_capenable;
+ fh.ifk_output_sched_model = ifp->if_output_sched_model;
+ fh.ifk_rand1 = RandomULong();
+ fh.ifk_rand2 = RandomULong();
+
+try_again:
+ flowhash = net_flowhash(&fh, sizeof (fh), ifnet_flowhash_seed);
+ if (flowhash == 0) {
+ /* try to get a non-zero flowhash */
+ ifnet_flowhash_seed = RandomULong();
+ goto try_again;
+ }
+
+ return (flowhash);
+}
+
+int
+ifnet_set_netsignature(struct ifnet *ifp, uint8_t family, uint8_t len,
+ uint16_t flags, uint8_t *data)
+{
+#pragma unused(flags)
+ int error = 0;
+
+ switch (family) {
+ case AF_INET:
+ if_inetdata_lock_exclusive(ifp);
+ if (IN_IFEXTRA(ifp) != NULL) {
+ if (len == 0) {
+ /* Allow clearing the signature */
+ IN_IFEXTRA(ifp)->netsig_len = 0;
+ bzero(IN_IFEXTRA(ifp)->netsig,
+ sizeof (IN_IFEXTRA(ifp)->netsig));
+ if_inetdata_lock_done(ifp);
+ break;
+ } else if (len > sizeof (IN_IFEXTRA(ifp)->netsig)) {
+ error = EINVAL;
+ if_inetdata_lock_done(ifp);
+ break;
+ }
+ IN_IFEXTRA(ifp)->netsig_len = len;
+ bcopy(data, IN_IFEXTRA(ifp)->netsig, len);
+ } else {
+ error = ENOMEM;
+ }
+ if_inetdata_lock_done(ifp);
+ break;
+
+ case AF_INET6:
+ if_inet6data_lock_exclusive(ifp);
+ if (IN6_IFEXTRA(ifp) != NULL) {
+ if (len == 0) {
+ /* Allow clearing the signature */
+ IN6_IFEXTRA(ifp)->netsig_len = 0;
+ bzero(IN6_IFEXTRA(ifp)->netsig,
+ sizeof (IN6_IFEXTRA(ifp)->netsig));
+ if_inet6data_lock_done(ifp);
+ break;
+ } else if (len > sizeof (IN6_IFEXTRA(ifp)->netsig)) {
+ error = EINVAL;
+ if_inet6data_lock_done(ifp);
+ break;
+ }
+ IN6_IFEXTRA(ifp)->netsig_len = len;
+ bcopy(data, IN6_IFEXTRA(ifp)->netsig, len);
+ } else {
+ error = ENOMEM;
+ }
+ if_inet6data_lock_done(ifp);
+ break;
+
+ default:
+ error = EINVAL;
+ break;
+ }
+
+ return (error);
+}
+
/*
 * Copy out the network signature for the given address family.  On
 * entry *len is the caller's buffer size; on success it is updated to
 * the actual signature length and *flags is cleared.  Returns EINVAL
 * for NULL arguments, a bad family, or a too-small buffer; ENOENT if
 * no signature is installed; ENOMEM if per-family data is absent.
 */
int
ifnet_get_netsignature(struct ifnet *ifp, uint8_t family, uint8_t *len,
    uint16_t *flags, uint8_t *data)
{
	int error = 0;

	if (ifp == NULL || len == NULL || flags == NULL || data == NULL)
		return (EINVAL);

	switch (family) {
	case AF_INET:
		if_inetdata_lock_shared(ifp);
		if (IN_IFEXTRA(ifp) != NULL) {
			/* caller's buffer must fit the stored signature */
			if (*len == 0 || *len < IN_IFEXTRA(ifp)->netsig_len) {
				error = EINVAL;
				if_inetdata_lock_done(ifp);
				break;
			}
			/* note: assignment inside condition is intentional */
			if ((*len = IN_IFEXTRA(ifp)->netsig_len) > 0)
				bcopy(IN_IFEXTRA(ifp)->netsig, data, *len);
			else
				error = ENOENT;
		} else {
			error = ENOMEM;
		}
		if_inetdata_lock_done(ifp);
		break;

	case AF_INET6:
		if_inet6data_lock_shared(ifp);
		if (IN6_IFEXTRA(ifp) != NULL) {
			/* caller's buffer must fit the stored signature */
			if (*len == 0 || *len < IN6_IFEXTRA(ifp)->netsig_len) {
				error = EINVAL;
				if_inet6data_lock_done(ifp);
				break;
			}
			/* note: assignment inside condition is intentional */
			if ((*len = IN6_IFEXTRA(ifp)->netsig_len) > 0)
				bcopy(IN6_IFEXTRA(ifp)->netsig, data, *len);
			else
				error = ENOENT;
		} else {
			error = ENOMEM;
		}
		if_inet6data_lock_done(ifp);
		break;

	default:
		error = EINVAL;
		break;
	}

	if (error == 0)
		*flags = 0;

	return (error);
}
+
/*
 * Debug hook on the output path: when HWCKSUM_DBG_FINALIZE_FORCED is
 * enabled, force software finalization of the packet's checksums and
 * account how many headers/payloads were finalized.  TSO packets are
 * skipped, as are protocol families other than IPv4/IPv6.
 */
static void
dlil_output_cksum_dbg(struct ifnet *ifp, struct mbuf *m, uint32_t hoff,
    protocol_family_t pf)
{
#pragma unused(ifp)
	uint32_t did_sw;

	if (!(hwcksum_dbg_mode & HWCKSUM_DBG_FINALIZE_FORCED) ||
	    (m->m_pkthdr.csum_flags & (CSUM_TSO_IPV4|CSUM_TSO_IPV6)))
		return;

	switch (pf) {
	case PF_INET:
		/* 'hoff' is the offset of the IP header within the mbuf */
		did_sw = in_finalize_cksum(m, hoff, m->m_pkthdr.csum_flags);
		if (did_sw & CSUM_DELAY_IP)
			hwcksum_dbg_finalized_hdr++;
		if (did_sw & CSUM_DELAY_DATA)
			hwcksum_dbg_finalized_data++;
		break;
#if INET6
	case PF_INET6:
		/*
		 * Checksum offload should not have been enabled when
		 * extension headers exist; that also means that we
		 * cannot force-finalize packets with extension headers.
		 * Indicate to the callee should it skip such case by
		 * setting optlen to -1.
		 */
		did_sw = in6_finalize_cksum(m, hoff, -1, -1,
		    m->m_pkthdr.csum_flags);
		if (did_sw & CSUM_DELAY_IPV6_DATA)
			hwcksum_dbg_finalized_data++;
		break;
#endif /* INET6 */
	default:
		return;
	}
}
+
/*
 * Debug hook on the input path for partial checksum offload:
 *
 *  1. When HWCKSUM_DBG_PARTIAL_FORCED is set, emulate a NIC that
 *     only provides a partial (16-bit 1's complement) checksum from
 *     a forced start offset.
 *  2. When the packet carries a partial checksum, recompute and
 *     verify it against the hardware-provided value, counting
 *     mismatches (buggy hardware/driver).
 *  3. When HWCKSUM_DBG_PARTIAL_RXOFF_ADJ is set, re-anchor the
 *     partial sum at an adjusted start offset to emulate hardware
 *     that sums from different offsets.
 *
 * Only IPv4/IPv6 packets are considered; offsets in the packet
 * header are relative to the frame header, hence 'hlen' adjustments.
 */
static void
dlil_input_cksum_dbg(struct ifnet *ifp, struct mbuf *m, char *frame_header,
    protocol_family_t pf)
{
	uint16_t sum;
	uint32_t hlen;

	/* Sanity check: frame header must lie within the mbuf data area */
	if (frame_header == NULL ||
	    frame_header < (char *)mbuf_datastart(m) ||
	    frame_header > (char *)m->m_data) {
		printf("%s: frame header pointer 0x%llx out of range "
		    "[0x%llx,0x%llx] for mbuf 0x%llx\n", if_name(ifp),
		    (uint64_t)VM_KERNEL_ADDRPERM(frame_header),
		    (uint64_t)VM_KERNEL_ADDRPERM(mbuf_datastart(m)),
		    (uint64_t)VM_KERNEL_ADDRPERM(m->m_data),
		    (uint64_t)VM_KERNEL_ADDRPERM(m));
		return;
	}
	/* link-layer header length */
	hlen = (m->m_data - frame_header);

	switch (pf) {
	case PF_INET:
#if INET6
	case PF_INET6:
#endif /* INET6 */
		break;
	default:
		/* only IPv4/IPv6 packets are subject to these checks */
		return;
	}

	/*
	 * Force partial checksum offload; useful to simulate cases
	 * where the hardware does not support partial checksum offload,
	 * in order to validate correctness throughout the layers above.
	 */
	if (hwcksum_dbg_mode & HWCKSUM_DBG_PARTIAL_FORCED) {
		uint32_t foff = hwcksum_dbg_partial_rxoff_forced;

		if (foff > (uint32_t)m->m_pkthdr.len)
			return;

		m->m_pkthdr.csum_flags &= ~CSUM_RX_FLAGS;

		/* Compute 16-bit 1's complement sum from forced offset */
		sum = m_sum16(m, foff, (m->m_pkthdr.len - foff));

		m->m_pkthdr.csum_flags |= (CSUM_DATA_VALID | CSUM_PARTIAL);
		m->m_pkthdr.csum_rx_val = sum;
		m->m_pkthdr.csum_rx_start = (foff + hlen);

		hwcksum_dbg_partial_forced++;
		hwcksum_dbg_partial_forced_bytes += m->m_pkthdr.len;
	}

	/*
	 * Partial checksum offload verification (and adjustment);
	 * useful to validate and test cases where the hardware
	 * supports partial checksum offload.
	 */
	if ((m->m_pkthdr.csum_flags &
	    (CSUM_DATA_VALID | CSUM_PARTIAL | CSUM_PSEUDO_HDR)) ==
	    (CSUM_DATA_VALID | CSUM_PARTIAL)) {
		uint32_t rxoff;

		/* Start offset must begin after frame header */
		rxoff = m->m_pkthdr.csum_rx_start;
		if (hlen > rxoff) {
			hwcksum_dbg_bad_rxoff++;
			if (dlil_verbose) {
				printf("%s: partial cksum start offset %d "
				    "is less than frame header length %d for "
				    "mbuf 0x%llx\n", if_name(ifp), rxoff, hlen,
				    (uint64_t)VM_KERNEL_ADDRPERM(m));
			}
			return;
		}
		/* make the offset relative to the start of the payload */
		rxoff -= hlen;

		if (!(hwcksum_dbg_mode & HWCKSUM_DBG_PARTIAL_FORCED)) {
			/*
			 * Compute the expected 16-bit 1's complement sum;
			 * skip this if we've already computed it above
			 * when partial checksum offload is forced.
			 */
			sum = m_sum16(m, rxoff, (m->m_pkthdr.len - rxoff));

			/* Hardware or driver is buggy */
			if (sum != m->m_pkthdr.csum_rx_val) {
				hwcksum_dbg_bad_cksum++;
				if (dlil_verbose) {
					printf("%s: bad partial cksum value "
					    "0x%x (expected 0x%x) for mbuf "
					    "0x%llx [rx_start %d]\n",
					    if_name(ifp),
					    m->m_pkthdr.csum_rx_val, sum,
					    (uint64_t)VM_KERNEL_ADDRPERM(m),
					    m->m_pkthdr.csum_rx_start);
				}
				return;
			}
		}
		hwcksum_dbg_verified++;

		/*
		 * This code allows us to emulate various hardwares that
		 * perform 16-bit 1's complement sum beginning at various
		 * start offset values.
		 */
		if (hwcksum_dbg_mode & HWCKSUM_DBG_PARTIAL_RXOFF_ADJ) {
			uint32_t aoff = hwcksum_dbg_partial_rxoff_adj;

			if (aoff == rxoff || aoff > (uint32_t)m->m_pkthdr.len)
				return;

			/* re-anchor the sum at the adjusted offset */
			sum = m_adj_sum16(m, rxoff, aoff, sum);

			m->m_pkthdr.csum_rx_val = sum;
			m->m_pkthdr.csum_rx_start = (aoff + hlen);

			hwcksum_dbg_adjusted++;
		}
	}
}
+
+static int
+sysctl_hwcksum_dbg_mode SYSCTL_HANDLER_ARGS
+{
+#pragma unused(arg1, arg2)
+ u_int32_t i;
+ int err;
+
+ i = hwcksum_dbg_mode;
+
+ err = sysctl_handle_int(oidp, &i, 0, req);
+ if (err != 0 || req->newptr == USER_ADDR_NULL)
+ return (err);
+
+ if (hwcksum_dbg == 0)
+ return (ENODEV);
+
+ if ((i & ~HWCKSUM_DBG_MASK) != 0)
+ return (EINVAL);
+
+ hwcksum_dbg_mode = (i & HWCKSUM_DBG_MASK);
+
+ return (err);
+}
+
+static int
+sysctl_hwcksum_dbg_partial_rxoff_forced SYSCTL_HANDLER_ARGS
+{
+#pragma unused(arg1, arg2)
+ u_int32_t i;
+ int err;
+
+ i = hwcksum_dbg_partial_rxoff_forced;
+
+ err = sysctl_handle_int(oidp, &i, 0, req);
+ if (err != 0 || req->newptr == USER_ADDR_NULL)
+ return (err);
+
+ if (!(hwcksum_dbg_mode & HWCKSUM_DBG_PARTIAL_FORCED))
+ return (ENODEV);
+
+ hwcksum_dbg_partial_rxoff_forced = i;
+
+ return (err);
+}
+
+static int
+sysctl_hwcksum_dbg_partial_rxoff_adj SYSCTL_HANDLER_ARGS
+{
+#pragma unused(arg1, arg2)
+ u_int32_t i;
+ int err;
+
+ i = hwcksum_dbg_partial_rxoff_adj;
+
+ err = sysctl_handle_int(oidp, &i, 0, req);
+ if (err != 0 || req->newptr == USER_ADDR_NULL)
+ return (err);
+
+ if (!(hwcksum_dbg_mode & HWCKSUM_DBG_PARTIAL_RXOFF_ADJ))
+ return (ENODEV);
+
+ hwcksum_dbg_partial_rxoff_adj = i;
+
+ return (err);
+}
+
+static int
+sysctl_tx_chain_len_stats SYSCTL_HANDLER_ARGS
+{
+#pragma unused(oidp, arg1, arg2)
+ int err;
+
+ if (req->oldptr == USER_ADDR_NULL) {
+
+ }
+ if (req->newptr != USER_ADDR_NULL) {
+ return (EPERM);
+ }
+ err = SYSCTL_OUT(req, &tx_chain_len_stats,
+ sizeof(struct chain_len_stats));
+
+ return (err);
+}
+
+
+#if DEBUG
/*
 * Blob for sum16 verification: 325 bytes of opaque test data (it
 * appears to be gzip output — note the 0x1f 0x8b magic — but the
 * contents are irrelevant).  The expected 16-bit 1's complement
 * sums over various prefixes of this data are listed in sumtbl[].
 */
static uint8_t sumdata[] = {
	0x1f, 0x8b, 0x08, 0x08, 0x4c, 0xe5, 0x9a, 0x4f, 0x00, 0x03,
	0x5f, 0x00, 0x5d, 0x91, 0x41, 0x4e, 0xc4, 0x30, 0x0c, 0x45,
	0xf7, 0x9c, 0xc2, 0x07, 0x18, 0xf5, 0x0e, 0xb0, 0xe2, 0x00,
	0x48, 0x88, 0xa5, 0xdb, 0xba, 0x49, 0x34, 0x69, 0xdc, 0x71,
	0x92, 0xa9, 0xc2, 0x8a, 0x6b, 0x70, 0x3d, 0x4e, 0x82, 0x93,
	0xb4, 0x08, 0xd8, 0xc5, 0xb1, 0xfd, 0xff, 0xb3, 0xfd, 0x4c,
	0x42, 0x5f, 0x1f, 0x9f, 0x11, 0x12, 0x43, 0xb2, 0x04, 0x93,
	0xe0, 0x7b, 0x01, 0x0e, 0x14, 0x07, 0x78, 0xd1, 0x78, 0x75,
	0x71, 0x71, 0xe9, 0x08, 0x84, 0x46, 0xf2, 0xc7, 0x3b, 0x09,
	0xe7, 0xd1, 0xd3, 0x8a, 0x57, 0x92, 0x33, 0xcd, 0x39, 0xcc,
	0xb0, 0x91, 0x89, 0xe0, 0x42, 0x53, 0x8b, 0xb7, 0x8c, 0x42,
	0x60, 0xd9, 0x9f, 0x7a, 0x55, 0x19, 0x76, 0xcb, 0x10, 0x49,
	0x35, 0xac, 0x0b, 0x5a, 0x3c, 0xbb, 0x65, 0x51, 0x8c, 0x90,
	0x7c, 0x69, 0x45, 0x45, 0x81, 0xb4, 0x2b, 0x70, 0x82, 0x85,
	0x55, 0x91, 0x17, 0x90, 0xdc, 0x14, 0x1e, 0x35, 0x52, 0xdd,
	0x02, 0x16, 0xef, 0xb5, 0x40, 0x89, 0xe2, 0x46, 0x53, 0xad,
	0x93, 0x6e, 0x98, 0x30, 0xe5, 0x08, 0xb7, 0xcc, 0x03, 0xbc,
	0x71, 0x86, 0x09, 0x43, 0x0d, 0x52, 0xf5, 0xa2, 0xf5, 0xa2,
	0x56, 0x11, 0x8d, 0xa8, 0xf5, 0xee, 0x92, 0x3d, 0xfe, 0x8c,
	0x67, 0x71, 0x8b, 0x0e, 0x2d, 0x70, 0x77, 0xbe, 0xbe, 0xea,
	0xbf, 0x9a, 0x8d, 0x9c, 0x53, 0x53, 0xe5, 0xe0, 0x4b, 0x87,
	0x85, 0xd2, 0x45, 0x95, 0x30, 0xc1, 0xcc, 0xe0, 0x74, 0x54,
	0x13, 0x58, 0xe8, 0xe8, 0x79, 0xa2, 0x09, 0x73, 0xa4, 0x0e,
	0x39, 0x59, 0x0c, 0xe6, 0x9c, 0xb2, 0x4f, 0x06, 0x5b, 0x8e,
	0xcd, 0x17, 0x6c, 0x5e, 0x95, 0x4d, 0x70, 0xa2, 0x0a, 0xbf,
	0xa3, 0xcc, 0x03, 0xbc, 0x5a, 0xe7, 0x75, 0x06, 0x5e, 0x75,
	0xef, 0x58, 0x8e, 0x15, 0xd1, 0x0a, 0x18, 0xff, 0xdd, 0xe6,
	0x02, 0x3b, 0xb5, 0xb4, 0xa1, 0xe0, 0x72, 0xfc, 0xe3, 0xab,
	0x07, 0xe0, 0x4d, 0x65, 0xea, 0x92, 0xeb, 0xf2, 0x7b, 0x17,
	0x05, 0xce, 0xc6, 0xf6, 0x2b, 0xbb, 0x70, 0x3d, 0x00, 0x95,
	0xe0, 0x07, 0x52, 0x3b, 0x58, 0xfc, 0x7c, 0x69, 0x4d, 0xe9,
	0xf7, 0xa9, 0x66, 0x1e, 0x1e, 0xbe, 0x01, 0x69, 0x98, 0xfe,
	0xc8, 0x28, 0x02, 0x00, 0x00
};
+
/* Precomputed 16-bit 1's complement sums for various spans of the above data */
static struct {
	int len;	/* number of bytes summed from the start of sumdata */
	uint16_t sum;	/* expected 16-bit 1's complement sum */
} sumtbl[] = {
	{ 11, 0xcb6d },
	{ 20, 0x20dd },
	{ 27, 0xbabd },
	{ 32, 0xf3e8 },
	{ 37, 0x197d },
	{ 43, 0x9eae },
	{ 64, 0x4678 },
	{ 127, 0x9399 },
	{ 256, 0xd147 },
	{ 325, 0x0358 }
};
/* number of entries in sumtbl */
#define SUMTBL_MAX ((int)sizeof (sumtbl) / (int)sizeof (sumtbl[0]))
+
/*
 * Self-test for the m_sum16()/b_sum16() 16-bit 1's complement sum
 * routines: for every (length, expected-sum) pair in sumtbl[] and
 * every byte alignment 0..7, copy the test blob into an mbuf and
 * verify the computed sums match; panic on any mismatch.  Run only
 * in DEBUG kernels.
 */
static void
dlil_verify_sum16(void)
{
	struct mbuf *m;
	uint8_t *buf;
	int n;

	/* Make sure test data plus extra room for alignment fits in cluster */
	_CASSERT((sizeof (sumdata) + (sizeof (uint64_t) * 2)) <= MCLBYTES);

	m = m_getcl(M_WAITOK, MT_DATA, M_PKTHDR);
	MH_ALIGN(m, sizeof (uint32_t));		/* 32-bit starting alignment */
	buf = mtod(m, uint8_t *);		/* base address */

	for (n = 0; n < SUMTBL_MAX; n++) {
		uint16_t len = sumtbl[n].len;
		int i;

		/* Verify for all possible alignments */
		for (i = 0; i < (int)sizeof (uint64_t); i++) {
			uint16_t sum;
			uint8_t *c;

			/* Copy over test data to mbuf */
			VERIFY(len <= sizeof (sumdata));
			c = buf + i;
			bcopy(sumdata, c, len);

			/* Zero-offset test (align by data pointer) */
			m->m_data = (caddr_t)c;
			m->m_len = len;
			sum = m_sum16(m, 0, len);

			/* Something is horribly broken; stop now */
			if (sum != sumtbl[n].sum) {
				panic("%s: broken m_sum16 for len=%d align=%d "
				    "sum=0x%04x [expected=0x%04x]\n", __func__,
				    len, i, sum, sumtbl[n].sum);
				/* NOTREACHED */
			}

			/* Alignment test by offset (fixed data pointer) */
			m->m_data = (caddr_t)buf;
			m->m_len = i + len;
			sum = m_sum16(m, i, len);

			/* Something is horribly broken; stop now */
			if (sum != sumtbl[n].sum) {
				panic("%s: broken m_sum16 for len=%d offset=%d "
				    "sum=0x%04x [expected=0x%04x]\n", __func__,
				    len, i, sum, sumtbl[n].sum);
				/* NOTREACHED */
			}
#if INET
			/* Simple sum16 contiguous buffer test by aligment */
			sum = b_sum16(c, len);

			/* Something is horribly broken; stop now */
			if (sum != sumtbl[n].sum) {
				panic("%s: broken b_sum16 for len=%d align=%d "
				    "sum=0x%04x [expected=0x%04x]\n", __func__,
				    len, i, sum, sumtbl[n].sum);
				/* NOTREACHED */
			}
#endif /* INET */
		}
	}
	m_freem(m);

	printf("DLIL: SUM16 self-tests PASSED\n");
}
+#endif /* DEBUG */
+
/* Expand to a case label returning the symbol's name as a string */
#define CASE_STRINGIFY(x) case x: return #x

/*
 * Map a KEV_DL_* kernel event code to its symbolic name for logging;
 * returns the empty string for unrecognized codes.
 */
__private_extern__ const char *
dlil_kev_dl_code_str(u_int32_t event_code)
{
	switch (event_code) {
	CASE_STRINGIFY(KEV_DL_SIFFLAGS);
	CASE_STRINGIFY(KEV_DL_SIFMETRICS);
	CASE_STRINGIFY(KEV_DL_SIFMTU);
	CASE_STRINGIFY(KEV_DL_SIFPHYS);
	CASE_STRINGIFY(KEV_DL_SIFMEDIA);
	CASE_STRINGIFY(KEV_DL_SIFGENERIC);
	CASE_STRINGIFY(KEV_DL_ADDMULTI);
	CASE_STRINGIFY(KEV_DL_DELMULTI);
	CASE_STRINGIFY(KEV_DL_IF_ATTACHED);
	CASE_STRINGIFY(KEV_DL_IF_DETACHING);
	CASE_STRINGIFY(KEV_DL_IF_DETACHED);
	CASE_STRINGIFY(KEV_DL_LINK_OFF);
	CASE_STRINGIFY(KEV_DL_LINK_ON);
	CASE_STRINGIFY(KEV_DL_PROTO_ATTACHED);
	CASE_STRINGIFY(KEV_DL_PROTO_DETACHED);
	CASE_STRINGIFY(KEV_DL_LINK_ADDRESS_CHANGED);
	CASE_STRINGIFY(KEV_DL_WAKEFLAGS_CHANGED);
	CASE_STRINGIFY(KEV_DL_IF_IDLE_ROUTE_REFCNT);
	CASE_STRINGIFY(KEV_DL_IFCAP_CHANGED);
	CASE_STRINGIFY(KEV_DL_LINK_QUALITY_METRIC_CHANGED);
	CASE_STRINGIFY(KEV_DL_NODE_PRESENCE);
	CASE_STRINGIFY(KEV_DL_NODE_ABSENCE);
	CASE_STRINGIFY(KEV_DL_MASTER_ELECTED);
	CASE_STRINGIFY(KEV_DL_ISSUES);
	CASE_STRINGIFY(KEV_DL_IFDELEGATE_CHANGED);
	default:
		break;
	}
	return ("");
}
+
+/*
+ * Mirror the arguments of ifnet_get_local_ports_extended()
+ * ifindex
+ * protocol
+ * flags
+ */
+static int
+sysctl_get_ports_used SYSCTL_HANDLER_ARGS
+{
+#pragma unused(oidp)
+ int *name = (int *)arg1;
+ int namelen = arg2;
+ int error = 0;
+ int idx;
+ protocol_family_t protocol;
+ u_int32_t flags;
+ ifnet_t ifp = NULL;
+ u_int8_t *bitfield = NULL;
+
+ if (req->newptr != USER_ADDR_NULL) {
+ error = EPERM;
+ goto done;
+ }
+ if (namelen != 3) {
+ error = ENOENT;
+ goto done;
+ }
+
+ if (req->oldptr == USER_ADDR_NULL) {
+ req->oldidx = bitstr_size(65536);
+ goto done;
+ }
+ if (req->oldlen < bitstr_size(65536)) {
+ error = ENOMEM;
+ goto done;
+ }
+
+ idx = name[0];
+ protocol = name[1];
+ flags = name[2];
+
+ ifnet_head_lock_shared();
+ if (idx > if_index) {
+ ifnet_head_done();
+ error = ENOENT;
+ goto done;
+ }
+ ifp = ifindex2ifnet[idx];
+ ifnet_head_done();
+
+ bitfield = _MALLOC(bitstr_size(65536), M_TEMP, M_WAITOK);
+ if (bitfield == NULL) {
+ error = ENOMEM;
+ goto done;
+ }
+ error = ifnet_get_local_ports_extended(ifp, protocol, flags, bitfield);
+ if (error != 0) {
+ printf("%s: ifnet_get_local_ports_extended() error %d\n",
+ __func__, error);
+ goto done;
+ }
+ error = SYSCTL_OUT(req, bitfield, bitstr_size(65536));
+done:
+ if (bitfield != NULL)
+ _FREE(bitfield, M_TEMP);
+ return (error);
+}
+
+#if (DEVELOPMENT || DEBUG)
+/*
+ * The sysctl variable name contains the input parameters of
+ * ifnet_get_keepalive_offload_frames()
+ * ifp (interface index): name[0]
+ * frames_array_count: name[1]
+ * frame_data_offset: name[2]
+ * The return length gives used_frames_count
+ */
+static int
+sysctl_get_kao_frames SYSCTL_HANDLER_ARGS
+{
+#pragma unused(oidp)
+ int *name = (int *)arg1;
+ u_int namelen = arg2;
+ int idx;
+ ifnet_t ifp = NULL;
+ u_int32_t frames_array_count;
+ size_t frame_data_offset;
+ u_int32_t used_frames_count;
+ struct ifnet_keepalive_offload_frame *frames_array = NULL;
+ int error = 0;
+ u_int32_t i;
+
+ /*
+ * Only root can get look at other people TCP frames
+ */
+ error = proc_suser(current_proc());
+ if (error != 0)
+ goto done;
+ /*
+ * Validate the input parameters
+ */
+ if (req->newptr != USER_ADDR_NULL) {
+ error = EPERM;
+ goto done;
+ }
+ if (namelen != 3) {
+ error = EINVAL;
+ goto done;
+ }
+ if (req->oldptr == USER_ADDR_NULL) {
+ error = EINVAL;
+ goto done;
+ }
+ if (req->oldlen == 0) {
+ error = EINVAL;
+ goto done;
+ }
+ idx = name[0];
+ frames_array_count = name[1];
+ frame_data_offset = name[2];
+
+ /* Make sure the passed buffer is large enough */
+ if (frames_array_count * sizeof(struct ifnet_keepalive_offload_frame) >
+ req->oldlen) {
+ error = ENOMEM;
+ goto done;
+ }
+
+ ifnet_head_lock_shared();
+ if (idx > if_index) {
+ ifnet_head_done();
+ error = ENOENT;
+ goto done;
+ }
+ ifp = ifindex2ifnet[idx];
+ ifnet_head_done();
+
+ frames_array = _MALLOC(frames_array_count *
+ sizeof(struct ifnet_keepalive_offload_frame), M_TEMP, M_WAITOK);
+ if (frames_array == NULL) {
+ error = ENOMEM;
+ goto done;
+ }
+
+ error = ifnet_get_keepalive_offload_frames(ifp, frames_array,
+ frames_array_count, frame_data_offset, &used_frames_count);
+ if (error != 0) {
+ printf("%s: ifnet_get_keepalive_offload_frames error %d\n",
+ __func__, error);
+ goto done;
+ }
+
+ for (i = 0; i < used_frames_count; i++) {
+ error = SYSCTL_OUT(req, frames_array + i,
+ sizeof(struct ifnet_keepalive_offload_frame));
+ if (error != 0) {
+ goto done;
+ }
+ }
+done:
+ if (frames_array != NULL)
+ _FREE(frames_array, M_TEMP);
+ return (error);
+}
+#endif /* DEVELOPMENT || DEBUG */