+#endif /* NSTF */
+}
+
+/*
+ * ip6_input_adjust
+ *
+ * Trim a received IPv6 packet whose mbuf chain carries more bytes than
+ * the length implied by the IPv6 header (sizeof(*ip6) + plen), e.g. due
+ * to link-layer padding appended by the driver, and reconcile any
+ * receive checksum metadata with the bytes being removed.
+ *
+ * Parameters:
+ *   m      received packet; caller guarantees m_pktlen(m) > tot_len
+ *          (enforced by the ASSERT below).
+ *   ip6    pointer to the IPv6 header within m.
+ *   plen   IPv6 payload length (presumably taken from ip6_plen in host
+ *          byte order -- confirm against caller).
+ *   inifp  interface the packet arrived on; used only to exempt
+ *          loopback traffic from the hardware-checksum invalidation.
+ */
+static void
+ip6_input_adjust(struct mbuf *m, struct ip6_hdr *ip6, uint32_t plen,
+ struct ifnet *inifp)
+{
+ boolean_t adjust = TRUE;
+ uint32_t tot_len = sizeof(*ip6) + plen;
+
+ ASSERT(m_pktlen(m) > tot_len);
+
+ /*
+ * Invalidate hardware checksum info if ip6_adj_clear_hwcksum
+ * is set; useful to handle buggy drivers. Note that this
+ * should not be enabled by default, as we may get here due
+ * to link-layer padding.
+ */
+ if (ip6_adj_clear_hwcksum &&
+ (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) &&
+ !(inifp->if_flags & IFF_LOOPBACK) &&
+ !(m->m_pkthdr.pkt_flags & PKTF_LOOP)) {
+ m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID;
+ m->m_pkthdr.csum_data = 0;
+ ip6stat.ip6s_adj_hwcsum_clr++;
+ }
+
+ /*
+ * If partial checksum information is available, subtract
+ * out the partial sum of postpended extraneous bytes, and
+ * update the checksum metadata accordingly. By doing it
+ * here, the upper layer transport only needs to adjust any
+ * prepended extraneous bytes (else it will do both.)
+ */
+ if (ip6_adj_partial_sum &&
+ (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PARTIAL)) ==
+ (CSUM_DATA_VALID | CSUM_PARTIAL)) {
+ m->m_pkthdr.csum_rx_val = m_adj_sum16(m,
+ m->m_pkthdr.csum_rx_start, m->m_pkthdr.csum_rx_start,
+ (tot_len - m->m_pkthdr.csum_rx_start),
+ m->m_pkthdr.csum_rx_val);
+ } else if ((m->m_pkthdr.csum_flags &
+ (CSUM_DATA_VALID | CSUM_PARTIAL)) ==
+ (CSUM_DATA_VALID | CSUM_PARTIAL)) {
+ /*
+ * If packet has partial checksum info and we decided not
+ * to subtract the partial sum of postpended extraneous
+ * bytes here (not the default case), leave that work to
+ * be handled by the other layers. For now, only TCP, UDP
+ * layers are capable of dealing with this. For all other
+ * protocols (including fragments), trim and ditch the
+ * partial sum as those layers might not implement partial
+ * checksumming (or adjustment) at all.
+ */
+ if (ip6->ip6_nxt == IPPROTO_TCP ||
+ ip6->ip6_nxt == IPPROTO_UDP) {
+ adjust = FALSE;
+ } else {
+ m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID;
+ m->m_pkthdr.csum_data = 0;
+ ip6stat.ip6s_adj_hwcsum_clr++;
+ }
+ }
+
+ if (adjust) {
+ ip6stat.ip6s_adj++;
+ /*
+ * Fast path: when the packet is a single mbuf, trimming is
+ * just a length fixup; otherwise hand the (negative) trim
+ * count to m_adj() to shave bytes off the tail of the chain.
+ */
+ if (m->m_len == m->m_pkthdr.len) {
+ m->m_len = tot_len;
+ m->m_pkthdr.len = tot_len;
+ } else {
+ m_adj(m, tot_len - m->m_pkthdr.len);
+ }
+ }
+}
+static ip6_check_if_result_t
+ip6_input_check_interface(struct mbuf *m, struct ip6_hdr *ip6, struct ifnet *inifp, struct route_in6 *rin6, struct ifnet **deliverifp)
+{
+ struct in6_ifaddr *ia6 = NULL;
+ struct in6_addr tmp_dst = ip6->ip6_dst; /* copy to avoid unaligned access */
+ struct in6_ifaddr *best_ia6 = NULL;
+ ip6_check_if_result_t result = IP6_CHECK_IF_NONE;
+
+ *deliverifp = NULL;
+
+ /*
+ * Check for exact addresses in the hash bucket.
+ */
+ lck_rw_lock_shared(&in6_ifaddr_rwlock);
+ TAILQ_FOREACH(ia6, IN6ADDR_HASH(&tmp_dst), ia6_hash) {
+ /*
+ * TODO: should we accept loopback addresses here?
+ */
+ if (IN6_ARE_ADDR_EQUAL(&ia6->ia_addr.sin6_addr, &tmp_dst)) {
+ if ((ia6->ia6_flags & (IN6_IFF_NOTREADY | IN6_IFF_CLAT46))) {
+ continue;
+ }
+ best_ia6 = ia6;
+ if (ia6->ia_ifp == inifp) {
+ /*
+ * TODO: should we also accept locally originated packets
+ * or from loopback ???
+ */
+ break;
+ }
+ /*
+ * Continue the loop in case there's an exact match with another
+ * interface
+ */
+ }
+ }
+ if (best_ia6 != NULL) {
+ if (best_ia6->ia_ifp != inifp && ip6_forwarding == 0 &&
+ ((ip6_checkinterface == IP6_CHECKINTERFACE_HYBRID_ES &&
+ (best_ia6->ia_ifp->if_family == IFNET_FAMILY_IPSEC ||
+ best_ia6->ia_ifp->if_family == IFNET_FAMILY_UTUN)) ||
+ ip6_checkinterface == IP6_CHECKINTERFACE_STRONG_ES)) {
+ /*
+ * Drop when interface address check is strict and forwarding
+ * is disabled
+ */
+ result = IP6_CHECK_IF_DROP;
+ } else {
+ result = IP6_CHECK_IF_OURS;
+ *deliverifp = best_ia6->ia_ifp;
+ ip6_setdstifaddr_info(m, 0, best_ia6);
+ }
+ }
+ lck_rw_done(&in6_ifaddr_rwlock);
+
+ if (result == IP6_CHECK_IF_NONE) {
+ /*
+ * Slow path: route lookup.
+ */
+ struct sockaddr_in6 *dst6;
+
+ dst6 = SIN6(&rin6->ro_dst);
+ dst6->sin6_len = sizeof(struct sockaddr_in6);
+ dst6->sin6_family = AF_INET6;
+ dst6->sin6_addr = ip6->ip6_dst;
+
+ rtalloc_scoped_ign((struct route *)rin6,
+ RTF_PRCLONING, IFSCOPE_NONE);
+ if (rin6->ro_rt != NULL) {
+ RT_LOCK_SPIN(rin6->ro_rt);
+ }
+
+#define rt6_key(r) (SIN6((r)->rt_nodes->rn_key))
+
+ /*
+ * Accept the packet if the forwarding interface to the destination
+ * according to the routing table is the loopback interface,
+ * unless the associated route has a gateway.
+ * Note that this approach causes a packet to be accepted if there is
+ * a route to the loopback interface for the destination of the packet.
+ * But we think it's even useful in some situations, e.g. when using
+ * a special daemon which wants to intercept the packet.
+ *
+ * XXX: some OSes automatically make a cloned route for the destination
+ * of an outgoing packet. If the outgoing interface of the packet
+ * is a loopback one, the kernel would consider the packet to be
+ * accepted, even if we have no such address assigned on the interface.
+ * We check the cloned flag of the route entry to reject such cases,
+ * assuming that route entries for our own addresses are not made by
+ * cloning (it should be true because in6_addloop explicitly installs
+ * the host route). However, we might have to do an explicit check
+ * while it would be less efficient. Or, should we rather install a
+ * reject route for such a case?
+ */
+ if (rin6->ro_rt != NULL &&
+ (rin6->ro_rt->rt_flags & (RTF_HOST | RTF_GATEWAY)) == RTF_HOST &&
+#if RTF_WASCLONED
+ !(rin6->ro_rt->rt_flags & RTF_WASCLONED) &&