+ if (ip->ip_p == IPPROTO_UDP) {
+ struct udpiphdr *ui;
+ ui = mtod(m, struct udpiphdr *);
+ if (ntohs(ui->ui_dport) == IPPORT_BOOTPC) {
+ goto ours;
+ }
+ }
+
+ tmp_mbuf = m;
+ struct mbuf *nxt_mbuf = NULL;
+ while (tmp_mbuf) {
+ nxt_mbuf = mbuf_nextpkt(tmp_mbuf);
+ /*
+ * Not for us; forward if possible and desirable.
+ */
+ mbuf_setnextpkt(tmp_mbuf, NULL);
+ if (ipforwarding == 0) {
+ OSAddAtomic(1, &ipstat.ips_cantforward);
+ m_freem(tmp_mbuf);
+ } else {
+#if IPFIREWALL
+ ip_forward(tmp_mbuf, 0, args->fwai_next_hop);
+#else
+ ip_forward(tmp_mbuf, 0, NULL);
+#endif
+ }
+ tmp_mbuf = nxt_mbuf;
+ }
+ KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0);
+ return;
+ours:
+ /*
+ * If offset or IP_MF are set, must reassemble.
+ */
+ if (ip->ip_off & ~(IP_DF | IP_RF)) {
+ VERIFY(npkts_in_chain == 1);
+ /*
+ * ip_reass() will return a different mbuf, and update
+ * the divert info in div_info and args->fwai_divert_rule.
+ */
+#if IPDIVERT
+ m = ip_reass(m, (u_int16_t *)&div_info, &args->fwai_divert_rule);
+#else
+ m = ip_reass(m);
+#endif
+ if (m == NULL)
+ return;
+ ip = mtod(m, struct ip *);
+ /* Get the header length of the reassembled packet */
+ hlen = IP_VHL_HL(ip->ip_vhl) << 2;
+#if IPDIVERT
+ /* Restore original checksum before diverting packet */
+ if (div_info != 0) {
+ VERIFY(npkts_in_chain == 1);
+#if BYTE_ORDER != BIG_ENDIAN
+ HTONS(ip->ip_len);
+ HTONS(ip->ip_off);
+#endif
+ ip->ip_sum = 0;
+ ip->ip_sum = ip_cksum_hdr_in(m, hlen);
+#if BYTE_ORDER != BIG_ENDIAN
+ NTOHS(ip->ip_off);
+ NTOHS(ip->ip_len);
+#endif
+ }
+#endif
+ }
+
+ /*
+ * Further protocols expect the packet length to be w/o the
+ * IP header.
+ */
+ ip->ip_len -= hlen;
+
+#if IPDIVERT
+ /*
+ * Divert or tee packet to the divert protocol if required.
+ *
+ * If div_info is zero then cookie should be too, so we shouldn't
+ * need to clear them here. Assume divert_packet() does so also.
+ */
+ if (div_info != 0) {
+ struct mbuf *clone = NULL;
+ VERIFY(npkts_in_chain == 1);
+
+ /* Clone packet if we're doing a 'tee' */
+ if (div_info & IP_FW_PORT_TEE_FLAG)
+ clone = m_dup(m, M_DONTWAIT);
+
+ /* Restore packet header fields to original values */
+ ip->ip_len += hlen;
+
+#if BYTE_ORDER != BIG_ENDIAN
+ HTONS(ip->ip_len);
+ HTONS(ip->ip_off);
+#endif
+ /* Deliver packet to divert input routine */
+ OSAddAtomic(1, &ipstat.ips_delivered);
+ divert_packet(m, 1, div_info & 0xffff, args->fwai_divert_rule);
+
+ /* If 'tee', continue with original packet */
+ if (clone == NULL) {
+ return;
+ }
+ m = clone;
+ ip = mtod(m, struct ip *);
+ }
+#endif
+
+#if IPSEC
+ /*
+ * enforce IPsec policy checking if we are seeing last header.
+ * note that we do not visit this with protocols with pcb layer
+ * code - like udp/tcp/raw ip.
+ */
+ if (ipsec_bypass == 0 && (ip_protox[ip->ip_p]->pr_flags & PR_LASTHDR)) {
+ VERIFY(npkts_in_chain == 1);
+ if (ipsec4_in_reject(m, NULL)) {
+ IPSEC_STAT_INCREMENT(ipsecstat.in_polvio);
+ goto bad;
+ }
+ }
+#endif /* IPSEC */
+
+ /*
+ * Switch out to protocol's input routine.
+ */
+ OSAddAtomic(npkts_in_chain, &ipstat.ips_delivered);
+
+#if IPFIREWALL
+ if (args->fwai_next_hop && ip->ip_p == IPPROTO_TCP) {
+ /* TCP needs IPFORWARD info if available */
+ struct m_tag *fwd_tag;
+ struct ip_fwd_tag *ipfwd_tag;
+
+ VERIFY(npkts_in_chain == 1);
+ fwd_tag = m_tag_create(KERNEL_MODULE_TAG_ID,
+ KERNEL_TAG_TYPE_IPFORWARD, sizeof (*ipfwd_tag),
+ M_NOWAIT, m);
+ if (fwd_tag == NULL)
+ goto bad;
+
+ ipfwd_tag = (struct ip_fwd_tag *)(fwd_tag+1);
+ ipfwd_tag->next_hop = args->fwai_next_hop;
+
+ m_tag_prepend(m, fwd_tag);
+
+ KERNEL_DEBUG(DBG_LAYER_END, ip->ip_dst.s_addr,
+ ip->ip_src.s_addr, ip->ip_p, ip->ip_off, ip->ip_len);
+
+ /* TCP deals with its own locking */
+ ip_proto_dispatch_in(m, hlen, ip->ip_p, 0);
+ } else {
+ KERNEL_DEBUG(DBG_LAYER_END, ip->ip_dst.s_addr,
+ ip->ip_src.s_addr, ip->ip_p, ip->ip_off, ip->ip_len);
+
+ ip_input_dispatch_chain(m);
+
+ }
+#else /* !IPFIREWALL */
+ ip_input_dispatch_chain(m);
+
+#endif /* !IPFIREWALL */
+ KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0);
+ return;
+bad:
+ KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0);
+ m_freem(m);
+}
+
+/*
+ * Process a list (chain) of inbound IPv4 packets.
+ *
+ * When chaining is disabled (ip_chaining == 0) each packet is simply
+ * handed to ip_input() one at a time.  Otherwise a two-pass scheme is
+ * used: ip_input_first_pass() validates each packet and decides whether
+ * it may be chained; chainable packets are bucketed into pktchain_tbl
+ * and later delivered in bulk by ip_input_second_pass_loop_tbl().
+ * Packets that must not be chained break out of the loop and are
+ * delivered individually via ip_input_second_pass(); processing then
+ * restarts with the remainder of the list.
+ */
+void
+ip_input_process_list(struct mbuf *packet_list)
+{
+ pktchain_elm_t pktchain_tbl[PKTTBL_SZ];
+
+ struct mbuf *packet = NULL;
+ struct mbuf *modm = NULL; /* modified mbuf */
+ int retval = 0;
+ u_int32_t div_info = 0;
+ int ours = 0;
+#if (DEBUG || DEVELOPMENT)
+ struct timeval start_tv;
+#endif /* (DEBUG || DEVELOPMENT) */
+ int num_pkts = 0;
+ int chain = 0;
+ struct ip_fw_in_args args;
+
+ /* Chaining disabled: fall back to per-packet ip_input() delivery */
+ if (ip_chaining == 0) {
+ struct mbuf *m = packet_list;
+#if (DEBUG || DEVELOPMENT)
+ if (ip_input_measure)
+ net_perf_start_time(&net_perf, &start_tv);
+#endif /* (DEBUG || DEVELOPMENT) */
+
+ while (m) {
+ /* detach the head packet before handing it down */
+ packet_list = mbuf_nextpkt(m);
+ mbuf_setnextpkt(m, NULL);
+ ip_input(m);
+ m = packet_list;
+ num_pkts++;
+ }
+#if (DEBUG || DEVELOPMENT)
+ if (ip_input_measure)
+ net_perf_measure_time(&net_perf, &start_tv, num_pkts);
+#endif /* (DEBUG || DEVELOPMENT) */
+ return;
+ }
+#if (DEBUG || DEVELOPMENT)
+ if (ip_input_measure)
+ net_perf_start_time(&net_perf, &start_tv);
+#endif /* (DEBUG || DEVELOPMENT) */
+
+ bzero(&pktchain_tbl, sizeof(pktchain_tbl));
+restart_list_process:
+ /* (re)build chains from the remaining list after any break-out */
+ chain = 0;
+ for (packet = packet_list; packet; packet = packet_list) {
+ packet_list = mbuf_nextpkt(packet);
+ mbuf_setnextpkt(packet, NULL);
+
+ num_pkts++;
+ modm = NULL;
+ div_info = 0;
+ bzero(&args, sizeof (args));
+
+ retval = ip_input_first_pass(packet, &div_info, &args,
+ &ours, &modm);
+
+ if (retval == IPINPUT_DOCHAIN) {
+ /* first pass may have substituted a new mbuf */
+ if (modm)
+ packet = modm;
+ packet = ip_chain_insert(packet, &pktchain_tbl[0]);
+ if (packet == NULL) {
+ /* absorbed into an existing chain */
+ ipstat.ips_rxc_chained++;
+ chain++;
+ if (chain > ip_chainsz)
+ break;
+ } else {
+ /* hash bucket collision: flush what we have */
+ ipstat.ips_rxc_collisions++;
+ break;
+ }
+ } else if (retval == IPINPUT_DONTCHAIN) {
+ /* in order to preserve order, exit from chaining */
+ if (modm)
+ packet = modm;
+ ipstat.ips_rxc_notchain++;
+ break;
+ } else {
+ /* packet was freed or delivered, do nothing. */
+ }
+ }
+
+ /* do second pass here for pktchain_tbl */
+ if (chain)
+ ip_input_second_pass_loop_tbl(&pktchain_tbl[0], &args);
+
+ if (packet) {
+ /*
+ * equivalent update in chaining case if performed in
+ * ip_input_second_pass_loop_tbl().
+ */
+#if (DEBUG || DEVELOPMENT)
+ if (ip_input_measure)
+ net_perf_histogram(&net_perf, 1);
+#endif /* (DEBUG || DEVELOPMENT) */
+ ip_input_second_pass(packet, packet->m_pkthdr.rcvif, div_info,
+ 1, packet->m_pkthdr.len, &args, ours);
+ }
+
+ if (packet_list)
+ goto restart_list_process;
+
+#if (DEBUG || DEVELOPMENT)
+ if (ip_input_measure)
+ net_perf_measure_time(&net_perf, &start_tv, num_pkts);
+#endif /* (DEBUG || DEVELOPMENT) */
+}
+/*
+ * IP input routine (single-packet path). Checksum and byte swap the
+ * header. If fragmented, try to reassemble. Process options, decide
+ * local delivery vs. forwarding, and pass to the next level.
+ */
+void
+ip_input(struct mbuf *m)
+{
+ struct ip *ip;
+ struct in_ifaddr *ia = NULL;
+ unsigned int hlen, checkif;
+ u_short sum = 0;
+ struct in_addr pkt_dst;
+#if IPFIREWALL
+ int i;
+ u_int32_t div_info = 0; /* packet divert/tee info */
+#endif
+#if IPFIREWALL || DUMMYNET
+ struct ip_fw_args args;
+ struct m_tag *tag;
+#endif
+ ipfilter_t inject_filter_ref = NULL;
+ struct ifnet *inifp;
+
+ /* Check if the mbuf is still valid after interface filter processing */
+ MBUF_INPUT_CHECK(m, m->m_pkthdr.rcvif);
+ inifp = m->m_pkthdr.rcvif;
+ VERIFY(inifp != NULL);
+
+ /* account for packets delivered via the non-chained (single) path */
+ ipstat.ips_rxc_notlist++;
+
+ /* Perform IP header alignment fixup, if needed */
+ IP_HDR_ALIGNMENT_FIXUP(m, inifp, goto bad);
+
+ m->m_pkthdr.pkt_flags &= ~PKTF_FORWARDED;
+
+#if IPFIREWALL || DUMMYNET
+ bzero(&args, sizeof (struct ip_fw_args));
+
+ /*
+ * Don't bother searching for tag(s) if there's none.
+ */
+ if (SLIST_EMPTY(&m->m_pkthdr.tags))
+ goto ipfw_tags_done;
+
+ /* Grab info from mtags prepended to the chain */
+#if DUMMYNET
+ if ((tag = m_tag_locate(m, KERNEL_MODULE_TAG_ID,
+ KERNEL_TAG_TYPE_DUMMYNET, NULL)) != NULL) {
+ struct dn_pkt_tag *dn_tag;
+
+ dn_tag = (struct dn_pkt_tag *)(tag+1);
+ args.fwa_ipfw_rule = dn_tag->dn_ipfw_rule;
+ args.fwa_pf_rule = dn_tag->dn_pf_rule;
+
+ m_tag_delete(m, tag);
+ }
+#endif /* DUMMYNET */
+
+#if IPDIVERT
+ if ((tag = m_tag_locate(m, KERNEL_MODULE_TAG_ID,
+ KERNEL_TAG_TYPE_DIVERT, NULL)) != NULL) {
+ struct divert_tag *div_tag;
+
+ div_tag = (struct divert_tag *)(tag+1);
+ args.fwa_divert_rule = div_tag->cookie;
+
+ m_tag_delete(m, tag);
+ }
+#endif
+
+ if ((tag = m_tag_locate(m, KERNEL_MODULE_TAG_ID,
+ KERNEL_TAG_TYPE_IPFORWARD, NULL)) != NULL) {
+ struct ip_fwd_tag *ipfwd_tag;
+
+ ipfwd_tag = (struct ip_fwd_tag *)(tag+1);
+ args.fwa_next_hop = ipfwd_tag->next_hop;
+
+ m_tag_delete(m, tag);
+ }
+
+#if DIAGNOSTIC
+ if (m == NULL || !(m->m_flags & M_PKTHDR))
+ panic("ip_input no HDR");
+#endif
+
+#if DUMMYNET
+ if (args.fwa_ipfw_rule || args.fwa_pf_rule) {
+ /* dummynet already filtered us */
+ ip = mtod(m, struct ip *);
+ hlen = IP_VHL_HL(ip->ip_vhl) << 2;
+ inject_filter_ref = ipf_get_inject_filter(m);
+#if IPFIREWALL
+ if (args.fwa_ipfw_rule)
+ goto iphack;
+#endif /* IPFIREWALL */
+ if (args.fwa_pf_rule)
+ goto check_with_pf;
+ }
+#endif /* DUMMYNET */
+ipfw_tags_done:
+#endif /* IPFIREWALL || DUMMYNET */
+
+ /*
+ * No need to process packet twice if we've already seen it.
+ */
+ if (!SLIST_EMPTY(&m->m_pkthdr.tags))
+ inject_filter_ref = ipf_get_inject_filter(m);
+ /* Reinjected by a filter: skip re-validation and dispatch directly */
+ if (inject_filter_ref != NULL) {
+ ip = mtod(m, struct ip *);
+ hlen = IP_VHL_HL(ip->ip_vhl) << 2;
+
+ DTRACE_IP6(receive, struct mbuf *, m, struct inpcb *, NULL,
+ struct ip *, ip, struct ifnet *, inifp,
+ struct ip *, ip, struct ip6_hdr *, NULL);
+
+ /*
+ * Convert to host byte order and strip the IP header length,
+ * as the protocol input routines below expect.
+ */
+ ip->ip_len = ntohs(ip->ip_len) - hlen;
+ ip->ip_off = ntohs(ip->ip_off);
+ ip_proto_dispatch_in(m, hlen, ip->ip_p, inject_filter_ref);
+ return;
+ }
+
+ OSAddAtomic(1, &ipstat.ips_total);
+ if (m->m_pkthdr.len < sizeof (struct ip))
+ goto tooshort;
+
+ if (m->m_len < sizeof (struct ip) &&
+ (m = m_pullup(m, sizeof (struct ip))) == NULL) {
+ OSAddAtomic(1, &ipstat.ips_toosmall);
+ return;
+ }
+ ip = mtod(m, struct ip *);
+
+ KERNEL_DEBUG(DBG_LAYER_BEG, ip->ip_dst.s_addr, ip->ip_src.s_addr,
+ ip->ip_p, ip->ip_off, ip->ip_len);
+
+ if (IP_VHL_V(ip->ip_vhl) != IPVERSION) {
+ OSAddAtomic(1, &ipstat.ips_badvers);
+ goto bad;
+ }
+
+ hlen = IP_VHL_HL(ip->ip_vhl) << 2;
+ if (hlen < sizeof (struct ip)) { /* minimum header length */
+ OSAddAtomic(1, &ipstat.ips_badhlen);
+ goto bad;
+ }
+ if (hlen > m->m_len) {
+ if ((m = m_pullup(m, hlen)) == NULL) {
+ OSAddAtomic(1, &ipstat.ips_badhlen);
+ return;
+ }
+ ip = mtod(m, struct ip *);
+ }
+
+ /* 127/8 must not appear on wire - RFC1122 */
+ if ((ntohl(ip->ip_dst.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET ||
+ (ntohl(ip->ip_src.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET) {
+ /*
+ * Allow for the following exceptions:
+ *
+ * 1. If the packet was sent to loopback (i.e. rcvif
+ * would have been set earlier at output time.)
+ *
+ * 2. If the packet was sent out on loopback from a local
+ * source address which belongs to a non-loopback
+ * interface (i.e. rcvif may not necessarily be a
+ * loopback interface, hence the test for PKTF_LOOP.)
+ * Unlike IPv6, there is no interface scope ID, and
+ * therefore we don't care so much about PKTF_IFINFO.
+ */
+ if (!(inifp->if_flags & IFF_LOOPBACK) &&
+ !(m->m_pkthdr.pkt_flags & PKTF_LOOP)) {
+ OSAddAtomic(1, &ipstat.ips_badaddr);
+ goto bad;
+ }
+ }
+
+ /* IPv4 Link-Local Addresses as defined in RFC3927 */
+ if ((IN_LINKLOCAL(ntohl(ip->ip_dst.s_addr)) ||
+ IN_LINKLOCAL(ntohl(ip->ip_src.s_addr)))) {
+ ip_linklocal_stat.iplls_in_total++;
+ if (ip->ip_ttl != MAXTTL) {
+ OSAddAtomic(1, &ip_linklocal_stat.iplls_in_badttl);
+ /* Silently drop link local traffic with bad TTL */
+ if (!ip_linklocal_in_allowbadttl)
+ goto bad;
+ }
+ }
+
+ /* Verify the IP header checksum; a non-zero result means corrupt */
+ sum = ip_cksum(m, hlen);
+ if (sum) {
+ goto bad;
+ }
+
+ DTRACE_IP6(receive, struct mbuf *, m, struct inpcb *, NULL,
+ struct ip *, ip, struct ifnet *, inifp,
+ struct ip *, ip, struct ip6_hdr *, NULL);
+
+ /*
+ * Naively assume we can attribute inbound data to the route we would
+ * use to send to this destination. Asymmetric routing breaks this
+ * assumption, but it still allows us to account for traffic from
+ * a remote node in the routing table.
+ * this has a very significant performance impact so we bypass
+ * if nstat_collect is disabled. We may also bypass if the
+ * protocol is tcp in the future because tcp will have a route that
+ * we can use to attribute the data to. That does mean we would not
+ * account for forwarded tcp traffic.
+ */
+ if (nstat_collect) {
+ struct rtentry *rt =
+ ifnet_cached_rtlookup_inet(inifp, ip->ip_src);
+ if (rt != NULL) {
+ nstat_route_rx(rt, 1, m->m_pkthdr.len, 0);
+ rtfree(rt);
+ }
+ }
+
+ /*
+ * Convert fields to host representation.
+ */
+#if BYTE_ORDER != BIG_ENDIAN
+ NTOHS(ip->ip_len);
+#endif
+
+ if (ip->ip_len < hlen) {
+ OSAddAtomic(1, &ipstat.ips_badlen);
+ goto bad;
+ }
+
+#if BYTE_ORDER != BIG_ENDIAN
+ NTOHS(ip->ip_off);
+#endif
+ /*
+ * Check that the amount of data in the buffers
+ * is as at least much as the IP header would have us expect.
+ * Trim mbufs if longer than we expect.
+ * Drop packet if shorter than we expect.
+ */
+ if (m->m_pkthdr.len < ip->ip_len) {
+tooshort:
+ OSAddAtomic(1, &ipstat.ips_tooshort);
+ goto bad;
+ }
+ if (m->m_pkthdr.len > ip->ip_len) {
+ ip_input_adjust(m, ip, inifp);
+ }
+
+ /* for consistency */
+ m->m_pkthdr.pkt_proto = ip->ip_p;
+
+#if DUMMYNET
+check_with_pf:
+#endif
+#if PF
+ /* Invoke inbound packet filter */
+ if (PF_IS_ENABLED) {
+ int error;
+#if DUMMYNET
+ error = pf_af_hook(inifp, NULL, &m, AF_INET, TRUE, &args);
+#else
+ error = pf_af_hook(inifp, NULL, &m, AF_INET, TRUE, NULL);
+#endif /* DUMMYNET */
+ if (error != 0 || m == NULL) {
+ if (m != NULL) {
+ panic("%s: unexpected packet %p\n",
+ __func__, m);
+ /* NOTREACHED */
+ }
+ /* Already freed by callee */
+ return;
+ }
+ /* PF may have modified or reallocated the mbuf */
+ ip = mtod(m, struct ip *);
+ hlen = IP_VHL_HL(ip->ip_vhl) << 2;
+ }
+#endif /* PF */
+
+#if IPSEC
+ /* Packet carries IPsec history: skip the firewall stage below */
+ if (ipsec_bypass == 0 && ipsec_gethist(m, NULL))
+ goto pass;
+#endif
+
+#if IPFIREWALL
+#if DUMMYNET
+iphack:
+#endif /* DUMMYNET */
+ /*
+ * Check if we want to allow this packet to be processed.
+ * Consider it to be bad if not.
+ */
+ if (fw_enable && IPFW_LOADED) {
+#if IPFIREWALL_FORWARD
+ /*
+ * If we've been forwarded from the output side, then
+ * skip the firewall a second time
+ */
+ if (args.fwa_next_hop)
+ goto ours;
+#endif /* IPFIREWALL_FORWARD */
+
+ args.fwa_m = m;
+
+ i = ip_fw_chk_ptr(&args);
+ m = args.fwa_m;
+
+ if ((i & IP_FW_PORT_DENY_FLAG) || m == NULL) { /* drop */
+ if (m)
+ m_freem(m);
+ return;
+ }
+ ip = mtod(m, struct ip *); /* just in case m changed */
+
+ if (i == 0 && args.fwa_next_hop == NULL) { /* common case */
+ goto pass;
+ }
+#if DUMMYNET
+ if (DUMMYNET_LOADED && (i & IP_FW_PORT_DYNT_FLAG) != 0) {
+ /* Send packet to the appropriate pipe */
+ ip_dn_io_ptr(m, i&0xffff, DN_TO_IP_IN, &args,
+ DN_CLIENT_IPFW);
+ return;
+ }
+#endif /* DUMMYNET */
+#if IPDIVERT
+ if (i != 0 && (i & IP_FW_PORT_DYNT_FLAG) == 0) {
+ /* Divert or tee packet */
+ div_info = i;
+ goto ours;
+ }
+#endif
+#if IPFIREWALL_FORWARD
+ if (i == 0 && args.fwa_next_hop != NULL) {
+ goto pass;
+ }
+#endif
+ /*
+ * if we get here, the packet must be dropped
+ */
+ m_freem(m);
+ return;
+ }
+#endif /* IPFIREWALL */
+#if IPSEC | IPFIREWALL
+pass:
+#endif
+ /*
+ * Process options and, if not destined for us,
+ * ship it on. ip_dooptions returns 1 when an
+ * error was detected (causing an icmp message
+ * to be sent and the original packet to be freed).
+ */
+ ip_nhops = 0; /* for source routed packets */
+#if IPFIREWALL
+ if (hlen > sizeof (struct ip) &&
+ ip_dooptions(m, 0, args.fwa_next_hop)) {
+#else /* !IPFIREWALL */
+ if (hlen > sizeof (struct ip) && ip_dooptions(m, 0, NULL)) {
+#endif /* !IPFIREWALL */
+ return;
+ }
+
+ /*
+ * Check our list of addresses, to see if the packet is for us.
+ * If we don't have any addresses, assume any unicast packet
+ * we receive might be for us (and let the upper layers deal
+ * with it).
+ */
+ if (TAILQ_EMPTY(&in_ifaddrhead) && !(m->m_flags & (M_MCAST|M_BCAST))) {
+ ip_setdstifaddr_info(m, inifp->if_index, NULL);
+ goto ours;
+ }
+
+ /*
+ * Cache the destination address of the packet; this may be
+ * changed by use of 'ipfw fwd'.
+ */
+#if IPFIREWALL
+ pkt_dst = args.fwa_next_hop == NULL ?
+ ip->ip_dst : args.fwa_next_hop->sin_addr;
+#else /* !IPFIREWALL */
+ pkt_dst = ip->ip_dst;
+#endif /* !IPFIREWALL */
+
+ /*
+ * Enable a consistency check between the destination address
+ * and the arrival interface for a unicast packet (the RFC 1122
+ * strong ES model) if IP forwarding is disabled and the packet
+ * is not locally generated and the packet is not subject to
+ * 'ipfw fwd'.
+ *
+ * XXX - Checking also should be disabled if the destination
+ * address is ipnat'ed to a different interface.
+ *
+ * XXX - Checking is incompatible with IP aliases added
+ * to the loopback interface instead of the interface where
+ * the packets are received.
+ */
+ checkif = ip_checkinterface && (ipforwarding == 0) &&
+ !(inifp->if_flags & IFF_LOOPBACK) &&
+ !(m->m_pkthdr.pkt_flags & PKTF_LOOP)
+#if IPFIREWALL
+ && (args.fwa_next_hop == NULL);
+#else /* !IPFIREWALL */
+ ;
+#endif /* !IPFIREWALL */
+
+ /*
+ * Check for exact addresses in the hash bucket.
+ */
+ lck_rw_lock_shared(in_ifaddr_rwlock);
+ TAILQ_FOREACH(ia, INADDR_HASH(pkt_dst.s_addr), ia_hash) {
+ /*
+ * If the address matches, verify that the packet
+ * arrived via the correct interface if checking is
+ * enabled.
+ */
+ if (IA_SIN(ia)->sin_addr.s_addr == pkt_dst.s_addr &&
+ (!checkif || ia->ia_ifp == inifp)) {
+ ip_setdstifaddr_info(m, 0, ia);
+ lck_rw_done(in_ifaddr_rwlock);
+ goto ours;
+ }
+ }
+ lck_rw_done(in_ifaddr_rwlock);
+
+ /*
+ * Check for broadcast addresses.
+ *
+ * Only accept broadcast packets that arrive via the matching
+ * interface. Reception of forwarded directed broadcasts would be
+ * handled via ip_forward() and ether_frameout() with the loopback
+ * into the stack for SIMPLEX interfaces handled by ether_frameout().
+ */
+ if (inifp->if_flags & IFF_BROADCAST) {
+ struct ifaddr *ifa;
+
+ ifnet_lock_shared(inifp);
+ TAILQ_FOREACH(ifa, &inifp->if_addrhead, ifa_link) {
+ if (ifa->ifa_addr->sa_family != AF_INET) {
+ continue;
+ }
+ ia = ifatoia(ifa);
+ if (satosin(&ia->ia_broadaddr)->sin_addr.s_addr ==
+ pkt_dst.s_addr || ia->ia_netbroadcast.s_addr ==
+ pkt_dst.s_addr) {
+ ip_setdstifaddr_info(m, 0, ia);
+ ifnet_lock_done(inifp);
+ goto ours;
+ }
+ }
+ ifnet_lock_done(inifp);
+ }
+
+ if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) {
+ struct in_multi *inm;
+ /*
+ * See if we belong to the destination multicast group on the
+ * arrival interface.
+ */
+ in_multihead_lock_shared();
+ IN_LOOKUP_MULTI(&ip->ip_dst, inifp, inm);
+ in_multihead_lock_done();
+ if (inm == NULL) {
+ OSAddAtomic(1, &ipstat.ips_notmember);
+ m_freem(m);
+ return;
+ }
+ ip_setdstifaddr_info(m, inifp->if_index, NULL);
+ INM_REMREF(inm);
+ goto ours;
+ }
+ /* Limited broadcast or the unspecified address is always ours */
+ if (ip->ip_dst.s_addr == (u_int32_t)INADDR_BROADCAST ||
+ ip->ip_dst.s_addr == INADDR_ANY) {
+ ip_setdstifaddr_info(m, inifp->if_index, NULL);
+ goto ours;
+ }
+
+ /* Allow DHCP/BootP responses through */
+ if ((inifp->if_eflags & IFEF_AUTOCONFIGURING) &&
+ hlen == sizeof (struct ip) && ip->ip_p == IPPROTO_UDP) {
+ struct udpiphdr *ui;
+
+ if (m->m_len < sizeof (struct udpiphdr) &&
+ (m = m_pullup(m, sizeof (struct udpiphdr))) == NULL) {
+ OSAddAtomic(1, &udpstat.udps_hdrops);
+ return;
+ }
+ ui = mtod(m, struct udpiphdr *);
+ if (ntohs(ui->ui_dport) == IPPORT_BOOTPC) {
+ ip_setdstifaddr_info(m, inifp->if_index, NULL);
+ goto ours;
+ }
+ ip = mtod(m, struct ip *); /* in case it changed */
+ }
+
+ /*
+ * Not for us; forward if possible and desirable.
+ */
+ if (ipforwarding == 0) {
+ OSAddAtomic(1, &ipstat.ips_cantforward);
+ m_freem(m);
+ } else {
+#if IPFIREWALL
+ ip_forward(m, 0, args.fwa_next_hop);
+#else
+ ip_forward(m, 0, NULL);
+#endif
+ }
+ return;
+
+ours:
+ /*
+ * If offset or IP_MF are set, must reassemble.
+ */
+ if (ip->ip_off & ~(IP_DF | IP_RF)) {
+ /*
+ * ip_reass() will return a different mbuf, and update
+ * the divert info in div_info and args.fwa_divert_rule.
+ */
+#if IPDIVERT
+ m = ip_reass(m, (u_int16_t *)&div_info, &args.fwa_divert_rule);
+#else
+ m = ip_reass(m);
+#endif
+ if (m == NULL)
+ return;
+ ip = mtod(m, struct ip *);
+ /* Get the header length of the reassembled packet */
+ hlen = IP_VHL_HL(ip->ip_vhl) << 2;
+#if IPDIVERT
+ /* Restore original checksum before diverting packet */
+ if (div_info != 0) {
+#if BYTE_ORDER != BIG_ENDIAN
+ HTONS(ip->ip_len);
+ HTONS(ip->ip_off);
+#endif
+ ip->ip_sum = 0;
+ ip->ip_sum = ip_cksum_hdr_in(m, hlen);
+#if BYTE_ORDER != BIG_ENDIAN
+ NTOHS(ip->ip_off);
+ NTOHS(ip->ip_len);
+#endif
+ }
+#endif
+ }
+
+ /*
+ * Further protocols expect the packet length to be w/o the
+ * IP header.
+ */
+ ip->ip_len -= hlen;
+
+#if IPDIVERT
+ /*
+ * Divert or tee packet to the divert protocol if required.
+ *
+ * If div_info is zero then cookie should be too, so we shouldn't
+ * need to clear them here. Assume divert_packet() does so also.
+ */
+ if (div_info != 0) {
+ struct mbuf *clone = NULL;
+
+ /* Clone packet if we're doing a 'tee' */
+ if (div_info & IP_FW_PORT_TEE_FLAG)
+ clone = m_dup(m, M_DONTWAIT);
+
+ /* Restore packet header fields to original values */
+ ip->ip_len += hlen;
+
+#if BYTE_ORDER != BIG_ENDIAN
+ HTONS(ip->ip_len);
+ HTONS(ip->ip_off);
+#endif
+ /* Deliver packet to divert input routine */
+ OSAddAtomic(1, &ipstat.ips_delivered);
+ divert_packet(m, 1, div_info & 0xffff, args.fwa_divert_rule);
+
+ /* If 'tee', continue with original packet */
+ if (clone == NULL) {
+ return;
+ }
+ m = clone;
+ ip = mtod(m, struct ip *);
+ }
+#endif
+
+#if IPSEC
+ /*
+ * enforce IPsec policy checking if we are seeing last header.
+ * note that we do not visit this with protocols with pcb layer
+ * code - like udp/tcp/raw ip.
+ */
+ if (ipsec_bypass == 0 && (ip_protox[ip->ip_p]->pr_flags & PR_LASTHDR)) {
+ if (ipsec4_in_reject(m, NULL)) {
+ IPSEC_STAT_INCREMENT(ipsecstat.in_polvio);
+ goto bad;
+ }
+ }
+#endif /* IPSEC */
+
+ /*
+ * Switch out to protocol's input routine.
+ */
+ OSAddAtomic(1, &ipstat.ips_delivered);
+
+#if IPFIREWALL
+ if (args.fwa_next_hop && ip->ip_p == IPPROTO_TCP) {
+ /* TCP needs IPFORWARD info if available */
+ struct m_tag *fwd_tag;
+ struct ip_fwd_tag *ipfwd_tag;
+
+ fwd_tag = m_tag_create(KERNEL_MODULE_TAG_ID,
+ KERNEL_TAG_TYPE_IPFORWARD, sizeof (*ipfwd_tag),
+ M_NOWAIT, m);
+ if (fwd_tag == NULL)
+ goto bad;
+
+ ipfwd_tag = (struct ip_fwd_tag *)(fwd_tag+1);
+ ipfwd_tag->next_hop = args.fwa_next_hop;
+
+ m_tag_prepend(m, fwd_tag);
+
+ KERNEL_DEBUG(DBG_LAYER_END, ip->ip_dst.s_addr,
+ ip->ip_src.s_addr, ip->ip_p, ip->ip_off, ip->ip_len);
+
+ /* TCP deals with its own locking */
+ ip_proto_dispatch_in(m, hlen, ip->ip_p, 0);
+ } else {
+ KERNEL_DEBUG(DBG_LAYER_END, ip->ip_dst.s_addr,
+ ip->ip_src.s_addr, ip->ip_p, ip->ip_off, ip->ip_len);
+
+ /* Give software LRO a chance to coalesce inbound TCP */
+ if ((sw_lro) && (ip->ip_p == IPPROTO_TCP)) {
+ m = tcp_lro(m, hlen);
+ if (m == NULL)
+ return;
+ }
+
+ ip_proto_dispatch_in(m, hlen, ip->ip_p, 0);
+ }
+#else /* !IPFIREWALL */
+ if ((sw_lro) && (ip->ip_p == IPPROTO_TCP)) {
+ m = tcp_lro(m, hlen);
+ if (m == NULL)
+ return;
+ }
+ ip_proto_dispatch_in(m, hlen, ip->ip_p, 0);
+#endif /* !IPFIREWALL */
+ return;
+
+bad:
+ KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0);
+ m_freem(m);
+}
+
+/*
+ * Recompute the fragment-queue allocation bound from the maxnipq
+ * sysctl value.  Must be called with ipqlock held.
+ */
+static void
+ipq_updateparams(void)
+{
+ LCK_MTX_ASSERT(&ipqlock, LCK_MTX_ASSERT_OWNED);
+ /*
+ * Map maxnipq onto ipq_limit:
+ * negative -> unlimited allocation (0);
+ * positive -> that specific bound;
+ * zero -> no further queue allocation; keep the bound very
+ * low and rely on implementation elsewhere to actually
+ * prevent allocation and reclaim current queues.
+ */
+ if (maxnipq < 0) {
+ ipq_limit = 0;
+ } else if (maxnipq > 0) {
+ ipq_limit = maxnipq;
+ } else {
+ ipq_limit = 1;
+ }
+ /*
+ * Arm the purge timer if not already and if there's work to do
+ */
+ frag_sched_timeout();
+}
+
+/*
+ * sysctl handler for maxnipq: read or update the maximum number of
+ * IP fragment reassembly queues, bounded to [-1, nmbclusters/4].
+ */
+static int
+sysctl_maxnipq SYSCTL_HANDLER_ARGS
+{
+#pragma unused(arg1, arg2)
+ int err, val;
+
+ lck_mtx_lock(&ipqlock);
+ val = maxnipq;
+ err = sysctl_handle_int(oidp, &val, 0, req);
+ if (err != 0 || req->newptr == USER_ADDR_NULL)
+ goto unlock;
+ /* impose bounds: -1 means unlimited, upper cap tied to clusters */
+ if (val < -1 || val > (nmbclusters / 4)) {
+ err = EINVAL;
+ goto unlock;
+ }
+ maxnipq = val;
+ ipq_updateparams();
+unlock:
+ lck_mtx_unlock(&ipqlock);
+ return (err);
+}
+
+/*
+ * sysctl handler for maxfragsperpacket: read or update the maximum
+ * number of fragments accepted per reassembled datagram.
+ */
+static int
+sysctl_maxfragsperpacket SYSCTL_HANDLER_ARGS
+{
+#pragma unused(arg1, arg2)
+ int err, val;
+
+ lck_mtx_lock(&ipqlock);
+ val = maxfragsperpacket;
+ err = sysctl_handle_int(oidp, &val, 0, req);
+ if (err != 0 || req->newptr == USER_ADDR_NULL)
+ goto unlock;
+ maxfragsperpacket = val;
+ ipq_updateparams(); /* see if we need to arm timer */
+unlock:
+ lck_mtx_unlock(&ipqlock);
+ return (err);
+}
+
+/*
+ * Take incoming datagram fragment and try to reassemble it into
+ * whole datagram. If a chain for reassembly of this datagram already
+ * exists, then it is given as fp; otherwise have to make a chain.
+ *
+ * When IPDIVERT enabled, keep additional state with each packet that
+ * tells us if we need to divert or tee the packet we're building.
+ *
+ * The IP header is *NOT* adjusted out of iplen (but in host byte order).
+ */
+static struct mbuf *
+#if IPDIVERT
+ip_reass(struct mbuf *m,
+#ifdef IPDIVERT_44
+ u_int32_t *divinfo,
+#else /* IPDIVERT_44 */
+ u_int16_t *divinfo,
+#endif /* IPDIVERT_44 */
+ u_int16_t *divcookie)
+#else /* IPDIVERT */
+ip_reass(struct mbuf *m)
+#endif /* IPDIVERT */
+{
+ struct ip *ip;
+ struct mbuf *p, *q, *nq, *t;
+ struct ipq *fp = NULL;
+ struct ipqhead *head;
+ int i, hlen, next;
+ u_int8_t ecn, ecn0;
+ uint32_t csum, csum_flags;
+ uint16_t hash;
+ struct fq_head dfq;
+
+ MBUFQ_INIT(&dfq); /* for deferred frees */
+
+ /* If maxnipq or maxfragsperpacket is 0, never accept fragments. */
+ if (maxnipq == 0 || maxfragsperpacket == 0) {
+ ipstat.ips_fragments++;
+ ipstat.ips_fragdropped++;