/*
 * Copyright (c) 2000-2020 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*
 * Copyright (c) 1982, 1986, 1988, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ip_input.c	8.2 (Berkeley) 1/4/94
 */
/*
 * NOTICE: This file was modified by SPARTA, Inc. in 2007 to introduce
 * support for mandatory and extensible security protections.  This notice
 * is included in support of clause 2.2 (b) of the Apple Public License,
 * Version 2.0.
 */

#define _IP_VHL

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <sys/kernel.h>
#include <sys/syslog.h>
#include <sys/sysctl.h>
#include <sys/mcache.h>
#include <sys/socketvar.h>
#include <sys/kdebug.h>
#include <mach/mach_time.h>
#include <mach/sdt.h>

#include <machine/endian.h>
#include <dev/random/randomdev.h>

#include <kern/queue.h>
#include <kern/locks.h>
#include <libkern/OSAtomic.h>

#include <pexpert/pexpert.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/if_dl.h>
#include <net/route.h>
#include <net/kpi_protocol.h>
#include <net/ntstat.h>
#include <net/dlil.h>
#include <net/classq/classq.h>
#include <net/net_perf.h>
#include <net/init.h>
#if PF
#include <net/pfvar.h>
#endif /* PF */

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/in_arp.h>
#include <netinet/ip.h>
#include <netinet/in_pcb.h>
#include <netinet/ip_var.h>
#include <netinet/ip_icmp.h>
#include <netinet/ip_fw.h>
#include <netinet/ip_divert.h>
#include <netinet/kpi_ipfilter_var.h>
#include <netinet/udp.h>
#include <netinet/udp_var.h>
#include <netinet/bootp.h>
#include <netinet/lro_ext.h>

#if DUMMYNET
#include <netinet/ip_dummynet.h>
#endif /* DUMMYNET */

#if CONFIG_MACF_NET
#include <security/mac_framework.h>
#endif /* CONFIG_MACF_NET */

#if IPSEC
#include <netinet6/ipsec.h>
#include <netkey/key.h>
#endif /* IPSEC */

#include <os/log.h>

#define DBG_LAYER_BEG NETDBG_CODE(DBG_NETIP, 0)
#define DBG_LAYER_END NETDBG_CODE(DBG_NETIP, 2)
#define DBG_FNC_IP_INPUT NETDBG_CODE(DBG_NETIP, (2 << 8))

#if IPSEC
extern int ipsec_bypass;
extern lck_mtx_t *sadb_mutex;

lck_grp_t *sadb_stat_mutex_grp;
lck_grp_attr_t *sadb_stat_mutex_grp_attr;
lck_attr_t *sadb_stat_mutex_attr;
decl_lck_mtx_data(, sadb_stat_mutex_data);
lck_mtx_t *sadb_stat_mutex = &sadb_stat_mutex_data;
#endif /* IPSEC */

MBUFQ_HEAD(fq_head);

static int frag_timeout_run; /* frag timer is scheduled to run */
static void frag_timeout(void *);
static void frag_sched_timeout(void);

static struct ipq *ipq_alloc(int);
static void ipq_free(struct ipq *);
static void ipq_updateparams(void);
static void ip_input_second_pass(struct mbuf *, struct ifnet *,
    u_int32_t, int, int, struct ip_fw_in_args *, int);

decl_lck_mtx_data(static, ipqlock);
static lck_attr_t *ipqlock_attr;
static lck_grp_t *ipqlock_grp;
static lck_grp_attr_t *ipqlock_grp_attr;

/* Packet reassembly stuff */
#define IPREASS_NHASH_LOG2 6
#define IPREASS_NHASH (1 << IPREASS_NHASH_LOG2)
#define IPREASS_HMASK (IPREASS_NHASH - 1)
#define IPREASS_HASH(x, y) \
	(((((x) & 0xF) | ((((x) >> 8) & 0xF) << 4)) ^ (y)) & IPREASS_HMASK)
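
/*
 * Worked example (editor's note, illustrative only): for IPREASS_HASH(x, y)
 * with x = 0x12345678 (source address) and y = 0x0042 (IP id):
 *	(x & 0xF)                   = 0x08
 *	(((x >> 8) & 0xF) << 4)     = 0x60
 *	(0x08 | 0x60) ^ 0x0042      = 0x2A
 *	0x2A & IPREASS_HMASK (0x3F) = 0x2A  -> bucket 42
 * Two nibbles of the source address are mixed with the IP id, so all
 * fragments of the same datagram land in the same bucket.
 */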

/* IP fragment reassembly queues (protected by ipqlock) */
static TAILQ_HEAD(ipqhead, ipq) ipq[IPREASS_NHASH]; /* ip reassembly queues */
static int maxnipq; /* max packets in reass queues */
static u_int32_t maxfragsperpacket; /* max frags/packet in reass queues */
static u_int32_t nipq; /* # of packets in reass queues */
static u_int32_t ipq_limit; /* ipq allocation limit */
static u_int32_t ipq_count; /* current # of allocated ipq's */

static int sysctl_ipforwarding SYSCTL_HANDLER_ARGS;
static int sysctl_maxnipq SYSCTL_HANDLER_ARGS;
static int sysctl_maxfragsperpacket SYSCTL_HANDLER_ARGS;

#if (DEBUG || DEVELOPMENT)
static int sysctl_reset_ip_input_stats SYSCTL_HANDLER_ARGS;
static int sysctl_ip_input_measure_bins SYSCTL_HANDLER_ARGS;
static int sysctl_ip_input_getperf SYSCTL_HANDLER_ARGS;
#endif /* (DEBUG || DEVELOPMENT) */

int ipforwarding = 0;
SYSCTL_PROC(_net_inet_ip, IPCTL_FORWARDING, forwarding,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &ipforwarding, 0,
    sysctl_ipforwarding, "I", "Enable IP forwarding between interfaces");

static int ipsendredirects = 1; /* XXX */
SYSCTL_INT(_net_inet_ip, IPCTL_SENDREDIRECTS, redirect,
    CTLFLAG_RW | CTLFLAG_LOCKED, &ipsendredirects, 0,
    "Enable sending IP redirects");

int ip_defttl = IPDEFTTL;
SYSCTL_INT(_net_inet_ip, IPCTL_DEFTTL, ttl, CTLFLAG_RW | CTLFLAG_LOCKED,
    &ip_defttl, 0, "Maximum TTL on IP packets");

static int ip_dosourceroute = 0;
SYSCTL_INT(_net_inet_ip, IPCTL_SOURCEROUTE, sourceroute,
    CTLFLAG_RW | CTLFLAG_LOCKED, &ip_dosourceroute, 0,
    "Enable forwarding source routed IP packets");

static int ip_acceptsourceroute = 0;
SYSCTL_INT(_net_inet_ip, IPCTL_ACCEPTSOURCEROUTE, accept_sourceroute,
    CTLFLAG_RW | CTLFLAG_LOCKED, &ip_acceptsourceroute, 0,
    "Enable accepting source routed IP packets");

static int ip_sendsourcequench = 0;
SYSCTL_INT(_net_inet_ip, OID_AUTO, sendsourcequench,
    CTLFLAG_RW | CTLFLAG_LOCKED, &ip_sendsourcequench, 0,
    "Enable the transmission of source quench packets");

SYSCTL_PROC(_net_inet_ip, OID_AUTO, maxfragpackets,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &maxnipq, 0, sysctl_maxnipq,
    "I", "Maximum number of IPv4 fragment reassembly queue entries");

SYSCTL_UINT(_net_inet_ip, OID_AUTO, fragpackets, CTLFLAG_RD | CTLFLAG_LOCKED,
    &nipq, 0, "Current number of IPv4 fragment reassembly queue entries");

SYSCTL_PROC(_net_inet_ip, OID_AUTO, maxfragsperpacket,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED, &maxfragsperpacket, 0,
    sysctl_maxfragsperpacket, "I",
    "Maximum number of IPv4 fragments allowed per packet");

static uint32_t ip_adj_clear_hwcksum = 0;
SYSCTL_UINT(_net_inet_ip, OID_AUTO, adj_clear_hwcksum,
    CTLFLAG_RW | CTLFLAG_LOCKED, &ip_adj_clear_hwcksum, 0,
    "Invalidate hwcksum info when adjusting length");

static uint32_t ip_adj_partial_sum = 1;
SYSCTL_UINT(_net_inet_ip, OID_AUTO, adj_partial_sum,
    CTLFLAG_RW | CTLFLAG_LOCKED, &ip_adj_partial_sum, 0,
    "Perform partial sum adjustment of trailing bytes at IP layer");

/*
 * ip_checkinterface controls the receive side of the models for multihoming
 * that are discussed in RFC 1122.
 *
 * ip_checkinterface values are:
 * IP_CHECKINTERFACE_WEAK_ES:
 *	This corresponds to the Weak End-System model where incoming packets from
 *	any interface are accepted provided the destination address of the incoming packet
 *	is assigned to some interface.
 *
 * IP_CHECKINTERFACE_HYBRID_ES:
 *	The Hybrid End-System model uses the Strong End-System model for tunnel
 *	interfaces (ipsec and utun) and the Weak End-System model for other
 *	interface families.  This prevents a rogue middle box from probing for
 *	signs of TCP connections that use the tunnel interface.
 *
 * IP_CHECKINTERFACE_STRONG_ES:
 *	The Strong End-System model requires that the packet arrive on an
 *	interface that is assigned the destination address of the packet.
 *
 * Since the routing table and transmit implementation do not implement the Strong ES model,
 * setting this to a value different from IP_CHECKINTERFACE_WEAK_ES may lead to unexpected results.
 *
 * When forwarding is enabled, the system reverts to the Weak ES model as a router
 * is expected by design to receive packets from several interfaces to the same address.
 *
 * XXX - ip_checkinterface currently must be set to IP_CHECKINTERFACE_WEAK_ES if you use ipnat
 * to translate the destination address to another local interface.
 *
 * XXX - ip_checkinterface must be set to IP_CHECKINTERFACE_WEAK_ES if you add IP aliases
 * to the loopback interface instead of the interface where the
 * packets for those addresses are received.
 */
#define IP_CHECKINTERFACE_WEAK_ES	0
#define IP_CHECKINTERFACE_HYBRID_ES	1
#define IP_CHECKINTERFACE_STRONG_ES	2

static int ip_checkinterface = IP_CHECKINTERFACE_HYBRID_ES;

static int sysctl_ip_checkinterface SYSCTL_HANDLER_ARGS;
SYSCTL_PROC(_net_inet_ip, OID_AUTO, check_interface,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    0, 0, sysctl_ip_checkinterface, "I", "Verify packet arrives on correct interface");
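
/*
 * Usage note (editor's note, not in the original source): the model can be
 * selected at runtime through the sysctl declared above, e.g.
 *
 *	sysctl -w net.inet.ip.check_interface=2	# Strong ES model
 *
 * or at boot via the "ip_checkinterface" boot-arg parsed in ip_init()
 * below, where out-of-range values leave the default in place.
 */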

#if (DEBUG || DEVELOPMENT)
#define IP_CHECK_IF_DEBUG 1
#else
#define IP_CHECK_IF_DEBUG 0
#endif /* (DEBUG || DEVELOPMENT) */
static int ip_checkinterface_debug = IP_CHECK_IF_DEBUG;
SYSCTL_INT(_net_inet_ip, OID_AUTO, checkinterface_debug, CTLFLAG_RW | CTLFLAG_LOCKED,
    &ip_checkinterface_debug, IP_CHECK_IF_DEBUG, "");

static int ip_chaining = 1;
SYSCTL_INT(_net_inet_ip, OID_AUTO, rx_chaining, CTLFLAG_RW | CTLFLAG_LOCKED,
    &ip_chaining, 1, "Do receive side ip address based chaining");

static int ip_chainsz = 6;
SYSCTL_INT(_net_inet_ip, OID_AUTO, rx_chainsz, CTLFLAG_RW | CTLFLAG_LOCKED,
    &ip_chainsz, 1, "IP receive side max chaining");

#if (DEBUG || DEVELOPMENT)
static int ip_input_measure = 0;
SYSCTL_PROC(_net_inet_ip, OID_AUTO, input_perf,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    &ip_input_measure, 0, sysctl_reset_ip_input_stats, "I", "Do time measurement");

static uint64_t ip_input_measure_bins = 0;
SYSCTL_PROC(_net_inet_ip, OID_AUTO, input_perf_bins,
    CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED, &ip_input_measure_bins, 0,
    sysctl_ip_input_measure_bins, "I",
    "bins for chaining performance data histogram");

static net_perf_t net_perf;
SYSCTL_PROC(_net_inet_ip, OID_AUTO, input_perf_data,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_ip_input_getperf, "S,net_perf",
    "IP input performance data (struct net_perf, net/net_perf.h)");
#endif /* (DEBUG || DEVELOPMENT) */

#if DIAGNOSTIC
static int ipprintfs = 0;
#endif

struct protosw *ip_protox[IPPROTO_MAX];

static lck_grp_attr_t *in_ifaddr_rwlock_grp_attr;
static lck_grp_t *in_ifaddr_rwlock_grp;
static lck_attr_t *in_ifaddr_rwlock_attr;
decl_lck_rw_data(, in_ifaddr_rwlock_data);
lck_rw_t *in_ifaddr_rwlock = &in_ifaddr_rwlock_data;

/* Protected by in_ifaddr_rwlock */
struct in_ifaddrhead in_ifaddrhead; /* first inet address */
struct in_ifaddrhashhead *in_ifaddrhashtbl; /* inet addr hash table */

#define INADDR_NHASH 61
static u_int32_t inaddr_nhash; /* hash table size */
static u_int32_t inaddr_hashp; /* next largest prime */

static int ip_getstat SYSCTL_HANDLER_ARGS;
struct ipstat ipstat;
SYSCTL_PROC(_net_inet_ip, IPCTL_STATS, stats,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, ip_getstat, "S,ipstat",
    "IP statistics (struct ipstat, netinet/ip_var.h)");

#if IPCTL_DEFMTU
SYSCTL_INT(_net_inet_ip, IPCTL_DEFMTU, mtu, CTLFLAG_RW | CTLFLAG_LOCKED,
    &ip_mtu, 0, "Default MTU");
#endif /* IPCTL_DEFMTU */

#if IPSTEALTH
static int ipstealth = 0;
SYSCTL_INT(_net_inet_ip, OID_AUTO, stealth, CTLFLAG_RW | CTLFLAG_LOCKED,
    &ipstealth, 0, "");
#endif /* IPSTEALTH */

/* Firewall hooks */
#if IPFIREWALL
ip_fw_chk_t *ip_fw_chk_ptr;
int fw_enable = 1;
int fw_bypass = 1;
int fw_one_pass = 0;
#endif /* IPFIREWALL */

#if DUMMYNET
ip_dn_io_t *ip_dn_io_ptr;
#endif /* DUMMYNET */

SYSCTL_NODE(_net_inet_ip, OID_AUTO, linklocal,
    CTLFLAG_RW | CTLFLAG_LOCKED, 0, "link local");

struct ip_linklocal_stat ip_linklocal_stat;
SYSCTL_STRUCT(_net_inet_ip_linklocal, OID_AUTO, stat,
    CTLFLAG_RD | CTLFLAG_LOCKED, &ip_linklocal_stat, ip_linklocal_stat,
    "Number of link local packets with TTL less than 255");

SYSCTL_NODE(_net_inet_ip_linklocal, OID_AUTO, in,
    CTLFLAG_RW | CTLFLAG_LOCKED, 0, "link local input");

int ip_linklocal_in_allowbadttl = 1;
SYSCTL_INT(_net_inet_ip_linklocal_in, OID_AUTO, allowbadttl,
    CTLFLAG_RW | CTLFLAG_LOCKED, &ip_linklocal_in_allowbadttl, 0,
    "Allow incoming link local packets with TTL less than 255");


/*
 * We need to save the IP options in case a protocol wants to respond
 * to an incoming packet over the same route if the packet got here
 * using IP source routing.  This allows connection establishment and
 * maintenance when the remote end is on a network that is not known
 * to us.
 */
static int ip_nhops = 0;
static struct ip_srcrt {
	struct in_addr dst;	/* final destination */
	char nop;		/* one NOP to align */
	char srcopt[IPOPT_OFFSET + 1];	/* OPTVAL, OLEN and OFFSET */
	struct in_addr route[MAX_IPOPTLEN / sizeof(struct in_addr)];
} ip_srcrt;
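
/*
 * Layout sketch (editor's note, not in the original): save_rte() below
 * copies an incoming source-route option into ip_srcrt so that a reply can
 * be source routed back.  The single NOP keeps the option data aligned;
 * srcopt[] holds the option type, length and offset bytes, route[] the
 * recorded hop addresses, and dst the final destination.
 */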

static void in_ifaddrhashtbl_init(void);
static void save_rte(u_char *, struct in_addr);
static int ip_dooptions(struct mbuf *, int, struct sockaddr_in *);
static void ip_forward(struct mbuf *, int, struct sockaddr_in *);
static void frag_freef(struct ipqhead *, struct ipq *);
#if IPDIVERT
#ifdef IPDIVERT_44
static struct mbuf *ip_reass(struct mbuf *, u_int32_t *, u_int16_t *);
#else /* !IPDIVERT_44 */
static struct mbuf *ip_reass(struct mbuf *, u_int16_t *, u_int16_t *);
#endif /* !IPDIVERT_44 */
#else /* !IPDIVERT */
static struct mbuf *ip_reass(struct mbuf *);
#endif /* !IPDIVERT */
static void ip_fwd_route_copyout(struct ifnet *, struct route *);
static void ip_fwd_route_copyin(struct ifnet *, struct route *);
static inline u_short ip_cksum(struct mbuf *, int);

int ip_use_randomid = 1;
SYSCTL_INT(_net_inet_ip, OID_AUTO, random_id, CTLFLAG_RW | CTLFLAG_LOCKED,
    &ip_use_randomid, 0, "Randomize IP packets IDs");

/*
 * On platforms which require strict alignment (currently for anything but
 * i386 or x86_64), check if the IP header pointer is 32-bit aligned; if not,
 * copy the contents of the mbuf chain into a new chain, and free the original
 * one.  Create some head room in the first mbuf of the new chain, in case
 * it's needed later on.
 */
#if defined(__i386__) || defined(__x86_64__)
#define IP_HDR_ALIGNMENT_FIXUP(_m, _ifp, _action) do { } while (0)
#else /* !__i386__ && !__x86_64__ */
#define IP_HDR_ALIGNMENT_FIXUP(_m, _ifp, _action) do { \
	if (!IP_HDR_ALIGNED_P(mtod(_m, caddr_t))) { \
		struct mbuf *_n; \
		struct ifnet *__ifp = (_ifp); \
		atomic_add_64(&(__ifp)->if_alignerrs, 1); \
		if (((_m)->m_flags & M_PKTHDR) && \
		    (_m)->m_pkthdr.pkt_hdr != NULL) \
			(_m)->m_pkthdr.pkt_hdr = NULL; \
		_n = m_defrag_offset(_m, max_linkhdr, M_NOWAIT); \
		if (_n == NULL) { \
			atomic_add_32(&ipstat.ips_toosmall, 1); \
			m_freem(_m); \
			(_m) = NULL; \
			_action; \
		} else { \
			VERIFY(_n != (_m)); \
			(_m) = _n; \
		} \
	} \
} while (0)
#endif /* !__i386__ && !__x86_64__ */
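
/*
 * Usage sketch (editor's note): the third argument is a statement executed
 * when the fixup fails, after the mbuf has already been freed and set to
 * NULL, e.g.
 *
 *	IP_HDR_ALIGNMENT_FIXUP(m, inifp, goto bad);
 *	IP_HDR_ALIGNMENT_FIXUP(m, m->m_pkthdr.rcvif, return );
 *
 * as done in ip_input_first_pass() and ip_proto_dispatch_in() below.
 */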


typedef enum ip_check_if_result {
	IP_CHECK_IF_NONE = 0,
	IP_CHECK_IF_OURS = 1,
	IP_CHECK_IF_DROP = 2,
	IP_CHECK_IF_FORWARD = 3
} ip_check_if_result_t;

static ip_check_if_result_t ip_input_check_interface(struct mbuf **, struct ip *, struct ifnet *);

/*
 * GRE input handler function, settable via ip_gre_register_input() for PPTP.
 */
static gre_input_func_t gre_input_func;

static void
ip_init_delayed(void)
{
	struct ifreq ifr;
	int error;
	struct sockaddr_in *sin;

	bzero(&ifr, sizeof(ifr));
	strlcpy(ifr.ifr_name, "lo0", sizeof(ifr.ifr_name));
	sin = (struct sockaddr_in *)(void *)&ifr.ifr_addr;
	sin->sin_len = sizeof(struct sockaddr_in);
	sin->sin_family = AF_INET;
	sin->sin_addr.s_addr = htonl(INADDR_LOOPBACK);
	error = in_control(NULL, SIOCSIFADDR, (caddr_t)&ifr, lo_ifp, kernproc);
	if (error) {
		printf("%s: failed to initialise lo0's address, error=%d\n",
		    __func__, error);
	}
}
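
/*
 * Editor's note (not in the original): ip_init_delayed() is registered via
 * net_init_add() at the end of ip_init() below, so lo0 is assigned
 * 127.0.0.1 (INADDR_LOOPBACK) only once the networking stack is fully
 * initialized.
 */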

/*
 * IP initialization: fill in IP protocol switch table.
 * All protocols not implemented in kernel go to raw IP protocol handler.
 */
void
ip_init(struct protosw *pp, struct domain *dp)
{
	static int ip_initialized = 0;
	struct protosw *pr;
	struct timeval tv;
	int i;

	domain_proto_mtx_lock_assert_held();
	VERIFY((pp->pr_flags & (PR_INITIALIZED | PR_ATTACHED)) == PR_ATTACHED);

	/* ipq_alloc() uses mbufs for IP fragment queue structures */
	_CASSERT(sizeof(struct ipq) <= _MLEN);

	/*
	 * Some ioctls (e.g. SIOCAIFADDR) use ifaliasreq struct, which is
	 * interchangeable with in_aliasreq; they must have the same size.
	 */
	_CASSERT(sizeof(struct ifaliasreq) == sizeof(struct in_aliasreq));

	if (ip_initialized) {
		return;
	}
	ip_initialized = 1;

	in_ifaddr_init();

	in_ifaddr_rwlock_grp_attr = lck_grp_attr_alloc_init();
	in_ifaddr_rwlock_grp = lck_grp_alloc_init("in_ifaddr_rwlock",
	    in_ifaddr_rwlock_grp_attr);
	in_ifaddr_rwlock_attr = lck_attr_alloc_init();
	lck_rw_init(in_ifaddr_rwlock, in_ifaddr_rwlock_grp,
	    in_ifaddr_rwlock_attr);

	TAILQ_INIT(&in_ifaddrhead);
	in_ifaddrhashtbl_init();

	ip_moptions_init();

	pr = pffindproto_locked(PF_INET, IPPROTO_RAW, SOCK_RAW);
	if (pr == NULL) {
		panic("%s: Unable to find [PF_INET,IPPROTO_RAW,SOCK_RAW]\n",
		    __func__);
		/* NOTREACHED */
	}

	/* Initialize the entire ip_protox[] array to IPPROTO_RAW. */
	for (i = 0; i < IPPROTO_MAX; i++) {
		ip_protox[i] = pr;
	}
	/*
	 * Cycle through IP protocols and put them into the appropriate place
	 * in ip_protox[], skipping protocols IPPROTO_{IP,RAW}.
	 */
	VERIFY(dp == inetdomain && dp->dom_family == PF_INET);
	TAILQ_FOREACH(pr, &dp->dom_protosw, pr_entry) {
		VERIFY(pr->pr_domain == dp);
		if (pr->pr_protocol != 0 && pr->pr_protocol != IPPROTO_RAW) {
			/* Be careful to only index valid IP protocols. */
			if (pr->pr_protocol < IPPROTO_MAX) {
				ip_protox[pr->pr_protocol] = pr;
			}
		}
	}

	/* IP fragment reassembly queue lock */
	ipqlock_grp_attr = lck_grp_attr_alloc_init();
	ipqlock_grp = lck_grp_alloc_init("ipqlock", ipqlock_grp_attr);
	ipqlock_attr = lck_attr_alloc_init();
	lck_mtx_init(&ipqlock, ipqlock_grp, ipqlock_attr);

	lck_mtx_lock(&ipqlock);
	/* Initialize IP reassembly queue. */
	for (i = 0; i < IPREASS_NHASH; i++) {
		TAILQ_INIT(&ipq[i]);
	}

	maxnipq = nmbclusters / 32;
	maxfragsperpacket = 128; /* enough for 64k in 512 byte fragments */
	ipq_updateparams();
	lck_mtx_unlock(&ipqlock);

	getmicrotime(&tv);
	ip_id = RandomULong() ^ tv.tv_usec;
	ip_initid();

	ipf_init();

	PE_parse_boot_argn("ip_checkinterface", &i, sizeof(i));
	switch (i) {
	case IP_CHECKINTERFACE_WEAK_ES:
	case IP_CHECKINTERFACE_HYBRID_ES:
	case IP_CHECKINTERFACE_STRONG_ES:
		ip_checkinterface = i;
		break;
	default:
		break;
	}

#if IPSEC
	sadb_stat_mutex_grp_attr = lck_grp_attr_alloc_init();
	sadb_stat_mutex_grp = lck_grp_alloc_init("sadb_stat",
	    sadb_stat_mutex_grp_attr);
	sadb_stat_mutex_attr = lck_attr_alloc_init();
	lck_mtx_init(sadb_stat_mutex, sadb_stat_mutex_grp,
	    sadb_stat_mutex_attr);

#endif
	arp_init();
	net_init_add(ip_init_delayed);
}

/*
 * Initialize IPv4 source address hash table.
 */
static void
in_ifaddrhashtbl_init(void)
{
	int i, k, p;

	if (in_ifaddrhashtbl != NULL) {
		return;
	}

	PE_parse_boot_argn("inaddr_nhash", &inaddr_nhash,
	    sizeof(inaddr_nhash));
	if (inaddr_nhash == 0) {
		inaddr_nhash = INADDR_NHASH;
	}

	MALLOC(in_ifaddrhashtbl, struct in_ifaddrhashhead *,
	    inaddr_nhash * sizeof(*in_ifaddrhashtbl),
	    M_IFADDR, M_WAITOK | M_ZERO);
	if (in_ifaddrhashtbl == NULL) {
		panic("in_ifaddrhashtbl_init allocation failed");
	}

	/*
	 * Generate the next largest prime greater than inaddr_nhash.
	 */
	k = (inaddr_nhash % 2 == 0) ? inaddr_nhash + 1 : inaddr_nhash + 2;
	for (;;) {
		p = 1;
		for (i = 3; i * i <= k; i += 2) {
			if (k % i == 0) {
				p = 0;
			}
		}
		if (p == 1) {
			break;
		}
		k += 2;
	}
	inaddr_hashp = k;
}

u_int32_t
inaddr_hashval(u_int32_t key)
{
	/*
	 * The hash index is the computed prime times the key modulo
	 * the hash size, as documented in "Introduction to Algorithms"
	 * (Cormen, Leiserson, Rivest).
	 */
	if (inaddr_nhash > 1) {
		return (key * inaddr_hashp) % inaddr_nhash;
	} else {
		return 0;
	}
}
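
/*
 * Worked example (editor's note, illustrative): with the default
 * INADDR_NHASH of 61, in_ifaddrhashtbl_init() picks inaddr_hashp = 67
 * (63 and 65 are composite, 67 is prime), so a key of 1000 hashes to
 * (1000 * 67) % 61 = 67000 % 61 = 22.
 */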

void
ip_proto_dispatch_in_wrapper(struct mbuf *m, int hlen, u_int8_t proto)
{
	ip_proto_dispatch_in(m, hlen, proto, 0);
}

__private_extern__ void
ip_proto_dispatch_in(struct mbuf *m, int hlen, u_int8_t proto,
    ipfilter_t inject_ipfref)
{
	struct ipfilter *filter;
	int seen = (inject_ipfref == NULL);
	int changed_header = 0;
	struct ip *ip;
	void (*pr_input)(struct mbuf *, int len);

	if (!TAILQ_EMPTY(&ipv4_filters)) {
		ipf_ref();
		TAILQ_FOREACH(filter, &ipv4_filters, ipf_link) {
			if (seen == 0) {
				if ((struct ipfilter *)inject_ipfref == filter) {
					seen = 1;
				}
			} else if (filter->ipf_filter.ipf_input) {
				errno_t result;

				if (changed_header == 0) {
					/*
					 * Perform IP header alignment fixup,
					 * if needed, before passing packet
					 * into filter(s).
					 */
					IP_HDR_ALIGNMENT_FIXUP(m,
					    m->m_pkthdr.rcvif, ipf_unref());

					/* ipf_unref() already called */
					if (m == NULL) {
						return;
					}

					changed_header = 1;
					ip = mtod(m, struct ip *);
					ip->ip_len = htons(ip->ip_len + hlen);
					ip->ip_off = htons(ip->ip_off);
					ip->ip_sum = 0;
					ip->ip_sum = ip_cksum_hdr_in(m, hlen);
				}
				result = filter->ipf_filter.ipf_input(
					filter->ipf_filter.cookie, (mbuf_t *)&m,
					hlen, proto);
				if (result == EJUSTRETURN) {
					ipf_unref();
					return;
				}
				if (result != 0) {
					ipf_unref();
					m_freem(m);
					return;
				}
			}
		}
		ipf_unref();
	}

	/* Perform IP header alignment fixup (post-filters), if needed */
	IP_HDR_ALIGNMENT_FIXUP(m, m->m_pkthdr.rcvif, return );

	ip = mtod(m, struct ip *);

	if (changed_header) {
		ip->ip_len = ntohs(ip->ip_len) - hlen;
		ip->ip_off = ntohs(ip->ip_off);
	}

	/*
	 * If there isn't a specific lock for the protocol
	 * we're about to call, use the generic lock for AF_INET;
	 * otherwise let the protocol deal with its own locking.
	 */
	if ((pr_input = ip_protox[ip->ip_p]->pr_input) == NULL) {
		m_freem(m);
	} else if (!(ip_protox[ip->ip_p]->pr_flags & PR_PROTOLOCK)) {
		lck_mtx_lock(inet_domain_mutex);
		pr_input(m, hlen);
		lck_mtx_unlock(inet_domain_mutex);
	} else {
		pr_input(m, hlen);
	}
}
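
/*
 * Editor's note (illustrative): ipf_input filters above see the header in
 * wire format -- ip_len restored to include the header, ip_len/ip_off in
 * network byte order, and a valid header checksum -- which is why the
 * fields are converted before the filter walk and converted back when
 * changed_header is set.
 */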

struct pktchain_elm {
	struct mbuf *pkte_head;
	struct mbuf *pkte_tail;
	struct in_addr pkte_saddr;
	struct in_addr pkte_daddr;
	uint16_t pkte_npkts;
	uint16_t pkte_proto;
	uint32_t pkte_nbytes;
};

typedef struct pktchain_elm pktchain_elm_t;

/* Store up to PKTTBL_SZ unique flows on the stack */
#define PKTTBL_SZ 7
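
/*
 * Chaining sketch (editor's note): ip_input_first_pass() validates each
 * packet, and ip_chain_insert() below files it into one of the PKTTBL_SZ
 * stack-resident buckets, hashed by source address; a bucket only chains
 * packets sharing (src, dst, proto).  A non-NULL return means the packet
 * collided with a different flow and must be dispatched on its own.
 */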

static struct mbuf *
ip_chain_insert(struct mbuf *packet, pktchain_elm_t *tbl)
{
	struct ip* ip;
	int pkttbl_idx = 0;

	ip = mtod(packet, struct ip*);

	/* reusing the hash function from inaddr_hashval */
	pkttbl_idx = inaddr_hashval(ntohs(ip->ip_src.s_addr)) % PKTTBL_SZ;
	if (tbl[pkttbl_idx].pkte_head == NULL) {
		tbl[pkttbl_idx].pkte_head = packet;
		tbl[pkttbl_idx].pkte_saddr.s_addr = ip->ip_src.s_addr;
		tbl[pkttbl_idx].pkte_daddr.s_addr = ip->ip_dst.s_addr;
		tbl[pkttbl_idx].pkte_proto = ip->ip_p;
	} else {
		if (ip->ip_dst.s_addr != tbl[pkttbl_idx].pkte_daddr.s_addr ||
		    ip->ip_src.s_addr != tbl[pkttbl_idx].pkte_saddr.s_addr ||
		    ip->ip_p != tbl[pkttbl_idx].pkte_proto) {
			return packet;
		}
	}
	if (tbl[pkttbl_idx].pkte_tail != NULL) {
		mbuf_setnextpkt(tbl[pkttbl_idx].pkte_tail, packet);
	}

	tbl[pkttbl_idx].pkte_tail = packet;
	tbl[pkttbl_idx].pkte_npkts += 1;
	tbl[pkttbl_idx].pkte_nbytes += packet->m_pkthdr.len;
	return NULL;
}

/* args is a dummy variable here for backward compatibility */
static void
ip_input_second_pass_loop_tbl(pktchain_elm_t *tbl, struct ip_fw_in_args *args)
{
	int i = 0;

	for (i = 0; i < PKTTBL_SZ; i++) {
		if (tbl[i].pkte_head != NULL) {
			struct mbuf *m = tbl[i].pkte_head;
			ip_input_second_pass(m, m->m_pkthdr.rcvif, 0,
			    tbl[i].pkte_npkts, tbl[i].pkte_nbytes, args, 0);

			if (tbl[i].pkte_npkts > 2) {
				ipstat.ips_rxc_chainsz_gt2++;
			}
			if (tbl[i].pkte_npkts > 4) {
				ipstat.ips_rxc_chainsz_gt4++;
			}
#if (DEBUG || DEVELOPMENT)
			if (ip_input_measure) {
				net_perf_histogram(&net_perf, tbl[i].pkte_npkts);
			}
#endif /* (DEBUG || DEVELOPMENT) */
			tbl[i].pkte_head = tbl[i].pkte_tail = NULL;
			tbl[i].pkte_npkts = 0;
			tbl[i].pkte_nbytes = 0;
			/* no need to initialize address and protocol in tbl */
		}
	}
}

static void
ip_input_cpout_args(struct ip_fw_in_args *args, struct ip_fw_args *args1,
    boolean_t *done_init)
{
	if (*done_init == FALSE) {
		bzero(args1, sizeof(struct ip_fw_args));
		*done_init = TRUE;
	}
	args1->fwa_next_hop = args->fwai_next_hop;
	args1->fwa_ipfw_rule = args->fwai_ipfw_rule;
	args1->fwa_pf_rule = args->fwai_pf_rule;
	args1->fwa_divert_rule = args->fwai_divert_rule;
}

static void
ip_input_cpin_args(struct ip_fw_args *args1, struct ip_fw_in_args *args)
{
	args->fwai_next_hop = args1->fwa_next_hop;
	args->fwai_ipfw_rule = args1->fwa_ipfw_rule;
	args->fwai_pf_rule = args1->fwa_pf_rule;
	args->fwai_divert_rule = args1->fwa_divert_rule;
}

typedef enum {
	IPINPUT_DOCHAIN = 0,
	IPINPUT_DONTCHAIN,
	IPINPUT_FREED,
	IPINPUT_DONE
} ipinput_chain_ret_t;
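
/*
 * Editor's note (illustrative summary): IPINPUT_DOCHAIN means the packet
 * may be chained with others of the same flow; IPINPUT_DONTCHAIN means it
 * must take the second pass alone (e.g. fragments, divert/tee candidates,
 * PR_LASTHDR protocols); IPINPUT_FREED and IPINPUT_DONE mean the mbuf has
 * already been consumed.
 */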

static void
ip_input_update_nstat(struct ifnet *ifp, struct in_addr src_ip,
    u_int32_t packets, u_int32_t bytes)
{
	if (nstat_collect) {
		struct rtentry *rt = ifnet_cached_rtlookup_inet(ifp,
		    src_ip);
		if (rt != NULL) {
			nstat_route_rx(rt, packets, bytes, 0);
			rtfree(rt);
		}
	}
}

static void
ip_input_dispatch_chain(struct mbuf *m)
{
	struct mbuf *tmp_mbuf = m;
	struct mbuf *nxt_mbuf = NULL;
	struct ip *ip = NULL;
	unsigned int hlen;

	ip = mtod(tmp_mbuf, struct ip *);
	hlen = IP_VHL_HL(ip->ip_vhl) << 2;
	while (tmp_mbuf != NULL) {
		nxt_mbuf = mbuf_nextpkt(tmp_mbuf);
		mbuf_setnextpkt(tmp_mbuf, NULL);

		if ((sw_lro) && (ip->ip_p == IPPROTO_TCP)) {
			tmp_mbuf = tcp_lro(tmp_mbuf, hlen);
		}
		if (tmp_mbuf) {
			ip_proto_dispatch_in(tmp_mbuf, hlen, ip->ip_p, 0);
		}
		tmp_mbuf = nxt_mbuf;
		if (tmp_mbuf) {
			ip = mtod(tmp_mbuf, struct ip *);
			/* first mbuf of chain already has adjusted ip_len */
			hlen = IP_VHL_HL(ip->ip_vhl) << 2;
			ip->ip_len -= hlen;
		}
	}
}

static void
ip_input_setdst_chain(struct mbuf *m, uint32_t ifindex, struct in_ifaddr *ia)
{
	struct mbuf *tmp_mbuf = m;

	while (tmp_mbuf != NULL) {
		ip_setdstifaddr_info(tmp_mbuf, ifindex, ia);
		tmp_mbuf = mbuf_nextpkt(tmp_mbuf);
	}
}

static void
ip_input_adjust(struct mbuf *m, struct ip *ip, struct ifnet *inifp)
{
	boolean_t adjust = TRUE;

	ASSERT(m_pktlen(m) > ip->ip_len);

	/*
	 * Invalidate hardware checksum info if ip_adj_clear_hwcksum
	 * is set; useful to handle buggy drivers.  Note that this
	 * should not be enabled by default, as we may get here due
	 * to link-layer padding.
	 */
	if (ip_adj_clear_hwcksum &&
	    (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) &&
	    !(inifp->if_flags & IFF_LOOPBACK) &&
	    !(m->m_pkthdr.pkt_flags & PKTF_LOOP)) {
		m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID;
		m->m_pkthdr.csum_data = 0;
		ipstat.ips_adj_hwcsum_clr++;
	}

	/*
	 * If partial checksum information is available, subtract
	 * out the partial sum of postpended extraneous bytes, and
	 * update the checksum metadata accordingly.  By doing it
	 * here, the upper layer transport only needs to adjust any
	 * prepended extraneous bytes (else it will do both.)
	 */
	if (ip_adj_partial_sum &&
	    (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PARTIAL)) ==
	    (CSUM_DATA_VALID | CSUM_PARTIAL)) {
		m->m_pkthdr.csum_rx_val = m_adj_sum16(m,
		    m->m_pkthdr.csum_rx_start, m->m_pkthdr.csum_rx_start,
		    (ip->ip_len - m->m_pkthdr.csum_rx_start),
		    m->m_pkthdr.csum_rx_val);
	} else if ((m->m_pkthdr.csum_flags &
	    (CSUM_DATA_VALID | CSUM_PARTIAL)) ==
	    (CSUM_DATA_VALID | CSUM_PARTIAL)) {
		/*
		 * If packet has partial checksum info and we decided not
		 * to subtract the partial sum of postpended extraneous
		 * bytes here (not the default case), leave that work to
		 * be handled by the other layers.  For now, only TCP, UDP
		 * layers are capable of dealing with this.  For all other
		 * protocols (including fragments), trim and ditch the
		 * partial sum as those layers might not implement partial
		 * checksumming (or adjustment) at all.
		 */
		if ((ip->ip_off & (IP_MF | IP_OFFMASK)) == 0 &&
		    (ip->ip_p == IPPROTO_TCP || ip->ip_p == IPPROTO_UDP)) {
			adjust = FALSE;
		} else {
			m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID;
			m->m_pkthdr.csum_data = 0;
			ipstat.ips_adj_hwcsum_clr++;
		}
	}

	if (adjust) {
		ipstat.ips_adj++;
		if (m->m_len == m->m_pkthdr.len) {
			m->m_len = ip->ip_len;
			m->m_pkthdr.len = ip->ip_len;
		} else {
			m_adj(m, ip->ip_len - m->m_pkthdr.len);
		}
	}
}
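
/*
 * Example (editor's note, illustrative): a driver may deliver an Ethernet
 * frame padded to the 46-byte minimum payload; a 40-byte datagram (20-byte
 * IP header plus 20-byte TCP header) then arrives with m_pktlen() == 46
 * while ip->ip_len == 40, and the 6 trailing pad bytes are trimmed here
 * (and subtracted from the partial checksum when CSUM_PARTIAL is set).
 */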

/*
 * The first pass does all essential packet validation and places the packet
 * on a per-flow queue, so that operations with the same outcome for every
 * packet of a flow are performed once per chain.
 * div_info is packet divert/tee info
 */
static ipinput_chain_ret_t
ip_input_first_pass(struct mbuf *m, u_int32_t *div_info,
    struct ip_fw_in_args *args, int *ours, struct mbuf **modm)
{
	struct ip *ip;
	struct ifnet *inifp;
	unsigned int hlen;
	int retval = IPINPUT_DOCHAIN;
	int len = 0;
	struct in_addr src_ip;
#if IPFIREWALL
	int i;
#endif
#if IPFIREWALL || DUMMYNET
	struct m_tag *copy;
	struct m_tag *p;
	boolean_t delete = FALSE;
	struct ip_fw_args args1;
	boolean_t init = FALSE;
#endif
	ipfilter_t inject_filter_ref = NULL;

#if !IPFIREWALL
#pragma unused (args)
#endif

#if !IPDIVERT
#pragma unused (div_info)
#pragma unused (ours)
#endif

#if !IPFIREWALL_FORWARD
#pragma unused (ours)
#endif

	/* Check if the mbuf is still valid after interface filter processing */
	MBUF_INPUT_CHECK(m, m->m_pkthdr.rcvif);
	inifp = mbuf_pkthdr_rcvif(m);
	VERIFY(inifp != NULL);

	/* Perform IP header alignment fixup, if needed */
	IP_HDR_ALIGNMENT_FIXUP(m, inifp, goto bad);

	m->m_pkthdr.pkt_flags &= ~PKTF_FORWARDED;

#if IPFIREWALL || DUMMYNET

	/*
	 * Don't bother searching for tag(s) if there's none.
	 */
	if (SLIST_EMPTY(&m->m_pkthdr.tags)) {
		goto ipfw_tags_done;
	}

	/* Grab info from mtags prepended to the chain */
	p = m_tag_first(m);
	while (p) {
		if (p->m_tag_id == KERNEL_MODULE_TAG_ID) {
#if DUMMYNET
			if (p->m_tag_type == KERNEL_TAG_TYPE_DUMMYNET) {
				struct dn_pkt_tag *dn_tag;

				dn_tag = (struct dn_pkt_tag *)(p + 1);
				args->fwai_ipfw_rule = dn_tag->dn_ipfw_rule;
				args->fwai_pf_rule = dn_tag->dn_pf_rule;
				delete = TRUE;
			}
#endif

#if IPDIVERT
			if (p->m_tag_type == KERNEL_TAG_TYPE_DIVERT) {
				struct divert_tag *div_tag;

				div_tag = (struct divert_tag *)(p + 1);
				args->fwai_divert_rule = div_tag->cookie;
				delete = TRUE;
			}
#endif

			if (p->m_tag_type == KERNEL_TAG_TYPE_IPFORWARD) {
				struct ip_fwd_tag *ipfwd_tag;

				ipfwd_tag = (struct ip_fwd_tag *)(p + 1);
				args->fwai_next_hop = ipfwd_tag->next_hop;
				delete = TRUE;
			}

			if (delete) {
				copy = p;
				p = m_tag_next(m, p);
				m_tag_delete(m, copy);
			} else {
				p = m_tag_next(m, p);
			}
		} else {
			p = m_tag_next(m, p);
		}
	}

#if DIAGNOSTIC
	if (m == NULL || !(m->m_flags & M_PKTHDR)) {
		panic("ip_input no HDR");
	}
#endif

#if DUMMYNET
	if (args->fwai_ipfw_rule || args->fwai_pf_rule) {
		/* dummynet already filtered us */
		ip = mtod(m, struct ip *);
		hlen = IP_VHL_HL(ip->ip_vhl) << 2;
		inject_filter_ref = ipf_get_inject_filter(m);
#if IPFIREWALL
		if (args->fwai_ipfw_rule) {
			goto iphack;
		}
#endif /* IPFIREWALL */
		if (args->fwai_pf_rule) {
			goto check_with_pf;
		}
	}
#endif /* DUMMYNET */
ipfw_tags_done:
#endif /* IPFIREWALL || DUMMYNET */

	/*
	 * No need to process packet twice if we've already seen it.
	 */
	if (!SLIST_EMPTY(&m->m_pkthdr.tags)) {
		inject_filter_ref = ipf_get_inject_filter(m);
	}
	if (inject_filter_ref != NULL) {
		ip = mtod(m, struct ip *);
		hlen = IP_VHL_HL(ip->ip_vhl) << 2;

		DTRACE_IP6(receive, struct mbuf *, m, struct inpcb *, NULL,
		    struct ip *, ip, struct ifnet *, inifp,
		    struct ip *, ip, struct ip6_hdr *, NULL);

		ip->ip_len = ntohs(ip->ip_len) - hlen;
		ip->ip_off = ntohs(ip->ip_off);
		ip_proto_dispatch_in(m, hlen, ip->ip_p, inject_filter_ref);
		return IPINPUT_DONE;
	}

	if (m->m_pkthdr.len < sizeof(struct ip)) {
		OSAddAtomic(1, &ipstat.ips_total);
		OSAddAtomic(1, &ipstat.ips_tooshort);
		m_freem(m);
		return IPINPUT_FREED;
	}

	if (m->m_len < sizeof(struct ip) &&
	    (m = m_pullup(m, sizeof(struct ip))) == NULL) {
		OSAddAtomic(1, &ipstat.ips_total);
		OSAddAtomic(1, &ipstat.ips_toosmall);
		return IPINPUT_FREED;
	}

	ip = mtod(m, struct ip *);
	*modm = m;

	KERNEL_DEBUG(DBG_LAYER_BEG, ip->ip_dst.s_addr, ip->ip_src.s_addr,
	    ip->ip_p, ip->ip_off, ip->ip_len);

	if (IP_VHL_V(ip->ip_vhl) != IPVERSION) {
		OSAddAtomic(1, &ipstat.ips_total);
		OSAddAtomic(1, &ipstat.ips_badvers);
		KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0);
		m_freem(m);
		return IPINPUT_FREED;
	}

	hlen = IP_VHL_HL(ip->ip_vhl) << 2;
	if (hlen < sizeof(struct ip)) {
		OSAddAtomic(1, &ipstat.ips_total);
		OSAddAtomic(1, &ipstat.ips_badhlen);
		KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0);
		m_freem(m);
		return IPINPUT_FREED;
	}

	if (hlen > m->m_len) {
		if ((m = m_pullup(m, hlen)) == NULL) {
			OSAddAtomic(1, &ipstat.ips_total);
			OSAddAtomic(1, &ipstat.ips_badhlen);
			KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0);
			return IPINPUT_FREED;
		}
		ip = mtod(m, struct ip *);
		*modm = m;
	}

	/* 127/8 must not appear on wire - RFC1122 */
	if ((ntohl(ip->ip_dst.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET ||
	    (ntohl(ip->ip_src.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET) {
		/*
		 * Allow for the following exceptions:
		 *
		 * 1. If the packet was sent to loopback (i.e. rcvif
		 *    would have been set earlier at output time.)
		 *
		 * 2. If the packet was sent out on loopback from a local
		 *    source address which belongs to a non-loopback
		 *    interface (i.e. rcvif may not necessarily be a
		 *    loopback interface, hence the test for PKTF_LOOP.)
		 *    Unlike IPv6, there is no interface scope ID, and
		 *    therefore we don't care so much about PKTF_IFINFO.
		 */
		if (!(inifp->if_flags & IFF_LOOPBACK) &&
		    !(m->m_pkthdr.pkt_flags & PKTF_LOOP)) {
			OSAddAtomic(1, &ipstat.ips_total);
			OSAddAtomic(1, &ipstat.ips_badaddr);
			KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0);
			m_freem(m);
			return IPINPUT_FREED;
		}
	}

	/* IPv4 Link-Local Addresses as defined in RFC3927 */
	if ((IN_LINKLOCAL(ntohl(ip->ip_dst.s_addr)) ||
	    IN_LINKLOCAL(ntohl(ip->ip_src.s_addr)))) {
		ip_linklocal_stat.iplls_in_total++;
		if (ip->ip_ttl != MAXTTL) {
			OSAddAtomic(1, &ip_linklocal_stat.iplls_in_badttl);
			/* Silently drop link local traffic with bad TTL */
			if (!ip_linklocal_in_allowbadttl) {
				OSAddAtomic(1, &ipstat.ips_total);
				KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0);
				m_freem(m);
				return IPINPUT_FREED;
			}
		}
	}

	if (ip_cksum(m, hlen)) {
		OSAddAtomic(1, &ipstat.ips_total);
		KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0);
		m_freem(m);
		return IPINPUT_FREED;
	}

	DTRACE_IP6(receive, struct mbuf *, m, struct inpcb *, NULL,
	    struct ip *, ip, struct ifnet *, inifp,
	    struct ip *, ip, struct ip6_hdr *, NULL);

	/*
	 * Convert fields to host representation.
	 */
#if BYTE_ORDER != BIG_ENDIAN
	NTOHS(ip->ip_len);
#endif

	if (ip->ip_len < hlen) {
		OSAddAtomic(1, &ipstat.ips_total);
		OSAddAtomic(1, &ipstat.ips_badlen);
		KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0);
		m_freem(m);
		return IPINPUT_FREED;
	}

#if BYTE_ORDER != BIG_ENDIAN
	NTOHS(ip->ip_off);
#endif

	/*
	 * Check that the amount of data in the buffers
	 * is at least as much as the IP header would have us expect.
	 * Trim mbufs if longer than we expect.
	 * Drop packet if shorter than we expect.
	 */
	if (m->m_pkthdr.len < ip->ip_len) {
		OSAddAtomic(1, &ipstat.ips_total);
		OSAddAtomic(1, &ipstat.ips_tooshort);
		KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0);
		m_freem(m);
		return IPINPUT_FREED;
	}

	if (m->m_pkthdr.len > ip->ip_len) {
		ip_input_adjust(m, ip, inifp);
	}

	/* for netstat route statistics */
	src_ip = ip->ip_src;
	len = m->m_pkthdr.len;

#if DUMMYNET
check_with_pf:
#endif
#if PF
	/* Invoke inbound packet filter */
	if (PF_IS_ENABLED) {
		int error;
		ip_input_cpout_args(args, &args1, &init);
		ip = mtod(m, struct ip *);
		src_ip = ip->ip_src;

#if DUMMYNET
		error = pf_af_hook(inifp, NULL, &m, AF_INET, TRUE, &args1);
#else
		error = pf_af_hook(inifp, NULL, &m, AF_INET, TRUE, NULL);
#endif /* DUMMYNET */
		if (error != 0 || m == NULL) {
			if (m != NULL) {
				panic("%s: unexpected packet %p\n",
				    __func__, m);
				/* NOTREACHED */
			}
			/* Already freed by callee */
			ip_input_update_nstat(inifp, src_ip, 1, len);
			KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0);
			OSAddAtomic(1, &ipstat.ips_total);
			return IPINPUT_FREED;
		}
		ip = mtod(m, struct ip *);
		hlen = IP_VHL_HL(ip->ip_vhl) << 2;
		*modm = m;
		ip_input_cpin_args(&args1, args);
	}
#endif /* PF */

#if IPSEC
	if (ipsec_bypass == 0 && ipsec_gethist(m, NULL)) {
		retval = IPINPUT_DONTCHAIN; /* XXX scope for chaining here? */
		goto pass;
	}
#endif

#if IPFIREWALL
#if DUMMYNET
iphack:
#endif /* DUMMYNET */
	/*
	 * Check if we want to allow this packet to be processed.
	 * Consider it to be bad if not.
	 */
	if (fw_enable && IPFW_LOADED) {
#if IPFIREWALL_FORWARD
		/*
		 * If we've been forwarded from the output side, then
		 * skip the firewall a second time
		 */
		if (args->fwai_next_hop) {
			*ours = 1;
			return IPINPUT_DONTCHAIN;
		}
#endif /* IPFIREWALL_FORWARD */
		ip_input_cpout_args(args, &args1, &init);
		args1.fwa_m = m;

		i = ip_fw_chk_ptr(&args1);
		m = args1.fwa_m;

		if ((i & IP_FW_PORT_DENY_FLAG) || m == NULL) { /* drop */
			if (m) {
				m_freem(m);
			}
			ip_input_update_nstat(inifp, src_ip, 1, len);
			KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0);
			OSAddAtomic(1, &ipstat.ips_total);
			return IPINPUT_FREED;
		}
		ip = mtod(m, struct ip *); /* just in case m changed */
		*modm = m;
		ip_input_cpin_args(&args1, args);

		if (i == 0 && args->fwai_next_hop == NULL) { /* common case */
			goto pass;
		}
#if DUMMYNET
		if (DUMMYNET_LOADED && (i & IP_FW_PORT_DYNT_FLAG) != 0) {
			/* Send packet to the appropriate pipe */
			ip_dn_io_ptr(m, i & 0xffff, DN_TO_IP_IN, &args1,
			    DN_CLIENT_IPFW);
			ip_input_update_nstat(inifp, src_ip, 1, len);
			KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0);
			OSAddAtomic(1, &ipstat.ips_total);
			return IPINPUT_FREED;
		}
#endif /* DUMMYNET */
#if IPDIVERT
		if (i != 0 && (i & IP_FW_PORT_DYNT_FLAG) == 0) {
			/* Divert or tee packet */
			*div_info = i;
			*ours = 1;
			return IPINPUT_DONTCHAIN;
		}
#endif
#if IPFIREWALL_FORWARD
		if (i == 0 && args->fwai_next_hop != NULL) {
			retval = IPINPUT_DONTCHAIN;
			goto pass;
		}
#endif
		/*
		 * if we get here, the packet must be dropped
		 */
		ip_input_update_nstat(inifp, src_ip, 1, len);
		KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0);
		m_freem(m);
		OSAddAtomic(1, &ipstat.ips_total);
		return IPINPUT_FREED;
	}
#endif /* IPFIREWALL */
#if IPSEC || IPFIREWALL
pass:
#endif
	/*
	 * Process options and, if not destined for us,
	 * ship it on.  ip_dooptions returns 1 when an
	 * error was detected (causing an icmp message
	 * to be sent and the original packet to be freed).
	 */
	ip_nhops = 0; /* for source routed packets */
#if IPFIREWALL
	if (hlen > sizeof(struct ip) &&
	    ip_dooptions(m, 0, args->fwai_next_hop)) {
#else /* !IPFIREWALL */
	if (hlen > sizeof(struct ip) && ip_dooptions(m, 0, NULL)) {
#endif /* !IPFIREWALL */
		ip_input_update_nstat(inifp, src_ip, 1, len);
		KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0);
		OSAddAtomic(1, &ipstat.ips_total);
		return IPINPUT_FREED;
	}

	/*
	 * Don't chain fragmented packets: determining whether a fragment is
	 * ours or someone else's, plus the complexity of carrying the divert
	 * and firewall args along, makes chaining harder.
	 */
	if (ip->ip_off & ~(IP_DF | IP_RF)) {
		return IPINPUT_DONTCHAIN;
	}
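
	/*
	 * Editor's note: (ip_off & ~(IP_DF | IP_RF)) is nonzero exactly when
	 * IP_MF is set or the fragment offset is nonzero, i.e. for any
	 * fragment; ip_off is in host byte order at this point.
	 */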

	/* Allow DHCP/BootP responses through */
	if ((inifp->if_eflags & IFEF_AUTOCONFIGURING) &&
	    hlen == sizeof(struct ip) && ip->ip_p == IPPROTO_UDP) {
		struct udpiphdr *ui;

		if (m->m_len < sizeof(struct udpiphdr) &&
		    (m = m_pullup(m, sizeof(struct udpiphdr))) == NULL) {
			OSAddAtomic(1, &udpstat.udps_hdrops);
			KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0);
			OSAddAtomic(1, &ipstat.ips_total);
			return IPINPUT_FREED;
		}
		*modm = m;
		ui = mtod(m, struct udpiphdr *);
		if (ntohs(ui->ui_dport) == IPPORT_BOOTPC) {
			ip_setdstifaddr_info(m, inifp->if_index, NULL);
			return IPINPUT_DONTCHAIN;
		}
	}

	/* Avoid chaining raw sockets as ipsec checks occur later for them */
	if (ip_protox[ip->ip_p]->pr_flags & PR_LASTHDR) {
		return IPINPUT_DONTCHAIN;
	}

	return retval;
#if !defined(__i386__) && !defined(__x86_64__)
bad:
	m_freem(m);
	return IPINPUT_FREED;
#endif
}

/*
 * Because the call to m_pullup() may free the mbuf, the function frees the
 * mbuf packet chain before it returns IP_CHECK_IF_DROP.
 */
static ip_check_if_result_t
ip_input_check_interface(struct mbuf **mp, struct ip *ip, struct ifnet *inifp)
{
	struct mbuf *m = *mp;
	struct in_ifaddr *ia = NULL;
	struct in_ifaddr *best_ia = NULL;
	struct ifnet *match_ifp = NULL;
	ip_check_if_result_t result = IP_CHECK_IF_NONE;

	/*
	 * Host broadcast and all network broadcast addresses are always a match
	 */
	if (ip->ip_dst.s_addr == (u_int32_t)INADDR_BROADCAST ||
	    ip->ip_dst.s_addr == INADDR_ANY) {
		ip_input_setdst_chain(m, inifp->if_index, NULL);
		return IP_CHECK_IF_OURS;
	}

	/*
	 * Check for a match in the hash bucket.
	 */
	lck_rw_lock_shared(in_ifaddr_rwlock);
	TAILQ_FOREACH(ia, INADDR_HASH(ip->ip_dst.s_addr), ia_hash) {
		if (IA_SIN(ia)->sin_addr.s_addr == ip->ip_dst.s_addr) {
			best_ia = ia;
			match_ifp = best_ia->ia_ifp;

			if (ia->ia_ifp == inifp || (inifp->if_flags & IFF_LOOPBACK) ||
			    (m->m_pkthdr.pkt_flags & PKTF_LOOP)) {
				/*
				 * A locally originated packet or packet from the loopback
				 * interface is always an exact interface address match
				 */
				match_ifp = inifp;
				break;
			}
			/*
			 * Continue the loop in case there's an exact match with
			 * another interface
			 */
		}
	}
	if (best_ia != NULL) {
		if (match_ifp != inifp && ipforwarding == 0 &&
		    ((ip_checkinterface == IP_CHECKINTERFACE_HYBRID_ES &&
		    (match_ifp->if_family == IFNET_FAMILY_IPSEC ||
		    match_ifp->if_family == IFNET_FAMILY_UTUN)) ||
		    ip_checkinterface == IP_CHECKINTERFACE_STRONG_ES)) {
			/*
			 * Drop when interface address check is strict and forwarding
			 * is disabled
			 */
			result = IP_CHECK_IF_DROP;
		} else {
			result = IP_CHECK_IF_OURS;
			ip_input_setdst_chain(m, 0, best_ia);
		}
	}
	lck_rw_done(in_ifaddr_rwlock);

	if (result == IP_CHECK_IF_NONE && (inifp->if_flags & IFF_BROADCAST)) {
		/*
		 * Check for broadcast addresses.
		 *
		 * Only accept broadcast packets that arrive via the matching
		 * interface.  Reception of forwarded directed broadcasts is
		 * handled via ip_forward(), and ether_frameout() performs the
		 * loopback into the stack for SIMPLEX interfaces.
		 */
		struct ifaddr *ifa;

		ifnet_lock_shared(inifp);
		TAILQ_FOREACH(ifa, &inifp->if_addrhead, ifa_link) {
			if (ifa->ifa_addr->sa_family != AF_INET) {
				continue;
			}
			ia = ifatoia(ifa);
			if (satosin(&ia->ia_broadaddr)->sin_addr.s_addr == ip->ip_dst.s_addr ||
			    ia->ia_netbroadcast.s_addr == ip->ip_dst.s_addr) {
				ip_input_setdst_chain(m, 0, ia);
				result = IP_CHECK_IF_OURS;
				match_ifp = inifp;
				break;
			}
		}
		ifnet_lock_done(inifp);
	}

	/* Allow DHCP/BootP responses through */
	if (result == IP_CHECK_IF_NONE && (inifp->if_eflags & IFEF_AUTOCONFIGURING) &&
	    ip->ip_p == IPPROTO_UDP && (IP_VHL_HL(ip->ip_vhl) << 2) == sizeof(struct ip)) {
		struct udpiphdr *ui;

		if (m->m_len < sizeof(struct udpiphdr)) {
			if ((m = m_pullup(m, sizeof(struct udpiphdr))) == NULL) {
				OSAddAtomic(1, &udpstat.udps_hdrops);
				*mp = NULL;
				return IP_CHECK_IF_DROP;
			}
			/*
			 * m_pullup can return a different mbuf
			 */
			*mp = m;
			ip = mtod(m, struct ip *);
		}
		ui = mtod(m, struct udpiphdr *);
		if (ntohs(ui->ui_dport) == IPPORT_BOOTPC) {
			ASSERT(m->m_nextpkt == NULL);
			ip_setdstifaddr_info(m, inifp->if_index, NULL);
			result = IP_CHECK_IF_OURS;
			match_ifp = inifp;
		}
	}

	if (result == IP_CHECK_IF_NONE) {
		if (ipforwarding == 0) {
			result = IP_CHECK_IF_DROP;
		} else {
			result = IP_CHECK_IF_FORWARD;
			ip_input_setdst_chain(m, inifp->if_index, NULL);
		}
	}

	if (result == IP_CHECK_IF_OURS && match_ifp != inifp) {
		ipstat.ips_rcv_if_weak_match++;

		/* Logging is too noisy when forwarding is enabled */
		if (ip_checkinterface_debug != 0 && ipforwarding == 0) {
			char src_str[MAX_IPv4_STR_LEN];
			char dst_str[MAX_IPv4_STR_LEN];

			inet_ntop(AF_INET, &ip->ip_src, src_str, sizeof(src_str));
			inet_ntop(AF_INET, &ip->ip_dst, dst_str, sizeof(dst_str));
			os_log_info(OS_LOG_DEFAULT,
			    "%s: weak ES interface match to %s for packet from %s to %s proto %u received via %s",
			    __func__, best_ia->ia_ifp->if_xname, src_str, dst_str, ip->ip_p, inifp->if_xname);
		}
	} else if (result == IP_CHECK_IF_DROP) {
		if (ip_checkinterface_debug > 0) {
			char src_str[MAX_IPv4_STR_LEN];
			char dst_str[MAX_IPv4_STR_LEN];

			inet_ntop(AF_INET, &ip->ip_src, src_str, sizeof(src_str));
			inet_ntop(AF_INET, &ip->ip_dst, dst_str, sizeof(dst_str));
			os_log_info(OS_LOG_DEFAULT,
			    "%s: no interface match for packet from %s to %s proto %u received via %s",
			    __func__, src_str, dst_str, ip->ip_p, inifp->if_xname);
		}
		struct mbuf *tmp_mbuf = m;
		while (tmp_mbuf != NULL) {
			ipstat.ips_rcv_if_no_match++;
			tmp_mbuf = tmp_mbuf->m_nextpkt;
		}
		m_freem_list(m);
		*mp = NULL;
	}

	return result;
}

static void
ip_input_second_pass(struct mbuf *m, struct ifnet *inifp, u_int32_t div_info,
    int npkts_in_chain, int bytes_in_chain, struct ip_fw_in_args *args, int ours)
{
	struct mbuf *tmp_mbuf = NULL;
	unsigned int hlen;

#if !IPFIREWALL
#pragma unused (args)
#endif

#if !IPDIVERT
#pragma unused (div_info)
#endif

	struct ip *ip = mtod(m, struct ip *);
	hlen = IP_VHL_HL(ip->ip_vhl) << 2;

	OSAddAtomic(npkts_in_chain, &ipstat.ips_total);

	/*
	 * Naively assume we can attribute inbound data to the route we would
	 * use to send to this destination.  Asymmetric routing breaks this
	 * assumption, but it still allows us to account for traffic from
	 * a remote node in the routing table.
	 * This lookup has a very significant performance impact, so we
	 * bypass it if nstat_collect is disabled.  We may also bypass it in
	 * the future when the protocol is TCP, because TCP will have a route
	 * that we can use to attribute the data to; that does mean we would
	 * not account for forwarded TCP traffic.
	 */
1668 ip_input_update_nstat(inifp, ip->ip_src, npkts_in_chain,
1669 bytes_in_chain);
1670
1671 if (ours) {
1672 goto ours;
1673 }
1674
1675 /*
1676 * Check our list of addresses, to see if the packet is for us.
1677 * If we don't have any addresses, assume any unicast packet
1678 * we receive might be for us (and let the upper layers deal
1679 * with it).
1680 */
1681 tmp_mbuf = m;
1682 if (TAILQ_EMPTY(&in_ifaddrhead)) {
1683 while (tmp_mbuf != NULL) {
1684 if (!(tmp_mbuf->m_flags & (M_MCAST | M_BCAST))) {
1685 ip_setdstifaddr_info(tmp_mbuf, inifp->if_index,
1686 NULL);
1687 }
1688 tmp_mbuf = mbuf_nextpkt(tmp_mbuf);
1689 }
1690 goto ours;
1691 }
1692
1693 /*
1694 * Enable a consistency check between the destination address
1695 * and the arrival interface for a unicast packet (the RFC 1122
1696 * strong ES model) if IP forwarding is disabled and the packet
1697 * is not locally generated and the packet is not subject to
1698 * 'ipfw fwd'.
1699 *
1700 * XXX - Checking also should be disabled if the destination
1701 * address is ipnat'ed to a different interface.
1702 *
1703 * XXX - Checking is incompatible with IP aliases added
1704 * to the loopback interface instead of the interface where
1705 * the packets are received.
1706 */
1707 if (!IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) {
1708 ip_check_if_result_t ip_check_if_result = IP_CHECK_IF_NONE;
1709
1710 ip_check_if_result = ip_input_check_interface(&m, ip, inifp);
1711 ASSERT(ip_check_if_result != IP_CHECK_IF_NONE);
1712 if (ip_check_if_result == IP_CHECK_IF_OURS) {
1713 goto ours;
1714 } else if (ip_check_if_result == IP_CHECK_IF_DROP) {
1715 return;
1716 }
1717 } else {
1718 struct in_multi *inm;
1719 /*
1720 * See if we belong to the destination multicast group on the
1721 * arrival interface.
1722 */
1723 in_multihead_lock_shared();
1724 IN_LOOKUP_MULTI(&ip->ip_dst, inifp, inm);
1725 in_multihead_lock_done();
1726 if (inm == NULL) {
1727 OSAddAtomic(npkts_in_chain, &ipstat.ips_notmember);
1728 m_freem_list(m);
1729 KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0);
1730 return;
1731 }
1732 ip_input_setdst_chain(m, inifp->if_index, NULL);
1733 INM_REMREF(inm);
1734 goto ours;
1735 }
1736
1737 tmp_mbuf = m;
1738 struct mbuf *nxt_mbuf = NULL;
1739 while (tmp_mbuf != NULL) {
1740 nxt_mbuf = mbuf_nextpkt(tmp_mbuf);
1741 /*
1742 * Not for us; forward if possible and desirable.
1743 */
1744 mbuf_setnextpkt(tmp_mbuf, NULL);
1745 if (ipforwarding == 0) {
1746 OSAddAtomic(1, &ipstat.ips_cantforward);
1747 m_freem(tmp_mbuf);
1748 } else {
1749 #if IPFIREWALL
1750 ip_forward(tmp_mbuf, 0, args->fwai_next_hop);
1751 #else
1752 ip_forward(tmp_mbuf, 0, NULL);
1753 #endif
1754 }
1755 tmp_mbuf = nxt_mbuf;
1756 }
1757 KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0);
1758 return;
1759 ours:
1760 ip = mtod(m, struct ip *); /* in case it changed */
1761 /*
1762 * If offset or IP_MF are set, must reassemble.
1763 */
1764 if (ip->ip_off & ~(IP_DF | IP_RF)) {
1765 VERIFY(npkts_in_chain == 1);
1766 /*
1767 * ip_reass() will return a different mbuf, and update
1768 * the divert info in div_info and args->fwai_divert_rule.
1769 */
1770 #if IPDIVERT
1771 m = ip_reass(m, (u_int16_t *)&div_info, &args->fwai_divert_rule);
1772 #else
1773 m = ip_reass(m);
1774 #endif
1775 if (m == NULL) {
1776 return;
1777 }
1778 ip = mtod(m, struct ip *);
1779 /* Get the header length of the reassembled packet */
1780 hlen = IP_VHL_HL(ip->ip_vhl) << 2;
1781 #if IPDIVERT
1782 /* Restore original checksum before diverting packet */
1783 if (div_info != 0) {
1784 VERIFY(npkts_in_chain == 1);
1785 #if BYTE_ORDER != BIG_ENDIAN
1786 HTONS(ip->ip_len);
1787 HTONS(ip->ip_off);
1788 #endif
1789 ip->ip_sum = 0;
1790 ip->ip_sum = ip_cksum_hdr_in(m, hlen);
1791 #if BYTE_ORDER != BIG_ENDIAN
1792 NTOHS(ip->ip_off);
1793 NTOHS(ip->ip_len);
1794 #endif
1795 }
1796 #endif /* IPDIVERT */
1797 }
1798
1799 /*
1800 * Further protocols expect the packet length to be without the
1801 * IP header.
1802 */
1803 ip->ip_len -= hlen;
1804
1805 #if IPDIVERT
1806 /*
1807 * Divert or tee packet to the divert protocol if required.
1808 *
1809 * If div_info is zero then cookie should be too, so we shouldn't
1810 * need to clear them here. Assume divert_packet() does so also.
1811 */
1812 if (div_info != 0) {
1813 struct mbuf *clone = NULL;
1814 VERIFY(npkts_in_chain == 1);
1815
1816 /* Clone packet if we're doing a 'tee' */
1817 if (div_info & IP_FW_PORT_TEE_FLAG) {
1818 clone = m_dup(m, M_DONTWAIT);
1819 }
1820
1821 /* Restore packet header fields to original values */
1822 ip->ip_len += hlen;
1823
1824 #if BYTE_ORDER != BIG_ENDIAN
1825 HTONS(ip->ip_len);
1826 HTONS(ip->ip_off);
1827 #endif
1828 /* Deliver packet to divert input routine */
1829 OSAddAtomic(1, &ipstat.ips_delivered);
1830 divert_packet(m, 1, div_info & 0xffff, args->fwai_divert_rule);
1831
1832 /* If 'tee', continue with original packet */
1833 if (clone == NULL) {
1834 return;
1835 }
1836 m = clone;
1837 ip = mtod(m, struct ip *);
1838 }
1839 #endif /* IPDIVERT */
1840
1841 #if IPSEC
1842 /*
1843 * Enforce IPsec policy checking if we are seeing the last header.
1844 * Note that we do not visit this for protocols with pcb-layer
1845 * code, such as udp/tcp/raw ip.
1846 */
1847 if (ipsec_bypass == 0 && (ip_protox[ip->ip_p]->pr_flags & PR_LASTHDR)) {
1848 VERIFY(npkts_in_chain == 1);
1849 if (ipsec4_in_reject(m, NULL)) {
1850 IPSEC_STAT_INCREMENT(ipsecstat.in_polvio);
1851 goto bad;
1852 }
1853 }
1854 #endif /* IPSEC */
1855
1856 /*
1857 * Switch out to protocol's input routine.
1858 */
1859 OSAddAtomic(npkts_in_chain, &ipstat.ips_delivered);
1860
1861 #if IPFIREWALL
1862 if (args->fwai_next_hop && ip->ip_p == IPPROTO_TCP) {
1863 /* TCP needs IPFORWARD info if available */
1864 struct m_tag *fwd_tag;
1865 struct ip_fwd_tag *ipfwd_tag;
1866
1867 VERIFY(npkts_in_chain == 1);
1868 fwd_tag = m_tag_create(KERNEL_MODULE_TAG_ID,
1869 KERNEL_TAG_TYPE_IPFORWARD, sizeof(*ipfwd_tag),
1870 M_NOWAIT, m);
1871 if (fwd_tag == NULL) {
1872 goto bad;
1873 }
1874
1875 ipfwd_tag = (struct ip_fwd_tag *)(fwd_tag + 1);
1876 ipfwd_tag->next_hop = args->fwai_next_hop;
1877
1878 m_tag_prepend(m, fwd_tag);
1879
1880 KERNEL_DEBUG(DBG_LAYER_END, ip->ip_dst.s_addr,
1881 ip->ip_src.s_addr, ip->ip_p, ip->ip_off, ip->ip_len);
1882
1883 /* TCP deals with its own locking */
1884 ip_proto_dispatch_in(m, hlen, ip->ip_p, 0);
1885 } else {
1886 KERNEL_DEBUG(DBG_LAYER_END, ip->ip_dst.s_addr,
1887 ip->ip_src.s_addr, ip->ip_p, ip->ip_off, ip->ip_len);
1888
1889 ip_input_dispatch_chain(m);
1890 }
1891 #else /* !IPFIREWALL */
1892 ip_input_dispatch_chain(m);
1893
1894 #endif /* !IPFIREWALL */
1895 KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0);
1896 return;
1897 bad:
1898 KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0);
1899 m_freem(m);
1900 }
1901
1902 void
1903 ip_input_process_list(struct mbuf *packet_list)
1904 {
1905 pktchain_elm_t pktchain_tbl[PKTTBL_SZ];
1906
1907 struct mbuf *packet = NULL;
1908 struct mbuf *modm = NULL; /* modified mbuf */
1909 int retval = 0;
1910 u_int32_t div_info = 0;
1911 int ours = 0;
1912 #if (DEBUG || DEVELOPMENT)
1913 struct timeval start_tv;
1914 #endif /* (DEBUG || DEVELOPMENT) */
1915 int num_pkts = 0;
1916 int chain = 0;
1917 struct ip_fw_in_args args;
1918
1919 if (ip_chaining == 0) {
1920 struct mbuf *m = packet_list;
1921 #if (DEBUG || DEVELOPMENT)
1922 if (ip_input_measure) {
1923 net_perf_start_time(&net_perf, &start_tv);
1924 }
1925 #endif /* (DEBUG || DEVELOPMENT) */
1926
1927 while (m) {
1928 packet_list = mbuf_nextpkt(m);
1929 mbuf_setnextpkt(m, NULL);
1930 ip_input(m);
1931 m = packet_list;
1932 num_pkts++;
1933 }
1934 #if (DEBUG || DEVELOPMENT)
1935 if (ip_input_measure) {
1936 net_perf_measure_time(&net_perf, &start_tv, num_pkts);
1937 }
1938 #endif /* (DEBUG || DEVELOPMENT) */
1939 return;
1940 }
1941 #if (DEBUG || DEVELOPMENT)
1942 if (ip_input_measure) {
1943 net_perf_start_time(&net_perf, &start_tv);
1944 }
1945 #endif /* (DEBUG || DEVELOPMENT) */
1946
1947 bzero(&pktchain_tbl, sizeof(pktchain_tbl));
1948 restart_list_process:
1949 chain = 0;
1950 for (packet = packet_list; packet; packet = packet_list) {
1951 packet_list = mbuf_nextpkt(packet);
1952 mbuf_setnextpkt(packet, NULL);
1953
1954 num_pkts++;
1955 modm = NULL;
1956 div_info = 0;
1957 bzero(&args, sizeof(args));
1958
1959 retval = ip_input_first_pass(packet, &div_info, &args,
1960 &ours, &modm);
1961
1962 if (retval == IPINPUT_DOCHAIN) {
1963 if (modm) {
1964 packet = modm;
1965 }
1966 packet = ip_chain_insert(packet, &pktchain_tbl[0]);
1967 if (packet == NULL) {
1968 ipstat.ips_rxc_chained++;
1969 chain++;
1970 if (chain > ip_chainsz) {
1971 break;
1972 }
1973 } else {
1974 ipstat.ips_rxc_collisions++;
1975 break;
1976 }
1977 } else if (retval == IPINPUT_DONTCHAIN) {
1978 /* in order to preserve order, exit from chaining */
1979 if (modm) {
1980 packet = modm;
1981 }
1982 ipstat.ips_rxc_notchain++;
1983 break;
1984 } else {
1985 /* packet was freed or delivered, do nothing. */
1986 }
1987 }
1988
1989 /* do second pass here for pktchain_tbl */
1990 if (chain) {
1991 ip_input_second_pass_loop_tbl(&pktchain_tbl[0], &args);
1992 }
1993
1994 if (packet) {
1995 /*
1996 * The equivalent update in the chaining case is performed in
1997 * ip_input_second_pass_loop_tbl().
1998 */
1999 #if (DEBUG || DEVELOPMENT)
2000 if (ip_input_measure) {
2001 net_perf_histogram(&net_perf, 1);
2002 }
2003 #endif /* (DEBUG || DEVELOPMENT) */
2004 ip_input_second_pass(packet, packet->m_pkthdr.rcvif, div_info,
2005 1, packet->m_pkthdr.len, &args, ours);
2006 }
2007
2008 if (packet_list) {
2009 goto restart_list_process;
2010 }
2011
2012 #if (DEBUG || DEVELOPMENT)
2013 if (ip_input_measure) {
2014 net_perf_measure_time(&net_perf, &start_tv, num_pkts);
2015 }
2016 #endif /* (DEBUG || DEVELOPMENT) */
2017 }
2018 /*
2019 * IP input routine. Checksum and byte swap header. If fragmented,
2020 * try to reassemble. Process options. Pass to next level.
2021 */
2022 void
2023 ip_input(struct mbuf *m)
2024 {
2025 struct ip *ip;
2026 unsigned int hlen;
2027 u_short sum = 0;
2028 #if IPFIREWALL || DUMMYNET
2029 struct ip_fw_args args;
2030 struct m_tag *tag;
2031 #endif /* IPFIREWALL || DUMMYNET */
#if IPFIREWALL
int i = 0; /* ipfw filter/divert result; declaration assumed missing (used only under IPFIREWALL below) */
#endif /* IPFIREWALL */
#if IPDIVERT
u_int32_t div_info = 0; /* divert/tee port info; declaration assumed missing (used under IPDIVERT below) */
#endif /* IPDIVERT */
2032 ipfilter_t inject_filter_ref = NULL;
2033 struct ifnet *inifp;
2034
2035 /* Check if the mbuf is still valid after interface filter processing */
2036 MBUF_INPUT_CHECK(m, m->m_pkthdr.rcvif);
2037 inifp = m->m_pkthdr.rcvif;
2038 VERIFY(inifp != NULL);
2039
2040 ipstat.ips_rxc_notlist++;
2041
2042 /* Perform IP header alignment fixup, if needed */
2043 IP_HDR_ALIGNMENT_FIXUP(m, inifp, goto bad);
2044
2045 m->m_pkthdr.pkt_flags &= ~PKTF_FORWARDED;
2046
2047 #if IPFIREWALL || DUMMYNET
2048 bzero(&args, sizeof(struct ip_fw_args));
2049
2050 /*
2051 * Don't bother searching for tags if there are none.
2052 */
2053 if (SLIST_EMPTY(&m->m_pkthdr.tags)) {
2054 goto ipfw_tags_done;
2055 }
2056
2057 /* Grab info from mtags prepended to the chain */
2058 #if DUMMYNET
2059 if ((tag = m_tag_locate(m, KERNEL_MODULE_TAG_ID,
2060 KERNEL_TAG_TYPE_DUMMYNET, NULL)) != NULL) {
2061 struct dn_pkt_tag *dn_tag;
2062
2063 dn_tag = (struct dn_pkt_tag *)(tag + 1);
2064 args.fwa_ipfw_rule = dn_tag->dn_ipfw_rule;
2065 args.fwa_pf_rule = dn_tag->dn_pf_rule;
2066
2067 m_tag_delete(m, tag);
2068 }
2069 #endif /* DUMMYNET */
2070
2071 #if IPDIVERT
2072 if ((tag = m_tag_locate(m, KERNEL_MODULE_TAG_ID,
2073 KERNEL_TAG_TYPE_DIVERT, NULL)) != NULL) {
2074 struct divert_tag *div_tag;
2075
2076 div_tag = (struct divert_tag *)(tag + 1);
2077 args.fwa_divert_rule = div_tag->cookie;
2078
2079 m_tag_delete(m, tag);
2080 }
2081 #endif /* IPDIVERT */
2082
2083 if ((tag = m_tag_locate(m, KERNEL_MODULE_TAG_ID,
2084 KERNEL_TAG_TYPE_IPFORWARD, NULL)) != NULL) {
2085 struct ip_fwd_tag *ipfwd_tag;
2086
2087 ipfwd_tag = (struct ip_fwd_tag *)(tag + 1);
2088 args.fwa_next_hop = ipfwd_tag->next_hop;
2089
2090 m_tag_delete(m, tag);
2091 }
2092
2093 #if DIAGNOSTIC
2094 if (m == NULL || !(m->m_flags & M_PKTHDR)) {
2095 panic("ip_input no HDR");
2096 }
2097 #endif
2098
2099 #if DUMMYNET
2100 if (args.fwa_ipfw_rule || args.fwa_pf_rule) {
2101 /* dummynet already filtered us */
2102 ip = mtod(m, struct ip *);
2103 hlen = IP_VHL_HL(ip->ip_vhl) << 2;
2104 inject_filter_ref = ipf_get_inject_filter(m);
2105 #if IPFIREWALL
2106 if (args.fwa_ipfw_rule) {
2107 goto iphack;
2108 }
2109 #endif /* IPFIREWALL */
2110 if (args.fwa_pf_rule) {
2111 goto check_with_pf;
2112 }
2113 }
2114 #endif /* DUMMYNET */
2115 ipfw_tags_done:
2116 #endif /* IPFIREWALL || DUMMYNET */
2117
2118 /*
2119 * No need to process packet twice if we've already seen it.
2120 */
2121 if (!SLIST_EMPTY(&m->m_pkthdr.tags)) {
2122 inject_filter_ref = ipf_get_inject_filter(m);
2123 }
2124 if (inject_filter_ref != NULL) {
2125 ip = mtod(m, struct ip *);
2126 hlen = IP_VHL_HL(ip->ip_vhl) << 2;
2127
2128 DTRACE_IP6(receive, struct mbuf *, m, struct inpcb *, NULL,
2129 struct ip *, ip, struct ifnet *, inifp,
2130 struct ip *, ip, struct ip6_hdr *, NULL);
2131
2132 ip->ip_len = ntohs(ip->ip_len) - hlen;
2133 ip->ip_off = ntohs(ip->ip_off);
2134 ip_proto_dispatch_in(m, hlen, ip->ip_p, inject_filter_ref);
2135 return;
2136 }
2137
2138 OSAddAtomic(1, &ipstat.ips_total);
2139 if (m->m_pkthdr.len < sizeof(struct ip)) {
2140 goto tooshort;
2141 }
2142
2143 if (m->m_len < sizeof(struct ip) &&
2144 (m = m_pullup(m, sizeof(struct ip))) == NULL) {
2145 OSAddAtomic(1, &ipstat.ips_toosmall);
2146 return;
2147 }
2148 ip = mtod(m, struct ip *);
2149
2150 KERNEL_DEBUG(DBG_LAYER_BEG, ip->ip_dst.s_addr, ip->ip_src.s_addr,
2151 ip->ip_p, ip->ip_off, ip->ip_len);
2152
2153 if (IP_VHL_V(ip->ip_vhl) != IPVERSION) {
2154 OSAddAtomic(1, &ipstat.ips_badvers);
2155 goto bad;
2156 }
2157
2158 hlen = IP_VHL_HL(ip->ip_vhl) << 2;
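/*
 * Worked example: a header without options has ip_vhl == 0x45
 * (version 4, header length 5 words), so IP_VHL_HL() yields 5 and
 * hlen == 5 << 2 == 20 bytes; the largest legal value, 0x4f,
 * yields hlen == 60 bytes.
 */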
2159 if (hlen < sizeof(struct ip)) { /* minimum header length */
2160 OSAddAtomic(1, &ipstat.ips_badhlen);
2161 goto bad;
2162 }
2163 if (hlen > m->m_len) {
2164 if ((m = m_pullup(m, hlen)) == NULL) {
2165 OSAddAtomic(1, &ipstat.ips_badhlen);
2166 return;
2167 }
2168 ip = mtod(m, struct ip *);
2169 }
2170
2171 /* 127/8 must not appear on wire - RFC1122 */
2172 if ((ntohl(ip->ip_dst.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET ||
2173 (ntohl(ip->ip_src.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET) {
2174 /*
2175 * Allow for the following exceptions:
2176 *
2177 * 1. If the packet was sent to loopback (i.e. rcvif
2178 * would have been set earlier at output time.)
2179 *
2180 * 2. If the packet was sent out on loopback from a local
2181 * source address which belongs to a non-loopback
2182 * interface (i.e. rcvif may not necessarily be a
2183 * loopback interface, hence the test for PKTF_LOOP.)
2184 * Unlike IPv6, there is no interface scope ID, and
2185 * therefore we don't care so much about PKTF_IFINFO.
2186 */
2187 if (!(inifp->if_flags & IFF_LOOPBACK) &&
2188 !(m->m_pkthdr.pkt_flags & PKTF_LOOP)) {
2189 OSAddAtomic(1, &ipstat.ips_badaddr);
2190 goto bad;
2191 }
2192 }
2193
2194 /* IPv4 Link-Local Addresses as defined in RFC3927 */
2195 if ((IN_LINKLOCAL(ntohl(ip->ip_dst.s_addr)) ||
2196 IN_LINKLOCAL(ntohl(ip->ip_src.s_addr)))) {
2197 ip_linklocal_stat.iplls_in_total++;
2198 if (ip->ip_ttl != MAXTTL) {
2199 OSAddAtomic(1, &ip_linklocal_stat.iplls_in_badttl);
2200 /* Silently drop link local traffic with bad TTL */
2201 if (!ip_linklocal_in_allowbadttl) {
2202 goto bad;
2203 }
2204 }
2205 }
2206
2207 sum = ip_cksum(m, hlen);
2208 if (sum) {
2209 goto bad;
2210 }
2211
2212 DTRACE_IP6(receive, struct mbuf *, m, struct inpcb *, NULL,
2213 struct ip *, ip, struct ifnet *, inifp,
2214 struct ip *, ip, struct ip6_hdr *, NULL);
2215
2216 /*
2217 * Naively assume we can attribute inbound data to the route we would
2218 * use to send to this destination. Asymmetric routing breaks this
2219 * assumption, but it still allows us to account for traffic from
2220 * a remote node in the routing table.
2221 * This has a very significant performance impact, so we bypass
2222 * it when nstat_collect is disabled. In the future we may also
2223 * bypass for TCP, since TCP will have a route that we can use to
2224 * attribute the data to; that would mean not accounting for
2225 * forwarded TCP traffic.
2226 */
2227 if (nstat_collect) {
2228 struct rtentry *rt =
2229 ifnet_cached_rtlookup_inet(inifp, ip->ip_src);
2230 if (rt != NULL) {
2231 nstat_route_rx(rt, 1, m->m_pkthdr.len, 0);
2232 rtfree(rt);
2233 }
2234 }
2235
2236 /*
2237 * Convert fields to host representation.
2238 */
2239 #if BYTE_ORDER != BIG_ENDIAN
2240 NTOHS(ip->ip_len);
2241 #endif
2242
2243 if (ip->ip_len < hlen) {
2244 OSAddAtomic(1, &ipstat.ips_badlen);
2245 goto bad;
2246 }
2247
2248 #if BYTE_ORDER != BIG_ENDIAN
2249 NTOHS(ip->ip_off);
2250 #endif
2251 /*
2252 * Check that the amount of data in the buffers
2253 * is at least as much as the IP header would have us expect.
2254 * Trim mbufs if longer than we expect.
2255 * Drop packet if shorter than we expect.
2256 */
2257 if (m->m_pkthdr.len < ip->ip_len) {
2258 tooshort:
2259 OSAddAtomic(1, &ipstat.ips_tooshort);
2260 goto bad;
2261 }
2262 if (m->m_pkthdr.len > ip->ip_len) {
2263 ip_input_adjust(m, ip, inifp);
2264 }
2265
2266 #if DUMMYNET
2267 check_with_pf:
2268 #endif
2269 #if PF
2270 /* Invoke inbound packet filter */
2271 if (PF_IS_ENABLED) {
2272 int error;
2273 #if DUMMYNET
2274 error = pf_af_hook(inifp, NULL, &m, AF_INET, TRUE, &args);
2275 #else
2276 error = pf_af_hook(inifp, NULL, &m, AF_INET, TRUE, NULL);
2277 #endif /* DUMMYNET */
2278 if (error != 0 || m == NULL) {
2279 if (m != NULL) {
2280 panic("%s: unexpected packet %p\n",
2281 __func__, m);
2282 /* NOTREACHED */
2283 }
2284 /* Already freed by callee */
2285 return;
2286 }
2287 ip = mtod(m, struct ip *);
2288 hlen = IP_VHL_HL(ip->ip_vhl) << 2;
2289 }
2290 #endif /* PF */
2291
2292 #if IPSEC
2293 if (ipsec_bypass == 0 && ipsec_gethist(m, NULL)) {
2294 goto pass;
2295 }
2296 #endif
2297
2298 #if IPFIREWALL
2299 #if DUMMYNET
2300 iphack:
2301 #endif /* DUMMYNET */
2302 /*
2303 * Check if we want to allow this packet to be processed.
2304 * Consider it to be bad if not.
2305 */
2306 if (fw_enable && IPFW_LOADED) {
2307 #if IPFIREWALL_FORWARD
2308 /*
2309 * If we've been forwarded from the output side, then
2310 * skip the firewall a second time
2311 */
2312 if (args.fwa_next_hop) {
2313 goto ours;
2314 }
2315 #endif /* IPFIREWALL_FORWARD */
2316
2317 args.fwa_m = m;
2318
2319 i = ip_fw_chk_ptr(&args);
2320 m = args.fwa_m;
2321
2322 if ((i & IP_FW_PORT_DENY_FLAG) || m == NULL) { /* drop */
2323 if (m) {
2324 m_freem(m);
2325 }
2326 return;
2327 }
2328 ip = mtod(m, struct ip *); /* just in case m changed */
2329
2330 if (i == 0 && args.fwa_next_hop == NULL) { /* common case */
2331 goto pass;
2332 }
2333 #if DUMMYNET
2334 if (DUMMYNET_LOADED && (i & IP_FW_PORT_DYNT_FLAG) != 0) {
2335 /* Send packet to the appropriate pipe */
2336 ip_dn_io_ptr(m, i & 0xffff, DN_TO_IP_IN, &args,
2337 DN_CLIENT_IPFW);
2338 return;
2339 }
2340 #endif /* DUMMYNET */
2341 #if IPDIVERT
2342 if (i != 0 && (i & IP_FW_PORT_DYNT_FLAG) == 0) {
2343 /* Divert or tee packet */
2344 div_info = i;
2345 goto ours;
2346 }
2347 #endif
2348 #if IPFIREWALL_FORWARD
2349 if (i == 0 && args.fwa_next_hop != NULL) {
2350 goto pass;
2351 }
2352 #endif
2353 /*
2354 * if we get here, the packet must be dropped
2355 */
2356 m_freem(m);
2357 return;
2358 }
2359 #endif /* IPFIREWALL */
2360 #if IPSEC | IPFIREWALL
2361 pass:
2362 #endif
2363 /*
2364 * Process options and, if not destined for us,
2365 * ship it on. ip_dooptions returns 1 when an
2366 * error was detected (causing an icmp message
2367 * to be sent and the original packet to be freed).
2368 */
2369 ip_nhops = 0; /* for source routed packets */
2370 #if IPFIREWALL
2371 if (hlen > sizeof(struct ip) &&
2372 ip_dooptions(m, 0, args.fwa_next_hop)) {
2373 #else /* !IPFIREWALL */
2374 if (hlen > sizeof(struct ip) && ip_dooptions(m, 0, NULL)) {
2375 #endif /* !IPFIREWALL */
2376 return;
2377 }
2378
2379 /*
2380 * Check our list of addresses, to see if the packet is for us.
2381 * If we don't have any addresses, assume any unicast packet
2382 * we receive might be for us (and let the upper layers deal
2383 * with it).
2384 */
2385 if (TAILQ_EMPTY(&in_ifaddrhead) && !(m->m_flags & (M_MCAST | M_BCAST))) {
2386 ip_setdstifaddr_info(m, inifp->if_index, NULL);
2387 goto ours;
2388 }
2389
2390 /*
2391 * Enable a consistency check between the destination address
2392 * and the arrival interface for a unicast packet (the RFC 1122
2393 * strong ES model) if IP forwarding is disabled and the packet
2394 * is not locally generated and the packet is not subject to
2395 * 'ipfw fwd'.
2396 *
2397 * XXX - Checking also should be disabled if the destination
2398 * address is ipnat'ed to a different interface.
2399 *
2400 * XXX - Checking is incompatible with IP aliases added
2401 * to the loopback interface instead of the interface where
2402 * the packets are received.
2403 */
2404 if (!IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) {
2405 ip_check_if_result_t check_if_result = IP_CHECK_IF_NONE;
2406
2407 check_if_result = ip_input_check_interface(&m, ip, inifp);
2408 ASSERT(check_if_result != IP_CHECK_IF_NONE);
2409 if (check_if_result == IP_CHECK_IF_OURS) {
2410 goto ours;
2411 } else if (check_if_result == IP_CHECK_IF_DROP) {
2412 return;
2413 }
2414 } else {
2415 struct in_multi *inm;
2416 /*
2417 * See if we belong to the destination multicast group on the
2418 * arrival interface.
2419 */
2420 in_multihead_lock_shared();
2421 IN_LOOKUP_MULTI(&ip->ip_dst, inifp, inm);
2422 in_multihead_lock_done();
2423 if (inm == NULL) {
2424 OSAddAtomic(1, &ipstat.ips_notmember);
2425 m_freem(m);
2426 return;
2427 }
2428 ip_setdstifaddr_info(m, inifp->if_index, NULL);
2429 INM_REMREF(inm);
2430 goto ours;
2431 }
2432
2433 /*
2434 * Not for us; forward if possible and desirable.
2435 */
2436 if (ipforwarding == 0) {
2437 OSAddAtomic(1, &ipstat.ips_cantforward);
2438 m_freem(m);
2439 } else {
2440 #if IPFIREWALL
2441 ip_forward(m, 0, args.fwa_next_hop);
2442 #else
2443 ip_forward(m, 0, NULL);
2444 #endif
2445 }
2446 return;
2447
2448 ours:
2449 /*
2450 * If offset or IP_MF are set, must reassemble.
2451 */
2452 if (ip->ip_off & ~(IP_DF | IP_RF)) {
2453 /*
2454 * ip_reass() will return a different mbuf, and update
2455 * the divert info in div_info and args.fwa_divert_rule.
2456 */
2457 #if IPDIVERT
2458 m = ip_reass(m, (u_int16_t *)&div_info, &args.fwa_divert_rule);
2459 #else
2460 m = ip_reass(m);
2461 #endif
2462 if (m == NULL) {
2463 return;
2464 }
2465 ip = mtod(m, struct ip *);
2466 /* Get the header length of the reassembled packet */
2467 hlen = IP_VHL_HL(ip->ip_vhl) << 2;
2468 #if IPDIVERT
2469 /* Restore original checksum before diverting packet */
2470 if (div_info != 0) {
2471 #if BYTE_ORDER != BIG_ENDIAN
2472 HTONS(ip->ip_len);
2473 HTONS(ip->ip_off);
2474 #endif
2475 ip->ip_sum = 0;
2476 ip->ip_sum = ip_cksum_hdr_in(m, hlen);
2477 #if BYTE_ORDER != BIG_ENDIAN
2478 NTOHS(ip->ip_off);
2479 NTOHS(ip->ip_len);
2480 #endif
2481 }
2482 #endif /* IPDIVERT */
2483 }
2484
2485 /*
2486 * Further protocols expect the packet length to be without the
2487 * IP header.
2488 */
2489 ip->ip_len -= hlen;
2490
2491 #if IPDIVERT
2492 /*
2493 * Divert or tee packet to the divert protocol if required.
2494 *
2495 * If div_info is zero then cookie should be too, so we shouldn't
2496 * need to clear them here. Assume divert_packet() does so also.
2497 */
2498 if (div_info != 0) {
2499 struct mbuf *clone = NULL;
2500
2501 /* Clone packet if we're doing a 'tee' */
2502 if (div_info & IP_FW_PORT_TEE_FLAG) {
2503 clone = m_dup(m, M_DONTWAIT);
2504 }
2505
2506 /* Restore packet header fields to original values */
2507 ip->ip_len += hlen;
2508
2509 #if BYTE_ORDER != BIG_ENDIAN
2510 HTONS(ip->ip_len);
2511 HTONS(ip->ip_off);
2512 #endif
2513 /* Deliver packet to divert input routine */
2514 OSAddAtomic(1, &ipstat.ips_delivered);
2515 divert_packet(m, 1, div_info & 0xffff, args.fwa_divert_rule);
2516
2517 /* If 'tee', continue with original packet */
2518 if (clone == NULL) {
2519 return;
2520 }
2521 m = clone;
2522 ip = mtod(m, struct ip *);
2523 }
2524 #endif /* IPDIVERT */
2525
2526 #if IPSEC
2527 /*
2528 * Enforce IPsec policy checking if we are seeing the last header.
2529 * Note that we do not visit this for protocols with pcb-layer
2530 * code, such as udp/tcp/raw ip.
2531 */
2532 if (ipsec_bypass == 0 && (ip_protox[ip->ip_p]->pr_flags & PR_LASTHDR)) {
2533 if (ipsec4_in_reject(m, NULL)) {
2534 IPSEC_STAT_INCREMENT(ipsecstat.in_polvio);
2535 goto bad;
2536 }
2537 }
2538 #endif /* IPSEC */
2539
2540 /*
2541 * Switch out to protocol's input routine.
2542 */
2543 OSAddAtomic(1, &ipstat.ips_delivered);
2544
2545 #if IPFIREWALL
2546 if (args.fwa_next_hop && ip->ip_p == IPPROTO_TCP) {
2547 /* TCP needs IPFORWARD info if available */
2548 struct m_tag *fwd_tag;
2549 struct ip_fwd_tag *ipfwd_tag;
2550
2551 fwd_tag = m_tag_create(KERNEL_MODULE_TAG_ID,
2552 KERNEL_TAG_TYPE_IPFORWARD, sizeof(*ipfwd_tag),
2553 M_NOWAIT, m);
2554 if (fwd_tag == NULL) {
2555 goto bad;
2556 }
2557
2558 ipfwd_tag = (struct ip_fwd_tag *)(fwd_tag + 1);
2559 ipfwd_tag->next_hop = args.fwa_next_hop;
2560
2561 m_tag_prepend(m, fwd_tag);
2562
2563 KERNEL_DEBUG(DBG_LAYER_END, ip->ip_dst.s_addr,
2564 ip->ip_src.s_addr, ip->ip_p, ip->ip_off, ip->ip_len);
2565
2566 /* TCP deals with its own locking */
2567 ip_proto_dispatch_in(m, hlen, ip->ip_p, 0);
2568 } else {
2569 KERNEL_DEBUG(DBG_LAYER_END, ip->ip_dst.s_addr,
2570 ip->ip_src.s_addr, ip->ip_p, ip->ip_off, ip->ip_len);
2571
2572 if ((sw_lro) && (ip->ip_p == IPPROTO_TCP)) {
2573 m = tcp_lro(m, hlen);
2574 if (m == NULL) {
2575 return;
2576 }
2577 }
2578
2579 ip_proto_dispatch_in(m, hlen, ip->ip_p, 0);
2580 }
2581 #else /* !IPFIREWALL */
2582 if ((sw_lro) && (ip->ip_p == IPPROTO_TCP)) {
2583 m = tcp_lro(m, hlen);
2584 if (m == NULL) {
2585 return;
2586 }
2587 }
2588 ip_proto_dispatch_in(m, hlen, ip->ip_p, 0);
2589 #endif /* !IPFIREWALL */
2590 return;
2591
2592 bad:
2593 KERNEL_DEBUG(DBG_LAYER_END, 0, 0, 0, 0, 0);
2594 m_freem(m);
2595 }
2596
2597 static void
2598 ipq_updateparams(void)
2599 {
2600 LCK_MTX_ASSERT(&ipqlock, LCK_MTX_ASSERT_OWNED);
2601 /*
2602 * -1 for unlimited allocation.
2603 */
2604 if (maxnipq < 0) {
2605 ipq_limit = 0;
2606 }
2607 /*
2608 * Positive number for specific bound.
2609 */
2610 if (maxnipq > 0) {
2611 ipq_limit = maxnipq;
2612 }
2613 /*
2614 * Zero specifies no further fragment queue allocation -- set the
2615 * bound very low, but rely on implementation elsewhere to actually
2616 * prevent allocation and reclaim current queues.
2617 */
2618 if (maxnipq == 0) {
2619 ipq_limit = 1;
2620 }
2621 /*
2622 * Arm the purge timer if not already and if there's work to do
2623 */
2624 frag_sched_timeout();
2625 }
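/*
 * Summary of the maxnipq -> ipq_limit mapping enforced above:
 *
 *	maxnipq == -1	ipq_limit = 0	(no bound on queues)
 *	maxnipq ==  0	ipq_limit = 1	(effectively no new queues)
 *	maxnipq  >  0	ipq_limit = maxnipq
 *
 * The knob is typically driven from user space through the sysctl
 * handler below, e.g. (assuming the conventional BSD name for this
 * tunable):
 *
 *	sysctl -w net.inet.ip.maxfragpackets=1024
 */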
2626
2627 static int
2628 sysctl_maxnipq SYSCTL_HANDLER_ARGS
2629 {
2630 #pragma unused(arg1, arg2)
2631 int error, i;
2632
2633 lck_mtx_lock(&ipqlock);
2634 i = maxnipq;
2635 error = sysctl_handle_int(oidp, &i, 0, req);
2636 if (error || req->newptr == USER_ADDR_NULL) {
2637 goto done;
2638 }
2639 /* impose bounds */
2640 if (i < -1 || i > (nmbclusters / 4)) {
2641 error = EINVAL;
2642 goto done;
2643 }
2644 maxnipq = i;
2645 ipq_updateparams();
2646 done:
2647 lck_mtx_unlock(&ipqlock);
2648 return error;
2649 }
2650
2651 static int
2652 sysctl_maxfragsperpacket SYSCTL_HANDLER_ARGS
2653 {
2654 #pragma unused(arg1, arg2)
2655 int error, i;
2656
2657 lck_mtx_lock(&ipqlock);
2658 i = maxfragsperpacket;
2659 error = sysctl_handle_int(oidp, &i, 0, req);
2660 if (error || req->newptr == USER_ADDR_NULL) {
2661 goto done;
2662 }
2663 maxfragsperpacket = i;
2664 ipq_updateparams(); /* see if we need to arm timer */
2665 done:
2666 lck_mtx_unlock(&ipqlock);
2667 return error;
2668 }
2669
2670 /*
2671 * Take incoming datagram fragment and try to reassemble it into
2672 * whole datagram. If a chain for reassembly of this datagram already
2673 * exists, then it is given as fp; otherwise have to make a chain.
2674 *
2675 * When IPDIVERT enabled, keep additional state with each packet that
2676 * tells us if we need to divert or tee the packet we're building.
2677 *
2678 * The IP header is *NOT* adjusted out of iplen (but in host byte order).
2679 */
2680 static struct mbuf *
2681 #if IPDIVERT
2682 ip_reass(struct mbuf *m,
2683 #ifdef IPDIVERT_44
2684 u_int32_t *divinfo,
2685 #else /* IPDIVERT_44 */
2686 u_int16_t *divinfo,
2687 #endif /* IPDIVERT_44 */
2688 u_int16_t *divcookie)
2689 #else /* IPDIVERT */
2690 ip_reass(struct mbuf *m)
2691 #endif /* IPDIVERT */
2692 {
2693 struct ip *ip;
2694 struct mbuf *p, *q, *nq, *t;
2695 struct ipq *fp = NULL;
2696 struct ipqhead *head;
2697 int i, hlen, next;
2698 u_int8_t ecn, ecn0;
2699 uint32_t csum, csum_flags;
2700 uint16_t hash;
2701 struct fq_head dfq;
2702
2703 MBUFQ_INIT(&dfq); /* for deferred frees */
2704
2705 /* If maxnipq or maxfragsperpacket is 0, never accept fragments. */
2706 if (maxnipq == 0 || maxfragsperpacket == 0) {
2707 ipstat.ips_fragments++;
2708 ipstat.ips_fragdropped++;
2709 m_freem(m);
2710 if (nipq > 0) {
2711 lck_mtx_lock(&ipqlock);
2712 frag_sched_timeout(); /* purge stale fragments */
2713 lck_mtx_unlock(&ipqlock);
2714 }
2715 return NULL;
2716 }
2717
2718 ip = mtod(m, struct ip *);
2719 hlen = IP_VHL_HL(ip->ip_vhl) << 2;
2720
2721 lck_mtx_lock(&ipqlock);
2722
2723 hash = IPREASS_HASH(ip->ip_src.s_addr, ip->ip_id);
2724 head = &ipq[hash];
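/*
 * Every fragment of a datagram hashes to the same bucket, since
 * IPREASS_HASH is keyed only on fields common to all fragments
 * (the source address and the 16-bit IP id); the full
 * (id, src, dst, proto) comparison in the loop below then
 * disambiguates datagrams that collide within a bucket.
 */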
2725
2726 /*
2727 * Look for queue of fragments
2728 * of this datagram.
2729 */
2730 TAILQ_FOREACH(fp, head, ipq_list) {
2731 if (ip->ip_id == fp->ipq_id &&
2732 ip->ip_src.s_addr == fp->ipq_src.s_addr &&
2733 ip->ip_dst.s_addr == fp->ipq_dst.s_addr &&
2734 #if CONFIG_MACF_NET
2735 mac_ipq_label_compare(m, fp) &&
2736 #endif
2737 ip->ip_p == fp->ipq_p) {
2738 goto found;
2739 }
2740 }
2741
2742 fp = NULL;
2743
2744 /*
2745 * Attempt to trim the number of allocated fragment queues if it
2746 * exceeds the administrative limit.
2747 */
2748 if ((nipq > (unsigned)maxnipq) && (maxnipq > 0)) {
2749 /*
2750 * Drop something from the tail of the current queue
2751 * before proceeding further.
2752 */
2753 struct ipq *fq = TAILQ_LAST(head, ipqhead);
2754 if (fq == NULL) { /* gak */
2755 for (i = 0; i < IPREASS_NHASH; i++) {
2756 struct ipq *r = TAILQ_LAST(&ipq[i], ipqhead);
2757 if (r) {
2758 ipstat.ips_fragtimeout += r->ipq_nfrags;
2759 frag_freef(&ipq[i], r);
2760 break;
2761 }
2762 }
2763 } else {
2764 ipstat.ips_fragtimeout += fq->ipq_nfrags;
2765 frag_freef(head, fq);
2766 }
2767 }
2768
2769 found:
2770 /*
2771 * Leverage partial checksum offload for IP fragments. Narrow down
2772 * the scope to cover only UDP without IP options, as that is the
2773 * most common case.
2774 *
2775 * Perform 1's complement adjustment of octets that got included/
2776 * excluded in the hardware-calculated checksum value. Ignore cases
2777 * where the value includes the entire IPv4 header span, as the sum
2778 * for those octets would already be 0 by the time we get here; IP
2779 * has already performed its header checksum validation. Also take
2780 * care of any trailing bytes and subtract out their partial sum.
2781 */
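/*
 * Illustrative sketch of the adjustment below (hypothetical
 * numbers, not from a real trace): if the NIC started summing at
 * start == 0 but the UDP checksum must cover only the payload
 * beginning at hlen == 20, m_adj_sum16() folds the one's
 * complement sum of those first 20 octets back out of csum;
 * likewise any trailer octets beyond ip_len are folded out before
 * m_adj() trims them from the chain.
 */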
2782 if (ip->ip_p == IPPROTO_UDP && hlen == sizeof(struct ip) &&
2783 (m->m_pkthdr.csum_flags &
2784 (CSUM_DATA_VALID | CSUM_PARTIAL | CSUM_PSEUDO_HDR)) ==
2785 (CSUM_DATA_VALID | CSUM_PARTIAL)) {
2786 uint32_t start = m->m_pkthdr.csum_rx_start;
2787 int32_t trailer = (m_pktlen(m) - ip->ip_len);
2788 uint32_t swbytes = (uint32_t)trailer;
2789
2790 csum = m->m_pkthdr.csum_rx_val;
2791
2792 ASSERT(trailer >= 0);
2793 if ((start != 0 && start != hlen) || trailer != 0) {
2794 uint32_t datalen = ip->ip_len - hlen;
2795
2796 #if BYTE_ORDER != BIG_ENDIAN
2797 if (start < hlen) {
2798 HTONS(ip->ip_len);
2799 HTONS(ip->ip_off);
2800 }
2801 #endif /* BYTE_ORDER != BIG_ENDIAN */
2802 /* callee folds in sum */
2803 csum = m_adj_sum16(m, start, hlen, datalen, csum);
2804 if (hlen > start) {
2805 swbytes += (hlen - start);
2806 } else {
2807 swbytes += (start - hlen);
2808 }
2809 #if BYTE_ORDER != BIG_ENDIAN
2810 if (start < hlen) {
2811 NTOHS(ip->ip_off);
2812 NTOHS(ip->ip_len);
2813 }
2814 #endif /* BYTE_ORDER != BIG_ENDIAN */
2815 }
2816 csum_flags = m->m_pkthdr.csum_flags;
2817
2818 if (swbytes != 0) {
2819 udp_in_cksum_stats(swbytes);
2820 }
2821 if (trailer != 0) {
2822 m_adj(m, -trailer);
2823 }
2824 } else {
2825 csum = 0;
2826 csum_flags = 0;
2827 }
2828
2829 /* Invalidate checksum */
2830 m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID;
2831
2832 ipstat.ips_fragments++;
2833
2834 /*
2835 * Adjust ip_len to not reflect the header, and
2836 * convert the fragment offset to bytes.
2837 */
2838 ip->ip_len -= hlen;
2839 if (ip->ip_off & IP_MF) {
2840 /*
2841 * Make sure that fragments have a data length
2842 * that's a non-zero multiple of 8 bytes.
2843 */
2844 if (ip->ip_len == 0 || (ip->ip_len & 0x7) != 0) {
2845 OSAddAtomic(1, &ipstat.ips_toosmall);
2846 /*
2847 * Reassembly queue may have been found if previous
2848 * fragments were valid; given that this one is bad,
2849 * we need to drop it. Make sure to set fp to NULL
2850 * if not already, since we don't want to decrement
2851 * ipq_nfrags as it doesn't include this packet.
2852 */
2853 fp = NULL;
2854 goto dropfrag;
2855 }
2856 m->m_flags |= M_FRAG;
2857 } else {
2858 /* Clear the flag in case packet comes from loopback */
2859 m->m_flags &= ~M_FRAG;
2860 }
2861 ip->ip_off <<= 3;
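/*
 * ip_off now holds a byte offset: the wire format counts fragment
 * offsets in 8-byte units, so an offset field of 185, for example,
 * becomes 185 << 3 == 1480 bytes, one full 1500-byte Ethernet MTU
 * of payload behind a 20-byte header. The shift also pushes the
 * three flag bits out of the 16-bit field, so only the offset
 * remains.
 */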
2862
2863 m->m_pkthdr.pkt_hdr = ip;
2864
2865 /* Previous ip_reass() started here. */
2866 /*
2867 * Having the header bytes counted in the mbuf
2868 * lengths would confuse the code below.
2869 */
2870 m->m_data += hlen;
2871 m->m_len -= hlen;
2872
2873 /*
2874 * If first fragment to arrive, create a reassembly queue.
2875 */
2876 if (fp == NULL) {
2877 fp = ipq_alloc(M_DONTWAIT);
2878 if (fp == NULL) {
2879 goto dropfrag;
2880 }
2881 #if CONFIG_MACF_NET
2882 if (mac_ipq_label_init(fp, M_NOWAIT) != 0) {
2883 ipq_free(fp);
2884 fp = NULL;
2885 goto dropfrag;
2886 }
2887 mac_ipq_label_associate(m, fp);
2888 #endif
2889 TAILQ_INSERT_HEAD(head, fp, ipq_list);
2890 nipq++;
2891 fp->ipq_nfrags = 1;
2892 fp->ipq_ttl = IPFRAGTTL;
2893 fp->ipq_p = ip->ip_p;
2894 fp->ipq_id = ip->ip_id;
2895 fp->ipq_src = ip->ip_src;
2896 fp->ipq_dst = ip->ip_dst;
2897 fp->ipq_frags = m;
2898 m->m_nextpkt = NULL;
2899 /*
2900 * If the first fragment has valid checksum offload
2901 * info, the rest of the fragments are eligible as well.
2902 */
2903 if (csum_flags != 0) {
2904 fp->ipq_csum = csum;
2905 fp->ipq_csum_flags = csum_flags;
2906 }
2907 #if IPDIVERT
2908 /*
2909 * Transfer firewall instructions to the fragment structure.
2910 * Only trust info in the fragment at offset 0.
2911 */
2912 if (ip->ip_off == 0) {
2913 #ifdef IPDIVERT_44
2914 fp->ipq_div_info = *divinfo;
2915 #else
2916 fp->ipq_divert = *divinfo;
2917 #endif
2918 fp->ipq_div_cookie = *divcookie;
2919 }
2920 *divinfo = 0;
2921 *divcookie = 0;
2922 #endif /* IPDIVERT */
2923 m = NULL; /* nothing to return */
2924 goto done;
2925 } else {
2926 fp->ipq_nfrags++;
2927 #if CONFIG_MACF_NET
2928 mac_ipq_label_update(m, fp);
2929 #endif
2930 }
2931
2932 #define GETIP(m) ((struct ip *)((m)->m_pkthdr.pkt_hdr))
2933
2934 /*
2935 * Handle ECN by comparing this segment with the first one;
2936 * if CE is set, do not lose CE.
2937 * Drop if CE and not-ECT are mixed for the same packet.
2938 */
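/*
 * The rules below combine as follows for this fragment (ecn)
 * against the first one queued (ecn0):
 *
 *	ecn0 \ ecn	not-ECT		ECT(0)/(1)	CE
 *	not-ECT		keep		keep		drop
 *	ECT(0)/(1)	drop		keep		keep + mark CE
 *	CE		drop		keep		keep
 */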
2939 ecn = ip->ip_tos & IPTOS_ECN_MASK;
2940 ecn0 = GETIP(fp->ipq_frags)->ip_tos & IPTOS_ECN_MASK;
2941 if (ecn == IPTOS_ECN_CE) {
2942 if (ecn0 == IPTOS_ECN_NOTECT) {
2943 goto dropfrag;
2944 }
2945 if (ecn0 != IPTOS_ECN_CE) {
2946 GETIP(fp->ipq_frags)->ip_tos |= IPTOS_ECN_CE;
2947 }
2948 }
2949 if (ecn == IPTOS_ECN_NOTECT && ecn0 != IPTOS_ECN_NOTECT) {
2950 goto dropfrag;
2951 }
2952
2953 /*
2954 * Find a segment which begins after this one does.
2955 */
2956 for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt) {
2957 if (GETIP(q)->ip_off > ip->ip_off) {
2958 break;
2959 }
2960 }
2961
2962 /*
2963 * If there is a preceding segment, it may provide some of
2964 * our data already. If so, drop the data from the incoming
2965 * segment. If it provides all of our data, drop us, otherwise
2966 * stick new segment in the proper place.
2967 *
2968 * If some of the data is dropped from the preceding
2969 * segment, then its checksum is invalidated.
2970 */
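/*
 * Worked example with hypothetical offsets: if the preceding
 * fragment p covers bytes [0, 1480) and this one arrives with
 * ip_off == 1472 and ip_len == 1016, then i == 1480 - 1472 == 8,
 * the 8 duplicate bytes are trimmed from the front of m, and the
 * new fragment effectively covers [1480, 2488).
 */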
2971 if (p) {
2972 i = GETIP(p)->ip_off + GETIP(p)->ip_len - ip->ip_off;
2973 if (i > 0) {
2974 if (i >= ip->ip_len) {
2975 goto dropfrag;
2976 }
2977 m_adj(m, i);
2978 fp->ipq_csum_flags = 0;
2979 ip->ip_off += i;
2980 ip->ip_len -= i;
2981 }
2982 m->m_nextpkt = p->m_nextpkt;
2983 p->m_nextpkt = m;
2984 } else {
2985 m->m_nextpkt = fp->ipq_frags;
2986 fp->ipq_frags = m;
2987 }
2988
2989 /*
2990 * While we overlap succeeding segments trim them or,
2991 * if they are completely covered, dequeue them.
2992 */
2993 for (; q != NULL && ip->ip_off + ip->ip_len > GETIP(q)->ip_off;
2994 q = nq) {
2995 i = (ip->ip_off + ip->ip_len) - GETIP(q)->ip_off;
2996 if (i < GETIP(q)->ip_len) {
2997 GETIP(q)->ip_len -= i;
2998 GETIP(q)->ip_off += i;
2999 m_adj(q, i);
3000 fp->ipq_csum_flags = 0;
3001 break;
3002 }
3003 nq = q->m_nextpkt;
3004 m->m_nextpkt = nq;
3005 ipstat.ips_fragdropped++;
3006 fp->ipq_nfrags--;
3007 /* defer freeing until after lock is dropped */
3008 MBUFQ_ENQUEUE(&dfq, q);
3009 }
3010
3011 /*
3012 * If this fragment carries the same checksum offload info
3013 * as the existing ones, accumulate the checksum. Otherwise,
3014 * invalidate checksum offload info for the entire datagram.
3015 */
3016 if (csum_flags != 0 && csum_flags == fp->ipq_csum_flags) {
3017 fp->ipq_csum += csum;
3018 } else if (fp->ipq_csum_flags != 0) {
3019 fp->ipq_csum_flags = 0;
3020 }
3021
3022 #if IPDIVERT
3023 /*
3024 * Transfer firewall instructions to the fragment structure.
3025 * Only trust info in the fragment at offset 0.
3026 */
3027 if (ip->ip_off == 0) {
3028 #ifdef IPDIVERT_44
3029 fp->ipq_div_info = *divinfo;
3030 #else
3031 fp->ipq_divert = *divinfo;
3032 #endif
3033 fp->ipq_div_cookie = *divcookie;
3034 }
3035 *divinfo = 0;
3036 *divcookie = 0;
3037 #endif /* IPDIVERT */
3038
3039 /*
3040 * Check for complete reassembly and perform frag per packet
3041 * limiting.
3042 *
3043 * Frag limiting is performed here so that the nth frag has
3044 * a chance to complete the packet before we drop it.
3045 * As a result, n+1 frags are actually allowed per packet, but
3046 * only n will ever be stored. (n = maxfragsperpacket.)
3047 *
3048 */
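/*
 * Concrete example (with a hypothetical maxfragsperpacket of 8):
 * when the 9th fragment of a datagram arrives it is linked in
 * first and the completeness scan below runs; if it filled the
 * last hole the datagram reassembles, otherwise ipq_nfrags (9)
 * exceeds the limit (8) and the whole queue is dropped.
 */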
3049 next = 0;
3050 for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt) {
3051 if (GETIP(q)->ip_off != next) {
3052 if (fp->ipq_nfrags > maxfragsperpacket) {
3053 ipstat.ips_fragdropped += fp->ipq_nfrags;
3054 frag_freef(head, fp);
3055 }
3056 m = NULL; /* nothing to return */
3057 goto done;
3058 }
3059 next += GETIP(q)->ip_len;
3060 }
3061 /* Make sure the last fragment didn't have the IP_MF flag */
3062 if (p->m_flags & M_FRAG) {
3063 if (fp->ipq_nfrags > maxfragsperpacket) {
3064 ipstat.ips_fragdropped += fp->ipq_nfrags;
3065 frag_freef(head, fp);
3066 }
3067 m = NULL; /* nothing to return */
3068 goto done;
3069 }
3070
3071 /*
3072 * Reassembly is complete. Make sure the packet is a sane size.
3073 */
3074 q = fp->ipq_frags;
3075 ip = GETIP(q);
3076 if (next + (IP_VHL_HL(ip->ip_vhl) << 2) > IP_MAXPACKET) {
3077 ipstat.ips_toolong++;
3078 ipstat.ips_fragdropped += fp->ipq_nfrags;
3079 frag_freef(head, fp);
3080 m = NULL; /* nothing to return */
3081 goto done;
3082 }
3083
3084 /*
3085 * Concatenate fragments.
3086 */
3087 m = q;
3088 t = m->m_next;
3089 m->m_next = NULL;
3090 m_cat(m, t);
3091 nq = q->m_nextpkt;
3092 q->m_nextpkt = NULL;
3093 for (q = nq; q != NULL; q = nq) {
3094 nq = q->m_nextpkt;
3095 q->m_nextpkt = NULL;
3096 m_cat(m, q);
3097 }
3098
3099 /*
3100 * Store partial hardware checksum info from the fragment queue;
3101 * the receive start offset is set to 20 bytes (see code at the
3102 * top of this routine.)
3103 */
3104 if (fp->ipq_csum_flags != 0) {
3105 csum = fp->ipq_csum;
3106
3107 ADDCARRY(csum);
3108
3109 m->m_pkthdr.csum_rx_val = csum;
3110 m->m_pkthdr.csum_rx_start = sizeof(struct ip);
3111 m->m_pkthdr.csum_flags = fp->ipq_csum_flags;
3112 } else if ((m->m_pkthdr.rcvif->if_flags & IFF_LOOPBACK) ||
3113 (m->m_pkthdr.pkt_flags & PKTF_LOOP)) {
3114 /* loopback checksums are always OK */
3115 m->m_pkthdr.csum_data = 0xffff;
3116 m->m_pkthdr.csum_flags =
3117 CSUM_DATA_VALID | CSUM_PSEUDO_HDR |
3118 CSUM_IP_CHECKED | CSUM_IP_VALID;
3119 }
3120
3121 #if IPDIVERT
3122 /*
3123 * Extract firewall instructions from the fragment structure.
3124 */
3125 #ifdef IPDIVERT_44
3126 *divinfo = fp->ipq_div_info;
3127 #else
3128 *divinfo = fp->ipq_divert;
3129 #endif
3130 *divcookie = fp->ipq_div_cookie;
3131 #endif /* IPDIVERT */
3132
3133 #if CONFIG_MACF_NET
3134 mac_mbuf_label_associate_ipq(fp, m);
3135 mac_ipq_label_destroy(fp);
3136 #endif
3137 /*
3138 * Create header for new ip packet by modifying header of first
3139 * packet; dequeue and discard fragment reassembly header.
3140 * Make header visible.
3141 */
3142 ip->ip_len = (IP_VHL_HL(ip->ip_vhl) << 2) + next;
3143 ip->ip_src = fp->ipq_src;
3144 ip->ip_dst = fp->ipq_dst;
3145
3146 fp->ipq_frags = NULL; /* return to caller as 'm' */
3147 frag_freef(head, fp);
3148 fp = NULL;
3149
3150 m->m_len += (IP_VHL_HL(ip->ip_vhl) << 2);
3151 m->m_data -= (IP_VHL_HL(ip->ip_vhl) << 2);
3152 /* some debugging cruft by sklower, below, will go away soon */
3153 if (m->m_flags & M_PKTHDR) { /* XXX this should be done elsewhere */
3154 m_fixhdr(m);
3155 }
3156 ipstat.ips_reassembled++;
3157
3158 /* arm the purge timer if not already and if there's work to do */
3159 frag_sched_timeout();
3160 lck_mtx_unlock(&ipqlock);
3161 /* perform deferred free (if needed) now that lock is dropped */
3162 if (!MBUFQ_EMPTY(&dfq)) {
3163 MBUFQ_DRAIN(&dfq);
3164 }
3165 VERIFY(MBUFQ_EMPTY(&dfq));
3166 return m;
3167
3168 done:
3169 VERIFY(m == NULL);
3170 /* arm the purge timer if not already and if there's work to do */
3171 frag_sched_timeout();
3172 lck_mtx_unlock(&ipqlock);
3173 /* perform deferred free (if needed) */
3174 if (!MBUFQ_EMPTY(&dfq)) {
3175 MBUFQ_DRAIN(&dfq);
3176 }
3177 VERIFY(MBUFQ_EMPTY(&dfq));
3178 return NULL;
3179
3180 dropfrag:
3181 #if IPDIVERT
3182 *divinfo = 0;
3183 *divcookie = 0;
3184 #endif /* IPDIVERT */
3185 ipstat.ips_fragdropped++;
3186 if (fp != NULL) {
3187 fp->ipq_nfrags--;
3188 }
3189 /* arm the purge timer if not already and if there's work to do */
3190 frag_sched_timeout();
3191 lck_mtx_unlock(&ipqlock);
3192 m_freem(m);
3193 /* perform deferred free (if needed) */
3194 if (!MBUFQ_EMPTY(&dfq)) {
3195 MBUFQ_DRAIN(&dfq);
3196 }
3197 VERIFY(MBUFQ_EMPTY(&dfq));
3198 return NULL;
3199 #undef GETIP
3200 }
3201
3202 /*
3203 * Free a fragment reassembly header and all
3204 * associated datagrams.
3205 */
3206 static void
3207 frag_freef(struct ipqhead *fhp, struct ipq *fp)
3208 {
3209 LCK_MTX_ASSERT(&ipqlock, LCK_MTX_ASSERT_OWNED);
3210
3211 fp->ipq_nfrags = 0;
3212 if (fp->ipq_frags != NULL) {
3213 m_freem_list(fp->ipq_frags);
3214 fp->ipq_frags = NULL;
3215 }
3216 TAILQ_REMOVE(fhp, fp, ipq_list);
3217 nipq--;
3218 ipq_free(fp);
3219 }
3220
3221 /*
3222 * IP reassembly timer processing
3223 */
3224 static void
3225 frag_timeout(void *arg)
3226 {
3227 #pragma unused(arg)
3228 struct ipq *fp;
3229 int i;
3230
3231 /*
3232 * Update coarse-grained networking timestamp (in sec.); the idea
3233 * is to piggy-back on the timeout callout to update the counter
3234 * returnable via net_uptime().
3235 */
3236 net_update_uptime();
3237
3238 lck_mtx_lock(&ipqlock);
3239 for (i = 0; i < IPREASS_NHASH; i++) {
3240 for (fp = TAILQ_FIRST(&ipq[i]); fp;) {
3241 struct ipq *fpp;
3242
3243 fpp = fp;
3244 fp = TAILQ_NEXT(fp, ipq_list);
3245 if (--fpp->ipq_ttl == 0) {
3246 ipstat.ips_fragtimeout += fpp->ipq_nfrags;
3247 frag_freef(&ipq[i], fpp);
3248 }
3249 }
3250 }
3251 /*
3252 * If we are over the maximum number of fragments
3253 * (due to the limit being lowered), drain off
3254 * enough to get down to the new limit.
3255 */
3256 if (maxnipq >= 0 && nipq > (unsigned)maxnipq) {
3257 for (i = 0; i < IPREASS_NHASH; i++) {
3258 while (nipq > (unsigned)maxnipq &&
3259 !TAILQ_EMPTY(&ipq[i])) {
3260 ipstat.ips_fragdropped +=
3261 TAILQ_FIRST(&ipq[i])->ipq_nfrags;
3262 frag_freef(&ipq[i], TAILQ_FIRST(&ipq[i]));
3263 }
3264 }
3265 }
3266 /* re-arm the purge timer if there's work to do */
3267 frag_timeout_run = 0;
3268 frag_sched_timeout();
3269 lck_mtx_unlock(&ipqlock);
3270 }
3271
3272 static void
3273 frag_sched_timeout(void)
3274 {
3275 LCK_MTX_ASSERT(&ipqlock, LCK_MTX_ASSERT_OWNED);
3276
3277 if (!frag_timeout_run && nipq > 0) {
3278 frag_timeout_run = 1;
3279 timeout(frag_timeout, NULL, hz);
3280 }
3281 }
3282
3283 /*
3284 * Drain off all datagram fragments.
3285 */
3286 static void
3287 frag_drain(void)
3288 {
3289 int i;
3290
3291 lck_mtx_lock(&ipqlock);
3292 for (i = 0; i < IPREASS_NHASH; i++) {
3293 while (!TAILQ_EMPTY(&ipq[i])) {
3294 ipstat.ips_fragdropped +=
3295 TAILQ_FIRST(&ipq[i])->ipq_nfrags;
3296 frag_freef(&ipq[i], TAILQ_FIRST(&ipq[i]));
3297 }
3298 }
3299 lck_mtx_unlock(&ipqlock);
3300 }
3301
3302 static struct ipq *
3303 ipq_alloc(int how)
3304 {
3305 struct mbuf *t;
3306 struct ipq *fp;
3307
3308 /*
3309 * See comments in ipq_updateparams(). Keep the count separate
3310 * from nipq since the latter represents the elements already
3311 * in the reassembly queues.
3312 */
3313 if (ipq_limit > 0 && ipq_count > ipq_limit) {
3314 return NULL;
3315 }
3316
3317 t = m_get(how, MT_FTABLE);
3318 if (t != NULL) {
3319 atomic_add_32(&ipq_count, 1);
3320 fp = mtod(t, struct ipq *);
3321 bzero(fp, sizeof(*fp));
3322 } else {
3323 fp = NULL;
3324 }
3325 return fp;
3326 }
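/*
 * Each struct ipq is carved out of an MT_FTABLE mbuf above, so the
 * structure must fit within an mbuf's data area. A compile-time
 * guard in the spirit of xnu's _CASSERT usage (illustrative sketch,
 * not present in this file) would be:
 *
 *	_CASSERT(sizeof (struct ipq) <= _MLEN);
 */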
3327
3328 static void
3329 ipq_free(struct ipq *fp)
3330 {
3331 (void) m_free(dtom(fp));
3332 atomic_add_32(&ipq_count, -1);
3333 }
3334
3335 /*
3336 * Drain callback
3337 */
3338 void
3339 ip_drain(void)
3340 {
3341 frag_drain(); /* fragments */
3342 in_rtqdrain(); /* protocol cloned routes */
3343 in_arpdrain(NULL); /* cloned routes: ARP */
3344 }
3345
3346 /*
3347 * Do option processing on a datagram,
3348 * possibly discarding it if bad options are encountered,
3349 * or forwarding it if source-routed.
3350 * The pass argument is used when operating in the IPSTEALTH
3351 * mode to tell what options to process:
3352 * [LS]SRR (pass 0) or the others (pass 1).
3353 * The reason for as many as two passes is that when doing IPSTEALTH,
3354 * non-routing options should be processed only if the packet is for us.
3355 * Returns 1 if packet has been forwarded/freed,
3356 * 0 if the packet should be processed further.
3357 */
3358 static int
3359 ip_dooptions(struct mbuf *m, int pass, struct sockaddr_in *next_hop)
3360 {
3361 #pragma unused(pass)
3362 struct ip *ip = mtod(m, struct ip *);
3363 u_char *cp;
3364 struct ip_timestamp *ipt;
3365 struct in_ifaddr *ia;
3366 int opt, optlen, cnt, off, code, type = ICMP_PARAMPROB, forward = 0;
3367 struct in_addr *sin, dst;
3368 u_int32_t ntime;
3369 struct sockaddr_in ipaddr = {
3370 .sin_len = sizeof(ipaddr),
3371 .sin_family = AF_INET,
3372 .sin_port = 0,
3373 .sin_addr = { .s_addr = 0 },
3374 .sin_zero = { 0, }
3375 };
3376
3377 /* Expect 32-bit aligned data pointer on strict-align platforms */
3378 MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);
3379
3380 dst = ip->ip_dst;
3381 cp = (u_char *)(ip + 1);
3382 cnt = (IP_VHL_HL(ip->ip_vhl) << 2) - sizeof(struct ip);
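/*
 * Layout of a multi-octet option as walked below (offsets per the
 * standard netinet/ip.h definitions):
 *
 *	cp[IPOPT_OPTVAL] (0)	option type, e.g. IPOPT_LSRR
 *	cp[IPOPT_OLEN]   (1)	total option length in octets
 *	cp[IPOPT_OFFSET] (2)	1-based pointer into the option,
 *				starting at IPOPT_MINOFF (4)
 *	cp[3] onward		option payload (e.g. the route data)
 */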
3383 for (; cnt > 0; cnt -= optlen, cp += optlen) {
3384 opt = cp[IPOPT_OPTVAL];
3385 if (opt == IPOPT_EOL) {
3386 break;
3387 }
3388 if (opt == IPOPT_NOP) {
3389 optlen = 1;
3390 } else {
3391 if (cnt < IPOPT_OLEN + sizeof(*cp)) {
3392 code = &cp[IPOPT_OLEN] - (u_char *)ip;
3393 goto bad;
3394 }
3395 optlen = cp[IPOPT_OLEN];
3396 if (optlen < IPOPT_OLEN + sizeof(*cp) ||
3397 optlen > cnt) {
3398 code = &cp[IPOPT_OLEN] - (u_char *)ip;
3399 goto bad;
3400 }
3401 }
3402 switch (opt) {
3403 default:
3404 break;
3405
3406 /*
3407 * Source routing with record.
3408 * Find interface with current destination address.
3409 * If none on this machine then drop if strictly routed,
3410 * or do nothing if loosely routed.
3411 * Record interface address and bring up next address
3412 * component. If strictly routed make sure next
3413 * address is on directly accessible net.
3414 */
3415 case IPOPT_LSRR:
3416 case IPOPT_SSRR:
3417 if (optlen < IPOPT_OFFSET + sizeof(*cp)) {
3418 code = &cp[IPOPT_OLEN] - (u_char *)ip;
3419 goto bad;
3420 }
3421 if ((off = cp[IPOPT_OFFSET]) < IPOPT_MINOFF) {
3422 code = &cp[IPOPT_OFFSET] - (u_char *)ip;
3423 goto bad;
3424 }
3425 ipaddr.sin_addr = ip->ip_dst;
3426 ia = (struct in_ifaddr *)ifa_ifwithaddr(SA(&ipaddr));
3427 if (ia == NULL) {
3428 if (opt == IPOPT_SSRR) {
3429 type = ICMP_UNREACH;
3430 code = ICMP_UNREACH_SRCFAIL;
3431 goto bad;
3432 }
3433 if (!ip_dosourceroute) {
3434 goto nosourcerouting;
3435 }
3436 /*
3437 * Loose routing, and not at next destination
3438 * yet; nothing to do except forward.
3439 */
3440 break;
3441 } else {
3442 IFA_REMREF(&ia->ia_ifa);
3443 ia = NULL;
3444 }
3445 off--; /* 0 origin */
3446 if (off > optlen - (int)sizeof(struct in_addr)) {
3447 /*
3448 * End of source route. Should be for us.
3449 */
3450 if (!ip_acceptsourceroute) {
3451 goto nosourcerouting;
3452 }
3453 save_rte(cp, ip->ip_src);
3454 break;
3455 }
3456
3457 if (!ip_dosourceroute) {
3458 if (ipforwarding) {
3459 char buf[MAX_IPv4_STR_LEN];
3460 char buf2[MAX_IPv4_STR_LEN];
3461 /*
3462 * Acting as a router, so generate ICMP
3463 */
3464 nosourcerouting:
3465 log(LOG_WARNING,
3466 "attempted source route from %s "
3467 "to %s\n",
3468 inet_ntop(AF_INET, &ip->ip_src,
3469 buf, sizeof(buf)),
3470 inet_ntop(AF_INET, &ip->ip_dst,
3471 buf2, sizeof(buf2)));
3472 type = ICMP_UNREACH;
3473 code = ICMP_UNREACH_SRCFAIL;
3474 goto bad;
3475 } else {
3476 /*
3477 * Not acting as a router,
3478 * so silently drop.
3479 */
3480 OSAddAtomic(1, &ipstat.ips_cantforward);
3481 m_freem(m);
3482 return 1;
3483 }
3484 }
3485
3486 /*
3487 * locate outgoing interface
3488 */
3489 (void) memcpy(&ipaddr.sin_addr, cp + off,
3490 sizeof(ipaddr.sin_addr));
3491
3492 if (opt == IPOPT_SSRR) {
3493 #define INA struct in_ifaddr *
3494 if ((ia = (INA)ifa_ifwithdstaddr(
3495 SA(&ipaddr))) == NULL) {
3496 ia = (INA)ifa_ifwithnet(SA(&ipaddr));
3497 }
3498 } else {
3499 ia = ip_rtaddr(ipaddr.sin_addr);
3500 }
3501 if (ia == NULL) {
3502 type = ICMP_UNREACH;
3503 code = ICMP_UNREACH_SRCFAIL;
3504 goto bad;
3505 }
3506 ip->ip_dst = ipaddr.sin_addr;
3507 IFA_LOCK(&ia->ia_ifa);
3508 (void) memcpy(cp + off, &(IA_SIN(ia)->sin_addr),
3509 sizeof(struct in_addr));
3510 IFA_UNLOCK(&ia->ia_ifa);
3511 IFA_REMREF(&ia->ia_ifa);
3512 ia = NULL;
3513 cp[IPOPT_OFFSET] += sizeof(struct in_addr);
3514 /*
3515 * Let ip_intr's mcast routing check handle mcast pkts
3516 */
3517 forward = !IN_MULTICAST(ntohl(ip->ip_dst.s_addr));
3518 break;
3519
3520 case IPOPT_RR:
3521 if (optlen < IPOPT_OFFSET + sizeof(*cp)) {
3522 code = &cp[IPOPT_OFFSET] - (u_char *)ip;
3523 goto bad;
3524 }
3525 if ((off = cp[IPOPT_OFFSET]) < IPOPT_MINOFF) {
3526 code = &cp[IPOPT_OFFSET] - (u_char *)ip;
3527 goto bad;
3528 }
3529 /*
3530 * If no space remains, ignore.
3531 */
3532 off--; /* 0 origin */
3533 if (off > optlen - (int)sizeof(struct in_addr)) {
3534 break;
3535 }
3536 (void) memcpy(&ipaddr.sin_addr, &ip->ip_dst,
3537 sizeof(ipaddr.sin_addr));
3538 /*
3539 * locate outgoing interface; if we're the destination,
3540 * use the incoming interface (should be same).
3541 */
3542 if ((ia = (INA)ifa_ifwithaddr(SA(&ipaddr))) == NULL) {
3543 if ((ia = ip_rtaddr(ipaddr.sin_addr)) == NULL) {
3544 type = ICMP_UNREACH;
3545 code = ICMP_UNREACH_HOST;
3546 goto bad;
3547 }
3548 }
3549 IFA_LOCK(&ia->ia_ifa);
3550 (void) memcpy(cp + off, &(IA_SIN(ia)->sin_addr),
3551 sizeof(struct in_addr));
3552 IFA_UNLOCK(&ia->ia_ifa);
3553 IFA_REMREF(&ia->ia_ifa);
3554 ia = NULL;
3555 cp[IPOPT_OFFSET] += sizeof(struct in_addr);
3556 break;
3557
3558 case IPOPT_TS:
3559 code = cp - (u_char *)ip;
3560 ipt = (struct ip_timestamp *)(void *)cp;
3561 if (ipt->ipt_len < 4 || ipt->ipt_len > 40) {
3562 code = (u_char *)&ipt->ipt_len - (u_char *)ip;
3563 goto bad;
3564 }
3565 if (ipt->ipt_ptr < 5) {
3566 code = (u_char *)&ipt->ipt_ptr - (u_char *)ip;
3567 goto bad;
3568 }
3569 if (ipt->ipt_ptr >
3570 ipt->ipt_len - (int)sizeof(int32_t)) {
3571 if (++ipt->ipt_oflw == 0) {
3572 code = (u_char *)&ipt->ipt_ptr -
3573 (u_char *)ip;
3574 goto bad;
3575 }
3576 break;
3577 }
3578 sin = (struct in_addr *)(void *)(cp + ipt->ipt_ptr - 1);
3579 switch (ipt->ipt_flg) {
3580 case IPOPT_TS_TSONLY:
3581 break;
3582
3583 case IPOPT_TS_TSANDADDR:
3584 if (ipt->ipt_ptr - 1 + sizeof(n_time) +
3585 sizeof(struct in_addr) > ipt->ipt_len) {
3586 code = (u_char *)&ipt->ipt_ptr -
3587 (u_char *)ip;
3588 goto bad;
3589 }
3590 ipaddr.sin_addr = dst;
3591 ia = (INA)ifaof_ifpforaddr(SA(&ipaddr),
3592 m->m_pkthdr.rcvif);
3593 if (ia == NULL) {
3594 continue;
3595 }
3596 IFA_LOCK(&ia->ia_ifa);
3597 (void) memcpy(sin, &IA_SIN(ia)->sin_addr,
3598 sizeof(struct in_addr));
3599 IFA_UNLOCK(&ia->ia_ifa);
3600 ipt->ipt_ptr += sizeof(struct in_addr);
3601 IFA_REMREF(&ia->ia_ifa);
3602 ia = NULL;
3603 break;
3604
3605 case IPOPT_TS_PRESPEC:
3606 if (ipt->ipt_ptr - 1 + sizeof(n_time) +
3607 sizeof(struct in_addr) > ipt->ipt_len) {
3608 code = (u_char *)&ipt->ipt_ptr -
3609 (u_char *)ip;
3610 goto bad;
3611 }
3612 (void) memcpy(&ipaddr.sin_addr, sin,
3613 sizeof(struct in_addr));
3614 if ((ia = (struct in_ifaddr *)ifa_ifwithaddr(
3615 SA(&ipaddr))) == NULL) {
3616 continue;
3617 }
3618 IFA_REMREF(&ia->ia_ifa);
3619 ia = NULL;
3620 ipt->ipt_ptr += sizeof(struct in_addr);
3621 break;
3622
3623 default:
3624 /* XXX can't take &ipt->ipt_flg */
3625 code = (u_char *)&ipt->ipt_ptr -
3626 (u_char *)ip + 1;
3627 goto bad;
3628 }
3629 ntime = iptime();
3630 (void) memcpy(cp + ipt->ipt_ptr - 1, &ntime,
3631 sizeof(n_time));
3632 ipt->ipt_ptr += sizeof(n_time);
3633 }
3634 }
3635 if (forward && ipforwarding) {
3636 ip_forward(m, 1, next_hop);
3637 return 1;
3638 }
3639 return 0;
3640 bad:
3641 icmp_error(m, type, code, 0, 0);
3642 OSAddAtomic(1, &ipstat.ips_badoptions);
3643 return 1;
3644 }
3645
3646 /*
3647 * Check for the presence of the IP Router Alert option [RFC2113]
3648 * in the header of an IPv4 datagram.
3649 *
3650 * This call is not intended for use from the forwarding path; it is here
3651 * so that protocol domains may check for the presence of the option.
3652 * Given how FreeBSD's IPv4 stack is currently structured, the Router Alert
3653 * option does not have much relevance to the implementation, though this
3654 * may change in future.
3655 * Router alert options SHOULD be passed if running in IPSTEALTH mode and
3656 * we are not the endpoint.
3657 * Length checks on individual options should already have been performed
3658 * by ip_dooptions(), therefore they are folded under DIAGNOSTIC here.
3659 *
3660 * Return zero if not present or options are invalid, non-zero if present.
3661 */
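/*
 * On the wire a well-formed Router Alert option is exactly four
 * octets (RFC 2113): type 0x94 (copy flag set, option number 20),
 * length 4, and a 16-bit value of 0 ("routers should examine this
 * packet"), which is what the IPOPT_RA case below validates under
 * DIAGNOSTIC.
 */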
3662 int
3663 ip_checkrouteralert(struct mbuf *m)
3664 {
3665 struct ip *ip = mtod(m, struct ip *);
3666 u_char *cp;
3667 int opt, optlen, cnt, found_ra;
3668
3669 found_ra = 0;
3670 cp = (u_char *)(ip + 1);
3671 cnt = (IP_VHL_HL(ip->ip_vhl) << 2) - sizeof(struct ip);
3672 for (; cnt > 0; cnt -= optlen, cp += optlen) {
3673 opt = cp[IPOPT_OPTVAL];
3674 if (opt == IPOPT_EOL) {
3675 break;
3676 }
3677 if (opt == IPOPT_NOP) {
3678 optlen = 1;
3679 } else {
3680 #ifdef DIAGNOSTIC
3681 if (cnt < IPOPT_OLEN + sizeof(*cp)) {
3682 break;
3683 }
3684 #endif
3685 optlen = cp[IPOPT_OLEN];
3686 #ifdef DIAGNOSTIC
3687 if (optlen < IPOPT_OLEN + sizeof(*cp) || optlen > cnt) {
3688 break;
3689 }
3690 #endif
3691 }
3692 switch (opt) {
3693 case IPOPT_RA:
3694 #ifdef DIAGNOSTIC
3695 if (optlen != IPOPT_OFFSET + sizeof(uint16_t) ||
3696 (*((uint16_t *)(void *)&cp[IPOPT_OFFSET]) != 0)) {
3697 break;
3698 } else
3699 #endif
3700 found_ra = 1;
3701 break;
3702 default:
3703 break;
3704 }
3705 }
3706
3707 return found_ra;
3708 }
3709
3710 /*
3711 * Given address of next destination (final or next hop),
3712 * return internet address info of interface to be used to get there.
3713 */
3714 struct in_ifaddr *
3715 ip_rtaddr(struct in_addr dst)
3716 {
3717 struct sockaddr_in *sin;
3718 struct ifaddr *rt_ifa;
3719 struct route ro;
3720
3721 bzero(&ro, sizeof(ro));
3722 sin = SIN(&ro.ro_dst);
3723 sin->sin_family = AF_INET;
3724 sin->sin_len = sizeof(*sin);
3725 sin->sin_addr = dst;
3726
3727 rtalloc_ign(&ro, RTF_PRCLONING);
3728 if (ro.ro_rt == NULL) {
3729 ROUTE_RELEASE(&ro);
3730 return NULL;
3731 }
3732
3733 RT_LOCK(ro.ro_rt);
3734 if ((rt_ifa = ro.ro_rt->rt_ifa) != NULL) {
3735 IFA_ADDREF(rt_ifa);
3736 }
3737 RT_UNLOCK(ro.ro_rt);
3738 ROUTE_RELEASE(&ro);
3739
3740 return (struct in_ifaddr *)rt_ifa;
3741 }
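/*
 * The caller owns a reference on the returned in_ifaddr and must
 * release it with IFA_REMREF() when done, as the option-processing
 * code above does after each ip_rtaddr() lookup.
 */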
3742
3743 /*
3744 * Save incoming source route for use in replies,
3745 * to be picked up later by ip_srcroute if the receiver is interested.
3746 */
3747 void
3748 save_rte(u_char *option, struct in_addr dst)
3749 {
3750 unsigned olen;
3751
3752 olen = option[IPOPT_OLEN];
3753 #if DIAGNOSTIC
3754 if (ipprintfs) {
3755 printf("save_rte: olen %d\n", olen);
3756 }
3757 #endif
3758 if (olen > sizeof(ip_srcrt) - (1 + sizeof(dst))) {
3759 return;
3760 }
3761 bcopy(option, ip_srcrt.srcopt, olen);
3762 ip_nhops = (olen - IPOPT_OFFSET - 1) / sizeof(struct in_addr);
3763 ip_srcrt.dst = dst;
3764 }
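/*
 * Sanity check on the arithmetic above: a recorded LSRR/SSRR
 * option holds the type, length and pointer octets followed by the
 * hop list, so olen == IPOPT_OFFSET + 1 + 4 * nhops; an option
 * with olen == 11, for instance, yields
 * ip_nhops == (11 - 2 - 1) / 4 == 2 recorded hops.
 */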
3765
3766 /*
3767 * Retrieve incoming source route for use in replies,
3768 * in the same form used by setsockopt.
3769 * The first hop is placed before the options and will be removed later.
3770 */
3771 struct mbuf *
3772 ip_srcroute(void)
3773 {
3774 struct in_addr *p, *q;
3775 struct mbuf *m;
3776
3777 if (ip_nhops == 0) {
3778 return NULL;
3779 }
3780
3781 m = m_get(M_DONTWAIT, MT_HEADER);
3782 if (m == NULL) {
3783 return NULL;
3784 }
3785
3786 #define OPTSIZ (sizeof (ip_srcrt.nop) + sizeof (ip_srcrt.srcopt))
3787
3788 /* length is (nhops+1)*sizeof(addr) + sizeof(nop + srcrt header) */
3789 m->m_len = ip_nhops * sizeof(struct in_addr) +
3790 sizeof(struct in_addr) + OPTSIZ;
3791 #if DIAGNOSTIC
3792 if (ipprintfs) {
3793 printf("ip_srcroute: nhops %d mlen %d", ip_nhops, m->m_len);
3794 }
3795 #endif
3796
3797 /*
3798 * First save first hop for return route
3799 */
3800 p = &ip_srcrt.route[ip_nhops - 1];
3801 *(mtod(m, struct in_addr *)) = *p--;
3802 #if DIAGNOSTIC
3803 if (ipprintfs) {
3804 printf(" hops %lx",
3805 (u_int32_t)ntohl(mtod(m, struct in_addr *)->s_addr));
3806 }
3807 #endif
3808
3809 /*
3810 * Copy option fields and padding (nop) to mbuf.
3811 */
3812 ip_srcrt.nop = IPOPT_NOP;
3813 ip_srcrt.srcopt[IPOPT_OFFSET] = IPOPT_MINOFF;
3814 (void) memcpy(mtod(m, caddr_t) + sizeof(struct in_addr),
3815 &ip_srcrt.nop, OPTSIZ);
3816 q = (struct in_addr *)(void *)(mtod(m, caddr_t) +
3817 sizeof(struct in_addr) + OPTSIZ);
3818 #undef OPTSIZ
3819 /*
3820 * Record return path as an IP source route,
3821 * reversing the path (pointers are now aligned).
3822 */
3823 while (p >= ip_srcrt.route) {
3824 #if DIAGNOSTIC
3825 if (ipprintfs) {
3826 printf(" %lx", (u_int32_t)ntohl(q->s_addr));
3827 }
3828 #endif
3829 *q++ = *p--;
3830 }
3831 /*
3832 * Last hop goes to final destination.
3833 */
3834 *q = ip_srcrt.dst;
3835 #if DIAGNOSTIC
3836 if (ipprintfs) {
3837 printf(" %lx\n", (u_int32_t)ntohl(q->s_addr));
3838 }
3839 #endif
3840 return m;
3841 }
3842
3843 /*
3844 * Strip out IP options, at higher level protocol in the kernel.
3845 */
3846 void
3847 ip_stripoptions(struct mbuf *m)
3848 {
3849 int i;
3850 struct ip *ip = mtod(m, struct ip *);
3851 caddr_t opts;
3852 int olen;
3853
3854 /* Expect 32-bit aligned data pointer on strict-align platforms */
3855 MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);
3856
3857 /* use bcopy() since it supports overlapping range */
3858 olen = (IP_VHL_HL(ip->ip_vhl) << 2) - sizeof(struct ip);
3859 opts = (caddr_t)(ip + 1);
3860 i = m->m_len - (sizeof(struct ip) + olen);
3861 bcopy(opts + olen, opts, (unsigned)i);
3862 m->m_len -= olen;
3863 if (m->m_flags & M_PKTHDR) {
3864 m->m_pkthdr.len -= olen;
3865 }
3866 ip->ip_vhl = IP_MAKE_VHL(IPVERSION, sizeof(struct ip) >> 2);
3867
3868 /*
3869 * We expect ip_{off,len} to be in host order by now, and
3870 * that the original IP header length has been subtracted
3871 * out from ip_len. Temporarily adjust ip_len for checksum
3872 * recalculation, and restore it afterwards.
3873 */
3874 ip->ip_len += sizeof(struct ip);
3875
3876 /* recompute checksum now that IP header is smaller */
3877 #if BYTE_ORDER != BIG_ENDIAN
3878 HTONS(ip->ip_len);
3879 HTONS(ip->ip_off);
3880 #endif /* BYTE_ORDER != BIG_ENDIAN */
3881 ip->ip_sum = in_cksum_hdr(ip);
3882 #if BYTE_ORDER != BIG_ENDIAN
3883 NTOHS(ip->ip_off);
3884 NTOHS(ip->ip_len);
3885 #endif /* BYTE_ORDER != BIG_ENDIAN */
3886
3887 ip->ip_len -= sizeof(struct ip);
3888
3889 /*
3890 * Given that we've just stripped IP options from the header,
3891 * we need to adjust the start offset accordingly if this
3892 * packet had gone through partial checksum offload.
3893 */
3894 if ((m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PARTIAL)) ==
3895 (CSUM_DATA_VALID | CSUM_PARTIAL)) {
3896 if (m->m_pkthdr.csum_rx_start >= (sizeof(struct ip) + olen)) {
3897 /* most common case */
3898 m->m_pkthdr.csum_rx_start -= olen;
3899 } else {
3900 /* compute checksum in software instead */
3901 m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID;
3902 m->m_pkthdr.csum_data = 0;
3903 ipstat.ips_adj_hwcsum_clr++;
3904 }
3905 }
3906 }
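/*
 * Illustrative sketch (not compiled into xnu): the same buffer surgery
 * ip_stripoptions() performs, expressed over a flat byte array. The
 * helper strip_options() is hypothetical, and ip_len/ip_off adjustment
 * and checksum recomputation are left out of scope.
 */
#if 0
#include <stdint.h>
#include <stddef.h>
#include <string.h>

static size_t
strip_options(uint8_t *pkt, size_t len)
{
	size_t hlen = (size_t)(pkt[0] & 0x0f) << 2;  /* IHL in 32-bit words */

	if (hlen <= 20 || hlen > len) {
		return len;                          /* no options, or malformed */
	}
	/* slide the payload down over the options (ranges overlap) */
	memmove(pkt + 20, pkt + hlen, len - hlen);
	pkt[0] = 0x45;                               /* version 4, IHL 5 words */
	return len - (hlen - 20);
}
#endif /* illustrative sketch */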
3907
3908 u_char inetctlerrmap[PRC_NCMDS] = {
3909 0, 0, 0, 0,
3910 0, EMSGSIZE, EHOSTDOWN, EHOSTUNREACH,
3911 ENETUNREACH, EHOSTUNREACH, ECONNREFUSED, ECONNREFUSED,
3912 EMSGSIZE, EHOSTUNREACH, 0, 0,
3913 0, 0, EHOSTUNREACH, 0,
3914 ENOPROTOOPT, ECONNREFUSED
3915 };
3916
3917 static int
3918 sysctl_ipforwarding SYSCTL_HANDLER_ARGS
3919 {
3920 #pragma unused(arg1, arg2)
3921 int i, was_ipforwarding = ipforwarding;
3922
3923 i = sysctl_handle_int(oidp, oidp->oid_arg1, oidp->oid_arg2, req);
3924 if (i != 0 || req->newptr == USER_ADDR_NULL) {
3925 return i;
3926 }
3927
3928 if (was_ipforwarding && !ipforwarding) {
3929 /* clean up IPv4 forwarding cached routes */
3930 ifnet_head_lock_shared();
3931 for (i = 0; i <= if_index; i++) {
3932 struct ifnet *ifp = ifindex2ifnet[i];
3933 if (ifp != NULL) {
3934 lck_mtx_lock(&ifp->if_cached_route_lock);
3935 ROUTE_RELEASE(&ifp->if_fwd_route);
3936 bzero(&ifp->if_fwd_route,
3937 sizeof(ifp->if_fwd_route));
3938 lck_mtx_unlock(&ifp->if_cached_route_lock);
3939 }
3940 }
3941 ifnet_head_done();
3942 }
3943
3944 return 0;
3945 }
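/*
 * Illustrative sketch (not compiled into xnu): flipping the knob this
 * handler services from userland via sysctlbyname(3). The helper
 * set_ipforwarding() is hypothetical; "net.inet.ip.forwarding" is the
 * standard OID name for this control.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>

static int
set_ipforwarding(int on)
{
	int v = on ? 1 : 0;

	/* requires privilege; returns -1 with errno set on failure */
	return sysctlbyname("net.inet.ip.forwarding", NULL, NULL, &v, sizeof(v));
}
#endif /* illustrative sketch */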
3946
3947 /*
3948 * Similar to inp_route_{copyout,copyin} routines except that these copy
3949 * out the cached IPv4 forwarding route from struct ifnet instead of the
3950 * inpcb. See comments for those routines for explanations.
3951 */
3952 static void
3953 ip_fwd_route_copyout(struct ifnet *ifp, struct route *dst)
3954 {
3955 struct route *src = &ifp->if_fwd_route;
3956
3957 lck_mtx_lock_spin(&ifp->if_cached_route_lock);
3958 lck_mtx_convert_spin(&ifp->if_cached_route_lock);
3959
3960 /* Minor sanity check */
3961 if (src->ro_rt != NULL && rt_key(src->ro_rt)->sa_family != AF_INET) {
3962 panic("%s: wrong or corrupted route: %p", __func__, src);
3963 }
3964
3965 route_copyout(dst, src, sizeof(*dst));
3966
3967 lck_mtx_unlock(&ifp->if_cached_route_lock);
3968 }
3969
3970 static void
3971 ip_fwd_route_copyin(struct ifnet *ifp, struct route *src)
3972 {
3973 struct route *dst = &ifp->if_fwd_route;
3974
3975 lck_mtx_lock_spin(&ifp->if_cached_route_lock);
3976 lck_mtx_convert_spin(&ifp->if_cached_route_lock);
3977
3978 /* Minor sanity check */
3979 if (src->ro_rt != NULL && rt_key(src->ro_rt)->sa_family != AF_INET) {
3980 panic("%s: wrong or corrupted route: %p", __func__, src);
3981 }
3982
3983 if (ifp->if_fwd_cacheok) {
3984 route_copyin(src, dst, sizeof(*src));
3985 }
3986
3987 lck_mtx_unlock(&ifp->if_cached_route_lock);
3988 }
3989
3990 /*
3991 * Forward a packet. If some error occurs, return the sender
3992 * an icmp packet. Note that we can't always generate a meaningful
3993 * icmp message because icmp doesn't have a large enough repertoire
3994 * of codes and types.
3995 *
3996 * If not forwarding, just drop the packet. This could be confusing
3997 * if ipforwarding was zero but some routing protocol was advancing
3998 * us as a gateway to somewhere. However, we must let the routing
3999 * protocol deal with that.
4000 *
4001 * The srcrt parameter indicates whether the packet is being forwarded
4002 * via a source route.
4003 */
4004 static void
4005 ip_forward(struct mbuf *m, int srcrt, struct sockaddr_in *next_hop)
4006 {
4007 #if !IPFIREWALL
4008 #pragma unused(next_hop)
4009 #endif
4010 struct ip *ip = mtod(m, struct ip *);
4011 struct sockaddr_in *sin;
4012 struct rtentry *rt;
4013 struct route fwd_rt;
4014 int error, type = 0, code = 0;
4015 struct mbuf *mcopy;
4016 n_long dest;
4017 struct in_addr pkt_dst;
4018 u_int32_t nextmtu = 0, len;
4019 struct ip_out_args ipoa;
4020 struct ifnet *rcvifp = m->m_pkthdr.rcvif;
4021
4022 bzero(&ipoa, sizeof(ipoa));
4023 ipoa.ipoa_boundif = IFSCOPE_NONE;
4024 ipoa.ipoa_sotc = SO_TC_UNSPEC;
4025 ipoa.ipoa_netsvctype = _NET_SERVICE_TYPE_UNSPEC;
4026
4027 #if IPSEC
4028 struct secpolicy *sp = NULL;
4029 int ipsecerror;
4030 #endif /* IPSEC */
4031 #if PF
4032 struct pf_mtag *pf_mtag;
4033 #endif /* PF */
4034
4035 dest = 0;
4036 #if IPFIREWALL
4037 /*
4038 * Cache the destination address of the packet; this may be
4039 * changed by use of 'ipfw fwd'.
4040 */
4041 pkt_dst = ((next_hop != NULL) ? next_hop->sin_addr : ip->ip_dst);
4042 #else /* !IPFIREWALL */
4043 pkt_dst = ip->ip_dst;
4044 #endif /* !IPFIREWALL */
4045
4046 #if DIAGNOSTIC
4047 if (ipprintfs) {
4048 printf("forward: src %lx dst %lx ttl %x\n",
4049 (u_int32_t)ip->ip_src.s_addr, (u_int32_t)pkt_dst.s_addr,
4050 ip->ip_ttl);
4051 }
4052 #endif
4053
4054 if (m->m_flags & (M_BCAST | M_MCAST) || !in_canforward(pkt_dst)) {
4055 OSAddAtomic(1, &ipstat.ips_cantforward);
4056 m_freem(m);
4057 return;
4058 }
4059 #if IPSTEALTH
4060 if (!ipstealth) {
4061 #endif /* IPSTEALTH */
4062 if (ip->ip_ttl <= IPTTLDEC) {
4063 icmp_error(m, ICMP_TIMXCEED, ICMP_TIMXCEED_INTRANS,
4064 dest, 0);
4065 return;
4066 }
4067 #if IPSTEALTH
4068 }
4069 #endif /* IPSTEALTH */
4070
4071 #if PF
4072 pf_mtag = pf_find_mtag(m);
4073 if (pf_mtag != NULL && pf_mtag->pftag_rtableid != IFSCOPE_NONE) {
4074 ipoa.ipoa_boundif = pf_mtag->pftag_rtableid;
4075 ipoa.ipoa_flags |= IPOAF_BOUND_IF;
4076 }
4077 #endif /* PF */
4078
4079 ip_fwd_route_copyout(rcvifp, &fwd_rt);
4080
4081 sin = SIN(&fwd_rt.ro_dst);
4082 if (ROUTE_UNUSABLE(&fwd_rt) || pkt_dst.s_addr != sin->sin_addr.s_addr) {
4083 ROUTE_RELEASE(&fwd_rt);
4084
4085 sin->sin_family = AF_INET;
4086 sin->sin_len = sizeof(*sin);
4087 sin->sin_addr = pkt_dst;
4088
4089 rtalloc_scoped_ign(&fwd_rt, RTF_PRCLONING, ipoa.ipoa_boundif);
4090 if (fwd_rt.ro_rt == NULL) {
4091 icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_HOST, dest, 0);
4092 goto done;
4093 }
4094 }
4095 rt = fwd_rt.ro_rt;
4096
4097 /*
4098 * Save the IP header and at most 8 bytes of the payload,
4099 * in case we need to generate an ICMP message to the src.
4100 *
4101 * We don't use m_copy() because it might return a reference
4102 * to a shared cluster. Both this function and ip_output()
4103 * assume exclusive access to the IP header in `m', so any
4104 * data in a cluster may change before we reach icmp_error().
4105 */
4106 MGET(mcopy, M_DONTWAIT, m->m_type);
4107 if (mcopy != NULL) {
4108 M_COPY_PKTHDR(mcopy, m);
4109 mcopy->m_len = imin((IP_VHL_HL(ip->ip_vhl) << 2) + 8,
4110 (int)ip->ip_len);
4111 m_copydata(m, 0, mcopy->m_len, mtod(mcopy, caddr_t));
4112 }
4113
4114 #if IPSTEALTH
4115 if (!ipstealth) {
4116 #endif /* IPSTEALTH */
4117 ip->ip_ttl -= IPTTLDEC;
4118 #if IPSTEALTH
4119 }
4120 #endif /* IPSTEALTH */
4121
4122 /*
4123 * If we are forwarding the packet out the same interface it came
4124 * in on, we should perhaps send a redirect to the sender to
4125 * shortcut a hop. Only send the redirect if the source is sending
4126 * directly to us and the packet carries no source route or options.
4127 * Also, don't send redirect if forwarding using a default route
4128 * or a route modified by a redirect.
4129 */
4130 RT_LOCK_SPIN(rt);
4131 if (rt->rt_ifp == m->m_pkthdr.rcvif &&
4132 !(rt->rt_flags & (RTF_DYNAMIC | RTF_MODIFIED)) &&
4133 satosin(rt_key(rt))->sin_addr.s_addr != INADDR_ANY &&
4134 ipsendredirects && !srcrt && rt->rt_ifa != NULL) {
4135 struct in_ifaddr *ia = (struct in_ifaddr *)rt->rt_ifa;
4136 u_int32_t src = ntohl(ip->ip_src.s_addr);
4137
4138 /* Become a regular mutex */
4139 RT_CONVERT_LOCK(rt);
4140 IFA_LOCK_SPIN(&ia->ia_ifa);
4141 if ((src & ia->ia_subnetmask) == ia->ia_subnet) {
4142 if (rt->rt_flags & RTF_GATEWAY) {
4143 dest = satosin(rt->rt_gateway)->sin_addr.s_addr;
4144 } else {
4145 dest = pkt_dst.s_addr;
4146 }
4147 /*
4148 * The Router Requirements RFC (1812) says to
4149 * send only host redirects.
4150 */
4151 type = ICMP_REDIRECT;
4152 code = ICMP_REDIRECT_HOST;
4153 #if DIAGNOSTIC
4154 if (ipprintfs) {
4155 printf("redirect (%d) to %lx\n", code,
4156 (u_int32_t)dest);
4157 }
4158 #endif
4159 }
4160 IFA_UNLOCK(&ia->ia_ifa);
4161 }
4162 RT_UNLOCK(rt);
4163
4164 #if IPFIREWALL
4165 if (next_hop != NULL) {
4166 /* Pass IPFORWARD info if available */
4167 struct m_tag *tag;
4168 struct ip_fwd_tag *ipfwd_tag;
4169
4170 tag = m_tag_create(KERNEL_MODULE_TAG_ID,
4171 KERNEL_TAG_TYPE_IPFORWARD,
4172 sizeof(*ipfwd_tag), M_NOWAIT, m);
4173 if (tag == NULL) {
4174 error = ENOBUFS;
4175 m_freem(m);
4176 goto done;
4177 }
4178
4179 ipfwd_tag = (struct ip_fwd_tag *)(tag + 1);
4180 ipfwd_tag->next_hop = next_hop;
4181
4182 m_tag_prepend(m, tag);
4183 }
4184 #endif /* IPFIREWALL */
4185
4186 /* Mark this packet as being forwarded from another interface */
4187 m->m_pkthdr.pkt_flags |= PKTF_FORWARDED;
4188 len = m_pktlen(m);
4189
4190 error = ip_output(m, NULL, &fwd_rt, IP_FORWARDING | IP_OUTARGS,
4191 NULL, &ipoa);
4192
4193 /* Refresh rt since the route could have changed while in IP */
4194 rt = fwd_rt.ro_rt;
4195
4196 if (error != 0) {
4197 OSAddAtomic(1, &ipstat.ips_cantforward);
4198 } else {
4199 /*
4200 * Increment stats on the source interface; those for
4201 * the destination interface have been taken care of
4202 * during output above by virtue of PKTF_FORWARDED.
4203 */
4204 rcvifp->if_fpackets++;
4205 rcvifp->if_fbytes += len;
4206
4207 OSAddAtomic(1, &ipstat.ips_forward);
4208 if (type != 0) {
4209 OSAddAtomic(1, &ipstat.ips_redirectsent);
4210 } else {
4211 if (mcopy != NULL) {
4212 /*
4213 * If we didn't have to go through ipflow and
4214 * the packet was successfully consumed by
4215 * ip_output, the mcopy is rather a waste;
4216 * this could be further optimized.
4217 */
4218 m_freem(mcopy);
4219 }
4220 goto done;
4221 }
4222 }
4223 if (mcopy == NULL) {
4224 goto done;
4225 }
4226
4227 switch (error) {
4228 case 0: /* forwarded, but need redirect */
4229 /* type, code set above */
4230 break;
4231
4232 case ENETUNREACH: /* shouldn't happen, checked above */
4233 case EHOSTUNREACH:
4234 case ENETDOWN:
4235 case EHOSTDOWN:
4236 default:
4237 type = ICMP_UNREACH;
4238 code = ICMP_UNREACH_HOST;
4239 break;
4240
4241 case EMSGSIZE:
4242 type = ICMP_UNREACH;
4243 code = ICMP_UNREACH_NEEDFRAG;
4244
4245 if (rt == NULL) {
4246 break;
4247 } else {
4248 RT_LOCK_SPIN(rt);
4249 if (rt->rt_ifp != NULL) {
4250 nextmtu = rt->rt_ifp->if_mtu;
4251 }
4252 RT_UNLOCK(rt);
4253 }
4254 #if IPSEC
4255 if (ipsec_bypass) {
4256 break;
4257 }
4258
4259 /*
4260 * If the packet is routed over an IPsec tunnel, tell the
4261 * originator the tunnel MTU.
4262 * tunnel MTU = if MTU - sizeof(IP) - ESP/AH hdrsiz
4263 * XXX quickhack!!!
4264 */
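	/*
	 * Worked example (illustrative, numbers assumed): on a path
	 * whose tunnel interface has a 1500-byte MTU, if ipsec_hdrsiz()
	 * reports 56 bytes of outer IP + ESP overhead, the nextmtu
	 * advertised in the ICMP_UNREACH_NEEDFRAG reply becomes 1444.
	 */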
4265 sp = ipsec4_getpolicybyaddr(mcopy, IPSEC_DIR_OUTBOUND,
4266 IP_FORWARDING, &ipsecerror);
4267
4268 if (sp == NULL) {
4269 break;
4270 }
4271
4272 /*
4273 * find the correct route for outer IPv4
4274 * header, compute tunnel MTU.
4275 */
4276 nextmtu = 0;
4277
4278 if (sp->req != NULL &&
4279 sp->req->saidx.mode == IPSEC_MODE_TUNNEL) {
4280 struct secasindex saidx;
4281 struct secasvar *sav;
4282 struct route *ro;
4283 struct ip *ipm;
4284 int ipsechdr;
4285
4286 /* count IPsec header size */
4287 ipsechdr = ipsec_hdrsiz(sp);
4288
4289 ipm = mtod(mcopy, struct ip *);
4290 bcopy(&sp->req->saidx, &saidx, sizeof(saidx));
4291 saidx.mode = sp->req->saidx.mode;
4292 saidx.reqid = sp->req->saidx.reqid;
4293 sin = SIN(&saidx.src);
4294 if (sin->sin_len == 0) {
4295 sin->sin_len = sizeof(*sin);
4296 sin->sin_family = AF_INET;
4297 sin->sin_port = IPSEC_PORT_ANY;
4298 bcopy(&ipm->ip_src, &sin->sin_addr,
4299 sizeof(sin->sin_addr));
4300 }
4301 sin = SIN(&saidx.dst);
4302 if (sin->sin_len == 0) {
4303 sin->sin_len = sizeof(*sin);
4304 sin->sin_family = AF_INET;
4305 sin->sin_port = IPSEC_PORT_ANY;
4306 bcopy(&ipm->ip_dst, &sin->sin_addr,
4307 sizeof(sin->sin_addr));
4308 }
4309 sav = key_allocsa_policy(&saidx);
4310 if (sav != NULL) {
4311 lck_mtx_lock(sadb_mutex);
4312 if (sav->sah != NULL) {
4313 ro = (struct route *)&sav->sah->sa_route;
4314 if (ro->ro_rt != NULL) {
4315 RT_LOCK(ro->ro_rt);
4316 if (ro->ro_rt->rt_ifp != NULL) {
4317 nextmtu = ro->ro_rt->
4318 rt_ifp->if_mtu;
4319 nextmtu -= ipsechdr;
4320 }
4321 RT_UNLOCK(ro->ro_rt);
4322 }
4323 }
4324 key_freesav(sav, KEY_SADB_LOCKED);
4325 lck_mtx_unlock(sadb_mutex);
4326 }
4327 }
4328 key_freesp(sp, KEY_SADB_UNLOCKED);
4329 #endif /* IPSEC */
4330 break;
4331
4332 case ENOBUFS:
4333 /*
4334 * A router should not generate ICMP_SOURCEQUENCH, per
4335 * RFC 1812 (Requirements for IP Version 4 Routers).
4336 * Source quench could be a big problem under DoS attacks,
4337 * or if the underlying interface is rate-limited.
4338 * Those who need source quench packets may re-enable them
4339 * via the net.inet.ip.sendsourcequench sysctl.
4340 */
4341 if (ip_sendsourcequench == 0) {
4342 m_freem(mcopy);
4343 goto done;
4344 } else {
4345 type = ICMP_SOURCEQUENCH;
4346 code = 0;
4347 }
4348 break;
4349
4350 case EACCES: /* ipfw denied packet */
4351 m_freem(mcopy);
4352 goto done;
4353 }
4354
4355 if (type == ICMP_UNREACH && code == ICMP_UNREACH_NEEDFRAG) {
4356 OSAddAtomic(1, &ipstat.ips_cantfrag);
4357 }
4358
4359 icmp_error(mcopy, type, code, dest, nextmtu);
4360 done:
4361 ip_fwd_route_copyin(rcvifp, &fwd_rt);
4362 }
4363
4364 int
4365 ip_savecontrol(struct inpcb *inp, struct mbuf **mp, struct ip *ip,
4366 struct mbuf *m)
4367 {
4368 *mp = NULL;
4369 if (inp->inp_socket->so_options & SO_TIMESTAMP) {
4370 struct timeval tv;
4371
4372 getmicrotime(&tv);
4373 mp = sbcreatecontrol_mbuf((caddr_t)&tv, sizeof(tv),
4374 SCM_TIMESTAMP, SOL_SOCKET, mp);
4375 if (*mp == NULL) {
4376 goto no_mbufs;
4377 }
4378 }
4379 if (inp->inp_socket->so_options & SO_TIMESTAMP_MONOTONIC) {
4380 uint64_t time;
4381
4382 time = mach_absolute_time();
4383 mp = sbcreatecontrol_mbuf((caddr_t)&time, sizeof(time),
4384 SCM_TIMESTAMP_MONOTONIC, SOL_SOCKET, mp);
4385 if (*mp == NULL) {
4386 goto no_mbufs;
4387 }
4388 }
4389 if (inp->inp_socket->so_options & SO_TIMESTAMP_CONTINUOUS) {
4390 uint64_t time;
4391
4392 time = mach_continuous_time();
4393 mp = sbcreatecontrol_mbuf((caddr_t)&time, sizeof(time),
4394 SCM_TIMESTAMP_CONTINUOUS, SOL_SOCKET, mp);
4395 if (*mp == NULL) {
4396 goto no_mbufs;
4397 }
4398 }
4399 if (inp->inp_flags & INP_RECVDSTADDR) {
4400 mp = sbcreatecontrol_mbuf((caddr_t)&ip->ip_dst,
4401 sizeof(struct in_addr), IP_RECVDSTADDR, IPPROTO_IP, mp);
4402 if (*mp == NULL) {
4403 goto no_mbufs;
4404 }
4405 }
4406 #ifdef notyet
4407 /*
4408 * XXX
4409 * Moving these out of udp_input() made them even more broken
4410 * than they already were.
4411 */
4412 /* options were tossed already */
4413 if (inp->inp_flags & INP_RECVOPTS) {
4414 mp = sbcreatecontrol_mbuf((caddr_t)opts_deleted_above,
4415 sizeof(struct in_addr), IP_RECVOPTS, IPPROTO_IP, mp);
4416 if (*mp == NULL) {
4417 goto no_mbufs;
4418 }
4419 }
4420 /* ip_srcroute doesn't do what we want here, need to fix */
4421 if (inp->inp_flags & INP_RECVRETOPTS) {
4422 mp = sbcreatecontrol_mbuf((caddr_t)ip_srcroute(),
4423 sizeof(struct in_addr), IP_RECVRETOPTS, IPPROTO_IP, mp);
4424 if (*mp == NULL) {
4425 goto no_mbufs;
4426 }
4427 }
4428 #endif /* notyet */
4429 if (inp->inp_flags & INP_RECVIF) {
4430 struct ifnet *ifp;
4431 uint8_t sdlbuf[SOCK_MAXADDRLEN + 1];
4432 struct sockaddr_dl *sdl2 = SDL(&sdlbuf);
4433
4434 /*
4435 * Make sure to accommodate the largest possible
4436 * size of SA(if_lladdr)->sa_len.
4437 */
4438 _CASSERT(sizeof(sdlbuf) == (SOCK_MAXADDRLEN + 1));
4439
4440 ifnet_head_lock_shared();
4441 if ((ifp = m->m_pkthdr.rcvif) != NULL &&
4442 ifp->if_index && (ifp->if_index <= if_index)) {
4443 struct ifaddr *ifa = ifnet_addrs[ifp->if_index - 1];
4444 struct sockaddr_dl *sdp;
4445
4446 if (!ifa || !ifa->ifa_addr) {
4447 goto makedummy;
4448 }
4449
4450 IFA_LOCK_SPIN(ifa);
4451 sdp = SDL(ifa->ifa_addr);
4452 /*
4453 * Change our mind and don't try to copy.
4454 */
4455 if (sdp->sdl_family != AF_LINK) {
4456 IFA_UNLOCK(ifa);
4457 goto makedummy;
4458 }
4459 /* the above _CASSERT ensures sdl_len fits in sdlbuf */
4460 bcopy(sdp, sdl2, sdp->sdl_len);
4461 IFA_UNLOCK(ifa);
4462 } else {
4463 makedummy:
4464 sdl2->sdl_len =
4465 offsetof(struct sockaddr_dl, sdl_data[0]);
4466 sdl2->sdl_family = AF_LINK;
4467 sdl2->sdl_index = 0;
4468 sdl2->sdl_nlen = sdl2->sdl_alen = sdl2->sdl_slen = 0;
4469 }
4470 ifnet_head_done();
4471 mp = sbcreatecontrol_mbuf((caddr_t)sdl2, sdl2->sdl_len,
4472 IP_RECVIF, IPPROTO_IP, mp);
4473 if (*mp == NULL) {
4474 goto no_mbufs;
4475 }
4476 }
4477 if (inp->inp_flags & INP_RECVTTL) {
4478 mp = sbcreatecontrol_mbuf((caddr_t)&ip->ip_ttl,
4479 sizeof(ip->ip_ttl), IP_RECVTTL, IPPROTO_IP, mp);
4480 if (*mp == NULL) {
4481 goto no_mbufs;
4482 }
4483 }
4484 if (inp->inp_socket->so_flags & SOF_RECV_TRAFFIC_CLASS) {
4485 int tc = m_get_traffic_class(m);
4486
4487 mp = sbcreatecontrol_mbuf((caddr_t)&tc, sizeof(tc),
4488 SO_TRAFFIC_CLASS, SOL_SOCKET, mp);
4489 if (*mp == NULL) {
4490 goto no_mbufs;
4491 }
4492 }
4493 if (inp->inp_flags & INP_PKTINFO) {
4494 struct in_pktinfo pi;
4495
4496 bzero(&pi, sizeof(struct in_pktinfo));
4497 bcopy(&ip->ip_dst, &pi.ipi_addr, sizeof(struct in_addr));
4498 pi.ipi_ifindex = (m != NULL && m->m_pkthdr.rcvif != NULL) ?
4499 m->m_pkthdr.rcvif->if_index : 0;
4500
4501 mp = sbcreatecontrol_mbuf((caddr_t)&pi,
4502 sizeof(struct in_pktinfo), IP_RECVPKTINFO, IPPROTO_IP, mp);
4503 if (*mp == NULL) {
4504 goto no_mbufs;
4505 }
4506 }
4507 if (inp->inp_flags & INP_RECVTOS) {
4508 mp = sbcreatecontrol_mbuf((caddr_t)&ip->ip_tos,
4509 sizeof(u_char), IP_RECVTOS, IPPROTO_IP, mp);
4510 if (*mp == NULL) {
4511 goto no_mbufs;
4512 }
4513 }
4514 return 0;
4515
4516 no_mbufs:
4517 ipstat.ips_pktdropcntrl++;
4518 return ENOBUFS;
4519 }
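/*
 * Illustrative sketch (not compiled into xnu): the userland consumer of
 * the control mbufs built above, using recvmsg(2) and the cmsg macros.
 * read_ttl_and_pktinfo() is a hypothetical helper; the cmsg levels and
 * types match what ip_savecontrol() emits (IPPROTO_IP with IP_RECVTTL
 * and IP_RECVPKTINFO).
 */
#if 0
#include <sys/socket.h>
#include <sys/uio.h>
#include <netinet/in.h>
#include <string.h>

static void
read_ttl_and_pktinfo(int s)
{
	char data[2048], ctl[512];
	struct iovec iov = { .iov_base = data, .iov_len = sizeof(data) };
	struct msghdr msg;
	struct cmsghdr *cm;
	int one = 1;

	/* ask for the ancillary data before receiving */
	(void) setsockopt(s, IPPROTO_IP, IP_RECVTTL, &one, sizeof(one));
	(void) setsockopt(s, IPPROTO_IP, IP_RECVPKTINFO, &one, sizeof(one));

	memset(&msg, 0, sizeof(msg));
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = ctl;
	msg.msg_controllen = sizeof(ctl);

	if (recvmsg(s, &msg, 0) < 0) {
		return;
	}
	for (cm = CMSG_FIRSTHDR(&msg); cm != NULL; cm = CMSG_NXTHDR(&msg, cm)) {
		if (cm->cmsg_level != IPPROTO_IP) {
			continue;
		}
		if (cm->cmsg_type == IP_RECVTTL) {
			u_char ttl = *(u_char *)CMSG_DATA(cm);
			(void)ttl;      /* one byte, as built above */
		} else if (cm->cmsg_type == IP_RECVPKTINFO) {
			struct in_pktinfo pi;
			memcpy(&pi, CMSG_DATA(cm), sizeof(pi));
			(void)pi;       /* ipi_ifindex + ipi_addr */
		}
	}
}
#endif /* illustrative sketch */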
4520
4521 static inline u_short
4522 ip_cksum(struct mbuf *m, int hlen)
4523 {
4524 u_short sum;
4525
4526 if (m->m_pkthdr.csum_flags & CSUM_IP_CHECKED) {
4527 sum = !(m->m_pkthdr.csum_flags & CSUM_IP_VALID);
4528 } else if (!(m->m_pkthdr.rcvif->if_flags & IFF_LOOPBACK) &&
4529 !(m->m_pkthdr.pkt_flags & PKTF_LOOP)) {
4530 /*
4531 * The packet arrived on an interface which isn't capable
4532 * of verifying the IP header checksum; compute it now.
4533 */
4534 sum = ip_cksum_hdr_in(m, hlen);
4535 } else {
4536 sum = 0;
4537 m->m_pkthdr.csum_flags |= (CSUM_DATA_VALID | CSUM_PSEUDO_HDR |
4538 CSUM_IP_CHECKED | CSUM_IP_VALID);
4539 m->m_pkthdr.csum_data = 0xffff;
4540 }
4541
4542 if (sum != 0) {
4543 OSAddAtomic(1, &ipstat.ips_badsum);
4544 }
4545
4546 return sum;
4547 }
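/*
 * Illustrative sketch (not compiled into xnu): the RFC 1071 ones'-complement
 * sum that the software path above ultimately computes over the IP header.
 * cksum_hdr() is a hypothetical helper; summing a header that includes its
 * own checksum field yields 0 when the header verifies.
 */
#if 0
#include <stdint.h>
#include <stddef.h>

static uint16_t
cksum_hdr(const uint16_t *hdr, size_t nwords)   /* nwords = hlen / 2 */
{
	uint32_t sum = 0;

	while (nwords-- > 0) {
		sum += *hdr++;
	}
	sum = (sum & 0xffff) + (sum >> 16);     /* fold the carries ... */
	sum = (sum & 0xffff) + (sum >> 16);     /* ... and the final one */
	return (uint16_t)~sum;
}
#endif /* illustrative sketch */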
4548
4549 static int
4550 ip_getstat SYSCTL_HANDLER_ARGS
4551 {
4552 #pragma unused(oidp, arg1, arg2)
4553 if (req->oldptr == USER_ADDR_NULL) {
4554 req->oldlen = (size_t)sizeof(struct ipstat);
4555 }
4556
4557 return SYSCTL_OUT(req, &ipstat, MIN(sizeof(ipstat), req->oldlen));
4558 }
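/*
 * Illustrative sketch (not compiled into xnu): the userland side of this
 * handler. fetch_ipstat() is hypothetical; "net.inet.ip.stats" is the OID
 * this handler traditionally services (netstat(1) reads it the same way).
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>
#include <netinet/in.h>
#include <netinet/ip_var.h>     /* struct ipstat */

static int
fetch_ipstat(struct ipstat *st)
{
	size_t len = sizeof(*st);

	return sysctlbyname("net.inet.ip.stats", st, &len, NULL, 0);
}
#endif /* illustrative sketch */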
4559
4560 void
4561 ip_setsrcifaddr_info(struct mbuf *m, uint32_t src_idx, struct in_ifaddr *ia)
4562 {
4563 VERIFY(m->m_flags & M_PKTHDR);
4564
4565 /*
4566 * If the source ifaddr is specified, pick up the information
4567 * from there; otherwise just grab the passed-in ifindex as the
4568 * caller may not have the ifaddr available.
4569 */
4570 if (ia != NULL) {
4571 m->m_pkthdr.pkt_flags |= PKTF_IFAINFO;
4572 m->m_pkthdr.src_ifindex = ia->ia_ifp->if_index;
4573 } else {
4574 m->m_pkthdr.src_ifindex = src_idx;
4575 if (src_idx != 0) {
4576 m->m_pkthdr.pkt_flags |= PKTF_IFAINFO;
4577 }
4578 }
4579 }
4580
4581 void
4582 ip_setdstifaddr_info(struct mbuf *m, uint32_t dst_idx, struct in_ifaddr *ia)
4583 {
4584 VERIFY(m->m_flags & M_PKTHDR);
4585
4586 /*
4587 * If the destination ifaddr is specified, pick up the information
4588 * from there; otherwise just grab the passed-in ifindex as the
4589 * caller may not have the ifaddr available.
4590 */
4591 if (ia != NULL) {
4592 m->m_pkthdr.pkt_flags |= PKTF_IFAINFO;
4593 m->m_pkthdr.dst_ifindex = ia->ia_ifp->if_index;
4594 } else {
4595 m->m_pkthdr.dst_ifindex = dst_idx;
4596 if (dst_idx != 0) {
4597 m->m_pkthdr.pkt_flags |= PKTF_IFAINFO;
4598 }
4599 }
4600 }
4601
4602 int
4603 ip_getsrcifaddr_info(struct mbuf *m, uint32_t *src_idx, uint32_t *iaf)
4604 {
4605 VERIFY(m->m_flags & M_PKTHDR);
4606
4607 if (!(m->m_pkthdr.pkt_flags & PKTF_IFAINFO)) {
4608 return -1;
4609 }
4610
4611 if (src_idx != NULL) {
4612 *src_idx = m->m_pkthdr.src_ifindex;
4613 }
4614
4615 if (iaf != NULL) {
4616 *iaf = 0;
4617 }
4618
4619 return 0;
4620 }
4621
4622 int
4623 ip_getdstifaddr_info(struct mbuf *m, uint32_t *dst_idx, uint32_t *iaf)
4624 {
4625 VERIFY(m->m_flags & M_PKTHDR);
4626
4627 if (!(m->m_pkthdr.pkt_flags & PKTF_IFAINFO)) {
4628 return -1;
4629 }
4630
4631 if (dst_idx != NULL) {
4632 *dst_idx = m->m_pkthdr.dst_ifindex;
4633 }
4634
4635 if (iaf != NULL) {
4636 *iaf = 0;
4637 }
4638
4639 return 0;
4640 }
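/*
 * Illustrative sketch (kernel-side, not compiled): how the setter/getter
 * pairs above are meant to be used together. The variables m and ia and
 * the surrounding function are hypothetical.
 */
#if 0
static void
example_ifaddr_info(struct mbuf *m, struct in_ifaddr *ia)
{
	uint32_t ifidx = 0;

	/* record the delivery ifaddr during input ... */
	ip_setdstifaddr_info(m, 0, ia);

	/* ... and recover its interface index later */
	if (ip_getdstifaddr_info(m, &ifidx, NULL) == 0) {
		/* ifidx == ia->ia_ifp->if_index */
	}
}
#endif /* illustrative sketch */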
4641
4642 /*
4643 * Protocol input handler for IPPROTO_GRE.
4644 */
4645 void
4646 gre_input(struct mbuf *m, int off)
4647 {
4648 gre_input_func_t fn = gre_input_func;
4649
4650 /*
4651 * If there is a registered GRE input handler, pass the mbuf to it.
4652 */
4653 if (fn != NULL) {
4654 lck_mtx_unlock(inet_domain_mutex);
4655 m = fn(m, off, (mtod(m, struct ip *))->ip_p);
4656 lck_mtx_lock(inet_domain_mutex);
4657 }
4658
4659 /*
4660 * If no matching tunnel that is up is found, inject
4661 * the mbuf into the raw IP socket to see if anyone picks it up.
4662 */
4663 if (m != NULL) {
4664 rip_input(m, off);
4665 }
4666 }
4667
4668 /*
4669 * Private KPI for PPP/PPTP.
4670 */
4671 int
4672 ip_gre_register_input(gre_input_func_t fn)
4673 {
4674 lck_mtx_lock(inet_domain_mutex);
4675 gre_input_func = fn;
4676 lck_mtx_unlock(inet_domain_mutex);
4677
4678 return 0;
4679 }
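/*
 * Illustrative sketch (not compiled): a minimal handler registered through
 * the KPI above. The gre_input_func_t signature is inferred from the call
 * site in gre_input() -- (mbuf, offset, inner protocol), returning the mbuf
 * or NULL once consumed; my_gre_input/attach_gre are hypothetical names.
 */
#if 0
static struct mbuf *
my_gre_input(struct mbuf *m, int off, int proto)
{
#pragma unused(off, proto)
	/* consume the packet here and return NULL, or ... */
	return m;       /* ... hand it back so rip_input() gets a shot */
}

static void
attach_gre(void)
{
	(void) ip_gre_register_input(my_gre_input);
}
#endif /* illustrative sketch */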
4680
4681 #if (DEBUG || DEVELOPMENT)
4682 static int
4683 sysctl_reset_ip_input_stats SYSCTL_HANDLER_ARGS
4684 {
4685 #pragma unused(arg1, arg2)
4686 int error, i;
4687
4688 i = ip_input_measure;
4689 error = sysctl_handle_int(oidp, &i, 0, req);
4690 if (error || req->newptr == USER_ADDR_NULL) {
4691 goto done;
4692 }
4693 /* impose bounds */
4694 if (i < 0 || i > 1) {
4695 error = EINVAL;
4696 goto done;
4697 }
4698 if (ip_input_measure != i && i == 1) {
4699 net_perf_initialize(&net_perf, ip_input_measure_bins);
4700 }
4701 ip_input_measure = i;
4702 done:
4703 return error;
4704 }
4705
4706 static int
4707 sysctl_ip_input_measure_bins SYSCTL_HANDLER_ARGS
4708 {
4709 #pragma unused(arg1, arg2)
4710 int error;
4711 uint64_t i;
4712
4713 i = ip_input_measure_bins;
4714 error = sysctl_handle_quad(oidp, &i, 0, req);
4715 if (error || req->newptr == USER_ADDR_NULL) {
4716 goto done;
4717 }
4718 /* validate data */
4719 if (!net_perf_validate_bins(i)) {
4720 error = EINVAL;
4721 goto done;
4722 }
4723 ip_input_measure_bins = i;
4724 done:
4725 return error;
4726 }
4727
4728 static int
4729 sysctl_ip_input_getperf SYSCTL_HANDLER_ARGS
4730 {
4731 #pragma unused(oidp, arg1, arg2)
4732 if (req->oldptr == USER_ADDR_NULL) {
4733 req->oldlen = (size_t)sizeof(net_perf);
4734 }
4735
4736 return SYSCTL_OUT(req, &net_perf, MIN(sizeof(net_perf), req->oldlen));
4737 }
4738 #endif /* (DEBUG || DEVELOPMENT) */
4739
4740 static int
4741 sysctl_ip_checkinterface SYSCTL_HANDLER_ARGS
4742 {
4743 #pragma unused(arg1, arg2)
4744 int error, i;
4745
4746 i = ip_checkinterface;
4747 error = sysctl_handle_int(oidp, &i, 0, req);
4748 if (error != 0 || req->newptr == USER_ADDR_NULL) {
4749 return error;
4750 }
4751
4752 switch (i) {
4753 case IP_CHECKINTERFACE_WEAK_ES:
4754 case IP_CHECKINTERFACE_HYBRID_ES:
4755 case IP_CHECKINTERFACE_STRONG_ES:
4756 if (ip_checkinterface != i) {
4757 ip_checkinterface = i;
4758 os_log(OS_LOG_DEFAULT, "%s: ip_checkinterface is now %d\n",
4759 __func__, ip_checkinterface);
4760 }
4761 break;
4762 default:
4763 error = EINVAL;
4764 break;
4765 }
4766 return error;
4767 }
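/*
 * Illustrative sketch (not compiled into xnu): selecting one of the modes
 * accepted above from userland. set_checkinterface() is hypothetical, and
 * the OID name "net.inet.ip.check_interface" is an assumption based on the
 * historical spelling of this knob.
 */
#if 0
#include <sys/types.h>
#include <sys/sysctl.h>

static int
set_checkinterface(int mode)    /* one of the IP_CHECKINTERFACE_*_ES values */
{
	return sysctlbyname("net.inet.ip.check_interface", NULL, NULL,
	    &mode, sizeof(mode));
}
#endif /* illustrative sketch */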