/*
 * Source: apple/xnu (release xnu-1228), bsd/netinet/ip_input.c
 * (retrieved via the git.saurik.com git web interface)
 */
1 /*
2 * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * Copyright (c) 1982, 1986, 1988, 1993
30 * The Regents of the University of California. All rights reserved.
31 *
32 * Redistribution and use in source and binary forms, with or without
33 * modification, are permitted provided that the following conditions
34 * are met:
35 * 1. Redistributions of source code must retain the above copyright
36 * notice, this list of conditions and the following disclaimer.
37 * 2. Redistributions in binary form must reproduce the above copyright
38 * notice, this list of conditions and the following disclaimer in the
39 * documentation and/or other materials provided with the distribution.
40 * 3. All advertising materials mentioning features or use of this software
41 * must display the following acknowledgement:
42 * This product includes software developed by the University of
43 * California, Berkeley and its contributors.
44 * 4. Neither the name of the University nor the names of its contributors
45 * may be used to endorse or promote products derived from this software
46 * without specific prior written permission.
47 *
48 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
49 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
52 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 * SUCH DAMAGE.
59 *
60 * @(#)ip_input.c 8.2 (Berkeley) 1/4/94
61 * $FreeBSD: src/sys/netinet/ip_input.c,v 1.130.2.25 2001/08/29 21:41:37 jesper Exp $
62 */
63 /*
64 * NOTICE: This file was modified by SPARTA, Inc. in 2007 to introduce
65 * support for mandatory and extensible security protections. This notice
66 * is included in support of clause 2.2 (b) of the Apple Public License,
67 * Version 2.0.
68 */
69
70 #define _IP_VHL
71
72 #include <sys/param.h>
73 #include <sys/systm.h>
74 #include <sys/mbuf.h>
75 #include <sys/malloc.h>
76 #include <sys/domain.h>
77 #include <sys/protosw.h>
78 #include <sys/socket.h>
79 #include <sys/time.h>
80 #include <sys/kernel.h>
81 #include <sys/syslog.h>
82 #include <sys/sysctl.h>
83
84 #include <kern/queue.h>
85 #include <kern/locks.h>
86
87 #include <pexpert/pexpert.h>
88
89 #include <net/if.h>
90 #include <net/if_var.h>
91 #include <net/if_dl.h>
92 #include <net/route.h>
93 #include <net/kpi_protocol.h>
94
95 #include <netinet/in.h>
96 #include <netinet/in_systm.h>
97 #include <netinet/in_var.h>
98 #include <netinet/ip.h>
99 #include <netinet/in_pcb.h>
100 #include <netinet/ip_var.h>
101 #include <netinet/ip_icmp.h>
102 #include <sys/socketvar.h>
103
104 #include <netinet/ip_fw.h>
105 #include <netinet/ip_divert.h>
106
107 #include <netinet/kpi_ipfilter_var.h>
108
109 /* needed for AUTOCONFIGURING: */
110 #include <netinet/udp.h>
111 #include <netinet/udp_var.h>
112 #include <netinet/bootp.h>
113
114 #if CONFIG_MACF_NET
115 #include <security/mac_framework.h>
116 #endif
117
118 #include <sys/kdebug.h>
119 #include <libkern/OSAtomic.h>
120
121 #define DBG_LAYER_BEG NETDBG_CODE(DBG_NETIP, 0)
122 #define DBG_LAYER_END NETDBG_CODE(DBG_NETIP, 2)
123 #define DBG_FNC_IP_INPUT NETDBG_CODE(DBG_NETIP, (2 << 8))
124
125
126 #if IPSEC
127 #include <netinet6/ipsec.h>
128 #include <netkey/key.h>
129 #endif
130
131 #include "faith.h"
132 #if defined(NFAITH) && NFAITH > 0
133 #include <net/if_types.h>
134 #endif
135
136 #if DUMMYNET
137 #include <netinet/ip_dummynet.h>
138 #endif
139
140 #if IPSEC
141 extern int ipsec_bypass;
142 extern lck_mtx_t *sadb_mutex;
143 #endif
144
145 int rsvp_on = 0;
146 static int ip_rsvp_on;
147 struct socket *ip_rsvpd;
148
149 int ipforwarding = 0;
150 SYSCTL_INT(_net_inet_ip, IPCTL_FORWARDING, forwarding, CTLFLAG_RW,
151 &ipforwarding, 0, "Enable IP forwarding between interfaces");
152
153 static int ipsendredirects = 1; /* XXX */
154 SYSCTL_INT(_net_inet_ip, IPCTL_SENDREDIRECTS, redirect, CTLFLAG_RW,
155 &ipsendredirects, 0, "Enable sending IP redirects");
156
157 int ip_defttl = IPDEFTTL;
158 SYSCTL_INT(_net_inet_ip, IPCTL_DEFTTL, ttl, CTLFLAG_RW,
159 &ip_defttl, 0, "Maximum TTL on IP packets");
160
161 static int ip_dosourceroute = 0;
162 SYSCTL_INT(_net_inet_ip, IPCTL_SOURCEROUTE, sourceroute, CTLFLAG_RW,
163 &ip_dosourceroute, 0, "Enable forwarding source routed IP packets");
164
165 static int ip_acceptsourceroute = 0;
166 SYSCTL_INT(_net_inet_ip, IPCTL_ACCEPTSOURCEROUTE, accept_sourceroute,
167 CTLFLAG_RW, &ip_acceptsourceroute, 0,
168 "Enable accepting source routed IP packets");
169
170 static int ip_keepfaith = 0;
171 SYSCTL_INT(_net_inet_ip, IPCTL_KEEPFAITH, keepfaith, CTLFLAG_RW,
172 &ip_keepfaith, 0,
173 "Enable packet capture for FAITH IPv4->IPv6 translater daemon");
174
175 static int nipq = 0; /* total # of reass queues */
176 static int maxnipq;
177 SYSCTL_INT(_net_inet_ip, OID_AUTO, maxfragpackets, CTLFLAG_RW,
178 &maxnipq, 0,
179 "Maximum number of IPv4 fragment reassembly queue entries");
180
181 static int maxfragsperpacket;
182 SYSCTL_INT(_net_inet_ip, OID_AUTO, maxfragsperpacket, CTLFLAG_RW,
183 &maxfragsperpacket, 0,
184 "Maximum number of IPv4 fragments allowed per packet");
185
186 static int maxfrags;
187 SYSCTL_INT(_net_inet_ip, OID_AUTO, maxfrags, CTLFLAG_RW,
188 &maxfrags, 0, "Maximum number of IPv4 fragments allowed");
189
190 static int currentfrags = 0;
191
192 /*
193 * XXX - Setting ip_checkinterface mostly implements the receive side of
194 * the Strong ES model described in RFC 1122, but since the routing table
195 * and transmit implementation do not implement the Strong ES model,
196 * setting this to 1 results in an odd hybrid.
197 *
198 * XXX - ip_checkinterface currently must be disabled if you use ipnat
199 * to translate the destination address to another local interface.
200 *
201 * XXX - ip_checkinterface must be disabled if you add IP aliases
202 * to the loopback interface instead of the interface where the
203 * packets for those addresses are received.
204 */
205 static int ip_checkinterface = 0;
206 SYSCTL_INT(_net_inet_ip, OID_AUTO, check_interface, CTLFLAG_RW,
207 &ip_checkinterface, 0, "Verify packet arrives on correct interface");
208
209
210 #if DIAGNOSTIC
211 static int ipprintfs = 0;
212 #endif
213
214 extern int in_proto_count;
215 extern struct domain inetdomain;
216 extern struct protosw inetsw[];
217 struct protosw *ip_protox[IPPROTO_MAX];
218 static int ipqmaxlen = IFQ_MAXLEN;
219 struct in_ifaddrhead in_ifaddrhead; /* first inet address */
220 struct ifqueue ipintrq;
221 SYSCTL_INT(_net_inet_ip, IPCTL_INTRQMAXLEN, intr_queue_maxlen, CTLFLAG_RW,
222 &ipintrq.ifq_maxlen, 0, "Maximum size of the IP input queue");
223 SYSCTL_INT(_net_inet_ip, IPCTL_INTRQDROPS, intr_queue_drops, CTLFLAG_RD,
224 &ipintrq.ifq_drops, 0, "Number of packets dropped from the IP input queue");
225
226 struct ipstat ipstat;
227 SYSCTL_STRUCT(_net_inet_ip, IPCTL_STATS, stats, CTLFLAG_RD,
228 &ipstat, ipstat, "IP statistics (struct ipstat, netinet/ip_var.h)");
229
230 /* Packet reassembly stuff */
231 #define IPREASS_NHASH_LOG2 6
232 #define IPREASS_NHASH (1 << IPREASS_NHASH_LOG2)
233 #define IPREASS_HMASK (IPREASS_NHASH - 1)
234 #define IPREASS_HASH(x,y) \
235 (((((x) & 0xF) | ((((x) >> 8) & 0xF) << 4)) ^ (y)) & IPREASS_HMASK)
236
237 static struct ipq ipq[IPREASS_NHASH];
238 static TAILQ_HEAD(ipq_list, ipq) ipq_list =
239 TAILQ_HEAD_INITIALIZER(ipq_list);
240 const int ipintrq_present = 1;
241 lck_mtx_t *ip_mutex;
242 lck_attr_t *ip_mutex_attr;
243 lck_grp_t *ip_mutex_grp;
244 lck_grp_attr_t *ip_mutex_grp_attr;
245 lck_mtx_t *inet_domain_mutex;
246 extern lck_mtx_t *domain_proto_mtx;
247
248 #if IPCTL_DEFMTU
249 SYSCTL_INT(_net_inet_ip, IPCTL_DEFMTU, mtu, CTLFLAG_RW,
250 &ip_mtu, 0, "Default MTU");
251 #endif
252
253 #if IPSTEALTH
254 static int ipstealth = 0;
255 SYSCTL_INT(_net_inet_ip, OID_AUTO, stealth, CTLFLAG_RW,
256 &ipstealth, 0, "");
257 #endif
258
259
260 /* Firewall hooks */
261 ip_fw_chk_t *ip_fw_chk_ptr;
262 int fw_enable = 1;
263 int fw_bypass = 1;
264 int fw_one_pass = 0;
265
266 #if DUMMYNET
267 ip_dn_io_t *ip_dn_io_ptr;
268 #endif
269
270 int (*fr_checkp)(struct ip *, int, struct ifnet *, int, struct mbuf **) = NULL;
271
272 SYSCTL_NODE(_net_inet_ip, OID_AUTO, linklocal, CTLFLAG_RW|CTLFLAG_LOCKED, 0, "link local");
273
274 struct ip_linklocal_stat ip_linklocal_stat;
275 SYSCTL_STRUCT(_net_inet_ip_linklocal, OID_AUTO, stat, CTLFLAG_RD,
276 &ip_linklocal_stat, ip_linklocal_stat,
277 "Number of link local packets with TTL less than 255");
278
279 SYSCTL_NODE(_net_inet_ip_linklocal, OID_AUTO, in, CTLFLAG_RW|CTLFLAG_LOCKED, 0, "link local input");
280
281 int ip_linklocal_in_allowbadttl = 1;
282 SYSCTL_INT(_net_inet_ip_linklocal_in, OID_AUTO, allowbadttl, CTLFLAG_RW,
283 &ip_linklocal_in_allowbadttl, 0,
284 "Allow incoming link local packets with TTL less than 255");
285
286
287 /*
288 * We need to save the IP options in case a protocol wants to respond
289 * to an incoming packet over the same route if the packet got here
290 * using IP source routing. This allows connection establishment and
291 * maintenance when the remote end is on a network that is not known
292 * to us.
293 */
294 static int ip_nhops = 0;
295 static struct ip_srcrt {
296 struct in_addr dst; /* final destination */
297 char nop; /* one NOP to align */
298 char srcopt[IPOPT_OFFSET + 1]; /* OPTVAL, OLEN and OFFSET */
299 struct in_addr route[MAX_IPOPTLEN/sizeof(struct in_addr)];
300 } ip_srcrt;
301
302
303 static void save_rte(u_char *, struct in_addr);
304 static int ip_dooptions(struct mbuf *, int, struct sockaddr_in *, struct route *ipforward_rt);
305 static void ip_forward(struct mbuf *, int, struct sockaddr_in *, struct route *ipforward_rt);
306 static void ip_freef(struct ipq *);
307 #if IPDIVERT
308 #ifdef IPDIVERT_44
309 static struct mbuf *ip_reass(struct mbuf *,
310 struct ipq *, struct ipq *, u_int32_t *, u_int16_t *);
311 #else
312 static struct mbuf *ip_reass(struct mbuf *,
313 struct ipq *, struct ipq *, u_int16_t *, u_int16_t *);
314 #endif
315 #else
316 static struct mbuf *ip_reass(struct mbuf *, struct ipq *, struct ipq *);
317 #endif
318 void ipintr(void);
319 void in_dinit(void);
320
321 #if RANDOM_IP_ID
322 extern u_short ip_id;
323
324 int ip_use_randomid = 1;
325 SYSCTL_INT(_net_inet_ip, OID_AUTO, random_id, CTLFLAG_RW,
326 &ip_use_randomid, 0, "Randomize IP packets IDs");
327 #endif
328
329 extern u_long route_generation;
330
331 /*
332 * IP initialization: fill in IP protocol switch table.
333 * All protocols not implemented in kernel go to raw IP protocol handler.
334 */
335 void
336 ip_init(void)
337 {
338 struct protosw *pr;
339 int i;
340 static int ip_initialized = 0;
341
342
343 if (!ip_initialized)
344 {
345 TAILQ_INIT(&in_ifaddrhead);
346 pr = pffindproto_locked(PF_INET, IPPROTO_RAW, SOCK_RAW);
347 if (pr == 0)
348 panic("ip_init");
349 for (i = 0; i < IPPROTO_MAX; i++)
350 ip_protox[i] = pr;
351 for (pr = inetdomain.dom_protosw; pr; pr = pr->pr_next)
352 { if(!((unsigned int)pr->pr_domain)) continue; /* If uninitialized, skip */
353 if (pr->pr_domain->dom_family == PF_INET &&
354 pr->pr_protocol && pr->pr_protocol != IPPROTO_RAW)
355 ip_protox[pr->pr_protocol] = pr;
356 }
357 for (i = 0; i < IPREASS_NHASH; i++)
358 ipq[i].next = ipq[i].prev = &ipq[i];
359
360 maxnipq = nmbclusters / 32;
361 maxfrags = maxnipq * 2;
362 maxfragsperpacket = 128; /* enough for 64k in 512 byte fragments */
363
364 #if RANDOM_IP_ID
365 {
366 struct timeval timenow;
367 getmicrotime(&timenow);
368 ip_id = timenow.tv_sec & 0xffff;
369 }
370 #endif
371 ipintrq.ifq_maxlen = ipqmaxlen;
372
373 ipf_init();
374
375 ip_mutex_grp_attr = lck_grp_attr_alloc_init();
376
377 ip_mutex_grp = lck_grp_alloc_init("ip", ip_mutex_grp_attr);
378
379 ip_mutex_attr = lck_attr_alloc_init();
380
381 if ((ip_mutex = lck_mtx_alloc_init(ip_mutex_grp, ip_mutex_attr)) == NULL) {
382 printf("ip_init: can't alloc ip_mutex\n");
383 return;
384 }
385
386 #if IPSEC
387
388 sadb_stat_mutex_grp_attr = lck_grp_attr_alloc_init();
389 sadb_stat_mutex_grp = lck_grp_alloc_init("sadb_stat", sadb_stat_mutex_grp_attr);
390 sadb_stat_mutex_attr = lck_attr_alloc_init();
391
392 if ((sadb_stat_mutex = lck_mtx_alloc_init(sadb_stat_mutex_grp, sadb_stat_mutex_attr)) == NULL) {
393 printf("ip_init: can't alloc sadb_stat_mutex\n");
394 return;
395 }
396
397 #endif
398 ip_initialized = 1;
399 }
400 }
401
402 static void
403 ip_proto_input(
404 protocol_family_t __unused protocol,
405 mbuf_t packet_list)
406 {
407 mbuf_t packet;
408 int how_many = 0 ;
409
410 /* ip_input should handle a list of packets but does not yet */
411
412 for (packet = packet_list; packet; packet = packet_list) {
413 how_many++;
414 packet_list = mbuf_nextpkt(packet);
415 mbuf_setnextpkt(packet, NULL);
416 ip_input(packet);
417 }
418 }
419
420 /* Initialize the PF_INET domain, and add in the pre-defined protos */
/*
 * in_dinit - one-time initialization of the PF_INET domain.
 *
 * Marks the inet domain re-entrant, registers each of the
 * in_proto_count entries of inetsw[] with the domain via
 * net_add_proto(), publishes the domain mutex in inet_domain_mutex,
 * and registers ip_proto_input() as the PF_INET packet input handler.
 *
 * NOTE(review): the caller apparently holds domain_proto_mtx; it is
 * dropped around proto_register_input() -- presumably because that
 * routine acquires the same mutex or may block -- TODO confirm
 * against the caller and proto_register_input().
 */
421 void
422 in_dinit(void)
423 {
424 int i;
425 struct protosw *pr;
426 struct domain *dp;
427 static int inetdomain_initted = 0; /* guard: run the body only once */
428 
429 if (!inetdomain_initted)
430 {
431 #if 0
432 kprintf("Initing %d protosw entries\n", in_proto_count);
433 #endif
434 dp = &inetdomain;
435 dp->dom_flags = DOM_REENTRANT;
436 
/* Add every predefined inet protosw entry to the domain. */
437 for (i=0, pr = &inetsw[0]; i<in_proto_count; i++, pr++)
438 net_add_proto(pr, dp);
439 inet_domain_mutex = dp->dom_mtx;
440 inetdomain_initted = 1;
441 
/* Drop the domain list lock across registration (see NOTE above). */
442 lck_mtx_unlock(domain_proto_mtx);
443 proto_register_input(PF_INET, ip_proto_input, NULL, 1);
444 lck_mtx_lock(domain_proto_mtx);
445 }
446 }
447
/*
 * ip_proto_dispatch_in - run an inbound IPv4 packet through the
 * registered ipv4_filters, then dispatch it to the protocol input
 * routine selected by ip_protox[].
 *
 * m            - packet, IP header in host byte order with ip_len
 *                already reduced by hlen (the filter API wants the
 *                header restored to wire format, see below)
 * hlen         - IP header length in bytes
 * proto        - IP protocol number to report to the filters
 * inject_ipfref - when non-zero, the filter that re-injected this
 *                packet; filters up to and including it are skipped
 *                so a packet is never re-fed to the filter that
 *                injected it.
 *
 * A filter returning EJUSTRETURN has consumed the mbuf; any other
 * non-zero result drops the packet here.  The filter list is
 * protected by the ipf_ref()/ipf_unref() reference pair.
 */
448 __private_extern__ void
449 ip_proto_dispatch_in(
450 struct mbuf *m,
451 int hlen,
452 u_int8_t proto,
453 ipfilter_t inject_ipfref)
454 {
455 struct ipfilter *filter;
456 int seen = (inject_ipfref == 0); /* start delivering immediately if no inject ref */
457 int changed_header = 0; /* set once the header is converted back to wire format */
458 struct ip *ip;
459 
460 if (!TAILQ_EMPTY(&ipv4_filters)) {
461 ipf_ref();
462 TAILQ_FOREACH(filter, &ipv4_filters, ipf_link) {
463 if (seen == 0) {
/* Skip filters up to and including the injector. */
464 if ((struct ipfilter *)inject_ipfref == filter)
465 seen = 1;
466 } else if (filter->ipf_filter.ipf_input) {
467 errno_t result;
468 
469 if (changed_header == 0) {
/*
 * Lazily restore wire format for the first filter that
 * will actually see the packet: network byte order,
 * total length including the header, fresh checksum.
 */
470 changed_header = 1;
471 ip = mtod(m, struct ip *);
472 ip->ip_len = htons(ip->ip_len + hlen);
473 ip->ip_off = htons(ip->ip_off);
474 ip->ip_sum = 0;
475 ip->ip_sum = in_cksum(m, hlen);
476 }
/* Filter may replace m (passed by reference). */
477 result = filter->ipf_filter.ipf_input(
478 filter->ipf_filter.cookie, (mbuf_t*)&m, hlen, proto);
479 if (result == EJUSTRETURN) {
/* Filter consumed the packet; do not touch m again. */
480 ipf_unref();
481 return;
482 }
483 if (result != 0) {
/* Filter rejected the packet: drop it. */
484 ipf_unref();
485 m_freem(m);
486 return;
487 }
488 }
489 }
490 ipf_unref();
491 }
492 /*
493 * If there isn't a specific lock for the protocol
494 * we're about to call, use the generic lock for AF_INET.
495 * otherwise let the protocol deal with its own locking
496 */
/* Re-fetch the header pointer: a filter may have replaced m. */
497 ip = mtod(m, struct ip *);
498 
/* Undo the wire-format conversion done for the filters. */
499 if (changed_header) {
500 ip->ip_len = ntohs(ip->ip_len) - hlen;
501 ip->ip_off = ntohs(ip->ip_off);
502 }
503 
504 if (!(ip_protox[ip->ip_p]->pr_flags & PR_PROTOLOCK)) {
505 lck_mtx_lock(inet_domain_mutex);
506 (*ip_protox[ip->ip_p]->pr_input)(m, hlen);
507 lck_mtx_unlock(inet_domain_mutex);
508 }
509 else
510 (*ip_protox[ip->ip_p]->pr_input)(m, hlen);
511 
512 }
513
514 /*
515 * ipforward_rt cleared in in_addroute()
516 * when a new route is successfully created.
517 */
518 static struct sockaddr_in ipaddr = { sizeof(ipaddr), AF_INET , 0 , {0}, {0,0,0,0,0,0,0,0} };
519
520 /*
521 * Ip input routine. Checksum and byte swap header. If fragmented
522 * try to reassemble. Process options. Pass to next level.
523 */
524 void
525 ip_input(struct mbuf *m)
526 {
527 struct ip *ip;
528 struct ipq *fp;
529 struct in_ifaddr *ia = NULL;
530 int i, hlen, checkif;
531 u_short sum;
532 struct in_addr pkt_dst;
533 u_int32_t div_info = 0; /* packet divert/tee info */
534 struct ip_fw_args args;
535 ipfilter_t inject_filter_ref = 0;
536 struct m_tag *tag;
537 struct route ipforward_rt;
538
539 bzero(&ipforward_rt, sizeof(struct route));
540
541 #if IPFIREWALL
542 args.eh = NULL;
543 args.oif = NULL;
544 args.rule = NULL;
545 args.divert_rule = 0; /* divert cookie */
546 args.next_hop = NULL;
547
548 /* Grab info from mtags prepended to the chain */
549 #if DUMMYNET
550 if ((tag = m_tag_locate(m, KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_DUMMYNET, NULL)) != NULL) {
551 struct dn_pkt_tag *dn_tag;
552
553 dn_tag = (struct dn_pkt_tag *)(tag+1);
554 args.rule = dn_tag->rule;
555
556 m_tag_delete(m, tag);
557 }
558 #endif /* DUMMYNET */
559
560 if ((tag = m_tag_locate(m, KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_DIVERT, NULL)) != NULL) {
561 struct divert_tag *div_tag;
562
563 div_tag = (struct divert_tag *)(tag+1);
564 args.divert_rule = div_tag->cookie;
565
566 m_tag_delete(m, tag);
567 }
568 if ((tag = m_tag_locate(m, KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_IPFORWARD, NULL)) != NULL) {
569 struct ip_fwd_tag *ipfwd_tag;
570
571 ipfwd_tag = (struct ip_fwd_tag *)(tag+1);
572 args.next_hop = ipfwd_tag->next_hop;
573
574 m_tag_delete(m, tag);
575 }
576
577 #if DIAGNOSTIC
578 if (m == NULL || (m->m_flags & M_PKTHDR) == 0)
579 panic("ip_input no HDR");
580 #endif
581
582 if (args.rule) { /* dummynet already filtered us */
583 ip = mtod(m, struct ip *);
584 hlen = IP_VHL_HL(ip->ip_vhl) << 2;
585 inject_filter_ref = ipf_get_inject_filter(m);
586 goto iphack ;
587 }
588 #endif /* IPFIREWALL */
589
590 /*
591 * No need to proccess packet twice if we've
592 * already seen it
593 */
594 inject_filter_ref = ipf_get_inject_filter(m);
595 if (inject_filter_ref != 0) {
596 ip = mtod(m, struct ip *);
597 hlen = IP_VHL_HL(ip->ip_vhl) << 2;
598 ip->ip_len = ntohs(ip->ip_len) - hlen;
599 ip->ip_off = ntohs(ip->ip_off);
600 ip_proto_dispatch_in(m, hlen, ip->ip_p, inject_filter_ref);
601 return;
602 }
603
604 OSAddAtomic(1, (SInt32*)&ipstat.ips_total);
605
606 if (m->m_pkthdr.len < sizeof(struct ip))
607 goto tooshort;
608
609 if (m->m_len < sizeof (struct ip) &&
610 (m = m_pullup(m, sizeof (struct ip))) == 0) {
611 OSAddAtomic(1, (SInt32*)&ipstat.ips_toosmall);
612 return;
613 }
614 ip = mtod(m, struct ip *);
615
616 KERNEL_DEBUG(DBG_LAYER_BEG, ip->ip_dst.s_addr,
617 ip->ip_src.s_addr, ip->ip_p, ip->ip_off, ip->ip_len);
618
619 if (IP_VHL_V(ip->ip_vhl) != IPVERSION) {
620 OSAddAtomic(1, (SInt32*)&ipstat.ips_badvers);
621 goto bad;
622 }
623
624 hlen = IP_VHL_HL(ip->ip_vhl) << 2;
625 if (hlen < sizeof(struct ip)) { /* minimum header length */
626 OSAddAtomic(1, (SInt32*)&ipstat.ips_badhlen);
627 goto bad;
628 }
629 if (hlen > m->m_len) {
630 if ((m = m_pullup(m, hlen)) == 0) {
631 OSAddAtomic(1, (SInt32*)&ipstat.ips_badhlen);
632 return;
633 }
634 ip = mtod(m, struct ip *);
635 }
636
637 /* 127/8 must not appear on wire - RFC1122 */
638 if ((ntohl(ip->ip_dst.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET ||
639 (ntohl(ip->ip_src.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET) {
640 if ((m->m_pkthdr.rcvif->if_flags & IFF_LOOPBACK) == 0) {
641 OSAddAtomic(1, (SInt32*)&ipstat.ips_badaddr);
642 goto bad;
643 }
644 }
645
646 /* IPv4 Link-Local Addresses as defined in <draft-ietf-zeroconf-ipv4-linklocal-05.txt> */
647 if ((IN_LINKLOCAL(ntohl(ip->ip_dst.s_addr)) ||
648 IN_LINKLOCAL(ntohl(ip->ip_src.s_addr)))) {
649 ip_linklocal_stat.iplls_in_total++;
650 if (ip->ip_ttl != MAXTTL) {
651 OSAddAtomic(1, (SInt32*)&ip_linklocal_stat.iplls_in_badttl);
652 /* Silently drop link local traffic with bad TTL */
653 if (!ip_linklocal_in_allowbadttl)
654 goto bad;
655 }
656 }
657 if ((IF_HWASSIST_CSUM_FLAGS(m->m_pkthdr.rcvif->if_hwassist) == 0)
658 || (apple_hwcksum_rx == 0) ||
659 ((m->m_pkthdr.csum_flags & CSUM_TCP_SUM16) && ip->ip_p != IPPROTO_TCP)) {
660 m->m_pkthdr.csum_flags = 0; /* invalidate HW generated checksum flags */
661 }
662
663 if (m->m_pkthdr.csum_flags & CSUM_IP_CHECKED) {
664 sum = !(m->m_pkthdr.csum_flags & CSUM_IP_VALID);
665 } else if (!(m->m_pkthdr.rcvif->if_flags & IFF_LOOPBACK) ||
666 apple_hwcksum_tx == 0) {
667 /*
668 * Either this is not loopback packet coming from an interface
669 * that does not support checksum offloading, or it is loopback
670 * packet that has undergone software checksumming at the send
671 * side because apple_hwcksum_tx was set to 0. In this case,
672 * calculate the checksum in software to validate the packet.
673 */
674 sum = in_cksum(m, hlen);
675 } else {
676 /*
677 * This is a loopback packet without any valid checksum since
678 * the send side has bypassed it (apple_hwcksum_tx set to 1).
679 * We get here because apple_hwcksum_rx was set to 0, and so
680 * we pretend that all is well.
681 */
682 sum = 0;
683 m->m_pkthdr.csum_flags |= CSUM_DATA_VALID | CSUM_PSEUDO_HDR |
684 CSUM_IP_CHECKED | CSUM_IP_VALID;
685 m->m_pkthdr.csum_data = 0xffff;
686 }
687 if (sum) {
688 OSAddAtomic(1, (SInt32*)&ipstat.ips_badsum);
689 goto bad;
690 }
691
692 /*
693 * Convert fields to host representation.
694 */
695 NTOHS(ip->ip_len);
696 if (ip->ip_len < hlen) {
697 OSAddAtomic(1, (SInt32*)&ipstat.ips_badlen);
698 goto bad;
699 }
700 NTOHS(ip->ip_off);
701
702 /*
703 * Check that the amount of data in the buffers
704 * is as at least much as the IP header would have us expect.
705 * Trim mbufs if longer than we expect.
706 * Drop packet if shorter than we expect.
707 */
708 if (m->m_pkthdr.len < ip->ip_len) {
709 tooshort:
710 OSAddAtomic(1, (SInt32*)&ipstat.ips_tooshort);
711 goto bad;
712 }
713 if (m->m_pkthdr.len > ip->ip_len) {
714 /* Invalidate hwcksuming */
715 m->m_pkthdr.csum_flags = 0;
716 m->m_pkthdr.csum_data = 0;
717
718 if (m->m_len == m->m_pkthdr.len) {
719 m->m_len = ip->ip_len;
720 m->m_pkthdr.len = ip->ip_len;
721 } else
722 m_adj(m, ip->ip_len - m->m_pkthdr.len);
723 }
724
725 #if IPSEC
726 if (ipsec_bypass == 0 && ipsec_gethist(m, NULL))
727 goto pass;
728 #endif
729
730 /*
731 * IpHack's section.
732 * Right now when no processing on packet has done
733 * and it is still fresh out of network we do our black
734 * deals with it.
735 * - Firewall: deny/allow/divert
736 * - Xlate: translate packet's addr/port (NAT).
737 * - Pipe: pass pkt through dummynet.
738 * - Wrap: fake packet's addr/port <unimpl.>
739 * - Encapsulate: put it in another IP and send out. <unimp.>
740 */
741
742 #if IPFIREWALL
743 #if DUMMYNET
744 iphack:
745 #endif /* DUMMYNET */
746 /*
747 * Check if we want to allow this packet to be processed.
748 * Consider it to be bad if not.
749 */
750 if (fr_checkp) {
751 struct mbuf *m1 = m;
752
753 if (fr_checkp(ip, hlen, m->m_pkthdr.rcvif, 0, &m1) || !m1) {
754 return;
755 }
756 ip = mtod(m = m1, struct ip *);
757 }
758 if (fw_enable && IPFW_LOADED) {
759 #if IPFIREWALL_FORWARD
760 /*
761 * If we've been forwarded from the output side, then
762 * skip the firewall a second time
763 */
764 if (args.next_hop)
765 goto ours;
766 #endif /* IPFIREWALL_FORWARD */
767
768 args.m = m;
769
770 i = ip_fw_chk_ptr(&args);
771 m = args.m;
772
773 if ( (i & IP_FW_PORT_DENY_FLAG) || m == NULL) { /* drop */
774 if (m)
775 m_freem(m);
776 return;
777 }
778 ip = mtod(m, struct ip *); /* just in case m changed */
779
780 if (i == 0 && args.next_hop == NULL) { /* common case */
781 goto pass;
782 }
783 #if DUMMYNET
784 if (DUMMYNET_LOADED && (i & IP_FW_PORT_DYNT_FLAG) != 0) {
785 /* Send packet to the appropriate pipe */
786 ip_dn_io_ptr(m, i&0xffff, DN_TO_IP_IN, &args);
787 return;
788 }
789 #endif /* DUMMYNET */
790 #if IPDIVERT
791 if (i != 0 && (i & IP_FW_PORT_DYNT_FLAG) == 0) {
792 /* Divert or tee packet */
793 div_info = i;
794 goto ours;
795 }
796 #endif
797 #if IPFIREWALL_FORWARD
798 if (i == 0 && args.next_hop != NULL) {
799 goto pass;
800 }
801 #endif
802 /*
803 * if we get here, the packet must be dropped
804 */
805 m_freem(m);
806 return;
807 }
808 #endif /* IPFIREWALL */
809 pass:
810
811 /*
812 * Process options and, if not destined for us,
813 * ship it on. ip_dooptions returns 1 when an
814 * error was detected (causing an icmp message
815 * to be sent and the original packet to be freed).
816 */
817 ip_nhops = 0; /* for source routed packets */
818 if (hlen > sizeof (struct ip) && ip_dooptions(m, 0, args.next_hop, &ipforward_rt)) {
819 return;
820 }
821
822 /* greedy RSVP, snatches any PATH packet of the RSVP protocol and no
823 * matter if it is destined to another node, or whether it is
824 * a multicast one, RSVP wants it! and prevents it from being forwarded
825 * anywhere else. Also checks if the rsvp daemon is running before
826 * grabbing the packet.
827 */
828 if (rsvp_on && ip->ip_p==IPPROTO_RSVP)
829 goto ours;
830
831 /*
832 * Check our list of addresses, to see if the packet is for us.
833 * If we don't have any addresses, assume any unicast packet
834 * we receive might be for us (and let the upper layers deal
835 * with it).
836 */
837 if (TAILQ_EMPTY(&in_ifaddrhead) &&
838 (m->m_flags & (M_MCAST|M_BCAST)) == 0)
839 goto ours;
840
841 /*
842 * Cache the destination address of the packet; this may be
843 * changed by use of 'ipfw fwd'.
844 */
845 pkt_dst = args.next_hop == NULL ?
846 ip->ip_dst : args.next_hop->sin_addr;
847
848 /*
849 * Enable a consistency check between the destination address
850 * and the arrival interface for a unicast packet (the RFC 1122
851 * strong ES model) if IP forwarding is disabled and the packet
852 * is not locally generated and the packet is not subject to
853 * 'ipfw fwd'.
854 *
855 * XXX - Checking also should be disabled if the destination
856 * address is ipnat'ed to a different interface.
857 *
858 * XXX - Checking is incompatible with IP aliases added
859 * to the loopback interface instead of the interface where
860 * the packets are received.
861 */
862 checkif = ip_checkinterface && (ipforwarding == 0) &&
863 ((m->m_pkthdr.rcvif->if_flags & IFF_LOOPBACK) == 0) &&
864 (args.next_hop == NULL);
865
866 lck_mtx_lock(rt_mtx);
867 TAILQ_FOREACH(ia, &in_ifaddrhead, ia_link) {
868 #define satosin(sa) ((struct sockaddr_in *)(sa))
869
870 if (IA_SIN(ia)->sin_addr.s_addr == INADDR_ANY) {
871 lck_mtx_unlock(rt_mtx);
872 goto ours;
873 }
874
875 /*
876 * If the address matches, verify that the packet
877 * arrived via the correct interface if checking is
878 * enabled.
879 */
880 if (IA_SIN(ia)->sin_addr.s_addr == pkt_dst.s_addr &&
881 (!checkif || ia->ia_ifp == m->m_pkthdr.rcvif)) {
882 lck_mtx_unlock(rt_mtx);
883 goto ours;
884 }
885 /*
886 * Only accept broadcast packets that arrive via the
887 * matching interface. Reception of forwarded directed
888 * broadcasts would be handled via ip_forward() and
889 * ether_output() with the loopback into the stack for
890 * SIMPLEX interfaces handled by ether_output().
891 */
892 if ((!checkif || ia->ia_ifp == m->m_pkthdr.rcvif) &&
893 ia->ia_ifp && ia->ia_ifp->if_flags & IFF_BROADCAST) {
894 if (satosin(&ia->ia_broadaddr)->sin_addr.s_addr ==
895 pkt_dst.s_addr) {
896 lck_mtx_unlock(rt_mtx);
897 goto ours;
898 }
899 if (ia->ia_netbroadcast.s_addr == pkt_dst.s_addr) {
900 lck_mtx_unlock(rt_mtx);
901 goto ours;
902 }
903 }
904 }
905 lck_mtx_unlock(rt_mtx);
906 if (IN_MULTICAST(ntohl(ip->ip_dst.s_addr))) {
907 struct in_multi *inm;
908 #if MROUTING
909 if (ip_mrouter) {
910 /*
911 * If we are acting as a multicast router, all
912 * incoming multicast packets are passed to the
913 * kernel-level multicast forwarding function.
914 * The packet is returned (relatively) intact; if
915 * ip_mforward() returns a non-zero value, the packet
916 * must be discarded, else it may be accepted below.
917 */
918 lck_mtx_lock(ip_mutex);
919 if (ip_mforward &&
920 ip_mforward(ip, m->m_pkthdr.rcvif, m, 0) != 0) {
921 OSAddAtomic(1, (SInt32*)&ipstat.ips_cantforward);
922 m_freem(m);
923 lck_mtx_unlock(ip_mutex);
924 return;
925 }
926
927 /*
928 * The process-level routing daemon needs to receive
929 * all multicast IGMP packets, whether or not this
930 * host belongs to their destination groups.
931 */
932 if (ip->ip_p == IPPROTO_IGMP)
933 goto ours;
934 OSAddAtomic(1, (SInt32*)&ipstat.ips_forward);
935 }
936 #endif /* MROUTING */
937 /*
938 * See if we belong to the destination multicast group on the
939 * arrival interface.
940 */
941 IN_LOOKUP_MULTI(ip->ip_dst, m->m_pkthdr.rcvif, inm);
942 if (inm == NULL) {
943 OSAddAtomic(1, (SInt32*)&ipstat.ips_notmember);
944 m_freem(m);
945 return;
946 }
947 goto ours;
948 }
949 if (ip->ip_dst.s_addr == (u_long)INADDR_BROADCAST)
950 goto ours;
951 if (ip->ip_dst.s_addr == INADDR_ANY)
952 goto ours;
953
954 /* Allow DHCP/BootP responses through */
955 if (m->m_pkthdr.rcvif != NULL
956 && (m->m_pkthdr.rcvif->if_eflags & IFEF_AUTOCONFIGURING)
957 && hlen == sizeof(struct ip)
958 && ip->ip_p == IPPROTO_UDP) {
959 struct udpiphdr *ui;
960 if (m->m_len < sizeof(struct udpiphdr)
961 && (m = m_pullup(m, sizeof(struct udpiphdr))) == 0) {
962 OSAddAtomic(1, (SInt32*)&udpstat.udps_hdrops);
963 return;
964 }
965 ui = mtod(m, struct udpiphdr *);
966 if (ntohs(ui->ui_dport) == IPPORT_BOOTPC) {
967 goto ours;
968 }
969 ip = mtod(m, struct ip *); /* in case it changed */
970 }
971
972 #if defined(NFAITH) && 0 < NFAITH
973 /*
974 * FAITH(Firewall Aided Internet Translator)
975 */
976 if (m->m_pkthdr.rcvif && m->m_pkthdr.rcvif->if_type == IFT_FAITH) {
977 if (ip_keepfaith) {
978 if (ip->ip_p == IPPROTO_TCP || ip->ip_p == IPPROTO_ICMP)
979 goto ours;
980 }
981 m_freem(m);
982 return;
983 }
984 #endif
985 /*
986 * Not for us; forward if possible and desirable.
987 */
988 if (ipforwarding == 0) {
989 OSAddAtomic(1, (SInt32*)&ipstat.ips_cantforward);
990 m_freem(m);
991 } else {
992 ip_forward(m, 0, args.next_hop, &ipforward_rt);
993 if (ipforward_rt.ro_rt != NULL) {
994 rtfree(ipforward_rt.ro_rt);
995 ipforward_rt.ro_rt = NULL;
996 }
997 }
998 return;
999
1000 ours:
1001 /*
1002 * If offset or IP_MF are set, must reassemble.
1003 * Otherwise, nothing need be done.
1004 * (We could look in the reassembly queue to see
1005 * if the packet was previously fragmented,
1006 * but it's not worth the time; just let them time out.)
1007 */
1008 if (ip->ip_off & (IP_MF | IP_OFFMASK | IP_RF)) {
1009
1010 /* If maxnipq is 0, never accept fragments. */
1011 if (maxnipq == 0) {
1012
1013 OSAddAtomic(1, (SInt32*)&ipstat.ips_fragments);
1014 OSAddAtomic(1, (SInt32*)&ipstat.ips_fragdropped);
1015 goto bad;
1016 }
1017
1018 /*
1019 * If we will exceed the number of fragments in queues, timeout the
1020 * oldest fragemented packet to make space.
1021 */
1022 lck_mtx_lock(ip_mutex);
1023 if (currentfrags >= maxfrags) {
1024 fp = TAILQ_LAST(&ipq_list, ipq_list);
1025 OSAddAtomic(fp->ipq_nfrags, (SInt32*)&ipstat.ips_fragtimeout);
1026
1027 if (ip->ip_id == fp->ipq_id &&
1028 ip->ip_src.s_addr == fp->ipq_src.s_addr &&
1029 ip->ip_dst.s_addr == fp->ipq_dst.s_addr &&
1030 ip->ip_p == fp->ipq_p) {
1031 /*
1032 * If we match the fragment queue we were going to
1033 * discard, drop this packet too.
1034 */
1035 OSAddAtomic(1, (SInt32*)&ipstat.ips_fragdropped);
1036 ip_freef(fp);
1037 lck_mtx_unlock(ip_mutex);
1038 goto bad;
1039 }
1040
1041 ip_freef(fp);
1042 }
1043
1044 sum = IPREASS_HASH(ip->ip_src.s_addr, ip->ip_id);
1045 /*
1046 * Look for queue of fragments
1047 * of this datagram.
1048 */
1049 for (fp = ipq[sum].next; fp != &ipq[sum]; fp = fp->next)
1050 if (ip->ip_id == fp->ipq_id &&
1051 ip->ip_src.s_addr == fp->ipq_src.s_addr &&
1052 ip->ip_dst.s_addr == fp->ipq_dst.s_addr &&
1053 #if CONFIG_MACF_NET
1054 mac_ipq_label_compare(m, fp) &&
1055 #endif
1056 ip->ip_p == fp->ipq_p)
1057 goto found;
1058
1059 /*
1060 * Enforce upper bound on number of fragmented packets
1061 * for which we attempt reassembly;
1062 * If maxnipq is -1, accept all fragments without limitation.
1063 */
1064 if ((nipq > maxnipq) && (maxnipq > 0)) {
1065 /*
1066 * drop the oldest fragment before proceeding further
1067 */
1068 fp = TAILQ_LAST(&ipq_list, ipq_list);
1069 OSAddAtomic(fp->ipq_nfrags, (SInt32*)&ipstat.ips_fragtimeout);
1070 ip_freef(fp);
1071 }
1072
1073 fp = NULL;
1074
1075 found:
1076 /*
1077 * Adjust ip_len to not reflect header,
1078 * convert offset of this to bytes.
1079 */
1080 ip->ip_len -= hlen;
1081 if (ip->ip_off & IP_MF) {
1082 /*
1083 * Make sure that fragments have a data length
1084 * that's a non-zero multiple of 8 bytes.
1085 */
1086 if (ip->ip_len == 0 || (ip->ip_len & 0x7) != 0) {
1087 OSAddAtomic(1, (SInt32*)&ipstat.ips_toosmall);
1088 lck_mtx_unlock(ip_mutex);
1089 goto bad;
1090 }
1091 m->m_flags |= M_FRAG;
1092 } else {
1093 /* Clear the flag in case packet comes from loopback */
1094 m->m_flags &= ~M_FRAG;
1095 }
1096 ip->ip_off <<= 3;
1097
1098 /*
1099 * Attempt reassembly; if it succeeds, proceed.
1100 * ip_reass() will return a different mbuf, and update
1101 * the divert info in div_info and args.divert_rule.
1102 */
1103 OSAddAtomic(1, (SInt32*)&ipstat.ips_fragments);
1104 m->m_pkthdr.header = ip;
1105 #if IPDIVERT
1106 m = ip_reass(m,
1107 fp, &ipq[sum], &div_info, &args.divert_rule);
1108 #else
1109 m = ip_reass(m, fp, &ipq[sum]);
1110 #endif
1111 if (m == 0) {
1112 lck_mtx_unlock(ip_mutex);
1113 return;
1114 }
1115 OSAddAtomic(1, (SInt32*)&ipstat.ips_reassembled);
1116 ip = mtod(m, struct ip *);
1117 /* Get the header length of the reassembled packet */
1118 hlen = IP_VHL_HL(ip->ip_vhl) << 2;
1119 #if IPDIVERT
1120 /* Restore original checksum before diverting packet */
1121 if (div_info != 0) {
1122 ip->ip_len += hlen;
1123 HTONS(ip->ip_len);
1124 HTONS(ip->ip_off);
1125 ip->ip_sum = 0;
1126 ip->ip_sum = in_cksum(m, hlen);
1127 NTOHS(ip->ip_off);
1128 NTOHS(ip->ip_len);
1129 ip->ip_len -= hlen;
1130 }
1131 #endif
1132 lck_mtx_unlock(ip_mutex);
1133 } else
1134 ip->ip_len -= hlen;
1135
1136 #if IPDIVERT
1137 /*
1138 * Divert or tee packet to the divert protocol if required.
1139 *
1140 * If div_info is zero then cookie should be too, so we shouldn't
1141 * need to clear them here. Assume divert_packet() does so also.
1142 */
1143 if (div_info != 0) {
1144 struct mbuf *clone = NULL;
1145
1146 /* Clone packet if we're doing a 'tee' */
1147 if ((div_info & IP_FW_PORT_TEE_FLAG) != 0)
1148 clone = m_dup(m, M_DONTWAIT);
1149
1150 /* Restore packet header fields to original values */
1151 ip->ip_len += hlen;
1152 HTONS(ip->ip_len);
1153 HTONS(ip->ip_off);
1154
1155 /* Deliver packet to divert input routine */
1156 OSAddAtomic(1, (SInt32*)&ipstat.ips_delivered);
1157 divert_packet(m, 1, div_info & 0xffff, args.divert_rule);
1158
1159 /* If 'tee', continue with original packet */
1160 if (clone == NULL) {
1161 return;
1162 }
1163 m = clone;
1164 ip = mtod(m, struct ip *);
1165 }
1166 #endif
1167
1168 #if IPSEC
1169 /*
1170 * enforce IPsec policy checking if we are seeing last header.
1171 * note that we do not visit this with protocols with pcb layer
1172 * code - like udp/tcp/raw ip.
1173 */
1174 if (ipsec_bypass == 0 && (ip_protox[ip->ip_p]->pr_flags & PR_LASTHDR) != 0) {
1175 if (ipsec4_in_reject(m, NULL)) {
1176 IPSEC_STAT_INCREMENT(ipsecstat.in_polvio);
1177 goto bad;
1178 }
1179 }
1180 #endif
1181
1182 /*
1183 * Switch out to protocol's input routine.
1184 */
1185 OSAddAtomic(1, (SInt32*)&ipstat.ips_delivered);
1186 {
1187 if (args.next_hop && ip->ip_p == IPPROTO_TCP) {
1188 /* TCP needs IPFORWARD info if available */
1189 struct m_tag *fwd_tag;
1190 struct ip_fwd_tag *ipfwd_tag;
1191
1192 fwd_tag = m_tag_alloc(KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_IPFORWARD,
1193 sizeof(struct sockaddr_in), M_NOWAIT);
1194 if (fwd_tag == NULL) {
1195 goto bad;
1196 }
1197
1198 ipfwd_tag = (struct ip_fwd_tag *)(fwd_tag+1);
1199 ipfwd_tag->next_hop = args.next_hop;
1200
1201 m_tag_prepend(m, fwd_tag);
1202
1203 KERNEL_DEBUG(DBG_LAYER_END, ip->ip_dst.s_addr,
1204 ip->ip_src.s_addr, ip->ip_p, ip->ip_off, ip->ip_len);
1205
1206
1207 /* TCP deals with its own locking */
1208 ip_proto_dispatch_in(m, hlen, ip->ip_p, 0);
1209 } else {
1210 KERNEL_DEBUG(DBG_LAYER_END, ip->ip_dst.s_addr,
1211 ip->ip_src.s_addr, ip->ip_p, ip->ip_off, ip->ip_len);
1212
1213 ip_proto_dispatch_in(m, hlen, ip->ip_p, 0);
1214 }
1215
1216 return;
1217 }
1218 bad:
1219 KERNEL_DEBUG(DBG_LAYER_END, 0,0,0,0,0);
1220 m_freem(m);
1221 }
1222
1223 /*
1224 * Take incoming datagram fragment and try to reassemble it into
1225 * whole datagram. If a chain for reassembly of this datagram already
1226 * exists, then it is given as fp; otherwise have to make a chain.
1227 *
1228 * When IPDIVERT enabled, keep additional state with each packet that
1229 * tells us if we need to divert or tee the packet we're building.
1230 */
1231
/*
 * Attempt to reassemble the fragment in 'm' into a complete datagram.
 *
 * m      - incoming fragment; ip_off/ip_len already in host order, ip_len
 *          excludes the header and ip_off is in bytes (caller converted).
 * fp     - matching reassembly queue found by the caller, or NULL if this
 *          is the first fragment seen for the (id, src, dst, proto) tuple.
 * where  - hash-bucket list head to insert a newly created queue into.
 *
 * Returns the fully reassembled packet when this fragment completes the
 * datagram, or NULL if more fragments are needed (or the fragment was
 * dropped).  On the IPDIVERT build, divert info/cookie from the offset-0
 * fragment is stashed in the queue and handed back through *divinfo /
 * *divcookie on completion.
 *
 * Caller must hold ip_mutex (asserted below); all queue/global counters
 * (nipq, currentfrags, ipq_list) are protected by it.
 */
static struct mbuf *
#if IPDIVERT
ip_reass(struct mbuf *m, struct ipq *fp, struct ipq *where,
#ifdef IPDIVERT_44
	 u_int32_t *divinfo,
#else /* IPDIVERT_44 */
	 u_int16_t *divinfo,
#endif /* IPDIVERT_44 */
	 u_int16_t *divcookie)
#else /* IPDIVERT */
ip_reass(struct mbuf *m, struct ipq *fp, struct ipq *where)
#endif /* IPDIVERT */
{
	struct ip *ip = mtod(m, struct ip *);
	struct mbuf *p = 0, *q, *nq;
	struct mbuf *t;
	int hlen = IP_VHL_HL(ip->ip_vhl) << 2;
	int i, next;
	u_int8_t ecn, ecn0;

	lck_mtx_assert(ip_mutex, LCK_MTX_ASSERT_OWNED);
	/*
	 * Presence of header sizes in mbufs
	 * would confuse code below.  The IP header stays reachable via
	 * m_pkthdr.header (GETIP below); m_data/m_len now cover payload only.
	 */
	m->m_data += hlen;
	m->m_len -= hlen;

	/* Partial TCP checksum state cannot survive reassembly; discard it. */
	if (m->m_pkthdr.csum_flags & CSUM_TCP_SUM16)
		m->m_pkthdr.csum_flags = 0;
	/*
	 * If first fragment to arrive, create a reassembly queue.
	 * The struct ipq itself is carved out of an mbuf (MT_FTABLE).
	 */
	if (fp == 0) {
		if ((t = m_get(M_DONTWAIT, MT_FTABLE)) == NULL)
			goto dropfrag;
		fp = mtod(t, struct ipq *);
#if CONFIG_MACF_NET
		if (mac_ipq_label_init(fp, M_NOWAIT) != 0) {
			m_free(t);
			fp = NULL;
			goto dropfrag;
		}
		mac_ipq_label_associate(m, fp);
#endif
		/* Link into the hash bucket and the global LRU (age) list. */
		insque((void*)fp, (void*)where);
		nipq++;
		fp->ipq_nfrags = 1;
		fp->ipq_ttl = IPFRAGTTL;
		fp->ipq_p = ip->ip_p;
		fp->ipq_id = ip->ip_id;
		fp->ipq_src = ip->ip_src;
		fp->ipq_dst = ip->ip_dst;
		fp->ipq_frags = m;
		m->m_nextpkt = NULL;
#if IPDIVERT
#ifdef IPDIVERT_44
		fp->ipq_div_info = 0;
#else
		fp->ipq_divert = 0;
#endif
		fp->ipq_div_cookie = 0;
#endif
		TAILQ_INSERT_HEAD(&ipq_list, fp, ipq_list);
		/* Single fragment: skip overlap trimming entirely. */
		goto inserted;
	} else {
		fp->ipq_nfrags++;
#if CONFIG_MACF_NET
		mac_ipq_label_update(m, fp);
#endif
	}

/* Each queued fragment keeps its IP header pointer in m_pkthdr.header. */
#define GETIP(m)	((struct ip*)((m)->m_pkthdr.header))

	/*
	 * Handle ECN by comparing this segment with the first one;
	 * if CE is set, do not lose CE.
	 * drop if CE and not-ECT are mixed for the same packet.
	 */
	ecn = ip->ip_tos & IPTOS_ECN_MASK;
	ecn0 = GETIP(fp->ipq_frags)->ip_tos & IPTOS_ECN_MASK;
	if (ecn == IPTOS_ECN_CE) {
		if (ecn0 == IPTOS_ECN_NOTECT)
			goto dropfrag;
		if (ecn0 != IPTOS_ECN_CE)
			GETIP(fp->ipq_frags)->ip_tos |= IPTOS_ECN_CE;
	}
	if (ecn == IPTOS_ECN_NOTECT && ecn0 != IPTOS_ECN_NOTECT)
		goto dropfrag;

	/*
	 * Find a segment which begins after this one does.
	 * (Fragment list is kept sorted by ip_off; p trails q.)
	 */
	for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt)
		if (GETIP(q)->ip_off > ip->ip_off)
			break;

	/*
	 * If there is a preceding segment, it may provide some of
	 * our data already.  If so, drop the data from the incoming
	 * segment.  If it provides all of our data, drop us, otherwise
	 * stick new segment in the proper place.
	 *
	 * If some of the data is dropped from the preceding
	 * segment, then its checksum is invalidated.
	 */
	if (p) {
		i = GETIP(p)->ip_off + GETIP(p)->ip_len - ip->ip_off;
		if (i > 0) {
			if (i >= ip->ip_len)
				goto dropfrag;
			m_adj(m, i);
			m->m_pkthdr.csum_flags = 0;
			ip->ip_off += i;
			ip->ip_len -= i;
		}
		m->m_nextpkt = p->m_nextpkt;
		p->m_nextpkt = m;
	} else {
		m->m_nextpkt = fp->ipq_frags;
		fp->ipq_frags = m;
	}

	/*
	 * While we overlap succeeding segments trim them or,
	 * if they are completely covered, dequeue them.
	 */
	for (; q != NULL && ip->ip_off + ip->ip_len > GETIP(q)->ip_off;
	     q = nq) {
		i = (ip->ip_off + ip->ip_len) -
		    GETIP(q)->ip_off;
		if (i < GETIP(q)->ip_len) {
			/* Partial overlap: trim the front of q and stop. */
			GETIP(q)->ip_len -= i;
			GETIP(q)->ip_off += i;
			m_adj(q, i);
			q->m_pkthdr.csum_flags = 0;
			break;
		}
		/* q is fully covered by the new fragment: drop it. */
		nq = q->m_nextpkt;
		m->m_nextpkt = nq;
		OSAddAtomic(1, (SInt32*)&ipstat.ips_fragdropped);
		fp->ipq_nfrags--;
		m_freem(q);
	}

inserted:
	currentfrags++;

#if IPDIVERT
	/*
	 * Transfer firewall instructions to the fragment structure.
	 * Only trust info in the fragment at offset 0.
	 */
	if (ip->ip_off == 0) {
#ifdef IPDIVERT_44
		fp->ipq_div_info = *divinfo;
#else
		fp->ipq_divert = *divinfo;
#endif
		fp->ipq_div_cookie = *divcookie;
	}
	*divinfo = 0;
	*divcookie = 0;
#endif

	/*
	 * Check for complete reassembly and perform frag per packet
	 * limiting.
	 *
	 * Frag limiting is performed here so that the nth frag has
	 * a chance to complete the packet before we drop the packet.
	 * As a result, n+1 frags are actually allowed per packet, but
	 * only n will ever be stored. (n = maxfragsperpacket.)
	 *
	 */
	next = 0;
	for (p = NULL, q = fp->ipq_frags; q; p = q, q = q->m_nextpkt) {
		/* A gap at 'next' means the datagram is still incomplete. */
		if (GETIP(q)->ip_off != next) {
			if (fp->ipq_nfrags > maxfragsperpacket) {
				OSAddAtomic(fp->ipq_nfrags, (SInt32*)&ipstat.ips_fragdropped);
				ip_freef(fp);
			}
			return (0);
		}
		next += GETIP(q)->ip_len;
	}
	/* Make sure the last packet didn't have the IP_MF flag */
	if (p->m_flags & M_FRAG) {
		if (fp->ipq_nfrags > maxfragsperpacket) {
			OSAddAtomic(fp->ipq_nfrags, (SInt32*)&ipstat.ips_fragdropped);
			ip_freef(fp);
		}
		return (0);
	}

	/*
	 * Reassembly is complete.  Make sure the packet is a sane size.
	 */
	q = fp->ipq_frags;
	ip = GETIP(q);
	if (next + (IP_VHL_HL(ip->ip_vhl) << 2) > IP_MAXPACKET) {
		OSAddAtomic(1, (SInt32*)&ipstat.ips_toolong);
		OSAddAtomic(fp->ipq_nfrags, (SInt32*)&ipstat.ips_fragdropped);
		ip_freef(fp);
		return (0);
	}

	/*
	 * Concatenate fragments.  Merge hardware checksum state: any
	 * fragment lacking a flag clears it on the whole packet, and
	 * partial checksum data is summed across fragments.
	 */
	m = q;
	t = m->m_next;
	m->m_next = 0;
	m_cat(m, t);
	nq = q->m_nextpkt;
	q->m_nextpkt = 0;
	for (q = nq; q != NULL; q = nq) {
		nq = q->m_nextpkt;
		q->m_nextpkt = NULL;
		if (q->m_pkthdr.csum_flags & CSUM_TCP_SUM16)
			m->m_pkthdr.csum_flags = 0;
		else {
			m->m_pkthdr.csum_flags &= q->m_pkthdr.csum_flags;
			m->m_pkthdr.csum_data += q->m_pkthdr.csum_data;
		}
		m_cat(m, q);
	}

#if IPDIVERT
	/*
	 * Extract firewall instructions from the fragment structure.
	 */
#ifdef IPDIVERT_44
	*divinfo = fp->ipq_div_info;
#else
	*divinfo = fp->ipq_divert;
#endif
	*divcookie = fp->ipq_div_cookie;
#endif

#if CONFIG_MACF_NET
	mac_mbuf_label_associate_ipq(fp, m);
	mac_ipq_label_destroy(fp);
#endif
	/*
	 * Create header for new ip packet by
	 * modifying header of first packet;
	 * dequeue and discard fragment reassembly header.
	 * Make header visible.
	 */
	ip->ip_len = next;
	ip->ip_src = fp->ipq_src;
	ip->ip_dst = fp->ipq_dst;
	remque((void*)fp);
	TAILQ_REMOVE(&ipq_list, fp, ipq_list);
	currentfrags -= fp->ipq_nfrags;
	nipq--;
	(void) m_free(dtom(fp));	/* the ipq was carved from an mbuf */
	m->m_len += (IP_VHL_HL(ip->ip_vhl) << 2);
	m->m_data -= (IP_VHL_HL(ip->ip_vhl) << 2);
	/* some debugging cruft by sklower, below, will go away soon */
	if (m->m_flags & M_PKTHDR) { /* XXX this should be done elsewhere */
		int plen = 0;
		for (t = m; t; t = t->m_next)
			plen += t->m_len;
		m->m_pkthdr.len = plen;
	}
	return (m);

dropfrag:
#if IPDIVERT
	*divinfo = 0;
	*divcookie = 0;
#endif
	OSAddAtomic(1, (SInt32*)&ipstat.ips_fragdropped);
	if (fp != 0)
		fp->ipq_nfrags--;
	m_freem(m);
	return (0);

#undef GETIP
}
1514
1515 /*
1516 * Free a fragment reassembly header and all
1517 * associated datagrams.
1518 */
static void
ip_freef(struct ipq *fp)
{
	/* Caller must hold ip_mutex; counters below are protected by it. */
	lck_mtx_assert(ip_mutex, LCK_MTX_ASSERT_OWNED);
	/* Read the count before the queue's fragments are released. */
	currentfrags -= fp->ipq_nfrags;
	m_freem_list(fp->ipq_frags);
	/* Unlink from both the hash bucket and the global age list. */
	remque((void*)fp);
	TAILQ_REMOVE(&ipq_list, fp, ipq_list);
	(void) m_free(dtom(fp));	/* the ipq itself lives in an mbuf */
	nipq--;
}
1530
1531 /*
1532 * IP timer processing;
1533 * if a timer expires on a reassembly
1534 * queue, discard it.
1535 */
1536 void
1537 ip_slowtimo(void)
1538 {
1539 struct ipq *fp;
1540 int i;
1541 lck_mtx_lock(ip_mutex);
1542 for (i = 0; i < IPREASS_NHASH; i++) {
1543 fp = ipq[i].next;
1544 if (fp == 0)
1545 continue;
1546 while (fp != &ipq[i]) {
1547 --fp->ipq_ttl;
1548 fp = fp->next;
1549 if (fp->prev->ipq_ttl == 0) {
1550 OSAddAtomic(fp->ipq_nfrags, (SInt32*)&ipstat.ips_fragtimeout);
1551 ip_freef(fp->prev);
1552 }
1553 }
1554 }
1555 /*
1556 * If we are over the maximum number of fragments
1557 * (due to the limit being lowered), drain off
1558 * enough to get down to the new limit.
1559 */
1560 if (maxnipq >= 0 && nipq > maxnipq) {
1561 for (i = 0; i < IPREASS_NHASH; i++) {
1562 while (nipq > maxnipq &&
1563 (ipq[i].next != &ipq[i])) {
1564 OSAddAtomic(ipq[i].next->ipq_nfrags, (SInt32*)&ipstat.ips_fragdropped);
1565 ip_freef(ipq[i].next);
1566 }
1567 }
1568 }
1569 ipflow_slowtimo();
1570 lck_mtx_unlock(ip_mutex);
1571 }
1572
1573 /*
1574 * Drain off all datagram fragments.
1575 */
1576 void
1577 ip_drain(void)
1578 {
1579 int i;
1580
1581 lck_mtx_lock(ip_mutex);
1582 for (i = 0; i < IPREASS_NHASH; i++) {
1583 while (ipq[i].next != &ipq[i]) {
1584 OSAddAtomic(ipq[i].next->ipq_nfrags, (SInt32*)&ipstat.ips_fragdropped);
1585 ip_freef(ipq[i].next);
1586 }
1587 }
1588 lck_mtx_unlock(ip_mutex);
1589 in_rtqdrain();
1590 }
1591
1592 /*
1593 * Do option processing on a datagram,
1594 * possibly discarding it if bad options are encountered,
1595 * or forwarding it if source-routed.
1596 * The pass argument is used when operating in the IPSTEALTH
1597 * mode to tell what options to process:
1598 * [LS]SRR (pass 0) or the others (pass 1).
1599 * The reason for as many as two passes is that when doing IPSTEALTH,
1600 * non-routing options should be processed only if the packet is for us.
1601 * Returns 1 if packet has been forwarded/freed,
1602 * 0 if the packet should be processed further.
1603 */
static int
ip_dooptions(struct mbuf *m, __unused int pass, struct sockaddr_in *next_hop, struct route *ipforward_rt)
{
	struct ip *ip = mtod(m, struct ip *);
	u_char *cp;			/* cursor over the option bytes */
	struct ip_timestamp *ipt;
	struct in_ifaddr *ia;
	int opt, optlen, cnt, off, code, type = ICMP_PARAMPROB, forward = 0;
	struct in_addr *sin, dst;
	n_time ntime;

	/* Remember the original destination for timestamp processing. */
	dst = ip->ip_dst;
	cp = (u_char *)(ip + 1);
	cnt = (IP_VHL_HL(ip->ip_vhl) << 2) - sizeof (struct ip);
	for (; cnt > 0; cnt -= optlen, cp += optlen) {
		opt = cp[IPOPT_OPTVAL];
		if (opt == IPOPT_EOL)
			break;
		if (opt == IPOPT_NOP)
			optlen = 1;
		else {
			/* Validate the option length against remaining space. */
			if (cnt < IPOPT_OLEN + sizeof(*cp)) {
				code = &cp[IPOPT_OLEN] - (u_char *)ip;
				goto bad;
			}
			optlen = cp[IPOPT_OLEN];
			if (optlen < IPOPT_OLEN + sizeof(*cp) || optlen > cnt) {
				code = &cp[IPOPT_OLEN] - (u_char *)ip;
				goto bad;
			}
		}
		switch (opt) {

		default:
			break;

		/*
		 * Source routing with record.
		 * Find interface with current destination address.
		 * If none on this machine then drop if strictly routed,
		 * or do nothing if loosely routed.
		 * Record interface address and bring up next address
		 * component.  If strictly routed make sure next
		 * address is on directly accessible net.
		 */
		case IPOPT_LSRR:
		case IPOPT_SSRR:
			if (optlen < IPOPT_OFFSET + sizeof(*cp)) {
				code = &cp[IPOPT_OLEN] - (u_char *)ip;
				goto bad;
			}
			if ((off = cp[IPOPT_OFFSET]) < IPOPT_MINOFF) {
				code = &cp[IPOPT_OFFSET] - (u_char *)ip;
				goto bad;
			}
			ipaddr.sin_addr = ip->ip_dst;
			ia = (struct in_ifaddr *)
				ifa_ifwithaddr((struct sockaddr *)&ipaddr);
			if (ia == 0) {
				if (opt == IPOPT_SSRR) {
					type = ICMP_UNREACH;
					code = ICMP_UNREACH_SRCFAIL;
					goto bad;
				}
				if (!ip_dosourceroute)
					goto nosourcerouting;
				/*
				 * Loose routing, and not at next destination
				 * yet; nothing to do except forward.
				 */
				break;
			}
			else {
				/* We own this address: drop the ifa ref. */
				ifafree(&ia->ia_ifa);
				ia = NULL;
			}
			off--;			/* 0 origin */
			if (off > optlen - (int)sizeof(struct in_addr)) {
				/*
				 * End of source route.  Should be for us.
				 */
				if (!ip_acceptsourceroute)
					goto nosourcerouting;
				save_rte(cp, ip->ip_src);
				break;
			}

			if (!ip_dosourceroute) {
				if (ipforwarding) {
					char buf[MAX_IPv4_STR_LEN];
					char buf2[MAX_IPv4_STR_LEN];
					/*
					 * Acting as a router, so generate ICMP
					 *
					 * NOTE(review): this label is jumped to
					 * from outside the block that declares
					 * buf/buf2; jumps from those sites never
					 * reach the log() call at runtime only
					 * because they originate before buf is
					 * needed -- confirm before restructuring.
					 */
nosourcerouting:
					log(LOG_WARNING,
					    "attempted source route from %s to %s\n",
					    inet_ntop(AF_INET, &ip->ip_src, buf, sizeof(buf)),
					    inet_ntop(AF_INET, &ip->ip_dst, buf2, sizeof(buf2)));
					type = ICMP_UNREACH;
					code = ICMP_UNREACH_SRCFAIL;
					goto bad;
				} else {
					/*
					 * Not acting as a router, so silently drop.
					 */
					OSAddAtomic(1, (SInt32*)&ipstat.ips_cantforward);
					m_freem(m);
					return (1);
				}
			}

			/*
			 * locate outgoing interface
			 */
			(void)memcpy(&ipaddr.sin_addr, cp + off,
			    sizeof(ipaddr.sin_addr));

			if (opt == IPOPT_SSRR) {
#define	INA	struct in_ifaddr *
#define	SA	struct sockaddr *
				if ((ia = (INA)ifa_ifwithdstaddr((SA)&ipaddr)) == 0) {
					ia = (INA)ifa_ifwithnet((SA)&ipaddr);
				}
			} else {
				ia = ip_rtaddr(ipaddr.sin_addr, ipforward_rt);
			}
			if (ia == 0) {
				type = ICMP_UNREACH;
				code = ICMP_UNREACH_SRCFAIL;
				goto bad;
			}
			/* Advance to the next hop and record our address. */
			ip->ip_dst = ipaddr.sin_addr;
			(void)memcpy(cp + off, &(IA_SIN(ia)->sin_addr),
			    sizeof(struct in_addr));
			ifafree(&ia->ia_ifa);
			ia = NULL;
			cp[IPOPT_OFFSET] += sizeof(struct in_addr);
			/*
			 * Let ip_intr's mcast routing check handle mcast pkts
			 */
			forward = !IN_MULTICAST(ntohl(ip->ip_dst.s_addr));
			break;

		case IPOPT_RR:
			if (optlen < IPOPT_OFFSET + sizeof(*cp)) {
				code = &cp[IPOPT_OFFSET] - (u_char *)ip;
				goto bad;
			}
			if ((off = cp[IPOPT_OFFSET]) < IPOPT_MINOFF) {
				code = &cp[IPOPT_OFFSET] - (u_char *)ip;
				goto bad;
			}
			/*
			 * If no space remains, ignore.
			 */
			off--;			/* 0 origin */
			if (off > optlen - (int)sizeof(struct in_addr))
				break;
			(void)memcpy(&ipaddr.sin_addr, &ip->ip_dst,
			    sizeof(ipaddr.sin_addr));
			/*
			 * locate outgoing interface; if we're the destination,
			 * use the incoming interface (should be same).
			 */
			if ((ia = (INA)ifa_ifwithaddr((SA)&ipaddr)) == 0) {
				if ((ia = ip_rtaddr(ipaddr.sin_addr, ipforward_rt)) == 0) {
					type = ICMP_UNREACH;
					code = ICMP_UNREACH_HOST;
					goto bad;
				}
			}
			(void)memcpy(cp + off, &(IA_SIN(ia)->sin_addr),
			    sizeof(struct in_addr));
			ifafree(&ia->ia_ifa);
			ia = NULL;
			cp[IPOPT_OFFSET] += sizeof(struct in_addr);
			break;

		case IPOPT_TS:
			code = cp - (u_char *)ip;
			ipt = (struct ip_timestamp *)cp;
			if (ipt->ipt_len < 4 || ipt->ipt_len > 40) {
				code = (u_char *)&ipt->ipt_len - (u_char *)ip;
				goto bad;
			}
			if (ipt->ipt_ptr < 5) {
				code = (u_char *)&ipt->ipt_ptr - (u_char *)ip;
				goto bad;
			}
			/* No room for another timestamp: bump the overflow
			 * counter; a wrapped counter is a parameter problem. */
			if (ipt->ipt_ptr >
			    ipt->ipt_len - (int)sizeof(int32_t)) {
				if (++ipt->ipt_oflw == 0) {
					code = (u_char *)&ipt->ipt_ptr -
					    (u_char *)ip;
					goto bad;
				}
				break;
			}
			sin = (struct in_addr *)(cp + ipt->ipt_ptr - 1);
			switch (ipt->ipt_flg) {

			case IPOPT_TS_TSONLY:
				break;

			case IPOPT_TS_TSANDADDR:
				if (ipt->ipt_ptr - 1 + sizeof(n_time) +
				    sizeof(struct in_addr) > ipt->ipt_len) {
					code = (u_char *)&ipt->ipt_ptr -
					    (u_char *)ip;
					goto bad;
				}
				ipaddr.sin_addr = dst;
				ia = (INA)ifaof_ifpforaddr((SA)&ipaddr,
				    m->m_pkthdr.rcvif);
				if (ia == 0)
					continue;	/* next option */
				(void)memcpy(sin, &IA_SIN(ia)->sin_addr,
				    sizeof(struct in_addr));
				ipt->ipt_ptr += sizeof(struct in_addr);
				ifafree(&ia->ia_ifa);
				ia = NULL;
				break;

			case IPOPT_TS_PRESPEC:
				if (ipt->ipt_ptr - 1 + sizeof(n_time) +
				    sizeof(struct in_addr) > ipt->ipt_len) {
					code = (u_char *)&ipt->ipt_ptr -
					    (u_char *)ip;
					goto bad;
				}
				(void)memcpy(&ipaddr.sin_addr, sin,
				    sizeof(struct in_addr));
				/* Only stamp if the prespecified addr is ours. */
				if ((ia = (struct in_ifaddr*)ifa_ifwithaddr((SA)&ipaddr)) == 0)
					continue;	/* next option */
				ifafree(&ia->ia_ifa);
				ia = NULL;
				ipt->ipt_ptr += sizeof(struct in_addr);
				break;

			default:
				/* XXX can't take &ipt->ipt_flg */
				code = (u_char *)&ipt->ipt_ptr -
				    (u_char *)ip + 1;
				goto bad;
			}
			ntime = iptime();
			(void)memcpy(cp + ipt->ipt_ptr - 1, &ntime,
			    sizeof(n_time));
			ipt->ipt_ptr += sizeof(n_time);
		}
	}
	/* A source-route option advanced ip_dst: forward on its behalf. */
	if (forward && ipforwarding) {
		ip_forward(m, 1, next_hop, ipforward_rt);
		if (ipforward_rt->ro_rt != NULL) {
			rtfree(ipforward_rt->ro_rt);
			ipforward_rt->ro_rt = NULL;
		}
		return (1);
	}
	return (0);
bad:
	ip->ip_len -= IP_VHL_HL(ip->ip_vhl) << 2;	/* XXX icmp_error adds in hdr length */
	icmp_error(m, type, code, 0, 0);
	OSAddAtomic(1, (SInt32*)&ipstat.ips_badoptions);
	return (1);
}
1871
1872 /*
1873 * Given address of next destination (final or next hop),
1874 * return internet address info of interface to be used to get there.
1875 */
struct in_ifaddr *
ip_rtaddr(struct in_addr dst, struct route *rt)
{
	struct sockaddr_in *sin;

	sin = (struct sockaddr_in *)&rt->ro_dst;

	lck_mtx_lock(rt_mtx);
	/*
	 * Reuse the cached route only if it is for the same destination
	 * and has not been invalidated by a routing-table generation bump.
	 */
	if (rt->ro_rt == 0 || dst.s_addr != sin->sin_addr.s_addr ||
	    rt->ro_rt->generation_id != route_generation) {
		if (rt->ro_rt) {
			rtfree_locked(rt->ro_rt);
			rt->ro_rt = 0;
		}
		sin->sin_family = AF_INET;
		sin->sin_len = sizeof(*sin);
		sin->sin_addr = dst;

		rtalloc_ign_locked(rt, RTF_PRCLONING);
	}
	if (rt->ro_rt == 0) {
		lck_mtx_unlock(rt_mtx);
		return ((struct in_ifaddr *)0);
	}

	/* Take a reference on the ifaddr before dropping rt_mtx;
	 * caller is responsible for releasing it (ifafree). */
	if (rt->ro_rt->rt_ifa)
		ifaref(rt->ro_rt->rt_ifa);
	lck_mtx_unlock(rt_mtx);
	return ((struct in_ifaddr *) rt->ro_rt->rt_ifa);
}
1906
1907 /*
1908 * Save incoming source route for use in replies,
1909 * to be picked up later by ip_srcroute if the receiver is interested.
1910 */
1911 void
1912 save_rte(u_char *option, struct in_addr dst)
1913 {
1914 unsigned olen;
1915
1916 olen = option[IPOPT_OLEN];
1917 #if DIAGNOSTIC
1918 if (ipprintfs)
1919 printf("save_rte: olen %d\n", olen);
1920 #endif
1921 if (olen > sizeof(ip_srcrt) - (1 + sizeof(dst)))
1922 return;
1923 bcopy(option, ip_srcrt.srcopt, olen);
1924 ip_nhops = (olen - IPOPT_OFFSET - 1) / sizeof(struct in_addr);
1925 ip_srcrt.dst = dst;
1926 }
1927
1928 /*
1929 * Retrieve incoming source route for use in replies,
1930 * in the same form used by setsockopt.
1931 * The first hop is placed before the options, will be removed later.
1932 */
struct mbuf *
ip_srcroute(void)
{
	struct in_addr *p, *q;
	struct mbuf *m;

	/* Nothing recorded by save_rte(): no route to return. */
	if (ip_nhops == 0)
		return ((struct mbuf *)0);
	m = m_get(M_DONTWAIT, MT_HEADER);
	if (m == 0)
		return ((struct mbuf *)0);

/* Size of the NOP pad plus the saved option header/body. */
#define OPTSIZ	(sizeof(ip_srcrt.nop) + sizeof(ip_srcrt.srcopt))

	/* length is (nhops+1)*sizeof(addr) + sizeof(nop + srcrt header) */
	m->m_len = ip_nhops * sizeof(struct in_addr) + sizeof(struct in_addr) +
	    OPTSIZ;
#if DIAGNOSTIC
	if (ipprintfs)
		printf("ip_srcroute: nhops %d mlen %d", ip_nhops, m->m_len);
#endif

	/*
	 * First save first hop for return route
	 */
	p = &ip_srcrt.route[ip_nhops - 1];
	*(mtod(m, struct in_addr *)) = *p--;
#if DIAGNOSTIC
	if (ipprintfs)
		printf(" hops %lx", (u_long)ntohl(mtod(m, struct in_addr *)->s_addr));
#endif

	/*
	 * Copy option fields and padding (nop) to mbuf.
	 */
	ip_srcrt.nop = IPOPT_NOP;
	ip_srcrt.srcopt[IPOPT_OFFSET] = IPOPT_MINOFF;
	(void)memcpy(mtod(m, caddr_t) + sizeof(struct in_addr),
	    &ip_srcrt.nop, OPTSIZ);
	q = (struct in_addr *)(mtod(m, caddr_t) +
	    sizeof(struct in_addr) + OPTSIZ);
#undef OPTSIZ
	/*
	 * Record return path as an IP source route,
	 * reversing the path (pointers are now aligned).
	 */
	while (p >= ip_srcrt.route) {
#if DIAGNOSTIC
		if (ipprintfs)
			printf(" %lx", (u_long)ntohl(q->s_addr));
#endif
		*q++ = *p--;
	}
	/*
	 * Last hop goes to final destination.
	 */
	*q = ip_srcrt.dst;
#if DIAGNOSTIC
	if (ipprintfs)
		printf(" %lx\n", (u_long)ntohl(q->s_addr));
#endif
	return (m);
}
1996
1997 /*
1998 * Strip out IP options, at higher
1999 * level protocol in the kernel.
2000 * Second argument is buffer to which options
2001 * will be moved, and return value is their length.
2002 * XXX should be deleted; last arg currently ignored.
2003 */
2004 void
2005 ip_stripoptions(struct mbuf *m, __unused struct mbuf *mopt)
2006 {
2007 int i;
2008 struct ip *ip = mtod(m, struct ip *);
2009 caddr_t opts;
2010 int olen;
2011
2012 olen = (IP_VHL_HL(ip->ip_vhl) << 2) - sizeof (struct ip);
2013 opts = (caddr_t)(ip + 1);
2014 i = m->m_len - (sizeof (struct ip) + olen);
2015 bcopy(opts + olen, opts, (unsigned)i);
2016 m->m_len -= olen;
2017 if (m->m_flags & M_PKTHDR)
2018 m->m_pkthdr.len -= olen;
2019 ip->ip_vhl = IP_MAKE_VHL(IPVERSION, sizeof(struct ip) >> 2);
2020 }
2021
/*
 * Map protocol control-input commands to the errno reported to sockets;
 * a zero entry means the event is not reported.  Presumably indexed by
 * PRC_* value (PRC_NCMDS entries) -- confirm against sys/protosw.h.
 */
u_char inetctlerrmap[PRC_NCMDS] = {
	0,		0,		0,		0,
	0,		EMSGSIZE,	EHOSTDOWN,	EHOSTUNREACH,
	ENETUNREACH,	EHOSTUNREACH,	ECONNREFUSED,	ECONNREFUSED,
	EMSGSIZE,	EHOSTUNREACH,	0,		0,
	0,		0,		0,		0,
	ENOPROTOOPT,	ECONNREFUSED
};
2030
2031 /*
2032 * Forward a packet. If some error occurs return the sender
2033 * an icmp packet. Note we can't always generate a meaningful
2034 * icmp message because icmp doesn't have a large enough repertoire
2035 * of codes and types.
2036 *
2037 * If not forwarding, just drop the packet. This could be confusing
2038 * if ipforwarding was zero but some routing protocol was advancing
2039 * us as a gateway to somewhere. However, we must let the routing
2040 * protocol deal with that.
2041 *
2042 * The srcrt parameter indicates whether the packet is being forwarded
2043 * via a source route.
2044 */
2045 static void
2046 ip_forward(struct mbuf *m, int srcrt, struct sockaddr_in *next_hop, struct route *ipforward_rt)
2047 {
2048 struct ip *ip = mtod(m, struct ip *);
2049 struct sockaddr_in *sin;
2050 struct rtentry *rt;
2051 int error, type = 0, code = 0;
2052 struct mbuf *mcopy;
2053 n_long dest;
2054 struct in_addr pkt_dst;
2055 struct ifnet *destifp;
2056 struct ifnet *rcvif = m->m_pkthdr.rcvif;
2057 #if IPSEC
2058 struct ifnet dummyifp;
2059 #endif
2060
2061 m->m_pkthdr.rcvif = NULL;
2062
2063 dest = 0;
2064 /*
2065 * Cache the destination address of the packet; this may be
2066 * changed by use of 'ipfw fwd'.
2067 */
2068 pkt_dst = next_hop ? next_hop->sin_addr : ip->ip_dst;
2069
2070 #if DIAGNOSTIC
2071 if (ipprintfs)
2072 printf("forward: src %lx dst %lx ttl %x\n",
2073 (u_long)ip->ip_src.s_addr, (u_long)pkt_dst.s_addr,
2074 ip->ip_ttl);
2075 #endif
2076
2077
2078 if (m->m_flags & (M_BCAST|M_MCAST) || in_canforward(pkt_dst) == 0) {
2079 OSAddAtomic(1, (SInt32*)&ipstat.ips_cantforward);
2080 m_freem(m);
2081 return;
2082 }
2083 #if IPSTEALTH
2084 if (!ipstealth) {
2085 #endif
2086 if (ip->ip_ttl <= IPTTLDEC) {
2087 icmp_error(m, ICMP_TIMXCEED, ICMP_TIMXCEED_INTRANS,
2088 dest, 0);
2089 return;
2090 }
2091 #if IPSTEALTH
2092 }
2093 #endif
2094
2095 sin = (struct sockaddr_in *)&ipforward_rt->ro_dst;
2096 if ((rt = ipforward_rt->ro_rt) == 0 ||
2097 pkt_dst.s_addr != sin->sin_addr.s_addr ||
2098 ipforward_rt->ro_rt->generation_id != route_generation) {
2099 if (ipforward_rt->ro_rt) {
2100 rtfree(ipforward_rt->ro_rt);
2101 ipforward_rt->ro_rt = 0;
2102 }
2103 sin->sin_family = AF_INET;
2104 sin->sin_len = sizeof(*sin);
2105 sin->sin_addr = pkt_dst;
2106
2107 rtalloc_ign(ipforward_rt, RTF_PRCLONING);
2108 if (ipforward_rt->ro_rt == 0) {
2109 icmp_error(m, ICMP_UNREACH, ICMP_UNREACH_HOST, dest, 0);
2110 return;
2111 }
2112 rt = ipforward_rt->ro_rt;
2113 }
2114
2115 /*
2116 * Save the IP header and at most 8 bytes of the payload,
2117 * in case we need to generate an ICMP message to the src.
2118 *
2119 * We don't use m_copy() because it might return a reference
2120 * to a shared cluster. Both this function and ip_output()
2121 * assume exclusive access to the IP header in `m', so any
2122 * data in a cluster may change before we reach icmp_error().
2123 */
2124 MGET(mcopy, M_DONTWAIT, m->m_type);
2125 if (mcopy != NULL) {
2126 M_COPY_PKTHDR(mcopy, m);
2127 mcopy->m_len = imin((IP_VHL_HL(ip->ip_vhl) << 2) + 8,
2128 (int)ip->ip_len);
2129 m_copydata(m, 0, mcopy->m_len, mtod(mcopy, caddr_t));
2130 }
2131
2132 #if IPSTEALTH
2133 if (!ipstealth) {
2134 #endif
2135 ip->ip_ttl -= IPTTLDEC;
2136 #if IPSTEALTH
2137 }
2138 #endif
2139
2140 /*
2141 * If forwarding packet using same interface that it came in on,
2142 * perhaps should send a redirect to sender to shortcut a hop.
2143 * Only send redirect if source is sending directly to us,
2144 * and if packet was not source routed (or has any options).
2145 * Also, don't send redirect if forwarding using a default route
2146 * or a route modified by a redirect.
2147 */
2148 #define satosin(sa) ((struct sockaddr_in *)(sa))
2149 if (rt->rt_ifp == m->m_pkthdr.rcvif &&
2150 (rt->rt_flags & (RTF_DYNAMIC|RTF_MODIFIED)) == 0 &&
2151 satosin(rt_key(rt))->sin_addr.s_addr != 0 &&
2152 ipsendredirects && !srcrt) {
2153 #define RTA(rt) ((struct in_ifaddr *)(rt->rt_ifa))
2154 u_long src = ntohl(ip->ip_src.s_addr);
2155
2156 if (RTA(rt) &&
2157 (src & RTA(rt)->ia_subnetmask) == RTA(rt)->ia_subnet) {
2158 if (rt->rt_flags & RTF_GATEWAY)
2159 dest = satosin(rt->rt_gateway)->sin_addr.s_addr;
2160 else
2161 dest = pkt_dst.s_addr;
2162 /* Router requirements says to only send host redirects */
2163 type = ICMP_REDIRECT;
2164 code = ICMP_REDIRECT_HOST;
2165 #if DIAGNOSTIC
2166 if (ipprintfs)
2167 printf("redirect (%d) to %lx\n", code, (u_long)dest);
2168 #endif
2169 }
2170 }
2171
2172 {
2173 if (next_hop) {
2174 /* Pass IPFORWARD info if available */
2175 struct m_tag *tag;
2176 struct ip_fwd_tag *ipfwd_tag;
2177
2178 tag = m_tag_alloc(KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_IPFORWARD,
2179 sizeof(struct sockaddr_in), M_NOWAIT);
2180 if (tag == NULL) {
2181 error = ENOBUFS;
2182 m_freem(m);
2183 return;
2184 }
2185
2186 ipfwd_tag = (struct ip_fwd_tag *)(tag+1);
2187 ipfwd_tag->next_hop = next_hop;
2188
2189 m_tag_prepend(m, tag);
2190 }
2191 error = ip_output_list(m, 0, (struct mbuf *)0, ipforward_rt,
2192 IP_FORWARDING, 0, NULL);
2193 }
2194 if (error)
2195 OSAddAtomic(1, (SInt32*)&ipstat.ips_cantforward);
2196 else {
2197 OSAddAtomic(1, (SInt32*)&ipstat.ips_forward);
2198 if (type)
2199 OSAddAtomic(1, (SInt32*)&ipstat.ips_redirectsent);
2200 else {
2201 if (mcopy) {
2202 ipflow_create(ipforward_rt, mcopy);
2203 m_freem(mcopy);
2204 }
2205 return;
2206 }
2207 }
2208 if (mcopy == NULL)
2209 return;
2210 destifp = NULL;
2211
2212 switch (error) {
2213
2214 case 0: /* forwarded, but need redirect */
2215 /* type, code set above */
2216 break;
2217
2218 case ENETUNREACH: /* shouldn't happen, checked above */
2219 case EHOSTUNREACH:
2220 case ENETDOWN:
2221 case EHOSTDOWN:
2222 default:
2223 type = ICMP_UNREACH;
2224 code = ICMP_UNREACH_HOST;
2225 break;
2226
2227 case EMSGSIZE:
2228 type = ICMP_UNREACH;
2229 code = ICMP_UNREACH_NEEDFRAG;
2230 #ifndef IPSEC
2231 if (ipforward_rt->ro_rt)
2232 destifp = ipforward_rt->ro_rt->rt_ifp;
2233 #else
2234 /*
2235 * If the packet is routed over IPsec tunnel, tell the
2236 * originator the tunnel MTU.
2237 * tunnel MTU = if MTU - sizeof(IP) - ESP/AH hdrsiz
2238 * XXX quickhack!!!
2239 */
2240 if (ipforward_rt->ro_rt) {
2241 struct secpolicy *sp = NULL;
2242 int ipsecerror;
2243 int ipsechdr;
2244 struct route *ro;
2245
2246 if (ipsec_bypass) {
2247 destifp = ipforward_rt->ro_rt->rt_ifp;
2248 OSAddAtomic(1, (SInt32*)&ipstat.ips_cantfrag);
2249 break;
2250 }
2251 sp = ipsec4_getpolicybyaddr(mcopy,
2252 IPSEC_DIR_OUTBOUND,
2253 IP_FORWARDING,
2254 &ipsecerror);
2255
2256 if (sp == NULL)
2257 destifp = ipforward_rt->ro_rt->rt_ifp;
2258 else {
2259 /* count IPsec header size */
2260 ipsechdr = ipsec_hdrsiz(sp);
2261
2262 /*
2263 * find the correct route for outer IPv4
2264 * header, compute tunnel MTU.
2265 *
2266 * XXX BUG ALERT
2267 * The "dummyifp" code relies upon the fact
2268 * that icmp_error() touches only ifp->if_mtu.
2269 */
2270 /*XXX*/
2271 destifp = NULL;
2272
2273 if (sp->req != NULL) {
2274 if (sp->req->saidx.mode == IPSEC_MODE_TUNNEL) {
2275 struct secasindex saidx;
2276 struct ip *ipm;
2277 struct secasvar *sav;
2278
2279 ipm = mtod(mcopy, struct ip *);
2280 bcopy(&sp->req->saidx, &saidx, sizeof(saidx));
2281 saidx.mode = sp->req->saidx.mode;
2282 saidx.reqid = sp->req->saidx.reqid;
2283 sin = (struct sockaddr_in *)&saidx.src;
2284 if (sin->sin_len == 0) {
2285 sin->sin_len = sizeof(*sin);
2286 sin->sin_family = AF_INET;
2287 sin->sin_port = IPSEC_PORT_ANY;
2288 bcopy(&ipm->ip_src, &sin->sin_addr,
2289 sizeof(sin->sin_addr));
2290 }
2291 sin = (struct sockaddr_in *)&saidx.dst;
2292 if (sin->sin_len == 0) {
2293 sin->sin_len = sizeof(*sin);
2294 sin->sin_family = AF_INET;
2295 sin->sin_port = IPSEC_PORT_ANY;
2296 bcopy(&ipm->ip_dst, &sin->sin_addr,
2297 sizeof(sin->sin_addr));
2298 }
2299 sav = key_allocsa_policy(&saidx);
2300 if (sav != NULL) {
2301 if (sav->sah != NULL) {
2302 ro = &sav->sah->sa_route;
2303 if (ro->ro_rt && ro->ro_rt->rt_ifp) {
2304 dummyifp.if_mtu =
2305 ro->ro_rt->rt_ifp->if_mtu;
2306 dummyifp.if_mtu -= ipsechdr;
2307 destifp = &dummyifp;
2308 }
2309 }
2310 key_freesav(sav, KEY_SADB_UNLOCKED);
2311 }
2312 }
2313 }
2314 key_freesp(sp, KEY_SADB_UNLOCKED);
2315 }
2316 }
2317 #endif /*IPSEC*/
2318 OSAddAtomic(1, (SInt32*)&ipstat.ips_cantfrag);
2319 break;
2320
2321 case ENOBUFS:
2322 type = ICMP_SOURCEQUENCH;
2323 code = 0;
2324 break;
2325
2326 case EACCES: /* ipfw denied packet */
2327 m_freem(mcopy);
2328 return;
2329 }
2330 icmp_error(mcopy, type, code, dest, destifp);
2331 }
2332
/*
 * Build ancillary-data (control) mbufs describing a received IP
 * datagram, according to the options enabled on the receiving socket.
 *
 * For each enabled option a control record is built with
 * sbcreatecontrol() and linked onto the chain at *mp; on success mp
 * is advanced to the new record's m_next so later records append
 * after it.  A NULL return from sbcreatecontrol() (allocation
 * failure) silently skips that record.
 *
 * inp: PCB whose socket options/flags select which records to build.
 * mp:  in/out pointer to the tail slot of the control-mbuf chain.
 * ip:  IP header of the received datagram (source of dst addr / TTL).
 * m:   received mbuf; its m_pkthdr.rcvif is used for IP_RECVIF.
 */
void
ip_savecontrol(
	struct inpcb *inp,
	struct mbuf **mp,
	struct ip *ip,
	struct mbuf *m)
{
	/* SO_TIMESTAMP: attach the current time as an SCM_TIMESTAMP record. */
	if (inp->inp_socket->so_options & SO_TIMESTAMP) {
		struct timeval tv;

		microtime(&tv);
		*mp = sbcreatecontrol((caddr_t) &tv, sizeof(tv),
			SCM_TIMESTAMP, SOL_SOCKET);
		if (*mp)
			mp = &(*mp)->m_next;
	}
	/* IP_RECVDSTADDR: report the datagram's destination address. */
	if (inp->inp_flags & INP_RECVDSTADDR) {
		*mp = sbcreatecontrol((caddr_t) &ip->ip_dst,
		    sizeof(struct in_addr), IP_RECVDSTADDR, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
#ifdef notyet
	/* XXX
	 * Moving these out of udp_input() made them even more broken
	 * than they already were.
	 */
	/* options were tossed already */
	if (inp->inp_flags & INP_RECVOPTS) {
		*mp = sbcreatecontrol((caddr_t) opts_deleted_above,
		    sizeof(struct in_addr), IP_RECVOPTS, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
	/* ip_srcroute doesn't do what we want here, need to fix */
	if (inp->inp_flags & INP_RECVRETOPTS) {
		*mp = sbcreatecontrol((caddr_t) ip_srcroute(),
		    sizeof(struct in_addr), IP_RECVRETOPTS, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
#endif
	/*
	 * IP_RECVIF: report the receiving interface as a sockaddr_dl.
	 * The link-level sockaddr is copied out of ifnet_addrs[] while
	 * holding the ifnet-head lock shared; if the interface index is
	 * invalid, the address is missing, or it would not fit in the
	 * local buffer, a zero-length dummy sockaddr_dl is supplied
	 * instead (via the makedummy label, which is jumped into from
	 * inside the if-branch).
	 */
	if (inp->inp_flags & INP_RECVIF) {
		struct ifnet *ifp;
		/* pad[] extends sockaddr_dl's short built-in sdl_data[]. */
		struct sdlbuf {
			struct sockaddr_dl sdl;
			u_char pad[32];
		} sdlbuf;
		struct sockaddr_dl *sdp;
		struct sockaddr_dl *sdl2 = &sdlbuf.sdl;

		ifnet_head_lock_shared();
		if (((ifp = m->m_pkthdr.rcvif))
		&& ( ifp->if_index && (ifp->if_index <= if_index))) {
			struct ifaddr *ifa = ifnet_addrs[ifp->if_index - 1];

			if (!ifa || !ifa->ifa_addr)
				goto makedummy;

			sdp = (struct sockaddr_dl *)ifa->ifa_addr;
			/*
			 * Change our mind and don't try copy.
			 */
			if ((sdp->sdl_family != AF_LINK)
			|| (sdp->sdl_len > sizeof(sdlbuf))) {
				goto makedummy;
			}
			bcopy(sdp, sdl2, sdp->sdl_len);
		} else {
makedummy:
			sdl2->sdl_len
				= offsetof(struct sockaddr_dl, sdl_data[0]);
			sdl2->sdl_family = AF_LINK;
			sdl2->sdl_index = 0;
			sdl2->sdl_nlen = sdl2->sdl_alen = sdl2->sdl_slen = 0;
		}
		ifnet_head_done();
		*mp = sbcreatecontrol((caddr_t) sdl2, sdl2->sdl_len,
			IP_RECVIF, IPPROTO_IP);
		if (*mp)
			mp = &(*mp)->m_next;
	}
	/* IP_RECVTTL: report the received datagram's remaining TTL. */
	if (inp->inp_flags & INP_RECVTTL) {
		*mp = sbcreatecontrol((caddr_t)&ip->ip_ttl, sizeof(ip->ip_ttl), IP_RECVTTL, IPPROTO_IP);
		if (*mp) mp = &(*mp)->m_next;
	}
}
2420
2421 int
2422 ip_rsvp_init(struct socket *so)
2423 {
2424 if (so->so_type != SOCK_RAW ||
2425 so->so_proto->pr_protocol != IPPROTO_RSVP)
2426 return EOPNOTSUPP;
2427
2428 if (ip_rsvpd != NULL)
2429 return EADDRINUSE;
2430
2431 ip_rsvpd = so;
2432 /*
2433 * This may seem silly, but we need to be sure we don't over-increment
2434 * the RSVP counter, in case something slips up.
2435 */
2436 if (!ip_rsvp_on) {
2437 ip_rsvp_on = 1;
2438 rsvp_on++;
2439 }
2440
2441 return 0;
2442 }
2443
2444 int
2445 ip_rsvp_done(void)
2446 {
2447 ip_rsvpd = NULL;
2448 /*
2449 * This may seem silly, but we need to be sure we don't over-decrement
2450 * the RSVP counter, in case something slips up.
2451 */
2452 if (ip_rsvp_on) {
2453 ip_rsvp_on = 0;
2454 rsvp_on--;
2455 }
2456 return 0;
2457 }