/*
 * Source listing: apple/xnu (release xnu-3789.21.4), bsd/netinet/ip_output.c
 */
1 /*
2 * Copyright (c) 2000-2016 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * Copyright (c) 1982, 1986, 1988, 1990, 1993
30 * The Regents of the University of California. All rights reserved.
31 *
32 * Redistribution and use in source and binary forms, with or without
33 * modification, are permitted provided that the following conditions
34 * are met:
35 * 1. Redistributions of source code must retain the above copyright
36 * notice, this list of conditions and the following disclaimer.
37 * 2. Redistributions in binary form must reproduce the above copyright
38 * notice, this list of conditions and the following disclaimer in the
39 * documentation and/or other materials provided with the distribution.
40 * 3. All advertising materials mentioning features or use of this software
41 * must display the following acknowledgement:
42 * This product includes software developed by the University of
43 * California, Berkeley and its contributors.
44 * 4. Neither the name of the University nor the names of its contributors
45 * may be used to endorse or promote products derived from this software
46 * without specific prior written permission.
47 *
48 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
49 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
52 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 * SUCH DAMAGE.
59 *
60 * @(#)ip_output.c 8.3 (Berkeley) 1/21/94
61 */
62 /*
63 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
64 * support for mandatory and extensible security protections. This notice
65 * is included in support of clause 2.2 (b) of the Apple Public License,
66 * Version 2.0.
67 */
68
69 #define _IP_VHL
70
71 #include <sys/param.h>
72 #include <sys/systm.h>
73 #include <sys/kernel.h>
74 #include <sys/malloc.h>
75 #include <sys/mbuf.h>
76 #include <sys/protosw.h>
77 #include <sys/socket.h>
78 #include <sys/socketvar.h>
79 #include <kern/locks.h>
80 #include <sys/sysctl.h>
81 #include <sys/mcache.h>
82 #include <sys/kdebug.h>
83
84 #include <machine/endian.h>
85 #include <pexpert/pexpert.h>
86 #include <mach/sdt.h>
87
88 #include <libkern/OSAtomic.h>
89 #include <libkern/OSByteOrder.h>
90
91 #include <net/if.h>
92 #include <net/if_dl.h>
93 #include <net/if_types.h>
94 #include <net/route.h>
95 #include <net/ntstat.h>
96 #include <net/net_osdep.h>
97 #include <net/dlil.h>
98 #include <net/net_perf.h>
99
100 #include <netinet/in.h>
101 #include <netinet/in_systm.h>
102 #include <netinet/ip.h>
103 #include <netinet/in_pcb.h>
104 #include <netinet/in_var.h>
105 #include <netinet/ip_var.h>
106 #include <netinet/kpi_ipfilter_var.h>
107 #include <netinet/in_tclass.h>
108
109 #if CONFIG_MACF_NET
110 #include <security/mac_framework.h>
111 #endif /* CONFIG_MACF_NET */
112
113 #define DBG_LAYER_BEG NETDBG_CODE(DBG_NETIP, 1)
114 #define DBG_LAYER_END NETDBG_CODE(DBG_NETIP, 3)
115 #define DBG_FNC_IP_OUTPUT NETDBG_CODE(DBG_NETIP, (1 << 8) | 1)
116 #define DBG_FNC_IPSEC4_OUTPUT NETDBG_CODE(DBG_NETIP, (2 << 8) | 1)
117
118 #if IPSEC
119 #include <netinet6/ipsec.h>
120 #include <netkey/key.h>
121 #if IPSEC_DEBUG
122 #include <netkey/key_debug.h>
123 #else
124 #define KEYDEBUG(lev, arg)
125 #endif
126 #endif /* IPSEC */
127
128 #if NECP
129 #include <net/necp.h>
130 #endif /* NECP */
131
132 #if IPFIREWALL
133 #include <netinet/ip_fw.h>
134 #if IPDIVERT
135 #include <netinet/ip_divert.h>
136 #endif /* IPDIVERT */
137 #endif /* IPFIREWALL */
138
139 #if DUMMYNET
140 #include <netinet/ip_dummynet.h>
141 #endif
142
143 #if PF
144 #include <net/pfvar.h>
145 #endif /* PF */
146
147 #if IPFIREWALL_FORWARD && IPFIREWALL_FORWARD_DEBUG
#if IPFIREWALL_FORWARD && IPFIREWALL_FORWARD_DEBUG
/*
 * Print an IPv4 address (struct in_addr, network byte order) in
 * dotted-quad form, for IPFIREWALL_FORWARD debug logging.
 *
 * Fixes vs. the previous version:
 *  - each octet is a u_int32_t masked to 0-255, so the conversion
 *    specifier must be %u, not %ld (format/argument mismatch is
 *    undefined behavior, and wrong on LP64);
 *  - the address is converted to host order once instead of four times;
 *  - the body is wrapped in do { } while (0) so the macro behaves as a
 *    single statement (safe in unbraced if/else); call sites that
 *    already supply their own trailing ';' keep working.
 */
#define print_ip(a) do {						\
	u_int32_t _pip = ntohl((a).s_addr);				\
	printf("%u.%u.%u.%u", (_pip >> 24) & 0xFF,			\
	    (_pip >> 16) & 0xFF,					\
	    (_pip >> 8) & 0xFF,						\
	    _pip & 0xFF);						\
} while (0)
#endif /* IPFIREWALL_FORWARD && IPFIREWALL_FORWARD_DEBUG */
154
/*
 * IP datagram identifier state.  NOTE(review): not referenced on the
 * visible output path (ip_randomid() is used below for ip_id assignment);
 * presumably retained for legacy/ABI reasons — confirm.
 */
u_short ip_id;

/* Handlers for the net.inet.ip.output_perf* sysctls declared below. */
static int sysctl_reset_ip_output_stats SYSCTL_HANDLER_ARGS;
static int sysctl_ip_output_measure_bins SYSCTL_HANDLER_ARGS;
static int sysctl_ip_output_getperf SYSCTL_HANDLER_ARGS;

/* Internal helpers implemented later in this file. */
static void ip_out_cksum_stats(int, u_int32_t);
static struct mbuf *ip_insertoptions(struct mbuf *, struct mbuf *, int *);
static int ip_optcopy(struct ip *, struct ip *);
static int ip_pcbopts(int, struct mbuf **, struct mbuf *);
static void imo_trace(struct ip_moptions *, int);
static void ip_mloopback(struct ifnet *, struct ifnet *, struct mbuf *,
    struct sockaddr_in *, int);
static struct ifaddr *in_selectsrcif(struct ip *, struct route *, unsigned int);

extern struct ip_linklocal_stat ip_linklocal_stat;

/* temporary: for testing */
#if IPSEC
extern int ipsec_bypass;
#endif

/*
 * Diagnostic counter; NOTE(review): presumably tracks the longest mbuf
 * chain handed to dlil_output_list() — updated outside this chunk, confirm.
 */
static int ip_maxchainsent = 0;
SYSCTL_INT(_net_inet_ip, OID_AUTO, maxchainsent,
    CTLFLAG_RW | CTLFLAG_LOCKED, &ip_maxchainsent, 0,
    "use dlil_output_list");
#if DEBUG
/*
 * Debug only: count of outbound ECT-marked packets to forcibly remark
 * with ECN CE (see the forge_ce check in ip_output_list below).
 */
static int forge_ce = 0;
SYSCTL_INT(_net_inet_ip, OID_AUTO, forge_ce,
    CTLFLAG_RW | CTLFLAG_LOCKED, &forge_ce, 0,
    "Forge ECN CE");
#endif /* DEBUG */

/* Non-zero to log source-interface selection decisions. */
static int ip_select_srcif_debug = 0;
SYSCTL_INT(_net_inet_ip, OID_AUTO, select_srcif_debug,
    CTLFLAG_RW | CTLFLAG_LOCKED, &ip_select_srcif_debug, 0,
    "log source interface selection debug info");

/* Non-zero to time ip_output_list() runs (accumulated in net_perf). */
static int ip_output_measure = 0;
SYSCTL_PROC(_net_inet_ip, OID_AUTO, output_perf,
    CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
    &ip_output_measure, 0, sysctl_reset_ip_output_stats, "I",
    "Do time measurement");

/* Bin configuration for the chaining-performance histogram. */
static uint64_t ip_output_measure_bins = 0;
SYSCTL_PROC(_net_inet_ip, OID_AUTO, output_perf_bins,
    CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED, &ip_output_measure_bins, 0,
    sysctl_ip_output_measure_bins, "I",
    "bins for chaining performance data histogram");

/* Accumulated output-path performance data, exported read-only. */
static net_perf_t net_perf;
SYSCTL_PROC(_net_inet_ip, OID_AUTO, output_perf_data,
    CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
    0, 0, sysctl_ip_output_getperf, "S,net_perf",
    "IP output performance data (struct net_perf, net/net_perf.h)");

#define IMO_TRACE_HIST_SIZE	32	/* size of trace history */

/* For gdb */
__private_extern__ unsigned int imo_trace_hist_size = IMO_TRACE_HIST_SIZE;
214
/*
 * Debugging shadow of struct ip_moptions with reference-count audit
 * state appended.  The base structure must remain the first member so
 * a pointer to either type can be used interchangeably.
 * NOTE(review): presumably allocated from imo_zone in place of the
 * plain struct when imo_debug is non-zero — confirm against the zone
 * alloc path (not visible in this chunk).
 */
struct ip_moptions_dbg {
	struct ip_moptions imo;		/* ip_moptions */
	u_int16_t imo_refhold_cnt;	/* # of IMO_ADDREF */
	u_int16_t imo_refrele_cnt;	/* # of IMO_REMREF */
	/*
	 * Alloc and free callers.
	 */
	ctrace_t imo_alloc;
	ctrace_t imo_free;
	/*
	 * Circular lists of IMO_ADDREF and IMO_REMREF callers.
	 */
	ctrace_t imo_refhold[IMO_TRACE_HIST_SIZE];
	ctrace_t imo_refrele[IMO_TRACE_HIST_SIZE];
};
230
/* imo_debug selects the audited allocation path; on by default in DEBUG. */
#if DEBUG
static unsigned int imo_debug = 1;	/* debugging (enabled) */
#else
static unsigned int imo_debug;		/* debugging (disabled) */
#endif /* !DEBUG */

/* Zone-allocator state for struct ip_moptions (set up elsewhere). */
static unsigned int imo_size;		/* size of zone element */
static struct zone *imo_zone;		/* zone for ip_moptions */

#define IMO_ZONE_MAX	64		/* maximum elements in zone */
#define IMO_ZONE_NAME	"ip_moptions"	/* zone name */
241
242 /*
243 * IP output. The packet in mbuf chain m contains a skeletal IP
244 * header (with len, off, ttl, proto, tos, src, dst).
245 * The mbuf chain containing the packet will be freed.
246 * The mbuf opt, if present, will not be freed.
247 */
int
ip_output(struct mbuf *m0, struct mbuf *opt, struct route *ro, int flags,
    struct ip_moptions *imo, struct ip_out_args *ipoa)
{
	/*
	 * Single-packet convenience wrapper: hand the packet straight to
	 * the list variant with packet chaining disabled (packetchain 0).
	 * All argument semantics are those of ip_output_list().
	 */
	int err;

	err = ip_output_list(m0, 0, opt, ro, flags, imo, ipoa);
	return (err);
}
254
255 /*
256 * IP output. The packet in mbuf chain m contains a skeletal IP
257 * header (with len, off, ttl, proto, tos, src, dst).
258 * The mbuf chain containing the packet will be freed.
259 * The mbuf opt, if present, will not be freed.
260 *
261 * Route ro MUST be non-NULL; if ro->ro_rt is valid, route lookup would be
262 * skipped and ro->ro_rt would be used. Otherwise the result of route
263 * lookup is stored in ro->ro_rt.
264 *
265 * In the IP forwarding case, the packet will arrive with options already
266 * inserted, so must have a NULL opt pointer.
267 */
268 int
269 ip_output_list(struct mbuf *m0, int packetchain, struct mbuf *opt,
270 struct route *ro, int flags, struct ip_moptions *imo,
271 struct ip_out_args *ipoa)
272 {
273 struct ip *ip;
274 struct ifnet *ifp = NULL; /* not refcnt'd */
275 struct mbuf *m = m0, *prevnxt = NULL, **mppn = &prevnxt;
276 int hlen = sizeof (struct ip);
277 int len = 0, error = 0;
278 struct sockaddr_in *dst = NULL;
279 struct in_ifaddr *ia = NULL, *src_ia = NULL;
280 struct in_addr pkt_dst;
281 struct ipf_pktopts *ippo = NULL;
282 ipfilter_t inject_filter_ref = NULL;
283 struct mbuf *packetlist;
284 uint32_t sw_csum, pktcnt = 0, scnt = 0, bytecnt = 0;
285 uint32_t packets_processed = 0;
286 unsigned int ifscope = IFSCOPE_NONE;
287 struct flowadv *adv = NULL;
288 struct timeval start_tv;
289 #if IPSEC
290 struct socket *so = NULL;
291 struct secpolicy *sp = NULL;
292 #endif /* IPSEC */
293 #if NECP
294 necp_kernel_policy_result necp_result = 0;
295 necp_kernel_policy_result_parameter necp_result_parameter;
296 necp_kernel_policy_id necp_matched_policy_id = 0;
297 #endif /* NECP */
298 #if IPFIREWALL
299 int ipfwoff;
300 struct sockaddr_in *next_hop_from_ipfwd_tag = NULL;
301 #endif /* IPFIREWALL */
302 #if IPFIREWALL || DUMMYNET
303 struct m_tag *tag;
304 #endif /* IPFIREWALL || DUMMYNET */
305 #if DUMMYNET
306 struct ip_out_args saved_ipoa;
307 struct sockaddr_in dst_buf;
308 #endif /* DUMMYNET */
309 struct {
310 #if IPSEC
311 struct ipsec_output_state ipsec_state;
312 #endif /* IPSEC */
313 #if NECP
314 struct route necp_route;
315 #endif /* NECP */
316 #if IPFIREWALL || DUMMYNET
317 struct ip_fw_args args;
318 #endif /* IPFIREWALL || DUMMYNET */
319 #if IPFIREWALL_FORWARD
320 struct route sro_fwd;
321 #endif /* IPFIREWALL_FORWARD */
322 #if DUMMYNET
323 struct route saved_route;
324 #endif /* DUMMYNET */
325 struct ipf_pktopts ipf_pktopts;
326 } ipobz;
327 #define ipsec_state ipobz.ipsec_state
328 #define necp_route ipobz.necp_route
329 #define args ipobz.args
330 #define sro_fwd ipobz.sro_fwd
331 #define saved_route ipobz.saved_route
332 #define ipf_pktopts ipobz.ipf_pktopts
333 union {
334 struct {
335 boolean_t select_srcif : 1; /* set once */
336 boolean_t srcbound : 1; /* set once */
337 boolean_t nocell : 1; /* set once */
338 boolean_t isbroadcast : 1;
339 boolean_t didfilter : 1;
340 boolean_t noexpensive : 1; /* set once */
341 boolean_t awdl_unrestricted : 1; /* set once */
342 #if IPFIREWALL_FORWARD
343 boolean_t fwd_rewrite_src : 1;
344 #endif /* IPFIREWALL_FORWARD */
345 };
346 uint32_t raw;
347 } ipobf = { .raw = 0 };
348
349 /*
350 * Here we check for restrictions when sending frames.
351 * N.B.: IPv4 over internal co-processor interfaces is not allowed.
352 */
353 #define IP_CHECK_RESTRICTIONS(_ifp, _ipobf) \
354 (((_ipobf).nocell && IFNET_IS_CELLULAR(_ifp)) || \
355 ((_ipobf).noexpensive && IFNET_IS_EXPENSIVE(_ifp)) || \
356 (IFNET_IS_INTCOPROC(_ifp)) || \
357 (!(_ipobf).awdl_unrestricted && IFNET_IS_AWDL_RESTRICTED(_ifp)))
358
359 if (ip_output_measure)
360 net_perf_start_time(&net_perf, &start_tv);
361 KERNEL_DEBUG(DBG_FNC_IP_OUTPUT | DBG_FUNC_START, 0, 0, 0, 0, 0);
362
363 VERIFY(m0->m_flags & M_PKTHDR);
364 packetlist = m0;
365
366 /* zero out {ipsec_state, args, sro_fwd, saved_route, ipf_pktops} */
367 bzero(&ipobz, sizeof (ipobz));
368 ippo = &ipf_pktopts;
369
370 #if IPFIREWALL || DUMMYNET
371 if (SLIST_EMPTY(&m0->m_pkthdr.tags))
372 goto ipfw_tags_done;
373
374 /* Grab info from mtags prepended to the chain */
375 #if DUMMYNET
376 if ((tag = m_tag_locate(m0, KERNEL_MODULE_TAG_ID,
377 KERNEL_TAG_TYPE_DUMMYNET, NULL)) != NULL) {
378 struct dn_pkt_tag *dn_tag;
379
380 dn_tag = (struct dn_pkt_tag *)(tag+1);
381 args.fwa_ipfw_rule = dn_tag->dn_ipfw_rule;
382 args.fwa_pf_rule = dn_tag->dn_pf_rule;
383 opt = NULL;
384 saved_route = dn_tag->dn_ro;
385 ro = &saved_route;
386
387 imo = NULL;
388 bcopy(&dn_tag->dn_dst, &dst_buf, sizeof (dst_buf));
389 dst = &dst_buf;
390 ifp = dn_tag->dn_ifp;
391 flags = dn_tag->dn_flags;
392 if ((dn_tag->dn_flags & IP_OUTARGS)) {
393 saved_ipoa = dn_tag->dn_ipoa;
394 ipoa = &saved_ipoa;
395 }
396
397 m_tag_delete(m0, tag);
398 }
399 #endif /* DUMMYNET */
400
401 #if IPDIVERT
402 if ((tag = m_tag_locate(m0, KERNEL_MODULE_TAG_ID,
403 KERNEL_TAG_TYPE_DIVERT, NULL)) != NULL) {
404 struct divert_tag *div_tag;
405
406 div_tag = (struct divert_tag *)(tag+1);
407 args.fwa_divert_rule = div_tag->cookie;
408
409 m_tag_delete(m0, tag);
410 }
411 #endif /* IPDIVERT */
412
413 #if IPFIREWALL
414 if ((tag = m_tag_locate(m0, KERNEL_MODULE_TAG_ID,
415 KERNEL_TAG_TYPE_IPFORWARD, NULL)) != NULL) {
416 struct ip_fwd_tag *ipfwd_tag;
417
418 ipfwd_tag = (struct ip_fwd_tag *)(tag+1);
419 next_hop_from_ipfwd_tag = ipfwd_tag->next_hop;
420
421 m_tag_delete(m0, tag);
422 }
423 #endif /* IPFIREWALL */
424
425 ipfw_tags_done:
426 #endif /* IPFIREWALL || DUMMYNET */
427
428 m = m0;
429 m->m_pkthdr.pkt_flags &= ~(PKTF_LOOP|PKTF_IFAINFO);
430
431 #if IPSEC
432 if (ipsec_bypass == 0 && !(flags & IP_NOIPSEC)) {
433 /* If packet is bound to an interface, check bound policies */
434 if ((flags & IP_OUTARGS) && (ipoa != NULL) &&
435 (ipoa->ipoa_flags & IPOAF_BOUND_IF) &&
436 ipoa->ipoa_boundif != IFSCOPE_NONE) {
437 if (ipsec4_getpolicybyinterface(m, IPSEC_DIR_OUTBOUND,
438 &flags, ipoa, &sp) != 0)
439 goto bad;
440 }
441 }
442 #endif /* IPSEC */
443
444 VERIFY(ro != NULL);
445
446 if (flags & IP_OUTARGS) {
447 /*
448 * In the forwarding case, only the ifscope value is used,
449 * as source interface selection doesn't take place.
450 */
451 if ((ipobf.select_srcif = (!(flags & IP_FORWARDING) &&
452 (ipoa->ipoa_flags & IPOAF_SELECT_SRCIF)))) {
453 ipf_pktopts.ippo_flags |= IPPOF_SELECT_SRCIF;
454 }
455
456 if ((ipoa->ipoa_flags & IPOAF_BOUND_IF) &&
457 ipoa->ipoa_boundif != IFSCOPE_NONE) {
458 ifscope = ipoa->ipoa_boundif;
459 ipf_pktopts.ippo_flags |=
460 (IPPOF_BOUND_IF | (ifscope << IPPOF_SHIFT_IFSCOPE));
461 }
462
463 /* double negation needed for bool bit field */
464 ipobf.srcbound = !!(ipoa->ipoa_flags & IPOAF_BOUND_SRCADDR);
465 if (ipobf.srcbound)
466 ipf_pktopts.ippo_flags |= IPPOF_BOUND_SRCADDR;
467 } else {
468 ipobf.select_srcif = FALSE;
469 ipobf.srcbound = FALSE;
470 ifscope = IFSCOPE_NONE;
471 if (flags & IP_OUTARGS) {
472 ipoa->ipoa_boundif = IFSCOPE_NONE;
473 ipoa->ipoa_flags &= ~(IPOAF_SELECT_SRCIF |
474 IPOAF_BOUND_IF | IPOAF_BOUND_SRCADDR);
475 }
476 }
477
478 if (flags & IP_OUTARGS) {
479 if (ipoa->ipoa_flags & IPOAF_NO_CELLULAR) {
480 ipobf.nocell = TRUE;
481 ipf_pktopts.ippo_flags |= IPPOF_NO_IFT_CELLULAR;
482 }
483 if (ipoa->ipoa_flags & IPOAF_NO_EXPENSIVE) {
484 ipobf.noexpensive = TRUE;
485 ipf_pktopts.ippo_flags |= IPPOF_NO_IFF_EXPENSIVE;
486 }
487 if (ipoa->ipoa_flags & IPOAF_AWDL_UNRESTRICTED)
488 ipobf.awdl_unrestricted = TRUE;
489 adv = &ipoa->ipoa_flowadv;
490 adv->code = FADV_SUCCESS;
491 ipoa->ipoa_retflags = 0;
492 }
493
494 #if IPSEC
495 if (ipsec_bypass == 0 && !(flags & IP_NOIPSEC)) {
496 so = ipsec_getsocket(m);
497 if (so != NULL) {
498 (void) ipsec_setsocket(m, NULL);
499 }
500 }
501 #endif /* IPSEC */
502
503 #if DUMMYNET
504 if (args.fwa_ipfw_rule != NULL || args.fwa_pf_rule != NULL) {
505 /* dummynet already saw us */
506 ip = mtod(m, struct ip *);
507 hlen = IP_VHL_HL(ip->ip_vhl) << 2;
508 pkt_dst = ip->ip_dst;
509 if (ro->ro_rt != NULL) {
510 RT_LOCK_SPIN(ro->ro_rt);
511 ia = (struct in_ifaddr *)ro->ro_rt->rt_ifa;
512 if (ia) {
513 /* Become a regular mutex */
514 RT_CONVERT_LOCK(ro->ro_rt);
515 IFA_ADDREF(&ia->ia_ifa);
516 }
517 RT_UNLOCK(ro->ro_rt);
518 }
519
520 #if IPFIREWALL
521 if (args.fwa_ipfw_rule != NULL)
522 goto skip_ipsec;
523 #endif /* IPFIREWALL */
524 if (args.fwa_pf_rule != NULL)
525 goto sendit;
526 }
527 #endif /* DUMMYNET */
528
529 loopit:
530 packets_processed++;
531 ipobf.isbroadcast = FALSE;
532 ipobf.didfilter = FALSE;
533 #if IPFIREWALL_FORWARD
534 ipobf.fwd_rewrite_src = FALSE;
535 #endif /* IPFIREWALL_FORWARD */
536
537 VERIFY(m->m_flags & M_PKTHDR);
538 /*
539 	 * No need to process packet twice if we've already seen it.
540 */
541 if (!SLIST_EMPTY(&m->m_pkthdr.tags))
542 inject_filter_ref = ipf_get_inject_filter(m);
543 else
544 inject_filter_ref = NULL;
545
546 if (opt) {
547 m = ip_insertoptions(m, opt, &len);
548 hlen = len;
549 /* Update the chain */
550 if (m != m0) {
551 if (m0 == packetlist)
552 packetlist = m;
553 m0 = m;
554 }
555 }
556 ip = mtod(m, struct ip *);
557
558 #if IPFIREWALL
559 /*
560 * rdar://8542331
561 *
562 * When dealing with a packet chain, we need to reset "next_hop"
563 * because "dst" may have been changed to the gateway address below
564 * for the previous packet of the chain. This could cause the route
565 	 * to be inadvertently changed to the route to the gateway address
566 * (instead of the route to the destination).
567 */
568 args.fwa_next_hop = next_hop_from_ipfwd_tag;
569 pkt_dst = args.fwa_next_hop ? args.fwa_next_hop->sin_addr : ip->ip_dst;
570 #else /* !IPFIREWALL */
571 pkt_dst = ip->ip_dst;
572 #endif /* !IPFIREWALL */
573
574 /*
575 * We must not send if the packet is destined to network zero.
576 * RFC1122 3.2.1.3 (a) and (b).
577 */
578 if (IN_ZERONET(ntohl(pkt_dst.s_addr))) {
579 error = EHOSTUNREACH;
580 goto bad;
581 }
582
583 /*
584 * Fill in IP header.
585 */
586 if (!(flags & (IP_FORWARDING|IP_RAWOUTPUT))) {
587 ip->ip_vhl = IP_MAKE_VHL(IPVERSION, hlen >> 2);
588 ip->ip_off &= IP_DF;
589 ip->ip_id = ip_randomid();
590 OSAddAtomic(1, &ipstat.ips_localout);
591 } else {
592 hlen = IP_VHL_HL(ip->ip_vhl) << 2;
593 }
594
595 #if DEBUG
596 /* For debugging, we let the stack forge congestion */
597 if (forge_ce != 0 &&
598 ((ip->ip_tos & IPTOS_ECN_MASK) == IPTOS_ECN_ECT1 ||
599 (ip->ip_tos & IPTOS_ECN_MASK) == IPTOS_ECN_ECT0)) {
600 ip->ip_tos = (ip->ip_tos & ~IPTOS_ECN_MASK) | IPTOS_ECN_CE;
601 forge_ce--;
602 }
603 #endif /* DEBUG */
604
605 KERNEL_DEBUG(DBG_LAYER_BEG, ip->ip_dst.s_addr, ip->ip_src.s_addr,
606 ip->ip_p, ip->ip_off, ip->ip_len);
607
608 dst = SIN(&ro->ro_dst);
609
610 /*
611 * If there is a cached route,
612 * check that it is to the same destination
613 * and is still up. If not, free it and try again.
614 * The address family should also be checked in case of sharing the
615 * cache with IPv6.
616 */
617
618 if (ro->ro_rt != NULL) {
619 if (ROUTE_UNUSABLE(ro) && ip->ip_src.s_addr != INADDR_ANY &&
620 !(flags & (IP_ROUTETOIF | IP_FORWARDING))) {
621 src_ia = ifa_foraddr(ip->ip_src.s_addr);
622 if (src_ia == NULL) {
623 error = EADDRNOTAVAIL;
624 goto bad;
625 }
626 IFA_REMREF(&src_ia->ia_ifa);
627 src_ia = NULL;
628 }
629 /*
630 * Test rt_flags without holding rt_lock for performance
631 * reasons; if the route is down it will hopefully be
632 * caught by the layer below (since it uses this route
633 * as a hint) or during the next transmit.
634 */
635 if (ROUTE_UNUSABLE(ro) || dst->sin_family != AF_INET ||
636 dst->sin_addr.s_addr != pkt_dst.s_addr)
637 ROUTE_RELEASE(ro);
638
639 /*
640 * If we're doing source interface selection, we may not
641 * want to use this route; only synch up the generation
642 * count otherwise.
643 */
644 if (!ipobf.select_srcif && ro->ro_rt != NULL &&
645 RT_GENID_OUTOFSYNC(ro->ro_rt))
646 RT_GENID_SYNC(ro->ro_rt);
647 }
648 if (ro->ro_rt == NULL) {
649 bzero(dst, sizeof (*dst));
650 dst->sin_family = AF_INET;
651 dst->sin_len = sizeof (*dst);
652 dst->sin_addr = pkt_dst;
653 }
654 /*
655 * If routing to interface only,
656 * short circuit routing lookup.
657 */
658 if (flags & IP_ROUTETOIF) {
659 if (ia != NULL)
660 IFA_REMREF(&ia->ia_ifa);
661 if ((ia = ifatoia(ifa_ifwithdstaddr(sintosa(dst)))) == NULL) {
662 ia = ifatoia(ifa_ifwithnet(sintosa(dst)));
663 if (ia == NULL) {
664 OSAddAtomic(1, &ipstat.ips_noroute);
665 error = ENETUNREACH;
666 /* XXX IPv6 APN fallback notification?? */
667 goto bad;
668 }
669 }
670 ifp = ia->ia_ifp;
671 ip->ip_ttl = 1;
672 ipobf.isbroadcast = in_broadcast(dst->sin_addr, ifp);
673 /*
674 * For consistency with other cases below. Loopback
675 * multicast case is handled separately by ip_mloopback().
676 */
677 if ((ifp->if_flags & IFF_LOOPBACK) &&
678 !IN_MULTICAST(ntohl(pkt_dst.s_addr))) {
679 m->m_pkthdr.rcvif = ifp;
680 ip_setsrcifaddr_info(m, ifp->if_index, NULL);
681 ip_setdstifaddr_info(m, ifp->if_index, NULL);
682 }
683 } else if (IN_MULTICAST(ntohl(pkt_dst.s_addr)) &&
684 imo != NULL && (ifp = imo->imo_multicast_ifp) != NULL) {
685 /*
686 * Bypass the normal routing lookup for multicast
687 * packets if the interface is specified.
688 */
689 ipobf.isbroadcast = FALSE;
690 if (ia != NULL)
691 IFA_REMREF(&ia->ia_ifa);
692
693 /* Macro takes reference on ia */
694 IFP_TO_IA(ifp, ia);
695 } else {
696 struct ifaddr *ia0 = NULL;
697 boolean_t cloneok = FALSE;
698 /*
699 * Perform source interface selection; the source IP address
700 * must belong to one of the addresses of the interface used
701 * by the route. For performance reasons, do this only if
702 * there is no route, or if the routing table has changed,
703 * or if we haven't done source interface selection on this
704 * route (for this PCB instance) before.
705 */
706 if (ipobf.select_srcif &&
707 ip->ip_src.s_addr != INADDR_ANY && (ROUTE_UNUSABLE(ro) ||
708 !(ro->ro_flags & ROF_SRCIF_SELECTED))) {
709 /* Find the source interface */
710 ia0 = in_selectsrcif(ip, ro, ifscope);
711
712 /*
713 * If the source address belongs to a restricted
714 * interface and the caller forbids our using
715 * interfaces of such type, pretend that there is no
716 * route.
717 */
718 if (ia0 != NULL &&
719 IP_CHECK_RESTRICTIONS(ia0->ifa_ifp, ipobf)) {
720 IFA_REMREF(ia0);
721 ia0 = NULL;
722 error = EHOSTUNREACH;
723 if (flags & IP_OUTARGS)
724 ipoa->ipoa_retflags |= IPOARF_IFDENIED;
725 goto bad;
726 }
727
728 /*
729 * If the source address is spoofed (in the case of
730 * IP_RAWOUTPUT on an unbounded socket), or if this
731 * is destined for local/loopback, just let it go out
732 * using the interface of the route. Otherwise,
733 * there's no interface having such an address,
734 * so bail out.
735 */
736 if (ia0 == NULL && (!(flags & IP_RAWOUTPUT) ||
737 ipobf.srcbound) && ifscope != lo_ifp->if_index) {
738 error = EADDRNOTAVAIL;
739 goto bad;
740 }
741
742 /*
743 * If the caller didn't explicitly specify the scope,
744 * pick it up from the source interface. If the cached
745 * route was wrong and was blown away as part of source
746 * interface selection, don't mask out RTF_PRCLONING
747 * since that route may have been allocated by the ULP,
748 * unless the IP header was created by the caller or
749 * the destination is IPv4 LLA. The check for the
750 * latter is needed because IPv4 LLAs are never scoped
751 * in the current implementation, and we don't want to
752 * replace the resolved IPv4 LLA route with one whose
753 * gateway points to that of the default gateway on
754 * the primary interface of the system.
755 */
756 if (ia0 != NULL) {
757 if (ifscope == IFSCOPE_NONE)
758 ifscope = ia0->ifa_ifp->if_index;
759 cloneok = (!(flags & IP_RAWOUTPUT) &&
760 !(IN_LINKLOCAL(ntohl(ip->ip_dst.s_addr))));
761 }
762 }
763
764 /*
765 * If this is the case, we probably don't want to allocate
766 * a protocol-cloned route since we didn't get one from the
767 * ULP. This lets TCP do its thing, while not burdening
768 * forwarding or ICMP with the overhead of cloning a route.
769 * Of course, we still want to do any cloning requested by
770 * the link layer, as this is probably required in all cases
771 * for correct operation (as it is for ARP).
772 */
773 if (ro->ro_rt == NULL) {
774 unsigned long ign = RTF_PRCLONING;
775 /*
776 * We make an exception here: if the destination
777 * address is INADDR_BROADCAST, allocate a protocol-
778 * cloned host route so that we end up with a route
779 * marked with the RTF_BROADCAST flag. Otherwise,
780 * we would end up referring to the default route,
781 * instead of creating a cloned host route entry.
782 * That would introduce inconsistencies between ULPs
783 * that allocate a route and those that don't. The
784 * RTF_BROADCAST route is important since we'd want
785 * to send out undirected IP broadcast packets using
786 * link-level broadcast address. Another exception
787 * is for ULP-created routes that got blown away by
788 * source interface selection (see above).
789 *
790 * These exceptions will no longer be necessary when
791 * the RTF_PRCLONING scheme is no longer present.
792 */
793 if (cloneok || dst->sin_addr.s_addr == INADDR_BROADCAST)
794 ign &= ~RTF_PRCLONING;
795
796 /*
797 * Loosen the route lookup criteria if the ifscope
798 * corresponds to the loopback interface; this is
799 * needed to support Application Layer Gateways
800 * listening on loopback, in conjunction with packet
801 * filter redirection rules. The final source IP
802 * address will be rewritten by the packet filter
803 * prior to the RFC1122 loopback check below.
804 */
805 if (ifscope == lo_ifp->if_index)
806 rtalloc_ign(ro, ign);
807 else
808 rtalloc_scoped_ign(ro, ign, ifscope);
809
810 /*
811 * If the route points to a cellular/expensive interface
812 * and the caller forbids our using interfaces of such type,
813 * pretend that there is no route.
814 */
815 if (ro->ro_rt != NULL) {
816 RT_LOCK_SPIN(ro->ro_rt);
817 if (IP_CHECK_RESTRICTIONS(ro->ro_rt->rt_ifp,
818 ipobf)) {
819 RT_UNLOCK(ro->ro_rt);
820 ROUTE_RELEASE(ro);
821 if (flags & IP_OUTARGS) {
822 ipoa->ipoa_retflags |=
823 IPOARF_IFDENIED;
824 }
825 } else {
826 RT_UNLOCK(ro->ro_rt);
827 }
828 }
829 }
830
831 if (ro->ro_rt == NULL) {
832 OSAddAtomic(1, &ipstat.ips_noroute);
833 error = EHOSTUNREACH;
834 if (ia0 != NULL) {
835 IFA_REMREF(ia0);
836 ia0 = NULL;
837 }
838 goto bad;
839 }
840
841 if (ia != NULL)
842 IFA_REMREF(&ia->ia_ifa);
843 RT_LOCK_SPIN(ro->ro_rt);
844 ia = ifatoia(ro->ro_rt->rt_ifa);
845 if (ia != NULL) {
846 /* Become a regular mutex */
847 RT_CONVERT_LOCK(ro->ro_rt);
848 IFA_ADDREF(&ia->ia_ifa);
849 }
850 /*
851 * Note: ia_ifp may not be the same as rt_ifp; the latter
852 * is what we use for determining outbound i/f, mtu, etc.
853 */
854 ifp = ro->ro_rt->rt_ifp;
855 ro->ro_rt->rt_use++;
856 if (ro->ro_rt->rt_flags & RTF_GATEWAY) {
857 dst = SIN(ro->ro_rt->rt_gateway);
858 }
859 if (ro->ro_rt->rt_flags & RTF_HOST) {
860 /* double negation needed for bool bit field */
861 ipobf.isbroadcast =
862 !!(ro->ro_rt->rt_flags & RTF_BROADCAST);
863 } else {
864 /* Become a regular mutex */
865 RT_CONVERT_LOCK(ro->ro_rt);
866 ipobf.isbroadcast = in_broadcast(dst->sin_addr, ifp);
867 }
868 /*
869 * For consistency with IPv6, as well as to ensure that
870 * IP_RECVIF is set correctly for packets that are sent
871 * to one of the local addresses. ia (rt_ifa) would have
872 * been fixed up by rt_setif for local routes. This
873 * would make it appear as if the packet arrives on the
874 * interface which owns the local address. Loopback
875 * multicast case is handled separately by ip_mloopback().
876 */
877 if (ia != NULL && (ifp->if_flags & IFF_LOOPBACK) &&
878 !IN_MULTICAST(ntohl(pkt_dst.s_addr))) {
879 uint32_t srcidx;
880
881 m->m_pkthdr.rcvif = ia->ia_ifa.ifa_ifp;
882
883 if (ia0 != NULL)
884 srcidx = ia0->ifa_ifp->if_index;
885 else if ((ro->ro_flags & ROF_SRCIF_SELECTED) &&
886 ro->ro_srcia != NULL)
887 srcidx = ro->ro_srcia->ifa_ifp->if_index;
888 else
889 srcidx = 0;
890
891 ip_setsrcifaddr_info(m, srcidx, NULL);
892 ip_setdstifaddr_info(m, 0, ia);
893 }
894 RT_UNLOCK(ro->ro_rt);
895 if (ia0 != NULL) {
896 IFA_REMREF(ia0);
897 ia0 = NULL;
898 }
899 }
900
901 if (IN_MULTICAST(ntohl(pkt_dst.s_addr))) {
902 struct ifnet *srcifp = NULL;
903 struct in_multi *inm;
904 u_int32_t vif;
905 u_int8_t ttl = IP_DEFAULT_MULTICAST_TTL;
906 u_int8_t loop = IP_DEFAULT_MULTICAST_LOOP;
907
908 m->m_flags |= M_MCAST;
909 /*
910 * IP destination address is multicast. Make sure "dst"
911 * still points to the address in "ro". (It may have been
912 * changed to point to a gateway address, above.)
913 */
914 dst = SIN(&ro->ro_dst);
915 /*
916 * See if the caller provided any multicast options
917 */
918 if (imo != NULL) {
919 IMO_LOCK(imo);
920 vif = imo->imo_multicast_vif;
921 ttl = imo->imo_multicast_ttl;
922 loop = imo->imo_multicast_loop;
923 if (!(flags & IP_RAWOUTPUT))
924 ip->ip_ttl = ttl;
925 if (imo->imo_multicast_ifp != NULL)
926 ifp = imo->imo_multicast_ifp;
927 IMO_UNLOCK(imo);
928 } else if (!(flags & IP_RAWOUTPUT)) {
929 vif = -1;
930 ip->ip_ttl = ttl;
931 }
932 /*
933 * Confirm that the outgoing interface supports multicast.
934 */
935 if (imo == NULL || vif == -1) {
936 if (!(ifp->if_flags & IFF_MULTICAST)) {
937 OSAddAtomic(1, &ipstat.ips_noroute);
938 error = ENETUNREACH;
939 goto bad;
940 }
941 }
942 /*
943 * If source address not specified yet, use address
944 * of outgoing interface.
945 */
946 if (ip->ip_src.s_addr == INADDR_ANY) {
947 struct in_ifaddr *ia1;
948 lck_rw_lock_shared(in_ifaddr_rwlock);
949 TAILQ_FOREACH(ia1, &in_ifaddrhead, ia_link) {
950 IFA_LOCK_SPIN(&ia1->ia_ifa);
951 if (ia1->ia_ifp == ifp) {
952 ip->ip_src = IA_SIN(ia1)->sin_addr;
953 srcifp = ifp;
954 IFA_UNLOCK(&ia1->ia_ifa);
955 break;
956 }
957 IFA_UNLOCK(&ia1->ia_ifa);
958 }
959 lck_rw_done(in_ifaddr_rwlock);
960 if (ip->ip_src.s_addr == INADDR_ANY) {
961 error = ENETUNREACH;
962 goto bad;
963 }
964 }
965
966 in_multihead_lock_shared();
967 IN_LOOKUP_MULTI(&pkt_dst, ifp, inm);
968 in_multihead_lock_done();
969 if (inm != NULL && (imo == NULL || loop)) {
970 /*
971 * If we belong to the destination multicast group
972 * on the outgoing interface, and the caller did not
973 * forbid loopback, loop back a copy.
974 */
975 if (!TAILQ_EMPTY(&ipv4_filters)) {
976 struct ipfilter *filter;
977 int seen = (inject_filter_ref == NULL);
978
979 if (imo != NULL) {
980 ipf_pktopts.ippo_flags |=
981 IPPOF_MCAST_OPTS;
982 ipf_pktopts.ippo_mcast_ifnet = ifp;
983 ipf_pktopts.ippo_mcast_ttl = ttl;
984 ipf_pktopts.ippo_mcast_loop = loop;
985 }
986
987 ipf_ref();
988
989 /*
990 * 4135317 - always pass network byte
991 * order to filter
992 */
993 #if BYTE_ORDER != BIG_ENDIAN
994 HTONS(ip->ip_len);
995 HTONS(ip->ip_off);
996 #endif
997 TAILQ_FOREACH(filter, &ipv4_filters, ipf_link) {
998 if (seen == 0) {
999 if ((struct ipfilter *)
1000 inject_filter_ref == filter)
1001 seen = 1;
1002 } else if (filter->ipf_filter.
1003 ipf_output != NULL) {
1004 errno_t result;
1005 result = filter->ipf_filter.
1006 ipf_output(filter->
1007 ipf_filter.cookie,
1008 (mbuf_t *)&m, ippo);
1009 if (result == EJUSTRETURN) {
1010 ipf_unref();
1011 INM_REMREF(inm);
1012 goto done;
1013 }
1014 if (result != 0) {
1015 ipf_unref();
1016 INM_REMREF(inm);
1017 goto bad;
1018 }
1019 }
1020 }
1021
1022 /* set back to host byte order */
1023 ip = mtod(m, struct ip *);
1024 #if BYTE_ORDER != BIG_ENDIAN
1025 NTOHS(ip->ip_len);
1026 NTOHS(ip->ip_off);
1027 #endif
1028 ipf_unref();
1029 ipobf.didfilter = TRUE;
1030 }
1031 ip_mloopback(srcifp, ifp, m, dst, hlen);
1032 }
1033 if (inm != NULL)
1034 INM_REMREF(inm);
1035 /*
1036 * Multicasts with a time-to-live of zero may be looped-
1037 * back, above, but must not be transmitted on a network.
1038 * Also, multicasts addressed to the loopback interface
1039 * are not sent -- the above call to ip_mloopback() will
1040 * loop back a copy if this host actually belongs to the
1041 * destination group on the loopback interface.
1042 */
1043 if (ip->ip_ttl == 0 || ifp->if_flags & IFF_LOOPBACK) {
1044 m_freem(m);
1045 goto done;
1046 }
1047
1048 goto sendit;
1049 }
1050 /*
1051 * If source address not specified yet, use address
1052 * of outgoing interface.
1053 */
1054 if (ip->ip_src.s_addr == INADDR_ANY) {
1055 IFA_LOCK_SPIN(&ia->ia_ifa);
1056 ip->ip_src = IA_SIN(ia)->sin_addr;
1057 IFA_UNLOCK(&ia->ia_ifa);
1058 #if IPFIREWALL_FORWARD
1059 /*
1060 * Keep note that we did this - if the firewall changes
1061 * the next-hop, our interface may change, changing the
1062 * default source IP. It's a shame so much effort happens
1063 * twice. Oh well.
1064 */
1065 ipobf.fwd_rewrite_src = TRUE;
1066 #endif /* IPFIREWALL_FORWARD */
1067 }
1068
1069 /*
1070 * Look for broadcast address and
1071 * and verify user is allowed to send
1072 * such a packet.
1073 */
1074 if (ipobf.isbroadcast) {
1075 if (!(ifp->if_flags & IFF_BROADCAST)) {
1076 error = EADDRNOTAVAIL;
1077 goto bad;
1078 }
1079 if (!(flags & IP_ALLOWBROADCAST)) {
1080 error = EACCES;
1081 goto bad;
1082 }
1083 /* don't allow broadcast messages to be fragmented */
1084 if ((u_short)ip->ip_len > ifp->if_mtu) {
1085 error = EMSGSIZE;
1086 goto bad;
1087 }
1088 m->m_flags |= M_BCAST;
1089 } else {
1090 m->m_flags &= ~M_BCAST;
1091 }
1092
1093 sendit:
1094 #if PF
1095 /* Invoke outbound packet filter */
1096 if (PF_IS_ENABLED) {
1097 int rc;
1098
1099 m0 = m; /* Save for later */
1100 #if DUMMYNET
1101 args.fwa_m = m;
1102 args.fwa_next_hop = dst;
1103 args.fwa_oif = ifp;
1104 args.fwa_ro = ro;
1105 args.fwa_dst = dst;
1106 args.fwa_oflags = flags;
1107 if (flags & IP_OUTARGS)
1108 args.fwa_ipoa = ipoa;
1109 rc = pf_af_hook(ifp, mppn, &m, AF_INET, FALSE, &args);
1110 #else /* DUMMYNET */
1111 rc = pf_af_hook(ifp, mppn, &m, AF_INET, FALSE, NULL);
1112 #endif /* DUMMYNET */
1113 if (rc != 0 || m == NULL) {
1114 /* Move to the next packet */
1115 m = *mppn;
1116
1117 /* Skip ahead if first packet in list got dropped */
1118 if (packetlist == m0)
1119 packetlist = m;
1120
1121 if (m != NULL) {
1122 m0 = m;
1123 /* Next packet in the chain */
1124 goto loopit;
1125 } else if (packetlist != NULL) {
1126 /* No more packet; send down the chain */
1127 goto sendchain;
1128 }
1129 /* Nothing left; we're done */
1130 goto done;
1131 }
1132 m0 = m;
1133 ip = mtod(m, struct ip *);
1134 pkt_dst = ip->ip_dst;
1135 hlen = IP_VHL_HL(ip->ip_vhl) << 2;
1136 }
1137 #endif /* PF */
1138 /*
1139 * Force IP TTL to 255 following draft-ietf-zeroconf-ipv4-linklocal.txt
1140 */
1141 if (IN_LINKLOCAL(ntohl(ip->ip_src.s_addr)) ||
1142 IN_LINKLOCAL(ntohl(ip->ip_dst.s_addr))) {
1143 ip_linklocal_stat.iplls_out_total++;
1144 if (ip->ip_ttl != MAXTTL) {
1145 ip_linklocal_stat.iplls_out_badttl++;
1146 ip->ip_ttl = MAXTTL;
1147 }
1148 }
1149
1150 if (!ipobf.didfilter && !TAILQ_EMPTY(&ipv4_filters)) {
1151 struct ipfilter *filter;
1152 int seen = (inject_filter_ref == NULL);
1153 ipf_pktopts.ippo_flags &= ~IPPOF_MCAST_OPTS;
1154
1155 /*
1156 * Check that a TSO frame isn't passed to a filter.
1157 * This could happen if a filter is inserted while
1158 * TCP is sending the TSO packet.
1159 */
1160 if (m->m_pkthdr.csum_flags & CSUM_TSO_IPV4) {
1161 error = EMSGSIZE;
1162 goto bad;
1163 }
1164
1165 ipf_ref();
1166
1167 /* 4135317 - always pass network byte order to filter */
1168 #if BYTE_ORDER != BIG_ENDIAN
1169 HTONS(ip->ip_len);
1170 HTONS(ip->ip_off);
1171 #endif
1172 TAILQ_FOREACH(filter, &ipv4_filters, ipf_link) {
1173 if (seen == 0) {
1174 if ((struct ipfilter *)inject_filter_ref ==
1175 filter)
1176 seen = 1;
1177 } else if (filter->ipf_filter.ipf_output) {
1178 errno_t result;
1179 result = filter->ipf_filter.
1180 ipf_output(filter->ipf_filter.cookie,
1181 (mbuf_t *)&m, ippo);
1182 if (result == EJUSTRETURN) {
1183 ipf_unref();
1184 goto done;
1185 }
1186 if (result != 0) {
1187 ipf_unref();
1188 goto bad;
1189 }
1190 }
1191 }
1192 /* set back to host byte order */
1193 ip = mtod(m, struct ip *);
1194 #if BYTE_ORDER != BIG_ENDIAN
1195 NTOHS(ip->ip_len);
1196 NTOHS(ip->ip_off);
1197 #endif
1198 ipf_unref();
1199 }
1200
1201 #if NECP
1202 /* Process Network Extension Policy. Will Pass, Drop, or Rebind packet. */
1203 necp_matched_policy_id = necp_ip_output_find_policy_match (m,
1204 flags, (flags & IP_OUTARGS) ? ipoa : NULL, &necp_result, &necp_result_parameter);
1205 if (necp_matched_policy_id) {
1206 necp_mark_packet_from_ip(m, necp_matched_policy_id);
1207 switch (necp_result) {
1208 case NECP_KERNEL_POLICY_RESULT_PASS:
1209 /* Check if the interface is allowed */
1210 if (!necp_packet_is_allowed_over_interface(m, ifp)) {
1211 error = EHOSTUNREACH;
1212 goto bad;
1213 }
1214 goto skip_ipsec;
1215 case NECP_KERNEL_POLICY_RESULT_DROP:
1216 case NECP_KERNEL_POLICY_RESULT_SOCKET_DIVERT:
1217 /* Flow divert packets should be blocked at the IP layer */
1218 error = EHOSTUNREACH;
1219 goto bad;
1220 case NECP_KERNEL_POLICY_RESULT_IP_TUNNEL: {
1221 /* Verify that the packet is being routed to the tunnel */
1222 struct ifnet *policy_ifp = necp_get_ifnet_from_result_parameter(&necp_result_parameter);
1223 if (policy_ifp == ifp) {
1224 /* Check if the interface is allowed */
1225 if (!necp_packet_is_allowed_over_interface(m, ifp)) {
1226 error = EHOSTUNREACH;
1227 goto bad;
1228 }
1229 goto skip_ipsec;
1230 } else {
1231 if (necp_packet_can_rebind_to_ifnet(m, policy_ifp, &necp_route, AF_INET)) {
1232 /* Check if the interface is allowed */
1233 if (!necp_packet_is_allowed_over_interface(m, policy_ifp)) {
1234 error = EHOSTUNREACH;
1235 goto bad;
1236 }
1237
1238 /* Set ifp to the tunnel interface, since it is compatible with the packet */
1239 ifp = policy_ifp;
1240 ro = &necp_route;
1241 goto skip_ipsec;
1242 } else {
1243 error = ENETUNREACH;
1244 goto bad;
1245 }
1246 }
1247 }
1248 default:
1249 break;
1250 }
1251 }
1252 /* Catch-all to check if the interface is allowed */
1253 if (!necp_packet_is_allowed_over_interface(m, ifp)) {
1254 error = EHOSTUNREACH;
1255 goto bad;
1256 }
1257 #endif /* NECP */
1258
1259 #if IPSEC
1260 if (ipsec_bypass != 0 || (flags & IP_NOIPSEC))
1261 goto skip_ipsec;
1262
1263 KERNEL_DEBUG(DBG_FNC_IPSEC4_OUTPUT | DBG_FUNC_START, 0, 0, 0, 0, 0);
1264
1265 if (sp == NULL) {
1266 /* get SP for this packet */
1267 if (so != NULL) {
1268 sp = ipsec4_getpolicybysock(m, IPSEC_DIR_OUTBOUND,
1269 so, &error);
1270 } else {
1271 sp = ipsec4_getpolicybyaddr(m, IPSEC_DIR_OUTBOUND,
1272 flags, &error);
1273 }
1274 if (sp == NULL) {
1275 IPSEC_STAT_INCREMENT(ipsecstat.out_inval);
1276 KERNEL_DEBUG(DBG_FNC_IPSEC4_OUTPUT | DBG_FUNC_END,
1277 0, 0, 0, 0, 0);
1278 goto bad;
1279 }
1280 }
1281
1282 error = 0;
1283
1284 /* check policy */
1285 switch (sp->policy) {
1286 case IPSEC_POLICY_DISCARD:
1287 case IPSEC_POLICY_GENERATE:
1288 /*
1289 * This packet is just discarded.
1290 */
1291 IPSEC_STAT_INCREMENT(ipsecstat.out_polvio);
1292 KERNEL_DEBUG(DBG_FNC_IPSEC4_OUTPUT | DBG_FUNC_END,
1293 1, 0, 0, 0, 0);
1294 goto bad;
1295
1296 case IPSEC_POLICY_BYPASS:
1297 case IPSEC_POLICY_NONE:
1298 /* no need to do IPsec. */
1299 KERNEL_DEBUG(DBG_FNC_IPSEC4_OUTPUT | DBG_FUNC_END,
1300 2, 0, 0, 0, 0);
1301 goto skip_ipsec;
1302
1303 case IPSEC_POLICY_IPSEC:
1304 if (sp->req == NULL) {
1305 /* acquire a policy */
1306 error = key_spdacquire(sp);
1307 KERNEL_DEBUG(DBG_FNC_IPSEC4_OUTPUT | DBG_FUNC_END,
1308 3, 0, 0, 0, 0);
1309 goto bad;
1310 }
1311 if (sp->ipsec_if) {
1312 /* Verify the redirect to ipsec interface */
1313 if (sp->ipsec_if == ifp) {
1314 goto skip_ipsec;
1315 }
1316 goto bad;
1317 }
1318 break;
1319
1320 case IPSEC_POLICY_ENTRUST:
1321 default:
1322 printf("ip_output: Invalid policy found. %d\n", sp->policy);
1323 }
1324 {
1325 ipsec_state.m = m;
1326 if (flags & IP_ROUTETOIF) {
1327 bzero(&ipsec_state.ro, sizeof (ipsec_state.ro));
1328 } else {
1329 route_copyout(&ipsec_state.ro, ro, sizeof (ipsec_state.ro));
1330 }
1331 ipsec_state.dst = SA(dst);
1332
1333 ip->ip_sum = 0;
1334
1335 /*
1336 * XXX
1337 * delayed checksums are not currently compatible with IPsec
1338 */
1339 if (m->m_pkthdr.csum_flags & CSUM_DELAY_DATA)
1340 in_delayed_cksum(m);
1341
1342 #if BYTE_ORDER != BIG_ENDIAN
1343 HTONS(ip->ip_len);
1344 HTONS(ip->ip_off);
1345 #endif
1346
1347 DTRACE_IP6(send, struct mbuf *, m, struct inpcb *, NULL,
1348 struct ip *, ip, struct ifnet *, ifp,
1349 struct ip *, ip, struct ip6_hdr *, NULL);
1350
1351 error = ipsec4_output(&ipsec_state, sp, flags);
1352 if (ipsec_state.tunneled == 6) {
1353 m0 = m = NULL;
1354 error = 0;
1355 goto bad;
1356 }
1357
1358 m0 = m = ipsec_state.m;
1359
1360 #if DUMMYNET
1361 /*
1362 * If we're about to use the route in ipsec_state
1363 * and this came from dummynet, cleaup now.
1364 */
1365 if (ro == &saved_route &&
1366 (!(flags & IP_ROUTETOIF) || ipsec_state.tunneled))
1367 ROUTE_RELEASE(ro);
1368 #endif /* DUMMYNET */
1369
1370 if (flags & IP_ROUTETOIF) {
1371 /*
1372 * if we have tunnel mode SA, we may need to ignore
1373 * IP_ROUTETOIF.
1374 */
1375 if (ipsec_state.tunneled) {
1376 flags &= ~IP_ROUTETOIF;
1377 ro = &ipsec_state.ro;
1378 }
1379 } else {
1380 ro = &ipsec_state.ro;
1381 }
1382 dst = SIN(ipsec_state.dst);
1383 if (error) {
1384 /* mbuf is already reclaimed in ipsec4_output. */
1385 m0 = NULL;
1386 switch (error) {
1387 case EHOSTUNREACH:
1388 case ENETUNREACH:
1389 case EMSGSIZE:
1390 case ENOBUFS:
1391 case ENOMEM:
1392 break;
1393 default:
1394 printf("ip4_output (ipsec): error code %d\n", error);
1395 /* FALLTHRU */
1396 case ENOENT:
1397 /* don't show these error codes to the user */
1398 error = 0;
1399 break;
1400 }
1401 KERNEL_DEBUG(DBG_FNC_IPSEC4_OUTPUT | DBG_FUNC_END,
1402 4, 0, 0, 0, 0);
1403 goto bad;
1404 }
1405 }
1406
1407 /* be sure to update variables that are affected by ipsec4_output() */
1408 ip = mtod(m, struct ip *);
1409
1410 #ifdef _IP_VHL
1411 hlen = IP_VHL_HL(ip->ip_vhl) << 2;
1412 #else /* !_IP_VHL */
1413 hlen = ip->ip_hl << 2;
1414 #endif /* !_IP_VHL */
1415 /* Check that there wasn't a route change and src is still valid */
1416 if (ROUTE_UNUSABLE(ro)) {
1417 ROUTE_RELEASE(ro);
1418 VERIFY(src_ia == NULL);
1419 if (ip->ip_src.s_addr != INADDR_ANY &&
1420 !(flags & (IP_ROUTETOIF | IP_FORWARDING)) &&
1421 (src_ia = ifa_foraddr(ip->ip_src.s_addr)) == NULL) {
1422 error = EADDRNOTAVAIL;
1423 KERNEL_DEBUG(DBG_FNC_IPSEC4_OUTPUT | DBG_FUNC_END,
1424 5, 0, 0, 0, 0);
1425 goto bad;
1426 }
1427 if (src_ia != NULL) {
1428 IFA_REMREF(&src_ia->ia_ifa);
1429 src_ia = NULL;
1430 }
1431 }
1432
1433 if (ro->ro_rt == NULL) {
1434 if (!(flags & IP_ROUTETOIF)) {
1435 printf("%s: can't update route after "
1436 "IPsec processing\n", __func__);
1437 error = EHOSTUNREACH; /* XXX */
1438 KERNEL_DEBUG(DBG_FNC_IPSEC4_OUTPUT | DBG_FUNC_END,
1439 6, 0, 0, 0, 0);
1440 goto bad;
1441 }
1442 } else {
1443 if (ia != NULL)
1444 IFA_REMREF(&ia->ia_ifa);
1445 RT_LOCK_SPIN(ro->ro_rt);
1446 ia = ifatoia(ro->ro_rt->rt_ifa);
1447 if (ia != NULL) {
1448 /* Become a regular mutex */
1449 RT_CONVERT_LOCK(ro->ro_rt);
1450 IFA_ADDREF(&ia->ia_ifa);
1451 }
1452 ifp = ro->ro_rt->rt_ifp;
1453 RT_UNLOCK(ro->ro_rt);
1454 }
1455
1456 /* make it flipped, again. */
1457 #if BYTE_ORDER != BIG_ENDIAN
1458 NTOHS(ip->ip_len);
1459 NTOHS(ip->ip_off);
1460 #endif
1461 KERNEL_DEBUG(DBG_FNC_IPSEC4_OUTPUT | DBG_FUNC_END,
1462 7, 0xff, 0xff, 0xff, 0xff);
1463
1464 /* Pass to filters again */
1465 if (!TAILQ_EMPTY(&ipv4_filters)) {
1466 struct ipfilter *filter;
1467
1468 ipf_pktopts.ippo_flags &= ~IPPOF_MCAST_OPTS;
1469
1470 /*
1471 * Check that a TSO frame isn't passed to a filter.
1472 * This could happen if a filter is inserted while
1473 * TCP is sending the TSO packet.
1474 */
1475 if (m->m_pkthdr.csum_flags & CSUM_TSO_IPV4) {
1476 error = EMSGSIZE;
1477 goto bad;
1478 }
1479
1480 ipf_ref();
1481
1482 /* 4135317 - always pass network byte order to filter */
1483 #if BYTE_ORDER != BIG_ENDIAN
1484 HTONS(ip->ip_len);
1485 HTONS(ip->ip_off);
1486 #endif
1487 TAILQ_FOREACH(filter, &ipv4_filters, ipf_link) {
1488 if (filter->ipf_filter.ipf_output) {
1489 errno_t result;
1490 result = filter->ipf_filter.
1491 ipf_output(filter->ipf_filter.cookie,
1492 (mbuf_t *)&m, ippo);
1493 if (result == EJUSTRETURN) {
1494 ipf_unref();
1495 goto done;
1496 }
1497 if (result != 0) {
1498 ipf_unref();
1499 goto bad;
1500 }
1501 }
1502 }
1503 /* set back to host byte order */
1504 ip = mtod(m, struct ip *);
1505 #if BYTE_ORDER != BIG_ENDIAN
1506 NTOHS(ip->ip_len);
1507 NTOHS(ip->ip_off);
1508 #endif
1509 ipf_unref();
1510 }
1511 skip_ipsec:
1512 #endif /* IPSEC */
1513
1514 #if IPFIREWALL
1515 /*
1516 * Check with the firewall...
1517 * but not if we are already being fwd'd from a firewall.
1518 */
1519 if (fw_enable && IPFW_LOADED && !args.fwa_next_hop) {
1520 struct sockaddr_in *old = dst;
1521
1522 args.fwa_m = m;
1523 args.fwa_next_hop = dst;
1524 args.fwa_oif = ifp;
1525 ipfwoff = ip_fw_chk_ptr(&args);
1526 m = args.fwa_m;
1527 dst = args.fwa_next_hop;
1528
1529 /*
1530 * On return we must do the following:
1531 * IP_FW_PORT_DENY_FLAG -> drop the pkt (XXX new)
1532 * 1<=off<= 0xffff -> DIVERT
1533 * (off & IP_FW_PORT_DYNT_FLAG) -> send to a DUMMYNET pipe
1534 * (off & IP_FW_PORT_TEE_FLAG) -> TEE the packet
1535 * dst != old -> IPFIREWALL_FORWARD
1536 * off==0, dst==old -> accept
1537 * If some of the above modules is not compiled in, then
1538 * we should't have to check the corresponding condition
1539 * (because the ipfw control socket should not accept
1540 * unsupported rules), but better play safe and drop
1541 * packets in case of doubt.
1542 */
1543 m0 = m;
1544 if ((ipfwoff & IP_FW_PORT_DENY_FLAG) || m == NULL) {
1545 if (m)
1546 m_freem(m);
1547 error = EACCES;
1548 goto done;
1549 }
1550 ip = mtod(m, struct ip *);
1551
1552 if (ipfwoff == 0 && dst == old) { /* common case */
1553 goto pass;
1554 }
1555 #if DUMMYNET
1556 if (DUMMYNET_LOADED && (ipfwoff & IP_FW_PORT_DYNT_FLAG) != 0) {
1557 /*
1558 * pass the pkt to dummynet. Need to include
1559 * pipe number, m, ifp, ro, dst because these are
1560 * not recomputed in the next pass.
1561 * All other parameters have been already used and
1562 * so they are not needed anymore.
1563 * XXX note: if the ifp or ro entry are deleted
1564 * while a pkt is in dummynet, we are in trouble!
1565 */
1566 args.fwa_ro = ro;
1567 args.fwa_dst = dst;
1568 args.fwa_oflags = flags;
1569 if (flags & IP_OUTARGS)
1570 args.fwa_ipoa = ipoa;
1571
1572 error = ip_dn_io_ptr(m, ipfwoff & 0xffff, DN_TO_IP_OUT,
1573 &args, DN_CLIENT_IPFW);
1574 goto done;
1575 }
1576 #endif /* DUMMYNET */
1577 #if IPDIVERT
1578 if (ipfwoff != 0 && (ipfwoff & IP_FW_PORT_DYNT_FLAG) == 0) {
1579 struct mbuf *clone = NULL;
1580
1581 /* Clone packet if we're doing a 'tee' */
1582 if ((ipfwoff & IP_FW_PORT_TEE_FLAG) != 0)
1583 clone = m_dup(m, M_DONTWAIT);
1584 /*
1585 * XXX
1586 * delayed checksums are not currently compatible
1587 * with divert sockets.
1588 */
1589 if (m->m_pkthdr.csum_flags & CSUM_DELAY_DATA)
1590 in_delayed_cksum(m);
1591
1592 /* Restore packet header fields to original values */
1593
1594 #if BYTE_ORDER != BIG_ENDIAN
1595 HTONS(ip->ip_len);
1596 HTONS(ip->ip_off);
1597 #endif
1598
1599 /* Deliver packet to divert input routine */
1600 divert_packet(m, 0, ipfwoff & 0xffff,
1601 args.fwa_divert_rule);
1602
1603 /* If 'tee', continue with original packet */
1604 if (clone != NULL) {
1605 m0 = m = clone;
1606 ip = mtod(m, struct ip *);
1607 goto pass;
1608 }
1609 goto done;
1610 }
1611 #endif /* IPDIVERT */
1612 #if IPFIREWALL_FORWARD
1613 /*
1614 * Here we check dst to make sure it's directly reachable on
1615 * the interface we previously thought it was.
1616 * If it isn't (which may be likely in some situations) we have
1617 * to re-route it (ie, find a route for the next-hop and the
1618 * associated interface) and set them here. This is nested
1619 * forwarding which in most cases is undesirable, except where
1620 * such control is nigh impossible. So we do it here.
1621 * And I'm babbling.
1622 */
1623 if (ipfwoff == 0 && old != dst) {
1624 struct in_ifaddr *ia_fw;
1625 struct route *ro_fwd = &sro_fwd;
1626
1627 #if IPFIREWALL_FORWARD_DEBUG
1628 printf("IPFIREWALL_FORWARD: New dst ip: ");
1629 print_ip(dst->sin_addr);
1630 printf("\n");
1631 #endif /* IPFIREWALL_FORWARD_DEBUG */
1632 /*
1633 * We need to figure out if we have been forwarded
1634 * to a local socket. If so then we should somehow
1635 * "loop back" to ip_input, and get directed to the
1636 * PCB as if we had received this packet. This is
1637 * because it may be dificult to identify the packets
1638 * you want to forward until they are being output
1639 * and have selected an interface. (e.g. locally
1640 * initiated packets) If we used the loopback inteface,
1641 * we would not be able to control what happens
1642 * as the packet runs through ip_input() as
1643 * it is done through a ISR.
1644 */
1645 lck_rw_lock_shared(in_ifaddr_rwlock);
1646 TAILQ_FOREACH(ia_fw, &in_ifaddrhead, ia_link) {
1647 /*
1648 * If the addr to forward to is one
1649 * of ours, we pretend to
1650 * be the destination for this packet.
1651 */
1652 IFA_LOCK_SPIN(&ia_fw->ia_ifa);
1653 if (IA_SIN(ia_fw)->sin_addr.s_addr ==
1654 dst->sin_addr.s_addr) {
1655 IFA_UNLOCK(&ia_fw->ia_ifa);
1656 break;
1657 }
1658 IFA_UNLOCK(&ia_fw->ia_ifa);
1659 }
1660 lck_rw_done(in_ifaddr_rwlock);
1661 if (ia_fw) {
1662 /* tell ip_input "dont filter" */
1663 struct m_tag *fwd_tag;
1664 struct ip_fwd_tag *ipfwd_tag;
1665
1666 fwd_tag = m_tag_create(KERNEL_MODULE_TAG_ID,
1667 KERNEL_TAG_TYPE_IPFORWARD,
1668 sizeof (*ipfwd_tag), M_NOWAIT, m);
1669 if (fwd_tag == NULL) {
1670 error = ENOBUFS;
1671 goto bad;
1672 }
1673
1674 ipfwd_tag = (struct ip_fwd_tag *)(fwd_tag+1);
1675 ipfwd_tag->next_hop = args.fwa_next_hop;
1676
1677 m_tag_prepend(m, fwd_tag);
1678
1679 if (m->m_pkthdr.rcvif == NULL)
1680 m->m_pkthdr.rcvif = lo_ifp;
1681
1682 #if BYTE_ORDER != BIG_ENDIAN
1683 HTONS(ip->ip_len);
1684 HTONS(ip->ip_off);
1685 #endif
1686 mbuf_outbound_finalize(m, PF_INET, 0);
1687
1688 /*
1689 * we need to call dlil_output to run filters
1690 * and resync to avoid recursion loops.
1691 */
1692 if (lo_ifp) {
1693 dlil_output(lo_ifp, PF_INET, m, NULL,
1694 SA(dst), 0, adv);
1695 } else {
1696 printf("%s: no loopback ifp for "
1697 "forwarding!!!\n", __func__);
1698 }
1699 goto done;
1700 }
1701 /*
1702 * Some of the logic for this was nicked from above.
1703 *
1704 * This rewrites the cached route in a local PCB.
1705 * Is this what we want to do?
1706 */
1707 ROUTE_RELEASE(ro_fwd);
1708 bcopy(dst, &ro_fwd->ro_dst, sizeof (*dst));
1709
1710 rtalloc_ign(ro_fwd, RTF_PRCLONING, false);
1711
1712 if (ro_fwd->ro_rt == NULL) {
1713 OSAddAtomic(1, &ipstat.ips_noroute);
1714 error = EHOSTUNREACH;
1715 goto bad;
1716 }
1717
1718 RT_LOCK_SPIN(ro_fwd->ro_rt);
1719 ia_fw = ifatoia(ro_fwd->ro_rt->rt_ifa);
1720 if (ia_fw != NULL) {
1721 /* Become a regular mutex */
1722 RT_CONVERT_LOCK(ro_fwd->ro_rt);
1723 IFA_ADDREF(&ia_fw->ia_ifa);
1724 }
1725 ifp = ro_fwd->ro_rt->rt_ifp;
1726 ro_fwd->ro_rt->rt_use++;
1727 if (ro_fwd->ro_rt->rt_flags & RTF_GATEWAY)
1728 dst = SIN(ro_fwd->ro_rt->rt_gateway);
1729 if (ro_fwd->ro_rt->rt_flags & RTF_HOST) {
1730 /* double negation needed for bool bit field */
1731 ipobf.isbroadcast =
1732 !!(ro_fwd->ro_rt->rt_flags & RTF_BROADCAST);
1733 } else {
1734 /* Become a regular mutex */
1735 RT_CONVERT_LOCK(ro_fwd->ro_rt);
1736 ipobf.isbroadcast =
1737 in_broadcast(dst->sin_addr, ifp);
1738 }
1739 RT_UNLOCK(ro_fwd->ro_rt);
1740 ROUTE_RELEASE(ro);
1741 ro->ro_rt = ro_fwd->ro_rt;
1742 ro_fwd->ro_rt = NULL;
1743 dst = SIN(&ro_fwd->ro_dst);
1744
1745 /*
1746 * If we added a default src ip earlier,
1747 * which would have been gotten from the-then
1748 * interface, do it again, from the new one.
1749 */
1750 if (ia_fw != NULL) {
1751 if (ipobf.fwd_rewrite_src) {
1752 IFA_LOCK_SPIN(&ia_fw->ia_ifa);
1753 ip->ip_src = IA_SIN(ia_fw)->sin_addr;
1754 IFA_UNLOCK(&ia_fw->ia_ifa);
1755 }
1756 IFA_REMREF(&ia_fw->ia_ifa);
1757 }
1758 goto pass;
1759 }
1760 #endif /* IPFIREWALL_FORWARD */
1761 /*
1762 * if we get here, none of the above matches, and
1763 * we have to drop the pkt
1764 */
1765 m_freem(m);
1766 error = EACCES; /* not sure this is the right error msg */
1767 goto done;
1768 }
1769
1770 pass:
1771 #endif /* IPFIREWALL */
1772
1773 /* 127/8 must not appear on wire - RFC1122 */
1774 if (!(ifp->if_flags & IFF_LOOPBACK) &&
1775 ((ntohl(ip->ip_src.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET ||
1776 (ntohl(ip->ip_dst.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET)) {
1777 OSAddAtomic(1, &ipstat.ips_badaddr);
1778 error = EADDRNOTAVAIL;
1779 goto bad;
1780 }
1781
1782 if (ipoa != NULL) {
1783 u_int8_t dscp = ip->ip_tos >> IPTOS_DSCP_SHIFT;
1784
1785 error = set_packet_qos(m, ifp,
1786 ipoa->ipoa_flags & IPOAF_QOSMARKING_ALLOWED ? TRUE : FALSE,
1787 ipoa->ipoa_sotc, ipoa->ipoa_netsvctype, &dscp);
1788 if (error == 0) {
1789 ip->ip_tos &= IPTOS_ECN_MASK;
1790 ip->ip_tos |= dscp << IPTOS_DSCP_SHIFT;
1791 } else {
1792 printf("%s if_dscp_for_mbuf() error %d\n", __func__, error);
1793 error = 0;
1794 }
1795 }
1796
1797 /*
1798 * Some Wi-Fi AP implementations do not correctly handle multicast IP
1799 * packets with DSCP bits set -- see radr://9331522 -- so as a
1800 * workaround we clear the DSCP bits and set the service class to BE
1801 */
1802 if (IN_MULTICAST(ntohl(pkt_dst.s_addr)) && IFNET_IS_WIFI_INFRA(ifp)) {
1803 ip->ip_tos &= IPTOS_ECN_MASK;
1804 mbuf_set_service_class(m, MBUF_SC_BE);
1805 }
1806
1807 ip_output_checksum(ifp, m, (IP_VHL_HL(ip->ip_vhl) << 2),
1808 ip->ip_len, &sw_csum);
1809
1810 /*
1811 * If small enough for interface, or the interface will take
1812 * care of the fragmentation for us, can just send directly.
1813 */
1814 if ((u_short)ip->ip_len <= ifp->if_mtu || TSO_IPV4_OK(ifp, m) ||
1815 (!(ip->ip_off & IP_DF) && (ifp->if_hwassist & CSUM_FRAGMENT))) {
1816 #if BYTE_ORDER != BIG_ENDIAN
1817 HTONS(ip->ip_len);
1818 HTONS(ip->ip_off);
1819 #endif
1820
1821 ip->ip_sum = 0;
1822 if (sw_csum & CSUM_DELAY_IP) {
1823 ip->ip_sum = ip_cksum_hdr_out(m, hlen);
1824 sw_csum &= ~CSUM_DELAY_IP;
1825 m->m_pkthdr.csum_flags &= ~CSUM_DELAY_IP;
1826 }
1827
1828 #if IPSEC
1829 /* clean ipsec history once it goes out of the node */
1830 if (ipsec_bypass == 0 && !(flags & IP_NOIPSEC))
1831 ipsec_delaux(m);
1832 #endif /* IPSEC */
1833 if ((m->m_pkthdr.csum_flags & CSUM_TSO_IPV4) &&
1834 (m->m_pkthdr.tso_segsz > 0))
1835 scnt += m->m_pkthdr.len / m->m_pkthdr.tso_segsz;
1836 else
1837 scnt++;
1838
1839 if (packetchain == 0) {
1840 if (ro->ro_rt != NULL && nstat_collect)
1841 nstat_route_tx(ro->ro_rt, scnt,
1842 m->m_pkthdr.len, 0);
1843
1844 error = dlil_output(ifp, PF_INET, m, ro->ro_rt,
1845 SA(dst), 0, adv);
1846 if (dlil_verbose && error) {
1847 printf("dlil_output error on interface %s: %d\n",
1848 ifp->if_xname, error);
1849 }
1850 scnt = 0;
1851 goto done;
1852 } else {
1853 /*
1854 * packet chaining allows us to reuse the
1855 * route for all packets
1856 */
1857 bytecnt += m->m_pkthdr.len;
1858 mppn = &m->m_nextpkt;
1859 m = m->m_nextpkt;
1860 if (m == NULL) {
1861 #if PF
1862 sendchain:
1863 #endif /* PF */
1864 if (pktcnt > ip_maxchainsent)
1865 ip_maxchainsent = pktcnt;
1866 if (ro->ro_rt != NULL && nstat_collect)
1867 nstat_route_tx(ro->ro_rt, scnt,
1868 bytecnt, 0);
1869
1870 error = dlil_output(ifp, PF_INET, packetlist,
1871 ro->ro_rt, SA(dst), 0, adv);
1872 if (dlil_verbose && error) {
1873 printf("dlil_output error on interface %s: %d\n",
1874 ifp->if_xname, error);
1875 }
1876 pktcnt = 0;
1877 scnt = 0;
1878 bytecnt = 0;
1879 goto done;
1880
1881 }
1882 m0 = m;
1883 pktcnt++;
1884 goto loopit;
1885 }
1886 }
1887 /*
1888 * Too large for interface; fragment if possible.
1889 * Must be able to put at least 8 bytes per fragment.
1890 * Balk when DF bit is set or the interface didn't support TSO.
1891 */
1892 if ((ip->ip_off & IP_DF) || pktcnt > 0 ||
1893 (m->m_pkthdr.csum_flags & CSUM_TSO_IPV4)) {
1894 error = EMSGSIZE;
1895 /*
1896 * This case can happen if the user changed the MTU
1897 * of an interface after enabling IP on it. Because
1898 * most netifs don't keep track of routes pointing to
1899 * them, there is no way for one to update all its
1900 * routes when the MTU is changed.
1901 */
1902 if (ro->ro_rt) {
1903 RT_LOCK_SPIN(ro->ro_rt);
1904 if ((ro->ro_rt->rt_flags & (RTF_UP | RTF_HOST)) &&
1905 !(ro->ro_rt->rt_rmx.rmx_locks & RTV_MTU) &&
1906 (ro->ro_rt->rt_rmx.rmx_mtu > ifp->if_mtu)) {
1907 ro->ro_rt->rt_rmx.rmx_mtu = ifp->if_mtu;
1908 }
1909 RT_UNLOCK(ro->ro_rt);
1910 }
1911 if (pktcnt > 0) {
1912 m0 = packetlist;
1913 }
1914 OSAddAtomic(1, &ipstat.ips_cantfrag);
1915 goto bad;
1916 }
1917
1918 error = ip_fragment(m, ifp, ifp->if_mtu, sw_csum);
1919 if (error != 0) {
1920 m0 = m = NULL;
1921 goto bad;
1922 }
1923
1924 KERNEL_DEBUG(DBG_LAYER_END, ip->ip_dst.s_addr,
1925 ip->ip_src.s_addr, ip->ip_p, ip->ip_off, ip->ip_len);
1926
1927 for (m = m0; m; m = m0) {
1928 m0 = m->m_nextpkt;
1929 m->m_nextpkt = 0;
1930 #if IPSEC
1931 /* clean ipsec history once it goes out of the node */
1932 if (ipsec_bypass == 0 && !(flags & IP_NOIPSEC))
1933 ipsec_delaux(m);
1934 #endif /* IPSEC */
1935 if (error == 0) {
1936 if ((packetchain != 0) && (pktcnt > 0)) {
1937 panic("%s: mix of packet in packetlist is "
1938 "wrong=%p", __func__, packetlist);
1939 /* NOTREACHED */
1940 }
1941 if (ro->ro_rt != NULL && nstat_collect) {
1942 nstat_route_tx(ro->ro_rt, 1,
1943 m->m_pkthdr.len, 0);
1944 }
1945 error = dlil_output(ifp, PF_INET, m, ro->ro_rt,
1946 SA(dst), 0, adv);
1947 if (dlil_verbose && error) {
1948 printf("dlil_output error on interface %s: %d\n",
1949 ifp->if_xname, error);
1950 }
1951 } else {
1952 m_freem(m);
1953 }
1954 }
1955
1956 if (error == 0)
1957 OSAddAtomic(1, &ipstat.ips_fragmented);
1958
1959 done:
1960 if (ia != NULL) {
1961 IFA_REMREF(&ia->ia_ifa);
1962 ia = NULL;
1963 }
1964 #if IPSEC
1965 ROUTE_RELEASE(&ipsec_state.ro);
1966 if (sp != NULL) {
1967 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
1968 printf("DP ip_output call free SP:%x\n", sp));
1969 key_freesp(sp, KEY_SADB_UNLOCKED);
1970 }
1971 #endif /* IPSEC */
1972 #if NECP
1973 ROUTE_RELEASE(&necp_route);
1974 #endif /* NECP */
1975 #if DUMMYNET
1976 ROUTE_RELEASE(&saved_route);
1977 #endif /* DUMMYNET */
1978 #if IPFIREWALL_FORWARD
1979 ROUTE_RELEASE(&sro_fwd);
1980 #endif /* IPFIREWALL_FORWARD */
1981
1982 KERNEL_DEBUG(DBG_FNC_IP_OUTPUT | DBG_FUNC_END, error, 0, 0, 0, 0);
1983 if (ip_output_measure) {
1984 net_perf_measure_time(&net_perf, &start_tv, packets_processed);
1985 net_perf_histogram(&net_perf, packets_processed);
1986 }
1987 return (error);
1988 bad:
1989 if (pktcnt > 0)
1990 m0 = packetlist;
1991 m_freem_list(m0);
1992 goto done;
1993
1994 #undef ipsec_state
1995 #undef args
1996 #undef sro_fwd
1997 #undef saved_route
1998 #undef ipf_pktopts
1999 #undef IP_CHECK_RESTRICTIONS
2000 }
2001
/*
 * Fragment an IPv4 packet (host byte order ip_len/ip_off) into a chain of
 * mbuf packets linked via m_nextpkt, each no larger than 'mtu'.  The first
 * fragment reuses 'm' (trimmed in place); subsequent fragments get freshly
 * allocated headers with options copied per ip_optcopy().  On success the
 * chain hangs off m->m_nextpkt and 0 is returned; on failure the entire
 * chain (including the original mbuf) is freed and an errno is returned.
 *
 * 'sw_csum' carries the software-checksum work still owed on this packet;
 * if CSUM_DELAY_IP is set, each fragment's IP header checksum is computed
 * here in software.
 */
int
ip_fragment(struct mbuf *m, struct ifnet *ifp, unsigned long mtu, int sw_csum)
{
	struct ip *ip, *mhip;
	int len, hlen, mhlen, firstlen, off, error = 0;
	struct mbuf **mnext = &m->m_nextpkt, *m0;
	int nfrags = 1;

	ip = mtod(m, struct ip *);
#ifdef _IP_VHL
	hlen = IP_VHL_HL(ip->ip_vhl) << 2;
#else /* !_IP_VHL */
	hlen = ip->ip_hl << 2;
#endif /* !_IP_VHL */

	/*
	 * Payload bytes per fragment: MTU minus header, rounded down to a
	 * multiple of 8 (fragment offsets are in 8-byte units).  A usable
	 * fragment must carry at least 8 bytes of payload.
	 */
	firstlen = len = (mtu - hlen) &~ 7;
	if (len < 8) {
		m_freem(m);
		return (EMSGSIZE);
	}

	/*
	 * if the interface will not calculate checksums on
	 * fragmented packets, then do it here.
	 */
	if ((m->m_pkthdr.csum_flags & CSUM_DELAY_DATA) &&
	    !(ifp->if_hwassist & CSUM_IP_FRAGS))
		in_delayed_cksum(m);

	/*
	 * Loop through length of segment after first fragment,
	 * make new header and copy data of each part and link onto chain.
	 */
	m0 = m;
	mhlen = sizeof (struct ip);
	for (off = hlen + len; off < (u_short)ip->ip_len; off += len) {
		MGETHDR(m, M_DONTWAIT, MT_HEADER);	/* MAC-OK */
		if (m == NULL) {
			error = ENOBUFS;
			OSAddAtomic(1, &ipstat.ips_odropped);
			goto sendorfree;
		}
		m->m_flags |= (m0->m_flags & M_MCAST) | M_FRAG;
		/* leave room for a link-layer header to be prepended later */
		m->m_data += max_linkhdr;
		mhip = mtod(m, struct ip *);
		*mhip = *ip;
		if (hlen > sizeof (struct ip)) {
			/* copy only the options that must appear in fragments */
			mhlen = ip_optcopy(ip, mhip) + sizeof (struct ip);
			mhip->ip_vhl = IP_MAKE_VHL(IPVERSION, mhlen >> 2);
		}
		m->m_len = mhlen;
		/* offset in 8-byte units, preserving original offset/flags */
		mhip->ip_off = ((off - hlen) >> 3) + (ip->ip_off & ~IP_MF);
		if (ip->ip_off & IP_MF)
			mhip->ip_off |= IP_MF;
		if (off + len >= (u_short)ip->ip_len)
			len = (u_short)ip->ip_len - off;	/* last fragment: remainder */
		else
			mhip->ip_off |= IP_MF;	/* more fragments follow */
		mhip->ip_len = htons((u_short)(len + mhlen));
		/* share (don't copy) the payload data with the original chain */
		m->m_next = m_copy(m0, off, len);
		if (m->m_next == NULL) {
			(void) m_free(m);
			error = ENOBUFS;	/* ??? */
			OSAddAtomic(1, &ipstat.ips_odropped);
			goto sendorfree;
		}
		m->m_pkthdr.len = mhlen + len;
		m->m_pkthdr.rcvif = NULL;
		m->m_pkthdr.csum_flags = m0->m_pkthdr.csum_flags;

		/* propagate traffic classification and PF tags to the fragment */
		M_COPY_CLASSIFIER(m, m0);
		M_COPY_PFTAG(m, m0);

#if CONFIG_MACF_NET
		mac_netinet_fragment(m0, m);
#endif /* CONFIG_MACF_NET */

#if BYTE_ORDER != BIG_ENDIAN
		HTONS(mhip->ip_off);
#endif

		mhip->ip_sum = 0;
		if (sw_csum & CSUM_DELAY_IP) {
			mhip->ip_sum = ip_cksum_hdr_out(m, mhlen);
			m->m_pkthdr.csum_flags &= ~CSUM_DELAY_IP;
		}
		*mnext = m;
		mnext = &m->m_nextpkt;
		nfrags++;
	}
	OSAddAtomic(nfrags, &ipstat.ips_ofragments);

	/* set first/last markers for fragment chain */
	m->m_flags |= M_LASTFRAG;
	m0->m_flags |= M_FIRSTFRAG | M_FRAG;
	m0->m_pkthdr.csum_data = nfrags;

	/*
	 * Update first fragment by trimming what's been copied out
	 * and updating header, then send each fragment (in order).
	 */
	m = m0;
	m_adj(m, hlen + firstlen - (u_short)ip->ip_len);
	m->m_pkthdr.len = hlen + firstlen;
	ip->ip_len = htons((u_short)m->m_pkthdr.len);
	ip->ip_off |= IP_MF;

#if BYTE_ORDER != BIG_ENDIAN
	HTONS(ip->ip_off);
#endif

	ip->ip_sum = 0;
	if (sw_csum & CSUM_DELAY_IP) {
		ip->ip_sum = ip_cksum_hdr_out(m, hlen);
		m->m_pkthdr.csum_flags &= ~CSUM_DELAY_IP;
	}
sendorfree:
	/* on any error, reclaim the whole fragment chain built so far */
	if (error)
		m_freem_list(m0);

	return (error);
}
2124
2125 static void
2126 ip_out_cksum_stats(int proto, u_int32_t len)
2127 {
2128 switch (proto) {
2129 case IPPROTO_TCP:
2130 tcp_out_cksum_stats(len);
2131 break;
2132 case IPPROTO_UDP:
2133 udp_out_cksum_stats(len);
2134 break;
2135 default:
2136 /* keep only TCP or UDP stats for now */
2137 break;
2138 }
2139 }
2140
2141 /*
2142 * Process a delayed payload checksum calculation (outbound path.)
2143 *
2144 * hoff is the number of bytes beyond the mbuf data pointer which
2145 * points to the IP header.
2146 *
2147 * Returns a bitmask representing all the work done in software.
2148 */
uint32_t
in_finalize_cksum(struct mbuf *m, uint32_t hoff, uint32_t csum_flags)
{
	/* scratch copy of the IP header; sized for the max header (15 << 2) */
	unsigned char buf[15 << 2] __attribute__((aligned(8)));
	struct ip *ip;
	uint32_t offset, _hlen, mlen, hlen, len, sw_csum;
	uint16_t csum, ip_len;

	_CASSERT(sizeof (csum) == sizeof (uint16_t));
	VERIFY(m->m_flags & M_PKTHDR);

	/* intersect the requested work with what the packet still needs */
	sw_csum = (csum_flags & m->m_pkthdr.csum_flags);

	if ((sw_csum &= (CSUM_DELAY_IP | CSUM_DELAY_DATA)) == 0)
		goto done;

	mlen = m->m_pkthdr.len;				/* total mbuf len */

	/* sanity check (need at least simple IP header) */
	if (mlen < (hoff + sizeof (*ip))) {
		panic("%s: mbuf %p pkt len (%u) < hoff+ip_hdr "
		    "(%u+%u)\n", __func__, m, mlen, hoff,
		    (uint32_t)sizeof (*ip));
		/* NOTREACHED */
	}

	/*
	 * In case the IP header is not contiguous, or not 32-bit aligned,
	 * or if we're computing the IP header checksum, copy it to a local
	 * buffer.  Copy only the simple IP header here (IP options case
	 * is handled below.)
	 */
	if ((sw_csum & CSUM_DELAY_IP) || (hoff + sizeof (*ip)) > m->m_len ||
	    !IP_HDR_ALIGNED_P(mtod(m, caddr_t) + hoff)) {
		m_copydata(m, hoff, sizeof (*ip), (caddr_t)buf);
		ip = (struct ip *)(void *)buf;
		/* _hlen != 0 marks that "ip" points into the local buffer */
		_hlen = sizeof (*ip);
	} else {
		ip = (struct ip *)(void *)(m->m_data + hoff);
		_hlen = 0;
	}

	hlen = IP_VHL_HL(ip->ip_vhl) << 2;		/* IP header len */

	/* sanity check */
	if (mlen < (hoff + hlen)) {
		panic("%s: mbuf %p pkt too short (%d) for IP header (%u), "
		    "hoff %u", __func__, m, mlen, hlen, hoff);
		/* NOTREACHED */
	}

	/*
	 * We could be in the context of an IP or interface filter; in the
	 * former case, ip_len would be in host (correct) order while for
	 * the latter it would be in network order.  Because of this, we
	 * attempt to interpret the length field by comparing it against
	 * the actual packet length.  If the comparison fails, byte swap
	 * the length and check again.  If it still fails, use the actual
	 * packet length.  This also covers the trailing bytes case.
	 */
	ip_len = ip->ip_len;
	if (ip_len != (mlen - hoff)) {
		ip_len = OSSwapInt16(ip_len);
		if (ip_len != (mlen - hoff)) {
			printf("%s: mbuf 0x%llx proto %d IP len %d (%x) "
			    "[swapped %d (%x)] doesn't match actual packet "
			    "length; %d is used instead\n", __func__,
			    (uint64_t)VM_KERNEL_ADDRPERM(m), ip->ip_p,
			    ip->ip_len, ip->ip_len, ip_len, ip_len,
			    (mlen - hoff));
			ip_len = mlen - hoff;
		}
	}

	len = ip_len - hlen;				/* csum span */

	if (sw_csum & CSUM_DELAY_DATA) {
		uint16_t ulpoff;

		/*
		 * offset is added to the lower 16-bit value of csum_data,
		 * which is expected to contain the ULP offset; therefore
		 * CSUM_PARTIAL offset adjustment must be undone.
		 */
		if ((m->m_pkthdr.csum_flags & (CSUM_PARTIAL|CSUM_DATA_VALID)) ==
		    (CSUM_PARTIAL|CSUM_DATA_VALID)) {
			/*
			 * Get back the original ULP offset (this will
			 * undo the CSUM_PARTIAL logic in ip_output.)
			 */
			m->m_pkthdr.csum_data = (m->m_pkthdr.csum_tx_stuff -
			    m->m_pkthdr.csum_tx_start);
		}

		ulpoff = (m->m_pkthdr.csum_data & 0xffff); /* ULP csum offset */
		offset = hoff + hlen;			/* ULP header */

		if (mlen < (ulpoff + sizeof (csum))) {
			panic("%s: mbuf %p pkt len (%u) proto %d invalid ULP "
			    "cksum offset (%u) cksum flags 0x%x\n", __func__,
			    m, mlen, ip->ip_p, ulpoff, m->m_pkthdr.csum_flags);
			/* NOTREACHED */
		}

		/* compute the transport checksum over the ULP span */
		csum = inet_cksum(m, 0, offset, len);

		/* Update stats */
		ip_out_cksum_stats(ip->ip_p, len);

		/* RFC1122 4.1.3.4: a computed UDP csum of 0 is sent as all-1s */
		if (csum == 0 && (m->m_pkthdr.csum_flags & CSUM_UDP))
			csum = 0xffff;

		/* Insert the checksum in the ULP csum field */
		offset += ulpoff;
		if (offset + sizeof (csum) > m->m_len) {
			/* csum field spans mbufs; store via copyback */
			m_copyback(m, offset, sizeof (csum), &csum);
		} else if (IP_HDR_ALIGNED_P(mtod(m, char *) + hoff)) {
			*(uint16_t *)(void *)(mtod(m, char *) + offset) = csum;
		} else {
			/* unaligned destination; byte-wise copy */
			bcopy(&csum, (mtod(m, char *) + offset), sizeof (csum));
		}
		m->m_pkthdr.csum_flags &=
		    ~(CSUM_DELAY_DATA | CSUM_DATA_VALID | CSUM_PARTIAL);
	}

	if (sw_csum & CSUM_DELAY_IP) {
		/* IP header must be in the local buffer */
		VERIFY(_hlen == sizeof (*ip));
		if (_hlen != hlen) {
			/* header has options; re-copy the full header */
			VERIFY(hlen <= sizeof (buf));
			m_copydata(m, hoff, hlen, (caddr_t)buf);
			ip = (struct ip *)(void *)buf;
			_hlen = hlen;
		}

		/*
		 * Compute the IP header checksum as if the IP length
		 * is the length which we believe is "correct"; see
		 * how ip_len gets calculated above.  Note that this
		 * is done on the local copy and not on the real one.
		 */
		ip->ip_len = htons(ip_len);
		ip->ip_sum = 0;
		csum = in_cksum_hdr_opt(ip);

		/* Update stats */
		ipstat.ips_snd_swcsum++;
		ipstat.ips_snd_swcsum_bytes += hlen;

		/*
		 * Insert only the checksum in the existing IP header
		 * csum field; all other fields are left unchanged.
		 */
		offset = hoff + offsetof(struct ip, ip_sum);
		if (offset + sizeof (csum) > m->m_len) {
			m_copyback(m, offset, sizeof (csum), &csum);
		} else if (IP_HDR_ALIGNED_P(mtod(m, char *) + hoff)) {
			*(uint16_t *)(void *)(mtod(m, char *) + offset) = csum;
		} else {
			bcopy(&csum, (mtod(m, char *) + offset), sizeof (csum));
		}
		m->m_pkthdr.csum_flags &= ~CSUM_DELAY_IP;
	}

done:
	/* bitmask of the checksum work actually performed in software */
	return (sw_csum);
}
2317
2318 /*
2319 * Insert IP options into preformed packet.
2320 * Adjust IP destination as required for IP source routing,
2321 * as indicated by a non-zero in_addr at the start of the options.
2322 *
2323 * XXX This routine assumes that the packet has no options in place.
2324 */
static struct mbuf *
ip_insertoptions(struct mbuf *m, struct mbuf *opt, int *phlen)
{
	struct ipoption *p = mtod(opt, struct ipoption *);
	struct mbuf *n;
	struct ip *ip = mtod(m, struct ip *);
	unsigned optlen;

	/* option bytes proper, excluding the leading first-hop address */
	optlen = opt->m_len - sizeof (p->ipopt_dst);
	if (optlen + (u_short)ip->ip_len > IP_MAXPACKET)
		return (m);		/* XXX should fail */
	/* non-zero first-hop address: source routing overrides ip_dst */
	if (p->ipopt_dst.s_addr)
		ip->ip_dst = p->ipopt_dst;
	if (m->m_flags & M_EXT || m->m_data - optlen < m->m_pktdat) {
		/*
		 * No room to prepend options in place (cluster-backed, or
		 * insufficient leading space): allocate a new header mbuf
		 * carrying the IP header + options and chain the old one
		 * behind it.
		 */
		MGETHDR(n, M_DONTWAIT, MT_HEADER);	/* MAC-OK */
		if (n == NULL)
			return (m);
		n->m_pkthdr.rcvif = 0;
#if CONFIG_MACF_NET
		mac_mbuf_label_copy(m, n);
#endif /* CONFIG_MACF_NET */
		n->m_pkthdr.len = m->m_pkthdr.len + optlen;
		/* strip the IP header from the old mbuf; it moves into n */
		m->m_len -= sizeof (struct ip);
		m->m_data += sizeof (struct ip);
		n->m_next = m;
		m = n;
		m->m_len = optlen + sizeof (struct ip);
		m->m_data += max_linkhdr;
		(void) memcpy(mtod(m, void *), ip, sizeof (struct ip));
	} else {
		/* enough leading space: slide the IP header down by optlen */
		m->m_data -= optlen;
		m->m_len += optlen;
		m->m_pkthdr.len += optlen;
		/* regions may overlap; ovbcopy handles that */
		ovbcopy((caddr_t)ip, mtod(m, caddr_t), sizeof (struct ip));
	}
	ip = mtod(m, struct ip *);
	/* copy the option list immediately after the IP header */
	bcopy(p->ipopt_list, ip + 1, optlen);
	*phlen = sizeof (struct ip) + optlen;
	ip->ip_vhl = IP_MAKE_VHL(IPVERSION, *phlen >> 2);
	ip->ip_len += optlen;
	return (m);
}
2367
2368 /*
2369 * Copy options from ip to jp,
2370 * omitting those not copied during fragmentation.
2371 */
static int
ip_optcopy(struct ip *ip, struct ip *jp)
{
	u_char *cp, *dp;
	int opt, optlen, cnt;

	cp = (u_char *)(ip + 1);	/* source options follow the header */
	dp = (u_char *)(jp + 1);	/* destination options likewise */
	/* remaining option bytes in the source header */
	cnt = (IP_VHL_HL(ip->ip_vhl) << 2) - sizeof (struct ip);
	for (; cnt > 0; cnt -= optlen, cp += optlen) {
		opt = cp[0];
		if (opt == IPOPT_EOL)
			break;
		if (opt == IPOPT_NOP) {
			/* Preserve for IP mcast tunnel's LSRR alignment. */
			*dp++ = IPOPT_NOP;
			optlen = 1;
			continue;
		}
#if DIAGNOSTIC
		if (cnt < IPOPT_OLEN + sizeof (*cp)) {
			panic("malformed IPv4 option passed to ip_optcopy");
			/* NOTREACHED */
		}
#endif
		optlen = cp[IPOPT_OLEN];
#if DIAGNOSTIC
		if (optlen < IPOPT_OLEN + sizeof (*cp) || optlen > cnt) {
			panic("malformed IPv4 option passed to ip_optcopy");
			/* NOTREACHED */
		}
#endif
		/* bogus lengths should have been caught by ip_dooptions */
		if (optlen > cnt)
			optlen = cnt;
		/* only options with the copied bit survive fragmentation */
		if (IPOPT_COPIED(opt)) {
			bcopy(cp, dp, optlen);
			dp += optlen;
		}
	}
	/* pad the copied options with EOL to a 32-bit boundary */
	for (optlen = dp - (u_char *)(jp+1); optlen & 0x3; optlen++)
		*dp++ = IPOPT_EOL;
	/* returns the (padded) length of the copied options */
	return (optlen);
}
2416
2417 /*
2418 * IP socket option processing.
2419 */
2420 int
2421 ip_ctloutput(struct socket *so, struct sockopt *sopt)
2422 {
2423 struct inpcb *inp = sotoinpcb(so);
2424 int error, optval;
2425
2426 error = optval = 0;
2427 if (sopt->sopt_level != IPPROTO_IP)
2428 return (EINVAL);
2429
2430 switch (sopt->sopt_dir) {
2431 case SOPT_SET:
2432 switch (sopt->sopt_name) {
2433 #ifdef notyet
2434 case IP_RETOPTS:
2435 #endif
2436 case IP_OPTIONS: {
2437 struct mbuf *m;
2438
2439 if (sopt->sopt_valsize > MLEN) {
2440 error = EMSGSIZE;
2441 break;
2442 }
2443 MGET(m, sopt->sopt_p != kernproc ? M_WAIT : M_DONTWAIT,
2444 MT_HEADER);
2445 if (m == NULL) {
2446 error = ENOBUFS;
2447 break;
2448 }
2449 m->m_len = sopt->sopt_valsize;
2450 error = sooptcopyin(sopt, mtod(m, char *),
2451 m->m_len, m->m_len);
2452 if (error)
2453 break;
2454
2455 return (ip_pcbopts(sopt->sopt_name,
2456 &inp->inp_options, m));
2457 }
2458
2459 case IP_TOS:
2460 case IP_TTL:
2461 case IP_RECVOPTS:
2462 case IP_RECVRETOPTS:
2463 case IP_RECVDSTADDR:
2464 case IP_RECVIF:
2465 case IP_RECVTTL:
2466 case IP_RECVPKTINFO:
2467 error = sooptcopyin(sopt, &optval, sizeof (optval),
2468 sizeof (optval));
2469 if (error)
2470 break;
2471
2472 switch (sopt->sopt_name) {
2473 case IP_TOS:
2474 inp->inp_ip_tos = optval;
2475 break;
2476
2477 case IP_TTL:
2478 inp->inp_ip_ttl = optval;
2479 break;
2480 #define OPTSET(bit) \
2481 if (optval) \
2482 inp->inp_flags |= bit; \
2483 else \
2484 inp->inp_flags &= ~bit;
2485
2486 case IP_RECVOPTS:
2487 OPTSET(INP_RECVOPTS);
2488 break;
2489
2490 case IP_RECVRETOPTS:
2491 OPTSET(INP_RECVRETOPTS);
2492 break;
2493
2494 case IP_RECVDSTADDR:
2495 OPTSET(INP_RECVDSTADDR);
2496 break;
2497
2498 case IP_RECVIF:
2499 OPTSET(INP_RECVIF);
2500 break;
2501
2502 case IP_RECVTTL:
2503 OPTSET(INP_RECVTTL);
2504 break;
2505
2506 case IP_RECVPKTINFO:
2507 OPTSET(INP_PKTINFO);
2508 break;
2509 }
2510 break;
2511 #undef OPTSET
2512 /*
2513 * Multicast socket options are processed by the in_mcast
2514 * module.
2515 */
2516 case IP_MULTICAST_IF:
2517 case IP_MULTICAST_IFINDEX:
2518 case IP_MULTICAST_VIF:
2519 case IP_MULTICAST_TTL:
2520 case IP_MULTICAST_LOOP:
2521 case IP_ADD_MEMBERSHIP:
2522 case IP_DROP_MEMBERSHIP:
2523 case IP_ADD_SOURCE_MEMBERSHIP:
2524 case IP_DROP_SOURCE_MEMBERSHIP:
2525 case IP_BLOCK_SOURCE:
2526 case IP_UNBLOCK_SOURCE:
2527 case IP_MSFILTER:
2528 case MCAST_JOIN_GROUP:
2529 case MCAST_LEAVE_GROUP:
2530 case MCAST_JOIN_SOURCE_GROUP:
2531 case MCAST_LEAVE_SOURCE_GROUP:
2532 case MCAST_BLOCK_SOURCE:
2533 case MCAST_UNBLOCK_SOURCE:
2534 error = inp_setmoptions(inp, sopt);
2535 break;
2536
2537 case IP_PORTRANGE:
2538 error = sooptcopyin(sopt, &optval, sizeof (optval),
2539 sizeof (optval));
2540 if (error)
2541 break;
2542
2543 switch (optval) {
2544 case IP_PORTRANGE_DEFAULT:
2545 inp->inp_flags &= ~(INP_LOWPORT);
2546 inp->inp_flags &= ~(INP_HIGHPORT);
2547 break;
2548
2549 case IP_PORTRANGE_HIGH:
2550 inp->inp_flags &= ~(INP_LOWPORT);
2551 inp->inp_flags |= INP_HIGHPORT;
2552 break;
2553
2554 case IP_PORTRANGE_LOW:
2555 inp->inp_flags &= ~(INP_HIGHPORT);
2556 inp->inp_flags |= INP_LOWPORT;
2557 break;
2558
2559 default:
2560 error = EINVAL;
2561 break;
2562 }
2563 break;
2564
2565 #if IPSEC
2566 case IP_IPSEC_POLICY: {
2567 caddr_t req = NULL;
2568 size_t len = 0;
2569 int priv;
2570 struct mbuf *m;
2571 int optname;
2572
2573 if ((error = soopt_getm(sopt, &m)) != 0) /* XXX */
2574 break;
2575 if ((error = soopt_mcopyin(sopt, m)) != 0) /* XXX */
2576 break;
2577 priv = (proc_suser(sopt->sopt_p) == 0);
2578 if (m) {
2579 req = mtod(m, caddr_t);
2580 len = m->m_len;
2581 }
2582 optname = sopt->sopt_name;
2583 error = ipsec4_set_policy(inp, optname, req, len, priv);
2584 m_freem(m);
2585 break;
2586 }
2587 #endif /* IPSEC */
2588
2589 #if TRAFFIC_MGT
2590 case IP_TRAFFIC_MGT_BACKGROUND: {
2591 unsigned background = 0;
2592
2593 error = sooptcopyin(sopt, &background,
2594 sizeof (background), sizeof (background));
2595 if (error)
2596 break;
2597
2598 if (background) {
2599 socket_set_traffic_mgt_flags_locked(so,
2600 TRAFFIC_MGT_SO_BACKGROUND);
2601 } else {
2602 socket_clear_traffic_mgt_flags_locked(so,
2603 TRAFFIC_MGT_SO_BACKGROUND);
2604 }
2605
2606 break;
2607 }
2608 #endif /* TRAFFIC_MGT */
2609
2610 /*
2611 * On a multihomed system, scoped routing can be used to
2612 * restrict the source interface used for sending packets.
2613 * The socket option IP_BOUND_IF binds a particular AF_INET
2614 * socket to an interface such that data sent on the socket
2615 * is restricted to that interface. This is unlike the
2616 * SO_DONTROUTE option where the routing table is bypassed;
2617 * therefore it allows for a greater flexibility and control
2618 * over the system behavior, and does not place any restriction
2619 * on the destination address type (e.g. unicast, multicast,
2620 * or broadcast if applicable) or whether or not the host is
2621 * directly reachable. Note that in the multicast transmit
2622 * case, IP_MULTICAST_{IF,IFINDEX} takes precedence over
2623 * IP_BOUND_IF, since the former practically bypasses the
2624 * routing table; in this case, IP_BOUND_IF sets the default
2625 * interface used for sending multicast packets in the absence
2626 * of an explicit multicast transmit interface.
2627 */
2628 case IP_BOUND_IF:
2629 /* This option is settable only for IPv4 */
2630 if (!(inp->inp_vflag & INP_IPV4)) {
2631 error = EINVAL;
2632 break;
2633 }
2634
2635 error = sooptcopyin(sopt, &optval, sizeof (optval),
2636 sizeof (optval));
2637
2638 if (error)
2639 break;
2640
2641 error = inp_bindif(inp, optval, NULL);
2642 break;
2643
2644 case IP_NO_IFT_CELLULAR:
2645 /* This option is settable only for IPv4 */
2646 if (!(inp->inp_vflag & INP_IPV4)) {
2647 error = EINVAL;
2648 break;
2649 }
2650
2651 error = sooptcopyin(sopt, &optval, sizeof (optval),
2652 sizeof (optval));
2653
2654 if (error)
2655 break;
2656
2657 /* once set, it cannot be unset */
2658 if (!optval && INP_NO_CELLULAR(inp)) {
2659 error = EINVAL;
2660 break;
2661 }
2662
2663 error = so_set_restrictions(so,
2664 SO_RESTRICT_DENY_CELLULAR);
2665 break;
2666
2667 case IP_OUT_IF:
2668 /* This option is not settable */
2669 error = EINVAL;
2670 break;
2671
2672 default:
2673 error = ENOPROTOOPT;
2674 break;
2675 }
2676 break;
2677
2678 case SOPT_GET:
2679 switch (sopt->sopt_name) {
2680 case IP_OPTIONS:
2681 case IP_RETOPTS:
2682 if (inp->inp_options) {
2683 error = sooptcopyout(sopt,
2684 mtod(inp->inp_options, char *),
2685 inp->inp_options->m_len);
2686 } else {
2687 sopt->sopt_valsize = 0;
2688 }
2689 break;
2690
2691 case IP_TOS:
2692 case IP_TTL:
2693 case IP_RECVOPTS:
2694 case IP_RECVRETOPTS:
2695 case IP_RECVDSTADDR:
2696 case IP_RECVIF:
2697 case IP_RECVTTL:
2698 case IP_PORTRANGE:
2699 case IP_RECVPKTINFO:
2700 switch (sopt->sopt_name) {
2701
2702 case IP_TOS:
2703 optval = inp->inp_ip_tos;
2704 break;
2705
2706 case IP_TTL:
2707 optval = inp->inp_ip_ttl;
2708 break;
2709
2710 #define OPTBIT(bit) (inp->inp_flags & bit ? 1 : 0)
2711
2712 case IP_RECVOPTS:
2713 optval = OPTBIT(INP_RECVOPTS);
2714 break;
2715
2716 case IP_RECVRETOPTS:
2717 optval = OPTBIT(INP_RECVRETOPTS);
2718 break;
2719
2720 case IP_RECVDSTADDR:
2721 optval = OPTBIT(INP_RECVDSTADDR);
2722 break;
2723
2724 case IP_RECVIF:
2725 optval = OPTBIT(INP_RECVIF);
2726 break;
2727
2728 case IP_RECVTTL:
2729 optval = OPTBIT(INP_RECVTTL);
2730 break;
2731
2732 case IP_PORTRANGE:
2733 if (inp->inp_flags & INP_HIGHPORT)
2734 optval = IP_PORTRANGE_HIGH;
2735 else if (inp->inp_flags & INP_LOWPORT)
2736 optval = IP_PORTRANGE_LOW;
2737 else
2738 optval = 0;
2739 break;
2740
2741 case IP_RECVPKTINFO:
2742 optval = OPTBIT(INP_PKTINFO);
2743 break;
2744 }
2745 error = sooptcopyout(sopt, &optval, sizeof (optval));
2746 break;
2747
2748 case IP_MULTICAST_IF:
2749 case IP_MULTICAST_IFINDEX:
2750 case IP_MULTICAST_VIF:
2751 case IP_MULTICAST_TTL:
2752 case IP_MULTICAST_LOOP:
2753 case IP_MSFILTER:
2754 error = inp_getmoptions(inp, sopt);
2755 break;
2756
2757 #if IPSEC
2758 case IP_IPSEC_POLICY: {
2759 error = 0; /* This option is no longer supported */
2760 break;
2761 }
2762 #endif /* IPSEC */
2763
2764 #if TRAFFIC_MGT
2765 case IP_TRAFFIC_MGT_BACKGROUND: {
2766 unsigned background = (so->so_flags1 &
2767 SOF1_TRAFFIC_MGT_SO_BACKGROUND) ? 1 : 0;
2768 return (sooptcopyout(sopt, &background,
2769 sizeof (background)));
2770 }
2771 #endif /* TRAFFIC_MGT */
2772
2773 case IP_BOUND_IF:
2774 if (inp->inp_flags & INP_BOUND_IF)
2775 optval = inp->inp_boundifp->if_index;
2776 error = sooptcopyout(sopt, &optval, sizeof (optval));
2777 break;
2778
2779 case IP_NO_IFT_CELLULAR:
2780 optval = INP_NO_CELLULAR(inp) ? 1 : 0;
2781 error = sooptcopyout(sopt, &optval, sizeof (optval));
2782 break;
2783
2784 case IP_OUT_IF:
2785 optval = (inp->inp_last_outifp != NULL) ?
2786 inp->inp_last_outifp->if_index : 0;
2787 error = sooptcopyout(sopt, &optval, sizeof (optval));
2788 break;
2789
2790 default:
2791 error = ENOPROTOOPT;
2792 break;
2793 }
2794 break;
2795 }
2796 return (error);
2797 }
2798
2799 /*
2800 * Set up IP options in pcb for insertion in output packets.
2801 * Store in mbuf with pointer in pcbopt, adding pseudo-option
2802 * with destination address if source routed.
2803 */
static int
ip_pcbopts(int optname, struct mbuf **pcbopt, struct mbuf *m)
{
#pragma unused(optname)
	int cnt, optlen;
	u_char *cp;
	u_char opt;

	/* turn off any old options */
	if (*pcbopt)
		(void) m_free(*pcbopt);
	*pcbopt = 0;
	if (m == (struct mbuf *)0 || m->m_len == 0) {
		/*
		 * Only turning off any previous options.
		 */
		if (m)
			(void) m_free(m);
		return (0);
	}

	/* user-supplied options must be 32-bit padded */
	if (m->m_len % sizeof (int32_t))
		goto bad;

	/*
	 * IP first-hop destination address will be stored before
	 * actual options; move other options back
	 * and clear it when none present.
	 */
	if (m->m_data + m->m_len + sizeof (struct in_addr) >= &m->m_dat[MLEN])
		goto bad;
	cnt = m->m_len;
	m->m_len += sizeof (struct in_addr);
	cp = mtod(m, u_char *) + sizeof (struct in_addr);
	/* slide the options up to make room; regions overlap */
	ovbcopy(mtod(m, caddr_t), (caddr_t)cp, (unsigned)cnt);
	/* zero first-hop slot; stays zero unless LSRR/SSRR fills it below */
	bzero(mtod(m, caddr_t), sizeof (struct in_addr));

	for (; cnt > 0; cnt -= optlen, cp += optlen) {
		opt = cp[IPOPT_OPTVAL];
		if (opt == IPOPT_EOL)
			break;
		if (opt == IPOPT_NOP)
			optlen = 1;
		else {
			/* validate the option length against what remains */
			if (cnt < IPOPT_OLEN + sizeof (*cp))
				goto bad;
			optlen = cp[IPOPT_OLEN];
			if (optlen < IPOPT_OLEN + sizeof (*cp) || optlen > cnt)
				goto bad;
		}
		switch (opt) {

		default:
			break;

		case IPOPT_LSRR:
		case IPOPT_SSRR:
			/*
			 * user process specifies route as:
			 *	->A->B->C->D
			 * D must be our final destination (but we can't
			 * check that since we may not have connected yet).
			 * A is first hop destination, which doesn't appear in
			 * actual IP option, but is stored before the options.
			 */
			if (optlen < IPOPT_MINOFF - 1 + sizeof (struct in_addr))
				goto bad;
			/* remove A from the option; it lives in the slot */
			m->m_len -= sizeof (struct in_addr);
			cnt -= sizeof (struct in_addr);
			optlen -= sizeof (struct in_addr);
			cp[IPOPT_OLEN] = optlen;
			/*
			 * Move first hop before start of options.
			 */
			bcopy((caddr_t)&cp[IPOPT_OFFSET+1], mtod(m, caddr_t),
			    sizeof (struct in_addr));
			/*
			 * Then copy rest of options back
			 * to close up the deleted entry.
			 */
			ovbcopy((caddr_t)(&cp[IPOPT_OFFSET+1] +
			    sizeof (struct in_addr)),
			    (caddr_t)&cp[IPOPT_OFFSET+1],
			    (unsigned)cnt + sizeof (struct in_addr));
			break;
		}
	}
	/* enforce total length: options plus the stored first-hop address */
	if (m->m_len > MAX_IPOPTLEN + sizeof (struct in_addr))
		goto bad;
	/* ownership of m transfers to the pcb */
	*pcbopt = m;
	return (0);

bad:
	(void) m_free(m);
	return (EINVAL);
}
2900
2901 void
2902 ip_moptions_init(void)
2903 {
2904 PE_parse_boot_argn("ifa_debug", &imo_debug, sizeof (imo_debug));
2905
2906 imo_size = (imo_debug == 0) ? sizeof (struct ip_moptions) :
2907 sizeof (struct ip_moptions_dbg);
2908
2909 imo_zone = zinit(imo_size, IMO_ZONE_MAX * imo_size, 0,
2910 IMO_ZONE_NAME);
2911 if (imo_zone == NULL) {
2912 panic("%s: failed allocating %s", __func__, IMO_ZONE_NAME);
2913 /* NOTREACHED */
2914 }
2915 zone_change(imo_zone, Z_EXPAND, TRUE);
2916 }
2917
2918 void
2919 imo_addref(struct ip_moptions *imo, int locked)
2920 {
2921 if (!locked)
2922 IMO_LOCK(imo);
2923 else
2924 IMO_LOCK_ASSERT_HELD(imo);
2925
2926 if (++imo->imo_refcnt == 0) {
2927 panic("%s: imo %p wraparound refcnt\n", __func__, imo);
2928 /* NOTREACHED */
2929 } else if (imo->imo_trace != NULL) {
2930 (*imo->imo_trace)(imo, TRUE);
2931 }
2932
2933 if (!locked)
2934 IMO_UNLOCK(imo);
2935 }
2936
void
imo_remref(struct ip_moptions *imo)
{
	int i;

	IMO_LOCK(imo);
	if (imo->imo_refcnt == 0) {
		panic("%s: imo %p negative refcnt", __func__, imo);
		/* NOTREACHED */
	} else if (imo->imo_trace != NULL) {
		(*imo->imo_trace)(imo, FALSE);
	}

	--imo->imo_refcnt;
	if (imo->imo_refcnt > 0) {
		/* still referenced elsewhere; nothing more to do */
		IMO_UNLOCK(imo);
		return;
	}

	/* last reference dropped: leave all groups and tear down */
	for (i = 0; i < imo->imo_num_memberships; ++i) {
		struct in_mfilter *imf;

		imf = imo->imo_mfilters ? &imo->imo_mfilters[i] : NULL;
		if (imf != NULL)
			imf_leave(imf);

		(void) in_leavegroup(imo->imo_membership[i], imf);

		if (imf != NULL)
			imf_purge(imf);

		INM_REMREF(imo->imo_membership[i]);
		imo->imo_membership[i] = NULL;
	}
	imo->imo_num_memberships = 0;
	if (imo->imo_mfilters != NULL) {
		FREE(imo->imo_mfilters, M_INMFILTER);
		imo->imo_mfilters = NULL;
	}
	if (imo->imo_membership != NULL) {
		FREE(imo->imo_membership, M_IPMOPTS);
		imo->imo_membership = NULL;
	}
	/* drop the lock before destroying it */
	IMO_UNLOCK(imo);

	lck_mtx_destroy(&imo->imo_lock, ifa_mtx_grp);

	/* only zone-allocated instances may be freed back to the zone */
	if (!(imo->imo_debug & IFD_ALLOC)) {
		panic("%s: imo %p cannot be freed", __func__, imo);
		/* NOTREACHED */
	}
	zfree(imo_zone, imo);
}
2990
2991 static void
2992 imo_trace(struct ip_moptions *imo, int refhold)
2993 {
2994 struct ip_moptions_dbg *imo_dbg = (struct ip_moptions_dbg *)imo;
2995 ctrace_t *tr;
2996 u_int32_t idx;
2997 u_int16_t *cnt;
2998
2999 if (!(imo->imo_debug & IFD_DEBUG)) {
3000 panic("%s: imo %p has no debug structure", __func__, imo);
3001 /* NOTREACHED */
3002 }
3003 if (refhold) {
3004 cnt = &imo_dbg->imo_refhold_cnt;
3005 tr = imo_dbg->imo_refhold;
3006 } else {
3007 cnt = &imo_dbg->imo_refrele_cnt;
3008 tr = imo_dbg->imo_refrele;
3009 }
3010
3011 idx = atomic_add_16_ov(cnt, 1) % IMO_TRACE_HIST_SIZE;
3012 ctrace_record(&tr[idx]);
3013 }
3014
3015 struct ip_moptions *
3016 ip_allocmoptions(int how)
3017 {
3018 struct ip_moptions *imo;
3019
3020 imo = (how == M_WAITOK) ? zalloc(imo_zone) : zalloc_noblock(imo_zone);
3021 if (imo != NULL) {
3022 bzero(imo, imo_size);
3023 lck_mtx_init(&imo->imo_lock, ifa_mtx_grp, ifa_mtx_attr);
3024 imo->imo_debug |= IFD_ALLOC;
3025 if (imo_debug != 0) {
3026 imo->imo_debug |= IFD_DEBUG;
3027 imo->imo_trace = imo_trace;
3028 }
3029 IMO_ADDREF(imo);
3030 }
3031
3032 return (imo);
3033 }
3034
3035 /*
3036 * Routine called from ip_output() to loop back a copy of an IP multicast
3037 * packet to the input queue of a specified interface. Note that this
3038 * calls the output routine of the loopback "driver", but with an interface
3039 * pointer that might NOT be a loopback interface -- evil, but easier than
3040 * replicating that code here.
3041 */
static void
ip_mloopback(struct ifnet *srcifp, struct ifnet *origifp, struct mbuf *m,
    struct sockaddr_in *dst, int hlen)
{
	struct mbuf *copym;
	struct ip *ip;

	/* loopback interface not yet attached; silently drop */
	if (lo_ifp == NULL)
		return;

	/*
	 * Copy the packet header as it's needed for the checksum
	 * Make sure to deep-copy IP header portion in case the data
	 * is in an mbuf cluster, so that we can safely override the IP
	 * header portion later.
	 */
	copym = m_copym_mode(m, 0, M_COPYALL, M_DONTWAIT, M_COPYM_COPY_HDR);
	if (copym != NULL && ((copym->m_flags & M_EXT) || copym->m_len < hlen))
		copym = m_pullup(copym, hlen);

	if (copym == NULL)
		return;

	/*
	 * We don't bother to fragment if the IP length is greater
	 * than the interface's MTU.  Can this possibly matter?
	 */
	ip = mtod(copym, struct ip *);
#if BYTE_ORDER != BIG_ENDIAN
	HTONS(ip->ip_len);
	HTONS(ip->ip_off);
#endif
	ip->ip_sum = 0;
	ip->ip_sum = ip_cksum_hdr_out(copym, hlen);

	/*
	 * Mark checksum as valid unless receive checksum offload is
	 * disabled; if so, compute checksum in software.  If the
	 * interface itself is lo0, this will be overridden by if_loop.
	 */
	if (hwcksum_rx) {
		copym->m_pkthdr.csum_flags &= ~CSUM_PARTIAL;
		copym->m_pkthdr.csum_flags |=
		    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
		copym->m_pkthdr.csum_data = 0xffff;
	} else if (copym->m_pkthdr.csum_flags & CSUM_DELAY_DATA) {
		/* in_delayed_cksum() expects ip_len in host order */
#if BYTE_ORDER != BIG_ENDIAN
		NTOHS(ip->ip_len);
#endif
		in_delayed_cksum(copym);
#if BYTE_ORDER != BIG_ENDIAN
		HTONS(ip->ip_len);
#endif
	}

	/*
	 * Stuff the 'real' ifp into the pkthdr, to be used in matching
	 * in ip_input(); we need the loopback ifp/dl_tag passed as args
	 * to make the loopback driver compliant with the data link
	 * requirements.
	 */
	copym->m_pkthdr.rcvif = origifp;

	/*
	 * Also record the source interface (which owns the source address).
	 * This is basically a stripped down version of ifa_foraddr().
	 */
	if (srcifp == NULL) {
		struct in_ifaddr *ia;

		lck_rw_lock_shared(in_ifaddr_rwlock);
		TAILQ_FOREACH(ia, INADDR_HASH(ip->ip_src.s_addr), ia_hash) {
			IFA_LOCK_SPIN(&ia->ia_ifa);
			if (IA_SIN(ia)->sin_addr.s_addr == ip->ip_src.s_addr) {
				/* found the owner of the source address */
				srcifp = ia->ia_ifp;
				IFA_UNLOCK(&ia->ia_ifa);
				break;
			}
			IFA_UNLOCK(&ia->ia_ifa);
		}
		lck_rw_done(in_ifaddr_rwlock);
	}
	if (srcifp != NULL)
		ip_setsrcifaddr_info(copym, srcifp->if_index, NULL);
	ip_setdstifaddr_info(copym, origifp->if_index, NULL);

	/* hand the copy to the loopback driver for re-input */
	dlil_output(lo_ifp, PF_INET, copym, NULL, SA(dst), 0, NULL);
}
3130
3131 /*
3132 * Given a source IP address (and route, if available), determine the best
3133 * interface to send the packet from. Checking for (and updating) the
3134 * ROF_SRCIF_SELECTED flag in the pcb-supplied route placeholder is done
3135 * without any locks based on the assumption that ip_output() is single-
3136 * threaded per-pcb, i.e. for any given pcb there can only be one thread
3137 * performing output at the IP layer.
3138 *
3139 * This routine is analogous to in6_selectroute() for IPv6.
3140 */
3141 static struct ifaddr *
3142 in_selectsrcif(struct ip *ip, struct route *ro, unsigned int ifscope)
3143 {
3144 struct ifaddr *ifa = NULL;
3145 struct in_addr src = ip->ip_src;
3146 struct in_addr dst = ip->ip_dst;
3147 struct ifnet *rt_ifp;
3148 char s_src[MAX_IPv4_STR_LEN], s_dst[MAX_IPv4_STR_LEN];
3149
3150 VERIFY(src.s_addr != INADDR_ANY);
3151
3152 if (ip_select_srcif_debug) {
3153 (void) inet_ntop(AF_INET, &src.s_addr, s_src, sizeof (s_src));
3154 (void) inet_ntop(AF_INET, &dst.s_addr, s_dst, sizeof (s_dst));
3155 }
3156
3157 if (ro->ro_rt != NULL)
3158 RT_LOCK(ro->ro_rt);
3159
3160 rt_ifp = (ro->ro_rt != NULL) ? ro->ro_rt->rt_ifp : NULL;
3161
3162 /*
3163 * Given the source IP address, find a suitable source interface
3164 * to use for transmission; if the caller has specified a scope,
3165 * optimize the search by looking at the addresses only for that
3166 * interface. This is still suboptimal, however, as we need to
3167 * traverse the per-interface list.
3168 */
3169 if (ifscope != IFSCOPE_NONE || ro->ro_rt != NULL) {
3170 unsigned int scope = ifscope;
3171
3172 /*
3173 * If no scope is specified and the route is stale (pointing
3174 * to a defunct interface) use the current primary interface;
3175 * this happens when switching between interfaces configured
3176 * with the same IP address. Otherwise pick up the scope
3177 * information from the route; the ULP may have looked up a
3178 * correct route and we just need to verify it here and mark
3179 * it with the ROF_SRCIF_SELECTED flag below.
3180 */
3181 if (scope == IFSCOPE_NONE) {
3182 scope = rt_ifp->if_index;
3183 if (scope != get_primary_ifscope(AF_INET) &&
3184 ROUTE_UNUSABLE(ro))
3185 scope = get_primary_ifscope(AF_INET);
3186 }
3187
3188 ifa = (struct ifaddr *)ifa_foraddr_scoped(src.s_addr, scope);
3189
3190 if (ifa == NULL && ip->ip_p != IPPROTO_UDP &&
3191 ip->ip_p != IPPROTO_TCP && ipforwarding) {
3192 /*
3193 * If forwarding is enabled, and if the packet isn't
3194 * TCP or UDP, check if the source address belongs
3195 * to one of our own interfaces; if so, demote the
3196 * interface scope and do a route lookup right below.
3197 */
3198 ifa = (struct ifaddr *)ifa_foraddr(src.s_addr);
3199 if (ifa != NULL) {
3200 IFA_REMREF(ifa);
3201 ifa = NULL;
3202 ifscope = IFSCOPE_NONE;
3203 }
3204 }
3205
3206 if (ip_select_srcif_debug && ifa != NULL) {
3207 if (ro->ro_rt != NULL) {
3208 printf("%s->%s ifscope %d->%d ifa_if %s "
3209 "ro_if %s\n", s_src, s_dst, ifscope,
3210 scope, if_name(ifa->ifa_ifp),
3211 if_name(rt_ifp));
3212 } else {
3213 printf("%s->%s ifscope %d->%d ifa_if %s\n",
3214 s_src, s_dst, ifscope, scope,
3215 if_name(ifa->ifa_ifp));
3216 }
3217 }
3218 }
3219
3220 /*
3221 * Slow path; search for an interface having the corresponding source
3222 * IP address if the scope was not specified by the caller, and:
3223 *
3224 * 1) There currently isn't any route, or,
3225 * 2) The interface used by the route does not own that source
3226 * IP address; in this case, the route will get blown away
3227 * and we'll do a more specific scoped search using the newly
3228 * found interface.
3229 */
3230 if (ifa == NULL && ifscope == IFSCOPE_NONE) {
3231 ifa = (struct ifaddr *)ifa_foraddr(src.s_addr);
3232
3233 /*
3234 * If we have the IP address, but not the route, we don't
3235 * really know whether or not it belongs to the correct
3236 * interface (it could be shared across multiple interfaces.)
3237 * The only way to find out is to do a route lookup.
3238 */
3239 if (ifa != NULL && ro->ro_rt == NULL) {
3240 struct rtentry *rt;
3241 struct sockaddr_in sin;
3242 struct ifaddr *oifa = NULL;
3243
3244 bzero(&sin, sizeof (sin));
3245 sin.sin_family = AF_INET;
3246 sin.sin_len = sizeof (sin);
3247 sin.sin_addr = dst;
3248
3249 lck_mtx_lock(rnh_lock);
3250 if ((rt = rt_lookup(TRUE, SA(&sin), NULL,
3251 rt_tables[AF_INET], IFSCOPE_NONE)) != NULL) {
3252 RT_LOCK(rt);
3253 /*
3254 * If the route uses a different interface,
3255 * use that one instead. The IP address of
3256 * the ifaddr that we pick up here is not
3257 * relevant.
3258 */
3259 if (ifa->ifa_ifp != rt->rt_ifp) {
3260 oifa = ifa;
3261 ifa = rt->rt_ifa;
3262 IFA_ADDREF(ifa);
3263 RT_UNLOCK(rt);
3264 } else {
3265 RT_UNLOCK(rt);
3266 }
3267 rtfree_locked(rt);
3268 }
3269 lck_mtx_unlock(rnh_lock);
3270
3271 if (oifa != NULL) {
3272 struct ifaddr *iifa;
3273
3274 /*
3275 * See if the interface pointed to by the
3276 * route is configured with the source IP
3277 * address of the packet.
3278 */
3279 iifa = (struct ifaddr *)ifa_foraddr_scoped(
3280 src.s_addr, ifa->ifa_ifp->if_index);
3281
3282 if (iifa != NULL) {
3283 /*
3284 * Found it; drop the original one
3285 * as well as the route interface
3286 * address, and use this instead.
3287 */
3288 IFA_REMREF(oifa);
3289 IFA_REMREF(ifa);
3290 ifa = iifa;
3291 } else if (!ipforwarding ||
3292 (rt->rt_flags & RTF_GATEWAY)) {
3293 /*
3294 * This interface doesn't have that
3295 * source IP address; drop the route
3296 * interface address and just use the
3297 * original one, and let the caller
3298 * do a scoped route lookup.
3299 */
3300 IFA_REMREF(ifa);
3301 ifa = oifa;
3302 } else {
3303 /*
3304 * Forwarding is enabled and the source
3305 * address belongs to one of our own
3306 * interfaces which isn't the outgoing
3307 * interface, and we have a route, and
3308 * the destination is on a network that
3309 * is directly attached (onlink); drop
3310 * the original one and use the route
3311 * interface address instead.
3312 */
3313 IFA_REMREF(oifa);
3314 }
3315 }
3316 } else if (ifa != NULL && ro->ro_rt != NULL &&
3317 !(ro->ro_rt->rt_flags & RTF_GATEWAY) &&
3318 ifa->ifa_ifp != ro->ro_rt->rt_ifp && ipforwarding) {
3319 /*
3320 * Forwarding is enabled and the source address belongs
3321 * to one of our own interfaces which isn't the same
3322 * as the interface used by the known route; drop the
3323 * original one and use the route interface address.
3324 */
3325 IFA_REMREF(ifa);
3326 ifa = ro->ro_rt->rt_ifa;
3327 IFA_ADDREF(ifa);
3328 }
3329
3330 if (ip_select_srcif_debug && ifa != NULL) {
3331 printf("%s->%s ifscope %d ifa_if %s\n",
3332 s_src, s_dst, ifscope, if_name(ifa->ifa_ifp));
3333 }
3334 }
3335
3336 if (ro->ro_rt != NULL)
3337 RT_LOCK_ASSERT_HELD(ro->ro_rt);
3338 /*
3339 * If there is a non-loopback route with the wrong interface, or if
3340 * there is no interface configured with such an address, blow it
3341 * away. Except for local/loopback, we look for one with a matching
3342 * interface scope/index.
3343 */
3344 if (ro->ro_rt != NULL &&
3345 (ifa == NULL || (ifa->ifa_ifp != rt_ifp && rt_ifp != lo_ifp) ||
3346 !(ro->ro_rt->rt_flags & RTF_UP))) {
3347 if (ip_select_srcif_debug) {
3348 if (ifa != NULL) {
3349 printf("%s->%s ifscope %d ro_if %s != "
3350 "ifa_if %s (cached route cleared)\n",
3351 s_src, s_dst, ifscope, if_name(rt_ifp),
3352 if_name(ifa->ifa_ifp));
3353 } else {
3354 printf("%s->%s ifscope %d ro_if %s "
3355 "(no ifa_if found)\n",
3356 s_src, s_dst, ifscope, if_name(rt_ifp));
3357 }
3358 }
3359
3360 RT_UNLOCK(ro->ro_rt);
3361 ROUTE_RELEASE(ro);
3362
3363 /*
3364 * If the destination is IPv4 LLA and the route's interface
3365 * doesn't match the source interface, then the source IP
3366 * address is wrong; it most likely belongs to the primary
3367 * interface associated with the IPv4 LL subnet. Drop the
3368 * packet rather than letting it go out and return an error
3369 * to the ULP. This actually applies not only to IPv4 LL
3370 * but other shared subnets; for now we explicitly test only
3371 * for the former case and save the latter for future.
3372 */
3373 if (IN_LINKLOCAL(ntohl(dst.s_addr)) &&
3374 !IN_LINKLOCAL(ntohl(src.s_addr)) && ifa != NULL) {
3375 IFA_REMREF(ifa);
3376 ifa = NULL;
3377 }
3378 }
3379
3380 if (ip_select_srcif_debug && ifa == NULL) {
3381 printf("%s->%s ifscope %d (neither ro_if/ifa_if found)\n",
3382 s_src, s_dst, ifscope);
3383 }
3384
3385 /*
3386 * If there is a route, mark it accordingly. If there isn't one,
3387 * we'll get here again during the next transmit (possibly with a
3388 * route) and the flag will get set at that point. For IPv4 LLA
3389 * destination, mark it only if the route has been fully resolved;
3390 * otherwise we want to come back here again when the route points
3391 * to the interface over which the ARP reply arrives on.
3392 */
3393 if (ro->ro_rt != NULL && (!IN_LINKLOCAL(ntohl(dst.s_addr)) ||
3394 (ro->ro_rt->rt_gateway->sa_family == AF_LINK &&
3395 SDL(ro->ro_rt->rt_gateway)->sdl_alen != 0))) {
3396 if (ifa != NULL)
3397 IFA_ADDREF(ifa); /* for route */
3398 if (ro->ro_srcia != NULL)
3399 IFA_REMREF(ro->ro_srcia);
3400 ro->ro_srcia = ifa;
3401 ro->ro_flags |= ROF_SRCIF_SELECTED;
3402 RT_GENID_SYNC(ro->ro_rt);
3403 }
3404
3405 if (ro->ro_rt != NULL)
3406 RT_UNLOCK(ro->ro_rt);
3407
3408 return (ifa);
3409 }
3410
/*
 * Partition the checksum work for an outbound IPv4 packet between
 * software and the outgoing interface's hardware offload engine.
 *
 * On return, *sw_csum holds the CSUM_DELAY_* bits that software must
 * handle (the transport checksum, if needed, is already computed here
 * via in_delayed_cksum), and m->m_pkthdr.csum_flags has been trimmed
 * to only the offload bits the interface actually supports.
 *
 * Parameters:
 *	ifp	outgoing interface; if_hwassist describes its hardware
 *		checksum/offload capabilities
 *	m	the outbound packet; m_pkthdr.csum_flags is read and
 *		rewritten, and partial-offload fields may be set
 *	hlen	IP header length; != sizeof (struct ip) means the
 *		header carries IP options
 *	ip_len	total length of the IP datagram
 *	sw_csum	out: checksum flags left for the caller's software path
 */
void
ip_output_checksum(struct ifnet *ifp, struct mbuf *m, int hlen, int ip_len,
    uint32_t *sw_csum)
{
	int tso = TSO_IPV4_OK(ifp, m);
	uint32_t hwcap = ifp->if_hwassist;

	/* the IP header checksum is always required on output */
	m->m_pkthdr.csum_flags |= CSUM_IP;

	if (!hwcksum_tx) {
		/* do all in software; hardware checksum offload is disabled */
		*sw_csum = (CSUM_DELAY_DATA | CSUM_DELAY_IP) &
		    m->m_pkthdr.csum_flags;
	} else {
		/* do in software what the hardware cannot */
		*sw_csum = m->m_pkthdr.csum_flags &
		    ~IF_HWASSIST_CSUM_FLAGS(hwcap);
	}

	if (hlen != sizeof (struct ip)) {
		/*
		 * IP options are present; compute both the IP header and
		 * the transport checksum in software regardless of the
		 * interface's capabilities.
		 */
		*sw_csum |= ((CSUM_DELAY_DATA | CSUM_DELAY_IP) &
		    m->m_pkthdr.csum_flags);
	} else if (!(*sw_csum & CSUM_DELAY_DATA) && (hwcap & CSUM_PARTIAL)) {
		/*
		 * Partial checksum offload, if non-IP fragment, and TCP only
		 * (no UDP support, as the hardware may not be able to convert
		 * +0 to -0 (0xffff) per RFC1122 4.1.3.4.)
		 */
		if (hwcksum_tx && !tso &&
		    (m->m_pkthdr.csum_flags & CSUM_TCP) &&
		    ip_len <= ifp->if_mtu) {
			/*
			 * Tell the driver where the 16-bit partial sum
			 * starts and where the result must be stuffed;
			 * csum_data's low 16 bits hold the ULP's stuff
			 * offset relative to the transport header.
			 */
			uint16_t start = sizeof (struct ip);
			uint16_t ulpoff = m->m_pkthdr.csum_data & 0xffff;
			m->m_pkthdr.csum_flags |=
			    (CSUM_DATA_VALID | CSUM_PARTIAL);
			m->m_pkthdr.csum_tx_stuff = (ulpoff + start);
			m->m_pkthdr.csum_tx_start = start;
			/* do IP hdr chksum in software */
			*sw_csum = CSUM_DELAY_IP;
		} else {
			/* partial offload not applicable; fall back */
			*sw_csum |= (CSUM_DELAY_DATA & m->m_pkthdr.csum_flags);
		}
	}

	if (*sw_csum & CSUM_DELAY_DATA) {
		/* compute the transport-layer checksum now, in software */
		in_delayed_cksum(m);
		*sw_csum &= ~CSUM_DELAY_DATA;
	}

	if (hwcksum_tx) {
		/*
		 * Drop off bits that aren't supported by hardware;
		 * also make sure to preserve non-checksum related bits.
		 */
		m->m_pkthdr.csum_flags =
		    ((m->m_pkthdr.csum_flags &
		    (IF_HWASSIST_CSUM_FLAGS(hwcap) | CSUM_DATA_VALID)) |
		    (m->m_pkthdr.csum_flags & ~IF_HWASSIST_CSUM_MASK));
	} else {
		/* drop all bits; hardware checksum offload is disabled */
		m->m_pkthdr.csum_flags = 0;
	}
}
3474
3475 /*
3476 * GRE protocol output for PPP/PPTP
3477 */
3478 int
3479 ip_gre_output(struct mbuf *m)
3480 {
3481 struct route ro;
3482 int error;
3483
3484 bzero(&ro, sizeof (ro));
3485
3486 error = ip_output(m, NULL, &ro, 0, NULL, NULL);
3487
3488 ROUTE_RELEASE(&ro);
3489
3490 return (error);
3491 }
3492
3493 static int
3494 sysctl_reset_ip_output_stats SYSCTL_HANDLER_ARGS
3495 {
3496 #pragma unused(arg1, arg2)
3497 int error, i;
3498
3499 i = ip_output_measure;
3500 error = sysctl_handle_int(oidp, &i, 0, req);
3501 if (error || req->newptr == USER_ADDR_NULL)
3502 goto done;
3503 /* impose bounds */
3504 if (i < 0 || i > 1) {
3505 error = EINVAL;
3506 goto done;
3507 }
3508 if (ip_output_measure != i && i == 1) {
3509 net_perf_initialize(&net_perf, ip_output_measure_bins);
3510 }
3511 ip_output_measure = i;
3512 done:
3513 return (error);
3514 }
3515
3516 static int
3517 sysctl_ip_output_measure_bins SYSCTL_HANDLER_ARGS
3518 {
3519 #pragma unused(arg1, arg2)
3520 int error;
3521 uint64_t i;
3522
3523 i = ip_output_measure_bins;
3524 error = sysctl_handle_quad(oidp, &i, 0, req);
3525 if (error || req->newptr == USER_ADDR_NULL)
3526 goto done;
3527 /* validate data */
3528 if (!net_perf_validate_bins(i)) {
3529 error = EINVAL;
3530 goto done;
3531 }
3532 ip_output_measure_bins = i;
3533 done:
3534 return (error);
3535 }
3536
3537 static int
3538 sysctl_ip_output_getperf SYSCTL_HANDLER_ARGS
3539 {
3540 #pragma unused(oidp, arg1, arg2)
3541 if (req->oldptr == USER_ADDR_NULL)
3542 req->oldlen = (size_t)sizeof (struct ipstat);
3543
3544 return (SYSCTL_OUT(req, &net_perf, MIN(sizeof (net_perf), req->oldlen)));
3545 }