/* bsd/netinet/ip_output.c — apple/xnu (xnu-3248.50.21) */
1 /*
2 * Copyright (c) 2000-2014 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28 /*
29 * Copyright (c) 1982, 1986, 1988, 1990, 1993
30 * The Regents of the University of California. All rights reserved.
31 *
32 * Redistribution and use in source and binary forms, with or without
33 * modification, are permitted provided that the following conditions
34 * are met:
35 * 1. Redistributions of source code must retain the above copyright
36 * notice, this list of conditions and the following disclaimer.
37 * 2. Redistributions in binary form must reproduce the above copyright
38 * notice, this list of conditions and the following disclaimer in the
39 * documentation and/or other materials provided with the distribution.
40 * 3. All advertising materials mentioning features or use of this software
41 * must display the following acknowledgement:
42 * This product includes software developed by the University of
43 * California, Berkeley and its contributors.
44 * 4. Neither the name of the University nor the names of its contributors
45 * may be used to endorse or promote products derived from this software
46 * without specific prior written permission.
47 *
48 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
49 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
52 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 * SUCH DAMAGE.
59 *
60 * @(#)ip_output.c 8.3 (Berkeley) 1/21/94
61 */
62 /*
63 * NOTICE: This file was modified by SPARTA, Inc. in 2005 to introduce
64 * support for mandatory and extensible security protections. This notice
65 * is included in support of clause 2.2 (b) of the Apple Public License,
66 * Version 2.0.
67 */
68
69 #define _IP_VHL
70
71 #include <sys/param.h>
72 #include <sys/systm.h>
73 #include <sys/kernel.h>
74 #include <sys/malloc.h>
75 #include <sys/mbuf.h>
76 #include <sys/protosw.h>
77 #include <sys/socket.h>
78 #include <sys/socketvar.h>
79 #include <kern/locks.h>
80 #include <sys/sysctl.h>
81 #include <sys/mcache.h>
82 #include <sys/kdebug.h>
83
84 #include <machine/endian.h>
85 #include <pexpert/pexpert.h>
86 #include <mach/sdt.h>
87
88 #include <libkern/OSAtomic.h>
89 #include <libkern/OSByteOrder.h>
90
91 #include <net/if.h>
92 #include <net/if_dl.h>
93 #include <net/if_types.h>
94 #include <net/route.h>
95 #include <net/ntstat.h>
96 #include <net/net_osdep.h>
97 #include <net/dlil.h>
98 #include <net/net_perf.h>
99
100 #include <netinet/in.h>
101 #include <netinet/in_systm.h>
102 #include <netinet/ip.h>
103 #include <netinet/in_pcb.h>
104 #include <netinet/in_var.h>
105 #include <netinet/ip_var.h>
106 #include <netinet/kpi_ipfilter_var.h>
107
108 #if CONFIG_MACF_NET
109 #include <security/mac_framework.h>
110 #endif /* CONFIG_MACF_NET */
111
112 #define DBG_LAYER_BEG NETDBG_CODE(DBG_NETIP, 1)
113 #define DBG_LAYER_END NETDBG_CODE(DBG_NETIP, 3)
114 #define DBG_FNC_IP_OUTPUT NETDBG_CODE(DBG_NETIP, (1 << 8) | 1)
115 #define DBG_FNC_IPSEC4_OUTPUT NETDBG_CODE(DBG_NETIP, (2 << 8) | 1)
116
117 #if IPSEC
118 #include <netinet6/ipsec.h>
119 #include <netkey/key.h>
120 #if IPSEC_DEBUG
121 #include <netkey/key_debug.h>
122 #else
123 #define KEYDEBUG(lev, arg)
124 #endif
125 #endif /* IPSEC */
126
127 #if NECP
128 #include <net/necp.h>
129 #endif /* NECP */
130
131 #if IPFIREWALL
132 #include <netinet/ip_fw.h>
133 #if IPDIVERT
134 #include <netinet/ip_divert.h>
135 #endif /* IPDIVERT */
136 #endif /* IPFIREWALL */
137
138 #if DUMMYNET
139 #include <netinet/ip_dummynet.h>
140 #endif
141
142 #if PF
143 #include <net/pfvar.h>
144 #endif /* PF */
145
#if IPFIREWALL_FORWARD && IPFIREWALL_FORWARD_DEBUG
/*
 * Debug helper: print an IPv4 address (struct in_addr) in dotted-quad form.
 * Each octet expression is u_int32_t, so use %u (with an explicit cast)
 * rather than %ld to keep format specifiers and arguments matched on LP64.
 * No trailing semicolon, so the macro composes safely with if/else; the
 * macro argument is parenthesized before member access.
 */
#define print_ip(a)	\
	printf("%u.%u.%u.%u", \
	    (unsigned int)((ntohl((a).s_addr) >> 24) & 0xFF), \
	    (unsigned int)((ntohl((a).s_addr) >> 16) & 0xFF), \
	    (unsigned int)((ntohl((a).s_addr) >> 8) & 0xFF), \
	    (unsigned int)(ntohl((a).s_addr) & 0xFF))
#endif /* IPFIREWALL_FORWARD && IPFIREWALL_FORWARD_DEBUG */
153
u_short ip_id;		/* legacy IP datagram id counter; not read in this file —
			 * ip_output_list() stamps ids via ip_randomid() instead */

/* sysctl handlers for the output performance-measurement knobs below */
static int sysctl_reset_ip_output_stats SYSCTL_HANDLER_ARGS;
static int sysctl_ip_output_measure_bins SYSCTL_HANDLER_ARGS;
static int sysctl_ip_output_getperf SYSCTL_HANDLER_ARGS;
static void ip_out_cksum_stats(int, u_int32_t);
/*
 * Insert IP options into an outgoing packet; returns the (possibly new)
 * chain head and stores the updated header length through the int pointer.
 */
static struct mbuf *ip_insertoptions(struct mbuf *, struct mbuf *, int *);
static int ip_optcopy(struct ip *, struct ip *);
static int ip_pcbopts(int, struct mbuf **, struct mbuf *);
/* refcount/alloc call-site tracer for ip_moptions (see ip_moptions_dbg) */
static void imo_trace(struct ip_moptions *, int);
/* loop a copy of an outbound multicast packet back to the local host */
static void ip_mloopback(struct ifnet *, struct ifnet *, struct mbuf *,
	struct sockaddr_in *, int);
/* scoped source-interface selection (debug logging: ip_select_srcif_debug) */
static struct ifaddr *in_selectsrcif(struct ip *, struct route *, unsigned int);

extern struct ip_linklocal_stat ip_linklocal_stat;

/* temporary: for testing */
#if IPSEC
extern int ipsec_bypass;	/* nonzero: skip the IPsec policy checks here */
#endif
174
/* high-water mark of packets handed to the link layer in a single chain */
static int ip_maxchainsent = 0;
SYSCTL_INT(_net_inet_ip, OID_AUTO, maxchainsent,
	CTLFLAG_RW | CTLFLAG_LOCKED, &ip_maxchainsent, 0,
	"use dlil_output_list");
#if DEBUG
/* debug aid: count of outgoing ECT packets to rewrite as ECN CE */
static int forge_ce = 0;
SYSCTL_INT(_net_inet_ip, OID_AUTO, forge_ce,
	CTLFLAG_RW | CTLFLAG_LOCKED, &forge_ce, 0,
	"Forge ECN CE");
#endif /* DEBUG */

/* nonzero: log source-interface selection decisions (in_selectsrcif) */
static int ip_select_srcif_debug = 0;
SYSCTL_INT(_net_inet_ip, OID_AUTO, select_srcif_debug,
	CTLFLAG_RW | CTLFLAG_LOCKED, &ip_select_srcif_debug, 0,
	"log source interface selection debug info");

/* nonzero: collect per-call output timing into net_perf below */
static int ip_output_measure = 0;
SYSCTL_PROC(_net_inet_ip, OID_AUTO, output_perf,
	CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
	&ip_output_measure, 0, sysctl_reset_ip_output_stats, "I",
	"Do time measurement");

/* histogram bin layout used for the chaining performance data */
static uint64_t ip_output_measure_bins = 0;
SYSCTL_PROC(_net_inet_ip, OID_AUTO, output_perf_bins,
	CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED, &ip_output_measure_bins, 0,
	sysctl_ip_output_measure_bins, "I",
	"bins for chaining performance data histogram");

/* accumulated measurements, exported read-only as struct net_perf */
static net_perf_t net_perf;
SYSCTL_PROC(_net_inet_ip, OID_AUTO, output_perf_data,
	CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
	0, 0, sysctl_ip_output_getperf, "S,net_perf",
	"IP output performance data (struct net_perf, net/net_perf.h)");
208
#define IMO_TRACE_HIST_SIZE	32	/* size of trace history */

/* For gdb */
__private_extern__ unsigned int imo_trace_hist_size = IMO_TRACE_HIST_SIZE;

/*
 * Debug wrapper around struct ip_moptions: records refcount activity and
 * the call sites that allocated/freed and referenced the object.  The
 * embedded imo must stay first so the wrapper can overlay a plain
 * ip_moptions when imo_debug is enabled.
 */
struct ip_moptions_dbg {
	struct ip_moptions	imo;			/* ip_moptions */
	u_int16_t		imo_refhold_cnt;	/* # of IMO_ADDREF */
	u_int16_t		imo_refrele_cnt;	/* # of IMO_REMREF */
	/*
	 * Alloc and free callers.
	 */
	ctrace_t		imo_alloc;
	ctrace_t		imo_free;
	/*
	 * Circular lists of IMO_ADDREF and IMO_REMREF callers.
	 */
	ctrace_t		imo_refhold[IMO_TRACE_HIST_SIZE];
	ctrace_t		imo_refrele[IMO_TRACE_HIST_SIZE];
};

#if DEBUG
static unsigned int imo_debug = 1;	/* debugging (enabled) */
#else
static unsigned int imo_debug;		/* debugging (disabled) */
#endif /* !DEBUG */
static unsigned int imo_size;		/* size of zone element */
static struct zone *imo_zone;		/* zone for ip_moptions */

#define	IMO_ZONE_MAX	64		/* maximum elements in zone */
#define	IMO_ZONE_NAME	"ip_moptions"	/* zone name */
/*
 * IP output, single-packet form.  The packet in mbuf chain m0 contains a
 * skeletal IP header (with len, off, ttl, proto, tos, src, dst).
 * The mbuf chain containing the packet will be freed; the mbuf opt, if
 * present, will not be freed.
 *
 * This is a convenience wrapper that delegates to ip_output_list() with a
 * packet-chain count of zero, preserving its return value unchanged.
 */
int
ip_output(struct mbuf *m0, struct mbuf *opt, struct route *ro, int flags,
    struct ip_moptions *imo, struct ip_out_args *ipoa)
{
	int err;

	err = ip_output_list(m0, 0, opt, ro, flags, imo, ipoa);
	return (err);
}
253
254 /*
255 * IP output. The packet in mbuf chain m contains a skeletal IP
256 * header (with len, off, ttl, proto, tos, src, dst).
257 * The mbuf chain containing the packet will be freed.
258 * The mbuf opt, if present, will not be freed.
259 *
260 * Route ro MUST be non-NULL; if ro->ro_rt is valid, route lookup would be
261 * skipped and ro->ro_rt would be used. Otherwise the result of route
262 * lookup is stored in ro->ro_rt.
263 *
264 * In the IP forwarding case, the packet will arrive with options already
265 * inserted, so must have a NULL opt pointer.
266 */
267 int
268 ip_output_list(struct mbuf *m0, int packetchain, struct mbuf *opt,
269 struct route *ro, int flags, struct ip_moptions *imo,
270 struct ip_out_args *ipoa)
271 {
272 struct ip *ip;
273 struct ifnet *ifp = NULL; /* not refcnt'd */
274 struct mbuf *m = m0, *prevnxt = NULL, **mppn = &prevnxt;
275 int hlen = sizeof (struct ip);
276 int len = 0, error = 0;
277 struct sockaddr_in *dst = NULL;
278 struct in_ifaddr *ia = NULL, *src_ia = NULL;
279 struct in_addr pkt_dst;
280 struct ipf_pktopts *ippo = NULL;
281 ipfilter_t inject_filter_ref = NULL;
282 struct mbuf *packetlist;
283 uint32_t sw_csum, pktcnt = 0, scnt = 0, bytecnt = 0;
284 uint32_t packets_processed = 0;
285 unsigned int ifscope = IFSCOPE_NONE;
286 struct flowadv *adv = NULL;
287 struct timeval start_tv;
288 #if IPSEC
289 struct socket *so = NULL;
290 struct secpolicy *sp = NULL;
291 #endif /* IPSEC */
292 #if NECP
293 necp_kernel_policy_result necp_result = 0;
294 necp_kernel_policy_result_parameter necp_result_parameter;
295 necp_kernel_policy_id necp_matched_policy_id = 0;
296 #endif /* NECP */
297 #if IPFIREWALL
298 int ipfwoff;
299 struct sockaddr_in *next_hop_from_ipfwd_tag = NULL;
300 #endif /* IPFIREWALL */
301 #if IPFIREWALL || DUMMYNET
302 struct m_tag *tag;
303 #endif /* IPFIREWALL || DUMMYNET */
304 #if DUMMYNET
305 struct ip_out_args saved_ipoa;
306 struct sockaddr_in dst_buf;
307 #endif /* DUMMYNET */
308 struct {
309 #if IPSEC
310 struct ipsec_output_state ipsec_state;
311 #endif /* IPSEC */
312 #if NECP
313 struct route necp_route;
314 #endif /* NECP */
315 #if IPFIREWALL || DUMMYNET
316 struct ip_fw_args args;
317 #endif /* IPFIREWALL || DUMMYNET */
318 #if IPFIREWALL_FORWARD
319 struct route sro_fwd;
320 #endif /* IPFIREWALL_FORWARD */
321 #if DUMMYNET
322 struct route saved_route;
323 #endif /* DUMMYNET */
324 struct ipf_pktopts ipf_pktopts;
325 } ipobz;
326 #define ipsec_state ipobz.ipsec_state
327 #define necp_route ipobz.necp_route
328 #define args ipobz.args
329 #define sro_fwd ipobz.sro_fwd
330 #define saved_route ipobz.saved_route
331 #define ipf_pktopts ipobz.ipf_pktopts
332 union {
333 struct {
334 boolean_t select_srcif : 1; /* set once */
335 boolean_t srcbound : 1; /* set once */
336 boolean_t nocell : 1; /* set once */
337 boolean_t isbroadcast : 1;
338 boolean_t didfilter : 1;
339 boolean_t noexpensive : 1; /* set once */
340 boolean_t awdl_unrestricted : 1; /* set once */
341 #if IPFIREWALL_FORWARD
342 boolean_t fwd_rewrite_src : 1;
343 #endif /* IPFIREWALL_FORWARD */
344 };
345 uint32_t raw;
346 } ipobf = { .raw = 0 };
347
348 #define IP_CHECK_RESTRICTIONS(_ifp, _ipobf) \
349 (((_ipobf).nocell && IFNET_IS_CELLULAR(_ifp)) || \
350 ((_ipobf).noexpensive && IFNET_IS_EXPENSIVE(_ifp)) || \
351 (!(_ipobf).awdl_unrestricted && IFNET_IS_AWDL_RESTRICTED(_ifp)))
352
353 if (ip_output_measure)
354 net_perf_start_time(&net_perf, &start_tv);
355 KERNEL_DEBUG(DBG_FNC_IP_OUTPUT | DBG_FUNC_START, 0, 0, 0, 0, 0);
356
357 VERIFY(m0->m_flags & M_PKTHDR);
358 packetlist = m0;
359
360 /* zero out {ipsec_state, args, sro_fwd, saved_route, ipf_pktops} */
361 bzero(&ipobz, sizeof (ipobz));
362 ippo = &ipf_pktopts;
363
364 #if IPFIREWALL || DUMMYNET
365 if (SLIST_EMPTY(&m0->m_pkthdr.tags))
366 goto ipfw_tags_done;
367
368 /* Grab info from mtags prepended to the chain */
369 #if DUMMYNET
370 if ((tag = m_tag_locate(m0, KERNEL_MODULE_TAG_ID,
371 KERNEL_TAG_TYPE_DUMMYNET, NULL)) != NULL) {
372 struct dn_pkt_tag *dn_tag;
373
374 dn_tag = (struct dn_pkt_tag *)(tag+1);
375 args.fwa_ipfw_rule = dn_tag->dn_ipfw_rule;
376 args.fwa_pf_rule = dn_tag->dn_pf_rule;
377 opt = NULL;
378 saved_route = dn_tag->dn_ro;
379 ro = &saved_route;
380
381 imo = NULL;
382 bcopy(&dn_tag->dn_dst, &dst_buf, sizeof (dst_buf));
383 dst = &dst_buf;
384 ifp = dn_tag->dn_ifp;
385 flags = dn_tag->dn_flags;
386 if ((dn_tag->dn_flags & IP_OUTARGS)) {
387 saved_ipoa = dn_tag->dn_ipoa;
388 ipoa = &saved_ipoa;
389 }
390
391 m_tag_delete(m0, tag);
392 }
393 #endif /* DUMMYNET */
394
395 #if IPDIVERT
396 if ((tag = m_tag_locate(m0, KERNEL_MODULE_TAG_ID,
397 KERNEL_TAG_TYPE_DIVERT, NULL)) != NULL) {
398 struct divert_tag *div_tag;
399
400 div_tag = (struct divert_tag *)(tag+1);
401 args.fwa_divert_rule = div_tag->cookie;
402
403 m_tag_delete(m0, tag);
404 }
405 #endif /* IPDIVERT */
406
407 #if IPFIREWALL
408 if ((tag = m_tag_locate(m0, KERNEL_MODULE_TAG_ID,
409 KERNEL_TAG_TYPE_IPFORWARD, NULL)) != NULL) {
410 struct ip_fwd_tag *ipfwd_tag;
411
412 ipfwd_tag = (struct ip_fwd_tag *)(tag+1);
413 next_hop_from_ipfwd_tag = ipfwd_tag->next_hop;
414
415 m_tag_delete(m0, tag);
416 }
417 #endif /* IPFIREWALL */
418
419 ipfw_tags_done:
420 #endif /* IPFIREWALL || DUMMYNET */
421
422 m = m0;
423 m->m_pkthdr.pkt_flags &= ~(PKTF_LOOP|PKTF_IFAINFO);
424
425 #if IPSEC
426 if (ipsec_bypass == 0 && !(flags & IP_NOIPSEC)) {
427 /* If packet is bound to an interface, check bound policies */
428 if ((flags & IP_OUTARGS) && (ipoa != NULL) &&
429 (ipoa->ipoa_flags & IPOAF_BOUND_IF) &&
430 ipoa->ipoa_boundif != IFSCOPE_NONE) {
431 if (ipsec4_getpolicybyinterface(m, IPSEC_DIR_OUTBOUND,
432 &flags, ipoa, &sp) != 0)
433 goto bad;
434 }
435 }
436 #endif /* IPSEC */
437
438 VERIFY(ro != NULL);
439
440 if (ip_doscopedroute && (flags & IP_OUTARGS)) {
441 /*
442 * In the forwarding case, only the ifscope value is used,
443 * as source interface selection doesn't take place.
444 */
445 if ((ipobf.select_srcif = (!(flags & IP_FORWARDING) &&
446 (ipoa->ipoa_flags & IPOAF_SELECT_SRCIF)))) {
447 ipf_pktopts.ippo_flags |= IPPOF_SELECT_SRCIF;
448 }
449
450 if ((ipoa->ipoa_flags & IPOAF_BOUND_IF) &&
451 ipoa->ipoa_boundif != IFSCOPE_NONE) {
452 ifscope = ipoa->ipoa_boundif;
453 ipf_pktopts.ippo_flags |=
454 (IPPOF_BOUND_IF | (ifscope << IPPOF_SHIFT_IFSCOPE));
455 }
456
457 /* double negation needed for bool bit field */
458 ipobf.srcbound = !!(ipoa->ipoa_flags & IPOAF_BOUND_SRCADDR);
459 if (ipobf.srcbound)
460 ipf_pktopts.ippo_flags |= IPPOF_BOUND_SRCADDR;
461 } else {
462 ipobf.select_srcif = FALSE;
463 ipobf.srcbound = FALSE;
464 ifscope = IFSCOPE_NONE;
465 if (flags & IP_OUTARGS) {
466 ipoa->ipoa_boundif = IFSCOPE_NONE;
467 ipoa->ipoa_flags &= ~(IPOAF_SELECT_SRCIF |
468 IPOAF_BOUND_IF | IPOAF_BOUND_SRCADDR);
469 }
470 }
471
472 if (flags & IP_OUTARGS) {
473 if (ipoa->ipoa_flags & IPOAF_NO_CELLULAR) {
474 ipobf.nocell = TRUE;
475 ipf_pktopts.ippo_flags |= IPPOF_NO_IFT_CELLULAR;
476 }
477 if (ipoa->ipoa_flags & IPOAF_NO_EXPENSIVE) {
478 ipobf.noexpensive = TRUE;
479 ipf_pktopts.ippo_flags |= IPPOF_NO_IFF_EXPENSIVE;
480 }
481 if (ipoa->ipoa_flags & IPOAF_AWDL_UNRESTRICTED)
482 ipobf.awdl_unrestricted = TRUE;
483 adv = &ipoa->ipoa_flowadv;
484 adv->code = FADV_SUCCESS;
485 ipoa->ipoa_retflags = 0;
486 }
487
488 #if IPSEC
489 if (ipsec_bypass == 0 && !(flags & IP_NOIPSEC)) {
490 so = ipsec_getsocket(m);
491 if (so != NULL) {
492 (void) ipsec_setsocket(m, NULL);
493 }
494 }
495 #endif /* IPSEC */
496
497 #if DUMMYNET
498 if (args.fwa_ipfw_rule != NULL || args.fwa_pf_rule != NULL) {
499 /* dummynet already saw us */
500 ip = mtod(m, struct ip *);
501 hlen = IP_VHL_HL(ip->ip_vhl) << 2;
502 pkt_dst = ip->ip_dst;
503 if (ro->ro_rt != NULL) {
504 RT_LOCK_SPIN(ro->ro_rt);
505 ia = (struct in_ifaddr *)ro->ro_rt->rt_ifa;
506 if (ia) {
507 /* Become a regular mutex */
508 RT_CONVERT_LOCK(ro->ro_rt);
509 IFA_ADDREF(&ia->ia_ifa);
510 }
511 RT_UNLOCK(ro->ro_rt);
512 }
513
514 #if IPFIREWALL
515 if (args.fwa_ipfw_rule != NULL)
516 goto skip_ipsec;
517 #endif /* IPFIREWALL */
518 if (args.fwa_pf_rule != NULL)
519 goto sendit;
520 }
521 #endif /* DUMMYNET */
522
523 loopit:
524 packets_processed++;
525 ipobf.isbroadcast = FALSE;
526 ipobf.didfilter = FALSE;
527 #if IPFIREWALL_FORWARD
528 ipobf.fwd_rewrite_src = FALSE;
529 #endif /* IPFIREWALL_FORWARD */
530
531 VERIFY(m->m_flags & M_PKTHDR);
532 /*
533 * No need to proccess packet twice if we've already seen it.
534 */
535 if (!SLIST_EMPTY(&m->m_pkthdr.tags))
536 inject_filter_ref = ipf_get_inject_filter(m);
537 else
538 inject_filter_ref = NULL;
539
540 if (opt) {
541 m = ip_insertoptions(m, opt, &len);
542 hlen = len;
543 /* Update the chain */
544 if (m != m0) {
545 if (m0 == packetlist)
546 packetlist = m;
547 m0 = m;
548 }
549 }
550 ip = mtod(m, struct ip *);
551
552 #if IPFIREWALL
553 /*
554 * rdar://8542331
555 *
556 * When dealing with a packet chain, we need to reset "next_hop"
557 * because "dst" may have been changed to the gateway address below
558 * for the previous packet of the chain. This could cause the route
559 * to be inavertandly changed to the route to the gateway address
560 * (instead of the route to the destination).
561 */
562 args.fwa_next_hop = next_hop_from_ipfwd_tag;
563 pkt_dst = args.fwa_next_hop ? args.fwa_next_hop->sin_addr : ip->ip_dst;
564 #else /* !IPFIREWALL */
565 pkt_dst = ip->ip_dst;
566 #endif /* !IPFIREWALL */
567
568 /*
569 * We must not send if the packet is destined to network zero.
570 * RFC1122 3.2.1.3 (a) and (b).
571 */
572 if (IN_ZERONET(ntohl(pkt_dst.s_addr))) {
573 error = EHOSTUNREACH;
574 goto bad;
575 }
576
577 /*
578 * Fill in IP header.
579 */
580 if (!(flags & (IP_FORWARDING|IP_RAWOUTPUT))) {
581 ip->ip_vhl = IP_MAKE_VHL(IPVERSION, hlen >> 2);
582 ip->ip_off &= IP_DF;
583 ip->ip_id = ip_randomid();
584 OSAddAtomic(1, &ipstat.ips_localout);
585 } else {
586 hlen = IP_VHL_HL(ip->ip_vhl) << 2;
587 }
588
589 #if DEBUG
590 /* For debugging, we let the stack forge congestion */
591 if (forge_ce != 0 &&
592 ((ip->ip_tos & IPTOS_ECN_MASK) == IPTOS_ECN_ECT1 ||
593 (ip->ip_tos & IPTOS_ECN_MASK) == IPTOS_ECN_ECT0)) {
594 ip->ip_tos = (ip->ip_tos & ~IPTOS_ECN_MASK) | IPTOS_ECN_CE;
595 forge_ce--;
596 }
597 #endif /* DEBUG */
598
599 KERNEL_DEBUG(DBG_LAYER_BEG, ip->ip_dst.s_addr, ip->ip_src.s_addr,
600 ip->ip_p, ip->ip_off, ip->ip_len);
601
602 dst = SIN(&ro->ro_dst);
603
604 /*
605 * If there is a cached route,
606 * check that it is to the same destination
607 * and is still up. If not, free it and try again.
608 * The address family should also be checked in case of sharing the
609 * cache with IPv6.
610 */
611
612 if (ro->ro_rt != NULL) {
613 if (ROUTE_UNUSABLE(ro) && ip->ip_src.s_addr != INADDR_ANY &&
614 !(flags & (IP_ROUTETOIF | IP_FORWARDING))) {
615 src_ia = ifa_foraddr(ip->ip_src.s_addr);
616 if (src_ia == NULL) {
617 error = EADDRNOTAVAIL;
618 goto bad;
619 }
620 IFA_REMREF(&src_ia->ia_ifa);
621 src_ia = NULL;
622 }
623 /*
624 * Test rt_flags without holding rt_lock for performance
625 * reasons; if the route is down it will hopefully be
626 * caught by the layer below (since it uses this route
627 * as a hint) or during the next transmit.
628 */
629 if (ROUTE_UNUSABLE(ro) || dst->sin_family != AF_INET ||
630 dst->sin_addr.s_addr != pkt_dst.s_addr)
631 ROUTE_RELEASE(ro);
632
633 /*
634 * If we're doing source interface selection, we may not
635 * want to use this route; only synch up the generation
636 * count otherwise.
637 */
638 if (!ipobf.select_srcif && ro->ro_rt != NULL &&
639 RT_GENID_OUTOFSYNC(ro->ro_rt))
640 RT_GENID_SYNC(ro->ro_rt);
641 }
642 if (ro->ro_rt == NULL) {
643 bzero(dst, sizeof (*dst));
644 dst->sin_family = AF_INET;
645 dst->sin_len = sizeof (*dst);
646 dst->sin_addr = pkt_dst;
647 }
648 /*
649 * If routing to interface only,
650 * short circuit routing lookup.
651 */
652 if (flags & IP_ROUTETOIF) {
653 if (ia != NULL)
654 IFA_REMREF(&ia->ia_ifa);
655 if ((ia = ifatoia(ifa_ifwithdstaddr(sintosa(dst)))) == NULL) {
656 ia = ifatoia(ifa_ifwithnet(sintosa(dst)));
657 if (ia == NULL) {
658 OSAddAtomic(1, &ipstat.ips_noroute);
659 error = ENETUNREACH;
660 goto bad;
661 }
662 }
663 ifp = ia->ia_ifp;
664 ip->ip_ttl = 1;
665 ipobf.isbroadcast = in_broadcast(dst->sin_addr, ifp);
666 /*
667 * For consistency with other cases below. Loopback
668 * multicast case is handled separately by ip_mloopback().
669 */
670 if ((ifp->if_flags & IFF_LOOPBACK) &&
671 !IN_MULTICAST(ntohl(pkt_dst.s_addr))) {
672 m->m_pkthdr.rcvif = ifp;
673 ip_setsrcifaddr_info(m, ifp->if_index, NULL);
674 ip_setdstifaddr_info(m, ifp->if_index, NULL);
675 }
676 } else if (IN_MULTICAST(ntohl(pkt_dst.s_addr)) &&
677 imo != NULL && (ifp = imo->imo_multicast_ifp) != NULL) {
678 /*
679 * Bypass the normal routing lookup for multicast
680 * packets if the interface is specified.
681 */
682 ipobf.isbroadcast = FALSE;
683 if (ia != NULL)
684 IFA_REMREF(&ia->ia_ifa);
685
686 /* Macro takes reference on ia */
687 IFP_TO_IA(ifp, ia);
688 } else {
689 struct ifaddr *ia0 = NULL;
690 boolean_t cloneok = FALSE;
691 /*
692 * Perform source interface selection; the source IP address
693 * must belong to one of the addresses of the interface used
694 * by the route. For performance reasons, do this only if
695 * there is no route, or if the routing table has changed,
696 * or if we haven't done source interface selection on this
697 * route (for this PCB instance) before.
698 */
699 if (ipobf.select_srcif &&
700 ip->ip_src.s_addr != INADDR_ANY && (ROUTE_UNUSABLE(ro) ||
701 !(ro->ro_flags & ROF_SRCIF_SELECTED))) {
702 /* Find the source interface */
703 ia0 = in_selectsrcif(ip, ro, ifscope);
704
705 /*
706 * If the source address belongs to a restricted
707 * interface and the caller forbids our using
708 * interfaces of such type, pretend that there is no
709 * route.
710 */
711 if (ia0 != NULL &&
712 IP_CHECK_RESTRICTIONS(ia0->ifa_ifp, ipobf)) {
713 IFA_REMREF(ia0);
714 ia0 = NULL;
715 error = EHOSTUNREACH;
716 if (flags & IP_OUTARGS)
717 ipoa->ipoa_retflags |= IPOARF_IFDENIED;
718 goto bad;
719 }
720
721 /*
722 * If the source address is spoofed (in the case of
723 * IP_RAWOUTPUT on an unbounded socket), or if this
724 * is destined for local/loopback, just let it go out
725 * using the interface of the route. Otherwise,
726 * there's no interface having such an address,
727 * so bail out.
728 */
729 if (ia0 == NULL && (!(flags & IP_RAWOUTPUT) ||
730 ipobf.srcbound) && ifscope != lo_ifp->if_index) {
731 error = EADDRNOTAVAIL;
732 goto bad;
733 }
734
735 /*
736 * If the caller didn't explicitly specify the scope,
737 * pick it up from the source interface. If the cached
738 * route was wrong and was blown away as part of source
739 * interface selection, don't mask out RTF_PRCLONING
740 * since that route may have been allocated by the ULP,
741 * unless the IP header was created by the caller or
742 * the destination is IPv4 LLA. The check for the
743 * latter is needed because IPv4 LLAs are never scoped
744 * in the current implementation, and we don't want to
745 * replace the resolved IPv4 LLA route with one whose
746 * gateway points to that of the default gateway on
747 * the primary interface of the system.
748 */
749 if (ia0 != NULL) {
750 if (ifscope == IFSCOPE_NONE)
751 ifscope = ia0->ifa_ifp->if_index;
752 cloneok = (!(flags & IP_RAWOUTPUT) &&
753 !(IN_LINKLOCAL(ntohl(ip->ip_dst.s_addr))));
754 }
755 }
756
757 /*
758 * If this is the case, we probably don't want to allocate
759 * a protocol-cloned route since we didn't get one from the
760 * ULP. This lets TCP do its thing, while not burdening
761 * forwarding or ICMP with the overhead of cloning a route.
762 * Of course, we still want to do any cloning requested by
763 * the link layer, as this is probably required in all cases
764 * for correct operation (as it is for ARP).
765 */
766 if (ro->ro_rt == NULL) {
767 unsigned long ign = RTF_PRCLONING;
768 /*
769 * We make an exception here: if the destination
770 * address is INADDR_BROADCAST, allocate a protocol-
771 * cloned host route so that we end up with a route
772 * marked with the RTF_BROADCAST flag. Otherwise,
773 * we would end up referring to the default route,
774 * instead of creating a cloned host route entry.
775 * That would introduce inconsistencies between ULPs
776 * that allocate a route and those that don't. The
777 * RTF_BROADCAST route is important since we'd want
778 * to send out undirected IP broadcast packets using
779 * link-level broadcast address. Another exception
780 * is for ULP-created routes that got blown away by
781 * source interface selection (see above).
782 *
783 * These exceptions will no longer be necessary when
784 * the RTF_PRCLONING scheme is no longer present.
785 */
786 if (cloneok || dst->sin_addr.s_addr == INADDR_BROADCAST)
787 ign &= ~RTF_PRCLONING;
788
789 /*
790 * Loosen the route lookup criteria if the ifscope
791 * corresponds to the loopback interface; this is
792 * needed to support Application Layer Gateways
793 * listening on loopback, in conjunction with packet
794 * filter redirection rules. The final source IP
795 * address will be rewritten by the packet filter
796 * prior to the RFC1122 loopback check below.
797 */
798 if (ifscope == lo_ifp->if_index)
799 rtalloc_ign(ro, ign);
800 else
801 rtalloc_scoped_ign(ro, ign, ifscope);
802
803 /*
804 * If the route points to a cellular/expensive interface
805 * and the caller forbids our using interfaces of such type,
806 * pretend that there is no route.
807 */
808 if (ro->ro_rt != NULL) {
809 RT_LOCK_SPIN(ro->ro_rt);
810 if (IP_CHECK_RESTRICTIONS(ro->ro_rt->rt_ifp,
811 ipobf)) {
812 RT_UNLOCK(ro->ro_rt);
813 ROUTE_RELEASE(ro);
814 if (flags & IP_OUTARGS) {
815 ipoa->ipoa_retflags |=
816 IPOARF_IFDENIED;
817 }
818 } else {
819 RT_UNLOCK(ro->ro_rt);
820 }
821 }
822 }
823
824 if (ro->ro_rt == NULL) {
825 OSAddAtomic(1, &ipstat.ips_noroute);
826 error = EHOSTUNREACH;
827 if (ia0 != NULL) {
828 IFA_REMREF(ia0);
829 ia0 = NULL;
830 }
831 goto bad;
832 }
833
834 if (ia != NULL)
835 IFA_REMREF(&ia->ia_ifa);
836 RT_LOCK_SPIN(ro->ro_rt);
837 ia = ifatoia(ro->ro_rt->rt_ifa);
838 if (ia != NULL) {
839 /* Become a regular mutex */
840 RT_CONVERT_LOCK(ro->ro_rt);
841 IFA_ADDREF(&ia->ia_ifa);
842 }
843 /*
844 * Note: ia_ifp may not be the same as rt_ifp; the latter
845 * is what we use for determining outbound i/f, mtu, etc.
846 */
847 ifp = ro->ro_rt->rt_ifp;
848 ro->ro_rt->rt_use++;
849 if (ro->ro_rt->rt_flags & RTF_GATEWAY) {
850 dst = SIN(ro->ro_rt->rt_gateway);
851 }
852 if (ro->ro_rt->rt_flags & RTF_HOST) {
853 /* double negation needed for bool bit field */
854 ipobf.isbroadcast =
855 !!(ro->ro_rt->rt_flags & RTF_BROADCAST);
856 } else {
857 /* Become a regular mutex */
858 RT_CONVERT_LOCK(ro->ro_rt);
859 ipobf.isbroadcast = in_broadcast(dst->sin_addr, ifp);
860 }
861 /*
862 * For consistency with IPv6, as well as to ensure that
863 * IP_RECVIF is set correctly for packets that are sent
864 * to one of the local addresses. ia (rt_ifa) would have
865 * been fixed up by rt_setif for local routes. This
866 * would make it appear as if the packet arrives on the
867 * interface which owns the local address. Loopback
868 * multicast case is handled separately by ip_mloopback().
869 */
870 if (ia != NULL && (ifp->if_flags & IFF_LOOPBACK) &&
871 !IN_MULTICAST(ntohl(pkt_dst.s_addr))) {
872 uint32_t srcidx;
873
874 m->m_pkthdr.rcvif = ia->ia_ifa.ifa_ifp;
875
876 if (ia0 != NULL)
877 srcidx = ia0->ifa_ifp->if_index;
878 else if ((ro->ro_flags & ROF_SRCIF_SELECTED) &&
879 ro->ro_srcia != NULL)
880 srcidx = ro->ro_srcia->ifa_ifp->if_index;
881 else
882 srcidx = 0;
883
884 ip_setsrcifaddr_info(m, srcidx, NULL);
885 ip_setdstifaddr_info(m, 0, ia);
886 }
887 RT_UNLOCK(ro->ro_rt);
888 if (ia0 != NULL) {
889 IFA_REMREF(ia0);
890 ia0 = NULL;
891 }
892 }
893
894 if (IN_MULTICAST(ntohl(pkt_dst.s_addr))) {
895 struct ifnet *srcifp = NULL;
896 struct in_multi *inm;
897 u_int32_t vif;
898 u_int8_t ttl = IP_DEFAULT_MULTICAST_TTL;
899 u_int8_t loop = IP_DEFAULT_MULTICAST_LOOP;
900
901 m->m_flags |= M_MCAST;
902 /*
903 * IP destination address is multicast. Make sure "dst"
904 * still points to the address in "ro". (It may have been
905 * changed to point to a gateway address, above.)
906 */
907 dst = SIN(&ro->ro_dst);
908 /*
909 * See if the caller provided any multicast options
910 */
911 if (imo != NULL) {
912 IMO_LOCK(imo);
913 vif = imo->imo_multicast_vif;
914 ttl = imo->imo_multicast_ttl;
915 loop = imo->imo_multicast_loop;
916 if (!(flags & IP_RAWOUTPUT))
917 ip->ip_ttl = ttl;
918 if (imo->imo_multicast_ifp != NULL)
919 ifp = imo->imo_multicast_ifp;
920 IMO_UNLOCK(imo);
921 } else if (!(flags & IP_RAWOUTPUT)) {
922 vif = -1;
923 ip->ip_ttl = ttl;
924 }
925 /*
926 * Confirm that the outgoing interface supports multicast.
927 */
928 if (imo == NULL || vif == -1) {
929 if (!(ifp->if_flags & IFF_MULTICAST)) {
930 OSAddAtomic(1, &ipstat.ips_noroute);
931 error = ENETUNREACH;
932 goto bad;
933 }
934 }
935 /*
936 * If source address not specified yet, use address
937 * of outgoing interface.
938 */
939 if (ip->ip_src.s_addr == INADDR_ANY) {
940 struct in_ifaddr *ia1;
941 lck_rw_lock_shared(in_ifaddr_rwlock);
942 TAILQ_FOREACH(ia1, &in_ifaddrhead, ia_link) {
943 IFA_LOCK_SPIN(&ia1->ia_ifa);
944 if (ia1->ia_ifp == ifp) {
945 ip->ip_src = IA_SIN(ia1)->sin_addr;
946 srcifp = ifp;
947 IFA_UNLOCK(&ia1->ia_ifa);
948 break;
949 }
950 IFA_UNLOCK(&ia1->ia_ifa);
951 }
952 lck_rw_done(in_ifaddr_rwlock);
953 if (ip->ip_src.s_addr == INADDR_ANY) {
954 error = ENETUNREACH;
955 goto bad;
956 }
957 }
958
959 in_multihead_lock_shared();
960 IN_LOOKUP_MULTI(&pkt_dst, ifp, inm);
961 in_multihead_lock_done();
962 if (inm != NULL && (imo == NULL || loop)) {
963 /*
964 * If we belong to the destination multicast group
965 * on the outgoing interface, and the caller did not
966 * forbid loopback, loop back a copy.
967 */
968 if (!TAILQ_EMPTY(&ipv4_filters)) {
969 struct ipfilter *filter;
970 int seen = (inject_filter_ref == NULL);
971
972 if (imo != NULL) {
973 ipf_pktopts.ippo_flags |=
974 IPPOF_MCAST_OPTS;
975 ipf_pktopts.ippo_mcast_ifnet = ifp;
976 ipf_pktopts.ippo_mcast_ttl = ttl;
977 ipf_pktopts.ippo_mcast_loop = loop;
978 }
979
980 ipf_ref();
981
982 /*
983 * 4135317 - always pass network byte
984 * order to filter
985 */
986 #if BYTE_ORDER != BIG_ENDIAN
987 HTONS(ip->ip_len);
988 HTONS(ip->ip_off);
989 #endif
990 TAILQ_FOREACH(filter, &ipv4_filters, ipf_link) {
991 if (seen == 0) {
992 if ((struct ipfilter *)
993 inject_filter_ref == filter)
994 seen = 1;
995 } else if (filter->ipf_filter.
996 ipf_output != NULL) {
997 errno_t result;
998 result = filter->ipf_filter.
999 ipf_output(filter->
1000 ipf_filter.cookie,
1001 (mbuf_t *)&m, ippo);
1002 if (result == EJUSTRETURN) {
1003 ipf_unref();
1004 INM_REMREF(inm);
1005 goto done;
1006 }
1007 if (result != 0) {
1008 ipf_unref();
1009 INM_REMREF(inm);
1010 goto bad;
1011 }
1012 }
1013 }
1014
1015 /* set back to host byte order */
1016 ip = mtod(m, struct ip *);
1017 #if BYTE_ORDER != BIG_ENDIAN
1018 NTOHS(ip->ip_len);
1019 NTOHS(ip->ip_off);
1020 #endif
1021 ipf_unref();
1022 ipobf.didfilter = TRUE;
1023 }
1024 ip_mloopback(srcifp, ifp, m, dst, hlen);
1025 }
1026 if (inm != NULL)
1027 INM_REMREF(inm);
1028 /*
1029 * Multicasts with a time-to-live of zero may be looped-
1030 * back, above, but must not be transmitted on a network.
1031 * Also, multicasts addressed to the loopback interface
1032 * are not sent -- the above call to ip_mloopback() will
1033 * loop back a copy if this host actually belongs to the
1034 * destination group on the loopback interface.
1035 */
1036 if (ip->ip_ttl == 0 || ifp->if_flags & IFF_LOOPBACK) {
1037 m_freem(m);
1038 goto done;
1039 }
1040
1041 goto sendit;
1042 }
1043 /*
1044 * If source address not specified yet, use address
1045 * of outgoing interface.
1046 */
1047 if (ip->ip_src.s_addr == INADDR_ANY) {
1048 IFA_LOCK_SPIN(&ia->ia_ifa);
1049 ip->ip_src = IA_SIN(ia)->sin_addr;
1050 IFA_UNLOCK(&ia->ia_ifa);
1051 #if IPFIREWALL_FORWARD
1052 /*
1053 * Keep note that we did this - if the firewall changes
1054 * the next-hop, our interface may change, changing the
1055 * default source IP. It's a shame so much effort happens
1056 * twice. Oh well.
1057 */
1058 ipobf.fwd_rewrite_src = TRUE;
1059 #endif /* IPFIREWALL_FORWARD */
1060 }
1061
1062 /*
1063 * Look for broadcast address and
1064 * and verify user is allowed to send
1065 * such a packet.
1066 */
1067 if (ipobf.isbroadcast) {
1068 if (!(ifp->if_flags & IFF_BROADCAST)) {
1069 error = EADDRNOTAVAIL;
1070 goto bad;
1071 }
1072 if (!(flags & IP_ALLOWBROADCAST)) {
1073 error = EACCES;
1074 goto bad;
1075 }
1076 /* don't allow broadcast messages to be fragmented */
1077 if ((u_short)ip->ip_len > ifp->if_mtu) {
1078 error = EMSGSIZE;
1079 goto bad;
1080 }
1081 m->m_flags |= M_BCAST;
1082 } else {
1083 m->m_flags &= ~M_BCAST;
1084 }
1085
1086 sendit:
1087 #if PF
1088 /* Invoke outbound packet filter */
1089 if (PF_IS_ENABLED) {
1090 int rc;
1091
1092 m0 = m; /* Save for later */
1093 #if DUMMYNET
1094 args.fwa_m = m;
1095 args.fwa_next_hop = dst;
1096 args.fwa_oif = ifp;
1097 args.fwa_ro = ro;
1098 args.fwa_dst = dst;
1099 args.fwa_oflags = flags;
1100 if (flags & IP_OUTARGS)
1101 args.fwa_ipoa = ipoa;
1102 rc = pf_af_hook(ifp, mppn, &m, AF_INET, FALSE, &args);
1103 #else /* DUMMYNET */
1104 rc = pf_af_hook(ifp, mppn, &m, AF_INET, FALSE, NULL);
1105 #endif /* DUMMYNET */
1106 if (rc != 0 || m == NULL) {
1107 /* Move to the next packet */
1108 m = *mppn;
1109
1110 /* Skip ahead if first packet in list got dropped */
1111 if (packetlist == m0)
1112 packetlist = m;
1113
1114 if (m != NULL) {
1115 m0 = m;
1116 /* Next packet in the chain */
1117 goto loopit;
1118 } else if (packetlist != NULL) {
1119 /* No more packet; send down the chain */
1120 goto sendchain;
1121 }
1122 /* Nothing left; we're done */
1123 goto done;
1124 }
1125 m0 = m;
1126 ip = mtod(m, struct ip *);
1127 pkt_dst = ip->ip_dst;
1128 hlen = IP_VHL_HL(ip->ip_vhl) << 2;
1129 }
1130 #endif /* PF */
1131 /*
1132 * Force IP TTL to 255 following draft-ietf-zeroconf-ipv4-linklocal.txt
1133 */
1134 if (IN_LINKLOCAL(ntohl(ip->ip_src.s_addr)) ||
1135 IN_LINKLOCAL(ntohl(ip->ip_dst.s_addr))) {
1136 ip_linklocal_stat.iplls_out_total++;
1137 if (ip->ip_ttl != MAXTTL) {
1138 ip_linklocal_stat.iplls_out_badttl++;
1139 ip->ip_ttl = MAXTTL;
1140 }
1141 }
1142
1143 if (!ipobf.didfilter && !TAILQ_EMPTY(&ipv4_filters)) {
1144 struct ipfilter *filter;
1145 int seen = (inject_filter_ref == NULL);
1146 ipf_pktopts.ippo_flags &= ~IPPOF_MCAST_OPTS;
1147
1148 /*
1149 * Check that a TSO frame isn't passed to a filter.
1150 * This could happen if a filter is inserted while
1151 * TCP is sending the TSO packet.
1152 */
1153 if (m->m_pkthdr.csum_flags & CSUM_TSO_IPV4) {
1154 error = EMSGSIZE;
1155 goto bad;
1156 }
1157
1158 ipf_ref();
1159
1160 /* 4135317 - always pass network byte order to filter */
1161 #if BYTE_ORDER != BIG_ENDIAN
1162 HTONS(ip->ip_len);
1163 HTONS(ip->ip_off);
1164 #endif
1165 TAILQ_FOREACH(filter, &ipv4_filters, ipf_link) {
1166 if (seen == 0) {
1167 if ((struct ipfilter *)inject_filter_ref ==
1168 filter)
1169 seen = 1;
1170 } else if (filter->ipf_filter.ipf_output) {
1171 errno_t result;
1172 result = filter->ipf_filter.
1173 ipf_output(filter->ipf_filter.cookie,
1174 (mbuf_t *)&m, ippo);
1175 if (result == EJUSTRETURN) {
1176 ipf_unref();
1177 goto done;
1178 }
1179 if (result != 0) {
1180 ipf_unref();
1181 goto bad;
1182 }
1183 }
1184 }
1185 /* set back to host byte order */
1186 ip = mtod(m, struct ip *);
1187 #if BYTE_ORDER != BIG_ENDIAN
1188 NTOHS(ip->ip_len);
1189 NTOHS(ip->ip_off);
1190 #endif
1191 ipf_unref();
1192 }
1193
1194 #if NECP
1195 /* Process Network Extension Policy. Will Pass, Drop, or Rebind packet. */
1196 necp_matched_policy_id = necp_ip_output_find_policy_match (m,
1197 flags, (flags & IP_OUTARGS) ? ipoa : NULL, &necp_result, &necp_result_parameter);
1198 if (necp_matched_policy_id) {
1199 necp_mark_packet_from_ip(m, necp_matched_policy_id);
1200 switch (necp_result) {
1201 case NECP_KERNEL_POLICY_RESULT_PASS:
1202 /* Check if the interface is allowed */
1203 if (!necp_packet_is_allowed_over_interface(m, ifp)) {
1204 error = EHOSTUNREACH;
1205 goto bad;
1206 }
1207 goto skip_ipsec;
1208 case NECP_KERNEL_POLICY_RESULT_DROP:
1209 case NECP_KERNEL_POLICY_RESULT_SOCKET_DIVERT:
1210 /* Flow divert packets should be blocked at the IP layer */
1211 error = EHOSTUNREACH;
1212 goto bad;
1213 case NECP_KERNEL_POLICY_RESULT_IP_TUNNEL: {
1214 /* Verify that the packet is being routed to the tunnel */
1215 struct ifnet *policy_ifp = necp_get_ifnet_from_result_parameter(&necp_result_parameter);
1216 if (policy_ifp == ifp) {
1217 /* Check if the interface is allowed */
1218 if (!necp_packet_is_allowed_over_interface(m, ifp)) {
1219 error = EHOSTUNREACH;
1220 goto bad;
1221 }
1222 goto skip_ipsec;
1223 } else {
1224 if (necp_packet_can_rebind_to_ifnet(m, policy_ifp, &necp_route, AF_INET)) {
1225 /* Check if the interface is allowed */
1226 if (!necp_packet_is_allowed_over_interface(m, policy_ifp)) {
1227 error = EHOSTUNREACH;
1228 goto bad;
1229 }
1230
1231 /* Set ifp to the tunnel interface, since it is compatible with the packet */
1232 ifp = policy_ifp;
1233 ro = &necp_route;
1234 goto skip_ipsec;
1235 } else {
1236 error = ENETUNREACH;
1237 goto bad;
1238 }
1239 }
1240 break;
1241 }
1242 default:
1243 break;
1244 }
1245 }
1246 /* Catch-all to check if the interface is allowed */
1247 if (!necp_packet_is_allowed_over_interface(m, ifp)) {
1248 error = EHOSTUNREACH;
1249 goto bad;
1250 }
1251 #endif /* NECP */
1252
1253 #if IPSEC
1254 if (ipsec_bypass != 0 || (flags & IP_NOIPSEC))
1255 goto skip_ipsec;
1256
1257 KERNEL_DEBUG(DBG_FNC_IPSEC4_OUTPUT | DBG_FUNC_START, 0, 0, 0, 0, 0);
1258
1259 if (sp == NULL) {
1260 /* get SP for this packet */
1261 if (so != NULL) {
1262 sp = ipsec4_getpolicybysock(m, IPSEC_DIR_OUTBOUND,
1263 so, &error);
1264 } else {
1265 sp = ipsec4_getpolicybyaddr(m, IPSEC_DIR_OUTBOUND,
1266 flags, &error);
1267 }
1268 if (sp == NULL) {
1269 IPSEC_STAT_INCREMENT(ipsecstat.out_inval);
1270 KERNEL_DEBUG(DBG_FNC_IPSEC4_OUTPUT | DBG_FUNC_END,
1271 0, 0, 0, 0, 0);
1272 goto bad;
1273 }
1274 }
1275
1276 error = 0;
1277
1278 /* check policy */
1279 switch (sp->policy) {
1280 case IPSEC_POLICY_DISCARD:
1281 case IPSEC_POLICY_GENERATE:
1282 /*
1283 * This packet is just discarded.
1284 */
1285 IPSEC_STAT_INCREMENT(ipsecstat.out_polvio);
1286 KERNEL_DEBUG(DBG_FNC_IPSEC4_OUTPUT | DBG_FUNC_END,
1287 1, 0, 0, 0, 0);
1288 goto bad;
1289
1290 case IPSEC_POLICY_BYPASS:
1291 case IPSEC_POLICY_NONE:
1292 /* no need to do IPsec. */
1293 KERNEL_DEBUG(DBG_FNC_IPSEC4_OUTPUT | DBG_FUNC_END,
1294 2, 0, 0, 0, 0);
1295 goto skip_ipsec;
1296
1297 case IPSEC_POLICY_IPSEC:
1298 if (sp->req == NULL) {
1299 /* acquire a policy */
1300 error = key_spdacquire(sp);
1301 KERNEL_DEBUG(DBG_FNC_IPSEC4_OUTPUT | DBG_FUNC_END,
1302 3, 0, 0, 0, 0);
1303 goto bad;
1304 }
1305 if (sp->ipsec_if) {
1306 /* Verify the redirect to ipsec interface */
1307 if (sp->ipsec_if == ifp) {
1308 goto skip_ipsec;
1309 }
1310 goto bad;
1311 }
1312 break;
1313
1314 case IPSEC_POLICY_ENTRUST:
1315 default:
1316 printf("ip_output: Invalid policy found. %d\n", sp->policy);
1317 }
1318 {
1319 ipsec_state.m = m;
1320 if (flags & IP_ROUTETOIF) {
1321 bzero(&ipsec_state.ro, sizeof (ipsec_state.ro));
1322 } else {
1323 route_copyout(&ipsec_state.ro, ro, sizeof (ipsec_state.ro));
1324 }
1325 ipsec_state.dst = SA(dst);
1326
1327 ip->ip_sum = 0;
1328
1329 /*
1330 * XXX
1331 * delayed checksums are not currently compatible with IPsec
1332 */
1333 if (m->m_pkthdr.csum_flags & CSUM_DELAY_DATA)
1334 in_delayed_cksum(m);
1335
1336 #if BYTE_ORDER != BIG_ENDIAN
1337 HTONS(ip->ip_len);
1338 HTONS(ip->ip_off);
1339 #endif
1340
1341 DTRACE_IP6(send, struct mbuf *, m, struct inpcb *, NULL,
1342 struct ip *, ip, struct ifnet *, ifp,
1343 struct ip *, ip, struct ip6_hdr *, NULL);
1344
1345 error = ipsec4_output(&ipsec_state, sp, flags);
1346 if (ipsec_state.tunneled == 6) {
1347 m0 = m = NULL;
1348 error = 0;
1349 goto bad;
1350 }
1351
1352 m0 = m = ipsec_state.m;
1353
1354 #if DUMMYNET
1355 /*
1356 * If we're about to use the route in ipsec_state
1357 * and this came from dummynet, cleaup now.
1358 */
1359 if (ro == &saved_route &&
1360 (!(flags & IP_ROUTETOIF) || ipsec_state.tunneled))
1361 ROUTE_RELEASE(ro);
1362 #endif /* DUMMYNET */
1363
1364 if (flags & IP_ROUTETOIF) {
1365 /*
1366 * if we have tunnel mode SA, we may need to ignore
1367 * IP_ROUTETOIF.
1368 */
1369 if (ipsec_state.tunneled) {
1370 flags &= ~IP_ROUTETOIF;
1371 ro = &ipsec_state.ro;
1372 }
1373 } else {
1374 ro = &ipsec_state.ro;
1375 }
1376 dst = SIN(ipsec_state.dst);
1377 if (error) {
1378 /* mbuf is already reclaimed in ipsec4_output. */
1379 m0 = NULL;
1380 switch (error) {
1381 case EHOSTUNREACH:
1382 case ENETUNREACH:
1383 case EMSGSIZE:
1384 case ENOBUFS:
1385 case ENOMEM:
1386 break;
1387 default:
1388 printf("ip4_output (ipsec): error code %d\n", error);
1389 /* FALLTHRU */
1390 case ENOENT:
1391 /* don't show these error codes to the user */
1392 error = 0;
1393 break;
1394 }
1395 KERNEL_DEBUG(DBG_FNC_IPSEC4_OUTPUT | DBG_FUNC_END,
1396 4, 0, 0, 0, 0);
1397 goto bad;
1398 }
1399 }
1400
1401 /* be sure to update variables that are affected by ipsec4_output() */
1402 ip = mtod(m, struct ip *);
1403
1404 #ifdef _IP_VHL
1405 hlen = IP_VHL_HL(ip->ip_vhl) << 2;
1406 #else /* !_IP_VHL */
1407 hlen = ip->ip_hl << 2;
1408 #endif /* !_IP_VHL */
1409 /* Check that there wasn't a route change and src is still valid */
1410 if (ROUTE_UNUSABLE(ro)) {
1411 ROUTE_RELEASE(ro);
1412 VERIFY(src_ia == NULL);
1413 if (ip->ip_src.s_addr != INADDR_ANY &&
1414 !(flags & (IP_ROUTETOIF | IP_FORWARDING)) &&
1415 (src_ia = ifa_foraddr(ip->ip_src.s_addr)) == NULL) {
1416 error = EADDRNOTAVAIL;
1417 KERNEL_DEBUG(DBG_FNC_IPSEC4_OUTPUT | DBG_FUNC_END,
1418 5, 0, 0, 0, 0);
1419 goto bad;
1420 }
1421 if (src_ia != NULL) {
1422 IFA_REMREF(&src_ia->ia_ifa);
1423 src_ia = NULL;
1424 }
1425 }
1426
1427 if (ro->ro_rt == NULL) {
1428 if (!(flags & IP_ROUTETOIF)) {
1429 printf("%s: can't update route after "
1430 "IPsec processing\n", __func__);
1431 error = EHOSTUNREACH; /* XXX */
1432 KERNEL_DEBUG(DBG_FNC_IPSEC4_OUTPUT | DBG_FUNC_END,
1433 6, 0, 0, 0, 0);
1434 goto bad;
1435 }
1436 } else {
1437 if (ia != NULL)
1438 IFA_REMREF(&ia->ia_ifa);
1439 RT_LOCK_SPIN(ro->ro_rt);
1440 ia = ifatoia(ro->ro_rt->rt_ifa);
1441 if (ia != NULL) {
1442 /* Become a regular mutex */
1443 RT_CONVERT_LOCK(ro->ro_rt);
1444 IFA_ADDREF(&ia->ia_ifa);
1445 }
1446 ifp = ro->ro_rt->rt_ifp;
1447 RT_UNLOCK(ro->ro_rt);
1448 }
1449
1450 /* make it flipped, again. */
1451 #if BYTE_ORDER != BIG_ENDIAN
1452 NTOHS(ip->ip_len);
1453 NTOHS(ip->ip_off);
1454 #endif
1455 KERNEL_DEBUG(DBG_FNC_IPSEC4_OUTPUT | DBG_FUNC_END,
1456 7, 0xff, 0xff, 0xff, 0xff);
1457
1458 /* Pass to filters again */
1459 if (!TAILQ_EMPTY(&ipv4_filters)) {
1460 struct ipfilter *filter;
1461
1462 ipf_pktopts.ippo_flags &= ~IPPOF_MCAST_OPTS;
1463
1464 /*
1465 * Check that a TSO frame isn't passed to a filter.
1466 * This could happen if a filter is inserted while
1467 * TCP is sending the TSO packet.
1468 */
1469 if (m->m_pkthdr.csum_flags & CSUM_TSO_IPV4) {
1470 error = EMSGSIZE;
1471 goto bad;
1472 }
1473
1474 ipf_ref();
1475
1476 /* 4135317 - always pass network byte order to filter */
1477 #if BYTE_ORDER != BIG_ENDIAN
1478 HTONS(ip->ip_len);
1479 HTONS(ip->ip_off);
1480 #endif
1481 TAILQ_FOREACH(filter, &ipv4_filters, ipf_link) {
1482 if (filter->ipf_filter.ipf_output) {
1483 errno_t result;
1484 result = filter->ipf_filter.
1485 ipf_output(filter->ipf_filter.cookie,
1486 (mbuf_t *)&m, ippo);
1487 if (result == EJUSTRETURN) {
1488 ipf_unref();
1489 goto done;
1490 }
1491 if (result != 0) {
1492 ipf_unref();
1493 goto bad;
1494 }
1495 }
1496 }
1497 /* set back to host byte order */
1498 ip = mtod(m, struct ip *);
1499 #if BYTE_ORDER != BIG_ENDIAN
1500 NTOHS(ip->ip_len);
1501 NTOHS(ip->ip_off);
1502 #endif
1503 ipf_unref();
1504 }
1505 skip_ipsec:
1506 #endif /* IPSEC */
1507
1508 #if IPFIREWALL
1509 /*
1510 * Check with the firewall...
1511 * but not if we are already being fwd'd from a firewall.
1512 */
1513 if (fw_enable && IPFW_LOADED && !args.fwa_next_hop) {
1514 struct sockaddr_in *old = dst;
1515
1516 args.fwa_m = m;
1517 args.fwa_next_hop = dst;
1518 args.fwa_oif = ifp;
1519 ipfwoff = ip_fw_chk_ptr(&args);
1520 m = args.fwa_m;
1521 dst = args.fwa_next_hop;
1522
1523 /*
1524 * On return we must do the following:
1525 * IP_FW_PORT_DENY_FLAG -> drop the pkt (XXX new)
1526 * 1<=off<= 0xffff -> DIVERT
1527 * (off & IP_FW_PORT_DYNT_FLAG) -> send to a DUMMYNET pipe
1528 * (off & IP_FW_PORT_TEE_FLAG) -> TEE the packet
1529 * dst != old -> IPFIREWALL_FORWARD
1530 * off==0, dst==old -> accept
1531 * If some of the above modules is not compiled in, then
1532 * we should't have to check the corresponding condition
1533 * (because the ipfw control socket should not accept
1534 * unsupported rules), but better play safe and drop
1535 * packets in case of doubt.
1536 */
1537 m0 = m;
1538 if ((ipfwoff & IP_FW_PORT_DENY_FLAG) || m == NULL) {
1539 if (m)
1540 m_freem(m);
1541 error = EACCES;
1542 goto done;
1543 }
1544 ip = mtod(m, struct ip *);
1545
1546 if (ipfwoff == 0 && dst == old) { /* common case */
1547 goto pass;
1548 }
1549 #if DUMMYNET
1550 if (DUMMYNET_LOADED && (ipfwoff & IP_FW_PORT_DYNT_FLAG) != 0) {
1551 /*
1552 * pass the pkt to dummynet. Need to include
1553 * pipe number, m, ifp, ro, dst because these are
1554 * not recomputed in the next pass.
1555 * All other parameters have been already used and
1556 * so they are not needed anymore.
1557 * XXX note: if the ifp or ro entry are deleted
1558 * while a pkt is in dummynet, we are in trouble!
1559 */
1560 args.fwa_ro = ro;
1561 args.fwa_dst = dst;
1562 args.fwa_oflags = flags;
1563 if (flags & IP_OUTARGS)
1564 args.fwa_ipoa = ipoa;
1565
1566 error = ip_dn_io_ptr(m, ipfwoff & 0xffff, DN_TO_IP_OUT,
1567 &args, DN_CLIENT_IPFW);
1568 goto done;
1569 }
1570 #endif /* DUMMYNET */
1571 #if IPDIVERT
1572 if (ipfwoff != 0 && (ipfwoff & IP_FW_PORT_DYNT_FLAG) == 0) {
1573 struct mbuf *clone = NULL;
1574
1575 /* Clone packet if we're doing a 'tee' */
1576 if ((ipfwoff & IP_FW_PORT_TEE_FLAG) != 0)
1577 clone = m_dup(m, M_DONTWAIT);
1578 /*
1579 * XXX
1580 * delayed checksums are not currently compatible
1581 * with divert sockets.
1582 */
1583 if (m->m_pkthdr.csum_flags & CSUM_DELAY_DATA)
1584 in_delayed_cksum(m);
1585
1586 /* Restore packet header fields to original values */
1587
1588 #if BYTE_ORDER != BIG_ENDIAN
1589 HTONS(ip->ip_len);
1590 HTONS(ip->ip_off);
1591 #endif
1592
1593 /* Deliver packet to divert input routine */
1594 divert_packet(m, 0, ipfwoff & 0xffff,
1595 args.fwa_divert_rule);
1596
1597 /* If 'tee', continue with original packet */
1598 if (clone != NULL) {
1599 m0 = m = clone;
1600 ip = mtod(m, struct ip *);
1601 goto pass;
1602 }
1603 goto done;
1604 }
1605 #endif /* IPDIVERT */
1606 #if IPFIREWALL_FORWARD
1607 /*
1608 * Here we check dst to make sure it's directly reachable on
1609 * the interface we previously thought it was.
1610 * If it isn't (which may be likely in some situations) we have
1611 * to re-route it (ie, find a route for the next-hop and the
1612 * associated interface) and set them here. This is nested
1613 * forwarding which in most cases is undesirable, except where
1614 * such control is nigh impossible. So we do it here.
1615 * And I'm babbling.
1616 */
1617 if (ipfwoff == 0 && old != dst) {
1618 struct in_ifaddr *ia_fw;
1619 struct route *ro_fwd = &sro_fwd;
1620
1621 #if IPFIREWALL_FORWARD_DEBUG
1622 printf("IPFIREWALL_FORWARD: New dst ip: ");
1623 print_ip(dst->sin_addr);
1624 printf("\n");
1625 #endif /* IPFIREWALL_FORWARD_DEBUG */
1626 /*
1627 * We need to figure out if we have been forwarded
1628 * to a local socket. If so then we should somehow
1629 * "loop back" to ip_input, and get directed to the
1630 * PCB as if we had received this packet. This is
1631 * because it may be dificult to identify the packets
1632 * you want to forward until they are being output
1633 * and have selected an interface. (e.g. locally
1634 * initiated packets) If we used the loopback inteface,
1635 * we would not be able to control what happens
1636 * as the packet runs through ip_input() as
1637 * it is done through a ISR.
1638 */
1639 lck_rw_lock_shared(in_ifaddr_rwlock);
1640 TAILQ_FOREACH(ia_fw, &in_ifaddrhead, ia_link) {
1641 /*
1642 * If the addr to forward to is one
1643 * of ours, we pretend to
1644 * be the destination for this packet.
1645 */
1646 IFA_LOCK_SPIN(&ia_fw->ia_ifa);
1647 if (IA_SIN(ia_fw)->sin_addr.s_addr ==
1648 dst->sin_addr.s_addr) {
1649 IFA_UNLOCK(&ia_fw->ia_ifa);
1650 break;
1651 }
1652 IFA_UNLOCK(&ia_fw->ia_ifa);
1653 }
1654 lck_rw_done(in_ifaddr_rwlock);
1655 if (ia_fw) {
1656 /* tell ip_input "dont filter" */
1657 struct m_tag *fwd_tag;
1658 struct ip_fwd_tag *ipfwd_tag;
1659
1660 fwd_tag = m_tag_create(KERNEL_MODULE_TAG_ID,
1661 KERNEL_TAG_TYPE_IPFORWARD,
1662 sizeof (*ipfwd_tag), M_NOWAIT, m);
1663 if (fwd_tag == NULL) {
1664 error = ENOBUFS;
1665 goto bad;
1666 }
1667
1668 ipfwd_tag = (struct ip_fwd_tag *)(fwd_tag+1);
1669 ipfwd_tag->next_hop = args.fwa_next_hop;
1670
1671 m_tag_prepend(m, fwd_tag);
1672
1673 if (m->m_pkthdr.rcvif == NULL)
1674 m->m_pkthdr.rcvif = lo_ifp;
1675
1676 #if BYTE_ORDER != BIG_ENDIAN
1677 HTONS(ip->ip_len);
1678 HTONS(ip->ip_off);
1679 #endif
1680 mbuf_outbound_finalize(m, PF_INET, 0);
1681
1682 /*
1683 * we need to call dlil_output to run filters
1684 * and resync to avoid recursion loops.
1685 */
1686 if (lo_ifp) {
1687 dlil_output(lo_ifp, PF_INET, m, NULL,
1688 SA(dst), 0, adv);
1689 } else {
1690 printf("%s: no loopback ifp for "
1691 "forwarding!!!\n", __func__);
1692 }
1693 goto done;
1694 }
1695 /*
1696 * Some of the logic for this was nicked from above.
1697 *
1698 * This rewrites the cached route in a local PCB.
1699 * Is this what we want to do?
1700 */
1701 ROUTE_RELEASE(ro_fwd);
1702 bcopy(dst, &ro_fwd->ro_dst, sizeof (*dst));
1703
1704 rtalloc_ign(ro_fwd, RTF_PRCLONING);
1705
1706 if (ro_fwd->ro_rt == NULL) {
1707 OSAddAtomic(1, &ipstat.ips_noroute);
1708 error = EHOSTUNREACH;
1709 goto bad;
1710 }
1711
1712 RT_LOCK_SPIN(ro_fwd->ro_rt);
1713 ia_fw = ifatoia(ro_fwd->ro_rt->rt_ifa);
1714 if (ia_fw != NULL) {
1715 /* Become a regular mutex */
1716 RT_CONVERT_LOCK(ro_fwd->ro_rt);
1717 IFA_ADDREF(&ia_fw->ia_ifa);
1718 }
1719 ifp = ro_fwd->ro_rt->rt_ifp;
1720 ro_fwd->ro_rt->rt_use++;
1721 if (ro_fwd->ro_rt->rt_flags & RTF_GATEWAY)
1722 dst = SIN(ro_fwd->ro_rt->rt_gateway);
1723 if (ro_fwd->ro_rt->rt_flags & RTF_HOST) {
1724 /* double negation needed for bool bit field */
1725 ipobf.isbroadcast =
1726 !!(ro_fwd->ro_rt->rt_flags & RTF_BROADCAST);
1727 } else {
1728 /* Become a regular mutex */
1729 RT_CONVERT_LOCK(ro_fwd->ro_rt);
1730 ipobf.isbroadcast =
1731 in_broadcast(dst->sin_addr, ifp);
1732 }
1733 RT_UNLOCK(ro_fwd->ro_rt);
1734 ROUTE_RELEASE(ro);
1735 ro->ro_rt = ro_fwd->ro_rt;
1736 ro_fwd->ro_rt = NULL;
1737 dst = SIN(&ro_fwd->ro_dst);
1738
1739 /*
1740 * If we added a default src ip earlier,
1741 * which would have been gotten from the-then
1742 * interface, do it again, from the new one.
1743 */
1744 if (ia_fw != NULL) {
1745 if (ipobf.fwd_rewrite_src) {
1746 IFA_LOCK_SPIN(&ia_fw->ia_ifa);
1747 ip->ip_src = IA_SIN(ia_fw)->sin_addr;
1748 IFA_UNLOCK(&ia_fw->ia_ifa);
1749 }
1750 IFA_REMREF(&ia_fw->ia_ifa);
1751 }
1752 goto pass;
1753 }
1754 #endif /* IPFIREWALL_FORWARD */
1755 /*
1756 * if we get here, none of the above matches, and
1757 * we have to drop the pkt
1758 */
1759 m_freem(m);
1760 error = EACCES; /* not sure this is the right error msg */
1761 goto done;
1762 }
1763
1764 pass:
1765 #endif /* IPFIREWALL */
1766
1767 /* 127/8 must not appear on wire - RFC1122 */
1768 if (!(ifp->if_flags & IFF_LOOPBACK) &&
1769 ((ntohl(ip->ip_src.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET ||
1770 (ntohl(ip->ip_dst.s_addr) >> IN_CLASSA_NSHIFT) == IN_LOOPBACKNET)) {
1771 OSAddAtomic(1, &ipstat.ips_badaddr);
1772 error = EADDRNOTAVAIL;
1773 goto bad;
1774 }
1775
1776 ip_output_checksum(ifp, m, (IP_VHL_HL(ip->ip_vhl) << 2),
1777 ip->ip_len, &sw_csum);
1778
1779 /*
1780 * If small enough for interface, or the interface will take
1781 * care of the fragmentation for us, can just send directly.
1782 */
1783 if ((u_short)ip->ip_len <= ifp->if_mtu || TSO_IPV4_OK(ifp, m) ||
1784 (!(ip->ip_off & IP_DF) && (ifp->if_hwassist & CSUM_FRAGMENT))) {
1785 #if BYTE_ORDER != BIG_ENDIAN
1786 HTONS(ip->ip_len);
1787 HTONS(ip->ip_off);
1788 #endif
1789
1790 ip->ip_sum = 0;
1791 if (sw_csum & CSUM_DELAY_IP) {
1792 ip->ip_sum = ip_cksum_hdr_out(m, hlen);
1793 sw_csum &= ~CSUM_DELAY_IP;
1794 m->m_pkthdr.csum_flags &= ~CSUM_DELAY_IP;
1795 }
1796
1797 #if IPSEC
1798 /* clean ipsec history once it goes out of the node */
1799 if (ipsec_bypass == 0 && !(flags & IP_NOIPSEC))
1800 ipsec_delaux(m);
1801 #endif /* IPSEC */
1802 if ((m->m_pkthdr.csum_flags & CSUM_TSO_IPV4) &&
1803 (m->m_pkthdr.tso_segsz > 0))
1804 scnt += m->m_pkthdr.len / m->m_pkthdr.tso_segsz;
1805 else
1806 scnt++;
1807
1808 if (packetchain == 0) {
1809 if (ro->ro_rt != NULL && nstat_collect)
1810 nstat_route_tx(ro->ro_rt, scnt,
1811 m->m_pkthdr.len, 0);
1812
1813 error = dlil_output(ifp, PF_INET, m, ro->ro_rt,
1814 SA(dst), 0, adv);
1815 if (dlil_verbose && error) {
1816 printf("dlil_output error on interface %s: %d\n",
1817 ifp->if_xname, error);
1818 }
1819 scnt = 0;
1820 goto done;
1821 } else {
1822 /*
1823 * packet chaining allows us to reuse the
1824 * route for all packets
1825 */
1826 bytecnt += m->m_pkthdr.len;
1827 mppn = &m->m_nextpkt;
1828 m = m->m_nextpkt;
1829 if (m == NULL) {
1830 #if PF
1831 sendchain:
1832 #endif /* PF */
1833 if (pktcnt > ip_maxchainsent)
1834 ip_maxchainsent = pktcnt;
1835 if (ro->ro_rt != NULL && nstat_collect)
1836 nstat_route_tx(ro->ro_rt, scnt,
1837 bytecnt, 0);
1838
1839 error = dlil_output(ifp, PF_INET, packetlist,
1840 ro->ro_rt, SA(dst), 0, adv);
1841 if (dlil_verbose && error) {
1842 printf("dlil_output error on interface %s: %d\n",
1843 ifp->if_xname, error);
1844 }
1845 pktcnt = 0;
1846 scnt = 0;
1847 bytecnt = 0;
1848 goto done;
1849
1850 }
1851 m0 = m;
1852 pktcnt++;
1853 goto loopit;
1854 }
1855 }
1856 /*
1857 * Too large for interface; fragment if possible.
1858 * Must be able to put at least 8 bytes per fragment.
1859 * Balk when DF bit is set or the interface didn't support TSO.
1860 */
1861 if ((ip->ip_off & IP_DF) || pktcnt > 0 ||
1862 (m->m_pkthdr.csum_flags & CSUM_TSO_IPV4)) {
1863 error = EMSGSIZE;
1864 /*
1865 * This case can happen if the user changed the MTU
1866 * of an interface after enabling IP on it. Because
1867 * most netifs don't keep track of routes pointing to
1868 * them, there is no way for one to update all its
1869 * routes when the MTU is changed.
1870 */
1871 if (ro->ro_rt) {
1872 RT_LOCK_SPIN(ro->ro_rt);
1873 if ((ro->ro_rt->rt_flags & (RTF_UP | RTF_HOST)) &&
1874 !(ro->ro_rt->rt_rmx.rmx_locks & RTV_MTU) &&
1875 (ro->ro_rt->rt_rmx.rmx_mtu > ifp->if_mtu)) {
1876 ro->ro_rt->rt_rmx.rmx_mtu = ifp->if_mtu;
1877 }
1878 RT_UNLOCK(ro->ro_rt);
1879 }
1880 if (pktcnt > 0) {
1881 m0 = packetlist;
1882 }
1883 OSAddAtomic(1, &ipstat.ips_cantfrag);
1884 goto bad;
1885 }
1886
1887 error = ip_fragment(m, ifp, ifp->if_mtu, sw_csum);
1888 if (error != 0) {
1889 m0 = m = NULL;
1890 goto bad;
1891 }
1892
1893 KERNEL_DEBUG(DBG_LAYER_END, ip->ip_dst.s_addr,
1894 ip->ip_src.s_addr, ip->ip_p, ip->ip_off, ip->ip_len);
1895
1896 for (m = m0; m; m = m0) {
1897 m0 = m->m_nextpkt;
1898 m->m_nextpkt = 0;
1899 #if IPSEC
1900 /* clean ipsec history once it goes out of the node */
1901 if (ipsec_bypass == 0 && !(flags & IP_NOIPSEC))
1902 ipsec_delaux(m);
1903 #endif /* IPSEC */
1904 if (error == 0) {
1905 if ((packetchain != 0) && (pktcnt > 0)) {
1906 panic("%s: mix of packet in packetlist is "
1907 "wrong=%p", __func__, packetlist);
1908 /* NOTREACHED */
1909 }
1910 if (ro->ro_rt != NULL && nstat_collect) {
1911 nstat_route_tx(ro->ro_rt, 1,
1912 m->m_pkthdr.len, 0);
1913 }
1914 error = dlil_output(ifp, PF_INET, m, ro->ro_rt,
1915 SA(dst), 0, adv);
1916 if (dlil_verbose && error) {
1917 printf("dlil_output error on interface %s: %d\n",
1918 ifp->if_xname, error);
1919 }
1920 } else {
1921 m_freem(m);
1922 }
1923 }
1924
1925 if (error == 0)
1926 OSAddAtomic(1, &ipstat.ips_fragmented);
1927
1928 done:
1929 if (ia != NULL) {
1930 IFA_REMREF(&ia->ia_ifa);
1931 ia = NULL;
1932 }
1933 #if IPSEC
1934 ROUTE_RELEASE(&ipsec_state.ro);
1935 if (sp != NULL) {
1936 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
1937 printf("DP ip_output call free SP:%x\n", sp));
1938 key_freesp(sp, KEY_SADB_UNLOCKED);
1939 }
1940 #endif /* IPSEC */
1941 #if NECP
1942 ROUTE_RELEASE(&necp_route);
1943 #endif /* NECP */
1944 #if DUMMYNET
1945 ROUTE_RELEASE(&saved_route);
1946 #endif /* DUMMYNET */
1947 #if IPFIREWALL_FORWARD
1948 ROUTE_RELEASE(&sro_fwd);
1949 #endif /* IPFIREWALL_FORWARD */
1950
1951 KERNEL_DEBUG(DBG_FNC_IP_OUTPUT | DBG_FUNC_END, error, 0, 0, 0, 0);
1952 if (ip_output_measure) {
1953 net_perf_measure_time(&net_perf, &start_tv, packets_processed);
1954 net_perf_histogram(&net_perf, packets_processed);
1955 }
1956 return (error);
1957 bad:
1958 if (pktcnt > 0)
1959 m0 = packetlist;
1960 m_freem_list(m0);
1961 goto done;
1962
1963 #undef ipsec_state
1964 #undef args
1965 #undef sro_fwd
1966 #undef saved_route
1967 #undef ipf_pktopts
1968 #undef IP_CHECK_RESTRICTIONS
1969 }
1970
/*
 * Fragment an outbound IPv4 packet 'm' into a chain of fragments that each
 * fit within 'mtu' bytes on interface 'ifp'.
 *
 * On success the fragments are linked through m_nextpkt (first fragment is
 * the original mbuf 'm', trimmed in place) and 0 is returned; the caller
 * transmits the chain.  On failure the entire chain built so far is freed
 * and an errno is returned.  'sw_csum' carries the software-checksum work
 * still pending (CSUM_DELAY_IP => compute the IP header checksum here).
 *
 * NOTE(review): on entry ip_len/ip_off are expected in host byte order
 * (the function swaps them to network order before returning the chain) —
 * consistent with the HTONS/NTOHS dance in the caller above.
 */
int
ip_fragment(struct mbuf *m, struct ifnet *ifp, unsigned long mtu, int sw_csum)
{
	struct ip *ip, *mhip;
	int len, hlen, mhlen, firstlen, off, error = 0;
	struct mbuf **mnext = &m->m_nextpkt, *m0;
	int nfrags = 1;

	ip = mtod(m, struct ip *);
#ifdef _IP_VHL
	hlen = IP_VHL_HL(ip->ip_vhl) << 2;
#else /* !_IP_VHL */
	hlen = ip->ip_hl << 2;
#endif /* !_IP_VHL */

	/*
	 * Payload bytes per fragment: the MTU minus the IP header, rounded
	 * down to a multiple of 8 (fragment offsets are in 8-byte units).
	 */
	firstlen = len = (mtu - hlen) &~ 7;
	/* Must be able to carry at least one 8-byte unit per fragment. */
	if (len < 8) {
		m_freem(m);
		return (EMSGSIZE);
	}

	/*
	 * if the interface will not calculate checksums on
	 * fragmented packets, then do it here.
	 */
	if ((m->m_pkthdr.csum_flags & CSUM_DELAY_DATA) &&
	    !(ifp->if_hwassist & CSUM_IP_FRAGS))
		in_delayed_cksum(m);

	/*
	 * Loop through length of segment after first fragment,
	 * make new header and copy data of each part and link onto chain.
	 */
	m0 = m;
	mhlen = sizeof (struct ip);
	for (off = hlen + len; off < (u_short)ip->ip_len; off += len) {
		/* Fresh pkthdr mbuf to hold this fragment's IP header. */
		MGETHDR(m, M_DONTWAIT, MT_HEADER);	/* MAC-OK */
		if (m == NULL) {
			error = ENOBUFS;
			OSAddAtomic(1, &ipstat.ips_odropped);
			goto sendorfree;
		}
		m->m_flags |= (m0->m_flags & M_MCAST) | M_FRAG;
		/* Leave room for the link-layer header to be prepended. */
		m->m_data += max_linkhdr;
		mhip = mtod(m, struct ip *);
		/* Start from a copy of the original header ... */
		*mhip = *ip;
		if (hlen > sizeof (struct ip)) {
			/* ... copying only the options marked for fragments. */
			mhlen = ip_optcopy(ip, mhip) + sizeof (struct ip);
			mhip->ip_vhl = IP_MAKE_VHL(IPVERSION, mhlen >> 2);
		}
		m->m_len = mhlen;
		/* Offset in 8-byte units, preserving all but MF from original. */
		mhip->ip_off = ((off - hlen) >> 3) + (ip->ip_off & ~IP_MF);
		if (ip->ip_off & IP_MF)
			mhip->ip_off |= IP_MF;
		if (off + len >= (u_short)ip->ip_len)
			len = (u_short)ip->ip_len - off;	/* last fragment: shorter */
		else
			mhip->ip_off |= IP_MF;	/* more fragments follow */
		mhip->ip_len = htons((u_short)(len + mhlen));
		/* Reference (not copy) the payload bytes out of the original. */
		m->m_next = m_copy(m0, off, len);
		if (m->m_next == NULL) {
			(void) m_free(m);
			error = ENOBUFS;	/* ??? */
			OSAddAtomic(1, &ipstat.ips_odropped);
			goto sendorfree;
		}
		m->m_pkthdr.len = mhlen + len;
		m->m_pkthdr.rcvif = NULL;
		/* Propagate pending checksum work to every fragment. */
		m->m_pkthdr.csum_flags = m0->m_pkthdr.csum_flags;

		M_COPY_CLASSIFIER(m, m0);
		M_COPY_PFTAG(m, m0);

#if CONFIG_MACF_NET
		mac_netinet_fragment(m0, m);
#endif /* CONFIG_MACF_NET */

#if BYTE_ORDER != BIG_ENDIAN
		HTONS(mhip->ip_off);
#endif

		mhip->ip_sum = 0;
		if (sw_csum & CSUM_DELAY_IP) {
			mhip->ip_sum = ip_cksum_hdr_out(m, mhlen);
			m->m_pkthdr.csum_flags &= ~CSUM_DELAY_IP;
		}
		*mnext = m;
		mnext = &m->m_nextpkt;
		nfrags++;
	}
	OSAddAtomic(nfrags, &ipstat.ips_ofragments);

	/* set first/last markers for fragment chain */
	m->m_flags |= M_LASTFRAG;
	m0->m_flags |= M_FIRSTFRAG | M_FRAG;
	/* csum_data on the first fragment records the chain length. */
	m0->m_pkthdr.csum_data = nfrags;

	/*
	 * Update first fragment by trimming what's been copied out
	 * and updating header, then send each fragment (in order).
	 */
	m = m0;
	m_adj(m, hlen + firstlen - (u_short)ip->ip_len);
	m->m_pkthdr.len = hlen + firstlen;
	ip->ip_len = htons((u_short)m->m_pkthdr.len);
	ip->ip_off |= IP_MF;

#if BYTE_ORDER != BIG_ENDIAN
	HTONS(ip->ip_off);
#endif

	ip->ip_sum = 0;
	if (sw_csum & CSUM_DELAY_IP) {
		ip->ip_sum = ip_cksum_hdr_out(m, hlen);
		m->m_pkthdr.csum_flags &= ~CSUM_DELAY_IP;
	}
sendorfree:
	/* On any error, release the whole chain (first fragment included). */
	if (error)
		m_freem_list(m0);

	return (error);
}
2093
/*
 * Account an outbound software checksum computation of 'len' bytes
 * against the per-protocol statistics.  Only TCP and UDP are tracked;
 * every other protocol is silently ignored.
 */
static void
ip_out_cksum_stats(int proto, u_int32_t len)
{
	if (proto == IPPROTO_TCP) {
		tcp_out_cksum_stats(len);
	} else if (proto == IPPROTO_UDP) {
		udp_out_cksum_stats(len);
	}
	/* keep only TCP or UDP stats for now */
}
2109
/*
 * Process a delayed payload checksum calculation (outbound path.)
 *
 * hoff is the number of bytes beyond the mbuf data pointer which
 * points to the IP header.
 *
 * csum_flags selects which of the delayed operations (CSUM_DELAY_IP,
 * CSUM_DELAY_DATA) the caller wants performed; only those also present
 * in the packet's csum_flags are actually done.
 *
 * Returns a bitmask representing all the work done in software.
 */
uint32_t
in_finalize_cksum(struct mbuf *m, uint32_t hoff, uint32_t csum_flags)
{
	/* large enough for a maximal IP header: 15 words << 2 == 60 bytes */
	unsigned char buf[15 << 2] __attribute__((aligned(8)));
	struct ip *ip;
	uint32_t offset, _hlen, mlen, hlen, len, sw_csum;
	uint16_t csum, ip_len;

	_CASSERT(sizeof (csum) == sizeof (uint16_t));
	VERIFY(m->m_flags & M_PKTHDR);

	/* intersect requested work with what the packet actually defers */
	sw_csum = (csum_flags & m->m_pkthdr.csum_flags);

	if ((sw_csum &= (CSUM_DELAY_IP | CSUM_DELAY_DATA)) == 0)
		goto done;

	mlen = m->m_pkthdr.len;				/* total mbuf len */

	/* sanity check (need at least simple IP header) */
	if (mlen < (hoff + sizeof (*ip))) {
		panic("%s: mbuf %p pkt len (%u) < hoff+ip_hdr "
		    "(%u+%u)\n", __func__, m, mlen, hoff,
		    (uint32_t)sizeof (*ip));
		/* NOTREACHED */
	}

	/*
	 * In case the IP header is not contiguous, or not 32-bit aligned,
	 * or if we're computing the IP header checksum, copy it to a local
	 * buffer.  Copy only the simple IP header here (IP options case
	 * is handled below.)
	 */
	if ((sw_csum & CSUM_DELAY_IP) || (hoff + sizeof (*ip)) > m->m_len ||
	    !IP_HDR_ALIGNED_P(mtod(m, caddr_t) + hoff)) {
		m_copydata(m, hoff, sizeof (*ip), (caddr_t)buf);
		ip = (struct ip *)(void *)buf;
		/* _hlen != 0 records that 'ip' points at the local copy */
		_hlen = sizeof (*ip);
	} else {
		ip = (struct ip *)(void *)(m->m_data + hoff);
		_hlen = 0;
	}

	hlen = IP_VHL_HL(ip->ip_vhl) << 2;	/* IP header len */

	/* sanity check */
	if (mlen < (hoff + hlen)) {
		panic("%s: mbuf %p pkt too short (%d) for IP header (%u), "
		    "hoff %u", __func__, m, mlen, hlen, hoff);
		/* NOTREACHED */
	}

	/*
	 * We could be in the context of an IP or interface filter; in the
	 * former case, ip_len would be in host (correct) order while for
	 * the latter it would be in network order.  Because of this, we
	 * attempt to interpret the length field by comparing it against
	 * the actual packet length.  If the comparison fails, byte swap
	 * the length and check again.  If it still fails, use the actual
	 * packet length.  This also covers the trailing bytes case.
	 */
	ip_len = ip->ip_len;
	if (ip_len != (mlen - hoff)) {
		ip_len = OSSwapInt16(ip_len);
		if (ip_len != (mlen - hoff)) {
			printf("%s: mbuf 0x%llx proto %d IP len %d (%x) "
			    "[swapped %d (%x)] doesn't match actual packet "
			    "length; %d is used instead\n", __func__,
			    (uint64_t)VM_KERNEL_ADDRPERM(m), ip->ip_p,
			    ip->ip_len, ip->ip_len, ip_len, ip_len,
			    (mlen - hoff));
			ip_len = mlen - hoff;
		}
	}

	len = ip_len - hlen;			/* csum span */

	if (sw_csum & CSUM_DELAY_DATA) {
		uint16_t ulpoff;

		/*
		 * offset is added to the lower 16-bit value of csum_data,
		 * which is expected to contain the ULP offset; therefore
		 * CSUM_PARTIAL offset adjustment must be undone.
		 */
		if ((m->m_pkthdr.csum_flags & (CSUM_PARTIAL|CSUM_DATA_VALID)) ==
		    (CSUM_PARTIAL|CSUM_DATA_VALID)) {
			/*
			 * Get back the original ULP offset (this will
			 * undo the CSUM_PARTIAL logic in ip_output.)
			 */
			m->m_pkthdr.csum_data = (m->m_pkthdr.csum_tx_stuff -
			    m->m_pkthdr.csum_tx_start);
		}

		ulpoff = (m->m_pkthdr.csum_data & 0xffff); /* ULP csum offset */
		offset = hoff + hlen;			/* ULP header */

		if (mlen < (ulpoff + sizeof (csum))) {
			panic("%s: mbuf %p pkt len (%u) proto %d invalid ULP "
			    "cksum offset (%u) cksum flags 0x%x\n", __func__,
			    m, mlen, ip->ip_p, ulpoff, m->m_pkthdr.csum_flags);
			/* NOTREACHED */
		}

		/* one's-complement sum over the transport payload span */
		csum = inet_cksum(m, 0, offset, len);

		/* Update stats */
		ip_out_cksum_stats(ip->ip_p, len);

		/* RFC1122 4.1.3.4: a computed UDP checksum of 0 is sent
		 * as all-ones, since 0 means "no checksum". */
		if (csum == 0 && (m->m_pkthdr.csum_flags & CSUM_UDP))
			csum = 0xffff;

		/* Insert the checksum in the ULP csum field */
		offset += ulpoff;
		if (offset + sizeof (csum) > m->m_len) {
			/* field straddles/exceeds the first mbuf; copy back */
			m_copyback(m, offset, sizeof (csum), &csum);
		} else if (IP_HDR_ALIGNED_P(mtod(m, char *) + hoff)) {
			/* aligned: direct 16-bit store */
			*(uint16_t *)(void *)(mtod(m, char *) + offset) = csum;
		} else {
			/* unaligned: byte-wise copy */
			bcopy(&csum, (mtod(m, char *) + offset), sizeof (csum));
		}
		m->m_pkthdr.csum_flags &=
		    ~(CSUM_DELAY_DATA | CSUM_DATA_VALID | CSUM_PARTIAL);
	}

	if (sw_csum & CSUM_DELAY_IP) {
		/* IP header must be in the local buffer */
		VERIFY(_hlen == sizeof (*ip));
		if (_hlen != hlen) {
			/* re-copy to pick up IP options as well */
			VERIFY(hlen <= sizeof (buf));
			m_copydata(m, hoff, hlen, (caddr_t)buf);
			ip = (struct ip *)(void *)buf;
			_hlen = hlen;
		}

		/*
		 * Compute the IP header checksum as if the IP length
		 * is the length which we believe is "correct"; see
		 * how ip_len gets calculated above.  Note that this
		 * is done on the local copy and not on the real one.
		 */
		ip->ip_len = htons(ip_len);
		ip->ip_sum = 0;
		csum = in_cksum_hdr_opt(ip);

		/* Update stats */
		ipstat.ips_snd_swcsum++;
		ipstat.ips_snd_swcsum_bytes += hlen;

		/*
		 * Insert only the checksum in the existing IP header
		 * csum field; all other fields are left unchanged.
		 */
		offset = hoff + offsetof(struct ip, ip_sum);
		if (offset + sizeof (csum) > m->m_len) {
			m_copyback(m, offset, sizeof (csum), &csum);
		} else if (IP_HDR_ALIGNED_P(mtod(m, char *) + hoff)) {
			*(uint16_t *)(void *)(mtod(m, char *) + offset) = csum;
		} else {
			bcopy(&csum, (mtod(m, char *) + offset), sizeof (csum));
		}
		m->m_pkthdr.csum_flags &= ~CSUM_DELAY_IP;
	}

done:
	return (sw_csum);
}
2286
/*
 * Insert IP options into preformed packet.
 * Adjust IP destination as required for IP source routing,
 * as indicated by a non-zero in_addr at the start of the options.
 *
 * 'm' is the packet (IP header at the front); 'opt' holds a struct
 * ipoption (first-hop destination followed by the raw option bytes).
 * On return *phlen is the new IP header length (base header + options)
 * and the possibly-reallocated packet head is returned.  On failure the
 * original 'm' is returned unchanged (options silently dropped).
 *
 * XXX This routine assumes that the packet has no options in place.
 */
static struct mbuf *
ip_insertoptions(struct mbuf *m, struct mbuf *opt, int *phlen)
{
	struct ipoption *p = mtod(opt, struct ipoption *);
	struct mbuf *n;
	struct ip *ip = mtod(m, struct ip *);
	unsigned optlen;

	/* option bytes exclude the leading pseudo-option (first-hop dst) */
	optlen = opt->m_len - sizeof (p->ipopt_dst);
	/* NOTE(review): ip_len appears to be host order here — options
	 * would overflow the 16-bit total length otherwise */
	if (optlen + (u_short)ip->ip_len > IP_MAXPACKET)
		return (m);		/* XXX should fail */
	/* non-zero first-hop address => source route; re-aim the packet */
	if (p->ipopt_dst.s_addr)
		ip->ip_dst = p->ipopt_dst;
	if (m->m_flags & M_EXT || m->m_data - optlen < m->m_pktdat) {
		/*
		 * No room to prepend in place (cluster, or would underrun
		 * the internal data area): allocate a new header mbuf,
		 * move the IP header into it and chain the old one behind.
		 */
		MGETHDR(n, M_DONTWAIT, MT_HEADER);	/* MAC-OK */
		if (n == NULL)
			return (m);
		n->m_pkthdr.rcvif = 0;
#if CONFIG_MACF_NET
		mac_mbuf_label_copy(m, n);
#endif /* CONFIG_MACF_NET */
		n->m_pkthdr.len = m->m_pkthdr.len + optlen;
		m->m_len -= sizeof (struct ip);
		m->m_data += sizeof (struct ip);
		n->m_next = m;
		m = n;
		m->m_len = optlen + sizeof (struct ip);
		m->m_data += max_linkhdr;	/* leave space for link header */
		(void) memcpy(mtod(m, void *), ip, sizeof (struct ip));
	} else {
		/* enough slack: slide the IP header back to open a gap */
		m->m_data -= optlen;
		m->m_len += optlen;
		m->m_pkthdr.len += optlen;
		ovbcopy((caddr_t)ip, mtod(m, caddr_t), sizeof (struct ip));
	}
	ip = mtod(m, struct ip *);
	/* copy the options immediately after the base IP header */
	bcopy(p->ipopt_list, ip + 1, optlen);
	*phlen = sizeof (struct ip) + optlen;
	ip->ip_vhl = IP_MAKE_VHL(IPVERSION, *phlen >> 2);
	ip->ip_len += optlen;
	return (m);
}
2336
/*
 * Copy options from ip to jp,
 * omitting those not copied during fragmentation.
 *
 * Only options whose copied bit is set (IPOPT_COPIED) are replicated
 * into non-first fragments; NOPs are kept for alignment.  Returns the
 * resulting option length, padded with IPOPT_EOL to a 4-byte multiple.
 */
static int
ip_optcopy(struct ip *ip, struct ip *jp)
{
	u_char *cp, *dp;
	int opt, optlen, cnt;

	/* options begin immediately after the base IP headers */
	cp = (u_char *)(ip + 1);
	dp = (u_char *)(jp + 1);
	cnt = (IP_VHL_HL(ip->ip_vhl) << 2) - sizeof (struct ip);
	for (; cnt > 0; cnt -= optlen, cp += optlen) {
		opt = cp[0];
		if (opt == IPOPT_EOL)
			break;
		if (opt == IPOPT_NOP) {
			/* Preserve for IP mcast tunnel's LSRR alignment. */
			*dp++ = IPOPT_NOP;
			optlen = 1;
			continue;
		}
#if DIAGNOSTIC
		if (cnt < IPOPT_OLEN + sizeof (*cp)) {
			panic("malformed IPv4 option passed to ip_optcopy");
			/* NOTREACHED */
		}
#endif
		optlen = cp[IPOPT_OLEN];
#if DIAGNOSTIC
		if (optlen < IPOPT_OLEN + sizeof (*cp) || optlen > cnt) {
			panic("malformed IPv4 option passed to ip_optcopy");
			/* NOTREACHED */
		}
#endif
		/* bogus lengths should have been caught by ip_dooptions */
		if (optlen > cnt)
			optlen = cnt;
		if (IPOPT_COPIED(opt)) {
			bcopy(cp, dp, optlen);
			dp += optlen;
		}
	}
	/* pad the copied options out to a 32-bit boundary with EOL bytes */
	for (optlen = dp - (u_char *)(jp+1); optlen & 0x3; optlen++)
		*dp++ = IPOPT_EOL;
	return (optlen);
}
2385
2386 /*
2387 * IP socket option processing.
2388 */
2389 int
2390 ip_ctloutput(struct socket *so, struct sockopt *sopt)
2391 {
2392 struct inpcb *inp = sotoinpcb(so);
2393 int error, optval;
2394
2395 error = optval = 0;
2396 if (sopt->sopt_level != IPPROTO_IP)
2397 return (EINVAL);
2398
2399 switch (sopt->sopt_dir) {
2400 case SOPT_SET:
2401 switch (sopt->sopt_name) {
2402 #ifdef notyet
2403 case IP_RETOPTS:
2404 #endif
2405 case IP_OPTIONS: {
2406 struct mbuf *m;
2407
2408 if (sopt->sopt_valsize > MLEN) {
2409 error = EMSGSIZE;
2410 break;
2411 }
2412 MGET(m, sopt->sopt_p != kernproc ? M_WAIT : M_DONTWAIT,
2413 MT_HEADER);
2414 if (m == NULL) {
2415 error = ENOBUFS;
2416 break;
2417 }
2418 m->m_len = sopt->sopt_valsize;
2419 error = sooptcopyin(sopt, mtod(m, char *),
2420 m->m_len, m->m_len);
2421 if (error)
2422 break;
2423
2424 return (ip_pcbopts(sopt->sopt_name,
2425 &inp->inp_options, m));
2426 }
2427
2428 case IP_TOS:
2429 case IP_TTL:
2430 case IP_RECVOPTS:
2431 case IP_RECVRETOPTS:
2432 case IP_RECVDSTADDR:
2433 case IP_RECVIF:
2434 case IP_RECVTTL:
2435 case IP_RECVPKTINFO:
2436 error = sooptcopyin(sopt, &optval, sizeof (optval),
2437 sizeof (optval));
2438 if (error)
2439 break;
2440
2441 switch (sopt->sopt_name) {
2442 case IP_TOS:
2443 inp->inp_ip_tos = optval;
2444 break;
2445
2446 case IP_TTL:
2447 inp->inp_ip_ttl = optval;
2448 break;
2449 #define OPTSET(bit) \
2450 if (optval) \
2451 inp->inp_flags |= bit; \
2452 else \
2453 inp->inp_flags &= ~bit;
2454
2455 case IP_RECVOPTS:
2456 OPTSET(INP_RECVOPTS);
2457 break;
2458
2459 case IP_RECVRETOPTS:
2460 OPTSET(INP_RECVRETOPTS);
2461 break;
2462
2463 case IP_RECVDSTADDR:
2464 OPTSET(INP_RECVDSTADDR);
2465 break;
2466
2467 case IP_RECVIF:
2468 OPTSET(INP_RECVIF);
2469 break;
2470
2471 case IP_RECVTTL:
2472 OPTSET(INP_RECVTTL);
2473 break;
2474
2475 case IP_RECVPKTINFO:
2476 OPTSET(INP_PKTINFO);
2477 break;
2478 }
2479 break;
2480 #undef OPTSET
2481
2482 #if CONFIG_FORCE_OUT_IFP
2483 /*
2484 * Apple private interface, similar to IP_BOUND_IF, except
2485 * that the parameter is a NULL-terminated string containing
2486 * the name of the network interface; an emptry string means
2487 * unbind. Applications are encouraged to use IP_BOUND_IF
2488 * instead, as that is the current "official" API.
2489 */
2490 case IP_FORCE_OUT_IFP: {
2491 char ifname[IFNAMSIZ];
2492 unsigned int ifscope;
2493
2494 /* This option is settable only for IPv4 */
2495 if (!(inp->inp_vflag & INP_IPV4)) {
2496 error = EINVAL;
2497 break;
2498 }
2499
2500 /* Verify interface name parameter is sane */
2501 if (sopt->sopt_valsize > sizeof (ifname)) {
2502 error = EINVAL;
2503 break;
2504 }
2505
2506 /* Copy the interface name */
2507 if (sopt->sopt_valsize != 0) {
2508 error = sooptcopyin(sopt, ifname,
2509 sizeof (ifname), sopt->sopt_valsize);
2510 if (error)
2511 break;
2512 }
2513
2514 if (sopt->sopt_valsize == 0 || ifname[0] == '\0') {
2515 /* Unbind this socket from any interface */
2516 ifscope = IFSCOPE_NONE;
2517 } else {
2518 ifnet_t ifp;
2519
2520 /* Verify name is NULL terminated */
2521 if (ifname[sopt->sopt_valsize - 1] != '\0') {
2522 error = EINVAL;
2523 break;
2524 }
2525
2526 /* Bail out if given bogus interface name */
2527 if (ifnet_find_by_name(ifname, &ifp) != 0) {
2528 error = ENXIO;
2529 break;
2530 }
2531
2532 /* Bind this socket to this interface */
2533 ifscope = ifp->if_index;
2534
2535 /*
2536 * Won't actually free; since we don't release
2537 * this later, we should do it now.
2538 */
2539 ifnet_release(ifp);
2540 }
2541 error = inp_bindif(inp, ifscope, NULL);
2542 }
2543 break;
2544 #endif /* CONFIG_FORCE_OUT_IFP */
2545 /*
2546 * Multicast socket options are processed by the in_mcast
2547 * module.
2548 */
2549 case IP_MULTICAST_IF:
2550 case IP_MULTICAST_IFINDEX:
2551 case IP_MULTICAST_VIF:
2552 case IP_MULTICAST_TTL:
2553 case IP_MULTICAST_LOOP:
2554 case IP_ADD_MEMBERSHIP:
2555 case IP_DROP_MEMBERSHIP:
2556 case IP_ADD_SOURCE_MEMBERSHIP:
2557 case IP_DROP_SOURCE_MEMBERSHIP:
2558 case IP_BLOCK_SOURCE:
2559 case IP_UNBLOCK_SOURCE:
2560 case IP_MSFILTER:
2561 case MCAST_JOIN_GROUP:
2562 case MCAST_LEAVE_GROUP:
2563 case MCAST_JOIN_SOURCE_GROUP:
2564 case MCAST_LEAVE_SOURCE_GROUP:
2565 case MCAST_BLOCK_SOURCE:
2566 case MCAST_UNBLOCK_SOURCE:
2567 error = inp_setmoptions(inp, sopt);
2568 break;
2569
2570 case IP_PORTRANGE:
2571 error = sooptcopyin(sopt, &optval, sizeof (optval),
2572 sizeof (optval));
2573 if (error)
2574 break;
2575
2576 switch (optval) {
2577 case IP_PORTRANGE_DEFAULT:
2578 inp->inp_flags &= ~(INP_LOWPORT);
2579 inp->inp_flags &= ~(INP_HIGHPORT);
2580 break;
2581
2582 case IP_PORTRANGE_HIGH:
2583 inp->inp_flags &= ~(INP_LOWPORT);
2584 inp->inp_flags |= INP_HIGHPORT;
2585 break;
2586
2587 case IP_PORTRANGE_LOW:
2588 inp->inp_flags &= ~(INP_HIGHPORT);
2589 inp->inp_flags |= INP_LOWPORT;
2590 break;
2591
2592 default:
2593 error = EINVAL;
2594 break;
2595 }
2596 break;
2597
2598 #if IPSEC
2599 case IP_IPSEC_POLICY: {
2600 caddr_t req = NULL;
2601 size_t len = 0;
2602 int priv;
2603 struct mbuf *m;
2604 int optname;
2605
2606 if ((error = soopt_getm(sopt, &m)) != 0) /* XXX */
2607 break;
2608 if ((error = soopt_mcopyin(sopt, m)) != 0) /* XXX */
2609 break;
2610 priv = (proc_suser(sopt->sopt_p) == 0);
2611 if (m) {
2612 req = mtod(m, caddr_t);
2613 len = m->m_len;
2614 }
2615 optname = sopt->sopt_name;
2616 error = ipsec4_set_policy(inp, optname, req, len, priv);
2617 m_freem(m);
2618 break;
2619 }
2620 #endif /* IPSEC */
2621
2622 #if TRAFFIC_MGT
2623 case IP_TRAFFIC_MGT_BACKGROUND: {
2624 unsigned background = 0;
2625
2626 error = sooptcopyin(sopt, &background,
2627 sizeof (background), sizeof (background));
2628 if (error)
2629 break;
2630
2631 if (background) {
2632 socket_set_traffic_mgt_flags_locked(so,
2633 TRAFFIC_MGT_SO_BACKGROUND);
2634 } else {
2635 socket_clear_traffic_mgt_flags_locked(so,
2636 TRAFFIC_MGT_SO_BACKGROUND);
2637 }
2638
2639 break;
2640 }
2641 #endif /* TRAFFIC_MGT */
2642
2643 /*
2644 * On a multihomed system, scoped routing can be used to
2645 * restrict the source interface used for sending packets.
2646 * The socket option IP_BOUND_IF binds a particular AF_INET
2647 * socket to an interface such that data sent on the socket
2648 * is restricted to that interface. This is unlike the
2649 * SO_DONTROUTE option where the routing table is bypassed;
2650 * therefore it allows for a greater flexibility and control
2651 * over the system behavior, and does not place any restriction
2652 * on the destination address type (e.g. unicast, multicast,
2653 * or broadcast if applicable) or whether or not the host is
2654 * directly reachable. Note that in the multicast transmit
2655 * case, IP_MULTICAST_{IF,IFINDEX} takes precedence over
2656 * IP_BOUND_IF, since the former practically bypasses the
2657 * routing table; in this case, IP_BOUND_IF sets the default
2658 * interface used for sending multicast packets in the absence
2659 * of an explicit multicast transmit interface.
2660 */
2661 case IP_BOUND_IF:
2662 /* This option is settable only for IPv4 */
2663 if (!(inp->inp_vflag & INP_IPV4)) {
2664 error = EINVAL;
2665 break;
2666 }
2667
2668 error = sooptcopyin(sopt, &optval, sizeof (optval),
2669 sizeof (optval));
2670
2671 if (error)
2672 break;
2673
2674 error = inp_bindif(inp, optval, NULL);
2675 break;
2676
2677 case IP_NO_IFT_CELLULAR:
2678 /* This option is settable only for IPv4 */
2679 if (!(inp->inp_vflag & INP_IPV4)) {
2680 error = EINVAL;
2681 break;
2682 }
2683
2684 error = sooptcopyin(sopt, &optval, sizeof (optval),
2685 sizeof (optval));
2686
2687 if (error)
2688 break;
2689
2690 /* once set, it cannot be unset */
2691 if (!optval && INP_NO_CELLULAR(inp)) {
2692 error = EINVAL;
2693 break;
2694 }
2695
2696 error = so_set_restrictions(so,
2697 SO_RESTRICT_DENY_CELLULAR);
2698 break;
2699
2700 case IP_OUT_IF:
2701 /* This option is not settable */
2702 error = EINVAL;
2703 break;
2704
2705 default:
2706 error = ENOPROTOOPT;
2707 break;
2708 }
2709 break;
2710
2711 case SOPT_GET:
2712 switch (sopt->sopt_name) {
2713 case IP_OPTIONS:
2714 case IP_RETOPTS:
2715 if (inp->inp_options) {
2716 error = sooptcopyout(sopt,
2717 mtod(inp->inp_options, char *),
2718 inp->inp_options->m_len);
2719 } else {
2720 sopt->sopt_valsize = 0;
2721 }
2722 break;
2723
2724 case IP_TOS:
2725 case IP_TTL:
2726 case IP_RECVOPTS:
2727 case IP_RECVRETOPTS:
2728 case IP_RECVDSTADDR:
2729 case IP_RECVIF:
2730 case IP_RECVTTL:
2731 case IP_PORTRANGE:
2732 case IP_RECVPKTINFO:
2733 switch (sopt->sopt_name) {
2734
2735 case IP_TOS:
2736 optval = inp->inp_ip_tos;
2737 break;
2738
2739 case IP_TTL:
2740 optval = inp->inp_ip_ttl;
2741 break;
2742
2743 #define OPTBIT(bit) (inp->inp_flags & bit ? 1 : 0)
2744
2745 case IP_RECVOPTS:
2746 optval = OPTBIT(INP_RECVOPTS);
2747 break;
2748
2749 case IP_RECVRETOPTS:
2750 optval = OPTBIT(INP_RECVRETOPTS);
2751 break;
2752
2753 case IP_RECVDSTADDR:
2754 optval = OPTBIT(INP_RECVDSTADDR);
2755 break;
2756
2757 case IP_RECVIF:
2758 optval = OPTBIT(INP_RECVIF);
2759 break;
2760
2761 case IP_RECVTTL:
2762 optval = OPTBIT(INP_RECVTTL);
2763 break;
2764
2765 case IP_PORTRANGE:
2766 if (inp->inp_flags & INP_HIGHPORT)
2767 optval = IP_PORTRANGE_HIGH;
2768 else if (inp->inp_flags & INP_LOWPORT)
2769 optval = IP_PORTRANGE_LOW;
2770 else
2771 optval = 0;
2772 break;
2773
2774 case IP_RECVPKTINFO:
2775 optval = OPTBIT(INP_PKTINFO);
2776 break;
2777 }
2778 error = sooptcopyout(sopt, &optval, sizeof (optval));
2779 break;
2780
2781 case IP_MULTICAST_IF:
2782 case IP_MULTICAST_IFINDEX:
2783 case IP_MULTICAST_VIF:
2784 case IP_MULTICAST_TTL:
2785 case IP_MULTICAST_LOOP:
2786 case IP_MSFILTER:
2787 error = inp_getmoptions(inp, sopt);
2788 break;
2789
2790 #if IPSEC
2791 case IP_IPSEC_POLICY: {
2792 error = 0; /* This option is no longer supported */
2793 break;
2794 }
2795 #endif /* IPSEC */
2796
2797 #if TRAFFIC_MGT
2798 case IP_TRAFFIC_MGT_BACKGROUND: {
2799 unsigned background = (so->so_traffic_mgt_flags &
2800 TRAFFIC_MGT_SO_BACKGROUND) ? 1 : 0;
2801 return (sooptcopyout(sopt, &background,
2802 sizeof (background)));
2803 break;
2804 }
2805 #endif /* TRAFFIC_MGT */
2806
2807 case IP_BOUND_IF:
2808 if (inp->inp_flags & INP_BOUND_IF)
2809 optval = inp->inp_boundifp->if_index;
2810 error = sooptcopyout(sopt, &optval, sizeof (optval));
2811 break;
2812
2813 case IP_NO_IFT_CELLULAR:
2814 optval = INP_NO_CELLULAR(inp) ? 1 : 0;
2815 error = sooptcopyout(sopt, &optval, sizeof (optval));
2816 break;
2817
2818 case IP_OUT_IF:
2819 optval = (inp->inp_last_outifp != NULL) ?
2820 inp->inp_last_outifp->if_index : 0;
2821 error = sooptcopyout(sopt, &optval, sizeof (optval));
2822 break;
2823
2824 default:
2825 error = ENOPROTOOPT;
2826 break;
2827 }
2828 break;
2829 }
2830 return (error);
2831 }
2832
/*
 * Set up IP options in pcb for insertion in output packets.
 * Store in mbuf with pointer in pcbopt, adding pseudo-option
 * with destination address if source routed.
 *
 * Consumes 'm' in all cases: on success it is stored in *pcbopt,
 * on failure (or when only clearing options) it is freed.
 * Returns 0 on success, EINVAL on malformed options.
 */
static int
ip_pcbopts(int optname, struct mbuf **pcbopt, struct mbuf *m)
{
#pragma unused(optname)
	int cnt, optlen;
	u_char *cp;
	u_char opt;

	/* turn off any old options */
	if (*pcbopt)
		(void) m_free(*pcbopt);
	*pcbopt = 0;
	if (m == (struct mbuf *)0 || m->m_len == 0) {
		/*
		 * Only turning off any previous options.
		 */
		if (m)
			(void) m_free(m);
		return (0);
	}

	/* options must arrive padded to a 32-bit boundary */
	if (m->m_len % sizeof (int32_t))
		goto bad;

	/*
	 * IP first-hop destination address will be stored before
	 * actual options; move other options back
	 * and clear it when none present.
	 */
	if (m->m_data + m->m_len + sizeof (struct in_addr) >= &m->m_dat[MLEN])
		goto bad;
	cnt = m->m_len;
	m->m_len += sizeof (struct in_addr);
	cp = mtod(m, u_char *) + sizeof (struct in_addr);
	ovbcopy(mtod(m, caddr_t), (caddr_t)cp, (unsigned)cnt);
	bzero(mtod(m, caddr_t), sizeof (struct in_addr));

	/* walk and validate each option; 'cp' tracks the current option */
	for (; cnt > 0; cnt -= optlen, cp += optlen) {
		opt = cp[IPOPT_OPTVAL];
		if (opt == IPOPT_EOL)
			break;
		if (opt == IPOPT_NOP)
			optlen = 1;
		else {
			if (cnt < IPOPT_OLEN + sizeof (*cp))
				goto bad;
			optlen = cp[IPOPT_OLEN];
			if (optlen < IPOPT_OLEN + sizeof (*cp) || optlen > cnt)
				goto bad;
		}
		switch (opt) {

		default:
			break;

		case IPOPT_LSRR:
		case IPOPT_SSRR:
			/*
			 * user process specifies route as:
			 *	->A->B->C->D
			 * D must be our final destination (but we can't
			 * check that since we may not have connected yet).
			 * A is first hop destination, which doesn't appear in
			 * actual IP option, but is stored before the options.
			 */
			if (optlen < IPOPT_MINOFF - 1 + sizeof (struct in_addr))
				goto bad;
			/* extract A out of the option; it shrinks by 4 bytes */
			m->m_len -= sizeof (struct in_addr);
			cnt -= sizeof (struct in_addr);
			optlen -= sizeof (struct in_addr);
			cp[IPOPT_OLEN] = optlen;
			/*
			 * Move first hop before start of options.
			 */
			bcopy((caddr_t)&cp[IPOPT_OFFSET+1], mtod(m, caddr_t),
			    sizeof (struct in_addr));
			/*
			 * Then copy rest of options back
			 * to close up the deleted entry.
			 */
			ovbcopy((caddr_t)(&cp[IPOPT_OFFSET+1] +
			    sizeof (struct in_addr)),
			    (caddr_t)&cp[IPOPT_OFFSET+1],
			    (unsigned)cnt + sizeof (struct in_addr));
			break;
		}
	}
	/* total stored size: pseudo-option dst + at most MAX_IPOPTLEN */
	if (m->m_len > MAX_IPOPTLEN + sizeof (struct in_addr))
		goto bad;
	*pcbopt = m;
	return (0);

bad:
	(void) m_free(m);
	return (EINVAL);
}
2934
2935 void
2936 ip_moptions_init(void)
2937 {
2938 PE_parse_boot_argn("ifa_debug", &imo_debug, sizeof (imo_debug));
2939
2940 imo_size = (imo_debug == 0) ? sizeof (struct ip_moptions) :
2941 sizeof (struct ip_moptions_dbg);
2942
2943 imo_zone = zinit(imo_size, IMO_ZONE_MAX * imo_size, 0,
2944 IMO_ZONE_NAME);
2945 if (imo_zone == NULL) {
2946 panic("%s: failed allocating %s", __func__, IMO_ZONE_NAME);
2947 /* NOTREACHED */
2948 }
2949 zone_change(imo_zone, Z_EXPAND, TRUE);
2950 }
2951
2952 void
2953 imo_addref(struct ip_moptions *imo, int locked)
2954 {
2955 if (!locked)
2956 IMO_LOCK(imo);
2957 else
2958 IMO_LOCK_ASSERT_HELD(imo);
2959
2960 if (++imo->imo_refcnt == 0) {
2961 panic("%s: imo %p wraparound refcnt\n", __func__, imo);
2962 /* NOTREACHED */
2963 } else if (imo->imo_trace != NULL) {
2964 (*imo->imo_trace)(imo, TRUE);
2965 }
2966
2967 if (!locked)
2968 IMO_UNLOCK(imo);
2969 }
2970
/*
 * Drop a reference on the ip_moptions; when the last reference goes
 * away, leave all multicast groups, purge any source filters, release
 * the memberships, and free the structure back to its zone.
 */
void
imo_remref(struct ip_moptions *imo)
{
	int i;

	IMO_LOCK(imo);
	if (imo->imo_refcnt == 0) {
		panic("%s: imo %p negative refcnt", __func__, imo);
		/* NOTREACHED */
	} else if (imo->imo_trace != NULL) {
		(*imo->imo_trace)(imo, FALSE);
	}

	--imo->imo_refcnt;
	if (imo->imo_refcnt > 0) {
		/* other holders remain; nothing more to do */
		IMO_UNLOCK(imo);
		return;
	}

	/* last reference: tear down each membership (and its filter) */
	for (i = 0; i < imo->imo_num_memberships; ++i) {
		struct in_mfilter *imf;

		imf = imo->imo_mfilters ? &imo->imo_mfilters[i] : NULL;
		if (imf != NULL)
			imf_leave(imf);

		(void) in_leavegroup(imo->imo_membership[i], imf);

		if (imf != NULL)
			imf_purge(imf);

		INM_REMREF(imo->imo_membership[i]);
		imo->imo_membership[i] = NULL;
	}
	imo->imo_num_memberships = 0;
	if (imo->imo_mfilters != NULL) {
		FREE(imo->imo_mfilters, M_INMFILTER);
		imo->imo_mfilters = NULL;
	}
	if (imo->imo_membership != NULL) {
		FREE(imo->imo_membership, M_IPMOPTS);
		imo->imo_membership = NULL;
	}
	IMO_UNLOCK(imo);

	/* safe to destroy the lock: refcnt hit zero, no other holders */
	lck_mtx_destroy(&imo->imo_lock, ifa_mtx_grp);

	if (!(imo->imo_debug & IFD_ALLOC)) {
		panic("%s: imo %p cannot be freed", __func__, imo);
		/* NOTREACHED */
	}
	zfree(imo_zone, imo);
}
3024
3025 static void
3026 imo_trace(struct ip_moptions *imo, int refhold)
3027 {
3028 struct ip_moptions_dbg *imo_dbg = (struct ip_moptions_dbg *)imo;
3029 ctrace_t *tr;
3030 u_int32_t idx;
3031 u_int16_t *cnt;
3032
3033 if (!(imo->imo_debug & IFD_DEBUG)) {
3034 panic("%s: imo %p has no debug structure", __func__, imo);
3035 /* NOTREACHED */
3036 }
3037 if (refhold) {
3038 cnt = &imo_dbg->imo_refhold_cnt;
3039 tr = imo_dbg->imo_refhold;
3040 } else {
3041 cnt = &imo_dbg->imo_refrele_cnt;
3042 tr = imo_dbg->imo_refrele;
3043 }
3044
3045 idx = atomic_add_16_ov(cnt, 1) % IMO_TRACE_HIST_SIZE;
3046 ctrace_record(&tr[idx]);
3047 }
3048
3049 struct ip_moptions *
3050 ip_allocmoptions(int how)
3051 {
3052 struct ip_moptions *imo;
3053
3054 imo = (how == M_WAITOK) ? zalloc(imo_zone) : zalloc_noblock(imo_zone);
3055 if (imo != NULL) {
3056 bzero(imo, imo_size);
3057 lck_mtx_init(&imo->imo_lock, ifa_mtx_grp, ifa_mtx_attr);
3058 imo->imo_debug |= IFD_ALLOC;
3059 if (imo_debug != 0) {
3060 imo->imo_debug |= IFD_DEBUG;
3061 imo->imo_trace = imo_trace;
3062 }
3063 IMO_ADDREF(imo);
3064 }
3065
3066 return (imo);
3067 }
3068
/*
 * Routine called from ip_output() to loop back a copy of an IP multicast
 * packet to the input queue of a specified interface.  Note that this
 * calls the output routine of the loopback "driver", but with an interface
 * pointer that might NOT be a loopback interface -- evil, but easier than
 * replicating that code here.
 *
 * srcifp: interface owning the source address (may be NULL; looked up here)
 * origifp: interface the packet was originally destined to
 * m: the outbound packet (not consumed; a copy is looped back)
 * dst: destination address handed to the loopback output
 * hlen: IP header length including options
 */
static void
ip_mloopback(struct ifnet *srcifp, struct ifnet *origifp, struct mbuf *m,
    struct sockaddr_in *dst, int hlen)
{
	struct mbuf *copym;
	struct ip *ip;

	if (lo_ifp == NULL)
		return;

	/*
	 * Copy the packet header as it's needed for the checksum
	 * Make sure to deep-copy IP header portion in case the data
	 * is in an mbuf cluster, so that we can safely override the IP
	 * header portion later.
	 */
	copym = m_copym_mode(m, 0, M_COPYALL, M_DONTWAIT, M_COPYM_COPY_HDR);
	if (copym != NULL && ((copym->m_flags & M_EXT) || copym->m_len < hlen))
		copym = m_pullup(copym, hlen);

	if (copym == NULL)
		return;

	/*
	 * We don't bother to fragment if the IP length is greater
	 * than the interface's MTU.  Can this possibly matter?
	 */
	ip = mtod(copym, struct ip *);
#if BYTE_ORDER != BIG_ENDIAN
	/* put len/off into network order before computing the header csum */
	HTONS(ip->ip_len);
	HTONS(ip->ip_off);
#endif
	ip->ip_sum = 0;
	ip->ip_sum = ip_cksum_hdr_out(copym, hlen);

	/*
	 * Mark checksum as valid unless receive checksum offload is
	 * disabled; if so, compute checksum in software.  If the
	 * interface itself is lo0, this will be overridden by if_loop.
	 */
	if (hwcksum_rx) {
		copym->m_pkthdr.csum_flags &= ~CSUM_PARTIAL;
		copym->m_pkthdr.csum_flags |=
		    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
		copym->m_pkthdr.csum_data = 0xffff;
	} else if (copym->m_pkthdr.csum_flags & CSUM_DELAY_DATA) {
#if BYTE_ORDER != BIG_ENDIAN
		/* in_delayed_cksum() expects ip_len in host order */
		NTOHS(ip->ip_len);
#endif
		in_delayed_cksum(copym);
#if BYTE_ORDER != BIG_ENDIAN
		HTONS(ip->ip_len);
#endif
	}

	/*
	 * Stuff the 'real' ifp into the pkthdr, to be used in matching
	 * in ip_input(); we need the loopback ifp/dl_tag passed as args
	 * to make the loopback driver compliant with the data link
	 * requirements.
	 */
	copym->m_pkthdr.rcvif = origifp;

	/*
	 * Also record the source interface (which owns the source address).
	 * This is basically a stripped down version of ifa_foraddr().
	 */
	if (srcifp == NULL) {
		struct in_ifaddr *ia;

		lck_rw_lock_shared(in_ifaddr_rwlock);
		TAILQ_FOREACH(ia, INADDR_HASH(ip->ip_src.s_addr), ia_hash) {
			IFA_LOCK_SPIN(&ia->ia_ifa);
			if (IA_SIN(ia)->sin_addr.s_addr == ip->ip_src.s_addr) {
				srcifp = ia->ia_ifp;
				IFA_UNLOCK(&ia->ia_ifa);
				break;
			}
			IFA_UNLOCK(&ia->ia_ifa);
		}
		lck_rw_done(in_ifaddr_rwlock);
	}
	if (srcifp != NULL)
		ip_setsrcifaddr_info(copym, srcifp->if_index, NULL);
	ip_setdstifaddr_info(copym, origifp->if_index, NULL);

	/* hand the copy to the loopback driver for reinjection */
	dlil_output(lo_ifp, PF_INET, copym, NULL, SA(dst), 0, NULL);
}
3164
3165 /*
3166 * Given a source IP address (and route, if available), determine the best
3167 * interface to send the packet from. Checking for (and updating) the
3168 * ROF_SRCIF_SELECTED flag in the pcb-supplied route placeholder is done
3169 * without any locks based on the assumption that ip_output() is single-
3170 * threaded per-pcb, i.e. for any given pcb there can only be one thread
3171 * performing output at the IP layer.
3172 *
3173 * This routine is analogous to in6_selectroute() for IPv6.
3174 */
3175 static struct ifaddr *
3176 in_selectsrcif(struct ip *ip, struct route *ro, unsigned int ifscope)
3177 {
3178 struct ifaddr *ifa = NULL;
3179 struct in_addr src = ip->ip_src;
3180 struct in_addr dst = ip->ip_dst;
3181 struct ifnet *rt_ifp;
3182 char s_src[MAX_IPv4_STR_LEN], s_dst[MAX_IPv4_STR_LEN];
3183
3184 VERIFY(src.s_addr != INADDR_ANY);
3185
3186 if (ip_select_srcif_debug) {
3187 (void) inet_ntop(AF_INET, &src.s_addr, s_src, sizeof (s_src));
3188 (void) inet_ntop(AF_INET, &dst.s_addr, s_dst, sizeof (s_dst));
3189 }
3190
3191 if (ro->ro_rt != NULL)
3192 RT_LOCK(ro->ro_rt);
3193
3194 rt_ifp = (ro->ro_rt != NULL) ? ro->ro_rt->rt_ifp : NULL;
3195
3196 /*
3197 * Given the source IP address, find a suitable source interface
3198 * to use for transmission; if the caller has specified a scope,
3199 * optimize the search by looking at the addresses only for that
3200 * interface. This is still suboptimal, however, as we need to
3201 * traverse the per-interface list.
3202 */
3203 if (ifscope != IFSCOPE_NONE || ro->ro_rt != NULL) {
3204 unsigned int scope = ifscope;
3205
3206 /*
3207 * If no scope is specified and the route is stale (pointing
3208 * to a defunct interface) use the current primary interface;
3209 * this happens when switching between interfaces configured
3210 * with the same IP address. Otherwise pick up the scope
3211 * information from the route; the ULP may have looked up a
3212 * correct route and we just need to verify it here and mark
3213 * it with the ROF_SRCIF_SELECTED flag below.
3214 */
3215 if (scope == IFSCOPE_NONE) {
3216 scope = rt_ifp->if_index;
3217 if (scope != get_primary_ifscope(AF_INET) &&
3218 ROUTE_UNUSABLE(ro))
3219 scope = get_primary_ifscope(AF_INET);
3220 }
3221
3222 ifa = (struct ifaddr *)ifa_foraddr_scoped(src.s_addr, scope);
3223
3224 if (ifa == NULL && ip->ip_p != IPPROTO_UDP &&
3225 ip->ip_p != IPPROTO_TCP && ipforwarding) {
3226 /*
3227 * If forwarding is enabled, and if the packet isn't
3228 * TCP or UDP, check if the source address belongs
3229 * to one of our own interfaces; if so, demote the
3230 * interface scope and do a route lookup right below.
3231 */
3232 ifa = (struct ifaddr *)ifa_foraddr(src.s_addr);
3233 if (ifa != NULL) {
3234 IFA_REMREF(ifa);
3235 ifa = NULL;
3236 ifscope = IFSCOPE_NONE;
3237 }
3238 }
3239
3240 if (ip_select_srcif_debug && ifa != NULL) {
3241 if (ro->ro_rt != NULL) {
3242 printf("%s->%s ifscope %d->%d ifa_if %s "
3243 "ro_if %s\n", s_src, s_dst, ifscope,
3244 scope, if_name(ifa->ifa_ifp),
3245 if_name(rt_ifp));
3246 } else {
3247 printf("%s->%s ifscope %d->%d ifa_if %s\n",
3248 s_src, s_dst, ifscope, scope,
3249 if_name(ifa->ifa_ifp));
3250 }
3251 }
3252 }
3253
3254 /*
3255 * Slow path; search for an interface having the corresponding source
3256 * IP address if the scope was not specified by the caller, and:
3257 *
3258 * 1) There currently isn't any route, or,
3259 * 2) The interface used by the route does not own that source
3260 * IP address; in this case, the route will get blown away
3261 * and we'll do a more specific scoped search using the newly
3262 * found interface.
3263 */
3264 if (ifa == NULL && ifscope == IFSCOPE_NONE) {
3265 ifa = (struct ifaddr *)ifa_foraddr(src.s_addr);
3266
3267 /*
3268 * If we have the IP address, but not the route, we don't
3269 * really know whether or not it belongs to the correct
3270 * interface (it could be shared across multiple interfaces.)
3271 * The only way to find out is to do a route lookup.
3272 */
3273 if (ifa != NULL && ro->ro_rt == NULL) {
3274 struct rtentry *rt;
3275 struct sockaddr_in sin;
3276 struct ifaddr *oifa = NULL;
3277
3278 bzero(&sin, sizeof (sin));
3279 sin.sin_family = AF_INET;
3280 sin.sin_len = sizeof (sin);
3281 sin.sin_addr = dst;
3282
3283 lck_mtx_lock(rnh_lock);
3284 if ((rt = rt_lookup(TRUE, SA(&sin), NULL,
3285 rt_tables[AF_INET], IFSCOPE_NONE)) != NULL) {
3286 RT_LOCK(rt);
3287 /*
3288 * If the route uses a different interface,
3289 * use that one instead. The IP address of
3290 * the ifaddr that we pick up here is not
3291 * relevant.
3292 */
3293 if (ifa->ifa_ifp != rt->rt_ifp) {
3294 oifa = ifa;
3295 ifa = rt->rt_ifa;
3296 IFA_ADDREF(ifa);
3297 RT_UNLOCK(rt);
3298 } else {
3299 RT_UNLOCK(rt);
3300 }
3301 rtfree_locked(rt);
3302 }
3303 lck_mtx_unlock(rnh_lock);
3304
3305 if (oifa != NULL) {
3306 struct ifaddr *iifa;
3307
3308 /*
3309 * See if the interface pointed to by the
3310 * route is configured with the source IP
3311 * address of the packet.
3312 */
3313 iifa = (struct ifaddr *)ifa_foraddr_scoped(
3314 src.s_addr, ifa->ifa_ifp->if_index);
3315
3316 if (iifa != NULL) {
3317 /*
3318 * Found it; drop the original one
3319 * as well as the route interface
3320 * address, and use this instead.
3321 */
3322 IFA_REMREF(oifa);
3323 IFA_REMREF(ifa);
3324 ifa = iifa;
3325 } else if (!ipforwarding ||
3326 (rt->rt_flags & RTF_GATEWAY)) {
3327 /*
3328 * This interface doesn't have that
3329 * source IP address; drop the route
3330 * interface address and just use the
3331 * original one, and let the caller
3332 * do a scoped route lookup.
3333 */
3334 IFA_REMREF(ifa);
3335 ifa = oifa;
3336 } else {
3337 /*
3338 * Forwarding is enabled and the source
3339 * address belongs to one of our own
3340 * interfaces which isn't the outgoing
3341 * interface, and we have a route, and
3342 * the destination is on a network that
3343 * is directly attached (onlink); drop
3344 * the original one and use the route
3345 * interface address instead.
3346 */
3347 IFA_REMREF(oifa);
3348 }
3349 }
3350 } else if (ifa != NULL && ro->ro_rt != NULL &&
3351 !(ro->ro_rt->rt_flags & RTF_GATEWAY) &&
3352 ifa->ifa_ifp != ro->ro_rt->rt_ifp && ipforwarding) {
3353 /*
3354 * Forwarding is enabled and the source address belongs
3355 * to one of our own interfaces which isn't the same
3356 * as the interface used by the known route; drop the
3357 * original one and use the route interface address.
3358 */
3359 IFA_REMREF(ifa);
3360 ifa = ro->ro_rt->rt_ifa;
3361 IFA_ADDREF(ifa);
3362 }
3363
3364 if (ip_select_srcif_debug && ifa != NULL) {
3365 printf("%s->%s ifscope %d ifa_if %s\n",
3366 s_src, s_dst, ifscope, if_name(ifa->ifa_ifp));
3367 }
3368 }
3369
3370 if (ro->ro_rt != NULL)
3371 RT_LOCK_ASSERT_HELD(ro->ro_rt);
3372 /*
3373 * If there is a non-loopback route with the wrong interface, or if
3374 * there is no interface configured with such an address, blow it
3375 * away. Except for local/loopback, we look for one with a matching
3376 * interface scope/index.
3377 */
3378 if (ro->ro_rt != NULL &&
3379 (ifa == NULL || (ifa->ifa_ifp != rt_ifp && rt_ifp != lo_ifp) ||
3380 !(ro->ro_rt->rt_flags & RTF_UP))) {
3381 if (ip_select_srcif_debug) {
3382 if (ifa != NULL) {
3383 printf("%s->%s ifscope %d ro_if %s != "
3384 "ifa_if %s (cached route cleared)\n",
3385 s_src, s_dst, ifscope, if_name(rt_ifp),
3386 if_name(ifa->ifa_ifp));
3387 } else {
3388 printf("%s->%s ifscope %d ro_if %s "
3389 "(no ifa_if found)\n",
3390 s_src, s_dst, ifscope, if_name(rt_ifp));
3391 }
3392 }
3393
3394 RT_UNLOCK(ro->ro_rt);
3395 ROUTE_RELEASE(ro);
3396
3397 /*
3398 * If the destination is IPv4 LLA and the route's interface
3399 * doesn't match the source interface, then the source IP
3400 * address is wrong; it most likely belongs to the primary
3401 * interface associated with the IPv4 LL subnet. Drop the
3402 * packet rather than letting it go out and return an error
3403 * to the ULP. This actually applies not only to IPv4 LL
3404 * but other shared subnets; for now we explicitly test only
3405 * for the former case and save the latter for future.
3406 */
3407 if (IN_LINKLOCAL(ntohl(dst.s_addr)) &&
3408 !IN_LINKLOCAL(ntohl(src.s_addr)) && ifa != NULL) {
3409 IFA_REMREF(ifa);
3410 ifa = NULL;
3411 }
3412 }
3413
3414 if (ip_select_srcif_debug && ifa == NULL) {
3415 printf("%s->%s ifscope %d (neither ro_if/ifa_if found)\n",
3416 s_src, s_dst, ifscope);
3417 }
3418
3419 /*
3420 * If there is a route, mark it accordingly. If there isn't one,
3421 * we'll get here again during the next transmit (possibly with a
3422 * route) and the flag will get set at that point. For IPv4 LLA
3423 * destination, mark it only if the route has been fully resolved;
3424 * otherwise we want to come back here again when the route points
3425 * to the interface over which the ARP reply arrives on.
3426 */
3427 if (ro->ro_rt != NULL && (!IN_LINKLOCAL(ntohl(dst.s_addr)) ||
3428 (ro->ro_rt->rt_gateway->sa_family == AF_LINK &&
3429 SDL(ro->ro_rt->rt_gateway)->sdl_alen != 0))) {
3430 if (ifa != NULL)
3431 IFA_ADDREF(ifa); /* for route */
3432 if (ro->ro_srcia != NULL)
3433 IFA_REMREF(ro->ro_srcia);
3434 ro->ro_srcia = ifa;
3435 ro->ro_flags |= ROF_SRCIF_SELECTED;
3436 RT_GENID_SYNC(ro->ro_rt);
3437 }
3438
3439 if (ro->ro_rt != NULL)
3440 RT_UNLOCK(ro->ro_rt);
3441
3442 return (ifa);
3443 }
3444
/*
 * Decide transmit-side checksum disposition for an outbound IPv4 packet.
 *
 * Splits the requested checksums (IP header and ULP data) between the
 * interface's hardware offload engine and software, based on the
 * interface capabilities (ifp->if_hwassist), the global hwcksum_tx
 * switch, and TSO eligibility.  Any delayed data checksum that ends up
 * in software is computed here via in_delayed_cksum().
 *
 * ifp     - outgoing interface
 * m       - the packet; m_pkthdr.csum_flags is rewritten in place
 * hlen    - IP header length; != sizeof (struct ip) means options present
 * ip_len  - total IP datagram length
 * sw_csum - out: checksum work still to be done in software by the
 *           caller (after this routine, at most CSUM_DELAY_IP)
 */
void
ip_output_checksum(struct ifnet *ifp, struct mbuf *m, int hlen, int ip_len,
    uint32_t *sw_csum)
{
	int tso = TSO_IPV4_OK(ifp, m);
	uint32_t hwcap = ifp->if_hwassist;

	/* the IPv4 header checksum is always required */
	m->m_pkthdr.csum_flags |= CSUM_IP;

	if (!hwcksum_tx) {
		/* do all in software; hardware checksum offload is disabled */
		*sw_csum = (CSUM_DELAY_DATA | CSUM_DELAY_IP) &
		    m->m_pkthdr.csum_flags;
	} else {
		/* do in software what the hardware cannot */
		*sw_csum = m->m_pkthdr.csum_flags &
		    ~IF_HWASSIST_CSUM_FLAGS(hwcap);
	}

	if (hlen != sizeof (struct ip)) {
		/*
		 * IP options are present; force both checksums into
		 * software (presumably because offload engines can't be
		 * trusted with options -- NOTE(review): confirm).
		 */
		*sw_csum |= ((CSUM_DELAY_DATA | CSUM_DELAY_IP) &
		    m->m_pkthdr.csum_flags);
	} else if (!(*sw_csum & CSUM_DELAY_DATA) && (hwcap & CSUM_PARTIAL)) {
		/*
		 * Partial checksum offload, if non-IP fragment, and TCP only
		 * (no UDP support, as the hardware may not be able to convert
		 * +0 to -0 (0xffff) per RFC1122 4.1.3.4.)
		 */
		if (hwcksum_tx && !tso &&
		    (m->m_pkthdr.csum_flags & CSUM_TCP) &&
		    ip_len <= ifp->if_mtu) {
			/* tell the hardware where to start and stuff */
			uint16_t start = sizeof (struct ip);
			uint16_t ulpoff = m->m_pkthdr.csum_data & 0xffff;
			m->m_pkthdr.csum_flags |=
			    (CSUM_DATA_VALID | CSUM_PARTIAL);
			m->m_pkthdr.csum_tx_stuff = (ulpoff + start);
			m->m_pkthdr.csum_tx_start = start;
			/* do IP hdr chksum in software */
			*sw_csum = CSUM_DELAY_IP;
		} else {
			/* no partial offload; data checksum stays software */
			*sw_csum |= (CSUM_DELAY_DATA & m->m_pkthdr.csum_flags);
		}
	}

	if (*sw_csum & CSUM_DELAY_DATA) {
		/* compute the ULP checksum now and clear the request */
		in_delayed_cksum(m);
		*sw_csum &= ~CSUM_DELAY_DATA;
	}

	if (hwcksum_tx) {
		/*
		 * Drop off bits that aren't supported by hardware;
		 * also make sure to preserve non-checksum related bits.
		 */
		m->m_pkthdr.csum_flags =
		    ((m->m_pkthdr.csum_flags &
		    (IF_HWASSIST_CSUM_FLAGS(hwcap) | CSUM_DATA_VALID)) |
		    (m->m_pkthdr.csum_flags & ~IF_HWASSIST_CSUM_MASK));
	} else {
		/* drop all bits; hardware checksum offload is disabled */
		m->m_pkthdr.csum_flags = 0;
	}
}
3508
3509 /*
3510 * GRE protocol output for PPP/PPTP
3511 */
3512 int
3513 ip_gre_output(struct mbuf *m)
3514 {
3515 struct route ro;
3516 int error;
3517
3518 bzero(&ro, sizeof (ro));
3519
3520 error = ip_output(m, NULL, &ro, 0, NULL, NULL);
3521
3522 ROUTE_RELEASE(&ro);
3523
3524 return (error);
3525 }
3526
3527 static int
3528 sysctl_reset_ip_output_stats SYSCTL_HANDLER_ARGS
3529 {
3530 #pragma unused(arg1, arg2)
3531 int error, i;
3532
3533 i = ip_output_measure;
3534 error = sysctl_handle_int(oidp, &i, 0, req);
3535 if (error || req->newptr == USER_ADDR_NULL)
3536 goto done;
3537 /* impose bounds */
3538 if (i < 0 || i > 1) {
3539 error = EINVAL;
3540 goto done;
3541 }
3542 if (ip_output_measure != i && i == 1) {
3543 net_perf_initialize(&net_perf, ip_output_measure_bins);
3544 }
3545 ip_output_measure = i;
3546 done:
3547 return (error);
3548 }
3549
3550 static int
3551 sysctl_ip_output_measure_bins SYSCTL_HANDLER_ARGS
3552 {
3553 #pragma unused(arg1, arg2)
3554 int error;
3555 uint64_t i;
3556
3557 i = ip_output_measure_bins;
3558 error = sysctl_handle_quad(oidp, &i, 0, req);
3559 if (error || req->newptr == USER_ADDR_NULL)
3560 goto done;
3561 /* validate data */
3562 if (!net_perf_validate_bins(i)) {
3563 error = EINVAL;
3564 goto done;
3565 }
3566 ip_output_measure_bins = i;
3567 done:
3568 return (error);
3569 }
3570
3571 static int
3572 sysctl_ip_output_getperf SYSCTL_HANDLER_ARGS
3573 {
3574 #pragma unused(oidp, arg1, arg2)
3575 if (req->oldptr == USER_ADDR_NULL)
3576 req->oldlen = (size_t)sizeof (struct ipstat);
3577
3578 return (SYSCTL_OUT(req, &net_perf, MIN(sizeof (net_perf), req->oldlen)));
3579 }
3580