[apple/xnu.git] / bsd / netinet6 / ip6_input.c
1 /*
2 * Copyright (c) 2003-2018 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 /*
30 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
31 * All rights reserved.
32 *
33 * Redistribution and use in source and binary forms, with or without
34 * modification, are permitted provided that the following conditions
35 * are met:
36 * 1. Redistributions of source code must retain the above copyright
37 * notice, this list of conditions and the following disclaimer.
38 * 2. Redistributions in binary form must reproduce the above copyright
39 * notice, this list of conditions and the following disclaimer in the
40 * documentation and/or other materials provided with the distribution.
41 * 3. Neither the name of the project nor the names of its contributors
42 * may be used to endorse or promote products derived from this software
43 * without specific prior written permission.
44 *
45 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
46 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
47 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
48 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
49 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
50 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
51 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
52 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
53 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
54 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
55 * SUCH DAMAGE.
56 */
57
58 /*
59 * Copyright (c) 1982, 1986, 1988, 1993
60 * The Regents of the University of California. All rights reserved.
61 *
62 * Redistribution and use in source and binary forms, with or without
63 * modification, are permitted provided that the following conditions
64 * are met:
65 * 1. Redistributions of source code must retain the above copyright
66 * notice, this list of conditions and the following disclaimer.
67 * 2. Redistributions in binary form must reproduce the above copyright
68 * notice, this list of conditions and the following disclaimer in the
69 * documentation and/or other materials provided with the distribution.
70 * 3. All advertising materials mentioning features or use of this software
71 * must display the following acknowledgement:
72 * This product includes software developed by the University of
73 * California, Berkeley and its contributors.
74 * 4. Neither the name of the University nor the names of its contributors
75 * may be used to endorse or promote products derived from this software
76 * without specific prior written permission.
77 *
78 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
79 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
80 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
81 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
82 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
83 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
84 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
85 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
86 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
87 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
88 * SUCH DAMAGE.
89 *
90 * @(#)ip_input.c 8.2 (Berkeley) 1/4/94
91 */
92
93 #include <sys/param.h>
94 #include <sys/systm.h>
95 #include <sys/malloc.h>
96 #include <sys/mbuf.h>
97 #include <sys/domain.h>
98 #include <sys/protosw.h>
99 #include <sys/socket.h>
100 #include <sys/socketvar.h>
101 #include <sys/errno.h>
102 #include <sys/time.h>
103 #include <sys/kernel.h>
104 #include <sys/syslog.h>
105 #include <sys/sysctl.h>
106 #include <sys/proc.h>
107 #include <sys/kauth.h>
108 #include <sys/mcache.h>
109
110 #include <mach/mach_time.h>
111 #include <mach/sdt.h>
112 #include <pexpert/pexpert.h>
113 #include <dev/random/randomdev.h>
114
115 #include <net/if.h>
116 #include <net/if_var.h>
117 #include <net/if_types.h>
118 #include <net/if_dl.h>
119 #include <net/route.h>
120 #include <net/kpi_protocol.h>
121 #include <net/ntstat.h>
122 #include <net/init.h>
123 #include <net/net_osdep.h>
124 #include <net/net_perf.h>
125
126 #include <netinet/in.h>
127 #include <netinet/in_systm.h>
128 #if INET
129 #include <netinet/ip.h>
130 #include <netinet/ip_icmp.h>
131 #endif /* INET */
132 #include <netinet/kpi_ipfilter_var.h>
133 #include <netinet/ip6.h>
134 #include <netinet6/in6_var.h>
135 #include <netinet6/ip6_var.h>
136 #include <netinet/in_pcb.h>
137 #include <netinet/icmp6.h>
138 #include <netinet6/in6_ifattach.h>
139 #include <netinet6/nd6.h>
140 #include <netinet6/scope6_var.h>
141 #include <netinet6/ip6protosw.h>
142
143 #if IPSEC
144 #include <netinet6/ipsec.h>
145 #include <netinet6/ipsec6.h>
146 extern int ipsec_bypass;
147 #endif /* IPSEC */
148
149 #if DUMMYNET
150 #include <netinet/ip_fw.h>
151 #include <netinet/ip_dummynet.h>
152 #endif /* DUMMYNET */
153
154 /* we need it for NLOOP. */
155 #include "loop.h"
156
157 #if PF
158 #include <net/pfvar.h>
159 #endif /* PF */
160
161 struct ip6protosw *ip6_protox[IPPROTO_MAX];
162
163 static lck_grp_attr_t *in6_ifaddr_rwlock_grp_attr;
164 static lck_grp_t *in6_ifaddr_rwlock_grp;
165 static lck_attr_t *in6_ifaddr_rwlock_attr;
166 decl_lck_rw_data(, in6_ifaddr_rwlock);
167
168 /* Protected by in6_ifaddr_rwlock */
169 struct in6_ifaddr *in6_ifaddrs = NULL;
170
171 #define IN6_IFSTAT_REQUIRE_ALIGNED_64(f) \
172 _CASSERT(!(offsetof(struct in6_ifstat, f) % sizeof (uint64_t)))
173
174 #define ICMP6_IFSTAT_REQUIRE_ALIGNED_64(f) \
175 _CASSERT(!(offsetof(struct icmp6_ifstat, f) % sizeof (uint64_t)))
176
177 struct ip6stat ip6stat;
178
179 decl_lck_mtx_data(, proxy6_lock);
180 decl_lck_mtx_data(static, dad6_mutex_data);
181 decl_lck_mtx_data(static, nd6_mutex_data);
182 decl_lck_mtx_data(static, prefix6_mutex_data);
183 lck_mtx_t *dad6_mutex = &dad6_mutex_data;
184 lck_mtx_t *nd6_mutex = &nd6_mutex_data;
185 lck_mtx_t *prefix6_mutex = &prefix6_mutex_data;
186 #ifdef ENABLE_ADDRSEL
187 decl_lck_mtx_data(static, addrsel_mutex_data);
188 lck_mtx_t *addrsel_mutex = &addrsel_mutex_data;
189 #endif
190 static lck_attr_t *ip6_mutex_attr;
191 static lck_grp_t *ip6_mutex_grp;
192 static lck_grp_attr_t *ip6_mutex_grp_attr;
193
194 extern int loopattach_done;
195 extern void addrsel_policy_init(void);
196
197 static int sysctl_reset_ip6_input_stats SYSCTL_HANDLER_ARGS;
198 static int sysctl_ip6_input_measure_bins SYSCTL_HANDLER_ARGS;
199 static int sysctl_ip6_input_getperf SYSCTL_HANDLER_ARGS;
200 static void ip6_init_delayed(void);
201 static int ip6_hopopts_input(u_int32_t *, u_int32_t *, struct mbuf **, int *);
202
203 #if NSTF
204 extern void stfattach(void);
205 #endif /* NSTF */
206
207 SYSCTL_DECL(_net_inet6_ip6);
208
209 static uint32_t ip6_adj_clear_hwcksum = 0;
210 SYSCTL_UINT(_net_inet6_ip6, OID_AUTO, adj_clear_hwcksum,
211 CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_adj_clear_hwcksum, 0,
212 "Invalidate hwcksum info when adjusting length");
213
214 static uint32_t ip6_adj_partial_sum = 1;
215 SYSCTL_UINT(_net_inet6_ip6, OID_AUTO, adj_partial_sum,
216 CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_adj_partial_sum, 0,
217 "Perform partial sum adjustment of trailing bytes at IP layer");
218
219 static int ip6_input_measure = 0;
220 SYSCTL_PROC(_net_inet6_ip6, OID_AUTO, input_perf,
221 CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_LOCKED,
222 &ip6_input_measure, 0, sysctl_reset_ip6_input_stats, "I", "Do time measurement");
223
224 static uint64_t ip6_input_measure_bins = 0;
225 SYSCTL_PROC(_net_inet6_ip6, OID_AUTO, input_perf_bins,
226 CTLTYPE_QUAD | CTLFLAG_RW | CTLFLAG_LOCKED, &ip6_input_measure_bins, 0,
227 sysctl_ip6_input_measure_bins, "I",
228 "bins for chaining performance data histogram");
229
230 static net_perf_t net_perf;
231 SYSCTL_PROC(_net_inet6_ip6, OID_AUTO, input_perf_data,
232 CTLTYPE_STRUCT | CTLFLAG_RD | CTLFLAG_LOCKED,
233 0, 0, sysctl_ip6_input_getperf, "S,net_perf",
234 "IP6 input performance data (struct net_perf, net/net_perf.h)");
235
236 /*
237 * On platforms which require strict alignment (currently for anything but
238 * i386 or x86_64), check if the IP header pointer is 32-bit aligned; if not,
239 * copy the contents of the mbuf chain into a new chain, and free the original
240 * one. Create some head room in the first mbuf of the new chain, in case
241 * it's needed later on.
242 *
243 * RFC 2460 says that IPv6 headers are 64-bit aligned, but network interfaces
244 * mostly align to 32-bit boundaries. Care should be taken never to use 64-bit
245 * load/store operations on the fields in IPv6 headers.
246 */
247 #if defined(__i386__) || defined(__x86_64__)
248 #define IP6_HDR_ALIGNMENT_FIXUP(_m, _ifp, _action) do { } while (0)
249 #else /* !__i386__ && !__x86_64__ */
250 #define IP6_HDR_ALIGNMENT_FIXUP(_m, _ifp, _action) do { \
251 if (!IP6_HDR_ALIGNED_P(mtod(_m, caddr_t))) { \
252 struct mbuf *_n; \
253 struct ifnet *__ifp = (_ifp); \
254 atomic_add_64(&(__ifp)->if_alignerrs, 1); \
255 if (((_m)->m_flags & M_PKTHDR) && \
256 (_m)->m_pkthdr.pkt_hdr != NULL) \
257 (_m)->m_pkthdr.pkt_hdr = NULL; \
258 _n = m_defrag_offset(_m, max_linkhdr, M_NOWAIT); \
259 if (_n == NULL) { \
260 ip6stat.ip6s_toosmall++; \
261 m_freem(_m); \
262 (_m) = NULL; \
263 _action; \
264 } else { \
265 VERIFY(_n != (_m)); \
266 (_m) = _n; \
267 } \
268 } \
269 } while (0)
270 #endif /* !__i386__ && !__x86_64__ */
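/*
 * Informational sketch, not part of the original source: on strict-alignment
 * architectures IP6_HDR_ALIGNED_P() (from ip6_var.h) is expected to reduce to
 * a simple pointer-alignment test, roughly
 *
 *	#define IP6_HDR_ALIGNED_P(ip)	((((uintptr_t)(ip)) & 3) == 0)
 *
 * so the fixup above only defragments the chain when the IPv6 header would
 * otherwise start at a misaligned address.
 */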
271
272 static void
273 ip6_proto_input(protocol_family_t protocol, mbuf_t packet)
274 {
275 #pragma unused(protocol)
276 #if INET
277 struct timeval start_tv;
278 if (ip6_input_measure) {
279 net_perf_start_time(&net_perf, &start_tv);
280 }
281 #endif /* INET */
282 ip6_input(packet);
283 #if INET
284 if (ip6_input_measure) {
285 net_perf_measure_time(&net_perf, &start_tv, 1);
286 net_perf_histogram(&net_perf, 1);
287 }
288 #endif /* INET */
289 }
290
291 /*
292 * IP6 initialization: fill in IP6 protocol switch table.
293 * All protocols not implemented in kernel go to raw IP6 protocol handler.
294 */
295 void
296 ip6_init(struct ip6protosw *pp, struct domain *dp)
297 {
298 static int ip6_initialized = 0;
299 struct protosw *pr;
300 struct timeval tv;
301 int i;
302 domain_unguard_t unguard;
303
304 domain_proto_mtx_lock_assert_held();
305 VERIFY((pp->pr_flags & (PR_INITIALIZED | PR_ATTACHED)) == PR_ATTACHED);
306
307 _CASSERT((sizeof(struct ip6_hdr) +
308 sizeof(struct icmp6_hdr)) <= _MHLEN);
309
310 if (ip6_initialized) {
311 return;
312 }
313 ip6_initialized = 1;
314
315 eventhandler_lists_ctxt_init(&in6_evhdlr_ctxt);
316 (void)EVENTHANDLER_REGISTER(&in6_evhdlr_ctxt, in6_event,
317 in6_eventhdlr_callback, eventhandler_entry_dummy_arg,
318 EVENTHANDLER_PRI_ANY);
319
320 eventhandler_lists_ctxt_init(&in6_clat46_evhdlr_ctxt);
321 (void)EVENTHANDLER_REGISTER(&in6_clat46_evhdlr_ctxt, in6_clat46_event,
322 in6_clat46_eventhdlr_callback, eventhandler_entry_dummy_arg,
323 EVENTHANDLER_PRI_ANY);
324
325 for (i = 0; i < IN6_EVENT_MAX; i++) {
326 VERIFY(in6_event2kev_array[i].in6_event_code == i);
327 }
328
329 pr = pffindproto_locked(PF_INET6, IPPROTO_RAW, SOCK_RAW);
330 if (pr == NULL) {
331 panic("%s: Unable to find [PF_INET6,IPPROTO_RAW,SOCK_RAW]\n",
332 __func__);
333 /* NOTREACHED */
334 }
335
336 /* Initialize the entire ip6_protox[] array to IPPROTO_RAW. */
337 for (i = 0; i < IPPROTO_MAX; i++) {
338 ip6_protox[i] = (struct ip6protosw *)pr;
339 }
340 /*
341 * Cycle through IP protocols and put them into the appropriate place
342 * in ip6_protox[], skipping protocols IPPROTO_{IP,RAW}.
343 */
344 VERIFY(dp == inet6domain && dp->dom_family == PF_INET6);
345 TAILQ_FOREACH(pr, &dp->dom_protosw, pr_entry) {
346 VERIFY(pr->pr_domain == dp);
347 if (pr->pr_protocol != 0 && pr->pr_protocol != IPPROTO_RAW) {
348 /* Be careful to only index valid IP protocols. */
349 if (pr->pr_protocol < IPPROTO_MAX) {
350 ip6_protox[pr->pr_protocol] =
351 (struct ip6protosw *)pr;
352 }
353 }
354 }
355
356 ip6_mutex_grp_attr = lck_grp_attr_alloc_init();
357
358 ip6_mutex_grp = lck_grp_alloc_init("ip6", ip6_mutex_grp_attr);
359 ip6_mutex_attr = lck_attr_alloc_init();
360
361 lck_mtx_init(dad6_mutex, ip6_mutex_grp, ip6_mutex_attr);
362 lck_mtx_init(nd6_mutex, ip6_mutex_grp, ip6_mutex_attr);
363 lck_mtx_init(prefix6_mutex, ip6_mutex_grp, ip6_mutex_attr);
364 scope6_init(ip6_mutex_grp, ip6_mutex_attr);
365
366 #ifdef ENABLE_ADDRSEL
367 lck_mtx_init(addrsel_mutex, ip6_mutex_grp, ip6_mutex_attr);
368 #endif
369
370 lck_mtx_init(&proxy6_lock, ip6_mutex_grp, ip6_mutex_attr);
371
372 in6_ifaddr_rwlock_grp_attr = lck_grp_attr_alloc_init();
373 in6_ifaddr_rwlock_grp = lck_grp_alloc_init("in6_ifaddr_rwlock",
374 in6_ifaddr_rwlock_grp_attr);
375 in6_ifaddr_rwlock_attr = lck_attr_alloc_init();
376 lck_rw_init(&in6_ifaddr_rwlock, in6_ifaddr_rwlock_grp,
377 in6_ifaddr_rwlock_attr);
378
379 IN6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_in_receive);
380 IN6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_in_hdrerr);
381 IN6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_in_toobig);
382 IN6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_in_noroute);
383 IN6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_in_addrerr);
384 IN6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_in_protounknown);
385 IN6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_in_truncated);
386 IN6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_in_discard);
387 IN6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_in_deliver);
388 IN6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_out_forward);
389 IN6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_out_request);
390 IN6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_out_discard);
391 IN6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_out_fragok);
392 IN6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_out_fragfail);
393 IN6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_out_fragcreat);
394 IN6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_reass_reqd);
395 IN6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_reass_ok);
396 IN6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_reass_fail);
397 IN6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_in_mcast);
398 IN6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_out_mcast);
399
400 ICMP6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_in_msg);
401 ICMP6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_in_error);
402 ICMP6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_in_dstunreach);
403 ICMP6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_in_adminprohib);
404 ICMP6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_in_timeexceed);
405 ICMP6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_in_paramprob);
406 ICMP6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_in_pkttoobig);
407 ICMP6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_in_echo);
408 ICMP6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_in_echoreply);
409 ICMP6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_in_routersolicit);
410 ICMP6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_in_routeradvert);
411 ICMP6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_in_neighborsolicit);
412 ICMP6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_in_neighboradvert);
413 ICMP6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_in_redirect);
414 ICMP6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_in_mldquery);
415 ICMP6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_in_mldreport);
416 ICMP6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_in_mlddone);
417
418 ICMP6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_out_msg);
419 ICMP6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_out_error);
420 ICMP6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_out_dstunreach);
421 ICMP6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_out_adminprohib);
422 ICMP6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_out_timeexceed);
423 ICMP6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_out_paramprob);
424 ICMP6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_out_pkttoobig);
425 ICMP6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_out_echo);
426 ICMP6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_out_echoreply);
427 ICMP6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_out_routersolicit);
428 ICMP6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_out_routeradvert);
429 ICMP6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_out_neighborsolicit);
430 ICMP6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_out_neighboradvert);
431 ICMP6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_out_redirect);
432 ICMP6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_out_mldquery);
433 ICMP6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_out_mldreport);
434 ICMP6_IFSTAT_REQUIRE_ALIGNED_64(ifs6_out_mlddone);
435
436 getmicrotime(&tv);
437 ip6_desync_factor =
438 (RandomULong() ^ tv.tv_usec) % MAX_TEMP_DESYNC_FACTOR;
439
440 in6_ifaddr_init();
441 ip6_moptions_init();
442 nd6_init();
443 frag6_init();
444 icmp6_init(NULL, dp);
445 addrsel_policy_init();
446
447 /*
448 * P2P interfaces often route the local address to the loopback
449 * interface. At this point, lo0 hasn't been initialized yet, which
450 * means that we need to delay the IPv6 configuration of lo0.
451 */
452 net_init_add(ip6_init_delayed);
453
454 unguard = domain_unguard_deploy();
455 i = proto_register_input(PF_INET6, ip6_proto_input, NULL, 0);
456 if (i != 0) {
457 panic("%s: failed to register PF_INET6 protocol: %d\n",
458 __func__, i);
459 /* NOTREACHED */
460 }
461 domain_unguard_release(unguard);
462 }
463
464 static void
465 ip6_init_delayed(void)
466 {
467 (void) in6_ifattach_prelim(lo_ifp);
468
469 /* timer for regeneration of temporary addresses' randomized IDs */
470 timeout(in6_tmpaddrtimer, NULL,
471 (ip6_temp_preferred_lifetime - ip6_desync_factor -
472 ip6_temp_regen_advance) * hz);
473
474 #if NSTF
475 stfattach();
476 #endif /* NSTF */
477 }
478
479 static void
480 ip6_input_adjust(struct mbuf *m, struct ip6_hdr *ip6, uint32_t plen,
481 struct ifnet *inifp)
482 {
483 boolean_t adjust = TRUE;
484 uint32_t tot_len = sizeof(*ip6) + plen;
485
486 ASSERT(m_pktlen(m) > tot_len);
487
488 /*
489 * Invalidate hardware checksum info if ip6_adj_clear_hwcksum
490 * is set; useful to handle buggy drivers. Note that this
491 * should not be enabled by default, as we may get here due
492 * to link-layer padding.
493 */
494 if (ip6_adj_clear_hwcksum &&
495 (m->m_pkthdr.csum_flags & CSUM_DATA_VALID) &&
496 !(inifp->if_flags & IFF_LOOPBACK) &&
497 !(m->m_pkthdr.pkt_flags & PKTF_LOOP)) {
498 m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID;
499 m->m_pkthdr.csum_data = 0;
500 ip6stat.ip6s_adj_hwcsum_clr++;
501 }
502
503 /*
504 * If partial checksum information is available, subtract
505 * out the partial sum of postpended extraneous bytes, and
506 * update the checksum metadata accordingly. By doing it
507 * here, the upper layer transport only needs to adjust any
508 * prepended extraneous bytes (else it will do both.)
509 */
510 if (ip6_adj_partial_sum &&
511 (m->m_pkthdr.csum_flags & (CSUM_DATA_VALID | CSUM_PARTIAL)) ==
512 (CSUM_DATA_VALID | CSUM_PARTIAL)) {
513 m->m_pkthdr.csum_rx_val = m_adj_sum16(m,
514 m->m_pkthdr.csum_rx_start, m->m_pkthdr.csum_rx_start,
515 (tot_len - m->m_pkthdr.csum_rx_start),
516 m->m_pkthdr.csum_rx_val);
517 } else if ((m->m_pkthdr.csum_flags &
518 (CSUM_DATA_VALID | CSUM_PARTIAL)) ==
519 (CSUM_DATA_VALID | CSUM_PARTIAL)) {
520 /*
521 * If packet has partial checksum info and we decided not
522 * to subtract the partial sum of postpended extraneous
523 * bytes here (not the default case), leave that work to
524 * be handled by the other layers. For now, only TCP, UDP
525 * layers are capable of dealing with this. For all other
526 * protocols (including fragments), trim and ditch the
527 * partial sum as those layers might not implement partial
528 * checksumming (or adjustment) at all.
529 */
530 if (ip6->ip6_nxt == IPPROTO_TCP ||
531 ip6->ip6_nxt == IPPROTO_UDP) {
532 adjust = FALSE;
533 } else {
534 m->m_pkthdr.csum_flags &= ~CSUM_DATA_VALID;
535 m->m_pkthdr.csum_data = 0;
536 ip6stat.ip6s_adj_hwcsum_clr++;
537 }
538 }
539
540 if (adjust) {
541 ip6stat.ip6s_adj++;
542 if (m->m_len == m->m_pkthdr.len) {
543 m->m_len = tot_len;
544 m->m_pkthdr.len = tot_len;
545 } else {
546 m_adj(m, tot_len - m->m_pkthdr.len);
547 }
548 }
549 }
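/*
 * Worked illustration of the partial-sum adjustment above (informational,
 * not in the original source): if the NIC reported a 16-bit one's-complement
 * sum starting at csum_rx_start and the frame carries link-layer padding
 * beyond tot_len, the contribution of those trailing bytes has to be folded
 * out before the transport layer can trust csum_rx_val; m_adj_sum16() is
 * handed the remaining valid length (tot_len - csum_rx_start) so the stored
 * sum covers only that span.
 */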
550
551 void
552 ip6_input(struct mbuf *m)
553 {
554 struct ip6_hdr *ip6;
555 int off = sizeof(struct ip6_hdr), nest;
556 u_int32_t plen;
557 u_int32_t rtalert = ~0;
558 int nxt = 0, ours = 0;
559 struct ifnet *inifp, *deliverifp = NULL;
560 ipfilter_t inject_ipfref = NULL;
561 int seen = 1;
562 struct in6_ifaddr *ia6 = NULL;
563 struct sockaddr_in6 *dst6;
564 #if DUMMYNET
565 struct m_tag *tag;
566 #endif /* DUMMYNET */
567 struct {
568 struct route_in6 rin6;
569 #if DUMMYNET
570 struct ip_fw_args args;
571 #endif /* DUMMYNET */
572 } ip6ibz;
573 #define rin6 ip6ibz.rin6
574 #define args ip6ibz.args
575
576 /* zero out {rin6, args} */
577 bzero(&ip6ibz, sizeof(ip6ibz));
578
579 /*
580 * Check if the packet we received is valid after interface filter
581 * processing
582 */
583 MBUF_INPUT_CHECK(m, m->m_pkthdr.rcvif);
584 inifp = m->m_pkthdr.rcvif;
585 VERIFY(inifp != NULL);
586
587 /* Perform IP header alignment fixup, if needed */
588 IP6_HDR_ALIGNMENT_FIXUP(m, inifp, return );
589
590 m->m_pkthdr.pkt_flags &= ~PKTF_FORWARDED;
591 #if IPSEC
592 /*
593 * should the inner packet be considered authentic?
594 * see comment in ah4_input().
595 */
596 m->m_flags &= ~M_AUTHIPHDR;
597 m->m_flags &= ~M_AUTHIPDGM;
598 #endif /* IPSEC */
599
600 /*
601 * make sure we don't have onion-peeling information left in m_aux.
602 */
603 ip6_delaux(m);
604
605 #if DUMMYNET
606 if ((tag = m_tag_locate(m, KERNEL_MODULE_TAG_ID,
607 KERNEL_TAG_TYPE_DUMMYNET, NULL)) != NULL) {
608 struct dn_pkt_tag *dn_tag;
609
610 dn_tag = (struct dn_pkt_tag *)(tag + 1);
611
612 args.fwa_pf_rule = dn_tag->dn_pf_rule;
613
614 m_tag_delete(m, tag);
615 }
616
617 if (args.fwa_pf_rule) {
618 ip6 = mtod(m, struct ip6_hdr *); /* In case PF got disabled */
619
620 goto check_with_pf;
621 }
622 #endif /* DUMMYNET */
623
624 /*
625 * No need to process the packet twice if we've already seen it.
626 */
627 inject_ipfref = ipf_get_inject_filter(m);
628 if (inject_ipfref != NULL) {
629 ip6 = mtod(m, struct ip6_hdr *);
630 nxt = ip6->ip6_nxt;
631 seen = 0;
632 goto injectit;
633 } else {
634 seen = 1;
635 }
636
637 /*
638 * mbuf statistics
639 */
640 if (m->m_flags & M_EXT) {
641 if (m->m_next != NULL) {
642 ip6stat.ip6s_mext2m++;
643 } else {
644 ip6stat.ip6s_mext1++;
645 }
646 } else {
647 #define M2MMAX (sizeof (ip6stat.ip6s_m2m) / sizeof (ip6stat.ip6s_m2m[0]))
648 if (m->m_next != NULL) {
649 if (m->m_pkthdr.pkt_flags & PKTF_LOOP) {
650 /* XXX */
651 ip6stat.ip6s_m2m[ifnet_index(lo_ifp)]++;
652 } else if (inifp->if_index < M2MMAX) {
653 ip6stat.ip6s_m2m[inifp->if_index]++;
654 } else {
655 ip6stat.ip6s_m2m[0]++;
656 }
657 } else {
658 ip6stat.ip6s_m1++;
659 }
660 #undef M2MMAX
661 }
662
663 /*
664 * Drop the packet if IPv6 operation is disabled on the interface.
665 */
666 if (inifp->if_eflags & IFEF_IPV6_DISABLED) {
667 goto bad;
668 }
669
670 in6_ifstat_inc_na(inifp, ifs6_in_receive);
671 ip6stat.ip6s_total++;
672
673 /*
674 * L2 bridge code and some other code can return an mbuf chain
675 * that does not conform to the KAME requirement. Too bad.
676 * XXX: fails to join if interface MTU > MCLBYTES. jumbogram?
677 */
678 if (m->m_next != NULL && m->m_pkthdr.len < MCLBYTES) {
679 struct mbuf *n;
680
681 MGETHDR(n, M_DONTWAIT, MT_HEADER); /* MAC-OK */
682 if (n) {
683 M_COPY_PKTHDR(n, m);
684 }
685 if (n && m->m_pkthdr.len > MHLEN) {
686 MCLGET(n, M_DONTWAIT);
687 if ((n->m_flags & M_EXT) == 0) {
688 m_freem(n);
689 n = NULL;
690 }
691 }
692 if (n == NULL) {
693 goto bad;
694 }
695
696 m_copydata(m, 0, m->m_pkthdr.len, mtod(n, caddr_t));
697 n->m_len = m->m_pkthdr.len;
698 m_freem(m);
699 m = n;
700 }
701 IP6_EXTHDR_CHECK(m, 0, sizeof(struct ip6_hdr), { goto done; });
702
703 if (m->m_len < sizeof(struct ip6_hdr)) {
704 if ((m = m_pullup(m, sizeof(struct ip6_hdr))) == 0) {
705 ip6stat.ip6s_toosmall++;
706 in6_ifstat_inc(inifp, ifs6_in_hdrerr);
707 goto done;
708 }
709 }
710
711 ip6 = mtod(m, struct ip6_hdr *);
712
713 if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION) {
714 ip6stat.ip6s_badvers++;
715 in6_ifstat_inc(inifp, ifs6_in_hdrerr);
716 goto bad;
717 }
718
719 ip6stat.ip6s_nxthist[ip6->ip6_nxt]++;
720
721 /*
722 * Check against address spoofing/corruption.
723 */
724 if (!(m->m_pkthdr.pkt_flags & PKTF_LOOP) &&
725 IN6_IS_ADDR_LOOPBACK(&ip6->ip6_src)) {
726 ip6stat.ip6s_badscope++;
727 in6_ifstat_inc(inifp, ifs6_in_addrerr);
728 goto bad;
729 }
730 if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_src) ||
731 IN6_IS_ADDR_UNSPECIFIED(&ip6->ip6_dst)) {
732 /*
733 * XXX: "badscope" is not very suitable for a multicast source.
734 */
735 ip6stat.ip6s_badscope++;
736 in6_ifstat_inc(inifp, ifs6_in_addrerr);
737 goto bad;
738 }
739 if (IN6_IS_ADDR_MC_INTFACELOCAL(&ip6->ip6_dst) &&
740 !(m->m_pkthdr.pkt_flags & PKTF_LOOP)) {
741 /*
742 * In this case, the packet should come from the loopback
743 * interface. However, we cannot just check the if_flags,
744 * because ip6_mloopback() passes the "actual" interface
745 * as the outgoing/incoming interface.
746 */
747 ip6stat.ip6s_badscope++;
748 in6_ifstat_inc(inifp, ifs6_in_addrerr);
749 goto bad;
750 }
751
752 /*
753 * The following check is not documented in specs. A malicious
754 * party may be able to use IPv4 mapped addr to confuse tcp/udp stack
755 * and bypass security checks (act as if it was from 127.0.0.1 by using
756 * IPv6 src ::ffff:127.0.0.1). Be cautious.
757 *
758 * This check chokes if we are in an SIIT cloud. As none of BSDs
759 * support IPv4-less kernel compilation, we cannot support SIIT
760 * environment at all. So, it makes more sense for us to reject any
761 * malicious packets for non-SIIT environment, than try to do a
762 * partial support for SIIT environment.
763 */
764 if (IN6_IS_ADDR_V4MAPPED(&ip6->ip6_src) ||
765 IN6_IS_ADDR_V4MAPPED(&ip6->ip6_dst)) {
766 ip6stat.ip6s_badscope++;
767 in6_ifstat_inc(inifp, ifs6_in_addrerr);
768 goto bad;
769 }
770 #if 0
771 /*
772 * Reject packets with IPv4 compatible addresses (auto tunnel).
773 *
774 * The code forbids auto tunnel relay case in RFC1933 (the check is
775 * stronger than RFC1933). We may want to re-enable it if mech-xx
776 * is revised to forbid relaying case.
777 */
778 if (IN6_IS_ADDR_V4COMPAT(&ip6->ip6_src) ||
779 IN6_IS_ADDR_V4COMPAT(&ip6->ip6_dst)) {
780 ip6stat.ip6s_badscope++;
781 in6_ifstat_inc(inifp, ifs6_in_addrerr);
782 goto bad;
783 }
784 #endif
785
786 /*
787 * Naively assume we can attribute inbound data to the route we would
788 * use to send to this destination. Asymmetric routing breaks this
789 * assumption, but it still allows us to account for traffic from
790 * a remote node in the routing table.
791 * This has a very significant performance impact, so we bypass it
792 * if nstat_collect is disabled. We may also bypass it in the future
793 * when the protocol is TCP, because TCP will have a route that
794 * we can use to attribute the data to. That does mean we would not
795 * account for forwarded TCP traffic.
796 */
797 if (nstat_collect) {
798 struct rtentry *rte =
799 ifnet_cached_rtlookup_inet6(inifp, &ip6->ip6_src);
800 if (rte != NULL) {
801 nstat_route_rx(rte, 1, m->m_pkthdr.len, 0);
802 rtfree(rte);
803 }
804 }
805
806 #if DUMMYNET
807 check_with_pf:
808 #endif /* DUMMYNET */
809 #if PF
810 /* Invoke inbound packet filter */
811 if (PF_IS_ENABLED) {
812 int error;
813 #if DUMMYNET
814 error = pf_af_hook(inifp, NULL, &m, AF_INET6, TRUE, &args);
815 #else /* !DUMMYNET */
816 error = pf_af_hook(inifp, NULL, &m, AF_INET6, TRUE, NULL);
817 #endif /* !DUMMYNET */
818 if (error != 0 || m == NULL) {
819 if (m != NULL) {
820 panic("%s: unexpected packet %p\n",
821 __func__, m);
822 /* NOTREACHED */
823 }
824 /* Already freed by callee */
825 goto done;
826 }
827 ip6 = mtod(m, struct ip6_hdr *);
828 }
829 #endif /* PF */
830
831 /* drop packets if interface ID portion is already filled */
832 if (!(inifp->if_flags & IFF_LOOPBACK) &&
833 !(m->m_pkthdr.pkt_flags & PKTF_LOOP)) {
834 if (IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src) &&
835 ip6->ip6_src.s6_addr16[1]) {
836 ip6stat.ip6s_badscope++;
837 goto bad;
838 }
839 if (IN6_IS_SCOPE_EMBED(&ip6->ip6_dst) &&
840 ip6->ip6_dst.s6_addr16[1]) {
841 ip6stat.ip6s_badscope++;
842 goto bad;
843 }
844 }
845
846 if (m->m_pkthdr.pkt_flags & PKTF_IFAINFO) {
847 if (IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src)) {
848 ip6->ip6_src.s6_addr16[1] =
849 htons(m->m_pkthdr.src_ifindex);
850 }
851 if (IN6_IS_SCOPE_EMBED(&ip6->ip6_dst)) {
852 ip6->ip6_dst.s6_addr16[1] =
853 htons(m->m_pkthdr.dst_ifindex);
854 }
855 } else {
856 if (IN6_IS_SCOPE_LINKLOCAL(&ip6->ip6_src)) {
857 ip6->ip6_src.s6_addr16[1] = htons(inifp->if_index);
858 }
859 if (IN6_IS_SCOPE_EMBED(&ip6->ip6_dst)) {
860 ip6->ip6_dst.s6_addr16[1] = htons(inifp->if_index);
861 }
862 }
863
864 /*
865 * Multicast check
866 */
867 if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst)) {
868 struct in6_multi *in6m = NULL;
869
870 in6_ifstat_inc_na(inifp, ifs6_in_mcast);
871 /*
872 * See if we belong to the destination multicast group on the
873 * arrival interface.
874 */
875 in6_multihead_lock_shared();
876 IN6_LOOKUP_MULTI(&ip6->ip6_dst, inifp, in6m);
877 in6_multihead_lock_done();
878 if (in6m != NULL) {
879 IN6M_REMREF(in6m);
880 ours = 1;
881 } else if (!nd6_prproxy) {
882 ip6stat.ip6s_notmember++;
883 ip6stat.ip6s_cantforward++;
884 in6_ifstat_inc(inifp, ifs6_in_discard);
885 goto bad;
886 }
887 deliverifp = inifp;
888 VERIFY(ia6 == NULL);
889 goto hbhcheck;
890 }
891
892 /*
893 * Unicast check
894 *
895 * Fast path: see if the target is ourselves.
896 */
897 lck_rw_lock_shared(&in6_ifaddr_rwlock);
898 for (ia6 = in6_ifaddrs; ia6 != NULL; ia6 = ia6->ia_next) {
899 /*
900 * No reference is held on the address, as we just need
901 * to test for a few things while holding the RW lock.
902 */
903 if (IN6_ARE_ADDR_EQUAL(&ia6->ia_addr.sin6_addr, &ip6->ip6_dst)) {
904 break;
905 }
906 }
907
908 if (ia6 != NULL) {
909 /*
910 * For performance, test without acquiring the address lock;
911 * a lot of things in the address are set once and never
912 * changed (e.g. ia_ifp.)
913 */
914 if (!(ia6->ia6_flags & (IN6_IFF_NOTREADY | IN6_IFF_CLAT46))) {
915 /* this address is ready */
916 ours = 1;
917 deliverifp = ia6->ia_ifp;
918 /*
919 * record dst address information into mbuf.
920 */
921 (void) ip6_setdstifaddr_info(m, 0, ia6);
922 lck_rw_done(&in6_ifaddr_rwlock);
923 goto hbhcheck;
924 }
925 lck_rw_done(&in6_ifaddr_rwlock);
926 ia6 = NULL;
927 /* address is not ready, so discard the packet. */
928 nd6log(info, "%s: packet to an unready address %s->%s\n",
929 __func__, ip6_sprintf(&ip6->ip6_src),
930 ip6_sprintf(&ip6->ip6_dst));
931 goto bad;
932 }
933 lck_rw_done(&in6_ifaddr_rwlock);
934
935 /*
936 * Slow path: route lookup.
937 */
938 dst6 = SIN6(&rin6.ro_dst);
939 dst6->sin6_len = sizeof(struct sockaddr_in6);
940 dst6->sin6_family = AF_INET6;
941 dst6->sin6_addr = ip6->ip6_dst;
942
943 rtalloc_scoped_ign((struct route *)&rin6,
944 RTF_PRCLONING, IFSCOPE_NONE);
945 if (rin6.ro_rt != NULL) {
946 RT_LOCK_SPIN(rin6.ro_rt);
947 }
948
949 #define rt6_key(r) (SIN6((r)->rt_nodes->rn_key))
950
951 /*
952 * Accept the packet if the forwarding interface to the destination
953 * according to the routing table is the loopback interface,
954 * unless the associated route has a gateway.
955 * Note that this approach causes us to accept a packet if there is a
956 * route to the loopback interface for the destination of the packet.
957 * But we think it's even useful in some situations, e.g. when using
958 * a special daemon which wants to intercept the packet.
959 *
960 * XXX: some OSes automatically make a cloned route for the destination
961 * of an outgoing packet. If the outgoing interface of the packet
962 * is a loopback one, the kernel would consider the packet to be
963 * accepted, even if we have no such address assigned on the interface.
964 * We check the cloned flag of the route entry to reject such cases,
965 * assuming that route entries for our own addresses are not made by
966 * cloning (it should be true because in6_addloop explicitly installs
967 * the host route). However, we might have to do an explicit check,
968 * even though it would be less efficient. Or, should we rather install a
969 * reject route for such a case?
970 */
971 if (rin6.ro_rt != NULL &&
972 (rin6.ro_rt->rt_flags & (RTF_HOST | RTF_GATEWAY)) == RTF_HOST &&
973 #if RTF_WASCLONED
974 !(rin6.ro_rt->rt_flags & RTF_WASCLONED) &&
975 #endif
976 rin6.ro_rt->rt_ifp->if_type == IFT_LOOP) {
977 ia6 = (struct in6_ifaddr *)rin6.ro_rt->rt_ifa;
978 /*
979 * Packets to a tentative, duplicated, or somehow invalid
980 * address must not be accepted.
981 *
982 * For performance, test without acquiring the address lock;
983 * a lot of things in the address are set once and never
984 * changed (e.g. ia_ifp.)
985 */
986 if (!(ia6->ia6_flags & IN6_IFF_NOTREADY)) {
987 /* this address is ready */
988 ours = 1;
989 deliverifp = ia6->ia_ifp; /* correct? */
990 /*
991 * record dst address information into mbuf.
992 */
993 (void) ip6_setdstifaddr_info(m, 0, ia6);
994 RT_UNLOCK(rin6.ro_rt);
995 goto hbhcheck;
996 }
997 RT_UNLOCK(rin6.ro_rt);
998 ia6 = NULL;
999 /* address is not ready, so discard the packet. */
1000 nd6log(error, "%s: packet to an unready address %s->%s\n",
1001 __func__, ip6_sprintf(&ip6->ip6_src),
1002 ip6_sprintf(&ip6->ip6_dst));
1003 goto bad;
1004 }
1005
1006 if (rin6.ro_rt != NULL) {
1007 RT_UNLOCK(rin6.ro_rt);
1008 }
1009
1010 /*
1011 * Now there is no reason to process the packet if it's not our own
1012 * and we're not a router.
1013 */
1014 if (!ip6_forwarding) {
1015 ip6stat.ip6s_cantforward++;
1016 in6_ifstat_inc(inifp, ifs6_in_discard);
1017 /*
1018 * Raise a kernel event if the packet received on cellular
1019 * interface is not intended for local host.
1020 * For now limit it to ICMPv6 packets.
1021 */
1022 if (inifp->if_type == IFT_CELLULAR &&
1023 ip6->ip6_nxt == IPPROTO_ICMPV6) {
1024 in6_ifstat_inc(inifp, ifs6_cantfoward_icmp6);
1025 }
1026 goto bad;
1027 }
1028
1029 hbhcheck:
1030 /*
1031 * record dst address information into mbuf, if we don't have one yet.
1032 * note that we are unable to record it, if the address is not listed
1033 * as our interface address (e.g. multicast addresses, etc.)
1034 */
1035 if (deliverifp != NULL && ia6 == NULL) {
1036 ia6 = in6_ifawithifp(deliverifp, &ip6->ip6_dst);
1037 if (ia6 != NULL) {
1038 (void) ip6_setdstifaddr_info(m, 0, ia6);
1039 IFA_REMREF(&ia6->ia_ifa);
1040 }
1041 }
1042
1043 /*
1044 * Process Hop-by-Hop options header if it's contained.
1045 * m may be modified in ip6_hopopts_input().
1046 * If a JumboPayload option is included, plen will also be modified.
1047 */
1048 plen = (u_int32_t)ntohs(ip6->ip6_plen);
1049 if (ip6->ip6_nxt == IPPROTO_HOPOPTS) {
1050 struct ip6_hbh *hbh;
1051
1052 /*
1053 * Mark the packet to imply that HBH option has been checked.
1054 * This can only be true if the packet came in unfragmented
1055 * or if the option is in the first fragment
1056 */
1057 m->m_pkthdr.pkt_flags |= PKTF_HBH_CHKED;
1058 if (ip6_hopopts_input(&plen, &rtalert, &m, &off)) {
1059 #if 0 /* touches NULL pointer */
1060 in6_ifstat_inc(inifp, ifs6_in_discard);
1061 #endif
1062 goto done; /* m has already been freed */
1063 }
1064
1065 /* adjust pointer */
1066 ip6 = mtod(m, struct ip6_hdr *);
1067
1068 /*
1069 * if the payload length field is 0 and the next header field
1070 * indicates Hop-by-Hop Options header, then a Jumbo Payload
1071 * option MUST be included.
1072 */
1073 if (ip6->ip6_plen == 0 && plen == 0) {
1074 /*
1075 * Note that if a valid jumbo payload option is
1076 * contained, ip6_hopopts_input() must set a valid
1077 * (non-zero) payload length to the variable plen.
1078 */
1079 ip6stat.ip6s_badoptions++;
1080 in6_ifstat_inc(inifp, ifs6_in_discard);
1081 in6_ifstat_inc(inifp, ifs6_in_hdrerr);
1082 icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
1083 (caddr_t)&ip6->ip6_plen - (caddr_t)ip6);
1084 goto done;
1085 }
1086 /* ip6_hopopts_input() ensures that mbuf is contiguous */
1087 hbh = (struct ip6_hbh *)(ip6 + 1);
1088 nxt = hbh->ip6h_nxt;
1089
1090 /*
1091 * If we are acting as a router and the packet contains a
1092 * router alert option, see if we know the option value.
1093 * Currently, we only support the option value for MLD, in which
1094 * case we should pass the packet to the multicast routing
1095 * daemon.
1096 */
1097 if (rtalert != ~0 && ip6_forwarding) {
1098 switch (rtalert) {
1099 case IP6OPT_RTALERT_MLD:
1100 ours = 1;
1101 break;
1102 default:
1103 /*
1104 * RFC 2711 requires that unrecognized values be
1105 * silently ignored.
1106 */
1107 break;
1108 }
1109 }
1110 } else {
1111 nxt = ip6->ip6_nxt;
1112 }
1113
1114 /*
1115 * Check that the amount of data in the buffers
1116 * is at least as much as the IPv6 header would have us expect.
1117 * Trim mbufs if longer than we expect.
1118 * Drop packet if shorter than we expect.
1119 */
1120 if (m->m_pkthdr.len - sizeof(struct ip6_hdr) < plen) {
1121 ip6stat.ip6s_tooshort++;
1122 in6_ifstat_inc(inifp, ifs6_in_truncated);
1123 goto bad;
1124 }
1125 if (m->m_pkthdr.len > sizeof(struct ip6_hdr) + plen) {
1126 ip6_input_adjust(m, ip6, plen, inifp);
1127 }
1128
1129 /*
1130 * Forward if desirable.
1131 */
1132 if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst)) {
1133 if (!ours && nd6_prproxy) {
1134 /*
1135 * If this isn't for us, this might be a Neighbor
1136 * Solicitation (dst is solicited-node multicast)
1137 * against an address in one of the proxied prefixes;
1138 * if so, claim the packet and let icmp6_input()
1139 * handle the rest.
1140 */
1141 ours = nd6_prproxy_isours(m, ip6, NULL, IFSCOPE_NONE);
1142 VERIFY(!ours ||
1143 (m->m_pkthdr.pkt_flags & PKTF_PROXY_DST));
1144 }
1145 if (!ours) {
1146 goto bad;
1147 }
1148 } else if (!ours) {
1149 /*
1150 * The unicast forwarding function might return the packet
1151 * if we are proxying prefix(es), and if the packet is an
1152 * ICMPv6 packet that has failed the zone checks, but is
1153 * targeted towards a proxied address (this is optimized by
1154 * way of RTF_PROXY test.) If so, claim the packet as ours
1155 * and let icmp6_input() handle the rest. The packet's hop
1156 * limit value is kept intact (it's not decremented). This
1157 * is for supporting Neighbor Unreachability Detection between
1158 * proxied nodes on different links (src is link-local, dst
1159 * is target address.)
1160 */
1161 if ((m = ip6_forward(m, &rin6, 0)) == NULL) {
1162 goto done;
1163 }
1164 VERIFY(rin6.ro_rt != NULL);
1165 VERIFY(m->m_pkthdr.pkt_flags & PKTF_PROXY_DST);
1166 deliverifp = rin6.ro_rt->rt_ifp;
1167 ours = 1;
1168 }
1169
1170 ip6 = mtod(m, struct ip6_hdr *);
1171
1172 /*
1173 * Malicious party may be able to use IPv4 mapped addr to confuse
1174 * tcp/udp stack and bypass security checks (act as if it was from
1175 * 127.0.0.1 by using IPv6 src ::ffff:127.0.0.1). Be cautious.
1176 *
1177 * For SIIT end node behavior, you may want to disable the check.
1178 * However, you will become vulnerable to attacks using IPv4 mapped
1179 * source.
1180 */
1181 if (IN6_IS_ADDR_V4MAPPED(&ip6->ip6_src) ||
1182 IN6_IS_ADDR_V4MAPPED(&ip6->ip6_dst)) {
1183 ip6stat.ip6s_badscope++;
1184 in6_ifstat_inc(inifp, ifs6_in_addrerr);
1185 goto bad;
1186 }
1187
1188 /*
1189 * Tell launch routine the next header
1190 */
1191 ip6stat.ip6s_delivered++;
1192 in6_ifstat_inc_na(deliverifp, ifs6_in_deliver);
1193
1194 injectit:
1195 nest = 0;
1196
1197 /*
1198 * Perform IP header alignment fixup again, if needed. Note that
1199 * we do it once for the outermost protocol, and we assume each
1200 * protocol handler wouldn't mess with the alignment afterwards.
1201 */
1202 IP6_HDR_ALIGNMENT_FIXUP(m, inifp, return );
1203
1204 while (nxt != IPPROTO_DONE) {
1205 struct ipfilter *filter;
1206 int (*pr_input)(struct mbuf **, int *, int);
1207
1208 /*
1209 * This would imply either IPPROTO_HOPOPTS was not the first
1210 * option or it did not come in the first fragment.
1211 */
1212 if (nxt == IPPROTO_HOPOPTS &&
1213 (m->m_pkthdr.pkt_flags & PKTF_HBH_CHKED) == 0) {
1214 /*
1215 * This implies that HBH option was not contained
1216 * in the first fragment
1217 */
1218 ip6stat.ip6s_badoptions++;
1219 goto bad;
1220 }
1221
1222 if (ip6_hdrnestlimit && (++nest > ip6_hdrnestlimit)) {
1223 ip6stat.ip6s_toomanyhdr++;
1224 goto bad;
1225 }
1226
1227 /*
1228 * protection against faulty packets - there should be
1229 * more sanity checks in header chain processing.
1230 */
1231 if (m->m_pkthdr.len < off) {
1232 ip6stat.ip6s_tooshort++;
1233 in6_ifstat_inc(inifp, ifs6_in_truncated);
1234 goto bad;
1235 }
1236
1237 #if IPSEC
1238 /*
1239 * enforce IPsec policy checking if we are seeing the last header.
1240 * note that we do not visit this for protocols with pcb-layer
1241 * code - like udp/tcp/raw ip.
1242 */
1243 if ((ipsec_bypass == 0) &&
1244 (ip6_protox[nxt]->pr_flags & PR_LASTHDR) != 0) {
1245 if (ipsec6_in_reject(m, NULL)) {
1246 IPSEC_STAT_INCREMENT(ipsec6stat.in_polvio);
1247 goto bad;
1248 }
1249 }
1250 #endif /* IPSEC */
1251
1252 /*
1253 * Call IP filter
1254 */
1255 if (!TAILQ_EMPTY(&ipv6_filters) && !IFNET_IS_INTCOPROC(inifp)) {
1256 ipf_ref();
1257 TAILQ_FOREACH(filter, &ipv6_filters, ipf_link) {
1258 if (seen == 0) {
1259 if ((struct ipfilter *)inject_ipfref ==
1260 filter) {
1261 seen = 1;
1262 }
1263 } else if (filter->ipf_filter.ipf_input) {
1264 errno_t result;
1265
1266 result = filter->ipf_filter.ipf_input(
1267 filter->ipf_filter.cookie,
1268 (mbuf_t *)&m, off, nxt);
1269 if (result == EJUSTRETURN) {
1270 ipf_unref();
1271 goto done;
1272 }
1273 if (result != 0) {
1274 ipf_unref();
1275 goto bad;
1276 }
1277 }
1278 }
1279 ipf_unref();
1280 }
1281
1282 DTRACE_IP6(receive, struct mbuf *, m, struct inpcb *, NULL,
1283 struct ip6_hdr *, ip6, struct ifnet *, inifp,
1284 struct ip *, NULL, struct ip6_hdr *, ip6);
1285
1286 if ((pr_input = ip6_protox[nxt]->pr_input) == NULL) {
1287 m_freem(m);
1288 m = NULL;
1289 nxt = IPPROTO_DONE;
1290 } else if (!(ip6_protox[nxt]->pr_flags & PR_PROTOLOCK)) {
1291 lck_mtx_lock(inet6_domain_mutex);
1292 nxt = pr_input(&m, &off, nxt);
1293 lck_mtx_unlock(inet6_domain_mutex);
1294 } else {
1295 nxt = pr_input(&m, &off, nxt);
1296 }
1297 }
1298 done:
1299 ROUTE_RELEASE(&rin6);
1300 return;
1301 bad:
1302 m_freem(m);
1303 goto done;
1304 }
1305
1306 void
1307 ip6_setsrcifaddr_info(struct mbuf *m, uint32_t src_idx, struct in6_ifaddr *ia6)
1308 {
1309 VERIFY(m->m_flags & M_PKTHDR);
1310
1311 /*
1312 * If the source ifaddr is specified, pick up the information
1313 * from there; otherwise just grab the passed-in ifindex as the
1314 * caller may not have the ifaddr available.
1315 */
1316 if (ia6 != NULL) {
1317 m->m_pkthdr.pkt_flags |= PKTF_IFAINFO;
1318 m->m_pkthdr.src_ifindex = ia6->ia_ifp->if_index;
1319
1320 /* See IN6_IFF comments in in6_var.h */
1321 m->m_pkthdr.src_iff = (ia6->ia6_flags & 0xffff);
1322 } else {
1323 m->m_pkthdr.src_iff = 0;
1324 m->m_pkthdr.src_ifindex = src_idx;
1325 if (src_idx != 0) {
1326 m->m_pkthdr.pkt_flags |= PKTF_IFAINFO;
1327 }
1328 }
1329 }
1330
1331 void
1332 ip6_setdstifaddr_info(struct mbuf *m, uint32_t dst_idx, struct in6_ifaddr *ia6)
1333 {
1334 VERIFY(m->m_flags & M_PKTHDR);
1335
1336 /*
1337 * If the destination ifaddr is specified, pick up the information
1338 * from there; otherwise just grab the passed-in ifindex as the
1339 * caller may not have the ifaddr available.
1340 */
1341 if (ia6 != NULL) {
1342 m->m_pkthdr.pkt_flags |= PKTF_IFAINFO;
1343 m->m_pkthdr.dst_ifindex = ia6->ia_ifp->if_index;
1344
1345 /* See IN6_IFF comments in in6_var.h */
1346 m->m_pkthdr.dst_iff = (ia6->ia6_flags & 0xffff);
1347 } else {
1348 m->m_pkthdr.dst_iff = 0;
1349 m->m_pkthdr.dst_ifindex = dst_idx;
1350 if (dst_idx != 0) {
1351 m->m_pkthdr.pkt_flags |= PKTF_IFAINFO;
1352 }
1353 }
1354 }
1355
1356 int
1357 ip6_getsrcifaddr_info(struct mbuf *m, uint32_t *src_idx, uint32_t *ia6f)
1358 {
1359 VERIFY(m->m_flags & M_PKTHDR);
1360
1361 if (!(m->m_pkthdr.pkt_flags & PKTF_IFAINFO)) {
1362 return -1;
1363 }
1364
1365 if (src_idx != NULL) {
1366 *src_idx = m->m_pkthdr.src_ifindex;
1367 }
1368
1369 if (ia6f != NULL) {
1370 *ia6f = m->m_pkthdr.src_iff;
1371 }
1372
1373 return 0;
1374 }
1375
1376 int
1377 ip6_getdstifaddr_info(struct mbuf *m, uint32_t *dst_idx, uint32_t *ia6f)
1378 {
1379 VERIFY(m->m_flags & M_PKTHDR);
1380
1381 if (!(m->m_pkthdr.pkt_flags & PKTF_IFAINFO)) {
1382 return -1;
1383 }
1384
1385 if (dst_idx != NULL) {
1386 *dst_idx = m->m_pkthdr.dst_ifindex;
1387 }
1388
1389 if (ia6f != NULL) {
1390 *ia6f = m->m_pkthdr.dst_iff;
1391 }
1392
1393 return 0;
1394 }
1395
1396 /*
1397 * Hop-by-Hop options header processing. If a valid jumbo payload option is
1398 * included, the real payload length will be stored in plenp.
1399 */
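/*
 * Length arithmetic used below, as a worked example (informational note, not
 * in the original source): ip6h_len counts 8-octet units beyond the first 8
 * octets of the header, so ip6h_len == 1 yields hbhlen = (1 + 1) << 3 = 16
 * bytes in total, of which 16 - sizeof(struct ip6_hbh) = 14 bytes are the
 * option TLV area handed to ip6_process_hopopts().
 */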
1400 static int
1401 ip6_hopopts_input(uint32_t *plenp, uint32_t *rtalertp, struct mbuf **mp,
1402 int *offp)
1403 {
1404 struct mbuf *m = *mp;
1405 int off = *offp, hbhlen;
1406 struct ip6_hbh *hbh;
1407 u_int8_t *opt;
1408
1409 /* validation of the length of the header */
1410 IP6_EXTHDR_CHECK(m, off, sizeof(*hbh), return (-1));
1411 hbh = (struct ip6_hbh *)(mtod(m, caddr_t) + off);
1412 hbhlen = (hbh->ip6h_len + 1) << 3;
1413
1414 IP6_EXTHDR_CHECK(m, off, hbhlen, return (-1));
1415 hbh = (struct ip6_hbh *)(mtod(m, caddr_t) + off);
1416 off += hbhlen;
1417 hbhlen -= sizeof(struct ip6_hbh);
1418 opt = (u_int8_t *)hbh + sizeof(struct ip6_hbh);
1419
1420 if (ip6_process_hopopts(m, (u_int8_t *)hbh + sizeof(struct ip6_hbh),
1421 hbhlen, rtalertp, plenp) < 0) {
1422 return -1;
1423 }
1424
1425 *offp = off;
1426 *mp = m;
1427 return 0;
1428 }
1429
1430 /*
1431 * Search header for all Hop-by-hop options and process each option.
1432 * This function is separate from ip6_hopopts_input() in order to
1433 * handle the case where the sending node itself processes its hop-by-hop
1434 * options header. In such a case, the function is called from ip6_output().
1435 *
1436 * The function assumes that the hbh header is located right after the IPv6
1437 * header (RFC 2460 p7), that opthead is a pointer into the data content of m,
1438 * and that the region from opthead to opthead + hbhlen is contiguous in memory.
1439 */
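/*
 * TLV walk illustration (informational, not in the original source): per
 * RFC 2460, Pad1 is a lone zero byte with no length field, while every other
 * option is encoded as type, length, data; that is why the loop below
 * advances by 1 for IP6OPT_PAD1 and effectively by *(opt + 1) + 2 otherwise.
 */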
1440 int
1441 ip6_process_hopopts(struct mbuf *m, u_int8_t *opthead, int hbhlen,
1442 u_int32_t *rtalertp, u_int32_t *plenp)
1443 {
1444 struct ip6_hdr *ip6;
1445 int optlen = 0;
1446 u_int8_t *opt = opthead;
1447 u_int16_t rtalert_val;
1448 u_int32_t jumboplen;
1449 const int erroff = sizeof(struct ip6_hdr) + sizeof(struct ip6_hbh);
1450
1451 for (; hbhlen > 0; hbhlen -= optlen, opt += optlen) {
1452 switch (*opt) {
1453 case IP6OPT_PAD1:
1454 optlen = 1;
1455 break;
1456 case IP6OPT_PADN:
1457 if (hbhlen < IP6OPT_MINLEN) {
1458 ip6stat.ip6s_toosmall++;
1459 goto bad;
1460 }
1461 optlen = *(opt + 1) + 2;
1462 break;
1463 case IP6OPT_ROUTER_ALERT:
1464 /* XXX may need check for alignment */
1465 if (hbhlen < IP6OPT_RTALERT_LEN) {
1466 ip6stat.ip6s_toosmall++;
1467 goto bad;
1468 }
1469 if (*(opt + 1) != IP6OPT_RTALERT_LEN - 2) {
1470 /* XXX stat */
1471 icmp6_error(m, ICMP6_PARAM_PROB,
1472 ICMP6_PARAMPROB_HEADER,
1473 erroff + opt + 1 - opthead);
1474 return -1;
1475 }
1476 optlen = IP6OPT_RTALERT_LEN;
1477 bcopy((caddr_t)(opt + 2), (caddr_t)&rtalert_val, 2);
1478 *rtalertp = ntohs(rtalert_val);
1479 break;
1480 case IP6OPT_JUMBO:
1481 /* XXX may need check for alignment */
1482 if (hbhlen < IP6OPT_JUMBO_LEN) {
1483 ip6stat.ip6s_toosmall++;
1484 goto bad;
1485 }
1486 if (*(opt + 1) != IP6OPT_JUMBO_LEN - 2) {
1487 /* XXX stat */
1488 icmp6_error(m, ICMP6_PARAM_PROB,
1489 ICMP6_PARAMPROB_HEADER,
1490 erroff + opt + 1 - opthead);
1491 return -1;
1492 }
1493 optlen = IP6OPT_JUMBO_LEN;
1494
1495 /*
1496 * IPv6 packets that have a non-zero payload length
1497 * must not contain a jumbo payload option.
1498 */
1499 ip6 = mtod(m, struct ip6_hdr *);
1500 if (ip6->ip6_plen) {
1501 ip6stat.ip6s_badoptions++;
1502 icmp6_error(m, ICMP6_PARAM_PROB,
1503 ICMP6_PARAMPROB_HEADER,
1504 erroff + opt - opthead);
1505 return -1;
1506 }
1507
1508 /*
1509 * We may see jumbolen at an unaligned location, so
1510 * we'd need to perform bcopy().
1511 */
1512 bcopy(opt + 2, &jumboplen, sizeof(jumboplen));
1513 jumboplen = (u_int32_t)htonl(jumboplen);
1514
1515 #if 1
1516 /*
1517 * if there are multiple jumbo payload options,
1518 * *plenp will be non-zero and the packet will be
1519 * rejected.
1520 * the behavior may need some debate in ipngwg -
1521 * multiple options do not make sense; however,
1522 * there's no explicit mention in the specification.
1523 */
1524 if (*plenp != 0) {
1525 ip6stat.ip6s_badoptions++;
1526 icmp6_error(m, ICMP6_PARAM_PROB,
1527 ICMP6_PARAMPROB_HEADER,
1528 erroff + opt + 2 - opthead);
1529 return -1;
1530 }
1531 #endif
1532
1533 /*
1534 * jumbo payload length must be larger than 65535.
1535 */
1536 if (jumboplen <= IPV6_MAXPACKET) {
1537 ip6stat.ip6s_badoptions++;
1538 icmp6_error(m, ICMP6_PARAM_PROB,
1539 ICMP6_PARAMPROB_HEADER,
1540 erroff + opt + 2 - opthead);
1541 return -1;
1542 }
1543 *plenp = jumboplen;
1544
1545 break;
1546 default: /* unknown option */
1547 if (hbhlen < IP6OPT_MINLEN) {
1548 ip6stat.ip6s_toosmall++;
1549 goto bad;
1550 }
1551 optlen = ip6_unknown_opt(opt, m,
1552 erroff + opt - opthead);
1553 if (optlen == -1) {
1554 return -1;
1555 }
1556 optlen += 2;
1557 break;
1558 }
1559 }
1560
1561 return 0;
1562
1563 bad:
1564 m_freem(m);
1565 return -1;
1566 }
1567
1568 /*
1569 * Unknown option processing.
1570 * The third argument `off' is the offset from the IPv6 header to the option,
1571 * which is necessary to return an ICMPv6 error when the IPv6 header and the
1572 * option header are not contiguous.
1573 */
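/*
 * Informational note (not in the original source): the switch below follows
 * RFC 2460 section 4.2, where the two high-order bits of an unrecognized
 * option type select the action - 00 skip the option, 01 silently discard
 * the packet, 10 discard and always send an ICMPv6 Parameter Problem, and
 * 11 discard and send the ICMPv6 error only if the destination was not
 * multicast. IP6OPT_TYPE() extracts those two bits.
 */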
1574 int
1575 ip6_unknown_opt(uint8_t *optp, struct mbuf *m, int off)
1576 {
1577 struct ip6_hdr *ip6;
1578
1579 switch (IP6OPT_TYPE(*optp)) {
1580 case IP6OPT_TYPE_SKIP: /* ignore the option */
1581 return (int)*(optp + 1);
1582
1583 case IP6OPT_TYPE_DISCARD: /* silently discard */
1584 m_freem(m);
1585 return -1;
1586
1587 case IP6OPT_TYPE_FORCEICMP: /* send ICMP even if multicasted */
1588 ip6stat.ip6s_badoptions++;
1589 icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_OPTION, off);
1590 return -1;
1591
1592 case IP6OPT_TYPE_ICMP: /* send ICMP if not multicasted */
1593 ip6stat.ip6s_badoptions++;
1594 ip6 = mtod(m, struct ip6_hdr *);
1595 if (IN6_IS_ADDR_MULTICAST(&ip6->ip6_dst) ||
1596 (m->m_flags & (M_BCAST | M_MCAST))) {
1597 m_freem(m);
1598 } else {
1599 icmp6_error(m, ICMP6_PARAM_PROB,
1600 ICMP6_PARAMPROB_OPTION, off);
1601 }
1602 return -1;
1603 }
1604
1605 m_freem(m); /* XXX: NOTREACHED */
1606 return -1;
1607 }
1608
1609 /*
1610 * Create the "control" list for this pcb.
1611 * These functions will not modify mbuf chain at all.
1612 *
1613 * With KAME mbuf chain restriction:
1614 * The routine will be called from upper layer handlers like tcp6_input().
1615 * Thus the routine assumes that the caller (tcp6_input) has already
1616 * called IP6_EXTHDR_CHECK() and all the extension headers are located in the
1617 * very first mbuf on the mbuf chain.
1618 *
1619 * ip6_savecontrol_v4 will handle those options that can be
1620 * set on a v4-mapped socket.
1621 * ip6_savecontrol will directly call ip6_savecontrol_v4 to handle those
1622 * options and handle the v6-only ones itself.
1623 */
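/*
 * Userland illustration (not part of the original source): the ancillary
 * data built here is what a receiver sees after enabling the corresponding
 * RFC 3542 socket options, e.g.
 *
 *	int on = 1;
 *	setsockopt(s, IPPROTO_IPV6, IPV6_RECVPKTINFO, &on, sizeof (on));
 *	...
 *	struct msghdr msg;	// msg_control/msg_controllen point at a buffer
 *	recvmsg(s, &msg, 0);
 *	for (struct cmsghdr *cm = CMSG_FIRSTHDR(&msg); cm != NULL;
 *	    cm = CMSG_NXTHDR(&msg, cm)) {
 *		if (cm->cmsg_level == IPPROTO_IPV6 &&
 *		    cm->cmsg_type == IPV6_PKTINFO) {
 *			// CMSG_DATA(cm) points to a struct in6_pktinfo
 *		}
 *	}
 */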
1624 struct mbuf **
1625 ip6_savecontrol_v4(struct inpcb *inp, struct mbuf *m, struct mbuf **mp,
1626 int *v4only)
1627 {
1628 struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);
1629
1630 if ((inp->inp_socket->so_options & SO_TIMESTAMP) != 0) {
1631 struct timeval tv;
1632
1633 getmicrotime(&tv);
1634 mp = sbcreatecontrol_mbuf((caddr_t)&tv, sizeof(tv),
1635 SCM_TIMESTAMP, SOL_SOCKET, mp);
1636 if (*mp == NULL) {
1637 return NULL;
1638 }
1639 }
1640 if ((inp->inp_socket->so_options & SO_TIMESTAMP_MONOTONIC) != 0) {
1641 uint64_t time;
1642
1643 time = mach_absolute_time();
1644 mp = sbcreatecontrol_mbuf((caddr_t)&time, sizeof(time),
1645 SCM_TIMESTAMP_MONOTONIC, SOL_SOCKET, mp);
1646 if (*mp == NULL) {
1647 return NULL;
1648 }
1649 }
1650 if ((inp->inp_socket->so_options & SO_TIMESTAMP_CONTINUOUS) != 0) {
1651 uint64_t time;
1652
1653 time = mach_continuous_time();
1654 mp = sbcreatecontrol_mbuf((caddr_t)&time, sizeof(time),
1655 SCM_TIMESTAMP_CONTINUOUS, SOL_SOCKET, mp);
1656 if (*mp == NULL) {
1657 return NULL;
1658 }
1659 }
1660 if ((inp->inp_socket->so_flags & SOF_RECV_TRAFFIC_CLASS) != 0) {
1661 int tc = m_get_traffic_class(m);
1662
1663 mp = sbcreatecontrol_mbuf((caddr_t)&tc, sizeof(tc),
1664 SO_TRAFFIC_CLASS, SOL_SOCKET, mp);
1665 if (*mp == NULL) {
1666 return NULL;
1667 }
1668 }
1669
1670 #define IS2292(inp, x, y) (((inp)->inp_flags & IN6P_RFC2292) ? (x) : (y))
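/*
 * IS2292 picks the legacy RFC 2292 cmsg type (x) when the socket has opted
 * into RFC 2292 semantics via IN6P_RFC2292, and the RFC 3542 type (y)
 * otherwise.
 */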
1671 if ((ip6->ip6_vfc & IPV6_VERSION_MASK) != IPV6_VERSION) {
1672 if (v4only != NULL) {
1673 *v4only = 1;
1674 }
1675
1676 // Send ECN flags for v4-mapped addresses
1677 if ((inp->inp_flags & IN6P_TCLASS) != 0) {
1678 struct ip *ip_header = mtod(m, struct ip *);
1679
1680 int tclass = (int)(ip_header->ip_tos);
1681 mp = sbcreatecontrol_mbuf((caddr_t)&tclass, sizeof(tclass),
1682 IPV6_TCLASS, IPPROTO_IPV6, mp);
1683 if (*mp == NULL) {
1684 return NULL;
1685 }
1686 }
1687
1688 // Send IN6P_PKTINFO for v4-mapped address
1689 if ((inp->inp_flags & IN6P_PKTINFO) != 0) {
1690 struct in6_pktinfo pi6 = {
1691 .ipi6_addr = IN6ADDR_V4MAPPED_INIT,
1692 .ipi6_ifindex = (m && m->m_pkthdr.rcvif) ? m->m_pkthdr.rcvif->if_index : 0,
1693 };
1694
1695 struct ip *ip_header = mtod(m, struct ip *);
1696 bcopy(&ip_header->ip_dst, &pi6.ipi6_addr.s6_addr32[3], sizeof(struct in_addr));
1697
1698 mp = sbcreatecontrol_mbuf((caddr_t)&pi6,
1699 sizeof(struct in6_pktinfo),
1700 IS2292(inp, IPV6_2292PKTINFO, IPV6_PKTINFO),
1701 IPPROTO_IPV6, mp);
1702 if (*mp == NULL) {
1703 return NULL;
1704 }
1705 }
1706 return mp;
1707 }
1708
1709 /* RFC 2292 sec. 5 */
1710 if ((inp->inp_flags & IN6P_PKTINFO) != 0) {
1711 struct in6_pktinfo pi6;
1712
1713 bcopy(&ip6->ip6_dst, &pi6.ipi6_addr, sizeof(struct in6_addr));
1714 in6_clearscope(&pi6.ipi6_addr); /* XXX */
1715 pi6.ipi6_ifindex =
1716 (m && m->m_pkthdr.rcvif) ? m->m_pkthdr.rcvif->if_index : 0;
1717
1718 mp = sbcreatecontrol_mbuf((caddr_t)&pi6,
1719 sizeof(struct in6_pktinfo),
1720 IS2292(inp, IPV6_2292PKTINFO, IPV6_PKTINFO),
1721 IPPROTO_IPV6, mp);
1722 if (*mp == NULL) {
1723 return NULL;
1724 }
1725 }
1726
1727 if ((inp->inp_flags & IN6P_HOPLIMIT) != 0) {
1728 int hlim = ip6->ip6_hlim & 0xff;
1729
1730 mp = sbcreatecontrol_mbuf((caddr_t)&hlim, sizeof(int),
1731 IS2292(inp, IPV6_2292HOPLIMIT, IPV6_HOPLIMIT),
1732 IPPROTO_IPV6, mp);
1733 if (*mp == NULL) {
1734 return NULL;
1735 }
1736 }
1737
1738 if (v4only != NULL) {
1739 *v4only = 0;
1740 }
1741 return mp;
1742 }
1743
1744 int
1745 ip6_savecontrol(struct inpcb *in6p, struct mbuf *m, struct mbuf **mp)
1746 {
1747 struct mbuf **np;
1748 struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);
1749 int v4only = 0;
1750
1751 *mp = NULL;
1752 np = ip6_savecontrol_v4(in6p, m, mp, &v4only);
1753 if (np == NULL) {
1754 goto no_mbufs;
1755 }
1756
1757 mp = np;
1758 if (v4only) {
1759 return 0;
1760 }
1761
1762 if ((in6p->inp_flags & IN6P_TCLASS) != 0) {
1763 u_int32_t flowinfo;
1764 int tclass;
1765
1766 flowinfo = (u_int32_t)ntohl(ip6->ip6_flow & IPV6_FLOWINFO_MASK);
1767 flowinfo >>= 20;
1768
1769 tclass = flowinfo & 0xff;
1770 mp = sbcreatecontrol_mbuf((caddr_t)&tclass, sizeof(tclass),
1771 IPV6_TCLASS, IPPROTO_IPV6, mp);
1772 if (*mp == NULL) {
1773 goto no_mbufs;
1774 }
1775 }
1776
1777 /*
1778 * IPV6_HOPOPTS socket option. Recall that we required super-user
1779 * privilege for the option (see ip6_ctloutput), but it might be too
1780 * strict, since there might be some hop-by-hop options which can be
1781 * returned to normal user.
1782 * See also RFC 2292 section 6 (or RFC 3542 section 8).
1783 */
1784 if ((in6p->inp_flags & IN6P_HOPOPTS) != 0) {
1785 /*
1786 * Check if a hop-by-hop options header is contained in the
1787 * received packet, and if so, store the options as ancillary
1788 * data. Note that a hop-by-hop options header must be
1789 * just after the IPv6 header, which is assured through the
1790 * IPv6 input processing.
1791 */
1792 ip6 = mtod(m, struct ip6_hdr *);
1793 if (ip6->ip6_nxt == IPPROTO_HOPOPTS) {
1794 struct ip6_hbh *hbh;
1795 int hbhlen = 0;
1796 hbh = (struct ip6_hbh *)(ip6 + 1);
1797 hbhlen = (hbh->ip6h_len + 1) << 3;
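/*
 * ip6h_len counts 8-octet units beyond the first, so the computation
 * above yields the full hop-by-hop header size in bytes: ip6h_len == 0
 * means the minimum 8 bytes, ip6h_len == 1 means 16 bytes, and so on.
 */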
1798
1799 /*
1800 * XXX: We copy the whole header even if a
1801 * jumbo payload option is included, the option which
1802 * is to be removed before returning according to
1803 * RFC2292.
1804 * Note: this constraint is removed in RFC3542
1805 */
1806 mp = sbcreatecontrol_mbuf((caddr_t)hbh, hbhlen,
1807 IS2292(in6p, IPV6_2292HOPOPTS, IPV6_HOPOPTS),
1808 IPPROTO_IPV6, mp);
1809
1810 if (*mp == NULL) {
1811 goto no_mbufs;
1812 }
1813 }
1814 }
1815
1816 if ((in6p->inp_flags & (IN6P_RTHDR | IN6P_DSTOPTS)) != 0) {
1817 int nxt = ip6->ip6_nxt, off = sizeof(struct ip6_hdr);
1818
1819 /*
1820 * Search the header chain for destination options headers
1821 * or routing header(s), and store each one found as
1822 * ancillary data.
1823 * Note that the on-wire order of the headers is preserved
1824 * in the chain of ancillary data.
1825 */
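/*
 * Illustration: for a received packet laid out as
 *
 *	IPv6 hdr | Destination options | Routing hdr | TCP
 *
 * a socket with both IN6P_DSTOPTS and IN6P_RTHDR set receives an
 * IPV6_DSTOPTS cmsg followed by an IPV6_RTHDR cmsg; the control
 * mbuf chain built below mirrors the on-the-wire header order.
 */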
1826 while (1) { /* is explicit loop prevention necessary? */
1827 struct ip6_ext *ip6e = NULL;
1828 int elen;
1829
1830 /*
1831 * if it is not an extension header, don't try to
1832 * pull it from the chain.
1833 */
1834 switch (nxt) {
1835 case IPPROTO_DSTOPTS:
1836 case IPPROTO_ROUTING:
1837 case IPPROTO_HOPOPTS:
1838 case IPPROTO_AH: /* is it possible? */
1839 break;
1840 default:
1841 goto loopend;
1842 }
1843
1844 if (off + sizeof(*ip6e) > m->m_len) {
1845 goto loopend;
1846 }
1847 ip6e = (struct ip6_ext *)(mtod(m, caddr_t) + off);
1848 if (nxt == IPPROTO_AH) {
1849 elen = (ip6e->ip6e_len + 2) << 2;
1850 } else {
1851 elen = (ip6e->ip6e_len + 1) << 3;
1852 }
1853 if (off + elen > m->m_len) {
1854 goto loopend;
1855 }
1856
1857 switch (nxt) {
1858 case IPPROTO_DSTOPTS:
1859 if (!(in6p->inp_flags & IN6P_DSTOPTS)) {
1860 break;
1861 }
1862
1863 mp = sbcreatecontrol_mbuf((caddr_t)ip6e, elen,
1864 IS2292(in6p, IPV6_2292DSTOPTS,
1865 IPV6_DSTOPTS), IPPROTO_IPV6, mp);
1866 if (*mp == NULL) {
1867 goto no_mbufs;
1868 }
1869 break;
1870 case IPPROTO_ROUTING:
1871 if (!(in6p->inp_flags & IN6P_RTHDR)) {
1872 break;
1873 }
1874
1875 mp = sbcreatecontrol_mbuf((caddr_t)ip6e, elen,
1876 IS2292(in6p, IPV6_2292RTHDR, IPV6_RTHDR),
1877 IPPROTO_IPV6, mp);
1878 if (*mp == NULL) {
1879 goto no_mbufs;
1880 }
1881 break;
1882 case IPPROTO_HOPOPTS:
1883 case IPPROTO_AH: /* is it possible? */
1884 break;
1885
1886 default:
1887 /*
1888 * Other cases have already been filtered out by the
1889 * switch above, so none should reach this point.
1890 * The code is kept as a safety net (e.g. in case
1891 * nxt was overwritten).
1892 */
1893 goto loopend;
1894 }
1895
1896 /* proceed with the next header. */
1897 off += elen;
1898 nxt = ip6e->ip6e_nxt;
1899 ip6e = NULL;
1900 }
1901 loopend:
1902 ;
1903 }
1904 return 0;
1905 no_mbufs:
1906 /* count the failure: ancillary data was dropped for lack of mbufs */
1907 ip6stat.ip6s_pktdropcntrl++;
1908 return ENOBUFS;
1909 }
1910 #undef IS2292
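
/*
 * Userland view of the control data assembled by ip6_savecontrol(); a
 * minimal sketch assuming the RFC 3542 API, not part of the kernel
 * build.  After enabling e.g. IPV6_RECVPKTINFO and IPV6_RECVHOPLIMIT
 * with setsockopt(2), a receiver walks the returned cmsgs like this
 * (s, pi and hlim declared by the caller):
 *
 *	struct msghdr msg;
 *	struct cmsghdr *cm;
 *	char cbuf[1024];
 *	...
 *	msg.msg_control = cbuf;
 *	msg.msg_controllen = sizeof(cbuf);
 *	recvmsg(s, &msg, 0);
 *	for (cm = CMSG_FIRSTHDR(&msg); cm != NULL;
 *	    cm = CMSG_NXTHDR(&msg, cm)) {
 *		if (cm->cmsg_level != IPPROTO_IPV6)
 *			continue;
 *		if (cm->cmsg_type == IPV6_PKTINFO)
 *			pi = *(struct in6_pktinfo *)CMSG_DATA(cm);
 *		else if (cm->cmsg_type == IPV6_HOPLIMIT)
 *			hlim = *(int *)CMSG_DATA(cm);
 *	}
 */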
1911
1912 void
1913 ip6_notify_pmtu(struct inpcb *in6p, struct sockaddr_in6 *dst, u_int32_t *mtu)
1914 {
1915 struct socket *so;
1916 struct mbuf *m_mtu;
1917 struct ip6_mtuinfo mtuctl;
1918
1919 so = in6p->inp_socket;
1920
1921 if ((in6p->inp_flags & IN6P_MTU) == 0) {
1922 return;
1923 }
1924
1925 if (mtu == NULL) {
1926 return;
1927 }
1928
1929 #ifdef DIAGNOSTIC
1930 if (so == NULL) { /* I believe this is impossible */
1931 panic("ip6_notify_pmtu: socket is NULL");
1932 /* NOTREACHED */
1933 }
1934 #endif
1935
1936 if (IN6_IS_ADDR_UNSPECIFIED(&in6p->in6p_faddr) &&
1937 (so->so_proto == NULL || so->so_proto->pr_protocol == IPPROTO_TCP)) {
1938 return;
1939 }
1940
1941 if (!IN6_IS_ADDR_UNSPECIFIED(&in6p->in6p_faddr) &&
1942 !IN6_ARE_ADDR_EQUAL(&in6p->in6p_faddr, &dst->sin6_addr)) {
1943 return;
1944 }
1945
1946 bzero(&mtuctl, sizeof(mtuctl)); /* zero-clear for safety */
1947 mtuctl.ip6m_mtu = *mtu;
1948 mtuctl.ip6m_addr = *dst;
1949 if (sa6_recoverscope(&mtuctl.ip6m_addr, TRUE)) {
1950 return;
1951 }
1952
1953 if ((m_mtu = sbcreatecontrol((caddr_t)&mtuctl, sizeof(mtuctl),
1954 IPV6_PATHMTU, IPPROTO_IPV6)) == NULL) {
1955 return;
1956 }
1957
1958 if (sbappendaddr(&so->so_rcv, SA(dst), NULL, m_mtu, NULL) == 0) {
1959 return;
1960 }
1961 sorwakeup(so);
1962 }
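
/*
 * The path MTU notification above reaches applications as an
 * IPV6_PATHMTU cmsg; a hedged userland sketch, assuming the RFC 3542
 * IPV6_RECVPATHMTU option and a connected UDP socket s (msg, cm and mi
 * declared by the caller, as in the ip6_savecontrol() sketch above):
 *
 *	int on = 1;
 *	setsockopt(s, IPPROTO_IPV6, IPV6_RECVPATHMTU, &on, sizeof(on));
 *	...
 *	for (cm = CMSG_FIRSTHDR(&msg); cm != NULL;
 *	    cm = CMSG_NXTHDR(&msg, cm)) {
 *		if (cm->cmsg_level == IPPROTO_IPV6 &&
 *		    cm->cmsg_type == IPV6_PATHMTU)
 *			mi = (struct ip6_mtuinfo *)CMSG_DATA(cm);
 *	}
 *
 * mi->ip6m_mtu and mi->ip6m_addr then hold the values stored into
 * mtuctl above.
 */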
1963
1964 /*
1965 * Get a pointer to the next-header field of the header that precedes
1966 * the header currently being processed.
1967 * XXX: This function assumes that
1968 * M includes all headers,
1969 * the next header field and the header length field of each header
1970 * are valid, and
1971 * the sum of the header lengths equals OFF.
1972 * Because of these assumptions, this function must be called very
1973 * carefully. Moreover, it should go away once we develop a `neater'
1974 * mechanism to process extension headers.
1975 */
1976 char *
1977 ip6_get_prevhdr(struct mbuf *m, int off)
1978 {
1979 struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);
1980
1981 if (off == sizeof(struct ip6_hdr)) {
1982 return (char *)&ip6->ip6_nxt;
1983 } else {
1984 int len, nxt;
1985 struct ip6_ext *ip6e = NULL;
1986
1987 nxt = ip6->ip6_nxt;
1988 len = sizeof(struct ip6_hdr);
1989 while (len < off) {
1990 ip6e = (struct ip6_ext *)(mtod(m, caddr_t) + len);
1991
1992 switch (nxt) {
1993 case IPPROTO_FRAGMENT:
1994 len += sizeof(struct ip6_frag);
1995 break;
1996 case IPPROTO_AH:
1997 len += (ip6e->ip6e_len + 2) << 2;
1998 break;
1999 default:
2000 len += (ip6e->ip6e_len + 1) << 3;
2001 break;
2002 }
2003 nxt = ip6e->ip6e_nxt;
2004 }
2005 if (ip6e) {
2006 return (char *)&ip6e->ip6e_nxt;
2007 } else {
2008 return NULL;
2009 }
2010 }
2011 }
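
/*
 * Worked example of the assumptions above, illustrative only: for an
 * mbuf whose data is IPv6 header | Fragment header | payload,
 *
 *	ip6_get_prevhdr(m, sizeof(struct ip6_hdr))
 *
 * returns &ip6->ip6_nxt, while
 *
 *	ip6_get_prevhdr(m, sizeof(struct ip6_hdr) + sizeof(struct ip6_frag))
 *
 * returns a pointer to the fragment header's next-header field; in both
 * cases the result points at the field that names the header starting
 * at offset OFF.
 */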
2012
2013 /*
2014 * get next header offset. m will be retained.
2015 */
2016 int
2017 ip6_nexthdr(struct mbuf *m, int off, int proto, int *nxtp)
2018 {
2019 struct ip6_hdr ip6;
2020 struct ip6_ext ip6e;
2021 struct ip6_frag fh;
2022
2023 /* just in case */
2024 VERIFY(m != NULL);
2025 if ((m->m_flags & M_PKTHDR) == 0 || m->m_pkthdr.len < off) {
2026 return -1;
2027 }
2028
2029 switch (proto) {
2030 case IPPROTO_IPV6:
2031 if (m->m_pkthdr.len < off + sizeof(ip6)) {
2032 return -1;
2033 }
2034 m_copydata(m, off, sizeof(ip6), (caddr_t)&ip6);
2035 if (nxtp) {
2036 *nxtp = ip6.ip6_nxt;
2037 }
2038 off += sizeof(ip6);
2039 return off;
2040
2041 case IPPROTO_FRAGMENT:
2042 /*
2043 * terminate parsing if this is not the first fragment;
2044 * it does not make sense to parse through it.
2045 */
2046 if (m->m_pkthdr.len < off + sizeof(fh)) {
2047 return -1;
2048 }
2049 m_copydata(m, off, sizeof(fh), (caddr_t)&fh);
2050 /* IP6F_OFF_MASK = 0xfff8(BigEndian), 0xf8ff(LittleEndian) */
2051 if (fh.ip6f_offlg & IP6F_OFF_MASK) {
2052 return -1;
2053 }
2054 if (nxtp) {
2055 *nxtp = fh.ip6f_nxt;
2056 }
2057 off += sizeof(struct ip6_frag);
2058 return off;
2059
2060 case IPPROTO_AH:
2061 if (m->m_pkthdr.len < off + sizeof(ip6e)) {
2062 return -1;
2063 }
2064 m_copydata(m, off, sizeof(ip6e), (caddr_t)&ip6e);
2065 if (nxtp) {
2066 *nxtp = ip6e.ip6e_nxt;
2067 }
2068 off += (ip6e.ip6e_len + 2) << 2;
2069 return off;
2070
2071 case IPPROTO_HOPOPTS:
2072 case IPPROTO_ROUTING:
2073 case IPPROTO_DSTOPTS:
2074 if (m->m_pkthdr.len < off + sizeof(ip6e)) {
2075 return -1;
2076 }
2077 m_copydata(m, off, sizeof(ip6e), (caddr_t)&ip6e);
2078 if (nxtp) {
2079 *nxtp = ip6e.ip6e_nxt;
2080 }
2081 off += (ip6e.ip6e_len + 1) << 3;
2082 return off;
2083
2084 case IPPROTO_NONE:
2085 case IPPROTO_ESP:
2086 case IPPROTO_IPCOMP:
2087 /* give up */
2088 return -1;
2089
2090 default:
2091 return -1;
2092 }
2093 }
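
/*
 * Note on the two length computations above.  For AH, ip6e_len counts
 * 32-bit words minus two, so the header size in bytes is
 * (ip6e_len + 2) << 2; e.g. ip6e_len == 4 means a 24-byte AH.  For
 * hop-by-hop, routing and destination options headers, ip6e_len counts
 * 8-octet units beyond the first, so the size is (ip6e_len + 1) << 3;
 * e.g. ip6e_len == 0 means the minimum 8 bytes.  A caller typically
 * walks the chain one header at a time (sketch; ext_hdr() is a
 * hypothetical predicate for "nxt is an extension header"):
 *
 *	int nxt;
 *	int off = ip6_nexthdr(m, 0, IPPROTO_IPV6, &nxt);
 *	while (off >= 0 && ext_hdr(nxt))
 *		off = ip6_nexthdr(m, off, nxt, &nxt);
 */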
2094
2095 /*
2096 * get offset for the last header in the chain. m is left untouched.
2097 */
2098 int
2099 ip6_lasthdr(struct mbuf *m, int off, int proto, int *nxtp)
2100 {
2101 int newoff;
2102 int nxt;
2103
2104 if (!nxtp) {
2105 nxt = -1;
2106 nxtp = &nxt;
2107 }
2108 while (1) {
2109 newoff = ip6_nexthdr(m, off, proto, nxtp);
2110 if (newoff < 0) {
2111 return off;
2112 } else if (newoff < off) {
2113 return -1; /* invalid */
2114 } else if (newoff == off) {
2115 return newoff;
2116 }
2117
2118 off = newoff;
2119 proto = *nxtp;
2120 }
2121 }
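
/*
 * Sketch of how a caller might use ip6_lasthdr(): starting at offset 0
 * with IPPROTO_IPV6, the call skips every extension header it can parse
 * and reports where the upper-layer header begins together with its
 * protocol number:
 *
 *	int nxt = -1;
 *	int off = ip6_lasthdr(m, 0, IPPROTO_IPV6, &nxt);
 *	if (off >= 0 && nxt == IPPROTO_ICMPV6) {
 *		... process the ICMPv6 header found at offset off ...
 *	}
 */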
2122
2123 struct ip6aux *
2124 ip6_addaux(struct mbuf *m)
2125 {
2126 struct m_tag *tag;
2127
2128 /* Check if one is already allocated */
2129 tag = m_tag_locate(m, KERNEL_MODULE_TAG_ID,
2130 KERNEL_TAG_TYPE_INET6, NULL);
2131 if (tag == NULL) {
2132 /* Allocate a tag */
2133 tag = m_tag_create(KERNEL_MODULE_TAG_ID, KERNEL_TAG_TYPE_INET6,
2134 sizeof(struct ip6aux), M_DONTWAIT, m);
2135
2136 /* Attach it to the mbuf */
2137 if (tag) {
2138 m_tag_prepend(m, tag);
2139 }
2140 }
2141
2142 return tag ? (struct ip6aux *)(tag + 1) : NULL;
2143 }
2144
2145 struct ip6aux *
2146 ip6_findaux(struct mbuf *m)
2147 {
2148 struct m_tag *tag;
2149
2150 tag = m_tag_locate(m, KERNEL_MODULE_TAG_ID,
2151 KERNEL_TAG_TYPE_INET6, NULL);
2152
2153 return tag ? (struct ip6aux *)(tag + 1) : NULL;
2154 }
2155
2156 void
2157 ip6_delaux(struct mbuf *m)
2158 {
2159 struct m_tag *tag;
2160
2161 tag = m_tag_locate(m, KERNEL_MODULE_TAG_ID,
2162 KERNEL_TAG_TYPE_INET6, NULL);
2163 if (tag) {
2164 m_tag_delete(m, tag);
2165 }
2166 }
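
/*
 * Usage pattern for the three ip6aux helpers above (illustrative sketch):
 * the ip6aux area rides on the mbuf as an m_tag, so a caller typically
 * does
 *
 *	struct ip6aux *ip6a = ip6_addaux(m);	create or reuse the tag
 *	if (ip6a == NULL)
 *		... allocation failed: drop, or go on without it ...
 *	...
 *	ip6a = ip6_findaux(m);			look it up later; may be NULL
 *	...
 *	ip6_delaux(m);				remove it before handing m on
 *
 * The data area lives immediately after the struct m_tag header, which
 * is why the helpers return (struct ip6aux *)(tag + 1).
 */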
2167
2168 /*
2169 * Drain callback
2170 */
2171 void
2172 ip6_drain(void)
2173 {
2174 frag6_drain(); /* fragments */
2175 in6_rtqdrain(); /* protocol cloned routes */
2176 nd6_drain(NULL); /* cloned routes: ND6 */
2177 }
2178
2179 /*
2180 * System control for IP6
2181 */
2182
2183 u_char inet6ctlerrmap[PRC_NCMDS] = {
2184 0, 0, 0, 0,
2185 0, EMSGSIZE, EHOSTDOWN, EHOSTUNREACH,
2186 EHOSTUNREACH, EHOSTUNREACH, ECONNREFUSED, ECONNREFUSED,
2187 EMSGSIZE, EHOSTUNREACH, 0, 0,
2188 0, 0, 0, 0,
2189 ENOPROTOOPT
2190 };
2191
2192 static int
2193 sysctl_reset_ip6_input_stats SYSCTL_HANDLER_ARGS
2194 {
2195 #pragma unused(arg1, arg2)
2196 int error, i;
2197
2198 i = ip6_input_measure;
2199 error = sysctl_handle_int(oidp, &i, 0, req);
2200 if (error || req->newptr == USER_ADDR_NULL) {
2201 goto done;
2202 }
2203 /* impose bounds */
2204 if (i < 0 || i > 1) {
2205 error = EINVAL;
2206 goto done;
2207 }
2208 if (ip6_input_measure != i && i == 1) {
2209 net_perf_initialize(&net_perf, ip6_input_measure_bins);
2210 }
2211 ip6_input_measure = i;
2212 done:
2213 return error;
2214 }
2215
2216 static int
2217 sysctl_ip6_input_measure_bins SYSCTL_HANDLER_ARGS
2218 {
2219 #pragma unused(arg1, arg2)
2220 int error;
2221 uint64_t i;
2222
2223 i = ip6_input_measure_bins;
2224 error = sysctl_handle_quad(oidp, &i, 0, req);
2225 if (error || req->newptr == USER_ADDR_NULL) {
2226 goto done;
2227 }
2228 /* validate data */
2229 if (!net_perf_validate_bins(i)) {
2230 error = EINVAL;
2231 goto done;
2232 }
2233 ip6_input_measure_bins = i;
2234 done:
2235 return error;
2236 }
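
/*
 * These handlers back the IPv6 input-path performance measurement
 * sysctls.  A hedged usage sketch from userland; the node names below
 * are assumptions patterned after the IPv4 equivalents, see the
 * SYSCTL_PROC declarations elsewhere in this file for the authoritative
 * names:
 *
 *	sysctl net.inet6.ip6.input_perf=1	start measuring
 *	sysctl net.inet6.ip6.input_perf_bins=10	choose the bin size
 *
 * Writing 1 to the first node (re)initializes net_perf via
 * net_perf_initialize(); values outside 0..1 are rejected with EINVAL,
 * and bin values are checked with net_perf_validate_bins().
 */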
2237
2238 static int
2239 sysctl_ip6_input_getperf SYSCTL_HANDLER_ARGS
2240 {
2241 #pragma unused(oidp, arg1, arg2)
2242 if (req->oldptr == USER_ADDR_NULL) {
2243 req->oldlen = (size_t)sizeof(net_perf); /* match the data returned below */
2244 }
2245
2246 return SYSCTL_OUT(req, &net_perf, MIN(sizeof(net_perf), req->oldlen));
2247 }