]> git.saurik.com Git - apple/xnu.git/blob - bsd/netinet6/esp_input.c
xnu-2422.1.72.tar.gz
[apple/xnu.git] / bsd / netinet6 / esp_input.c
1 /*
2 * Copyright (c) 2008-2013 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 /* $FreeBSD: src/sys/netinet6/esp_input.c,v 1.1.2.3 2001/07/03 11:01:50 ume Exp $ */
30 /* $KAME: esp_input.c,v 1.55 2001/03/23 08:08:47 itojun Exp $ */
31
32 /*
33 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. Neither the name of the project nor the names of its contributors
45 * may be used to endorse or promote products derived from this software
46 * without specific prior written permission.
47 *
48 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
49 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
52 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 * SUCH DAMAGE.
59 */
60
61 /*
62 * RFC1827/2406 Encapsulated Security Payload.
63 */
64
65 #include <sys/param.h>
66 #include <sys/systm.h>
67 #include <sys/malloc.h>
68 #include <sys/mbuf.h>
69 #include <sys/mcache.h>
70 #include <sys/domain.h>
71 #include <sys/protosw.h>
72 #include <sys/socket.h>
73 #include <sys/errno.h>
74 #include <sys/time.h>
75 #include <sys/kernel.h>
76 #include <sys/syslog.h>
77
78 #include <net/if.h>
79 #include <net/route.h>
80 #include <kern/cpu_number.h>
81 #include <kern/locks.h>
82
83 #include <netinet/in.h>
84 #include <netinet/in_systm.h>
85 #include <netinet/ip.h>
86 #include <netinet/ip_var.h>
87 #include <netinet/in_var.h>
88 #include <netinet/ip_ecn.h>
89 #include <netinet/in_pcb.h>
90 #include <netinet/udp.h>
91 #if INET6
92 #include <netinet6/ip6_ecn.h>
93 #endif
94
95 #if INET6
96 #include <netinet/ip6.h>
97 #include <netinet6/in6_pcb.h>
98 #include <netinet6/ip6_var.h>
99 #include <netinet/icmp6.h>
100 #include <netinet6/ip6protosw.h>
101 #endif
102
103 #include <netinet6/ipsec.h>
104 #if INET6
105 #include <netinet6/ipsec6.h>
106 #endif
107 #include <netinet6/ah.h>
108 #if INET6
109 #include <netinet6/ah6.h>
110 #endif
111 #include <netinet6/esp.h>
112 #if INET6
113 #include <netinet6/esp6.h>
114 #endif
115 #include <netkey/key.h>
116 #include <netkey/keydb.h>
117 #include <netkey/key_debug.h>
118
119 #include <net/kpi_protocol.h>
120 #include <netinet/kpi_ipfilter_var.h>
121
122 #include <net/net_osdep.h>
123 #include <mach/sdt.h>
124
125 #include <sys/kdebug.h>
126 #define DBG_LAYER_BEG NETDBG_CODE(DBG_NETIPSEC, 1)
127 #define DBG_LAYER_END NETDBG_CODE(DBG_NETIPSEC, 3)
128 #define DBG_FNC_ESPIN NETDBG_CODE(DBG_NETIPSEC, (6 << 8))
129 #define DBG_FNC_DECRYPT NETDBG_CODE(DBG_NETIPSEC, (7 << 8))
130 #define IPLEN_FLIPPED
131
132 extern lck_mtx_t *sadb_mutex;
133
134 #if INET
135 #define ESPMAXLEN \
136 (sizeof(struct esp) < sizeof(struct newesp) \
137 ? sizeof(struct newesp) : sizeof(struct esp))
138
139 static struct ip *
140 esp4_input_strip_UDP_encap (struct mbuf *m, int iphlen)
141 {
142 // strip the udp header that's encapsulating ESP
143 struct ip *ip;
144 size_t stripsiz = sizeof(struct udphdr);
145
146 ip = mtod(m, __typeof__(ip));
147 ovbcopy((caddr_t)ip, (caddr_t)(((u_char *)ip) + stripsiz), iphlen);
148 m->m_data += stripsiz;
149 m->m_len -= stripsiz;
150 m->m_pkthdr.len -= stripsiz;
151 ip = mtod(m, __typeof__(ip));
152 ip->ip_len = ip->ip_len - stripsiz;
153 ip->ip_p = IPPROTO_ESP;
154 return ip;
155 }
156
/*
 * esp4_input - IPv4 ESP (RFC 1827 / RFC 2406) input processing.
 *
 * m:   mbuf chain holding the full IPv4 datagram (IP header included).
 * off: byte offset from the start of the IP header to the ESP header;
 *      for UDP-encapsulated ESP this is past the UDP header as well.
 *
 * The packet is looked up against an SA, optionally ICV-checked and
 * replay-checked, decrypted in place, stripped of ESP header/IV and
 * trailer, and then either re-dispatched as a tunneled inner packet
 * (tunnel mode) or handed to the next protocol (transport mode).
 * The mbuf is always consumed: passed up the stack or freed via `bad'.
 */
void
esp4_input(m, off)
	struct mbuf *m;
	int off;
{
	struct ip *ip;
#if INET6
	struct ip6_hdr *ip6;
#endif /* INET6 */
	struct esp *esp;
	struct esptail esptail;
	u_int32_t spi;
	u_int32_t seq;
	struct secasvar *sav = NULL;	/* SA reference; released on both exit paths */
	size_t taillen;
	u_int16_t nxt;
	const struct esp_algorithm *algo;
	int ivlen;
	size_t hlen;
	size_t esplen;
	sa_family_t ifamily;

	KERNEL_DEBUG(DBG_FNC_ESPIN | DBG_FUNC_START, 0,0,0,0,0);
	/* sanity check for alignment. */
	if (off % 4 != 0 || m->m_pkthdr.len % 4 != 0) {
		ipseclog((LOG_ERR, "IPv4 ESP input: packet alignment problem "
			"(off=%d, pktlen=%d)\n", off, m->m_pkthdr.len));
		IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
		goto bad;
	}

	/* make the largest possible ESP header contiguous in the first mbuf */
	if (m->m_len < off + ESPMAXLEN) {
		m = m_pullup(m, off + ESPMAXLEN);
		if (!m) {
			ipseclog((LOG_DEBUG,
			    "IPv4 ESP input: can't pullup in esp4_input\n"));
			IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
			goto bad;
		}
	}

	/* Expect 32-bit aligned data pointer on strict-align platforms */
	MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);

	ip = mtod(m, struct ip *);
	// expect udp-encap and esp packets only
	if (ip->ip_p != IPPROTO_ESP &&
	    !(ip->ip_p == IPPROTO_UDP && off >= sizeof(struct udphdr))) {
		ipseclog((LOG_DEBUG,
		    "IPv4 ESP input: invalid protocol type\n"));
		IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
		goto bad;
	}
	esp = (struct esp *)(void *)(((u_int8_t *)ip) + off);
#ifdef _IP_VHL
	hlen = IP_VHL_HL(ip->ip_vhl) << 2;
#else
	hlen = ip->ip_hl << 2;
#endif

	/* find the sassoc. */
	spi = esp->esp_spi;	/* kept in network order for the SA lookup */

	if ((sav = key_allocsa(AF_INET,
	    (caddr_t)&ip->ip_src, (caddr_t)&ip->ip_dst,
	    IPPROTO_ESP, spi)) == 0) {
		ipseclog((LOG_WARNING,
		    "IPv4 ESP input: no key association found for spi %u\n",
		    (u_int32_t)ntohl(spi)));
		IPSEC_STAT_INCREMENT(ipsecstat.in_nosa);
		goto bad;
	}
	KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
		printf("DP esp4_input called to allocate SA:0x%llx\n",
		    (uint64_t)VM_KERNEL_ADDRPERM(sav)));
	if (sav->state != SADB_SASTATE_MATURE
	 && sav->state != SADB_SASTATE_DYING) {
		ipseclog((LOG_DEBUG,
		    "IPv4 ESP input: non-mature/dying SA found for spi %u\n",
		    (u_int32_t)ntohl(spi)));
		IPSEC_STAT_INCREMENT(ipsecstat.in_badspi);
		goto bad;
	}
	algo = esp_algorithm_lookup(sav->alg_enc);
	if (!algo) {
		ipseclog((LOG_DEBUG, "IPv4 ESP input: "
		    "unsupported encryption algorithm for spi %u\n",
		    (u_int32_t)ntohl(spi)));
		IPSEC_STAT_INCREMENT(ipsecstat.in_badspi);
		goto bad;
	}

	/* check if we have proper ivlen information */
	ivlen = sav->ivlen;
	if (ivlen < 0) {
		ipseclog((LOG_ERR, "inproper ivlen in IPv4 ESP input: %s %s\n",
		    ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
		IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
		goto bad;
	}

	/*
	 * Replay/ICV checks apply only to new-format (RFC 2406) SAs that
	 * carry a replay window and a real auth algorithm+key; everything
	 * else skips straight to ESP header processing.
	 */
	seq = ntohl(((struct newesp *)esp)->esp_seq);
	if (!((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay
	 && (sav->alg_auth && sav->key_auth)))
		goto noreplaycheck;

	if (sav->alg_auth == SADB_X_AALG_NULL ||
	    sav->alg_auth == SADB_AALG_NONE)
		goto noreplaycheck;

	/*
	 * check for sequence number.
	 */
	if (ipsec_chkreplay(seq, sav))
		; /*okey*/
	else {
		IPSEC_STAT_INCREMENT(ipsecstat.in_espreplay);
		ipseclog((LOG_WARNING,
		    "replay packet in IPv4 ESP input: %s %s\n",
		    ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
		goto bad;
	}

	/* check ICV */
    {
	u_char sum0[AH_MAXSUMSIZE] __attribute__((aligned(4)));
	u_char sum[AH_MAXSUMSIZE] __attribute__((aligned(4)));
	const struct ah_algorithm *sumalgo;
	size_t siz;

	sumalgo = ah_algorithm_lookup(sav->alg_auth);
	if (!sumalgo)
		goto noreplaycheck;
	/* ICV length rounded up to a 4-byte multiple */
	siz = (((*sumalgo->sumsiz)(sav) + 3) & ~(4 - 1));
	if (m->m_pkthdr.len < off + ESPMAXLEN + siz) {
		IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
		goto bad;
	}
	if (AH_MAXSUMSIZE < siz) {
		/*
		 * NOTE(review): "%lu" paired with a u_int32_t cast is a
		 * printf format/argument mismatch — should be "%u" or a
		 * (u_long) cast; log-only, harmless to packet handling.
		 */
		ipseclog((LOG_DEBUG,
		    "internal error: AH_MAXSUMSIZE must be larger than %lu\n",
		    (u_int32_t)siz));
		IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
		goto bad;
	}

	/* sum0 = ICV carried in the packet (last siz bytes) */
	m_copydata(m, m->m_pkthdr.len - siz, siz, (caddr_t) &sum0[0]);

	/* sum = ICV computed over ESP header .. end-of-payload */
	if (esp_auth(m, off, m->m_pkthdr.len - off - siz, sav, sum)) {
		ipseclog((LOG_WARNING, "auth fail in IPv4 ESP input: %s %s\n",
		    ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
		IPSEC_STAT_INCREMENT(ipsecstat.in_espauthfail);
		goto bad;
	}

	if (bcmp(sum0, sum, siz) != 0) {
		ipseclog((LOG_WARNING, "auth fail in IPv4 ESP input: %s %s\n",
		    ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
		IPSEC_STAT_INCREMENT(ipsecstat.in_espauthfail);
		goto bad;
	}

	/* strip off the authentication data */
	m_adj(m, -siz);
	ip = mtod(m, struct ip *);	/* m_adj may have changed the chain */
#ifdef IPLEN_FLIPPED
	ip->ip_len = ip->ip_len - siz;
#else
	ip->ip_len = htons(ntohs(ip->ip_len) - siz);
#endif
	m->m_flags |= M_AUTHIPDGM;
	IPSEC_STAT_INCREMENT(ipsecstat.in_espauthsucc);
    }

	/*
	 * update sequence number.
	 */
	if ((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay) {
		if (ipsec_updatereplay(seq, sav)) {
			IPSEC_STAT_INCREMENT(ipsecstat.in_espreplay);
			goto bad;
		}
	}

noreplaycheck:

	/* process main esp header. */
	if (sav->flags & SADB_X_EXT_OLD) {
		/* RFC 1827 */
		esplen = sizeof(struct esp);
	} else {
		/* RFC 2406 */
		if (sav->flags & SADB_X_EXT_DERIV)
			esplen = sizeof(struct esp);
		else
			esplen = sizeof(struct newesp);
	}

	if (m->m_pkthdr.len < off + esplen + ivlen + sizeof(esptail)) {
		ipseclog((LOG_WARNING,
		    "IPv4 ESP input: packet too short\n"));
		IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
		goto bad;
	}

	/* ESP header + IV must be contiguous for the decrypt routine */
	if (m->m_len < off + esplen + ivlen) {
		m = m_pullup(m, off + esplen + ivlen);
		if (!m) {
			ipseclog((LOG_DEBUG,
			    "IPv4 ESP input: can't pullup in esp4_input\n"));
			IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
			goto bad;
		}
	}

	/*
	 * pre-compute and cache intermediate key
	 */
	if (esp_schedule(algo, sav) != 0) {
		IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
		goto bad;
	}

	/*
	 * decrypt the packet.
	 */
	if (!algo->decrypt)
		panic("internal error: no decrypt function");
	KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_START, 0,0,0,0,0);
	if ((*algo->decrypt)(m, off, sav, algo, ivlen)) {
		/* m is already freed */
		m = NULL;	/* prevent double free at `bad' */
		ipseclog((LOG_ERR, "decrypt fail in IPv4 ESP input: %s\n",
		    ipsec_logsastr(sav)));
		IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
		KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 1,0,0,0,0);
		goto bad;
	}
	KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 2,0,0,0,0);
	IPSEC_STAT_INCREMENT(ipsecstat.in_esphist[sav->alg_enc]);

	m->m_flags |= M_DECRYPTED;

	/*
	 * find the trailer of the ESP.
	 */
	m_copydata(m, m->m_pkthdr.len - sizeof(esptail), sizeof(esptail),
	    (caddr_t)&esptail);
	nxt = esptail.esp_nxt;
	taillen = esptail.esp_padlen + sizeof(esptail);

	if (m->m_pkthdr.len < taillen
	 || m->m_pkthdr.len - taillen < hlen) {	/*?*/
		ipseclog((LOG_WARNING,
		    "bad pad length in IPv4 ESP input: %s %s\n",
		    ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
		IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
		goto bad;
	}

	/* strip off the trailing pad area. */
	m_adj(m, -taillen);
	ip = mtod(m, struct ip *);
#ifdef IPLEN_FLIPPED
	ip->ip_len = ip->ip_len - taillen;
#else
	ip->ip_len = htons(ntohs(ip->ip_len) - taillen);
#endif
	if (ip->ip_p == IPPROTO_UDP) {
		// offset includes the outer ip and udp header lengths.
		if (m->m_len < off) {
			m = m_pullup(m, off);
			if (!m) {
				ipseclog((LOG_DEBUG,
				    "IPv4 ESP input: invalid udp encapsulated ESP packet length \n"));
				IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
				goto bad;
			}
		}

		// check the UDP encap header to detect changes in the source port, and then strip the header
		off -= sizeof(struct udphdr); // off no longer includes the udphdr's size
		// if peer is behind nat and this is the latest esp packet
		if ((sav->flags & SADB_X_EXT_NATT_DETECTED_PEER) != 0 &&
		    (sav->flags & SADB_X_EXT_OLD) == 0 &&
		    seq && sav->replay &&
		    seq >= sav->replay->lastseq)  {
			struct udphdr *encap_uh = (__typeof__(encap_uh))(void *)((caddr_t)ip + off);
			/* track a NAT rebinding: remember the new IKE source port */
			if (encap_uh->uh_sport &&
			    ntohs(encap_uh->uh_sport) != sav->remote_ike_port) {
				sav->remote_ike_port = ntohs(encap_uh->uh_sport);
			}
		}
		ip = esp4_input_strip_UDP_encap(m, off);
		esp = (struct esp *)(void *)(((u_int8_t *)ip) + off);
	}

	/* give the utun layer a chance to swallow NAT-T keepalives */
	if (sav->utun_is_keepalive_fn) {
		if (sav->utun_is_keepalive_fn(sav->utun_pcb, &m, nxt, sav->flags, (off + esplen + ivlen))) {
			if (m) {
				// not really bad, we just wanna exit
				IPSEC_STAT_INCREMENT(ipsecstat.in_success);
				m = NULL;
			}
			goto bad;
		}
	}

	/* was it transmitted over the IPsec tunnel SA? */
	if (ipsec4_tunnel_validate(m, off + esplen + ivlen, nxt, sav, &ifamily)) {
		ifaddr_t ifa;
		struct sockaddr_storage addr;

		/*
		 * strip off all the headers that precedes ESP header.
		 *	IP4 xx ESP IP4' payload -> IP4' payload
		 *
		 * XXX more sanity checks
		 * XXX relationship with gif?
		 */
		u_int8_t tos;

		tos = ip->ip_tos;	/* save outer TOS for ECN propagation */
		m_adj(m, off + esplen + ivlen);
		if (ifamily == AF_INET) {
			struct sockaddr_in *ipaddr;

			if (m->m_len < sizeof(*ip)) {
				m = m_pullup(m, sizeof(*ip));
				if (!m) {
					IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
					goto bad;
				}
			}
			ip = mtod(m, struct ip *);
			/* ECN consideration. */
			ip_ecn_egress(ip4_ipsec_ecn, &tos, &ip->ip_tos);
			if (!key_checktunnelsanity(sav, AF_INET,
			    (caddr_t)&ip->ip_src, (caddr_t)&ip->ip_dst)) {
				ipseclog((LOG_ERR, "ipsec tunnel address mismatch "
				    "in ESP input: %s %s\n",
				    ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
				IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
				goto bad;
			}

			if (ip_doscopedroute) {
				/* build a sockaddr for the inner destination (scoped routing) */
				bzero(&addr, sizeof(addr));
				ipaddr = (__typeof__(ipaddr))&addr;
				ipaddr->sin_family = AF_INET;
				ipaddr->sin_len = sizeof(*ipaddr);
				ipaddr->sin_addr = ip->ip_dst;
			}
#if INET6
		} else if (ifamily == AF_INET6) {
			struct sockaddr_in6 *ip6addr;

#ifndef PULLDOWN_TEST
			/*
			 * m_pullup is prohibited in KAME IPv6 input processing
			 * but there's no other way!
			 */
#else
			/* okay to pullup in m_pulldown style */
#endif
			if (m->m_len < sizeof(*ip6)) {
				m = m_pullup(m, sizeof(*ip6));
				if (!m) {
					IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
					goto bad;
				}
			}

			/*
			 * Expect 32-bit aligned data pointer on strict-align
			 * platforms.
			 */
			MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);

			ip6 = mtod(m, struct ip6_hdr *);

			/* ECN consideration. */
			/* XXX To be fixed later if needed */
			// ip_ecn_egress(ip4_ipsec_ecn, &tos, &ip->ip_tos);

			if (!key_checktunnelsanity(sav, AF_INET6,
			    (caddr_t)&ip6->ip6_src, (caddr_t)&ip6->ip6_dst)) {
				ipseclog((LOG_ERR, "ipsec tunnel address mismatch "
				    "in ESP input: %s %s\n",
				    ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
				IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
				goto bad;
			}

			if (ip6_doscopedroute) {
				/* build a sockaddr for the inner destination (scoped routing) */
				bzero(&addr, sizeof(addr));
				ip6addr = (__typeof__(ip6addr))&addr;
				ip6addr->sin6_family = AF_INET6;
				ip6addr->sin6_len = sizeof(*ip6addr);
				ip6addr->sin6_addr = ip6->ip6_dst;
			}
#endif /* INET6 */
		} else {
			ipseclog((LOG_ERR, "ipsec tunnel unsupported address family "
			    "in ESP input\n"));
			goto bad;
		}

		key_sa_recordxfer(sav, m);
		if (ipsec_addhist(m, IPPROTO_ESP, spi) != 0 ||
		    ipsec_addhist(m, IPPROTO_IPV4, 0) != 0) {
			IPSEC_STAT_INCREMENT(ipsecstat.in_nomem);
			goto bad;
		}

		if (ip_doscopedroute || ip6_doscopedroute) {
			// update the receiving interface address based on the inner address
			ifa = ifa_ifwithaddr((struct sockaddr *)&addr);
			if (ifa) {
				m->m_pkthdr.rcvif = ifa->ifa_ifp;
				IFA_REMREF(ifa);
			}
		}

		/* Clear the csum flags, they can't be valid for the inner headers */
		m->m_pkthdr.csum_flags = 0;

		if (sav->utun_in_fn) {
			if (!(sav->utun_in_fn(sav->utun_pcb, &m, ifamily == AF_INET ? PF_INET : PF_INET6))) {
				m = NULL;
				// we just wanna exit since packet has been completely processed
				goto bad;
			}
		}

		/* re-inject the decapsulated inner packet into the stack */
		if (proto_input(ifamily == AF_INET ? PF_INET : PF_INET6, m) != 0)
			goto bad;

		nxt = IPPROTO_DONE;
		KERNEL_DEBUG(DBG_FNC_ESPIN | DBG_FUNC_END, 2,0,0,0,0);
	} else {
		/*
		 * strip off ESP header and IV.
		 * even in m_pulldown case, we need to strip off ESP so that
		 * we can always compute checksum for AH correctly.
		 */
		size_t stripsiz;

		stripsiz = esplen + ivlen;

		ip = mtod(m, struct ip *);
		/* slide the outer IP header forward over the ESP header + IV */
		ovbcopy((caddr_t)ip, (caddr_t)(((u_char *)ip) + stripsiz), off);
		m->m_data += stripsiz;
		m->m_len -= stripsiz;
		m->m_pkthdr.len -= stripsiz;

		ip = mtod(m, struct ip *);
#ifdef IPLEN_FLIPPED
		ip->ip_len = ip->ip_len - stripsiz;
#else
		ip->ip_len = htons(ntohs(ip->ip_len) - stripsiz);
#endif
		ip->ip_p = nxt;	/* rewrite protocol to the decapsulated payload */

		key_sa_recordxfer(sav, m);
		if (ipsec_addhist(m, IPPROTO_ESP, spi) != 0) {
			IPSEC_STAT_INCREMENT(ipsecstat.in_nomem);
			goto bad;
		}

		/*
		 * Set the csum valid flag, if we authenticated the
		 * packet, the payload shouldn't be corrupt unless
		 * it was corrupted before being signed on the other
		 * side.
		 */
		if (nxt == IPPROTO_TCP || nxt == IPPROTO_UDP) {
			m->m_pkthdr.csum_flags = CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
			m->m_pkthdr.csum_data = 0xFFFF;
		}

		if (nxt != IPPROTO_DONE) {
			/* re-check inbound policy against the decrypted payload */
			if ((ip_protox[nxt]->pr_flags & PR_LASTHDR) != 0 &&
			    ipsec4_in_reject(m, NULL)) {
				IPSEC_STAT_INCREMENT(ipsecstat.in_polvio);
				goto bad;
			}
			KERNEL_DEBUG(DBG_FNC_ESPIN | DBG_FUNC_END, 3,0,0,0,0);

			/* translate encapsulated UDP port ? */
			if ((sav->flags & SADB_X_EXT_NATT_MULTIPLEUSERS) != 0) {
				struct udphdr *udp;

				if (nxt != IPPROTO_UDP) {	/* not UPD packet - drop it */
					IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
					goto bad;
				}

				if (m->m_len < off + sizeof(struct udphdr)) {
					m = m_pullup(m, off + sizeof(struct udphdr));
					if (!m) {
						ipseclog((LOG_DEBUG,
						    "IPv4 ESP input: can't pullup UDP header in esp4_input\n"));
						IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
						goto bad;
					}
					ip = mtod(m, struct ip *);
				}
				udp = (struct udphdr *)(void *)(((u_int8_t *)ip) + off);

				/* sadb_mutex protects the per-SA NAT-T port state */
				lck_mtx_lock(sadb_mutex);
				if (sav->natt_encapsulated_src_port == 0) {
					sav->natt_encapsulated_src_port = udp->uh_sport;
				} else if (sav->natt_encapsulated_src_port != udp->uh_sport) {	/* something wrong */
					IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
					lck_mtx_unlock(sadb_mutex);
					goto bad;
				}
				lck_mtx_unlock(sadb_mutex);
				udp->uh_sport = htons(sav->remote_ike_port);
				udp->uh_sum = 0;	/* checksum now stale; disable it */
			}

			DTRACE_IP6(receive, struct mbuf *, m, struct inpcb *, NULL,
			    struct ip *, ip, struct ifnet *, m->m_pkthdr.rcvif,
			    struct ip *, ip, struct ip6_hdr *, NULL);

			if (sav->utun_in_fn) {
				if (!(sav->utun_in_fn(sav->utun_pcb, &m, PF_INET))) {
					m = NULL;
					// we just wanna exit since packet has been completely processed
					goto bad;
				}
			}

			ip_proto_dispatch_in(m, off, nxt, 0);
		} else
			m_freem(m);
		m = NULL;	/* consumed either way */
	}

	if (sav) {
		KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
		    printf("DP esp4_input call free SA:0x%llx\n",
		        (uint64_t)VM_KERNEL_ADDRPERM(sav)));
		key_freesav(sav, KEY_SADB_UNLOCKED);
	}
	IPSEC_STAT_INCREMENT(ipsecstat.in_success);
	return;

bad:
	/* unified error/early-exit path: drop SA ref and free any remaining mbuf */
	if (sav) {
		KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
		    printf("DP esp4_input call free SA:0x%llx\n",
		        (uint64_t)VM_KERNEL_ADDRPERM(sav)));
		key_freesav(sav, KEY_SADB_UNLOCKED);
	}
	if (m)
		m_freem(m);
	KERNEL_DEBUG(DBG_FNC_ESPIN | DBG_FUNC_END, 4,0,0,0,0);
	return;
}
719 #endif /* INET */
720
721 #if INET6
722 int
723 esp6_input(struct mbuf **mp, int *offp, int proto)
724 {
725 #pragma unused(proto)
726 struct mbuf *m = *mp;
727 int off = *offp;
728 struct ip6_hdr *ip6;
729 struct esp *esp;
730 struct esptail esptail;
731 u_int32_t spi;
732 u_int32_t seq;
733 struct secasvar *sav = NULL;
734 size_t taillen;
735 u_int16_t nxt;
736 const struct esp_algorithm *algo;
737 int ivlen;
738 size_t esplen;
739
740 /* sanity check for alignment. */
741 if (off % 4 != 0 || m->m_pkthdr.len % 4 != 0) {
742 ipseclog((LOG_ERR, "IPv6 ESP input: packet alignment problem "
743 "(off=%d, pktlen=%d)\n", off, m->m_pkthdr.len));
744 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
745 goto bad;
746 }
747
748 #ifndef PULLDOWN_TEST
749 IP6_EXTHDR_CHECK(m, off, ESPMAXLEN, {return IPPROTO_DONE;});
750 esp = (struct esp *)(void *)(mtod(m, caddr_t) + off);
751 #else
752 IP6_EXTHDR_GET(esp, struct esp *, m, off, ESPMAXLEN);
753 if (esp == NULL) {
754 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
755 return IPPROTO_DONE;
756 }
757 #endif
758 /* Expect 32-bit data aligned pointer on strict-align platforms */
759 MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);
760
761 ip6 = mtod(m, struct ip6_hdr *);
762
763 if (ntohs(ip6->ip6_plen) == 0) {
764 ipseclog((LOG_ERR, "IPv6 ESP input: "
765 "ESP with IPv6 jumbogram is not supported.\n"));
766 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
767 goto bad;
768 }
769
770 /* find the sassoc. */
771 spi = esp->esp_spi;
772
773 if ((sav = key_allocsa(AF_INET6,
774 (caddr_t)&ip6->ip6_src, (caddr_t)&ip6->ip6_dst,
775 IPPROTO_ESP, spi)) == 0) {
776 ipseclog((LOG_WARNING,
777 "IPv6 ESP input: no key association found for spi %u\n",
778 (u_int32_t)ntohl(spi)));
779 IPSEC_STAT_INCREMENT(ipsec6stat.in_nosa);
780 goto bad;
781 }
782 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
783 printf("DP esp6_input called to allocate SA:0x%llx\n",
784 (uint64_t)VM_KERNEL_ADDRPERM(sav)));
785 if (sav->state != SADB_SASTATE_MATURE
786 && sav->state != SADB_SASTATE_DYING) {
787 ipseclog((LOG_DEBUG,
788 "IPv6 ESP input: non-mature/dying SA found for spi %u\n",
789 (u_int32_t)ntohl(spi)));
790 IPSEC_STAT_INCREMENT(ipsec6stat.in_badspi);
791 goto bad;
792 }
793 algo = esp_algorithm_lookup(sav->alg_enc);
794 if (!algo) {
795 ipseclog((LOG_DEBUG, "IPv6 ESP input: "
796 "unsupported encryption algorithm for spi %u\n",
797 (u_int32_t)ntohl(spi)));
798 IPSEC_STAT_INCREMENT(ipsec6stat.in_badspi);
799 goto bad;
800 }
801
802 /* check if we have proper ivlen information */
803 ivlen = sav->ivlen;
804 if (ivlen < 0) {
805 ipseclog((LOG_ERR, "inproper ivlen in IPv6 ESP input: %s %s\n",
806 ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
807 IPSEC_STAT_INCREMENT(ipsec6stat.in_badspi);
808 goto bad;
809 }
810
811 seq = ntohl(((struct newesp *)esp)->esp_seq);
812
813 if (!((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay
814 && (sav->alg_auth && sav->key_auth)))
815 goto noreplaycheck;
816
817 if (sav->alg_auth == SADB_X_AALG_NULL ||
818 sav->alg_auth == SADB_AALG_NONE)
819 goto noreplaycheck;
820
821 /*
822 * check for sequence number.
823 */
824 if (ipsec_chkreplay(seq, sav))
825 ; /*okey*/
826 else {
827 IPSEC_STAT_INCREMENT(ipsec6stat.in_espreplay);
828 ipseclog((LOG_WARNING,
829 "replay packet in IPv6 ESP input: %s %s\n",
830 ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
831 goto bad;
832 }
833
834 /* check ICV */
835 {
836 u_char sum0[AH_MAXSUMSIZE] __attribute__((aligned(4)));
837 u_char sum[AH_MAXSUMSIZE] __attribute__((aligned(4)));
838 const struct ah_algorithm *sumalgo;
839 size_t siz;
840
841 sumalgo = ah_algorithm_lookup(sav->alg_auth);
842 if (!sumalgo)
843 goto noreplaycheck;
844 siz = (((*sumalgo->sumsiz)(sav) + 3) & ~(4 - 1));
845 if (m->m_pkthdr.len < off + ESPMAXLEN + siz) {
846 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
847 goto bad;
848 }
849 if (AH_MAXSUMSIZE < siz) {
850 ipseclog((LOG_DEBUG,
851 "internal error: AH_MAXSUMSIZE must be larger than %lu\n",
852 (u_int32_t)siz));
853 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
854 goto bad;
855 }
856
857 m_copydata(m, m->m_pkthdr.len - siz, siz, (caddr_t) &sum0[0]);
858
859 if (esp_auth(m, off, m->m_pkthdr.len - off - siz, sav, sum)) {
860 ipseclog((LOG_WARNING, "auth fail in IPv6 ESP input: %s %s\n",
861 ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
862 IPSEC_STAT_INCREMENT(ipsec6stat.in_espauthfail);
863 goto bad;
864 }
865
866 if (bcmp(sum0, sum, siz) != 0) {
867 ipseclog((LOG_WARNING, "auth fail in IPv6 ESP input: %s %s\n",
868 ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
869 IPSEC_STAT_INCREMENT(ipsec6stat.in_espauthfail);
870 goto bad;
871 }
872
873 /* strip off the authentication data */
874 m_adj(m, -siz);
875 ip6 = mtod(m, struct ip6_hdr *);
876 ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - siz);
877
878 m->m_flags |= M_AUTHIPDGM;
879 IPSEC_STAT_INCREMENT(ipsec6stat.in_espauthsucc);
880 }
881
882 /*
883 * update sequence number.
884 */
885 if ((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay) {
886 if (ipsec_updatereplay(seq, sav)) {
887 IPSEC_STAT_INCREMENT(ipsec6stat.in_espreplay);
888 goto bad;
889 }
890 }
891
892 noreplaycheck:
893
894 /* process main esp header. */
895 if (sav->flags & SADB_X_EXT_OLD) {
896 /* RFC 1827 */
897 esplen = sizeof(struct esp);
898 } else {
899 /* RFC 2406 */
900 if (sav->flags & SADB_X_EXT_DERIV)
901 esplen = sizeof(struct esp);
902 else
903 esplen = sizeof(struct newesp);
904 }
905
906 if (m->m_pkthdr.len < off + esplen + ivlen + sizeof(esptail)) {
907 ipseclog((LOG_WARNING,
908 "IPv6 ESP input: packet too short\n"));
909 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
910 goto bad;
911 }
912
913 #ifndef PULLDOWN_TEST
914 IP6_EXTHDR_CHECK(m, off, esplen + ivlen, return IPPROTO_DONE); /*XXX*/
915 #else
916 IP6_EXTHDR_GET(esp, struct esp *, m, off, esplen + ivlen);
917 if (esp == NULL) {
918 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
919 m = NULL;
920 goto bad;
921 }
922 #endif
923 ip6 = mtod(m, struct ip6_hdr *); /*set it again just in case*/
924
925 /*
926 * pre-compute and cache intermediate key
927 */
928 if (esp_schedule(algo, sav) != 0) {
929 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
930 goto bad;
931 }
932
933 /*
934 * decrypt the packet.
935 */
936 if (!algo->decrypt)
937 panic("internal error: no decrypt function");
938 if ((*algo->decrypt)(m, off, sav, algo, ivlen)) {
939 /* m is already freed */
940 m = NULL;
941 ipseclog((LOG_ERR, "decrypt fail in IPv6 ESP input: %s\n",
942 ipsec_logsastr(sav)));
943 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
944 goto bad;
945 }
946 IPSEC_STAT_INCREMENT(ipsec6stat.in_esphist[sav->alg_enc]);
947
948 m->m_flags |= M_DECRYPTED;
949
950 /*
951 * find the trailer of the ESP.
952 */
953 m_copydata(m, m->m_pkthdr.len - sizeof(esptail), sizeof(esptail),
954 (caddr_t)&esptail);
955 nxt = esptail.esp_nxt;
956 taillen = esptail.esp_padlen + sizeof(esptail);
957
958 if (m->m_pkthdr.len < taillen
959 || m->m_pkthdr.len - taillen < sizeof(struct ip6_hdr)) { /*?*/
960 ipseclog((LOG_WARNING,
961 "bad pad length in IPv6 ESP input: %s %s\n",
962 ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
963 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
964 goto bad;
965 }
966
967 /* strip off the trailing pad area. */
968 m_adj(m, -taillen);
969 ip6 = mtod(m, struct ip6_hdr *);
970 ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - taillen);
971
972 if (sav->utun_is_keepalive_fn) {
973 if (sav->utun_is_keepalive_fn(sav->utun_pcb, &m, nxt, sav->flags, (off + esplen + ivlen))) {
974 if (m) {
975 // not really bad, we just wanna exit
976 IPSEC_STAT_INCREMENT(ipsec6stat.in_success);
977 m = NULL;
978 }
979 goto bad;
980 }
981 }
982
983 /* was it transmitted over the IPsec tunnel SA? */
984 if (ipsec6_tunnel_validate(m, off + esplen + ivlen, nxt, sav)) {
985 ifaddr_t ifa;
986 struct sockaddr_storage addr;
987
988 /*
989 * strip off all the headers that precedes ESP header.
990 * IP6 xx ESP IP6' payload -> IP6' payload
991 *
992 * XXX more sanity checks
993 * XXX relationship with gif?
994 */
995 u_int32_t flowinfo; /*net endian*/
996 flowinfo = ip6->ip6_flow;
997 m_adj(m, off + esplen + ivlen);
998 if (m->m_len < sizeof(*ip6)) {
999 #ifndef PULLDOWN_TEST
1000 /*
1001 * m_pullup is prohibited in KAME IPv6 input processing
1002 * but there's no other way!
1003 */
1004 #else
1005 /* okay to pullup in m_pulldown style */
1006 #endif
1007 m = m_pullup(m, sizeof(*ip6));
1008 if (!m) {
1009 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
1010 goto bad;
1011 }
1012 }
1013 ip6 = mtod(m, struct ip6_hdr *);
1014 /* ECN consideration. */
1015 ip6_ecn_egress(ip6_ipsec_ecn, &flowinfo, &ip6->ip6_flow);
1016 if (!key_checktunnelsanity(sav, AF_INET6,
1017 (caddr_t)&ip6->ip6_src, (caddr_t)&ip6->ip6_dst)) {
1018 ipseclog((LOG_ERR, "ipsec tunnel address mismatch "
1019 "in IPv6 ESP input: %s %s\n",
1020 ipsec6_logpacketstr(ip6, spi),
1021 ipsec_logsastr(sav)));
1022 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
1023 goto bad;
1024 }
1025
1026 key_sa_recordxfer(sav, m);
1027 if (ipsec_addhist(m, IPPROTO_ESP, spi) != 0 ||
1028 ipsec_addhist(m, IPPROTO_IPV6, 0) != 0) {
1029 IPSEC_STAT_INCREMENT(ipsec6stat.in_nomem);
1030 goto bad;
1031 }
1032
1033 if (ip6_doscopedroute) {
1034 struct sockaddr_in6 *ip6addr;
1035
1036 bzero(&addr, sizeof(addr));
1037 ip6addr = (__typeof__(ip6addr))&addr;
1038 ip6addr->sin6_family = AF_INET6;
1039 ip6addr->sin6_len = sizeof(*ip6addr);
1040 ip6addr->sin6_addr = ip6->ip6_dst;
1041
1042 // update the receiving interface address based on the inner address
1043 ifa = ifa_ifwithaddr((struct sockaddr *)&addr);
1044 if (ifa) {
1045 m->m_pkthdr.rcvif = ifa->ifa_ifp;
1046 IFA_REMREF(ifa);
1047 }
1048 }
1049
1050 if (sav->utun_in_fn) {
1051 if (!(sav->utun_in_fn(sav->utun_pcb, &m, PF_INET6))) {
1052 m = NULL;
1053 // we just wanna exit since packet has been completely processed
1054 goto bad;
1055 }
1056 }
1057
1058 if (proto_input(PF_INET6, m) != 0)
1059 goto bad;
1060 nxt = IPPROTO_DONE;
1061 } else {
1062 /*
1063 * strip off ESP header and IV.
1064 * even in m_pulldown case, we need to strip off ESP so that
1065 * we can always compute checksum for AH correctly.
1066 */
1067 size_t stripsiz;
1068 char *prvnxtp;
1069
1070 /*
1071 * Set the next header field of the previous header correctly.
1072 */
1073 prvnxtp = ip6_get_prevhdr(m, off); /* XXX */
1074 *prvnxtp = nxt;
1075
1076 stripsiz = esplen + ivlen;
1077
1078 ip6 = mtod(m, struct ip6_hdr *);
1079 if (m->m_len >= stripsiz + off) {
1080 ovbcopy((caddr_t)ip6, ((caddr_t)ip6) + stripsiz, off);
1081 m->m_data += stripsiz;
1082 m->m_len -= stripsiz;
1083 m->m_pkthdr.len -= stripsiz;
1084 } else {
1085 /*
1086 * this comes with no copy if the boundary is on
1087 * cluster
1088 */
1089 struct mbuf *n;
1090
1091 n = m_split(m, off, M_DONTWAIT);
1092 if (n == NULL) {
1093 /* m is retained by m_split */
1094 goto bad;
1095 }
1096 m_adj(n, stripsiz);
1097 /* m_cat does not update m_pkthdr.len */
1098 m->m_pkthdr.len += n->m_pkthdr.len;
1099 m_cat(m, n);
1100 }
1101
1102 #ifndef PULLDOWN_TEST
1103 /*
1104 * KAME requires that the packet to be contiguous on the
1105 * mbuf. We need to make that sure.
1106 * this kind of code should be avoided.
1107 * XXX other conditions to avoid running this part?
1108 */
1109 if (m->m_len != m->m_pkthdr.len) {
1110 struct mbuf *n = NULL;
1111 int maxlen;
1112
1113 MGETHDR(n, M_DONTWAIT, MT_HEADER); /* MAC-OK */
1114 maxlen = MHLEN;
1115 if (n)
1116 M_COPY_PKTHDR(n, m);
1117 if (n && m->m_pkthdr.len > maxlen) {
1118 MCLGET(n, M_DONTWAIT);
1119 maxlen = MCLBYTES;
1120 if ((n->m_flags & M_EXT) == 0) {
1121 m_free(n);
1122 n = NULL;
1123 }
1124 }
1125 if (!n) {
1126 printf("esp6_input: mbuf allocation failed\n");
1127 goto bad;
1128 }
1129
1130 if (m->m_pkthdr.len <= maxlen) {
1131 m_copydata(m, 0, m->m_pkthdr.len, mtod(n, caddr_t));
1132 n->m_len = m->m_pkthdr.len;
1133 n->m_pkthdr.len = m->m_pkthdr.len;
1134 n->m_next = NULL;
1135 m_freem(m);
1136 } else {
1137 m_copydata(m, 0, maxlen, mtod(n, caddr_t));
1138 n->m_len = maxlen;
1139 n->m_pkthdr.len = m->m_pkthdr.len;
1140 n->m_next = m;
1141 m_adj(m, maxlen);
1142 m->m_flags &= ~M_PKTHDR;
1143 }
1144 m = n;
1145 }
1146 #endif
1147
1148 ip6 = mtod(m, struct ip6_hdr *);
1149 ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - stripsiz);
1150
1151 key_sa_recordxfer(sav, m);
1152 if (ipsec_addhist(m, IPPROTO_ESP, spi) != 0) {
1153 IPSEC_STAT_INCREMENT(ipsec6stat.in_nomem);
1154 goto bad;
1155 }
1156
1157 if (sav->utun_in_fn) {
1158 if (!(sav->utun_in_fn(sav->utun_pcb, &m, PF_INET6))) {
1159 m = NULL;
1160 // we just wanna exit since packet has been completely processed
1161 goto bad;
1162 }
1163 }
1164 }
1165
1166 *offp = off;
1167 *mp = m;
1168
1169 if (sav) {
1170 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
1171 printf("DP esp6_input call free SA:0x%llx\n",
1172 (uint64_t)VM_KERNEL_ADDRPERM(sav)));
1173 key_freesav(sav, KEY_SADB_UNLOCKED);
1174 }
1175 IPSEC_STAT_INCREMENT(ipsec6stat.in_success);
1176 return nxt;
1177
1178 bad:
1179 if (sav) {
1180 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
1181 printf("DP esp6_input call free SA:0x%llx\n",
1182 (uint64_t)VM_KERNEL_ADDRPERM(sav)));
1183 key_freesav(sav, KEY_SADB_UNLOCKED);
1184 }
1185 if (m)
1186 m_freem(m);
1187 return IPPROTO_DONE;
1188 }
1189
1190 void
1191 esp6_ctlinput(cmd, sa, d)
1192 int cmd;
1193 struct sockaddr *sa;
1194 void *d;
1195 {
1196 const struct newesp *espp;
1197 struct newesp esp;
1198 struct ip6ctlparam *ip6cp = NULL, ip6cp1;
1199 struct secasvar *sav;
1200 struct ip6_hdr *ip6;
1201 struct mbuf *m;
1202 int off;
1203 struct sockaddr_in6 *sa6_src, *sa6_dst;
1204
1205 if (sa->sa_family != AF_INET6 ||
1206 sa->sa_len != sizeof(struct sockaddr_in6))
1207 return;
1208 if ((unsigned)cmd >= PRC_NCMDS)
1209 return;
1210
1211 /* if the parameter is from icmp6, decode it. */
1212 if (d != NULL) {
1213 ip6cp = (struct ip6ctlparam *)d;
1214 m = ip6cp->ip6c_m;
1215 ip6 = ip6cp->ip6c_ip6;
1216 off = ip6cp->ip6c_off;
1217 } else {
1218 m = NULL;
1219 ip6 = NULL;
1220 }
1221
1222 if (ip6) {
1223 /*
1224 * Notify the error to all possible sockets via pfctlinput2.
1225 * Since the upper layer information (such as protocol type,
1226 * source and destination ports) is embedded in the encrypted
1227 * data and might have been cut, we can't directly call
1228 * an upper layer ctlinput function. However, the pcbnotify
1229 * function will consider source and destination addresses
1230 * as well as the flow info value, and may be able to find
1231 * some PCB that should be notified.
1232 * Although pfctlinput2 will call esp6_ctlinput(), there is
1233 * no possibility of an infinite loop of function calls,
1234 * because we don't pass the inner IPv6 header.
1235 */
1236 bzero(&ip6cp1, sizeof(ip6cp1));
1237 ip6cp1.ip6c_src = ip6cp->ip6c_src;
1238 pfctlinput2(cmd, sa, (void *)&ip6cp1);
1239
1240 /*
1241 * Then go to special cases that need ESP header information.
1242 * XXX: We assume that when ip6 is non NULL,
1243 * M and OFF are valid.
1244 */
1245
1246 /* check if we can safely examine src and dst ports */
1247 if (m->m_pkthdr.len < off + sizeof(esp))
1248 return;
1249
1250 if (m->m_len < off + sizeof(esp)) {
1251 /*
1252 * this should be rare case,
1253 * so we compromise on this copy...
1254 */
1255 m_copydata(m, off, sizeof(esp), (caddr_t)&esp);
1256 espp = &esp;
1257 } else
1258 espp = (struct newesp*)(void *)(mtod(m, caddr_t) + off);
1259
1260 if (cmd == PRC_MSGSIZE) {
1261 int valid = 0;
1262
1263 /*
1264 * Check to see if we have a valid SA corresponding to
1265 * the address in the ICMP message payload.
1266 */
1267 sa6_src = ip6cp->ip6c_src;
1268 sa6_dst = (struct sockaddr_in6 *)(void *)sa;
1269 sav = key_allocsa(AF_INET6,
1270 (caddr_t)&sa6_src->sin6_addr,
1271 (caddr_t)&sa6_dst->sin6_addr,
1272 IPPROTO_ESP, espp->esp_spi);
1273 if (sav) {
1274 if (sav->state == SADB_SASTATE_MATURE ||
1275 sav->state == SADB_SASTATE_DYING)
1276 valid++;
1277 key_freesav(sav, KEY_SADB_LOCKED);
1278 }
1279
1280 /* XXX Further validation? */
1281
1282 /*
1283 * Depending on the value of "valid" and routing table
1284 * size (mtudisc_{hi,lo}wat), we will:
1285 * - recalcurate the new MTU and create the
1286 * corresponding routing entry, or
1287 * - ignore the MTU change notification.
1288 */
1289 icmp6_mtudisc_update((struct ip6ctlparam *)d, valid);
1290 }
1291 } else {
1292 /* we normally notify any pcb here */
1293 }
1294 }
1295 #endif /* INET6 */