apple/xnu: bsd/netinet6/esp_input.c (xnu-3789.70.16)
1 /*
2 * Copyright (c) 2008-2016 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 /* $FreeBSD: src/sys/netinet6/esp_input.c,v 1.1.2.3 2001/07/03 11:01:50 ume Exp $ */
30 /* $KAME: esp_input.c,v 1.55 2001/03/23 08:08:47 itojun Exp $ */
31
32 /*
33 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. Neither the name of the project nor the names of its contributors
45 * may be used to endorse or promote products derived from this software
46 * without specific prior written permission.
47 *
48 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
49 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
52 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 * SUCH DAMAGE.
59 */
60
61 /*
62 * RFC1827/2406 Encapsulated Security Payload.
63 */
64
65 #include <sys/param.h>
66 #include <sys/systm.h>
67 #include <sys/malloc.h>
68 #include <sys/mbuf.h>
69 #include <sys/mcache.h>
70 #include <sys/domain.h>
71 #include <sys/protosw.h>
72 #include <sys/socket.h>
73 #include <sys/errno.h>
74 #include <sys/time.h>
75 #include <sys/kernel.h>
76 #include <sys/syslog.h>
77
78 #include <net/if.h>
79 #include <net/if_ipsec.h>
80 #include <net/route.h>
81 #include <kern/cpu_number.h>
82 #include <kern/locks.h>
83
84 #include <netinet/in.h>
85 #include <netinet/in_systm.h>
86 #include <netinet/ip.h>
87 #include <netinet/ip_var.h>
88 #include <netinet/in_var.h>
89 #include <netinet/ip_ecn.h>
90 #include <netinet/in_pcb.h>
91 #include <netinet/udp.h>
92 #if INET6
93 #include <netinet6/ip6_ecn.h>
94 #endif
95
96 #if INET6
97 #include <netinet/ip6.h>
98 #include <netinet6/in6_pcb.h>
99 #include <netinet6/ip6_var.h>
100 #include <netinet/icmp6.h>
101 #include <netinet6/ip6protosw.h>
102 #endif
103
104 #include <netinet6/ipsec.h>
105 #if INET6
106 #include <netinet6/ipsec6.h>
107 #endif
108 #include <netinet6/ah.h>
109 #if INET6
110 #include <netinet6/ah6.h>
111 #endif
112 #include <netinet6/esp.h>
113 #if INET6
114 #include <netinet6/esp6.h>
115 #endif
116 #include <netkey/key.h>
117 #include <netkey/keydb.h>
118 #include <netkey/key_debug.h>
119
120 #include <net/kpi_protocol.h>
121 #include <netinet/kpi_ipfilter_var.h>
122
123 #include <net/net_osdep.h>
124 #include <mach/sdt.h>
125 #include <corecrypto/cc.h>
126
127 #include <sys/kdebug.h>
128 #define DBG_LAYER_BEG NETDBG_CODE(DBG_NETIPSEC, 1)
129 #define DBG_LAYER_END NETDBG_CODE(DBG_NETIPSEC, 3)
130 #define DBG_FNC_ESPIN NETDBG_CODE(DBG_NETIPSEC, (6 << 8))
131 #define DBG_FNC_DECRYPT NETDBG_CODE(DBG_NETIPSEC, (7 << 8))
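/* IPLEN_FLIPPED: ip_len is assumed to be in host byte order on the input path (flipped by ip_input), so it is adjusted directly rather than via ntohs/htons. */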
132 #define IPLEN_FLIPPED
133
134 extern lck_mtx_t *sadb_mutex;
135
136 #if INET
137 #define ESPMAXLEN \
138 (sizeof(struct esp) < sizeof(struct newesp) \
139 ? sizeof(struct newesp) : sizeof(struct esp))
140
141 static struct ip *
142 esp4_input_strip_udp_encap (struct mbuf *m, int iphlen)
143 {
144 // strip the udp header that's encapsulating ESP
145 struct ip *ip;
146 size_t stripsiz = sizeof(struct udphdr);
147
148 ip = mtod(m, __typeof__(ip));
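/* Slide the IP header forward over the UDP header, then advance the mbuf data pointer past the stripped bytes. */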
149 ovbcopy((caddr_t)ip, (caddr_t)(((u_char *)ip) + stripsiz), iphlen);
150 m->m_data += stripsiz;
151 m->m_len -= stripsiz;
152 m->m_pkthdr.len -= stripsiz;
153 ip = mtod(m, __typeof__(ip));
154 ip->ip_len = ip->ip_len - stripsiz;
155 ip->ip_p = IPPROTO_ESP;
156 return ip;
157 }
158
159 static struct ip6_hdr *
160 esp6_input_strip_udp_encap (struct mbuf *m, int ip6hlen)
161 {
162 // strip the udp header that's encapsulating ESP
163 struct ip6_hdr *ip6;
164 size_t stripsiz = sizeof(struct udphdr);
165
166 ip6 = mtod(m, __typeof__(ip6));
167 ovbcopy((caddr_t)ip6, (caddr_t)(((u_char *)ip6) + stripsiz), ip6hlen);
168 m->m_data += stripsiz;
169 m->m_len -= stripsiz;
170 m->m_pkthdr.len -= stripsiz;
171 ip6 = mtod(m, __typeof__(ip6));
172 ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - stripsiz);
173 ip6->ip6_nxt = IPPROTO_ESP;
174 return ip6;
175 }
176
177 void
178 esp4_input(struct mbuf *m, int off)
179 {
180 struct ip *ip;
181 #if INET6
182 struct ip6_hdr *ip6;
183 #endif /* INET6 */
184 struct esp *esp;
185 struct esptail esptail;
186 u_int32_t spi;
187 u_int32_t seq;
188 struct secasvar *sav = NULL;
189 size_t taillen;
190 u_int16_t nxt;
191 const struct esp_algorithm *algo;
192 int ivlen;
193 size_t hlen;
194 size_t esplen;
195 sa_family_t ifamily;
196
197 KERNEL_DEBUG(DBG_FNC_ESPIN | DBG_FUNC_START, 0,0,0,0,0);
198 /* sanity check for alignment. */
199 if (off % 4 != 0 || m->m_pkthdr.len % 4 != 0) {
200 ipseclog((LOG_ERR, "IPv4 ESP input: packet alignment problem "
201 "(off=%d, pktlen=%d)\n", off, m->m_pkthdr.len));
202 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
203 goto bad;
204 }
205
206 if (m->m_len < off + ESPMAXLEN) {
207 m = m_pullup(m, off + ESPMAXLEN);
208 if (!m) {
209 ipseclog((LOG_DEBUG,
210 "IPv4 ESP input: can't pullup in esp4_input\n"));
211 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
212 goto bad;
213 }
214 }
215
216 /* Expect 32-bit aligned data pointer on strict-align platforms */
217 MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);
218
219 ip = mtod(m, struct ip *);
220 // expect udp-encap and esp packets only
221 if (ip->ip_p != IPPROTO_ESP &&
222 !(ip->ip_p == IPPROTO_UDP && off >= sizeof(struct udphdr))) {
223 ipseclog((LOG_DEBUG,
224 "IPv4 ESP input: invalid protocol type\n"));
225 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
226 goto bad;
227 }
228 esp = (struct esp *)(void *)(((u_int8_t *)ip) + off);
229 #ifdef _IP_VHL
230 hlen = IP_VHL_HL(ip->ip_vhl) << 2;
231 #else
232 hlen = ip->ip_hl << 2;
233 #endif
234
235 /* find the security association. */
236 spi = esp->esp_spi;
237
238 if ((sav = key_allocsa(AF_INET,
239 (caddr_t)&ip->ip_src, (caddr_t)&ip->ip_dst,
240 IPPROTO_ESP, spi)) == 0) {
241 ipseclog((LOG_WARNING,
242 "IPv4 ESP input: no key association found for spi %u\n",
243 (u_int32_t)ntohl(spi)));
244 IPSEC_STAT_INCREMENT(ipsecstat.in_nosa);
245 goto bad;
246 }
247 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
248 printf("DP esp4_input called to allocate SA:0x%llx\n",
249 (uint64_t)VM_KERNEL_ADDRPERM(sav)));
250 if (sav->state != SADB_SASTATE_MATURE
251 && sav->state != SADB_SASTATE_DYING) {
252 ipseclog((LOG_DEBUG,
253 "IPv4 ESP input: non-mature/dying SA found for spi %u\n",
254 (u_int32_t)ntohl(spi)));
255 IPSEC_STAT_INCREMENT(ipsecstat.in_badspi);
256 goto bad;
257 }
258 algo = esp_algorithm_lookup(sav->alg_enc);
259 if (!algo) {
260 ipseclog((LOG_DEBUG, "IPv4 ESP input: "
261 "unsupported encryption algorithm for spi %u\n",
262 (u_int32_t)ntohl(spi)));
263 IPSEC_STAT_INCREMENT(ipsecstat.in_badspi);
264 goto bad;
265 }
266
267 /* check if we have proper ivlen information */
268 ivlen = sav->ivlen;
269 if (ivlen < 0) {
270 ipseclog((LOG_ERR, "inproper ivlen in IPv4 ESP input: %s %s\n",
271 ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
272 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
273 goto bad;
274 }
275
276 seq = ntohl(((struct newesp *)esp)->esp_seq);
277
278 /* Save ICV from packet for verification later */
279 size_t siz = 0;
280 unsigned char saved_icv[AH_MAXSUMSIZE];
281 if (algo->finalizedecrypt) {
282 siz = algo->icvlen;
283 m_copydata(m, m->m_pkthdr.len - siz, siz, (caddr_t) saved_icv);
284 goto delay_icv;
285 }
286
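/* Skip the replay and ICV checks unless this is a new-format (RFC 2406) SA with replay state and an authentication algorithm/key configured. */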
287 if (!((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay
288 && (sav->alg_auth && sav->key_auth)))
289 goto noreplaycheck;
290
291 if (sav->alg_auth == SADB_X_AALG_NULL ||
292 sav->alg_auth == SADB_AALG_NONE)
293 goto noreplaycheck;
294
295 /*
296 * check the sequence number against the replay window.
297 */
298 if (ipsec_chkreplay(seq, sav))
299 ; /* okay */
300 else {
301 IPSEC_STAT_INCREMENT(ipsecstat.in_espreplay);
302 ipseclog((LOG_WARNING,
303 "replay packet in IPv4 ESP input: %s %s\n",
304 ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
305 goto bad;
306 }
307
308 /* check ICV */
309 {
310 u_char sum0[AH_MAXSUMSIZE] __attribute__((aligned(4)));
311 u_char sum[AH_MAXSUMSIZE] __attribute__((aligned(4)));
312 const struct ah_algorithm *sumalgo;
313
314 sumalgo = ah_algorithm_lookup(sav->alg_auth);
315 if (!sumalgo)
316 goto noreplaycheck;
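/* ICV length for this auth algorithm, rounded up to a 4-byte boundary. */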
317 siz = (((*sumalgo->sumsiz)(sav) + 3) & ~(4 - 1));
318 if (m->m_pkthdr.len < off + ESPMAXLEN + siz) {
319 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
320 goto bad;
321 }
322 if (AH_MAXSUMSIZE < siz) {
323 ipseclog((LOG_DEBUG,
324 "internal error: AH_MAXSUMSIZE must be larger than %lu\n",
325 (u_long)siz));
326 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
327 goto bad;
328 }
329
330 m_copydata(m, m->m_pkthdr.len - siz, siz, (caddr_t) &sum0[0]);
331
332 if (esp_auth(m, off, m->m_pkthdr.len - off - siz, sav, sum)) {
333 ipseclog((LOG_WARNING, "auth fail in IPv4 ESP input: %s %s\n",
334 ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
335 IPSEC_STAT_INCREMENT(ipsecstat.in_espauthfail);
336 goto bad;
337 }
338
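/* Constant-time comparison of the ICV taken from the packet against the one we computed. */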
339 if (cc_cmp_safe(siz, sum0, sum)) {
340 ipseclog((LOG_WARNING, "auth fail in IPv4 ESP input: %s %s\n",
341 ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
342 IPSEC_STAT_INCREMENT(ipsecstat.in_espauthfail);
343 goto bad;
344 }
345
346 delay_icv:
347
348 /* strip off the authentication data */
349 m_adj(m, -siz);
350 ip = mtod(m, struct ip *);
351 #ifdef IPLEN_FLIPPED
352 ip->ip_len = ip->ip_len - siz;
353 #else
354 ip->ip_len = htons(ntohs(ip->ip_len) - siz);
355 #endif
356 m->m_flags |= M_AUTHIPDGM;
357 IPSEC_STAT_INCREMENT(ipsecstat.in_espauthsucc);
358 }
359
360 /*
361 * update sequence number.
362 */
363 if ((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay) {
364 if (ipsec_updatereplay(seq, sav)) {
365 IPSEC_STAT_INCREMENT(ipsecstat.in_espreplay);
366 goto bad;
367 }
368 }
369
370 noreplaycheck:
371
372 /* process main esp header. */
373 if (sav->flags & SADB_X_EXT_OLD) {
374 /* RFC 1827 */
375 esplen = sizeof(struct esp);
376 } else {
377 /* RFC 2406 */
378 if (sav->flags & SADB_X_EXT_DERIV)
379 esplen = sizeof(struct esp);
380 else
381 esplen = sizeof(struct newesp);
382 }
383
384 if (m->m_pkthdr.len < off + esplen + ivlen + sizeof(esptail)) {
385 ipseclog((LOG_WARNING,
386 "IPv4 ESP input: packet too short\n"));
387 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
388 goto bad;
389 }
390
391 if (m->m_len < off + esplen + ivlen) {
392 m = m_pullup(m, off + esplen + ivlen);
393 if (!m) {
394 ipseclog((LOG_DEBUG,
395 "IPv4 ESP input: can't pullup in esp4_input\n"));
396 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
397 goto bad;
398 }
399 }
400
401 /*
402 * pre-compute and cache intermediate key
403 */
404 if (esp_schedule(algo, sav) != 0) {
405 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
406 goto bad;
407 }
408
409 /*
410 * decrypt the packet.
411 */
412 if (!algo->decrypt)
413 panic("internal error: no decrypt function");
414 KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_START, 0,0,0,0,0);
415 if ((*algo->decrypt)(m, off, sav, algo, ivlen)) {
416 /* m is already freed */
417 m = NULL;
418 ipseclog((LOG_ERR, "decrypt fail in IPv4 ESP input: %s\n",
419 ipsec_logsastr(sav)));
420 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
421 KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 1,0,0,0,0);
422 goto bad;
423 }
424 KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 2,0,0,0,0);
425 IPSEC_STAT_INCREMENT(ipsecstat.in_esphist[sav->alg_enc]);
426
427 m->m_flags |= M_DECRYPTED;
428
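/* Algorithms with a finalize step (combined-mode ciphers such as AES-GCM) verify the ICV saved earlier now that decryption is complete. */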
429 if (algo->finalizedecrypt)
430 {
431 if ((*algo->finalizedecrypt)(sav, saved_icv, algo->icvlen)) {
432 ipseclog((LOG_ERR, "packet decryption ICV failure\n"));
433 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
434 KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 1,0,0,0,0);
435 goto bad;
436 }
437 }
438
439 /*
440 * find the trailer of the ESP.
441 */
442 m_copydata(m, m->m_pkthdr.len - sizeof(esptail), sizeof(esptail),
443 (caddr_t)&esptail);
444 nxt = esptail.esp_nxt;
445 taillen = esptail.esp_padlen + sizeof(esptail);
446
447 if (m->m_pkthdr.len < taillen
448 || m->m_pkthdr.len - taillen < hlen) { /*?*/
449 ipseclog((LOG_WARNING,
450 "bad pad length in IPv4 ESP input: %s %s\n",
451 ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
452 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
453 goto bad;
454 }
455
456 /* strip off the trailing pad area. */
457 m_adj(m, -taillen);
458 ip = mtod(m, struct ip *);
459 #ifdef IPLEN_FLIPPED
460 ip->ip_len = ip->ip_len - taillen;
461 #else
462 ip->ip_len = htons(ntohs(ip->ip_len) - taillen);
463 #endif
464 if (ip->ip_p == IPPROTO_UDP) {
465 // offset includes the outer ip and udp header lengths.
466 if (m->m_len < off) {
467 m = m_pullup(m, off);
468 if (!m) {
469 ipseclog((LOG_DEBUG,
470 "IPv4 ESP input: invalid udp encapsulated ESP packet length \n"));
471 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
472 goto bad;
473 }
474 }
475
476 // check the UDP encap header to detect changes in the source port, and then strip the header
477 off -= sizeof(struct udphdr); // off no longer includes the udphdr's size
478 // if peer is behind nat and this is the latest esp packet
479 if ((sav->flags & SADB_X_EXT_NATT_DETECTED_PEER) != 0 &&
480 (sav->flags & SADB_X_EXT_OLD) == 0 &&
481 seq && sav->replay &&
482 seq >= sav->replay->lastseq) {
483 struct udphdr *encap_uh = (__typeof__(encap_uh))(void *)((caddr_t)ip + off);
484 if (encap_uh->uh_sport &&
485 ntohs(encap_uh->uh_sport) != sav->remote_ike_port) {
486 sav->remote_ike_port = ntohs(encap_uh->uh_sport);
487 }
488 }
489 ip = esp4_input_strip_udp_encap(m, off);
490 esp = (struct esp *)(void *)(((u_int8_t *)ip) + off);
491 }
492
493 /* was it transmitted over the IPsec tunnel SA? */
494 if (ipsec4_tunnel_validate(m, off + esplen + ivlen, nxt, sav, &ifamily)) {
495 ifaddr_t ifa;
496 struct sockaddr_storage addr;
497
498 /*
499 * strip off all the headers that precede the ESP header.
500 * IP4 xx ESP IP4' payload -> IP4' payload
501 *
502 * XXX more sanity checks
503 * XXX relationship with gif?
504 */
505 u_int8_t tos, otos;
506 int sum;
507
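/* Save the outer TOS so its ECN bits can be folded into the inner header below. */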
508 tos = ip->ip_tos;
509 m_adj(m, off + esplen + ivlen);
510 if (ifamily == AF_INET) {
511 struct sockaddr_in *ipaddr;
512
513 if (m->m_len < sizeof(*ip)) {
514 m = m_pullup(m, sizeof(*ip));
515 if (!m) {
516 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
517 goto bad;
518 }
519 }
520 ip = mtod(m, struct ip *);
521 /* ECN consideration. */
522
523 otos = ip->ip_tos;
524 if (ip_ecn_egress(ip4_ipsec_ecn, &tos, &ip->ip_tos) == 0) {
525 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
526 goto bad;
527 }
528
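/* The ECN egress handling may have changed the TOS byte; patch the IP header checksum incrementally instead of recomputing it. */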
529 if (otos != ip->ip_tos) {
530 sum = ~ntohs(ip->ip_sum) & 0xffff;
531 sum += (~otos & 0xffff) + ip->ip_tos;
532 sum = (sum >> 16) + (sum & 0xffff);
533 sum += (sum >> 16); /* add carry */
534 ip->ip_sum = htons(~sum & 0xffff);
535 }
536
537 if (!key_checktunnelsanity(sav, AF_INET,
538 (caddr_t)&ip->ip_src, (caddr_t)&ip->ip_dst)) {
539 ipseclog((LOG_ERR, "ipsec tunnel address mismatch "
540 "in ESP input: %s %s\n",
541 ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
542 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
543 goto bad;
544 }
545
546 bzero(&addr, sizeof(addr));
547 ipaddr = (__typeof__(ipaddr))&addr;
548 ipaddr->sin_family = AF_INET;
549 ipaddr->sin_len = sizeof(*ipaddr);
550 ipaddr->sin_addr = ip->ip_dst;
551 #if INET6
552 } else if (ifamily == AF_INET6) {
553 struct sockaddr_in6 *ip6addr;
554
555 /*
556 * m_pullup is prohibited in KAME IPv6 input processing
557 * but there's no other way!
558 */
559 if (m->m_len < sizeof(*ip6)) {
560 m = m_pullup(m, sizeof(*ip6));
561 if (!m) {
562 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
563 goto bad;
564 }
565 }
566
567 /*
568 * Expect 32-bit aligned data pointer on strict-align
569 * platforms.
570 */
571 MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);
572
573 ip6 = mtod(m, struct ip6_hdr *);
574
575 /* ECN consideration. */
576 if (ip64_ecn_egress(ip4_ipsec_ecn, &tos, &ip6->ip6_flow) == 0) {
577 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
578 goto bad;
579 }
580
581 if (!key_checktunnelsanity(sav, AF_INET6,
582 (caddr_t)&ip6->ip6_src, (caddr_t)&ip6->ip6_dst)) {
583 ipseclog((LOG_ERR, "ipsec tunnel address mismatch "
584 "in ESP input: %s %s\n",
585 ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
586 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
587 goto bad;
588 }
589
590 bzero(&addr, sizeof(addr));
591 ip6addr = (__typeof__(ip6addr))&addr;
592 ip6addr->sin6_family = AF_INET6;
593 ip6addr->sin6_len = sizeof(*ip6addr);
594 ip6addr->sin6_addr = ip6->ip6_dst;
595 #endif /* INET6 */
596 } else {
597 ipseclog((LOG_ERR, "ipsec tunnel unsupported address family "
598 "in ESP input\n"));
599 goto bad;
600 }
601
602 key_sa_recordxfer(sav, m);
603 if (ipsec_addhist(m, IPPROTO_ESP, spi) != 0 ||
604 ipsec_addhist(m, IPPROTO_IPV4, 0) != 0) {
605 IPSEC_STAT_INCREMENT(ipsecstat.in_nomem);
606 goto bad;
607 }
608
609 // update the receiving interface address based on the inner address
610 ifa = ifa_ifwithaddr((struct sockaddr *)&addr);
611 if (ifa) {
612 m->m_pkthdr.rcvif = ifa->ifa_ifp;
613 IFA_REMREF(ifa);
614 }
615
616 /* Clear the csum flags, they can't be valid for the inner headers */
617 m->m_pkthdr.csum_flags = 0;
618
619 // Input via IPSec interface
620 if (sav->sah->ipsec_if != NULL) {
621 if (ipsec_inject_inbound_packet(sav->sah->ipsec_if, m) == 0) {
622 m = NULL;
623 goto done;
624 } else {
625 goto bad;
626 }
627 }
628
629 if (proto_input(ifamily == AF_INET ? PF_INET : PF_INET6, m) != 0)
630 goto bad;
631
632 nxt = IPPROTO_DONE;
633 KERNEL_DEBUG(DBG_FNC_ESPIN | DBG_FUNC_END, 2,0,0,0,0);
634 } else {
635 /*
636 * strip off ESP header and IV.
637 * even in m_pulldown case, we need to strip off ESP so that
638 * we can always compute checksum for AH correctly.
639 */
640 size_t stripsiz;
641
642 stripsiz = esplen + ivlen;
643
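/* Transport mode: slide the IP header forward over the ESP header and IV, then shrink the mbuf and packet lengths accordingly. */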
644 ip = mtod(m, struct ip *);
645 ovbcopy((caddr_t)ip, (caddr_t)(((u_char *)ip) + stripsiz), off);
646 m->m_data += stripsiz;
647 m->m_len -= stripsiz;
648 m->m_pkthdr.len -= stripsiz;
649
650 ip = mtod(m, struct ip *);
651 #ifdef IPLEN_FLIPPED
652 ip->ip_len = ip->ip_len - stripsiz;
653 #else
654 ip->ip_len = htons(ntohs(ip->ip_len) - stripsiz);
655 #endif
656 ip->ip_p = nxt;
657
658 key_sa_recordxfer(sav, m);
659 if (ipsec_addhist(m, IPPROTO_ESP, spi) != 0) {
660 IPSEC_STAT_INCREMENT(ipsecstat.in_nomem);
661 goto bad;
662 }
663
664 /*
665 * Set the csum valid flag. If we authenticated the
666 * packet, the payload shouldn't be corrupt unless
667 * it was corrupted before being signed on the other
668 * side.
669 */
670 if (nxt == IPPROTO_TCP || nxt == IPPROTO_UDP) {
671 m->m_pkthdr.csum_flags = CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
672 m->m_pkthdr.csum_data = 0xFFFF;
673 }
674
675 if (nxt != IPPROTO_DONE) {
676 if ((ip_protox[nxt]->pr_flags & PR_LASTHDR) != 0 &&
677 ipsec4_in_reject(m, NULL)) {
678 IPSEC_STAT_INCREMENT(ipsecstat.in_polvio);
679 goto bad;
680 }
681 KERNEL_DEBUG(DBG_FNC_ESPIN | DBG_FUNC_END, 3,0,0,0,0);
682
683 /* translate encapsulated UDP port ? */
684 if ((sav->flags & SADB_X_EXT_NATT_MULTIPLEUSERS) != 0) {
685 struct udphdr *udp;
686
687 if (nxt != IPPROTO_UDP) { /* not a UDP packet - drop it */
688 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
689 goto bad;
690 }
691
692 if (m->m_len < off + sizeof(struct udphdr)) {
693 m = m_pullup(m, off + sizeof(struct udphdr));
694 if (!m) {
695 ipseclog((LOG_DEBUG,
696 "IPv4 ESP input: can't pullup UDP header in esp4_input\n"));
697 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
698 goto bad;
699 }
700 ip = mtod(m, struct ip *);
701 }
702 udp = (struct udphdr *)(void *)(((u_int8_t *)ip) + off);
703
704 lck_mtx_lock(sadb_mutex);
705 if (sav->natt_encapsulated_src_port == 0) {
706 sav->natt_encapsulated_src_port = udp->uh_sport;
707 } else if (sav->natt_encapsulated_src_port != udp->uh_sport) { /* something wrong */
708 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
709 lck_mtx_unlock(sadb_mutex);
710 goto bad;
711 }
712 lck_mtx_unlock(sadb_mutex);
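/* Rewrite the source port back to the peer's IKE port and clear the UDP checksum, which is no longer valid after the rewrite. */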
713 udp->uh_sport = htons(sav->remote_ike_port);
714 udp->uh_sum = 0;
715 }
716
717 DTRACE_IP6(receive, struct mbuf *, m, struct inpcb *, NULL,
718 struct ip *, ip, struct ifnet *, m->m_pkthdr.rcvif,
719 struct ip *, ip, struct ip6_hdr *, NULL);
720
721 // Input via IPSec interface
722 if (sav->sah->ipsec_if != NULL) {
723 ip->ip_len = htons(ip->ip_len + hlen);
724 ip->ip_off = htons(ip->ip_off);
725 ip->ip_sum = 0;
726 ip->ip_sum = ip_cksum_hdr_in(m, hlen);
727 if (ipsec_inject_inbound_packet(sav->sah->ipsec_if, m) == 0) {
728 m = NULL;
729 goto done;
730 } else {
731 goto bad;
732 }
733 }
734
735 ip_proto_dispatch_in(m, off, nxt, 0);
736 } else
737 m_freem(m);
738 m = NULL;
739 }
740
741 done:
742 if (sav) {
743 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
744 printf("DP esp4_input call free SA:0x%llx\n",
745 (uint64_t)VM_KERNEL_ADDRPERM(sav)));
746 key_freesav(sav, KEY_SADB_UNLOCKED);
747 }
748 IPSEC_STAT_INCREMENT(ipsecstat.in_success);
749 return;
750
751 bad:
752 if (sav) {
753 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
754 printf("DP esp4_input call free SA:0x%llx\n",
755 (uint64_t)VM_KERNEL_ADDRPERM(sav)));
756 key_freesav(sav, KEY_SADB_UNLOCKED);
757 }
758 if (m)
759 m_freem(m);
760 KERNEL_DEBUG(DBG_FNC_ESPIN | DBG_FUNC_END, 4,0,0,0,0);
761 return;
762 }
763 #endif /* INET */
764
765 #if INET6
766 int
767 esp6_input(struct mbuf **mp, int *offp, int proto)
768 {
769 #pragma unused(proto)
770 struct mbuf *m = *mp;
771 int off = *offp;
772 struct ip *ip;
773 struct ip6_hdr *ip6;
774 struct esp *esp;
775 struct esptail esptail;
776 u_int32_t spi;
777 u_int32_t seq;
778 struct secasvar *sav = NULL;
779 size_t taillen;
780 u_int16_t nxt;
781 char *nproto;
782 const struct esp_algorithm *algo;
783 int ivlen;
784 size_t esplen;
785 sa_family_t ifamily;
786
787 /* sanity check for alignment. */
788 if (off % 4 != 0 || m->m_pkthdr.len % 4 != 0) {
789 ipseclog((LOG_ERR, "IPv6 ESP input: packet alignment problem "
790 "(off=%d, pktlen=%d)\n", off, m->m_pkthdr.len));
791 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
792 goto bad;
793 }
794
795 #ifndef PULLDOWN_TEST
796 IP6_EXTHDR_CHECK(m, off, ESPMAXLEN, {return IPPROTO_DONE;});
797 esp = (struct esp *)(void *)(mtod(m, caddr_t) + off);
798 #else
799 IP6_EXTHDR_GET(esp, struct esp *, m, off, ESPMAXLEN);
800 if (esp == NULL) {
801 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
802 return IPPROTO_DONE;
803 }
804 #endif
805 /* Expect 32-bit aligned data pointer on strict-align platforms */
806 MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);
807
808 ip6 = mtod(m, struct ip6_hdr *);
809
810 if (ntohs(ip6->ip6_plen) == 0) {
811 ipseclog((LOG_ERR, "IPv6 ESP input: "
812 "ESP with IPv6 jumbogram is not supported.\n"));
813 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
814 goto bad;
815 }
816
817 nproto = ip6_get_prevhdr(m, off);
818 if (nproto == NULL || (*nproto != IPPROTO_ESP &&
819 !(*nproto == IPPROTO_UDP && off >= sizeof(struct udphdr)))) {
820 ipseclog((LOG_DEBUG, "IPv6 ESP input: invalid protocol type\n"));
821 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
822 goto bad;
823 }
824
826 /* find the security association. */
826 spi = esp->esp_spi;
827
828 if ((sav = key_allocsa(AF_INET6,
829 (caddr_t)&ip6->ip6_src, (caddr_t)&ip6->ip6_dst,
830 IPPROTO_ESP, spi)) == 0) {
831 ipseclog((LOG_WARNING,
832 "IPv6 ESP input: no key association found for spi %u\n",
833 (u_int32_t)ntohl(spi)));
834 IPSEC_STAT_INCREMENT(ipsec6stat.in_nosa);
835 goto bad;
836 }
837 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
838 printf("DP esp6_input called to allocate SA:0x%llx\n",
839 (uint64_t)VM_KERNEL_ADDRPERM(sav)));
840 if (sav->state != SADB_SASTATE_MATURE
841 && sav->state != SADB_SASTATE_DYING) {
842 ipseclog((LOG_DEBUG,
843 "IPv6 ESP input: non-mature/dying SA found for spi %u\n",
844 (u_int32_t)ntohl(spi)));
845 IPSEC_STAT_INCREMENT(ipsec6stat.in_badspi);
846 goto bad;
847 }
848 algo = esp_algorithm_lookup(sav->alg_enc);
849 if (!algo) {
850 ipseclog((LOG_DEBUG, "IPv6 ESP input: "
851 "unsupported encryption algorithm for spi %u\n",
852 (u_int32_t)ntohl(spi)));
853 IPSEC_STAT_INCREMENT(ipsec6stat.in_badspi);
854 goto bad;
855 }
856
857 /* check if we have proper ivlen information */
858 ivlen = sav->ivlen;
859 if (ivlen < 0) {
860 ipseclog((LOG_ERR, "inproper ivlen in IPv6 ESP input: %s %s\n",
861 ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
862 IPSEC_STAT_INCREMENT(ipsec6stat.in_badspi);
863 goto bad;
864 }
865
866 seq = ntohl(((struct newesp *)esp)->esp_seq);
867
868 /* Save ICV from packet for verification later */
869 size_t siz = 0;
870 unsigned char saved_icv[AH_MAXSUMSIZE];
871 if (algo->finalizedecrypt) {
872 siz = algo->icvlen;
873 m_copydata(m, m->m_pkthdr.len - siz, siz, (caddr_t) saved_icv);
874 goto delay_icv;
875 }
876
877 if (!((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay
878 && (sav->alg_auth && sav->key_auth)))
879 goto noreplaycheck;
880
881 if (sav->alg_auth == SADB_X_AALG_NULL ||
882 sav->alg_auth == SADB_AALG_NONE)
883 goto noreplaycheck;
884
885 /*
886 * check the sequence number against the replay window.
887 */
888 if (ipsec_chkreplay(seq, sav))
889 ; /* okay */
890 else {
891 IPSEC_STAT_INCREMENT(ipsec6stat.in_espreplay);
892 ipseclog((LOG_WARNING,
893 "replay packet in IPv6 ESP input: %s %s\n",
894 ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
895 goto bad;
896 }
897
898 /* check ICV */
899 {
900 u_char sum0[AH_MAXSUMSIZE] __attribute__((aligned(4)));
901 u_char sum[AH_MAXSUMSIZE] __attribute__((aligned(4)));
902 const struct ah_algorithm *sumalgo;
903
904 sumalgo = ah_algorithm_lookup(sav->alg_auth);
905 if (!sumalgo)
906 goto noreplaycheck;
907 siz = (((*sumalgo->sumsiz)(sav) + 3) & ~(4 - 1));
908 if (m->m_pkthdr.len < off + ESPMAXLEN + siz) {
909 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
910 goto bad;
911 }
912 if (AH_MAXSUMSIZE < siz) {
913 ipseclog((LOG_DEBUG,
914 "internal error: AH_MAXSUMSIZE must be larger than %lu\n",
915 (u_long)siz));
916 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
917 goto bad;
918 }
919
920 m_copydata(m, m->m_pkthdr.len - siz, siz, (caddr_t) &sum0[0]);
921
922 if (esp_auth(m, off, m->m_pkthdr.len - off - siz, sav, sum)) {
923 ipseclog((LOG_WARNING, "auth fail in IPv6 ESP input: %s %s\n",
924 ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
925 IPSEC_STAT_INCREMENT(ipsec6stat.in_espauthfail);
926 goto bad;
927 }
928
929 if (cc_cmp_safe(siz, sum0, sum)) {
930 ipseclog((LOG_WARNING, "auth fail in IPv6 ESP input: %s %s\n",
931 ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
932 IPSEC_STAT_INCREMENT(ipsec6stat.in_espauthfail);
933 goto bad;
934 }
935
936 delay_icv:
937
938 /* strip off the authentication data */
939 m_adj(m, -siz);
940 ip6 = mtod(m, struct ip6_hdr *);
941 ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - siz);
942
943 m->m_flags |= M_AUTHIPDGM;
944 IPSEC_STAT_INCREMENT(ipsec6stat.in_espauthsucc);
945 }
946
947 /*
948 * update sequence number.
949 */
950 if ((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay) {
951 if (ipsec_updatereplay(seq, sav)) {
952 IPSEC_STAT_INCREMENT(ipsec6stat.in_espreplay);
953 goto bad;
954 }
955 }
956
957 noreplaycheck:
958
959 /* process main esp header. */
960 if (sav->flags & SADB_X_EXT_OLD) {
961 /* RFC 1827 */
962 esplen = sizeof(struct esp);
963 } else {
964 /* RFC 2406 */
965 if (sav->flags & SADB_X_EXT_DERIV)
966 esplen = sizeof(struct esp);
967 else
968 esplen = sizeof(struct newesp);
969 }
970
971 if (m->m_pkthdr.len < off + esplen + ivlen + sizeof(esptail)) {
972 ipseclog((LOG_WARNING,
973 "IPv6 ESP input: packet too short\n"));
974 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
975 goto bad;
976 }
977
978 #ifndef PULLDOWN_TEST
979 IP6_EXTHDR_CHECK(m, off, esplen + ivlen, return IPPROTO_DONE); /*XXX*/
980 #else
981 IP6_EXTHDR_GET(esp, struct esp *, m, off, esplen + ivlen);
982 if (esp == NULL) {
983 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
984 m = NULL;
985 goto bad;
986 }
987 #endif
988 ip6 = mtod(m, struct ip6_hdr *); /*set it again just in case*/
989
990 /*
991 * pre-compute and cache intermediate key
992 */
993 if (esp_schedule(algo, sav) != 0) {
994 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
995 goto bad;
996 }
997
998 /*
999 * decrypt the packet.
1000 */
1001 if (!algo->decrypt)
1002 panic("internal error: no decrypt function");
1003 if ((*algo->decrypt)(m, off, sav, algo, ivlen)) {
1004 /* m is already freed */
1005 m = NULL;
1006 ipseclog((LOG_ERR, "decrypt fail in IPv6 ESP input: %s\n",
1007 ipsec_logsastr(sav)));
1008 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
1009 goto bad;
1010 }
1011 IPSEC_STAT_INCREMENT(ipsec6stat.in_esphist[sav->alg_enc]);
1012
1013 m->m_flags |= M_DECRYPTED;
1014
1015 if (algo->finalizedecrypt)
1016 {
1017 if ((*algo->finalizedecrypt)(sav, saved_icv, algo->icvlen)) {
1018 ipseclog((LOG_ERR, "packet decryption ICV failure\n"));
1019 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
1020 KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 1,0,0,0,0);
1021 goto bad;
1022 }
1023 }
1024
1025 /*
1026 * find the trailer of the ESP.
1027 */
1028 m_copydata(m, m->m_pkthdr.len - sizeof(esptail), sizeof(esptail),
1029 (caddr_t)&esptail);
1030 nxt = esptail.esp_nxt;
1031 taillen = esptail.esp_padlen + sizeof(esptail);
1032
1033 if (m->m_pkthdr.len < taillen
1034 || m->m_pkthdr.len - taillen < sizeof(struct ip6_hdr)) { /*?*/
1035 ipseclog((LOG_WARNING,
1036 "bad pad length in IPv6 ESP input: %s %s\n",
1037 ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
1038 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
1039 goto bad;
1040 }
1041
1042 /* strip off the trailing pad area. */
1043 m_adj(m, -taillen);
1044 ip6 = mtod(m, struct ip6_hdr *);
1045 ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - taillen);
1046
1047 if (*nproto == IPPROTO_UDP) {
1048 // offset includes the outer ip and udp header lengths.
1049 if (m->m_len < off) {
1050 m = m_pullup(m, off);
1051 if (!m) {
1052 ipseclog((LOG_DEBUG,
1053 "IPv6 ESP input: invalid udp encapsulated ESP packet length\n"));
1054 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
1055 goto bad;
1056 }
1057 }
1058
1059 // check the UDP encap header to detect changes in the source port, and then strip the header
1060 off -= sizeof(struct udphdr); // off no longer includes the udphdr's size
1061 // if peer is behind nat and this is the latest esp packet
1062 if ((sav->flags & SADB_X_EXT_NATT_DETECTED_PEER) != 0 &&
1063 (sav->flags & SADB_X_EXT_OLD) == 0 &&
1064 seq && sav->replay &&
1065 seq >= sav->replay->lastseq) {
1066 struct udphdr *encap_uh = (__typeof__(encap_uh))(void *)((caddr_t)ip6 + off);
1067 if (encap_uh->uh_sport &&
1068 ntohs(encap_uh->uh_sport) != sav->remote_ike_port) {
1069 sav->remote_ike_port = ntohs(encap_uh->uh_sport);
1070 }
1071 }
1072 ip6 = esp6_input_strip_udp_encap(m, off);
1073 esp = (struct esp *)(void *)(((u_int8_t *)ip6) + off);
1074 }
1075
1076
1077 /* was it transmitted over the IPsec tunnel SA? */
1078 if (ipsec6_tunnel_validate(m, off + esplen + ivlen, nxt, sav, &ifamily)) {
1079 ifaddr_t ifa;
1080 struct sockaddr_storage addr;
1081
1082 /*
1083 * strip off all the headers that precede the ESP header.
1084 * IP6 xx ESP IP6' payload -> IP6' payload
1085 *
1086 * XXX more sanity checks
1087 * XXX relationship with gif?
1088 */
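/* Save the outer flow word so its ECN bits can be propagated to the inner header below. */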
1089 u_int32_t flowinfo; /*net endian*/
1090 flowinfo = ip6->ip6_flow;
1091 m_adj(m, off + esplen + ivlen);
1092 if (ifamily == AF_INET6) {
1093 struct sockaddr_in6 *ip6addr;
1094
1095 if (m->m_len < sizeof(*ip6)) {
1096 #ifndef PULLDOWN_TEST
1097 /*
1098 * m_pullup is prohibited in KAME IPv6 input processing
1099 * but there's no other way!
1100 */
1101 #else
1102 /* okay to pullup in m_pulldown style */
1103 #endif
1104 m = m_pullup(m, sizeof(*ip6));
1105 if (!m) {
1106 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
1107 goto bad;
1108 }
1109 }
1110 ip6 = mtod(m, struct ip6_hdr *);
1111 /* ECN consideration. */
1112 if (ip6_ecn_egress(ip6_ipsec_ecn, &flowinfo, &ip6->ip6_flow) == 0) {
1113 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
1114 goto bad;
1115 }
1116 if (!key_checktunnelsanity(sav, AF_INET6,
1117 (caddr_t)&ip6->ip6_src, (caddr_t)&ip6->ip6_dst)) {
1118 ipseclog((LOG_ERR, "ipsec tunnel address mismatch "
1119 "in IPv6 ESP input: %s %s\n",
1120 ipsec6_logpacketstr(ip6, spi),
1121 ipsec_logsastr(sav)));
1122 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
1123 goto bad;
1124 }
1125
1126 bzero(&addr, sizeof(addr));
1127 ip6addr = (__typeof__(ip6addr))&addr;
1128 ip6addr->sin6_family = AF_INET6;
1129 ip6addr->sin6_len = sizeof(*ip6addr);
1130 ip6addr->sin6_addr = ip6->ip6_dst;
1131 } else if (ifamily == AF_INET) {
1132 struct sockaddr_in *ipaddr;
1133
1134 if (m->m_len < sizeof(*ip)) {
1135 m = m_pullup(m, sizeof(*ip));
1136 if (!m) {
1137 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
1138 goto bad;
1139 }
1140 }
1141
1142 u_int8_t otos;
1143 int sum;
1144
1145 ip = mtod(m, struct ip *);
1146 otos = ip->ip_tos;
1147 /* ECN consideration. */
1148 if (ip46_ecn_egress(ip6_ipsec_ecn, &flowinfo, &ip->ip_tos) == 0) {
1149 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
1150 goto bad;
1151 }
1152
1153 if (otos != ip->ip_tos) {
1154 sum = ~ntohs(ip->ip_sum) & 0xffff;
1155 sum += (~otos & 0xffff) + ip->ip_tos;
1156 sum = (sum >> 16) + (sum & 0xffff);
1157 sum += (sum >> 16); /* add carry */
1158 ip->ip_sum = htons(~sum & 0xffff);
1159 }
1160
1161 if (!key_checktunnelsanity(sav, AF_INET,
1162 (caddr_t)&ip->ip_src, (caddr_t)&ip->ip_dst)) {
1163 ipseclog((LOG_ERR, "ipsec tunnel address mismatch "
1164 "in ESP input: %s %s\n",
1165 ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
1166 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
1167 goto bad;
1168 }
1169
1170 bzero(&addr, sizeof(addr));
1171 ipaddr = (__typeof__(ipaddr))&addr;
1172 ipaddr->sin_family = AF_INET;
1173 ipaddr->sin_len = sizeof(*ipaddr);
1174 ipaddr->sin_addr = ip->ip_dst;
1175 }
1176
1177 key_sa_recordxfer(sav, m);
1178 if (ipsec_addhist(m, IPPROTO_ESP, spi) != 0 ||
1179 ipsec_addhist(m, IPPROTO_IPV6, 0) != 0) {
1180 IPSEC_STAT_INCREMENT(ipsec6stat.in_nomem);
1181 goto bad;
1182 }
1183
1184 // update the receiving interface address based on the inner address
1185 ifa = ifa_ifwithaddr((struct sockaddr *)&addr);
1186 if (ifa) {
1187 m->m_pkthdr.rcvif = ifa->ifa_ifp;
1188 IFA_REMREF(ifa);
1189 }
1190
1191 // Input via IPSec interface
1192 if (sav->sah->ipsec_if != NULL) {
1193 if (ipsec_inject_inbound_packet(sav->sah->ipsec_if, m) == 0) {
1194 m = NULL;
1195 nxt = IPPROTO_DONE;
1196 goto done;
1197 } else {
1198 goto bad;
1199 }
1200 }
1201
1202 if (proto_input(ifamily == AF_INET ? PF_INET : PF_INET6, m) != 0)
1203 goto bad;
1204 nxt = IPPROTO_DONE;
1205 } else {
1206 /*
1207 * strip off ESP header and IV.
1208 * even in m_pulldown case, we need to strip off ESP so that
1209 * we can always compute checksum for AH correctly.
1210 */
1211 size_t stripsiz;
1212 char *prvnxtp;
1213
1214 /*
1215 * Set the next header field of the previous header correctly.
1216 */
1217 prvnxtp = ip6_get_prevhdr(m, off); /* XXX */
1218 *prvnxtp = nxt;
1219
1220 stripsiz = esplen + ivlen;
1221
1222 ip6 = mtod(m, struct ip6_hdr *);
1223 if (m->m_len >= stripsiz + off) {
1224 ovbcopy((caddr_t)ip6, ((caddr_t)ip6) + stripsiz, off);
1225 m->m_data += stripsiz;
1226 m->m_len -= stripsiz;
1227 m->m_pkthdr.len -= stripsiz;
1228 } else {
1229 /*
1230 * this comes with no copy if the split falls on a
1231 * cluster boundary
1232 */
1233 struct mbuf *n;
1234
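/* The leading mbuf is too short for an in-place copy: split the chain at the end of the preceding headers, trim the ESP header and IV off the front of the tail, then reattach it. */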
1235 n = m_split(m, off, M_DONTWAIT);
1236 if (n == NULL) {
1237 /* m is retained by m_split */
1238 goto bad;
1239 }
1240 m_adj(n, stripsiz);
1241 /* m_cat does not update m_pkthdr.len */
1242 m->m_pkthdr.len += n->m_pkthdr.len;
1243 m_cat(m, n);
1244 }
1245
1246 #ifndef PULLDOWN_TEST
1247 /*
1248 * KAME requires the packet to be contiguous in the
1249 * mbuf. We need to make sure of that.
1250 * this kind of code should be avoided.
1251 * XXX other conditions to avoid running this part?
1252 */
1253 if (m->m_len != m->m_pkthdr.len) {
1254 struct mbuf *n = NULL;
1255 int maxlen;
1256
1257 MGETHDR(n, M_DONTWAIT, MT_HEADER); /* MAC-OK */
1258 maxlen = MHLEN;
1259 if (n)
1260 M_COPY_PKTHDR(n, m);
1261 if (n && m->m_pkthdr.len > maxlen) {
1262 MCLGET(n, M_DONTWAIT);
1263 maxlen = MCLBYTES;
1264 if ((n->m_flags & M_EXT) == 0) {
1265 m_free(n);
1266 n = NULL;
1267 }
1268 }
1269 if (!n) {
1270 printf("esp6_input: mbuf allocation failed\n");
1271 goto bad;
1272 }
1273
1274 if (m->m_pkthdr.len <= maxlen) {
1275 m_copydata(m, 0, m->m_pkthdr.len, mtod(n, caddr_t));
1276 n->m_len = m->m_pkthdr.len;
1277 n->m_pkthdr.len = m->m_pkthdr.len;
1278 n->m_next = NULL;
1279 m_freem(m);
1280 } else {
1281 m_copydata(m, 0, maxlen, mtod(n, caddr_t));
1282 n->m_len = maxlen;
1283 n->m_pkthdr.len = m->m_pkthdr.len;
1284 n->m_next = m;
1285 m_adj(m, maxlen);
1286 m->m_flags &= ~M_PKTHDR;
1287 }
1288 m = n;
1289 }
1290 #endif
1291
1292 ip6 = mtod(m, struct ip6_hdr *);
1293 ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - stripsiz);
1294
1295 key_sa_recordxfer(sav, m);
1296 if (ipsec_addhist(m, IPPROTO_ESP, spi) != 0) {
1297 IPSEC_STAT_INCREMENT(ipsec6stat.in_nomem);
1298 goto bad;
1299 }
1300
1301 /*
1302 * Set the csum valid flag. If we authenticated the
1303 * packet, the payload shouldn't be corrupt unless
1304 * it was corrupted before being signed on the other
1305 * side.
1306 */
1307 if (nxt == IPPROTO_TCP || nxt == IPPROTO_UDP) {
1308 m->m_pkthdr.csum_flags = CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
1309 m->m_pkthdr.csum_data = 0xFFFF;
1310 }
1311
1312 // Input via IPSec interface
1313 if (sav->sah->ipsec_if != NULL) {
1314 if (ipsec_inject_inbound_packet(sav->sah->ipsec_if, m) == 0) {
1315 m = NULL;
1316 nxt = IPPROTO_DONE;
1317 goto done;
1318 } else {
1319 goto bad;
1320 }
1321 }
1322
1323 }
1324
1325 done:
1326 *offp = off;
1327 *mp = m;
1328 if (sav) {
1329 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
1330 printf("DP esp6_input call free SA:0x%llx\n",
1331 (uint64_t)VM_KERNEL_ADDRPERM(sav)));
1332 key_freesav(sav, KEY_SADB_UNLOCKED);
1333 }
1334 IPSEC_STAT_INCREMENT(ipsec6stat.in_success);
1335 return nxt;
1336
1337 bad:
1338 if (sav) {
1339 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
1340 printf("DP esp6_input call free SA:0x%llx\n",
1341 (uint64_t)VM_KERNEL_ADDRPERM(sav)));
1342 key_freesav(sav, KEY_SADB_UNLOCKED);
1343 }
1344 if (m)
1345 m_freem(m);
1346 return IPPROTO_DONE;
1347 }
1348
1349 void
1350 esp6_ctlinput(int cmd, struct sockaddr *sa, void *d)
1351 {
1352 const struct newesp *espp;
1353 struct newesp esp;
1354 struct ip6ctlparam *ip6cp = NULL, ip6cp1;
1355 struct secasvar *sav;
1356 struct ip6_hdr *ip6;
1357 struct mbuf *m;
1358 int off;
1359 struct sockaddr_in6 *sa6_src, *sa6_dst;
1360
1361 if (sa->sa_family != AF_INET6 ||
1362 sa->sa_len != sizeof(struct sockaddr_in6))
1363 return;
1364 if ((unsigned)cmd >= PRC_NCMDS)
1365 return;
1366
1367 /* if the parameter is from icmp6, decode it. */
1368 if (d != NULL) {
1369 ip6cp = (struct ip6ctlparam *)d;
1370 m = ip6cp->ip6c_m;
1371 ip6 = ip6cp->ip6c_ip6;
1372 off = ip6cp->ip6c_off;
1373 } else {
1374 m = NULL;
1375 ip6 = NULL;
1376 }
1377
1378 if (ip6) {
1379 /*
1380 * Notify the error to all possible sockets via pfctlinput2.
1381 * Since the upper layer information (such as protocol type,
1382 * source and destination ports) is embedded in the encrypted
1383 * data and might have been cut, we can't directly call
1384 * an upper layer ctlinput function. However, the pcbnotify
1385 * function will consider source and destination addresses
1386 * as well as the flow info value, and may be able to find
1387 * some PCB that should be notified.
1388 * Although pfctlinput2 will call esp6_ctlinput(), there is
1389 * no possibility of an infinite loop of function calls,
1390 * because we don't pass the inner IPv6 header.
1391 */
1392 bzero(&ip6cp1, sizeof(ip6cp1));
1393 ip6cp1.ip6c_src = ip6cp->ip6c_src;
1394 pfctlinput2(cmd, sa, (void *)&ip6cp1);
1395
1396 /*
1397 * Then go to special cases that need ESP header information.
1398 * XXX: We assume that when ip6 is non-NULL,
1399 * m and off are valid.
1400 */
1401
1402 /* check if we can safely examine src and dst ports */
1403 if (m->m_pkthdr.len < off + sizeof(esp))
1404 return;
1405
1406 if (m->m_len < off + sizeof(esp)) {
1407 /*
1408 * this should be a rare case,
1409 * so we compromise on this copy...
1410 */
1411 m_copydata(m, off, sizeof(esp), (caddr_t)&esp);
1412 espp = &esp;
1413 } else
1414 espp = (struct newesp*)(void *)(mtod(m, caddr_t) + off);
1415
1416 if (cmd == PRC_MSGSIZE) {
1417 int valid = 0;
1418
1419 /*
1420 * Check to see if we have a valid SA corresponding to
1421 * the address in the ICMP message payload.
1422 */
1423 sa6_src = ip6cp->ip6c_src;
1424 sa6_dst = (struct sockaddr_in6 *)(void *)sa;
1425 sav = key_allocsa(AF_INET6,
1426 (caddr_t)&sa6_src->sin6_addr,
1427 (caddr_t)&sa6_dst->sin6_addr,
1428 IPPROTO_ESP, espp->esp_spi);
1429 if (sav) {
1430 if (sav->state == SADB_SASTATE_MATURE ||
1431 sav->state == SADB_SASTATE_DYING)
1432 valid++;
1433 key_freesav(sav, KEY_SADB_UNLOCKED);
1434 }
1435
1436 /* XXX Further validation? */
1437
1438 /*
1439 * Depending on the value of "valid" and routing table
1440 * size (mtudisc_{hi,lo}wat), we will:
1441 * - recalculate the new MTU and create the
1442 * corresponding routing entry, or
1443 * - ignore the MTU change notification.
1444 */
1445 icmp6_mtudisc_update((struct ip6ctlparam *)d, valid);
1446 }
1447 } else {
1448 /* we normally notify any pcb here */
1449 }
1450 }
1451 #endif /* INET6 */