bsd/netinet6/esp_input.c
1 /*
2 * Copyright (c) 2008-2016 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 /* $FreeBSD: src/sys/netinet6/esp_input.c,v 1.1.2.3 2001/07/03 11:01:50 ume Exp $ */
30 /* $KAME: esp_input.c,v 1.55 2001/03/23 08:08:47 itojun Exp $ */
31
32 /*
33 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. Neither the name of the project nor the names of its contributors
45 * may be used to endorse or promote products derived from this software
46 * without specific prior written permission.
47 *
48 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
49 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
52 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 * SUCH DAMAGE.
59 */
60
61 /*
62 * RFC1827/2406 Encapsulated Security Payload.
63 */
64
65 #include <sys/param.h>
66 #include <sys/systm.h>
67 #include <sys/malloc.h>
68 #include <sys/mbuf.h>
69 #include <sys/mcache.h>
70 #include <sys/domain.h>
71 #include <sys/protosw.h>
72 #include <sys/socket.h>
73 #include <sys/errno.h>
74 #include <sys/time.h>
75 #include <sys/kernel.h>
76 #include <sys/syslog.h>
77
78 #include <net/if.h>
79 #include <net/if_ipsec.h>
80 #include <net/route.h>
81 #include <kern/cpu_number.h>
82 #include <kern/locks.h>
83
84 #include <netinet/in.h>
85 #include <netinet/in_systm.h>
86 #include <netinet/ip.h>
87 #include <netinet/ip_var.h>
88 #include <netinet/in_var.h>
89 #include <netinet/ip_ecn.h>
90 #include <netinet/in_pcb.h>
91 #include <netinet/udp.h>
92 #if INET6
93 #include <netinet6/ip6_ecn.h>
94 #endif
95
96 #if INET6
97 #include <netinet/ip6.h>
98 #include <netinet6/in6_pcb.h>
99 #include <netinet6/ip6_var.h>
100 #include <netinet/icmp6.h>
101 #include <netinet6/ip6protosw.h>
102 #endif
103
104 #include <netinet6/ipsec.h>
105 #if INET6
106 #include <netinet6/ipsec6.h>
107 #endif
108 #include <netinet6/ah.h>
109 #if INET6
110 #include <netinet6/ah6.h>
111 #endif
112 #include <netinet6/esp.h>
113 #if INET6
114 #include <netinet6/esp6.h>
115 #endif
116 #include <netkey/key.h>
117 #include <netkey/keydb.h>
118 #include <netkey/key_debug.h>
119
120 #include <net/kpi_protocol.h>
121 #include <netinet/kpi_ipfilter_var.h>
122
123 #include <net/net_osdep.h>
124 #include <mach/sdt.h>
125
126 #include <sys/kdebug.h>
127 #define DBG_LAYER_BEG NETDBG_CODE(DBG_NETIPSEC, 1)
128 #define DBG_LAYER_END NETDBG_CODE(DBG_NETIPSEC, 3)
129 #define DBG_FNC_ESPIN NETDBG_CODE(DBG_NETIPSEC, (6 << 8))
130 #define DBG_FNC_DECRYPT NETDBG_CODE(DBG_NETIPSEC, (7 << 8))
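/*
 * When IPLEN_FLIPPED is defined, ip->ip_len is assumed to have already
 * been converted to host byte order by the inet input path, so the
 * length adjustments below operate on it directly instead of going
 * through ntohs()/htons().
 */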
131 #define IPLEN_FLIPPED
132
133 extern lck_mtx_t *sadb_mutex;
134
135 #if INET
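/*
 * ESPMAXLEN is the larger of the two possible ESP header layouts
 * (struct esp for old RFC 1827 ESP, struct newesp for RFC 2406 ESP).
 * It is used as a safe pullup/length-check size before the SA, and
 * therefore the exact header format, is known.
 */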
136 #define ESPMAXLEN \
137 (sizeof(struct esp) < sizeof(struct newesp) \
138 ? sizeof(struct newesp) : sizeof(struct esp))
139
140 static struct ip *
141 esp4_input_strip_udp_encap(struct mbuf *m, int iphlen)
142 {
143 // strip the udp header that's encapsulating ESP
144 struct ip *ip;
145 size_t stripsiz = sizeof(struct udphdr);
146
147 ip = mtod(m, __typeof__(ip));
148 ovbcopy((caddr_t)ip, (caddr_t)(((u_char *)ip) + stripsiz), iphlen);
149 m->m_data += stripsiz;
150 m->m_len -= stripsiz;
151 m->m_pkthdr.len -= stripsiz;
152 ip = mtod(m, __typeof__(ip));
153 ip->ip_len = ip->ip_len - stripsiz;
154 ip->ip_p = IPPROTO_ESP;
155 return ip;
156 }
157
158 static struct ip6_hdr *
159 esp6_input_strip_udp_encap(struct mbuf *m, int ip6hlen)
160 {
161 // strip the udp header that's encapsulating ESP
162 struct ip6_hdr *ip6;
163 size_t stripsiz = sizeof(struct udphdr);
164
165 ip6 = mtod(m, __typeof__(ip6));
166 ovbcopy((caddr_t)ip6, (caddr_t)(((u_char *)ip6) + stripsiz), ip6hlen);
167 m->m_data += stripsiz;
168 m->m_len -= stripsiz;
169 m->m_pkthdr.len -= stripsiz;
170 ip6 = mtod(m, __typeof__(ip6));
171 ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - stripsiz);
172 ip6->ip6_nxt = IPPROTO_ESP;
173 return ip6;
174 }
175
176 void
177 esp4_input(struct mbuf *m, int off)
178 {
179 struct ip *ip;
180 #if INET6
181 struct ip6_hdr *ip6;
182 #endif /* INET6 */
183 struct esp *esp;
184 struct esptail esptail;
185 u_int32_t spi;
186 u_int32_t seq;
187 struct secasvar *sav = NULL;
188 size_t taillen;
189 u_int16_t nxt;
190 const struct esp_algorithm *algo;
191 int ivlen;
192 size_t hlen;
193 size_t esplen;
194 sa_family_t ifamily;
195
196 KERNEL_DEBUG(DBG_FNC_ESPIN | DBG_FUNC_START, 0,0,0,0,0);
197 /* sanity check for alignment. */
198 if (off % 4 != 0 || m->m_pkthdr.len % 4 != 0) {
199 ipseclog((LOG_ERR, "IPv4 ESP input: packet alignment problem "
200 "(off=%d, pktlen=%d)\n", off, m->m_pkthdr.len));
201 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
202 goto bad;
203 }
204
205 if (m->m_len < off + ESPMAXLEN) {
206 m = m_pullup(m, off + ESPMAXLEN);
207 if (!m) {
208 ipseclog((LOG_DEBUG,
209 "IPv4 ESP input: can't pullup in esp4_input\n"));
210 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
211 goto bad;
212 }
213 }
214
215 /* Expect 32-bit aligned data pointer on strict-align platforms */
216 MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);
217
218 ip = mtod(m, struct ip *);
219 // expect udp-encap and esp packets only
220 if (ip->ip_p != IPPROTO_ESP &&
221 !(ip->ip_p == IPPROTO_UDP && off >= sizeof(struct udphdr))) {
222 ipseclog((LOG_DEBUG,
223 "IPv4 ESP input: invalid protocol type\n"));
224 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
225 goto bad;
226 }
227 esp = (struct esp *)(void *)(((u_int8_t *)ip) + off);
228 #ifdef _IP_VHL
229 hlen = IP_VHL_HL(ip->ip_vhl) << 2;
230 #else
231 hlen = ip->ip_hl << 2;
232 #endif
233
234 /* find the security association. */
235 spi = esp->esp_spi;
236
237 if ((sav = key_allocsa(AF_INET,
238 (caddr_t)&ip->ip_src, (caddr_t)&ip->ip_dst,
239 IPPROTO_ESP, spi)) == 0) {
240 ipseclog((LOG_WARNING,
241 "IPv4 ESP input: no key association found for spi %u\n",
242 (u_int32_t)ntohl(spi)));
243 IPSEC_STAT_INCREMENT(ipsecstat.in_nosa);
244 goto bad;
245 }
246 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
247 printf("DP esp4_input called to allocate SA:0x%llx\n",
248 (uint64_t)VM_KERNEL_ADDRPERM(sav)));
249 if (sav->state != SADB_SASTATE_MATURE
250 && sav->state != SADB_SASTATE_DYING) {
251 ipseclog((LOG_DEBUG,
252 "IPv4 ESP input: non-mature/dying SA found for spi %u\n",
253 (u_int32_t)ntohl(spi)));
254 IPSEC_STAT_INCREMENT(ipsecstat.in_badspi);
255 goto bad;
256 }
257 algo = esp_algorithm_lookup(sav->alg_enc);
258 if (!algo) {
259 ipseclog((LOG_DEBUG, "IPv4 ESP input: "
260 "unsupported encryption algorithm for spi %u\n",
261 (u_int32_t)ntohl(spi)));
262 IPSEC_STAT_INCREMENT(ipsecstat.in_badspi);
263 goto bad;
264 }
265
266 /* check if we have proper ivlen information */
267 ivlen = sav->ivlen;
268 if (ivlen < 0) {
269 ipseclog((LOG_ERR, "improper ivlen in IPv4 ESP input: %s %s\n",
270 ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
271 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
272 goto bad;
273 }
274
275 seq = ntohl(((struct newesp *)esp)->esp_seq);
276
277 /* Save ICV from packet for verification later */
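/*
 * Algorithms that provide finalizedecrypt (combined-mode/AEAD ciphers)
 * carry their ICV as part of the cipher operation itself, so the
 * separate HMAC-style authentication and the replay pre-check below
 * are skipped; the ICV saved here is compared against the tag produced
 * by the cipher once decryption completes (see delay_icv).
 */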
278 size_t siz = 0;
279 unsigned char saved_icv[AH_MAXSUMSIZE];
280 if (algo->finalizedecrypt) {
281 siz = algo->icvlen;
282 m_copydata(m, m->m_pkthdr.len - siz, siz, (caddr_t) saved_icv);
283 goto delay_icv;
284 }
285
286 if (!((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay
287 && (sav->alg_auth && sav->key_auth)))
288 goto noreplaycheck;
289
290 if (sav->alg_auth == SADB_X_AALG_NULL ||
291 sav->alg_auth == SADB_AALG_NONE)
292 goto noreplaycheck;
293
294 /*
295 * check for sequence number.
296 */
297 if (ipsec_chkreplay(seq, sav))
298 ; /* okay */
299 else {
300 IPSEC_STAT_INCREMENT(ipsecstat.in_espreplay);
301 ipseclog((LOG_WARNING,
302 "replay packet in IPv4 ESP input: %s %s\n",
303 ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
304 goto bad;
305 }
306
307 /* check ICV */
308 {
309 u_char sum0[AH_MAXSUMSIZE] __attribute__((aligned(4)));
310 u_char sum[AH_MAXSUMSIZE] __attribute__((aligned(4)));
311 const struct ah_algorithm *sumalgo;
312
313 sumalgo = ah_algorithm_lookup(sav->alg_auth);
314 if (!sumalgo)
315 goto noreplaycheck;
316 siz = (((*sumalgo->sumsiz)(sav) + 3) & ~(4 - 1));
317 if (m->m_pkthdr.len < off + ESPMAXLEN + siz) {
318 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
319 goto bad;
320 }
321 if (AH_MAXSUMSIZE < siz) {
322 ipseclog((LOG_DEBUG,
323 "internal error: AH_MAXSUMSIZE must be larger than %u\n",
324 (u_int32_t)siz));
325 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
326 goto bad;
327 }
328
329 m_copydata(m, m->m_pkthdr.len - siz, siz, (caddr_t) &sum0[0]);
330
331 if (esp_auth(m, off, m->m_pkthdr.len - off - siz, sav, sum)) {
332 ipseclog((LOG_WARNING, "auth fail in IPv4 ESP input: %s %s\n",
333 ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
334 IPSEC_STAT_INCREMENT(ipsecstat.in_espauthfail);
335 goto bad;
336 }
337
338 if (bcmp(sum0, sum, siz) != 0) {
339 ipseclog((LOG_WARNING, "auth fail in IPv4 ESP input: %s %s\n",
340 ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
341 IPSEC_STAT_INCREMENT(ipsecstat.in_espauthfail);
342 goto bad;
343 }
344
345 delay_icv:
346
347 /* strip off the authentication data */
348 m_adj(m, -siz);
349 ip = mtod(m, struct ip *);
350 #ifdef IPLEN_FLIPPED
351 ip->ip_len = ip->ip_len - siz;
352 #else
353 ip->ip_len = htons(ntohs(ip->ip_len) - siz);
354 #endif
355 m->m_flags |= M_AUTHIPDGM;
356 IPSEC_STAT_INCREMENT(ipsecstat.in_espauthsucc);
357 }
358
359 /*
360 * update sequence number.
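 * ipsec_updatereplay() both validates the sequence number against the
 * anti-replay window and advances the window; a non-zero return means
 * the packet is a replay and must be dropped.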
361 */
362 if ((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay) {
363 if (ipsec_updatereplay(seq, sav)) {
364 IPSEC_STAT_INCREMENT(ipsecstat.in_espreplay);
365 goto bad;
366 }
367 }
368
369 noreplaycheck:
370
371 /* process main esp header. */
372 if (sav->flags & SADB_X_EXT_OLD) {
373 /* RFC 1827 */
374 esplen = sizeof(struct esp);
375 } else {
376 /* RFC 2406 */
377 if (sav->flags & SADB_X_EXT_DERIV)
378 esplen = sizeof(struct esp);
379 else
380 esplen = sizeof(struct newesp);
381 }
382
383 if (m->m_pkthdr.len < off + esplen + ivlen + sizeof(esptail)) {
384 ipseclog((LOG_WARNING,
385 "IPv4 ESP input: packet too short\n"));
386 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
387 goto bad;
388 }
389
390 if (m->m_len < off + esplen + ivlen) {
391 m = m_pullup(m, off + esplen + ivlen);
392 if (!m) {
393 ipseclog((LOG_DEBUG,
394 "IPv4 ESP input: can't pullup in esp4_input\n"));
395 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
396 goto bad;
397 }
398 }
399
400 /*
401 * pre-compute and cache intermediate key
402 */
403 if (esp_schedule(algo, sav) != 0) {
404 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
405 goto bad;
406 }
407
408 /*
409 * decrypt the packet.
410 */
411 if (!algo->decrypt)
412 panic("internal error: no decrypt function");
413 KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_START, 0,0,0,0,0);
414 if ((*algo->decrypt)(m, off, sav, algo, ivlen)) {
415 /* m is already freed */
416 m = NULL;
417 ipseclog((LOG_ERR, "decrypt fail in IPv4 ESP input: %s\n",
418 ipsec_logsastr(sav)));
419 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
420 KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 1,0,0,0,0);
421 goto bad;
422 }
423 KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 2,0,0,0,0);
424 IPSEC_STAT_INCREMENT(ipsecstat.in_esphist[sav->alg_enc]);
425
426 m->m_flags |= M_DECRYPTED;
427
428 if (algo->finalizedecrypt)
429 {
430 unsigned char tag[algo->icvlen];
431 if ((*algo->finalizedecrypt)(sav, tag, algo->icvlen)) {
432 ipseclog((LOG_ERR, "packet decryption ICV failure\n"));
433 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
434 KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 1,0,0,0,0);
435 goto bad;
436 }
437 if (memcmp(saved_icv, tag, algo->icvlen)) {
438 ipseclog((LOG_ERR, "packet decryption ICV mismatch\n"));
439 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
440 KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 1,0,0,0,0);
441 goto bad;
442 }
443 }
444
445 /*
446 * find the trailer of the ESP.
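 * The trailer consists of the padding bytes followed by a one-byte pad
 * length and a one-byte next-header field (struct esptail), so taillen
 * below is esp_padlen plus sizeof(esptail).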
447 */
448 m_copydata(m, m->m_pkthdr.len - sizeof(esptail), sizeof(esptail),
449 (caddr_t)&esptail);
450 nxt = esptail.esp_nxt;
451 taillen = esptail.esp_padlen + sizeof(esptail);
452
453 if (m->m_pkthdr.len < taillen
454 || m->m_pkthdr.len - taillen < hlen) { /*?*/
455 ipseclog((LOG_WARNING,
456 "bad pad length in IPv4 ESP input: %s %s\n",
457 ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
458 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
459 goto bad;
460 }
461
462 /* strip off the trailing pad area. */
463 m_adj(m, -taillen);
464 ip = mtod(m, struct ip *);
465 #ifdef IPLEN_FLIPPED
466 ip->ip_len = ip->ip_len - taillen;
467 #else
468 ip->ip_len = htons(ntohs(ip->ip_len) - taillen);
469 #endif
470 if (ip->ip_p == IPPROTO_UDP) {
471 // offset includes the outer ip and udp header lengths.
472 if (m->m_len < off) {
473 m = m_pullup(m, off);
474 if (!m) {
475 ipseclog((LOG_DEBUG,
476 "IPv4 ESP input: invalid udp encapsulated ESP packet length\n"));
477 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
478 goto bad;
479 }
480 }
481
482 // check the UDP encap header to detect changes in the source port, and then strip the header
483 off -= sizeof(struct udphdr); // off no longer includes the udphdr's size
484 // if peer is behind nat and this is the latest esp packet
485 if ((sav->flags & SADB_X_EXT_NATT_DETECTED_PEER) != 0 &&
486 (sav->flags & SADB_X_EXT_OLD) == 0 &&
487 seq && sav->replay &&
488 seq >= sav->replay->lastseq) {
489 struct udphdr *encap_uh = (__typeof__(encap_uh))(void *)((caddr_t)ip + off);
490 if (encap_uh->uh_sport &&
491 ntohs(encap_uh->uh_sport) != sav->remote_ike_port) {
492 sav->remote_ike_port = ntohs(encap_uh->uh_sport);
493 }
494 }
495 ip = esp4_input_strip_udp_encap(m, off);
496 esp = (struct esp *)(void *)(((u_int8_t *)ip) + off);
497 }
498
499 /* was it transmitted over the IPsec tunnel SA? */
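/*
 * Tunnel mode: strip the outer IP header, ESP header and IV and hand
 * the inner packet back to the stack via proto_input() (or to the
 * ipsec interface).  Otherwise (transport mode) only the ESP header
 * and IV are stripped in place and the next protocol is dispatched
 * directly.
 */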
500 if (ipsec4_tunnel_validate(m, off + esplen + ivlen, nxt, sav, &ifamily)) {
501 ifaddr_t ifa;
502 struct sockaddr_storage addr;
503
504 /*
505 * strip off all the headers that precede the ESP header.
506 * IP4 xx ESP IP4' payload -> IP4' payload
507 *
508 * XXX more sanity checks
509 * XXX relationship with gif?
510 */
511 u_int8_t tos, otos;
512 int sum;
513
514 tos = ip->ip_tos;
515 m_adj(m, off + esplen + ivlen);
516 if (ifamily == AF_INET) {
517 struct sockaddr_in *ipaddr;
518
519 if (m->m_len < sizeof(*ip)) {
520 m = m_pullup(m, sizeof(*ip));
521 if (!m) {
522 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
523 goto bad;
524 }
525 }
526 ip = mtod(m, struct ip *);
527 /* ECN consideration. */
528
529 otos = ip->ip_tos;
530 if (ip_ecn_egress(ip4_ipsec_ecn, &tos, &ip->ip_tos) == 0) {
531 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
532 goto bad;
533 }
534
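/*
 * If ECN egress processing changed the TOS byte, patch the IPv4
 * header checksum incrementally (RFC 1624) instead of recomputing it.
 */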
535 if (otos != ip->ip_tos) {
536 sum = ~ntohs(ip->ip_sum) & 0xffff;
537 sum += (~otos & 0xffff) + ip->ip_tos;
538 sum = (sum >> 16) + (sum & 0xffff);
539 sum += (sum >> 16); /* add carry */
540 ip->ip_sum = htons(~sum & 0xffff);
541 }
542
543 if (!key_checktunnelsanity(sav, AF_INET,
544 (caddr_t)&ip->ip_src, (caddr_t)&ip->ip_dst)) {
545 ipseclog((LOG_ERR, "ipsec tunnel address mismatch "
546 "in ESP input: %s %s\n",
547 ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
548 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
549 goto bad;
550 }
551
552 bzero(&addr, sizeof(addr));
553 ipaddr = (__typeof__(ipaddr))&addr;
554 ipaddr->sin_family = AF_INET;
555 ipaddr->sin_len = sizeof(*ipaddr);
556 ipaddr->sin_addr = ip->ip_dst;
557 #if INET6
558 } else if (ifamily == AF_INET6) {
559 struct sockaddr_in6 *ip6addr;
560
561 /*
562 * m_pullup is prohibited in KAME IPv6 input processing
563 * but there's no other way!
564 */
565 if (m->m_len < sizeof(*ip6)) {
566 m = m_pullup(m, sizeof(*ip6));
567 if (!m) {
568 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
569 goto bad;
570 }
571 }
572
573 /*
574 * Expect 32-bit aligned data pointer on strict-align
575 * platforms.
576 */
577 MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);
578
579 ip6 = mtod(m, struct ip6_hdr *);
580
581 /* ECN consideration. */
582 if (ip64_ecn_egress(ip4_ipsec_ecn, &tos, &ip6->ip6_flow) == 0) {
583 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
584 goto bad;
585 }
586
587 if (!key_checktunnelsanity(sav, AF_INET6,
588 (caddr_t)&ip6->ip6_src, (caddr_t)&ip6->ip6_dst)) {
589 ipseclog((LOG_ERR, "ipsec tunnel address mismatch "
590 "in ESP input: %s %s\n",
591 ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
592 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
593 goto bad;
594 }
595
596 bzero(&addr, sizeof(addr));
597 ip6addr = (__typeof__(ip6addr))&addr;
598 ip6addr->sin6_family = AF_INET6;
599 ip6addr->sin6_len = sizeof(*ip6addr);
600 ip6addr->sin6_addr = ip6->ip6_dst;
601 #endif /* INET6 */
602 } else {
603 ipseclog((LOG_ERR, "ipsec tunnel unsupported address family "
604 "in ESP input\n"));
605 goto bad;
606 }
607
608 key_sa_recordxfer(sav, m);
609 if (ipsec_addhist(m, IPPROTO_ESP, spi) != 0 ||
610 ipsec_addhist(m, IPPROTO_IPV4, 0) != 0) {
611 IPSEC_STAT_INCREMENT(ipsecstat.in_nomem);
612 goto bad;
613 }
614
615 // update the receiving interface address based on the inner address
616 ifa = ifa_ifwithaddr((struct sockaddr *)&addr);
617 if (ifa) {
618 m->m_pkthdr.rcvif = ifa->ifa_ifp;
619 IFA_REMREF(ifa);
620 }
621
622 /* Clear the csum flags, they can't be valid for the inner headers */
623 m->m_pkthdr.csum_flags = 0;
624
625 // Input via IPSec interface
626 if (sav->sah->ipsec_if != NULL) {
627 if (ipsec_inject_inbound_packet(sav->sah->ipsec_if, m) == 0) {
628 m = NULL;
629 goto done;
630 } else {
631 goto bad;
632 }
633 }
634
635 if (proto_input(ifamily == AF_INET ? PF_INET : PF_INET6, m) != 0)
636 goto bad;
637
638 nxt = IPPROTO_DONE;
639 KERNEL_DEBUG(DBG_FNC_ESPIN | DBG_FUNC_END, 2,0,0,0,0);
640 } else {
641 /*
642 * strip off ESP header and IV.
643 * even in m_pulldown case, we need to strip off ESP so that
644 * we can always compute checksum for AH correctly.
645 */
646 size_t stripsiz;
647
648 stripsiz = esplen + ivlen;
649
650 ip = mtod(m, struct ip *);
651 ovbcopy((caddr_t)ip, (caddr_t)(((u_char *)ip) + stripsiz), off);
652 m->m_data += stripsiz;
653 m->m_len -= stripsiz;
654 m->m_pkthdr.len -= stripsiz;
655
656 ip = mtod(m, struct ip *);
657 #ifdef IPLEN_FLIPPED
658 ip->ip_len = ip->ip_len - stripsiz;
659 #else
660 ip->ip_len = htons(ntohs(ip->ip_len) - stripsiz);
661 #endif
662 ip->ip_p = nxt;
663
664 key_sa_recordxfer(sav, m);
665 if (ipsec_addhist(m, IPPROTO_ESP, spi) != 0) {
666 IPSEC_STAT_INCREMENT(ipsecstat.in_nomem);
667 goto bad;
668 }
669
670 /*
671 * Set the csum valid flag: if we authenticated the
672 * packet, the payload shouldn't be corrupt unless
673 * it was corrupted before being signed on the other
674 * side.
675 */
676 if (nxt == IPPROTO_TCP || nxt == IPPROTO_UDP) {
677 m->m_pkthdr.csum_flags = CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
678 m->m_pkthdr.csum_data = 0xFFFF;
679 }
680
681 if (nxt != IPPROTO_DONE) {
682 if ((ip_protox[nxt]->pr_flags & PR_LASTHDR) != 0 &&
683 ipsec4_in_reject(m, NULL)) {
684 IPSEC_STAT_INCREMENT(ipsecstat.in_polvio);
685 goto bad;
686 }
687 KERNEL_DEBUG(DBG_FNC_ESPIN | DBG_FUNC_END, 3,0,0,0,0);
688
689 /* translate encapsulated UDP port ? */
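/*
 * SADB_X_EXT_NATT_MULTIPLEUSERS marks a NAT-T SA that may be shared by
 * multiple users behind the same NAT.  Remember the encapsulated source
 * port the first time it is seen, drop the packet if that port later
 * changes, and rewrite the port to sav->remote_ike_port (clearing the
 * UDP checksum) before the datagram is passed up.
 */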
690 if ((sav->flags & SADB_X_EXT_NATT_MULTIPLEUSERS) != 0) {
691 struct udphdr *udp;
692
693 if (nxt != IPPROTO_UDP) { /* not a UDP packet - drop it */
694 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
695 goto bad;
696 }
697
698 if (m->m_len < off + sizeof(struct udphdr)) {
699 m = m_pullup(m, off + sizeof(struct udphdr));
700 if (!m) {
701 ipseclog((LOG_DEBUG,
702 "IPv4 ESP input: can't pullup UDP header in esp4_input\n"));
703 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
704 goto bad;
705 }
706 ip = mtod(m, struct ip *);
707 }
708 udp = (struct udphdr *)(void *)(((u_int8_t *)ip) + off);
709
710 lck_mtx_lock(sadb_mutex);
711 if (sav->natt_encapsulated_src_port == 0) {
712 sav->natt_encapsulated_src_port = udp->uh_sport;
713 } else if (sav->natt_encapsulated_src_port != udp->uh_sport) { /* something wrong */
714 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
715 lck_mtx_unlock(sadb_mutex);
716 goto bad;
717 }
718 lck_mtx_unlock(sadb_mutex);
719 udp->uh_sport = htons(sav->remote_ike_port);
720 udp->uh_sum = 0;
721 }
722
723 DTRACE_IP6(receive, struct mbuf *, m, struct inpcb *, NULL,
724 struct ip *, ip, struct ifnet *, m->m_pkthdr.rcvif,
725 struct ip *, ip, struct ip6_hdr *, NULL);
726
727 // Input via IPSec interface
728 if (sav->sah->ipsec_if != NULL) {
729 ip->ip_len = htons(ip->ip_len + hlen);
730 ip->ip_off = htons(ip->ip_off);
731 ip->ip_sum = 0;
732 ip->ip_sum = ip_cksum_hdr_in(m, hlen);
733 if (ipsec_inject_inbound_packet(sav->sah->ipsec_if, m) == 0) {
734 m = NULL;
735 goto done;
736 } else {
737 goto bad;
738 }
739 }
740
741 ip_proto_dispatch_in(m, off, nxt, 0);
742 } else
743 m_freem(m);
744 m = NULL;
745 }
746
747 done:
748 if (sav) {
749 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
750 printf("DP esp4_input call free SA:0x%llx\n",
751 (uint64_t)VM_KERNEL_ADDRPERM(sav)));
752 key_freesav(sav, KEY_SADB_UNLOCKED);
753 }
754 IPSEC_STAT_INCREMENT(ipsecstat.in_success);
755 return;
756
757 bad:
758 if (sav) {
759 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
760 printf("DP esp4_input call free SA:0x%llx\n",
761 (uint64_t)VM_KERNEL_ADDRPERM(sav)));
762 key_freesav(sav, KEY_SADB_UNLOCKED);
763 }
764 if (m)
765 m_freem(m);
766 KERNEL_DEBUG(DBG_FNC_ESPIN | DBG_FUNC_END, 4,0,0,0,0);
767 return;
768 }
769 #endif /* INET */
770
771 #if INET6
772 int
773 esp6_input(struct mbuf **mp, int *offp, int proto)
774 {
775 #pragma unused(proto)
776 struct mbuf *m = *mp;
777 int off = *offp;
778 struct ip *ip;
779 struct ip6_hdr *ip6;
780 struct esp *esp;
781 struct esptail esptail;
782 u_int32_t spi;
783 u_int32_t seq;
784 struct secasvar *sav = NULL;
785 size_t taillen;
786 u_int16_t nxt;
787 char *nproto;
788 const struct esp_algorithm *algo;
789 int ivlen;
790 size_t esplen;
791 sa_family_t ifamily;
792
793 /* sanity check for alignment. */
794 if (off % 4 != 0 || m->m_pkthdr.len % 4 != 0) {
795 ipseclog((LOG_ERR, "IPv6 ESP input: packet alignment problem "
796 "(off=%d, pktlen=%d)\n", off, m->m_pkthdr.len));
797 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
798 goto bad;
799 }
800
801 #ifndef PULLDOWN_TEST
802 IP6_EXTHDR_CHECK(m, off, ESPMAXLEN, {return IPPROTO_DONE;});
803 esp = (struct esp *)(void *)(mtod(m, caddr_t) + off);
804 #else
805 IP6_EXTHDR_GET(esp, struct esp *, m, off, ESPMAXLEN);
806 if (esp == NULL) {
807 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
808 return IPPROTO_DONE;
809 }
810 #endif
811 /* Expect 32-bit aligned data pointer on strict-align platforms */
812 MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);
813
814 ip6 = mtod(m, struct ip6_hdr *);
815
816 if (ntohs(ip6->ip6_plen) == 0) {
817 ipseclog((LOG_ERR, "IPv6 ESP input: "
818 "ESP with IPv6 jumbogram is not supported.\n"));
819 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
820 goto bad;
821 }
822
823 nproto = ip6_get_prevhdr(m, off);
824 if (nproto == NULL || (*nproto != IPPROTO_ESP &&
825 !(*nproto == IPPROTO_UDP && off >= sizeof(struct udphdr)))) {
826 ipseclog((LOG_DEBUG, "IPv6 ESP input: invalid protocol type\n"));
827 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
828 goto bad;
829 }
830
831 /* find the security association. */
832 spi = esp->esp_spi;
833
834 if ((sav = key_allocsa(AF_INET6,
835 (caddr_t)&ip6->ip6_src, (caddr_t)&ip6->ip6_dst,
836 IPPROTO_ESP, spi)) == 0) {
837 ipseclog((LOG_WARNING,
838 "IPv6 ESP input: no key association found for spi %u\n",
839 (u_int32_t)ntohl(spi)));
840 IPSEC_STAT_INCREMENT(ipsec6stat.in_nosa);
841 goto bad;
842 }
843 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
844 printf("DP esp6_input called to allocate SA:0x%llx\n",
845 (uint64_t)VM_KERNEL_ADDRPERM(sav)));
846 if (sav->state != SADB_SASTATE_MATURE
847 && sav->state != SADB_SASTATE_DYING) {
848 ipseclog((LOG_DEBUG,
849 "IPv6 ESP input: non-mature/dying SA found for spi %u\n",
850 (u_int32_t)ntohl(spi)));
851 IPSEC_STAT_INCREMENT(ipsec6stat.in_badspi);
852 goto bad;
853 }
854 algo = esp_algorithm_lookup(sav->alg_enc);
855 if (!algo) {
856 ipseclog((LOG_DEBUG, "IPv6 ESP input: "
857 "unsupported encryption algorithm for spi %u\n",
858 (u_int32_t)ntohl(spi)));
859 IPSEC_STAT_INCREMENT(ipsec6stat.in_badspi);
860 goto bad;
861 }
862
863 /* check if we have proper ivlen information */
864 ivlen = sav->ivlen;
865 if (ivlen < 0) {
866 ipseclog((LOG_ERR, "improper ivlen in IPv6 ESP input: %s %s\n",
867 ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
868 IPSEC_STAT_INCREMENT(ipsec6stat.in_badspi);
869 goto bad;
870 }
871
872 seq = ntohl(((struct newesp *)esp)->esp_seq);
873
874 /* Save ICV from packet for verification later */
875 size_t siz = 0;
876 unsigned char saved_icv[AH_MAXSUMSIZE];
877 if (algo->finalizedecrypt) {
878 siz = algo->icvlen;
879 m_copydata(m, m->m_pkthdr.len - siz, siz, (caddr_t) saved_icv);
880 goto delay_icv;
881 }
882
883 if (!((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay
884 && (sav->alg_auth && sav->key_auth)))
885 goto noreplaycheck;
886
887 if (sav->alg_auth == SADB_X_AALG_NULL ||
888 sav->alg_auth == SADB_AALG_NONE)
889 goto noreplaycheck;
890
891 /*
892 * check for sequence number.
893 */
894 if (ipsec_chkreplay(seq, sav))
895 ; /* okay */
896 else {
897 IPSEC_STAT_INCREMENT(ipsec6stat.in_espreplay);
898 ipseclog((LOG_WARNING,
899 "replay packet in IPv6 ESP input: %s %s\n",
900 ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
901 goto bad;
902 }
903
904 /* check ICV */
905 {
906 u_char sum0[AH_MAXSUMSIZE] __attribute__((aligned(4)));
907 u_char sum[AH_MAXSUMSIZE] __attribute__((aligned(4)));
908 const struct ah_algorithm *sumalgo;
909
910 sumalgo = ah_algorithm_lookup(sav->alg_auth);
911 if (!sumalgo)
912 goto noreplaycheck;
913 siz = (((*sumalgo->sumsiz)(sav) + 3) & ~(4 - 1));
914 if (m->m_pkthdr.len < off + ESPMAXLEN + siz) {
915 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
916 goto bad;
917 }
918 if (AH_MAXSUMSIZE < siz) {
919 ipseclog((LOG_DEBUG,
920 "internal error: AH_MAXSUMSIZE must be larger than %u\n",
921 (u_int32_t)siz));
922 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
923 goto bad;
924 }
925
926 m_copydata(m, m->m_pkthdr.len - siz, siz, (caddr_t) &sum0[0]);
927
928 if (esp_auth(m, off, m->m_pkthdr.len - off - siz, sav, sum)) {
929 ipseclog((LOG_WARNING, "auth fail in IPv6 ESP input: %s %s\n",
930 ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
931 IPSEC_STAT_INCREMENT(ipsec6stat.in_espauthfail);
932 goto bad;
933 }
934
935 if (bcmp(sum0, sum, siz) != 0) {
936 ipseclog((LOG_WARNING, "auth fail in IPv6 ESP input: %s %s\n",
937 ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
938 IPSEC_STAT_INCREMENT(ipsec6stat.in_espauthfail);
939 goto bad;
940 }
941
942 delay_icv:
943
944 /* strip off the authentication data */
945 m_adj(m, -siz);
946 ip6 = mtod(m, struct ip6_hdr *);
947 ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - siz);
948
949 m->m_flags |= M_AUTHIPDGM;
950 IPSEC_STAT_INCREMENT(ipsec6stat.in_espauthsucc);
951 }
952
953 /*
954 * update sequence number.
955 */
956 if ((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay) {
957 if (ipsec_updatereplay(seq, sav)) {
958 IPSEC_STAT_INCREMENT(ipsec6stat.in_espreplay);
959 goto bad;
960 }
961 }
962
963 noreplaycheck:
964
965 /* process main esp header. */
966 if (sav->flags & SADB_X_EXT_OLD) {
967 /* RFC 1827 */
968 esplen = sizeof(struct esp);
969 } else {
970 /* RFC 2406 */
971 if (sav->flags & SADB_X_EXT_DERIV)
972 esplen = sizeof(struct esp);
973 else
974 esplen = sizeof(struct newesp);
975 }
976
977 if (m->m_pkthdr.len < off + esplen + ivlen + sizeof(esptail)) {
978 ipseclog((LOG_WARNING,
979 "IPv6 ESP input: packet too short\n"));
980 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
981 goto bad;
982 }
983
984 #ifndef PULLDOWN_TEST
985 IP6_EXTHDR_CHECK(m, off, esplen + ivlen, return IPPROTO_DONE); /*XXX*/
986 #else
987 IP6_EXTHDR_GET(esp, struct esp *, m, off, esplen + ivlen);
988 if (esp == NULL) {
989 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
990 m = NULL;
991 goto bad;
992 }
993 #endif
994 ip6 = mtod(m, struct ip6_hdr *); /*set it again just in case*/
995
996 /*
997 * pre-compute and cache intermediate key
998 */
999 if (esp_schedule(algo, sav) != 0) {
1000 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
1001 goto bad;
1002 }
1003
1004 /*
1005 * decrypt the packet.
1006 */
1007 if (!algo->decrypt)
1008 panic("internal error: no decrypt function");
1009 if ((*algo->decrypt)(m, off, sav, algo, ivlen)) {
1010 /* m is already freed */
1011 m = NULL;
1012 ipseclog((LOG_ERR, "decrypt fail in IPv6 ESP input: %s\n",
1013 ipsec_logsastr(sav)));
1014 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
1015 goto bad;
1016 }
1017 IPSEC_STAT_INCREMENT(ipsec6stat.in_esphist[sav->alg_enc]);
1018
1019 m->m_flags |= M_DECRYPTED;
1020
1021 if (algo->finalizedecrypt)
1022 {
1023 unsigned char tag[algo->icvlen];
1024 if ((*algo->finalizedecrypt)(sav, tag, algo->icvlen)) {
1025 ipseclog((LOG_ERR, "packet decryption ICV failure\n"));
1026 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
1027 KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 1,0,0,0,0);
1028 goto bad;
1029 }
1030 if (memcmp(saved_icv, tag, algo->icvlen)) {
1031 ipseclog((LOG_ERR, "packet decryption ICV mismatch\n"));
1032 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
1033 KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 1,0,0,0,0);
1034 goto bad;
1035 }
1036 }
1037
1038 /*
1039 * find the trailer of the ESP.
1040 */
1041 m_copydata(m, m->m_pkthdr.len - sizeof(esptail), sizeof(esptail),
1042 (caddr_t)&esptail);
1043 nxt = esptail.esp_nxt;
1044 taillen = esptail.esp_padlen + sizeof(esptail);
1045
1046 if (m->m_pkthdr.len < taillen
1047 || m->m_pkthdr.len - taillen < sizeof(struct ip6_hdr)) { /*?*/
1048 ipseclog((LOG_WARNING,
1049 "bad pad length in IPv6 ESP input: %s %s\n",
1050 ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
1051 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
1052 goto bad;
1053 }
1054
1055 /* strip off the trailing pad area. */
1056 m_adj(m, -taillen);
1057 ip6 = mtod(m, struct ip6_hdr *);
1058 ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - taillen);
1059
1060 if (*nproto == IPPROTO_UDP) {
1061 // offset includes the outer ip and udp header lengths.
1062 if (m->m_len < off) {
1063 m = m_pullup(m, off);
1064 if (!m) {
1065 ipseclog((LOG_DEBUG,
1066 "IPv6 ESP input: invalid udp encapsulated ESP packet length\n"));
1067 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
1068 goto bad;
1069 }
1070 }
1071
1072 // check the UDP encap header to detect changes in the source port, and then strip the header
1073 off -= sizeof(struct udphdr); // off no longer includes the udphdr's size
1074 // if peer is behind nat and this is the latest esp packet
1075 if ((sav->flags & SADB_X_EXT_NATT_DETECTED_PEER) != 0 &&
1076 (sav->flags & SADB_X_EXT_OLD) == 0 &&
1077 seq && sav->replay &&
1078 seq >= sav->replay->lastseq) {
1079 struct udphdr *encap_uh = (__typeof__(encap_uh))(void *)((caddr_t)ip6 + off);
1080 if (encap_uh->uh_sport &&
1081 ntohs(encap_uh->uh_sport) != sav->remote_ike_port) {
1082 sav->remote_ike_port = ntohs(encap_uh->uh_sport);
1083 }
1084 }
1085 ip6 = esp6_input_strip_udp_encap(m, off);
1086 esp = (struct esp *)(void *)(((u_int8_t *)ip6) + off);
1087 }
1088
1089
1090 /* was it transmitted over the IPsec tunnel SA? */
1091 if (ipsec6_tunnel_validate(m, off + esplen + ivlen, nxt, sav, &ifamily)) {
1092 ifaddr_t ifa;
1093 struct sockaddr_storage addr;
1094
1095 /*
1096 * strip off all the headers that precede the ESP header.
1097 * IP6 xx ESP IP6' payload -> IP6' payload
1098 *
1099 * XXX more sanity checks
1100 * XXX relationship with gif?
1101 */
1102 u_int32_t flowinfo; /*net endian*/
1103 flowinfo = ip6->ip6_flow;
1104 m_adj(m, off + esplen + ivlen);
1105 if (ifamily == AF_INET6) {
1106 struct sockaddr_in6 *ip6addr;
1107
1108 if (m->m_len < sizeof(*ip6)) {
1109 #ifndef PULLDOWN_TEST
1110 /*
1111 * m_pullup is prohibited in KAME IPv6 input processing
1112 * but there's no other way!
1113 */
1114 #else
1115 /* okay to pullup in m_pulldown style */
1116 #endif
1117 m = m_pullup(m, sizeof(*ip6));
1118 if (!m) {
1119 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
1120 goto bad;
1121 }
1122 }
1123 ip6 = mtod(m, struct ip6_hdr *);
1124 /* ECN consideration. */
1125 if (ip6_ecn_egress(ip6_ipsec_ecn, &flowinfo, &ip6->ip6_flow) == 0) {
1126 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
1127 goto bad;
1128 }
1129 if (!key_checktunnelsanity(sav, AF_INET6,
1130 (caddr_t)&ip6->ip6_src, (caddr_t)&ip6->ip6_dst)) {
1131 ipseclog((LOG_ERR, "ipsec tunnel address mismatch "
1132 "in IPv6 ESP input: %s %s\n",
1133 ipsec6_logpacketstr(ip6, spi),
1134 ipsec_logsastr(sav)));
1135 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
1136 goto bad;
1137 }
1138
1139 bzero(&addr, sizeof(addr));
1140 ip6addr = (__typeof__(ip6addr))&addr;
1141 ip6addr->sin6_family = AF_INET6;
1142 ip6addr->sin6_len = sizeof(*ip6addr);
1143 ip6addr->sin6_addr = ip6->ip6_dst;
1144 } else if (ifamily == AF_INET) {
1145 struct sockaddr_in *ipaddr;
1146
1147 if (m->m_len < sizeof(*ip)) {
1148 m = m_pullup(m, sizeof(*ip));
1149 if (!m) {
1150 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
1151 goto bad;
1152 }
1153 }
1154
1155 u_int8_t otos;
1156 int sum;
1157
1158 ip = mtod(m, struct ip *);
1159 otos = ip->ip_tos;
1160 /* ECN consideration. */
1161 if (ip46_ecn_egress(ip6_ipsec_ecn, &flowinfo, &ip->ip_tos) == 0) {
1162 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
1163 goto bad;
1164 }
1165
1166 if (otos != ip->ip_tos) {
1167 sum = ~ntohs(ip->ip_sum) & 0xffff;
1168 sum += (~otos & 0xffff) + ip->ip_tos;
1169 sum = (sum >> 16) + (sum & 0xffff);
1170 sum += (sum >> 16); /* add carry */
1171 ip->ip_sum = htons(~sum & 0xffff);
1172 }
1173
1174 if (!key_checktunnelsanity(sav, AF_INET,
1175 (caddr_t)&ip->ip_src, (caddr_t)&ip->ip_dst)) {
1176 ipseclog((LOG_ERR, "ipsec tunnel address mismatch "
1177 "in ESP input: %s %s\n",
1178 ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
1179 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
1180 goto bad;
1181 }
1182
1183 bzero(&addr, sizeof(addr));
1184 ipaddr = (__typeof__(ipaddr))&addr;
1185 ipaddr->sin_family = AF_INET;
1186 ipaddr->sin_len = sizeof(*ipaddr);
1187 ipaddr->sin_addr = ip->ip_dst;
1188 }
1189
1190 key_sa_recordxfer(sav, m);
1191 if (ipsec_addhist(m, IPPROTO_ESP, spi) != 0 ||
1192 ipsec_addhist(m, IPPROTO_IPV6, 0) != 0) {
1193 IPSEC_STAT_INCREMENT(ipsec6stat.in_nomem);
1194 goto bad;
1195 }
1196
1197 // update the receiving interface address based on the inner address
1198 ifa = ifa_ifwithaddr((struct sockaddr *)&addr);
1199 if (ifa) {
1200 m->m_pkthdr.rcvif = ifa->ifa_ifp;
1201 IFA_REMREF(ifa);
1202 }
1203
1204 // Input via IPSec interface
1205 if (sav->sah->ipsec_if != NULL) {
1206 if (ipsec_inject_inbound_packet(sav->sah->ipsec_if, m) == 0) {
1207 m = NULL;
1208 nxt = IPPROTO_DONE;
1209 goto done;
1210 } else {
1211 goto bad;
1212 }
1213 }
1214
1215 if (proto_input(ifamily == AF_INET ? PF_INET : PF_INET6, m) != 0)
1216 goto bad;
1217 nxt = IPPROTO_DONE;
1218 } else {
1219 /*
1220 * strip off ESP header and IV.
1221 * even in m_pulldown case, we need to strip off ESP so that
1222 * we can always compute checksum for AH correctly.
1223 */
1224 size_t stripsiz;
1225 char *prvnxtp;
1226
1227 /*
1228 * Set the next header field of the previous header correctly.
1229 */
1230 prvnxtp = ip6_get_prevhdr(m, off); /* XXX */
1231 *prvnxtp = nxt;
1232
1233 stripsiz = esplen + ivlen;
1234
1235 ip6 = mtod(m, struct ip6_hdr *);
1236 if (m->m_len >= stripsiz + off) {
1237 ovbcopy((caddr_t)ip6, ((caddr_t)ip6) + stripsiz, off);
1238 m->m_data += stripsiz;
1239 m->m_len -= stripsiz;
1240 m->m_pkthdr.len -= stripsiz;
1241 } else {
1242 /*
1243 * this comes with no copy if the split point falls on a
1244 * cluster boundary
1245 */
1246 struct mbuf *n;
1247
1248 n = m_split(m, off, M_DONTWAIT);
1249 if (n == NULL) {
1250 /* m is retained by m_split */
1251 goto bad;
1252 }
1253 m_adj(n, stripsiz);
1254 /* m_cat does not update m_pkthdr.len */
1255 m->m_pkthdr.len += n->m_pkthdr.len;
1256 m_cat(m, n);
1257 }
1258
1259 #ifndef PULLDOWN_TEST
1260 /*
1261 * KAME requires that the packet be contiguous in the
1262 * mbuf. We need to make sure of that here.
1263 * This kind of code should be avoided.
1264 * XXX other conditions to avoid running this part?
1265 */
1266 if (m->m_len != m->m_pkthdr.len) {
1267 struct mbuf *n = NULL;
1268 int maxlen;
1269
1270 MGETHDR(n, M_DONTWAIT, MT_HEADER); /* MAC-OK */
1271 maxlen = MHLEN;
1272 if (n)
1273 M_COPY_PKTHDR(n, m);
1274 if (n && m->m_pkthdr.len > maxlen) {
1275 MCLGET(n, M_DONTWAIT);
1276 maxlen = MCLBYTES;
1277 if ((n->m_flags & M_EXT) == 0) {
1278 m_free(n);
1279 n = NULL;
1280 }
1281 }
1282 if (!n) {
1283 printf("esp6_input: mbuf allocation failed\n");
1284 goto bad;
1285 }
1286
1287 if (m->m_pkthdr.len <= maxlen) {
1288 m_copydata(m, 0, m->m_pkthdr.len, mtod(n, caddr_t));
1289 n->m_len = m->m_pkthdr.len;
1290 n->m_pkthdr.len = m->m_pkthdr.len;
1291 n->m_next = NULL;
1292 m_freem(m);
1293 } else {
1294 m_copydata(m, 0, maxlen, mtod(n, caddr_t));
1295 n->m_len = maxlen;
1296 n->m_pkthdr.len = m->m_pkthdr.len;
1297 n->m_next = m;
1298 m_adj(m, maxlen);
1299 m->m_flags &= ~M_PKTHDR;
1300 }
1301 m = n;
1302 }
1303 #endif
1304
1305 ip6 = mtod(m, struct ip6_hdr *);
1306 ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - stripsiz);
1307
1308 key_sa_recordxfer(sav, m);
1309 if (ipsec_addhist(m, IPPROTO_ESP, spi) != 0) {
1310 IPSEC_STAT_INCREMENT(ipsec6stat.in_nomem);
1311 goto bad;
1312 }
1313
1314 /*
1315 * Set the csum valid flag: if we authenticated the
1316 * packet, the payload shouldn't be corrupt unless
1317 * it was corrupted before being signed on the other
1318 * side.
1319 */
1320 if (nxt == IPPROTO_TCP || nxt == IPPROTO_UDP) {
1321 m->m_pkthdr.csum_flags = CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
1322 m->m_pkthdr.csum_data = 0xFFFF;
1323 }
1324
1325 // Input via IPSec interface
1326 if (sav->sah->ipsec_if != NULL) {
1327 if (ipsec_inject_inbound_packet(sav->sah->ipsec_if, m) == 0) {
1328 m = NULL;
1329 nxt = IPPROTO_DONE;
1330 goto done;
1331 } else {
1332 goto bad;
1333 }
1334 }
1335
1336 }
1337
1338 done:
1339 *offp = off;
1340 *mp = m;
1341 if (sav) {
1342 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
1343 printf("DP esp6_input call free SA:0x%llx\n",
1344 (uint64_t)VM_KERNEL_ADDRPERM(sav)));
1345 key_freesav(sav, KEY_SADB_UNLOCKED);
1346 }
1347 IPSEC_STAT_INCREMENT(ipsec6stat.in_success);
1348 return nxt;
1349
1350 bad:
1351 if (sav) {
1352 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
1353 printf("DP esp6_input call free SA:0x%llx\n",
1354 (uint64_t)VM_KERNEL_ADDRPERM(sav)));
1355 key_freesav(sav, KEY_SADB_UNLOCKED);
1356 }
1357 if (m)
1358 m_freem(m);
1359 return IPPROTO_DONE;
1360 }
1361
1362 void
1363 esp6_ctlinput(int cmd, struct sockaddr *sa, void *d)
1364 {
1365 const struct newesp *espp;
1366 struct newesp esp;
1367 struct ip6ctlparam *ip6cp = NULL, ip6cp1;
1368 struct secasvar *sav;
1369 struct ip6_hdr *ip6;
1370 struct mbuf *m;
1371 int off;
1372 struct sockaddr_in6 *sa6_src, *sa6_dst;
1373
1374 if (sa->sa_family != AF_INET6 ||
1375 sa->sa_len != sizeof(struct sockaddr_in6))
1376 return;
1377 if ((unsigned)cmd >= PRC_NCMDS)
1378 return;
1379
1380 /* if the parameter is from icmp6, decode it. */
1381 if (d != NULL) {
1382 ip6cp = (struct ip6ctlparam *)d;
1383 m = ip6cp->ip6c_m;
1384 ip6 = ip6cp->ip6c_ip6;
1385 off = ip6cp->ip6c_off;
1386 } else {
1387 m = NULL;
1388 ip6 = NULL;
1389 }
1390
1391 if (ip6) {
1392 /*
1393 * Notify the error to all possible sockets via pfctlinput2.
1394 * Since the upper layer information (such as protocol type,
1395 * source and destination ports) is embedded in the encrypted
1396 * data and might have been cut, we can't directly call
1397 * an upper layer ctlinput function. However, the pcbnotify
1398 * function will consider source and destination addresses
1399 * as well as the flow info value, and may be able to find
1400 * some PCB that should be notified.
1401 * Although pfctlinput2 will call esp6_ctlinput(), there is
1402 * no possibility of an infinite loop of function calls,
1403 * because we don't pass the inner IPv6 header.
1404 */
1405 bzero(&ip6cp1, sizeof(ip6cp1));
1406 ip6cp1.ip6c_src = ip6cp->ip6c_src;
1407 pfctlinput2(cmd, sa, (void *)&ip6cp1);
1408
1409 /*
1410 * Then go to special cases that need ESP header information.
1411 * XXX: We assume that when ip6 is non-NULL,
1412 * M and OFF are valid.
1413 */
1414
1415 /* check if we can safely examine src and dst ports */
1416 if (m->m_pkthdr.len < off + sizeof(esp))
1417 return;
1418
1419 if (m->m_len < off + sizeof(esp)) {
1420 /*
1421 * this should be a rare case,
1422 * so we compromise on this copy...
1423 */
1424 m_copydata(m, off, sizeof(esp), (caddr_t)&esp);
1425 espp = &esp;
1426 } else
1427 espp = (struct newesp*)(void *)(mtod(m, caddr_t) + off);
1428
1429 if (cmd == PRC_MSGSIZE) {
1430 int valid = 0;
1431
1432 /*
1433 * Check to see if we have a valid SA corresponding to
1434 * the address in the ICMP message payload.
1435 */
1436 sa6_src = ip6cp->ip6c_src;
1437 sa6_dst = (struct sockaddr_in6 *)(void *)sa;
1438 sav = key_allocsa(AF_INET6,
1439 (caddr_t)&sa6_src->sin6_addr,
1440 (caddr_t)&sa6_dst->sin6_addr,
1441 IPPROTO_ESP, espp->esp_spi);
1442 if (sav) {
1443 if (sav->state == SADB_SASTATE_MATURE ||
1444 sav->state == SADB_SASTATE_DYING)
1445 valid++;
1446 key_freesav(sav, KEY_SADB_UNLOCKED);
1447 }
1448
1449 /* XXX Further validation? */
1450
1451 /*
1452 * Depending on the value of "valid" and routing table
1453 * size (mtudisc_{hi,lo}wat), we will:
1454 * - recalculate the new MTU and create the
1455 * corresponding routing entry, or
1456 * - ignore the MTU change notification.
1457 */
1458 icmp6_mtudisc_update((struct ip6ctlparam *)d, valid);
1459 }
1460 } else {
1461 /* we normally notify any pcb here */
1462 }
1463 }
1464 #endif /* INET6 */