apple/xnu xnu-3789.21.4: bsd/netinet6/esp_input.c
1 /*
2 * Copyright (c) 2008-2016 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 /* $FreeBSD: src/sys/netinet6/esp_input.c,v 1.1.2.3 2001/07/03 11:01:50 ume Exp $ */
30 /* $KAME: esp_input.c,v 1.55 2001/03/23 08:08:47 itojun Exp $ */
31
32 /*
33 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. Neither the name of the project nor the names of its contributors
45 * may be used to endorse or promote products derived from this software
46 * without specific prior written permission.
47 *
48 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
49 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
52 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 * SUCH DAMAGE.
59 */
60
61 /*
62 * RFC1827/2406 Encapsulated Security Payload.
63 */
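/*
 * Added note (not part of the original KAME code): the inbound packet
 * layout handled below follows RFC 2406.  After the outer IP (and, for
 * NAT traversal, an optional UDP) header, an ESP packet looks roughly
 * like this:
 *
 *   | SPI | Seq | IV | payload | padding | pad len | next hdr | ICV |
 *   |<----------- integrity-protected by the ICV ------------>|
 *                    |<------------- encrypted -------------->|
 *
 * esp4_input()/esp6_input() locate the SA from the SPI, check the
 * sequence number and ICV, decrypt, then use the trailer (pad length,
 * next header) to strip the padding and hand off the inner protocol.
 */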
64
65 #include <sys/param.h>
66 #include <sys/systm.h>
67 #include <sys/malloc.h>
68 #include <sys/mbuf.h>
69 #include <sys/mcache.h>
70 #include <sys/domain.h>
71 #include <sys/protosw.h>
72 #include <sys/socket.h>
73 #include <sys/errno.h>
74 #include <sys/time.h>
75 #include <sys/kernel.h>
76 #include <sys/syslog.h>
77
78 #include <net/if.h>
79 #include <net/if_ipsec.h>
80 #include <net/route.h>
81 #include <kern/cpu_number.h>
82 #include <kern/locks.h>
83
84 #include <netinet/in.h>
85 #include <netinet/in_systm.h>
86 #include <netinet/ip.h>
87 #include <netinet/ip_var.h>
88 #include <netinet/in_var.h>
89 #include <netinet/ip_ecn.h>
90 #include <netinet/in_pcb.h>
91 #include <netinet/udp.h>
92 #if INET6
93 #include <netinet6/ip6_ecn.h>
94 #endif
95
96 #if INET6
97 #include <netinet/ip6.h>
98 #include <netinet6/in6_pcb.h>
99 #include <netinet6/ip6_var.h>
100 #include <netinet/icmp6.h>
101 #include <netinet6/ip6protosw.h>
102 #endif
103
104 #include <netinet6/ipsec.h>
105 #if INET6
106 #include <netinet6/ipsec6.h>
107 #endif
108 #include <netinet6/ah.h>
109 #if INET6
110 #include <netinet6/ah6.h>
111 #endif
112 #include <netinet6/esp.h>
113 #if INET6
114 #include <netinet6/esp6.h>
115 #endif
116 #include <netkey/key.h>
117 #include <netkey/keydb.h>
118 #include <netkey/key_debug.h>
119
120 #include <net/kpi_protocol.h>
121 #include <netinet/kpi_ipfilter_var.h>
122
123 #include <net/net_osdep.h>
124 #include <mach/sdt.h>
125 #include <corecrypto/cc.h>
126
127 #include <sys/kdebug.h>
128 #define DBG_LAYER_BEG NETDBG_CODE(DBG_NETIPSEC, 1)
129 #define DBG_LAYER_END NETDBG_CODE(DBG_NETIPSEC, 3)
130 #define DBG_FNC_ESPIN NETDBG_CODE(DBG_NETIPSEC, (6 << 8))
131 #define DBG_FNC_DECRYPT NETDBG_CODE(DBG_NETIPSEC, (7 << 8))
132 #define IPLEN_FLIPPED
133
134 extern lck_mtx_t *sadb_mutex;
135
136 #if INET
137 #define ESPMAXLEN \
138 (sizeof(struct esp) < sizeof(struct newesp) \
139 ? sizeof(struct newesp) : sizeof(struct esp))
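/*
 * Added note: ESPMAXLEN is the larger of the two fixed-size ESP
 * headers, so a single m_pullup() below covers either format.  As a
 * rough sketch of the definitions in netinet6/esp.h (an assumption
 * about the exact layout, kept here only for orientation):
 *
 *   struct esp    { u_int32_t esp_spi; };                    // RFC 1827 (old)
 *   struct newesp { u_int32_t esp_spi; u_int32_t esp_seq; }; // RFC 2406 (new)
 */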
140
141 static struct ip *
142 esp4_input_strip_udp_encap (struct mbuf *m, int iphlen)
143 {
144 // strip the udp header that's encapsulating ESP
145 struct ip *ip;
146 size_t stripsiz = sizeof(struct udphdr);
147
148 ip = mtod(m, __typeof__(ip));
149 ovbcopy((caddr_t)ip, (caddr_t)(((u_char *)ip) + stripsiz), iphlen);
150 m->m_data += stripsiz;
151 m->m_len -= stripsiz;
152 m->m_pkthdr.len -= stripsiz;
153 ip = mtod(m, __typeof__(ip));
154 ip->ip_len = ip->ip_len - stripsiz;
155 ip->ip_p = IPPROTO_ESP;
156 return ip;
157 }
158
159 static struct ip6_hdr *
160 esp6_input_strip_udp_encap (struct mbuf *m, int ip6hlen)
161 {
162 // strip the udp header that's encapsulating ESP
163 struct ip6_hdr *ip6;
164 size_t stripsiz = sizeof(struct udphdr);
165
166 ip6 = mtod(m, __typeof__(ip6));
167 ovbcopy((caddr_t)ip6, (caddr_t)(((u_char *)ip6) + stripsiz), ip6hlen);
168 m->m_data += stripsiz;
169 m->m_len -= stripsiz;
170 m->m_pkthdr.len -= stripsiz;
171 ip6 = mtod(m, __typeof__(ip6));
172 ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - stripsiz);
173 ip6->ip6_nxt = IPPROTO_ESP;
174 return ip6;
175 }
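/*
 * Added note: the two helpers above handle UDP-encapsulated ESP for
 * NAT traversal (RFC 3948).  They slide the IP/IPv6 header forward
 * over the UDP header so the rest of the input path sees a plain ESP
 * packet, e.g. for IPv4:
 *
 *   before:  [ IP, proto UDP ][ UDP ][ ESP ... ]
 *   after:   [ IP, proto ESP ][ ESP ... ]
 *
 * ip_len / ip6_plen shrink by sizeof(struct udphdr) and the protocol /
 * next-header field is rewritten to IPPROTO_ESP.
 */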
176
177 void
178 esp4_input(struct mbuf *m, int off)
179 {
180 struct ip *ip;
181 #if INET6
182 struct ip6_hdr *ip6;
183 #endif /* INET6 */
184 struct esp *esp;
185 struct esptail esptail;
186 u_int32_t spi;
187 u_int32_t seq;
188 struct secasvar *sav = NULL;
189 size_t taillen;
190 u_int16_t nxt;
191 const struct esp_algorithm *algo;
192 int ivlen;
193 size_t hlen;
194 size_t esplen;
195 sa_family_t ifamily;
196
197 KERNEL_DEBUG(DBG_FNC_ESPIN | DBG_FUNC_START, 0,0,0,0,0);
198 /* sanity check for alignment. */
199 if (off % 4 != 0 || m->m_pkthdr.len % 4 != 0) {
200 ipseclog((LOG_ERR, "IPv4 ESP input: packet alignment problem "
201 "(off=%d, pktlen=%d)\n", off, m->m_pkthdr.len));
202 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
203 goto bad;
204 }
205
206 if (m->m_len < off + ESPMAXLEN) {
207 m = m_pullup(m, off + ESPMAXLEN);
208 if (!m) {
209 ipseclog((LOG_DEBUG,
210 "IPv4 ESP input: can't pullup in esp4_input\n"));
211 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
212 goto bad;
213 }
214 }
215
216 /* Expect 32-bit aligned data pointer on strict-align platforms */
217 MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);
218
219 ip = mtod(m, struct ip *);
220 // expect udp-encap and esp packets only
221 if (ip->ip_p != IPPROTO_ESP &&
222 !(ip->ip_p == IPPROTO_UDP && off >= sizeof(struct udphdr))) {
223 ipseclog((LOG_DEBUG,
224 "IPv4 ESP input: invalid protocol type\n"));
225 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
226 goto bad;
227 }
228 esp = (struct esp *)(void *)(((u_int8_t *)ip) + off);
229 #ifdef _IP_VHL
230 hlen = IP_VHL_HL(ip->ip_vhl) << 2;
231 #else
232 hlen = ip->ip_hl << 2;
233 #endif
234
235 /* find the sassoc. */
236 spi = esp->esp_spi;
237
238 if ((sav = key_allocsa(AF_INET,
239 (caddr_t)&ip->ip_src, (caddr_t)&ip->ip_dst,
240 IPPROTO_ESP, spi)) == 0) {
241 ipseclog((LOG_WARNING,
242 "IPv4 ESP input: no key association found for spi %u\n",
243 (u_int32_t)ntohl(spi)));
244 IPSEC_STAT_INCREMENT(ipsecstat.in_nosa);
245 goto bad;
246 }
247 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
248 printf("DP esp4_input called to allocate SA:0x%llx\n",
249 (uint64_t)VM_KERNEL_ADDRPERM(sav)));
250 if (sav->state != SADB_SASTATE_MATURE
251 && sav->state != SADB_SASTATE_DYING) {
252 ipseclog((LOG_DEBUG,
253 "IPv4 ESP input: non-mature/dying SA found for spi %u\n",
254 (u_int32_t)ntohl(spi)));
255 IPSEC_STAT_INCREMENT(ipsecstat.in_badspi);
256 goto bad;
257 }
258 algo = esp_algorithm_lookup(sav->alg_enc);
259 if (!algo) {
260 ipseclog((LOG_DEBUG, "IPv4 ESP input: "
261 "unsupported encryption algorithm for spi %u\n",
262 (u_int32_t)ntohl(spi)));
263 IPSEC_STAT_INCREMENT(ipsecstat.in_badspi);
264 goto bad;
265 }
266
267 /* check if we have proper ivlen information */
268 ivlen = sav->ivlen;
269 if (ivlen < 0) {
270 ipseclog((LOG_ERR, "improper ivlen in IPv4 ESP input: %s %s\n",
271 ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
272 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
273 goto bad;
274 }
275
276 seq = ntohl(((struct newesp *)esp)->esp_seq);
277
278 /* Save ICV from packet for verification later */
279 size_t siz = 0;
280 unsigned char saved_icv[AH_MAXSUMSIZE];
281 if (algo->finalizedecrypt) {
282 siz = algo->icvlen;
283 m_copydata(m, m->m_pkthdr.len - siz, siz, (caddr_t) saved_icv);
284 goto delay_icv;
285 }
286
287 if (!((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay
288 && (sav->alg_auth && sav->key_auth)))
289 goto noreplaycheck;
290
291 if (sav->alg_auth == SADB_X_AALG_NULL ||
292 sav->alg_auth == SADB_AALG_NONE)
293 goto noreplaycheck;
294
295 /*
296 * check for sequence number.
297 */
298 if (ipsec_chkreplay(seq, sav))
299 ; /* okay */
300 else {
301 IPSEC_STAT_INCREMENT(ipsecstat.in_espreplay);
302 ipseclog((LOG_WARNING,
303 "replay packet in IPv4 ESP input: %s %s\n",
304 ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
305 goto bad;
306 }
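/*
 * Added note: ipsec_chkreplay() only tests the sequence number against
 * the SA's anti-replay window; the window itself is advanced later, by
 * the ipsec_updatereplay() call that follows the ICV check, so that an
 * unauthenticated packet cannot slide the window forward.
 */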
307
308 /* check ICV */
309 {
310 u_char sum0[AH_MAXSUMSIZE] __attribute__((aligned(4)));
311 u_char sum[AH_MAXSUMSIZE] __attribute__((aligned(4)));
312 const struct ah_algorithm *sumalgo;
313
314 sumalgo = ah_algorithm_lookup(sav->alg_auth);
315 if (!sumalgo)
316 goto noreplaycheck;
317 siz = (((*sumalgo->sumsiz)(sav) + 3) & ~(4 - 1));
318 if (m->m_pkthdr.len < off + ESPMAXLEN + siz) {
319 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
320 goto bad;
321 }
322 if (AH_MAXSUMSIZE < siz) {
323 ipseclog((LOG_DEBUG,
324 "internal error: AH_MAXSUMSIZE must be larger than %lu\n",
325 (u_long)siz));
326 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
327 goto bad;
328 }
329
330 m_copydata(m, m->m_pkthdr.len - siz, siz, (caddr_t) &sum0[0]);
331
332 if (esp_auth(m, off, m->m_pkthdr.len - off - siz, sav, sum)) {
333 ipseclog((LOG_WARNING, "auth fail in IPv4 ESP input: %s %s\n",
334 ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
335 IPSEC_STAT_INCREMENT(ipsecstat.in_espauthfail);
336 goto bad;
337 }
338
339 if (cc_cmp_safe(siz, sum0, sum)) {
340 ipseclog((LOG_WARNING, "auth fail in IPv4 ESP input: %s %s\n",
341 ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
342 IPSEC_STAT_INCREMENT(ipsecstat.in_espauthfail);
343 goto bad;
344 }
345
346 delay_icv:
347
348 /* strip off the authentication data */
349 m_adj(m, -siz);
350 ip = mtod(m, struct ip *);
351 #ifdef IPLEN_FLIPPED
352 ip->ip_len = ip->ip_len - siz;
353 #else
354 ip->ip_len = htons(ntohs(ip->ip_len) - siz);
355 #endif
356 m->m_flags |= M_AUTHIPDGM;
357 IPSEC_STAT_INCREMENT(ipsecstat.in_espauthsucc);
358 }
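/*
 * Added note: two ICV strategies meet at this point.  For HMAC-style
 * authenticators the ICV was recomputed with esp_auth() and compared
 * using cc_cmp_safe(), corecrypto's constant-time comparison, before
 * any decryption.  For combined-mode algorithms that provide
 * finalizedecrypt (AES-GCM, for example), the packet's ICV was saved
 * up front and is verified only after decryption in the
 * finalizedecrypt block below; the delay_icv label lets that path
 * share the stripping of the trailing ICV bytes done here.
 */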
359
360 /*
361 * update sequence number.
362 */
363 if ((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay) {
364 if (ipsec_updatereplay(seq, sav)) {
365 IPSEC_STAT_INCREMENT(ipsecstat.in_espreplay);
366 goto bad;
367 }
368 }
369
370 noreplaycheck:
371
372 /* process main esp header. */
373 if (sav->flags & SADB_X_EXT_OLD) {
374 /* RFC 1827 */
375 esplen = sizeof(struct esp);
376 } else {
377 /* RFC 2406 */
378 if (sav->flags & SADB_X_EXT_DERIV)
379 esplen = sizeof(struct esp);
380 else
381 esplen = sizeof(struct newesp);
382 }
383
384 if (m->m_pkthdr.len < off + esplen + ivlen + sizeof(esptail)) {
385 ipseclog((LOG_WARNING,
386 "IPv4 ESP input: packet too short\n"));
387 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
388 goto bad;
389 }
390
391 if (m->m_len < off + esplen + ivlen) {
392 m = m_pullup(m, off + esplen + ivlen);
393 if (!m) {
394 ipseclog((LOG_DEBUG,
395 "IPv4 ESP input: can't pullup in esp4_input\n"));
396 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
397 goto bad;
398 }
399 }
400
401 /*
402 * pre-compute and cache intermediate key
403 */
404 if (esp_schedule(algo, sav) != 0) {
405 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
406 goto bad;
407 }
408
409 /*
410 * decrypt the packet.
411 */
412 if (!algo->decrypt)
413 panic("internal error: no decrypt function");
414 KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_START, 0,0,0,0,0);
415 if ((*algo->decrypt)(m, off, sav, algo, ivlen)) {
416 /* m is already freed */
417 m = NULL;
418 ipseclog((LOG_ERR, "decrypt fail in IPv4 ESP input: %s\n",
419 ipsec_logsastr(sav)));
420 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
421 KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 1,0,0,0,0);
422 goto bad;
423 }
424 KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 2,0,0,0,0);
425 IPSEC_STAT_INCREMENT(ipsecstat.in_esphist[sav->alg_enc]);
426
427 m->m_flags |= M_DECRYPTED;
428
429 if (algo->finalizedecrypt)
430 {
431 unsigned char tag[algo->icvlen];
432 if ((*algo->finalizedecrypt)(sav, tag, algo->icvlen)) {
433 ipseclog((LOG_ERR, "packet decryption ICV failure\n"));
434 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
435 KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 1,0,0,0,0);
436 goto bad;
437 }
438 if (cc_cmp_safe(algo->icvlen, saved_icv, tag)) {
439 ipseclog((LOG_ERR, "packet decryption ICV mismatch\n"));
440 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
441 KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 1,0,0,0,0);
442 goto bad;
443 }
444 }
445
446 /*
447 * find the trailer of the ESP.
448 */
449 m_copydata(m, m->m_pkthdr.len - sizeof(esptail), sizeof(esptail),
450 (caddr_t)&esptail);
451 nxt = esptail.esp_nxt;
452 taillen = esptail.esp_padlen + sizeof(esptail);
453
454 if (m->m_pkthdr.len < taillen
455 || m->m_pkthdr.len - taillen < hlen) { /*?*/
456 ipseclog((LOG_WARNING,
457 "bad pad length in IPv4 ESP input: %s %s\n",
458 ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
459 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
460 goto bad;
461 }
462
463 /* strip off the trailing pad area. */
464 m_adj(m, -taillen);
465 ip = mtod(m, struct ip *);
466 #ifdef IPLEN_FLIPPED
467 ip->ip_len = ip->ip_len - taillen;
468 #else
469 ip->ip_len = htons(ntohs(ip->ip_len) - taillen);
470 #endif
471 if (ip->ip_p == IPPROTO_UDP) {
472 // offset includes the outer ip and udp header lengths.
473 if (m->m_len < off) {
474 m = m_pullup(m, off);
475 if (!m) {
476 ipseclog((LOG_DEBUG,
477 "IPv4 ESP input: invalid udp encapsulated ESP packet length \n"));
478 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
479 goto bad;
480 }
481 }
482
483 // check the UDP encap header to detect changes in the source port, and then strip the header
484 off -= sizeof(struct udphdr); // off no longer includes the udphdr's size
485 // if peer is behind nat and this is the latest esp packet
486 if ((sav->flags & SADB_X_EXT_NATT_DETECTED_PEER) != 0 &&
487 (sav->flags & SADB_X_EXT_OLD) == 0 &&
488 seq && sav->replay &&
489 seq >= sav->replay->lastseq) {
490 struct udphdr *encap_uh = (__typeof__(encap_uh))(void *)((caddr_t)ip + off);
491 if (encap_uh->uh_sport &&
492 ntohs(encap_uh->uh_sport) != sav->remote_ike_port) {
493 sav->remote_ike_port = ntohs(encap_uh->uh_sport);
494 }
495 }
496 ip = esp4_input_strip_udp_encap(m, off);
497 esp = (struct esp *)(void *)(((u_int8_t *)ip) + off);
498 }
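/*
 * Added note: for UDP-encapsulated ESP the block above first looks at
 * the outer UDP source port; a change relative to remote_ike_port
 * indicates that the peer's NAT mapping moved, and the port is
 * re-learned only from the newest packet (seq >= lastseq) so a stale,
 * out-of-order packet cannot overwrite it.  The UDP header is then
 * stripped, leaving an ordinary ESP-in-IPv4 packet for the tunnel /
 * transport handling below.
 */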
499
500 /* was it transmitted over the IPsec tunnel SA? */
501 if (ipsec4_tunnel_validate(m, off + esplen + ivlen, nxt, sav, &ifamily)) {
502 ifaddr_t ifa;
503 struct sockaddr_storage addr;
504
505 /*
506 * strip off all the headers that precede the ESP header.
507 * IP4 xx ESP IP4' payload -> IP4' payload
508 *
509 * XXX more sanity checks
510 * XXX relationship with gif?
511 */
512 u_int8_t tos, otos;
513 int sum;
514
515 tos = ip->ip_tos;
516 m_adj(m, off + esplen + ivlen);
517 if (ifamily == AF_INET) {
518 struct sockaddr_in *ipaddr;
519
520 if (m->m_len < sizeof(*ip)) {
521 m = m_pullup(m, sizeof(*ip));
522 if (!m) {
523 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
524 goto bad;
525 }
526 }
527 ip = mtod(m, struct ip *);
528 /* ECN consideration. */
529
530 otos = ip->ip_tos;
531 if (ip_ecn_egress(ip4_ipsec_ecn, &tos, &ip->ip_tos) == 0) {
532 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
533 goto bad;
534 }
535
536 if (otos != ip->ip_tos) {
537 sum = ~ntohs(ip->ip_sum) & 0xffff;
538 sum += (~otos & 0xffff) + ip->ip_tos;
539 sum = (sum >> 16) + (sum & 0xffff);
540 sum += (sum >> 16); /* add carry */
541 ip->ip_sum = htons(~sum & 0xffff);
542 }
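/*
 * Added note: the fixup above is the incremental checksum update of
 * RFC 1624.  With HC the old header checksum, m the old ToS byte and
 * m' the new one, the new checksum is HC' = ~(~HC + ~m + m'), with the
 * carry folded back into 16 bits.  Only ip_tos changed (ECN egress
 * processing), so recomputing the whole header checksum is avoided.
 */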
543
544 if (!key_checktunnelsanity(sav, AF_INET,
545 (caddr_t)&ip->ip_src, (caddr_t)&ip->ip_dst)) {
546 ipseclog((LOG_ERR, "ipsec tunnel address mismatch "
547 "in ESP input: %s %s\n",
548 ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
549 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
550 goto bad;
551 }
552
553 bzero(&addr, sizeof(addr));
554 ipaddr = (__typeof__(ipaddr))&addr;
555 ipaddr->sin_family = AF_INET;
556 ipaddr->sin_len = sizeof(*ipaddr);
557 ipaddr->sin_addr = ip->ip_dst;
558 #if INET6
559 } else if (ifamily == AF_INET6) {
560 struct sockaddr_in6 *ip6addr;
561
562 /*
563 * m_pullup is prohibited in KAME IPv6 input processing
564 * but there's no other way!
565 */
566 if (m->m_len < sizeof(*ip6)) {
567 m = m_pullup(m, sizeof(*ip6));
568 if (!m) {
569 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
570 goto bad;
571 }
572 }
573
574 /*
575 * Expect 32-bit aligned data pointer on strict-align
576 * platforms.
577 */
578 MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);
579
580 ip6 = mtod(m, struct ip6_hdr *);
581
582 /* ECN consideration. */
583 if (ip64_ecn_egress(ip4_ipsec_ecn, &tos, &ip6->ip6_flow) == 0) {
584 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
585 goto bad;
586 }
587
588 if (!key_checktunnelsanity(sav, AF_INET6,
589 (caddr_t)&ip6->ip6_src, (caddr_t)&ip6->ip6_dst)) {
590 ipseclog((LOG_ERR, "ipsec tunnel address mismatch "
591 "in ESP input: %s %s\n",
592 ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
593 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
594 goto bad;
595 }
596
597 bzero(&addr, sizeof(addr));
598 ip6addr = (__typeof__(ip6addr))&addr;
599 ip6addr->sin6_family = AF_INET6;
600 ip6addr->sin6_len = sizeof(*ip6addr);
601 ip6addr->sin6_addr = ip6->ip6_dst;
602 #endif /* INET6 */
603 } else {
604 ipseclog((LOG_ERR, "ipsec tunnel unsupported address family "
605 "in ESP input\n"));
606 goto bad;
607 }
608
609 key_sa_recordxfer(sav, m);
610 if (ipsec_addhist(m, IPPROTO_ESP, spi) != 0 ||
611 ipsec_addhist(m, IPPROTO_IPV4, 0) != 0) {
612 IPSEC_STAT_INCREMENT(ipsecstat.in_nomem);
613 goto bad;
614 }
615
616 // update the receiving interface address based on the inner address
617 ifa = ifa_ifwithaddr((struct sockaddr *)&addr);
618 if (ifa) {
619 m->m_pkthdr.rcvif = ifa->ifa_ifp;
620 IFA_REMREF(ifa);
621 }
622
623 /* Clear the csum flags, they can't be valid for the inner headers */
624 m->m_pkthdr.csum_flags = 0;
625
626 // Input via IPSec interface
627 if (sav->sah->ipsec_if != NULL) {
628 if (ipsec_inject_inbound_packet(sav->sah->ipsec_if, m) == 0) {
629 m = NULL;
630 goto done;
631 } else {
632 goto bad;
633 }
634 }
635
636 if (proto_input(ifamily == AF_INET ? PF_INET : PF_INET6, m) != 0)
637 goto bad;
638
639 nxt = IPPROTO_DONE;
640 KERNEL_DEBUG(DBG_FNC_ESPIN | DBG_FUNC_END, 2,0,0,0,0);
641 } else {
642 /*
643 * strip off ESP header and IV.
644 * even in m_pulldown case, we need to strip off ESP so that
645 * we can always compute checksum for AH correctly.
646 */
647 size_t stripsiz;
648
649 stripsiz = esplen + ivlen;
650
651 ip = mtod(m, struct ip *);
652 ovbcopy((caddr_t)ip, (caddr_t)(((u_char *)ip) + stripsiz), off);
653 m->m_data += stripsiz;
654 m->m_len -= stripsiz;
655 m->m_pkthdr.len -= stripsiz;
656
657 ip = mtod(m, struct ip *);
658 #ifdef IPLEN_FLIPPED
659 ip->ip_len = ip->ip_len - stripsiz;
660 #else
661 ip->ip_len = htons(ntohs(ip->ip_len) - stripsiz);
662 #endif
663 ip->ip_p = nxt;
664
665 key_sa_recordxfer(sav, m);
666 if (ipsec_addhist(m, IPPROTO_ESP, spi) != 0) {
667 IPSEC_STAT_INCREMENT(ipsecstat.in_nomem);
668 goto bad;
669 }
670
671 /*
672 * Set the csum valid flag. If we authenticated the
673 * packet, the payload shouldn't be corrupt unless
674 * it was corrupted before being signed on the other
675 * side.
676 */
677 if (nxt == IPPROTO_TCP || nxt == IPPROTO_UDP) {
678 m->m_pkthdr.csum_flags = CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
679 m->m_pkthdr.csum_data = 0xFFFF;
680 }
681
682 if (nxt != IPPROTO_DONE) {
683 if ((ip_protox[nxt]->pr_flags & PR_LASTHDR) != 0 &&
684 ipsec4_in_reject(m, NULL)) {
685 IPSEC_STAT_INCREMENT(ipsecstat.in_polvio);
686 goto bad;
687 }
688 KERNEL_DEBUG(DBG_FNC_ESPIN | DBG_FUNC_END, 3,0,0,0,0);
689
690 /* translate encapsulated UDP port ? */
691 if ((sav->flags & SADB_X_EXT_NATT_MULTIPLEUSERS) != 0) {
692 struct udphdr *udp;
693
694 if (nxt != IPPROTO_UDP) { /* not a UDP packet - drop it */
695 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
696 goto bad;
697 }
698
699 if (m->m_len < off + sizeof(struct udphdr)) {
700 m = m_pullup(m, off + sizeof(struct udphdr));
701 if (!m) {
702 ipseclog((LOG_DEBUG,
703 "IPv4 ESP input: can't pullup UDP header in esp4_input\n"));
704 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
705 goto bad;
706 }
707 ip = mtod(m, struct ip *);
708 }
709 udp = (struct udphdr *)(void *)(((u_int8_t *)ip) + off);
710
711 lck_mtx_lock(sadb_mutex);
712 if (sav->natt_encapsulated_src_port == 0) {
713 sav->natt_encapsulated_src_port = udp->uh_sport;
714 } else if (sav->natt_encapsulated_src_port != udp->uh_sport) { /* something wrong */
715 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
716 lck_mtx_unlock(sadb_mutex);
717 goto bad;
718 }
719 lck_mtx_unlock(sadb_mutex);
720 udp->uh_sport = htons(sav->remote_ike_port);
721 udp->uh_sum = 0;
722 }
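/*
 * Added note (describing the block above): when the SA carries
 * SADB_X_EXT_NATT_MULTIPLEUSERS, the encapsulated UDP source port is
 * latched from the first packet seen and any later packet with a
 * different source port is dropped; the port is then rewritten to the
 * negotiated remote_ike_port and the UDP checksum is zeroed because
 * the header was just modified.
 */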
723
724 DTRACE_IP6(receive, struct mbuf *, m, struct inpcb *, NULL,
725 struct ip *, ip, struct ifnet *, m->m_pkthdr.rcvif,
726 struct ip *, ip, struct ip6_hdr *, NULL);
727
728 // Input via IPSec interface
729 if (sav->sah->ipsec_if != NULL) {
730 ip->ip_len = htons(ip->ip_len + hlen);
731 ip->ip_off = htons(ip->ip_off);
732 ip->ip_sum = 0;
733 ip->ip_sum = ip_cksum_hdr_in(m, hlen);
734 if (ipsec_inject_inbound_packet(sav->sah->ipsec_if, m) == 0) {
735 m = NULL;
736 goto done;
737 } else {
738 goto bad;
739 }
740 }
741
742 ip_proto_dispatch_in(m, off, nxt, 0);
743 } else
744 m_freem(m);
745 m = NULL;
746 }
747
748 done:
749 if (sav) {
750 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
751 printf("DP esp4_input call free SA:0x%llx\n",
752 (uint64_t)VM_KERNEL_ADDRPERM(sav)));
753 key_freesav(sav, KEY_SADB_UNLOCKED);
754 }
755 IPSEC_STAT_INCREMENT(ipsecstat.in_success);
756 return;
757
758 bad:
759 if (sav) {
760 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
761 printf("DP esp4_input call free SA:0x%llx\n",
762 (uint64_t)VM_KERNEL_ADDRPERM(sav)));
763 key_freesav(sav, KEY_SADB_UNLOCKED);
764 }
765 if (m)
766 m_freem(m);
767 KERNEL_DEBUG(DBG_FNC_ESPIN | DBG_FUNC_END, 4,0,0,0,0);
768 return;
769 }
770 #endif /* INET */
771
772 #if INET6
773 int
774 esp6_input(struct mbuf **mp, int *offp, int proto)
775 {
776 #pragma unused(proto)
777 struct mbuf *m = *mp;
778 int off = *offp;
779 struct ip *ip;
780 struct ip6_hdr *ip6;
781 struct esp *esp;
782 struct esptail esptail;
783 u_int32_t spi;
784 u_int32_t seq;
785 struct secasvar *sav = NULL;
786 size_t taillen;
787 u_int16_t nxt;
788 char *nproto;
789 const struct esp_algorithm *algo;
790 int ivlen;
791 size_t esplen;
792 sa_family_t ifamily;
793
794 /* sanity check for alignment. */
795 if (off % 4 != 0 || m->m_pkthdr.len % 4 != 0) {
796 ipseclog((LOG_ERR, "IPv6 ESP input: packet alignment problem "
797 "(off=%d, pktlen=%d)\n", off, m->m_pkthdr.len));
798 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
799 goto bad;
800 }
801
802 #ifndef PULLDOWN_TEST
803 IP6_EXTHDR_CHECK(m, off, ESPMAXLEN, {return IPPROTO_DONE;});
804 esp = (struct esp *)(void *)(mtod(m, caddr_t) + off);
805 #else
806 IP6_EXTHDR_GET(esp, struct esp *, m, off, ESPMAXLEN);
807 if (esp == NULL) {
808 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
809 return IPPROTO_DONE;
810 }
811 #endif
812 /* Expect 32-bit aligned data pointer on strict-align platforms */
813 MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);
814
815 ip6 = mtod(m, struct ip6_hdr *);
816
817 if (ntohs(ip6->ip6_plen) == 0) {
818 ipseclog((LOG_ERR, "IPv6 ESP input: "
819 "ESP with IPv6 jumbogram is not supported.\n"));
820 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
821 goto bad;
822 }
823
824 nproto = ip6_get_prevhdr(m, off);
825 if (nproto == NULL || (*nproto != IPPROTO_ESP &&
826 !(*nproto == IPPROTO_UDP && off >= sizeof(struct udphdr)))) {
827 ipseclog((LOG_DEBUG, "IPv6 ESP input: invalid protocol type\n"));
828 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
829 goto bad;
830 }
831
832 /* find the sassoc. */
833 spi = esp->esp_spi;
834
835 if ((sav = key_allocsa(AF_INET6,
836 (caddr_t)&ip6->ip6_src, (caddr_t)&ip6->ip6_dst,
837 IPPROTO_ESP, spi)) == 0) {
838 ipseclog((LOG_WARNING,
839 "IPv6 ESP input: no key association found for spi %u\n",
840 (u_int32_t)ntohl(spi)));
841 IPSEC_STAT_INCREMENT(ipsec6stat.in_nosa);
842 goto bad;
843 }
844 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
845 printf("DP esp6_input called to allocate SA:0x%llx\n",
846 (uint64_t)VM_KERNEL_ADDRPERM(sav)));
847 if (sav->state != SADB_SASTATE_MATURE
848 && sav->state != SADB_SASTATE_DYING) {
849 ipseclog((LOG_DEBUG,
850 "IPv6 ESP input: non-mature/dying SA found for spi %u\n",
851 (u_int32_t)ntohl(spi)));
852 IPSEC_STAT_INCREMENT(ipsec6stat.in_badspi);
853 goto bad;
854 }
855 algo = esp_algorithm_lookup(sav->alg_enc);
856 if (!algo) {
857 ipseclog((LOG_DEBUG, "IPv6 ESP input: "
858 "unsupported encryption algorithm for spi %u\n",
859 (u_int32_t)ntohl(spi)));
860 IPSEC_STAT_INCREMENT(ipsec6stat.in_badspi);
861 goto bad;
862 }
863
864 /* check if we have proper ivlen information */
865 ivlen = sav->ivlen;
866 if (ivlen < 0) {
867 ipseclog((LOG_ERR, "improper ivlen in IPv6 ESP input: %s %s\n",
868 ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
869 IPSEC_STAT_INCREMENT(ipsec6stat.in_badspi);
870 goto bad;
871 }
872
873 seq = ntohl(((struct newesp *)esp)->esp_seq);
874
875 /* Save ICV from packet for verification later */
876 size_t siz = 0;
877 unsigned char saved_icv[AH_MAXSUMSIZE];
878 if (algo->finalizedecrypt) {
879 siz = algo->icvlen;
880 m_copydata(m, m->m_pkthdr.len - siz, siz, (caddr_t) saved_icv);
881 goto delay_icv;
882 }
883
884 if (!((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay
885 && (sav->alg_auth && sav->key_auth)))
886 goto noreplaycheck;
887
888 if (sav->alg_auth == SADB_X_AALG_NULL ||
889 sav->alg_auth == SADB_AALG_NONE)
890 goto noreplaycheck;
891
892 /*
893 * check for sequence number.
894 */
895 if (ipsec_chkreplay(seq, sav))
896 ; /* okay */
897 else {
898 IPSEC_STAT_INCREMENT(ipsec6stat.in_espreplay);
899 ipseclog((LOG_WARNING,
900 "replay packet in IPv6 ESP input: %s %s\n",
901 ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
902 goto bad;
903 }
904
905 /* check ICV */
906 {
907 u_char sum0[AH_MAXSUMSIZE] __attribute__((aligned(4)));
908 u_char sum[AH_MAXSUMSIZE] __attribute__((aligned(4)));
909 const struct ah_algorithm *sumalgo;
910
911 sumalgo = ah_algorithm_lookup(sav->alg_auth);
912 if (!sumalgo)
913 goto noreplaycheck;
914 siz = (((*sumalgo->sumsiz)(sav) + 3) & ~(4 - 1));
915 if (m->m_pkthdr.len < off + ESPMAXLEN + siz) {
916 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
917 goto bad;
918 }
919 if (AH_MAXSUMSIZE < siz) {
920 ipseclog((LOG_DEBUG,
921 "internal error: AH_MAXSUMSIZE must be larger than %lu\n",
922 (u_long)siz));
923 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
924 goto bad;
925 }
926
927 m_copydata(m, m->m_pkthdr.len - siz, siz, (caddr_t) &sum0[0]);
928
929 if (esp_auth(m, off, m->m_pkthdr.len - off - siz, sav, sum)) {
930 ipseclog((LOG_WARNING, "auth fail in IPv6 ESP input: %s %s\n",
931 ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
932 IPSEC_STAT_INCREMENT(ipsec6stat.in_espauthfail);
933 goto bad;
934 }
935
936 if (cc_cmp_safe(siz, sum0, sum)) {
937 ipseclog((LOG_WARNING, "auth fail in IPv6 ESP input: %s %s\n",
938 ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
939 IPSEC_STAT_INCREMENT(ipsec6stat.in_espauthfail);
940 goto bad;
941 }
942
943 delay_icv:
944
945 /* strip off the authentication data */
946 m_adj(m, -siz);
947 ip6 = mtod(m, struct ip6_hdr *);
948 ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - siz);
949
950 m->m_flags |= M_AUTHIPDGM;
951 IPSEC_STAT_INCREMENT(ipsec6stat.in_espauthsucc);
952 }
953
954 /*
955 * update sequence number.
956 */
957 if ((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay) {
958 if (ipsec_updatereplay(seq, sav)) {
959 IPSEC_STAT_INCREMENT(ipsec6stat.in_espreplay);
960 goto bad;
961 }
962 }
963
964 noreplaycheck:
965
966 /* process main esp header. */
967 if (sav->flags & SADB_X_EXT_OLD) {
968 /* RFC 1827 */
969 esplen = sizeof(struct esp);
970 } else {
971 /* RFC 2406 */
972 if (sav->flags & SADB_X_EXT_DERIV)
973 esplen = sizeof(struct esp);
974 else
975 esplen = sizeof(struct newesp);
976 }
977
978 if (m->m_pkthdr.len < off + esplen + ivlen + sizeof(esptail)) {
979 ipseclog((LOG_WARNING,
980 "IPv6 ESP input: packet too short\n"));
981 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
982 goto bad;
983 }
984
985 #ifndef PULLDOWN_TEST
986 IP6_EXTHDR_CHECK(m, off, esplen + ivlen, return IPPROTO_DONE); /*XXX*/
987 #else
988 IP6_EXTHDR_GET(esp, struct esp *, m, off, esplen + ivlen);
989 if (esp == NULL) {
990 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
991 m = NULL;
992 goto bad;
993 }
994 #endif
995 ip6 = mtod(m, struct ip6_hdr *); /*set it again just in case*/
996
997 /*
998 * pre-compute and cache intermediate key
999 */
1000 if (esp_schedule(algo, sav) != 0) {
1001 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
1002 goto bad;
1003 }
1004
1005 /*
1006 * decrypt the packet.
1007 */
1008 if (!algo->decrypt)
1009 panic("internal error: no decrypt function");
1010 if ((*algo->decrypt)(m, off, sav, algo, ivlen)) {
1011 /* m is already freed */
1012 m = NULL;
1013 ipseclog((LOG_ERR, "decrypt fail in IPv6 ESP input: %s\n",
1014 ipsec_logsastr(sav)));
1015 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
1016 goto bad;
1017 }
1018 IPSEC_STAT_INCREMENT(ipsec6stat.in_esphist[sav->alg_enc]);
1019
1020 m->m_flags |= M_DECRYPTED;
1021
1022 if (algo->finalizedecrypt)
1023 {
1024 unsigned char tag[algo->icvlen];
1025 if ((*algo->finalizedecrypt)(sav, tag, algo->icvlen)) {
1026 ipseclog((LOG_ERR, "packet decryption ICV failure\n"));
1027 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
1028 KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 1,0,0,0,0);
1029 goto bad;
1030 }
1031 if (cc_cmp_safe(algo->icvlen, saved_icv, tag)) {
1032 ipseclog((LOG_ERR, "packet decryption ICV mismatch\n"));
1033 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
1034 KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 1,0,0,0,0);
1035 goto bad;
1036 }
1037 }
1038
1039 /*
1040 * find the trailer of the ESP.
1041 */
1042 m_copydata(m, m->m_pkthdr.len - sizeof(esptail), sizeof(esptail),
1043 (caddr_t)&esptail);
1044 nxt = esptail.esp_nxt;
1045 taillen = esptail.esp_padlen + sizeof(esptail);
1046
1047 if (m->m_pkthdr.len < taillen
1048 || m->m_pkthdr.len - taillen < sizeof(struct ip6_hdr)) { /*?*/
1049 ipseclog((LOG_WARNING,
1050 "bad pad length in IPv6 ESP input: %s %s\n",
1051 ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
1052 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
1053 goto bad;
1054 }
1055
1056 /* strip off the trailing pad area. */
1057 m_adj(m, -taillen);
1058 ip6 = mtod(m, struct ip6_hdr *);
1059 ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - taillen);
1060
1061 if (*nproto == IPPROTO_UDP) {
1062 // offset includes the outer ip and udp header lengths.
1063 if (m->m_len < off) {
1064 m = m_pullup(m, off);
1065 if (!m) {
1066 ipseclog((LOG_DEBUG,
1067 "IPv6 ESP input: invalid udp encapsulated ESP packet length\n"));
1068 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
1069 goto bad;
1070 }
1071 }
1072
1073 // check the UDP encap header to detect changes in the source port, and then strip the header
1074 off -= sizeof(struct udphdr); // off no longer includes the udphdr's size
1075 // if peer is behind nat and this is the latest esp packet
1076 if ((sav->flags & SADB_X_EXT_NATT_DETECTED_PEER) != 0 &&
1077 (sav->flags & SADB_X_EXT_OLD) == 0 &&
1078 seq && sav->replay &&
1079 seq >= sav->replay->lastseq) {
1080 struct udphdr *encap_uh = (__typeof__(encap_uh))(void *)((caddr_t)ip6 + off);
1081 if (encap_uh->uh_sport &&
1082 ntohs(encap_uh->uh_sport) != sav->remote_ike_port) {
1083 sav->remote_ike_port = ntohs(encap_uh->uh_sport);
1084 }
1085 }
1086 ip6 = esp6_input_strip_udp_encap(m, off);
1087 esp = (struct esp *)(void *)(((u_int8_t *)ip6) + off);
1088 }
1089
1090
1091 /* was it transmitted over the IPsec tunnel SA? */
1092 if (ipsec6_tunnel_validate(m, off + esplen + ivlen, nxt, sav, &ifamily)) {
1093 ifaddr_t ifa;
1094 struct sockaddr_storage addr;
1095
1096 /*
1097 * strip off all the headers that precede the ESP header.
1098 * IP6 xx ESP IP6' payload -> IP6' payload
1099 *
1100 * XXX more sanity checks
1101 * XXX relationship with gif?
1102 */
1103 u_int32_t flowinfo; /*net endian*/
1104 flowinfo = ip6->ip6_flow;
1105 m_adj(m, off + esplen + ivlen);
1106 if (ifamily == AF_INET6) {
1107 struct sockaddr_in6 *ip6addr;
1108
1109 if (m->m_len < sizeof(*ip6)) {
1110 #ifndef PULLDOWN_TEST
1111 /*
1112 * m_pullup is prohibited in KAME IPv6 input processing
1113 * but there's no other way!
1114 */
1115 #else
1116 /* okay to pullup in m_pulldown style */
1117 #endif
1118 m = m_pullup(m, sizeof(*ip6));
1119 if (!m) {
1120 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
1121 goto bad;
1122 }
1123 }
1124 ip6 = mtod(m, struct ip6_hdr *);
1125 /* ECN consideration. */
1126 if (ip6_ecn_egress(ip6_ipsec_ecn, &flowinfo, &ip6->ip6_flow) == 0) {
1127 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
1128 goto bad;
1129 }
1130 if (!key_checktunnelsanity(sav, AF_INET6,
1131 (caddr_t)&ip6->ip6_src, (caddr_t)&ip6->ip6_dst)) {
1132 ipseclog((LOG_ERR, "ipsec tunnel address mismatch "
1133 "in IPv6 ESP input: %s %s\n",
1134 ipsec6_logpacketstr(ip6, spi),
1135 ipsec_logsastr(sav)));
1136 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
1137 goto bad;
1138 }
1139
1140 bzero(&addr, sizeof(addr));
1141 ip6addr = (__typeof__(ip6addr))&addr;
1142 ip6addr->sin6_family = AF_INET6;
1143 ip6addr->sin6_len = sizeof(*ip6addr);
1144 ip6addr->sin6_addr = ip6->ip6_dst;
1145 } else if (ifamily == AF_INET) {
1146 struct sockaddr_in *ipaddr;
1147
1148 if (m->m_len < sizeof(*ip)) {
1149 m = m_pullup(m, sizeof(*ip));
1150 if (!m) {
1151 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
1152 goto bad;
1153 }
1154 }
1155
1156 u_int8_t otos;
1157 int sum;
1158
1159 ip = mtod(m, struct ip *);
1160 otos = ip->ip_tos;
1161 /* ECN consideration. */
1162 if (ip46_ecn_egress(ip6_ipsec_ecn, &flowinfo, &ip->ip_tos) == 0) {
1163 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
1164 goto bad;
1165 }
1166
1167 if (otos != ip->ip_tos) {
1168 sum = ~ntohs(ip->ip_sum) & 0xffff;
1169 sum += (~otos & 0xffff) + ip->ip_tos;
1170 sum = (sum >> 16) + (sum & 0xffff);
1171 sum += (sum >> 16); /* add carry */
1172 ip->ip_sum = htons(~sum & 0xffff);
1173 }
1174
1175 if (!key_checktunnelsanity(sav, AF_INET,
1176 (caddr_t)&ip->ip_src, (caddr_t)&ip->ip_dst)) {
1177 ipseclog((LOG_ERR, "ipsec tunnel address mismatch "
1178 "in ESP input: %s %s\n",
1179 ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
1180 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
1181 goto bad;
1182 }
1183
1184 bzero(&addr, sizeof(addr));
1185 ipaddr = (__typeof__(ipaddr))&addr;
1186 ipaddr->sin_family = AF_INET;
1187 ipaddr->sin_len = sizeof(*ipaddr);
1188 ipaddr->sin_addr = ip->ip_dst;
1189 }
1190
1191 key_sa_recordxfer(sav, m);
1192 if (ipsec_addhist(m, IPPROTO_ESP, spi) != 0 ||
1193 ipsec_addhist(m, IPPROTO_IPV6, 0) != 0) {
1194 IPSEC_STAT_INCREMENT(ipsec6stat.in_nomem);
1195 goto bad;
1196 }
1197
1198 // update the receiving interface address based on the inner address
1199 ifa = ifa_ifwithaddr((struct sockaddr *)&addr);
1200 if (ifa) {
1201 m->m_pkthdr.rcvif = ifa->ifa_ifp;
1202 IFA_REMREF(ifa);
1203 }
1204
1205 // Input via IPSec interface
1206 if (sav->sah->ipsec_if != NULL) {
1207 if (ipsec_inject_inbound_packet(sav->sah->ipsec_if, m) == 0) {
1208 m = NULL;
1209 nxt = IPPROTO_DONE;
1210 goto done;
1211 } else {
1212 goto bad;
1213 }
1214 }
1215
1216 if (proto_input(ifamily == AF_INET ? PF_INET : PF_INET6, m) != 0)
1217 goto bad;
1218 nxt = IPPROTO_DONE;
1219 } else {
1220 /*
1221 * strip off ESP header and IV.
1222 * even in m_pulldown case, we need to strip off ESP so that
1223 * we can always compute checksum for AH correctly.
1224 */
1225 size_t stripsiz;
1226 char *prvnxtp;
1227
1228 /*
1229 * Set the next header field of the previous header correctly.
1230 */
1231 prvnxtp = ip6_get_prevhdr(m, off); /* XXX */
1232 *prvnxtp = nxt;
1233
1234 stripsiz = esplen + ivlen;
1235
1236 ip6 = mtod(m, struct ip6_hdr *);
1237 if (m->m_len >= stripsiz + off) {
1238 ovbcopy((caddr_t)ip6, ((caddr_t)ip6) + stripsiz, off);
1239 m->m_data += stripsiz;
1240 m->m_len -= stripsiz;
1241 m->m_pkthdr.len -= stripsiz;
1242 } else {
1243 /*
1244 * this comes with no copy if the boundary is on a
1245 * cluster
1246 */
1247 struct mbuf *n;
1248
1249 n = m_split(m, off, M_DONTWAIT);
1250 if (n == NULL) {
1251 /* m is retained by m_split */
1252 goto bad;
1253 }
1254 m_adj(n, stripsiz);
1255 /* m_cat does not update m_pkthdr.len */
1256 m->m_pkthdr.len += n->m_pkthdr.len;
1257 m_cat(m, n);
1258 }
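/*
 * Added note: two strategies remove the ESP header plus IV in
 * transport mode.  When the first mbuf already covers the spot, the
 * preceding headers are simply slid forward with ovbcopy().
 * Otherwise the chain is split at the ESP boundary with m_split(),
 * the ESP header and IV are trimmed from the second piece with
 * m_adj(), and the pieces are rejoined with m_cat(); m_pkthdr.len is
 * patched by hand because m_cat() does not maintain it.
 */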
1259
1260 #ifndef PULLDOWN_TEST
1261 /*
1262 * KAME requires that the packet be contiguous in the
1263 * mbuf. We need to make sure of that.
1264 * This kind of code should be avoided.
1265 * XXX other conditions to avoid running this part?
1266 */
1267 if (m->m_len != m->m_pkthdr.len) {
1268 struct mbuf *n = NULL;
1269 int maxlen;
1270
1271 MGETHDR(n, M_DONTWAIT, MT_HEADER); /* MAC-OK */
1272 maxlen = MHLEN;
1273 if (n)
1274 M_COPY_PKTHDR(n, m);
1275 if (n && m->m_pkthdr.len > maxlen) {
1276 MCLGET(n, M_DONTWAIT);
1277 maxlen = MCLBYTES;
1278 if ((n->m_flags & M_EXT) == 0) {
1279 m_free(n);
1280 n = NULL;
1281 }
1282 }
1283 if (!n) {
1284 printf("esp6_input: mbuf allocation failed\n");
1285 goto bad;
1286 }
1287
1288 if (m->m_pkthdr.len <= maxlen) {
1289 m_copydata(m, 0, m->m_pkthdr.len, mtod(n, caddr_t));
1290 n->m_len = m->m_pkthdr.len;
1291 n->m_pkthdr.len = m->m_pkthdr.len;
1292 n->m_next = NULL;
1293 m_freem(m);
1294 } else {
1295 m_copydata(m, 0, maxlen, mtod(n, caddr_t));
1296 n->m_len = maxlen;
1297 n->m_pkthdr.len = m->m_pkthdr.len;
1298 n->m_next = m;
1299 m_adj(m, maxlen);
1300 m->m_flags &= ~M_PKTHDR;
1301 }
1302 m = n;
1303 }
1304 #endif
1305
1306 ip6 = mtod(m, struct ip6_hdr *);
1307 ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - stripsiz);
1308
1309 key_sa_recordxfer(sav, m);
1310 if (ipsec_addhist(m, IPPROTO_ESP, spi) != 0) {
1311 IPSEC_STAT_INCREMENT(ipsec6stat.in_nomem);
1312 goto bad;
1313 }
1314
1315 /*
1316 * Set the csum valid flag. If we authenticated the
1317 * packet, the payload shouldn't be corrupt unless
1318 * it was corrupted before being signed on the other
1319 * side.
1320 */
1321 if (nxt == IPPROTO_TCP || nxt == IPPROTO_UDP) {
1322 m->m_pkthdr.csum_flags = CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
1323 m->m_pkthdr.csum_data = 0xFFFF;
1324 }
1325
1326 // Input via IPSec interface
1327 if (sav->sah->ipsec_if != NULL) {
1328 if (ipsec_inject_inbound_packet(sav->sah->ipsec_if, m) == 0) {
1329 m = NULL;
1330 nxt = IPPROTO_DONE;
1331 goto done;
1332 } else {
1333 goto bad;
1334 }
1335 }
1336
1337 }
1338
1339 done:
1340 *offp = off;
1341 *mp = m;
1342 if (sav) {
1343 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
1344 printf("DP esp6_input call free SA:0x%llx\n",
1345 (uint64_t)VM_KERNEL_ADDRPERM(sav)));
1346 key_freesav(sav, KEY_SADB_UNLOCKED);
1347 }
1348 IPSEC_STAT_INCREMENT(ipsec6stat.in_success);
1349 return nxt;
1350
1351 bad:
1352 if (sav) {
1353 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
1354 printf("DP esp6_input call free SA:0x%llx\n",
1355 (uint64_t)VM_KERNEL_ADDRPERM(sav)));
1356 key_freesav(sav, KEY_SADB_UNLOCKED);
1357 }
1358 if (m)
1359 m_freem(m);
1360 return IPPROTO_DONE;
1361 }
1362
1363 void
1364 esp6_ctlinput(int cmd, struct sockaddr *sa, void *d)
1365 {
1366 const struct newesp *espp;
1367 struct newesp esp;
1368 struct ip6ctlparam *ip6cp = NULL, ip6cp1;
1369 struct secasvar *sav;
1370 struct ip6_hdr *ip6;
1371 struct mbuf *m;
1372 int off;
1373 struct sockaddr_in6 *sa6_src, *sa6_dst;
1374
1375 if (sa->sa_family != AF_INET6 ||
1376 sa->sa_len != sizeof(struct sockaddr_in6))
1377 return;
1378 if ((unsigned)cmd >= PRC_NCMDS)
1379 return;
1380
1381 /* if the parameter is from icmp6, decode it. */
1382 if (d != NULL) {
1383 ip6cp = (struct ip6ctlparam *)d;
1384 m = ip6cp->ip6c_m;
1385 ip6 = ip6cp->ip6c_ip6;
1386 off = ip6cp->ip6c_off;
1387 } else {
1388 m = NULL;
1389 ip6 = NULL;
1390 }
1391
1392 if (ip6) {
1393 /*
1394 * Notify the error to all possible sockets via pfctlinput2.
1395 * Since the upper layer information (such as protocol type,
1396 * source and destination ports) is embedded in the encrypted
1397 * data and might have been cut, we can't directly call
1398 * an upper layer ctlinput function. However, the pcbnotify
1399 * function will consider source and destination addresses
1400 * as well as the flow info value, and may be able to find
1401 * some PCB that should be notified.
1402 * Although pfctlinput2 will call esp6_ctlinput(), there is
1403 * no possibility of an infinite loop of function calls,
1404 * because we don't pass the inner IPv6 header.
1405 */
1406 bzero(&ip6cp1, sizeof(ip6cp1));
1407 ip6cp1.ip6c_src = ip6cp->ip6c_src;
1408 pfctlinput2(cmd, sa, (void *)&ip6cp1);
1409
1410 /*
1411 * Then go to special cases that need ESP header information.
1412 * XXX: We assume that when ip6 is non NULL,
1413 * M and OFF are valid.
1414 */
1415
1416 /* check if we can safely examine src and dst ports */
1417 if (m->m_pkthdr.len < off + sizeof(esp))
1418 return;
1419
1420 if (m->m_len < off + sizeof(esp)) {
1421 /*
1422 * this should be a rare case,
1423 * so we compromise on this copy...
1424 */
1425 m_copydata(m, off, sizeof(esp), (caddr_t)&esp);
1426 espp = &esp;
1427 } else
1428 espp = (struct newesp*)(void *)(mtod(m, caddr_t) + off);
1429
1430 if (cmd == PRC_MSGSIZE) {
1431 int valid = 0;
1432
1433 /*
1434 * Check to see if we have a valid SA corresponding to
1435 * the address in the ICMP message payload.
1436 */
1437 sa6_src = ip6cp->ip6c_src;
1438 sa6_dst = (struct sockaddr_in6 *)(void *)sa;
1439 sav = key_allocsa(AF_INET6,
1440 (caddr_t)&sa6_src->sin6_addr,
1441 (caddr_t)&sa6_dst->sin6_addr,
1442 IPPROTO_ESP, espp->esp_spi);
1443 if (sav) {
1444 if (sav->state == SADB_SASTATE_MATURE ||
1445 sav->state == SADB_SASTATE_DYING)
1446 valid++;
1447 key_freesav(sav, KEY_SADB_UNLOCKED);
1448 }
1449
1450 /* XXX Further validation? */
1451
1452 /*
1453 * Depending on the value of "valid" and routing table
1454 * size (mtudisc_{hi,lo}wat), we will:
1455 * - recalculate the new MTU and create the
1456 * corresponding routing entry, or
1457 * - ignore the MTU change notification.
1458 */
1459 icmp6_mtudisc_update((struct ip6ctlparam *)d, valid);
1460 }
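/*
 * Added note: requiring a MATURE or DYING SA that matches the
 * addresses carried in the ICMPv6 payload before acting on
 * PRC_MSGSIZE makes it harder for an off-path host to shrink the path
 * MTU with forged packet-too-big messages; "valid" tells
 * icmp6_mtudisc_update() whether the notification came from a
 * plausible peer.
 */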
1461 } else {
1462 /* we normally notify any pcb here */
1463 }
1464 }
1465 #endif /* INET6 */