/*
 * bsd/netinet6/esp_input.c — from apple/xnu (xnu-3248.40.184),
 * mirrored via git.saurik.com.
 */
1 /*
2 * Copyright (c) 2008-2013 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 /* $FreeBSD: src/sys/netinet6/esp_input.c,v 1.1.2.3 2001/07/03 11:01:50 ume Exp $ */
30 /* $KAME: esp_input.c,v 1.55 2001/03/23 08:08:47 itojun Exp $ */
31
32 /*
33 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. Neither the name of the project nor the names of its contributors
45 * may be used to endorse or promote products derived from this software
46 * without specific prior written permission.
47 *
48 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
49 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
52 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 * SUCH DAMAGE.
59 */
60
61 /*
62 * RFC1827/2406 Encapsulated Security Payload.
63 */
64
65 #include <sys/param.h>
66 #include <sys/systm.h>
67 #include <sys/malloc.h>
68 #include <sys/mbuf.h>
69 #include <sys/mcache.h>
70 #include <sys/domain.h>
71 #include <sys/protosw.h>
72 #include <sys/socket.h>
73 #include <sys/errno.h>
74 #include <sys/time.h>
75 #include <sys/kernel.h>
76 #include <sys/syslog.h>
77
78 #include <net/if.h>
79 #include <net/if_ipsec.h>
80 #include <net/route.h>
81 #include <kern/cpu_number.h>
82 #include <kern/locks.h>
83
84 #include <netinet/in.h>
85 #include <netinet/in_systm.h>
86 #include <netinet/ip.h>
87 #include <netinet/ip_var.h>
88 #include <netinet/in_var.h>
89 #include <netinet/ip_ecn.h>
90 #include <netinet/in_pcb.h>
91 #include <netinet/udp.h>
92 #if INET6
93 #include <netinet6/ip6_ecn.h>
94 #endif
95
96 #if INET6
97 #include <netinet/ip6.h>
98 #include <netinet6/in6_pcb.h>
99 #include <netinet6/ip6_var.h>
100 #include <netinet/icmp6.h>
101 #include <netinet6/ip6protosw.h>
102 #endif
103
104 #include <netinet6/ipsec.h>
105 #if INET6
106 #include <netinet6/ipsec6.h>
107 #endif
108 #include <netinet6/ah.h>
109 #if INET6
110 #include <netinet6/ah6.h>
111 #endif
112 #include <netinet6/esp.h>
113 #if INET6
114 #include <netinet6/esp6.h>
115 #endif
116 #include <netkey/key.h>
117 #include <netkey/keydb.h>
118 #include <netkey/key_debug.h>
119
120 #include <net/kpi_protocol.h>
121 #include <netinet/kpi_ipfilter_var.h>
122
123 #include <net/net_osdep.h>
124 #include <mach/sdt.h>
125
126 #include <sys/kdebug.h>
127 #define DBG_LAYER_BEG NETDBG_CODE(DBG_NETIPSEC, 1)
128 #define DBG_LAYER_END NETDBG_CODE(DBG_NETIPSEC, 3)
129 #define DBG_FNC_ESPIN NETDBG_CODE(DBG_NETIPSEC, (6 << 8))
130 #define DBG_FNC_DECRYPT NETDBG_CODE(DBG_NETIPSEC, (7 << 8))
131 #define IPLEN_FLIPPED
132
133 extern lck_mtx_t *sadb_mutex;
134
135 #if INET
136 #define ESPMAXLEN \
137 (sizeof(struct esp) < sizeof(struct newesp) \
138 ? sizeof(struct newesp) : sizeof(struct esp))
139
140 static struct ip *
141 esp4_input_strip_udp_encap (struct mbuf *m, int iphlen)
142 {
143 // strip the udp header that's encapsulating ESP
144 struct ip *ip;
145 size_t stripsiz = sizeof(struct udphdr);
146
147 ip = mtod(m, __typeof__(ip));
148 ovbcopy((caddr_t)ip, (caddr_t)(((u_char *)ip) + stripsiz), iphlen);
149 m->m_data += stripsiz;
150 m->m_len -= stripsiz;
151 m->m_pkthdr.len -= stripsiz;
152 ip = mtod(m, __typeof__(ip));
153 ip->ip_len = ip->ip_len - stripsiz;
154 ip->ip_p = IPPROTO_ESP;
155 return ip;
156 }
157
158 static struct ip6_hdr *
159 esp6_input_strip_udp_encap (struct mbuf *m, int ip6hlen)
160 {
161 // strip the udp header that's encapsulating ESP
162 struct ip6_hdr *ip6;
163 size_t stripsiz = sizeof(struct udphdr);
164
165 ip6 = mtod(m, __typeof__(ip6));
166 ovbcopy((caddr_t)ip6, (caddr_t)(((u_char *)ip6) + stripsiz), ip6hlen);
167 m->m_data += stripsiz;
168 m->m_len -= stripsiz;
169 m->m_pkthdr.len -= stripsiz;
170 ip6 = mtod(m, __typeof__(ip6));
171 ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - stripsiz);
172 ip6->ip6_nxt = IPPROTO_ESP;
173 return ip6;
174 }
175
/*
 * esp4_input - protocol input handler for inbound IPv4 ESP (and
 * UDP-encapsulated ESP) packets.
 *
 * m:   received packet; data pointer is at the outer IPv4 header.
 * off: offset from the start of the packet to the ESP header (for
 *      UDP-encapsulated ESP this offset includes the UDP header).
 *
 * Processing stages: look up the SA by SPI, verify the authenticator and
 * the anti-replay window, decrypt, strip the ESP framing (and any NAT-T
 * UDP header), then hand the inner packet back to the stack — via
 * proto_input() for tunnel mode or ip_proto_dispatch_in() for transport
 * mode.  The mbuf is consumed on every path (dispatched or freed).
 *
 * Note: K&R-style definition kept from the KAME origin of this file.
 */
void
esp4_input(m, off)
	struct mbuf *m;
	int off;
{
	struct ip *ip;
#if INET6
	struct ip6_hdr *ip6;
#endif /* INET6 */
	struct esp *esp;
	struct esptail esptail;
	u_int32_t spi;
	u_int32_t seq;
	struct secasvar *sav = NULL;
	size_t taillen;
	u_int16_t nxt;
	const struct esp_algorithm *algo;
	int ivlen;
	size_t hlen;
	size_t esplen;
	sa_family_t ifamily;

	KERNEL_DEBUG(DBG_FNC_ESPIN | DBG_FUNC_START, 0,0,0,0,0);
	/* sanity check for alignment. */
	if (off % 4 != 0 || m->m_pkthdr.len % 4 != 0) {
		ipseclog((LOG_ERR, "IPv4 ESP input: packet alignment problem "
			"(off=%d, pktlen=%d)\n", off, m->m_pkthdr.len));
		IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
		goto bad;
	}

	/*
	 * Make the outer header plus the largest possible ESP header
	 * contiguous so the direct pointer arithmetic below is safe.
	 */
	if (m->m_len < off + ESPMAXLEN) {
		m = m_pullup(m, off + ESPMAXLEN);
		if (!m) {
			ipseclog((LOG_DEBUG,
			    "IPv4 ESP input: can't pullup in esp4_input\n"));
			IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
			goto bad;
		}
	}

	/* Expect 32-bit aligned data pointer on strict-align platforms */
	MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);

	ip = mtod(m, struct ip *);
	// expect udp-encap and esp packets only
	if (ip->ip_p != IPPROTO_ESP &&
	    !(ip->ip_p == IPPROTO_UDP && off >= sizeof(struct udphdr))) {
		ipseclog((LOG_DEBUG,
		    "IPv4 ESP input: invalid protocol type\n"));
		IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
		goto bad;
	}
	esp = (struct esp *)(void *)(((u_int8_t *)ip) + off);
#ifdef _IP_VHL
	hlen = IP_VHL_HL(ip->ip_vhl) << 2;
#else
	hlen = ip->ip_hl << 2;
#endif

	/* find the sassoc. */
	spi = esp->esp_spi;

	if ((sav = key_allocsa(AF_INET,
	    (caddr_t)&ip->ip_src, (caddr_t)&ip->ip_dst,
	    IPPROTO_ESP, spi)) == 0) {
		ipseclog((LOG_WARNING,
		    "IPv4 ESP input: no key association found for spi %u\n",
		    (u_int32_t)ntohl(spi)));
		IPSEC_STAT_INCREMENT(ipsecstat.in_nosa);
		goto bad;
	}
	KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
	    printf("DP esp4_input called to allocate SA:0x%llx\n",
	    (uint64_t)VM_KERNEL_ADDRPERM(sav)));
	if (sav->state != SADB_SASTATE_MATURE
	 && sav->state != SADB_SASTATE_DYING) {
		ipseclog((LOG_DEBUG,
		    "IPv4 ESP input: non-mature/dying SA found for spi %u\n",
		    (u_int32_t)ntohl(spi)));
		IPSEC_STAT_INCREMENT(ipsecstat.in_badspi);
		goto bad;
	}
	algo = esp_algorithm_lookup(sav->alg_enc);
	if (!algo) {
		ipseclog((LOG_DEBUG, "IPv4 ESP input: "
		    "unsupported encryption algorithm for spi %u\n",
		    (u_int32_t)ntohl(spi)));
		IPSEC_STAT_INCREMENT(ipsecstat.in_badspi);
		goto bad;
	}

	/* check if we have proper ivlen information */
	ivlen = sav->ivlen;
	if (ivlen < 0) {
		ipseclog((LOG_ERR, "inproper ivlen in IPv4 ESP input: %s %s\n",
		    ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
		IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
		goto bad;
	}

	seq = ntohl(((struct newesp *)esp)->esp_seq);

	/* Save ICV from packet for verification later */
	/*
	 * Combined-mode (AEAD) algorithms carry their ICV at the tail of
	 * the payload and verify it as part of decryption; stash it now and
	 * skip the separate-auth/replay path below (the label delay_icv
	 * re-joins that path just after its ICV comparison, so the tail is
	 * still stripped and ip_len adjusted).  Replay is not checked here;
	 * presumably the AEAD check subsumes it — NOTE(review): confirm.
	 */
	size_t siz = 0;
	unsigned char saved_icv[AH_MAXSUMSIZE];
	if (algo->finalizedecrypt) {
		siz = algo->icvlen;
		m_copydata(m, m->m_pkthdr.len - siz, siz, (caddr_t) saved_icv);
		goto delay_icv;
	}

	/* Skip replay/auth unless this is a new-format SA with auth keyed. */
	if (!((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay
	 && (sav->alg_auth && sav->key_auth)))
		goto noreplaycheck;

	if (sav->alg_auth == SADB_X_AALG_NULL ||
	    sav->alg_auth == SADB_AALG_NONE)
		goto noreplaycheck;

	/*
	 * check for sequence number.
	 */
	if (ipsec_chkreplay(seq, sav))
		; /*okey*/
	else {
		IPSEC_STAT_INCREMENT(ipsecstat.in_espreplay);
		ipseclog((LOG_WARNING,
		    "replay packet in IPv4 ESP input: %s %s\n",
		    ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
		goto bad;
	}

	/* check ICV */
    {
	u_char sum0[AH_MAXSUMSIZE] __attribute__((aligned(4)));
	u_char sum[AH_MAXSUMSIZE] __attribute__((aligned(4)));
	const struct ah_algorithm *sumalgo;

	sumalgo = ah_algorithm_lookup(sav->alg_auth);
	if (!sumalgo)
		goto noreplaycheck;
	/* authenticator length, rounded up to a 4-byte multiple */
	siz = (((*sumalgo->sumsiz)(sav) + 3) & ~(4 - 1));
	if (m->m_pkthdr.len < off + ESPMAXLEN + siz) {
		IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
		goto bad;
	}
	if (AH_MAXSUMSIZE < siz) {
		ipseclog((LOG_DEBUG,
		    "internal error: AH_MAXSUMSIZE must be larger than %lu\n",
		    (u_int32_t)siz));
		IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
		goto bad;
	}

	/* sum0 = authenticator as received; sum = locally computed value */
	m_copydata(m, m->m_pkthdr.len - siz, siz, (caddr_t) &sum0[0]);

	if (esp_auth(m, off, m->m_pkthdr.len - off - siz, sav, sum)) {
		ipseclog((LOG_WARNING, "auth fail in IPv4 ESP input: %s %s\n",
		    ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
		IPSEC_STAT_INCREMENT(ipsecstat.in_espauthfail);
		goto bad;
	}

	if (bcmp(sum0, sum, siz) != 0) {
		ipseclog((LOG_WARNING, "auth fail in IPv4 ESP input: %s %s\n",
		    ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
		IPSEC_STAT_INCREMENT(ipsecstat.in_espauthfail);
		goto bad;
	}

	/* AEAD path jumps here with siz = algo->icvlen (see above). */
delay_icv:

	/* strip off the authentication data */
	m_adj(m, -siz);
	ip = mtod(m, struct ip *);
#ifdef IPLEN_FLIPPED
	ip->ip_len = ip->ip_len - siz;
#else
	ip->ip_len = htons(ntohs(ip->ip_len) - siz);
#endif
	m->m_flags |= M_AUTHIPDGM;
	IPSEC_STAT_INCREMENT(ipsecstat.in_espauthsucc);
    }

	/*
	 * update sequence number.
	 */
	if ((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay) {
		if (ipsec_updatereplay(seq, sav)) {
			IPSEC_STAT_INCREMENT(ipsecstat.in_espreplay);
			goto bad;
		}
	}

noreplaycheck:

	/* process main esp header. */
	if (sav->flags & SADB_X_EXT_OLD) {
		/* RFC 1827 */
		esplen = sizeof(struct esp);
	} else {
		/* RFC 2406 */
		if (sav->flags & SADB_X_EXT_DERIV)
			esplen = sizeof(struct esp);
		else
			esplen = sizeof(struct newesp);
	}

	if (m->m_pkthdr.len < off + esplen + ivlen + sizeof(esptail)) {
		ipseclog((LOG_WARNING,
		    "IPv4 ESP input: packet too short\n"));
		IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
		goto bad;
	}

	if (m->m_len < off + esplen + ivlen) {
		m = m_pullup(m, off + esplen + ivlen);
		if (!m) {
			ipseclog((LOG_DEBUG,
			    "IPv4 ESP input: can't pullup in esp4_input\n"));
			IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
			goto bad;
		}
	}

	/*
	 * pre-compute and cache intermediate key
	 */
	if (esp_schedule(algo, sav) != 0) {
		IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
		goto bad;
	}

	/*
	 * decrypt the packet.
	 */
	if (!algo->decrypt)
		panic("internal error: no decrypt function");
	KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_START, 0,0,0,0,0);
	if ((*algo->decrypt)(m, off, sav, algo, ivlen)) {
		/* m is already freed */
		m = NULL;
		ipseclog((LOG_ERR, "decrypt fail in IPv4 ESP input: %s\n",
		    ipsec_logsastr(sav)));
		IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
		KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 1,0,0,0,0);
		goto bad;
	}
	KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 2,0,0,0,0);
	IPSEC_STAT_INCREMENT(ipsecstat.in_esphist[sav->alg_enc]);

	m->m_flags |= M_DECRYPTED;

	/*
	 * AEAD only: compute the expected ICV from the just-finished
	 * decryption and compare it against the value saved off the wire.
	 */
	if (algo->finalizedecrypt)
	{
		unsigned char tag[algo->icvlen];
		if ((*algo->finalizedecrypt)(sav, tag, algo->icvlen)) {
			ipseclog((LOG_ERR, "packet decryption ICV failure\n"));
			IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
			KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 1,0,0,0,0);
			goto bad;
		}
		if (memcmp(saved_icv, tag, algo->icvlen)) {
			ipseclog((LOG_ERR, "packet decryption ICV mismatch\n"));
			IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
			KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 1,0,0,0,0);
			goto bad;
		}
	}

	/*
	 * find the trailer of the ESP.
	 */
	m_copydata(m, m->m_pkthdr.len - sizeof(esptail), sizeof(esptail),
	     (caddr_t)&esptail);
	nxt = esptail.esp_nxt;
	taillen = esptail.esp_padlen + sizeof(esptail);

	if (m->m_pkthdr.len < taillen
	 || m->m_pkthdr.len - taillen < hlen) {	/*?*/
		ipseclog((LOG_WARNING,
		    "bad pad length in IPv4 ESP input: %s %s\n",
		    ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
		IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
		goto bad;
	}

	/* strip off the trailing pad area. */
	m_adj(m, -taillen);
	ip = mtod(m, struct ip *);
#ifdef IPLEN_FLIPPED
	ip->ip_len = ip->ip_len - taillen;
#else
	ip->ip_len = htons(ntohs(ip->ip_len) - taillen);
#endif
	if (ip->ip_p == IPPROTO_UDP) {
		// offset includes the outer ip and udp header lengths.
		if (m->m_len < off) {
			m = m_pullup(m, off);
			if (!m) {
				ipseclog((LOG_DEBUG,
				    "IPv4 ESP input: invalid udp encapsulated ESP packet length \n"));
				IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
				goto bad;
			}
		}

		// check the UDP encap header to detect changes in the source port, and then strip the header
		off -= sizeof(struct udphdr); // off no longer includes the udphdr's size
		// if peer is behind nat and this is the latest esp packet
		if ((sav->flags & SADB_X_EXT_NATT_DETECTED_PEER) != 0 &&
		    (sav->flags & SADB_X_EXT_OLD) == 0 &&
		    seq && sav->replay &&
		    seq >= sav->replay->lastseq)  {
			/* track NAT rebinding by adopting the latest source port */
			struct udphdr *encap_uh = (__typeof__(encap_uh))(void *)((caddr_t)ip + off);
			if (encap_uh->uh_sport &&
			    ntohs(encap_uh->uh_sport) != sav->remote_ike_port) {
				sav->remote_ike_port = ntohs(encap_uh->uh_sport);
			}
		}
		ip = esp4_input_strip_udp_encap(m, off);
		esp = (struct esp *)(void *)(((u_int8_t *)ip) + off);
	}

	/*
	 * utun keepalive probes are consumed here; the callback may take
	 * ownership of (or free) the mbuf — NOTE(review): the "bad" exit is
	 * used as a generic way out, not an error, when m was consumed.
	 */
	if (sav->utun_is_keepalive_fn) {
		if (sav->utun_is_keepalive_fn(sav->utun_pcb, &m, nxt, sav->flags, (off + esplen + ivlen))) {
			if (m) {
				// not really bad, we just wanna exit
				IPSEC_STAT_INCREMENT(ipsecstat.in_success);
				m = NULL;
			}
			goto bad;
		}
	}

	/* was it transmitted over the IPsec tunnel SA? */
	if (ipsec4_tunnel_validate(m, off + esplen + ivlen, nxt, sav, &ifamily)) {
		ifaddr_t ifa;
		struct sockaddr_storage addr;

		/*
		 * strip off all the headers that precedes ESP header.
		 *	IP4 xx ESP IP4' payload -> IP4' payload
		 *
		 * XXX more sanity checks
		 * XXX relationship with gif?
		 */
		u_int8_t tos, otos;
		int sum;

		tos = ip->ip_tos;
		m_adj(m, off + esplen + ivlen);
		if (ifamily == AF_INET) {
			struct sockaddr_in *ipaddr;

			if (m->m_len < sizeof(*ip)) {
				m = m_pullup(m, sizeof(*ip));
				if (!m) {
					IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
					goto bad;
				}
			}
			ip = mtod(m, struct ip *);
			/* ECN consideration. */

			otos = ip->ip_tos;
			if (ip_ecn_egress(ip4_ipsec_ecn, &tos, &ip->ip_tos) == 0) {
				IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
				goto bad;
			}

			if (otos != ip->ip_tos) {
				/* incrementally patch the header checksum (RFC 1624 style) */
				sum = ~ntohs(ip->ip_sum) & 0xffff;
				sum += (~otos & 0xffff) + ip->ip_tos;
				sum = (sum >> 16) + (sum & 0xffff);
				sum += (sum >> 16);  /* add carry */
				ip->ip_sum = htons(~sum & 0xffff);
			}

			if (!key_checktunnelsanity(sav, AF_INET,
			    (caddr_t)&ip->ip_src, (caddr_t)&ip->ip_dst)) {
				ipseclog((LOG_ERR, "ipsec tunnel address mismatch "
				    "in ESP input: %s %s\n",
				    ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
				IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
				goto bad;
			}

			if (ip_doscopedroute) {
				bzero(&addr, sizeof(addr));
				ipaddr = (__typeof__(ipaddr))&addr;
				ipaddr->sin_family = AF_INET;
				ipaddr->sin_len = sizeof(*ipaddr);
				ipaddr->sin_addr = ip->ip_dst;
			}
#if INET6
		} else if (ifamily == AF_INET6) {
			struct sockaddr_in6 *ip6addr;

			/*
			 * m_pullup is prohibited in KAME IPv6 input processing
			 * but there's no other way!
			 */
			if (m->m_len < sizeof(*ip6)) {
				m = m_pullup(m, sizeof(*ip6));
				if (!m) {
					IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
					goto bad;
				}
			}

			/*
			 * Expect 32-bit aligned data pointer on strict-align
			 * platforms.
			 */
			MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);

			ip6 = mtod(m, struct ip6_hdr *);

			/* ECN consideration. */
			if (ip64_ecn_egress(ip4_ipsec_ecn, &tos, &ip6->ip6_flow) == 0) {
				IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
				goto bad;
			}

			if (!key_checktunnelsanity(sav, AF_INET6,
			    (caddr_t)&ip6->ip6_src, (caddr_t)&ip6->ip6_dst)) {
				ipseclog((LOG_ERR, "ipsec tunnel address mismatch "
				    "in ESP input: %s %s\n",
				    ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
				IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
				goto bad;
			}

			if (ip6_doscopedroute) {
				bzero(&addr, sizeof(addr));
				ip6addr = (__typeof__(ip6addr))&addr;
				ip6addr->sin6_family = AF_INET6;
				ip6addr->sin6_len = sizeof(*ip6addr);
				ip6addr->sin6_addr = ip6->ip6_dst;
			}
#endif /* INET6 */
		} else {
			ipseclog((LOG_ERR, "ipsec tunnel unsupported address family "
			    "in ESP input\n"));
			goto bad;
		}

		key_sa_recordxfer(sav, m);
		if (ipsec_addhist(m, IPPROTO_ESP, spi) != 0 ||
		    ipsec_addhist(m, IPPROTO_IPV4, 0) != 0) {
			IPSEC_STAT_INCREMENT(ipsecstat.in_nomem);
			goto bad;
		}

		if (ip_doscopedroute || ip6_doscopedroute) {
			// update the receiving interface address based on the inner address
			ifa = ifa_ifwithaddr((struct sockaddr *)&addr);
			if (ifa) {
				m->m_pkthdr.rcvif = ifa->ifa_ifp;
				IFA_REMREF(ifa);
			}
		}

		/* Clear the csum flags, they can't be valid for the inner headers */
		m->m_pkthdr.csum_flags = 0;

		// Input via IPSec interface
		if (sav->sah->ipsec_if != NULL) {
			if (ipsec_inject_inbound_packet(sav->sah->ipsec_if, m) == 0) {
				m = NULL;
				goto done;
			} else {
				goto bad;
			}
		}

		if (sav->utun_in_fn) {
			if (!(sav->utun_in_fn(sav->utun_pcb, &m, ifamily == AF_INET ? PF_INET : PF_INET6))) {
				m = NULL;
				// we just wanna exit since packet has been completely processed
				goto bad;
			}
		}

		if (proto_input(ifamily == AF_INET ? PF_INET : PF_INET6, m) != 0)
			goto bad;

		nxt = IPPROTO_DONE;
		KERNEL_DEBUG(DBG_FNC_ESPIN | DBG_FUNC_END, 2,0,0,0,0);
	} else {
		/*
		 * strip off ESP header and IV.
		 * even in m_pulldown case, we need to strip off ESP so that
		 * we can always compute checksum for AH correctly.
		 */
		size_t stripsiz;

		stripsiz = esplen + ivlen;

		ip = mtod(m, struct ip *);
		ovbcopy((caddr_t)ip, (caddr_t)(((u_char *)ip) + stripsiz), off);
		m->m_data += stripsiz;
		m->m_len -= stripsiz;
		m->m_pkthdr.len -= stripsiz;

		ip = mtod(m, struct ip *);
#ifdef IPLEN_FLIPPED
		ip->ip_len = ip->ip_len - stripsiz;
#else
		ip->ip_len = htons(ntohs(ip->ip_len) - stripsiz);
#endif
		ip->ip_p = nxt;

		key_sa_recordxfer(sav, m);
		if (ipsec_addhist(m, IPPROTO_ESP, spi) != 0) {
			IPSEC_STAT_INCREMENT(ipsecstat.in_nomem);
			goto bad;
		}

		/*
		 * Set the csum valid flag, if we authenticated the
		 * packet, the payload shouldn't be corrupt unless
		 * it was corrupted before being signed on the other
		 * side.
		 */
		if (nxt == IPPROTO_TCP || nxt == IPPROTO_UDP) {
			m->m_pkthdr.csum_flags = CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
			m->m_pkthdr.csum_data = 0xFFFF;
		}

		if (nxt != IPPROTO_DONE) {
			if ((ip_protox[nxt]->pr_flags & PR_LASTHDR) != 0 &&
			    ipsec4_in_reject(m, NULL)) {
				IPSEC_STAT_INCREMENT(ipsecstat.in_polvio);
				goto bad;
			}
			KERNEL_DEBUG(DBG_FNC_ESPIN | DBG_FUNC_END, 3,0,0,0,0);

			/* translate encapsulated UDP port ? */
			if ((sav->flags & SADB_X_EXT_NATT_MULTIPLEUSERS) != 0) {
				struct udphdr *udp;

				if (nxt != IPPROTO_UDP) {	/* not UPD packet - drop it */
					IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
					goto bad;
				}

				if (m->m_len < off + sizeof(struct udphdr)) {
					m = m_pullup(m, off + sizeof(struct udphdr));
					if (!m) {
						ipseclog((LOG_DEBUG,
						    "IPv4 ESP input: can't pullup UDP header in esp4_input\n"));
						IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
						goto bad;
					}
					ip = mtod(m, struct ip *);
				}
				udp = (struct udphdr *)(void *)(((u_int8_t *)ip) + off);

				/* lock: natt_encapsulated_src_port lives in the SADB */
				lck_mtx_lock(sadb_mutex);
				if (sav->natt_encapsulated_src_port == 0) {
					sav->natt_encapsulated_src_port = udp->uh_sport;
				} else if (sav->natt_encapsulated_src_port != udp->uh_sport) {	/* something wrong */
					IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
					lck_mtx_unlock(sadb_mutex);
					goto bad;
				}
				lck_mtx_unlock(sadb_mutex);
				/* rewrite to the IKE port; checksum dropped since payload changed */
				udp->uh_sport = htons(sav->remote_ike_port);
				udp->uh_sum = 0;
			}

			DTRACE_IP6(receive, struct mbuf *, m, struct inpcb *, NULL,
			    struct ip *, ip, struct ifnet *, m->m_pkthdr.rcvif,
			    struct ip *, ip, struct ip6_hdr *, NULL);

			// Input via IPSec interface
			if (sav->sah->ipsec_if != NULL) {
				/* restore wire-format header fields and re-checksum for reinjection */
				ip->ip_len = htons(ip->ip_len + hlen);
				ip->ip_off = htons(ip->ip_off);
				ip->ip_sum = 0;
				ip->ip_sum = ip_cksum_hdr_in(m, hlen);
				if (ipsec_inject_inbound_packet(sav->sah->ipsec_if, m) == 0) {
					m = NULL;
					goto done;
				} else {
					goto bad;
				}
			}

			if (sav->utun_in_fn) {
				if (!(sav->utun_in_fn(sav->utun_pcb, &m, PF_INET))) {
					m = NULL;
					// we just wanna exit since packet has been completely processed
					goto bad;
				}
			}

			ip_proto_dispatch_in(m, off, nxt, 0);
		} else
			m_freem(m);
		m = NULL;
	}

done:
	if (sav) {
		KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
		    printf("DP esp4_input call free SA:0x%llx\n",
		    (uint64_t)VM_KERNEL_ADDRPERM(sav)));
		key_freesav(sav, KEY_SADB_UNLOCKED);
	}
	IPSEC_STAT_INCREMENT(ipsecstat.in_success);
	return;

bad:
	if (sav) {
		KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
		    printf("DP esp4_input call free SA:0x%llx\n",
		    (uint64_t)VM_KERNEL_ADDRPERM(sav)));
		key_freesav(sav, KEY_SADB_UNLOCKED);
	}
	if (m)
		m_freem(m);
	KERNEL_DEBUG(DBG_FNC_ESPIN | DBG_FUNC_END, 4,0,0,0,0);
	return;
}
804 #endif /* INET */
805
806 #if INET6
807 int
808 esp6_input(struct mbuf **mp, int *offp, int proto)
809 {
810 #pragma unused(proto)
811 struct mbuf *m = *mp;
812 int off = *offp;
813 struct ip *ip;
814 struct ip6_hdr *ip6;
815 struct esp *esp;
816 struct esptail esptail;
817 u_int32_t spi;
818 u_int32_t seq;
819 struct secasvar *sav = NULL;
820 size_t taillen;
821 u_int16_t nxt;
822 char *nproto;
823 const struct esp_algorithm *algo;
824 int ivlen;
825 size_t esplen;
826 sa_family_t ifamily;
827
828 /* sanity check for alignment. */
829 if (off % 4 != 0 || m->m_pkthdr.len % 4 != 0) {
830 ipseclog((LOG_ERR, "IPv6 ESP input: packet alignment problem "
831 "(off=%d, pktlen=%d)\n", off, m->m_pkthdr.len));
832 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
833 goto bad;
834 }
835
836 #ifndef PULLDOWN_TEST
837 IP6_EXTHDR_CHECK(m, off, ESPMAXLEN, {return IPPROTO_DONE;});
838 esp = (struct esp *)(void *)(mtod(m, caddr_t) + off);
839 #else
840 IP6_EXTHDR_GET(esp, struct esp *, m, off, ESPMAXLEN);
841 if (esp == NULL) {
842 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
843 return IPPROTO_DONE;
844 }
845 #endif
846 /* Expect 32-bit data aligned pointer on strict-align platforms */
847 MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);
848
849 ip6 = mtod(m, struct ip6_hdr *);
850
851 if (ntohs(ip6->ip6_plen) == 0) {
852 ipseclog((LOG_ERR, "IPv6 ESP input: "
853 "ESP with IPv6 jumbogram is not supported.\n"));
854 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
855 goto bad;
856 }
857
858 nproto = ip6_get_prevhdr(m, off);
859 if (nproto == NULL || (*nproto != IPPROTO_ESP &&
860 !(*nproto == IPPROTO_UDP && off >= sizeof(struct udphdr)))) {
861 ipseclog((LOG_DEBUG, "IPv6 ESP input: invalid protocol type\n"));
862 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
863 goto bad;
864 }
865
866 /* find the sassoc. */
867 spi = esp->esp_spi;
868
869 if ((sav = key_allocsa(AF_INET6,
870 (caddr_t)&ip6->ip6_src, (caddr_t)&ip6->ip6_dst,
871 IPPROTO_ESP, spi)) == 0) {
872 ipseclog((LOG_WARNING,
873 "IPv6 ESP input: no key association found for spi %u\n",
874 (u_int32_t)ntohl(spi)));
875 IPSEC_STAT_INCREMENT(ipsec6stat.in_nosa);
876 goto bad;
877 }
878 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
879 printf("DP esp6_input called to allocate SA:0x%llx\n",
880 (uint64_t)VM_KERNEL_ADDRPERM(sav)));
881 if (sav->state != SADB_SASTATE_MATURE
882 && sav->state != SADB_SASTATE_DYING) {
883 ipseclog((LOG_DEBUG,
884 "IPv6 ESP input: non-mature/dying SA found for spi %u\n",
885 (u_int32_t)ntohl(spi)));
886 IPSEC_STAT_INCREMENT(ipsec6stat.in_badspi);
887 goto bad;
888 }
889 algo = esp_algorithm_lookup(sav->alg_enc);
890 if (!algo) {
891 ipseclog((LOG_DEBUG, "IPv6 ESP input: "
892 "unsupported encryption algorithm for spi %u\n",
893 (u_int32_t)ntohl(spi)));
894 IPSEC_STAT_INCREMENT(ipsec6stat.in_badspi);
895 goto bad;
896 }
897
898 /* check if we have proper ivlen information */
899 ivlen = sav->ivlen;
900 if (ivlen < 0) {
901 ipseclog((LOG_ERR, "inproper ivlen in IPv6 ESP input: %s %s\n",
902 ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
903 IPSEC_STAT_INCREMENT(ipsec6stat.in_badspi);
904 goto bad;
905 }
906
907 seq = ntohl(((struct newesp *)esp)->esp_seq);
908
909 /* Save ICV from packet for verification later */
910 size_t siz = 0;
911 unsigned char saved_icv[AH_MAXSUMSIZE];
912 if (algo->finalizedecrypt) {
913 siz = algo->icvlen;
914 m_copydata(m, m->m_pkthdr.len - siz, siz, (caddr_t) saved_icv);
915 goto delay_icv;
916 }
917
918 if (!((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay
919 && (sav->alg_auth && sav->key_auth)))
920 goto noreplaycheck;
921
922 if (sav->alg_auth == SADB_X_AALG_NULL ||
923 sav->alg_auth == SADB_AALG_NONE)
924 goto noreplaycheck;
925
926 /*
927 * check for sequence number.
928 */
929 if (ipsec_chkreplay(seq, sav))
930 ; /*okey*/
931 else {
932 IPSEC_STAT_INCREMENT(ipsec6stat.in_espreplay);
933 ipseclog((LOG_WARNING,
934 "replay packet in IPv6 ESP input: %s %s\n",
935 ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
936 goto bad;
937 }
938
939 /* check ICV */
940 {
941 u_char sum0[AH_MAXSUMSIZE] __attribute__((aligned(4)));
942 u_char sum[AH_MAXSUMSIZE] __attribute__((aligned(4)));
943 const struct ah_algorithm *sumalgo;
944
945 sumalgo = ah_algorithm_lookup(sav->alg_auth);
946 if (!sumalgo)
947 goto noreplaycheck;
948 siz = (((*sumalgo->sumsiz)(sav) + 3) & ~(4 - 1));
949 if (m->m_pkthdr.len < off + ESPMAXLEN + siz) {
950 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
951 goto bad;
952 }
953 if (AH_MAXSUMSIZE < siz) {
954 ipseclog((LOG_DEBUG,
955 "internal error: AH_MAXSUMSIZE must be larger than %lu\n",
956 (u_int32_t)siz));
957 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
958 goto bad;
959 }
960
961 m_copydata(m, m->m_pkthdr.len - siz, siz, (caddr_t) &sum0[0]);
962
963 if (esp_auth(m, off, m->m_pkthdr.len - off - siz, sav, sum)) {
964 ipseclog((LOG_WARNING, "auth fail in IPv6 ESP input: %s %s\n",
965 ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
966 IPSEC_STAT_INCREMENT(ipsec6stat.in_espauthfail);
967 goto bad;
968 }
969
970 if (bcmp(sum0, sum, siz) != 0) {
971 ipseclog((LOG_WARNING, "auth fail in IPv6 ESP input: %s %s\n",
972 ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
973 IPSEC_STAT_INCREMENT(ipsec6stat.in_espauthfail);
974 goto bad;
975 }
976
977 delay_icv:
978
979 /* strip off the authentication data */
980 m_adj(m, -siz);
981 ip6 = mtod(m, struct ip6_hdr *);
982 ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - siz);
983
984 m->m_flags |= M_AUTHIPDGM;
985 IPSEC_STAT_INCREMENT(ipsec6stat.in_espauthsucc);
986 }
987
988 /*
989 * update sequence number.
990 */
991 if ((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay) {
992 if (ipsec_updatereplay(seq, sav)) {
993 IPSEC_STAT_INCREMENT(ipsec6stat.in_espreplay);
994 goto bad;
995 }
996 }
997
998 noreplaycheck:
999
1000 /* process main esp header. */
1001 if (sav->flags & SADB_X_EXT_OLD) {
1002 /* RFC 1827 */
1003 esplen = sizeof(struct esp);
1004 } else {
1005 /* RFC 2406 */
1006 if (sav->flags & SADB_X_EXT_DERIV)
1007 esplen = sizeof(struct esp);
1008 else
1009 esplen = sizeof(struct newesp);
1010 }
1011
1012 if (m->m_pkthdr.len < off + esplen + ivlen + sizeof(esptail)) {
1013 ipseclog((LOG_WARNING,
1014 "IPv6 ESP input: packet too short\n"));
1015 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
1016 goto bad;
1017 }
1018
1019 #ifndef PULLDOWN_TEST
1020 IP6_EXTHDR_CHECK(m, off, esplen + ivlen, return IPPROTO_DONE); /*XXX*/
1021 #else
1022 IP6_EXTHDR_GET(esp, struct esp *, m, off, esplen + ivlen);
1023 if (esp == NULL) {
1024 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
1025 m = NULL;
1026 goto bad;
1027 }
1028 #endif
1029 ip6 = mtod(m, struct ip6_hdr *); /*set it again just in case*/
1030
1031 /*
1032 * pre-compute and cache intermediate key
1033 */
1034 if (esp_schedule(algo, sav) != 0) {
1035 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
1036 goto bad;
1037 }
1038
1039 /*
1040 * decrypt the packet.
1041 */
1042 if (!algo->decrypt)
1043 panic("internal error: no decrypt function");
1044 if ((*algo->decrypt)(m, off, sav, algo, ivlen)) {
1045 /* m is already freed */
1046 m = NULL;
1047 ipseclog((LOG_ERR, "decrypt fail in IPv6 ESP input: %s\n",
1048 ipsec_logsastr(sav)));
1049 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
1050 goto bad;
1051 }
1052 IPSEC_STAT_INCREMENT(ipsec6stat.in_esphist[sav->alg_enc]);
1053
1054 m->m_flags |= M_DECRYPTED;
1055
1056 if (algo->finalizedecrypt)
1057 {
1058 unsigned char tag[algo->icvlen];
1059 if ((*algo->finalizedecrypt)(sav, tag, algo->icvlen)) {
1060 ipseclog((LOG_ERR, "packet decryption ICV failure\n"));
1061 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
1062 KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 1,0,0,0,0);
1063 goto bad;
1064 }
1065 if (memcmp(saved_icv, tag, algo->icvlen)) {
1066 ipseclog((LOG_ERR, "packet decryption ICV mismatch\n"));
1067 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
1068 KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 1,0,0,0,0);
1069 goto bad;
1070 }
1071 }
1072
1073 /*
1074 * find the trailer of the ESP.
1075 */
1076 m_copydata(m, m->m_pkthdr.len - sizeof(esptail), sizeof(esptail),
1077 (caddr_t)&esptail);
1078 nxt = esptail.esp_nxt;
1079 taillen = esptail.esp_padlen + sizeof(esptail);
1080
1081 if (m->m_pkthdr.len < taillen
1082 || m->m_pkthdr.len - taillen < sizeof(struct ip6_hdr)) { /*?*/
1083 ipseclog((LOG_WARNING,
1084 "bad pad length in IPv6 ESP input: %s %s\n",
1085 ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
1086 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
1087 goto bad;
1088 }
1089
1090 /* strip off the trailing pad area. */
1091 m_adj(m, -taillen);
1092 ip6 = mtod(m, struct ip6_hdr *);
1093 ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - taillen);
1094
1095 if (sav->utun_is_keepalive_fn) {
1096 if (sav->utun_is_keepalive_fn(sav->utun_pcb, &m, nxt, sav->flags, (off + esplen + ivlen))) {
1097 if (m) {
1098 // not really bad, we just wanna exit
1099 IPSEC_STAT_INCREMENT(ipsec6stat.in_success);
1100 m = NULL;
1101 }
1102 goto bad;
1103 }
1104 }
1105
1106 if (*nproto == IPPROTO_UDP) {
1107 // offset includes the outer ip and udp header lengths.
1108 if (m->m_len < off) {
1109 m = m_pullup(m, off);
1110 if (!m) {
1111 ipseclog((LOG_DEBUG,
1112 "IPv6 ESP input: invalid udp encapsulated ESP packet length\n"));
1113 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
1114 goto bad;
1115 }
1116 }
1117
1118 // check the UDP encap header to detect changes in the source port, and then strip the header
1119 off -= sizeof(struct udphdr); // off no longer includes the udphdr's size
1120 // if peer is behind nat and this is the latest esp packet
1121 if ((sav->flags & SADB_X_EXT_NATT_DETECTED_PEER) != 0 &&
1122 (sav->flags & SADB_X_EXT_OLD) == 0 &&
1123 seq && sav->replay &&
1124 seq >= sav->replay->lastseq) {
1125 struct udphdr *encap_uh = (__typeof__(encap_uh))(void *)((caddr_t)ip6 + off);
1126 if (encap_uh->uh_sport &&
1127 ntohs(encap_uh->uh_sport) != sav->remote_ike_port) {
1128 sav->remote_ike_port = ntohs(encap_uh->uh_sport);
1129 }
1130 }
1131 ip6 = esp6_input_strip_udp_encap(m, off);
1132 esp = (struct esp *)(void *)(((u_int8_t *)ip6) + off);
1133 }
1134
1135
1136 /* was it transmitted over the IPsec tunnel SA? */
1137 if (ipsec6_tunnel_validate(m, off + esplen + ivlen, nxt, sav, &ifamily)) {
1138 ifaddr_t ifa;
1139 struct sockaddr_storage addr;
1140
1141 /*
1142 * strip off all the headers that precedes ESP header.
1143 * IP6 xx ESP IP6' payload -> IP6' payload
1144 *
1145 * XXX more sanity checks
1146 * XXX relationship with gif?
1147 */
1148 u_int32_t flowinfo; /*net endian*/
1149 flowinfo = ip6->ip6_flow;
1150 m_adj(m, off + esplen + ivlen);
1151 if (ifamily == AF_INET6) {
1152 if (m->m_len < sizeof(*ip6)) {
1153 #ifndef PULLDOWN_TEST
1154 /*
1155 * m_pullup is prohibited in KAME IPv6 input processing
1156 * but there's no other way!
1157 */
1158 #else
1159 /* okay to pullup in m_pulldown style */
1160 #endif
1161 m = m_pullup(m, sizeof(*ip6));
1162 if (!m) {
1163 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
1164 goto bad;
1165 }
1166 }
1167 ip6 = mtod(m, struct ip6_hdr *);
1168 /* ECN consideration. */
1169 if (ip6_ecn_egress(ip6_ipsec_ecn, &flowinfo, &ip6->ip6_flow) == 0) {
1170 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
1171 goto bad;
1172 }
1173 if (!key_checktunnelsanity(sav, AF_INET6,
1174 (caddr_t)&ip6->ip6_src, (caddr_t)&ip6->ip6_dst)) {
1175 ipseclog((LOG_ERR, "ipsec tunnel address mismatch "
1176 "in IPv6 ESP input: %s %s\n",
1177 ipsec6_logpacketstr(ip6, spi),
1178 ipsec_logsastr(sav)));
1179 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
1180 goto bad;
1181 }
1182
1183 if (ip6_doscopedroute) {
1184 struct sockaddr_in6 *ip6addr;
1185
1186 bzero(&addr, sizeof(addr));
1187 ip6addr = (__typeof__(ip6addr))&addr;
1188 ip6addr->sin6_family = AF_INET6;
1189 ip6addr->sin6_len = sizeof(*ip6addr);
1190 ip6addr->sin6_addr = ip6->ip6_dst;
1191 }
1192 } else if (ifamily == AF_INET) {
1193 struct sockaddr_in *ipaddr;
1194
1195 if (m->m_len < sizeof(*ip)) {
1196 m = m_pullup(m, sizeof(*ip));
1197 if (!m) {
1198 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
1199 goto bad;
1200 }
1201 }
1202
1203 u_int8_t otos;
1204 int sum;
1205
1206 ip = mtod(m, struct ip *);
1207 otos = ip->ip_tos;
1208 /* ECN consideration. */
1209 if (ip46_ecn_egress(ip6_ipsec_ecn, &flowinfo, &ip->ip_tos) == 0) {
1210 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
1211 goto bad;
1212 }
1213
1214 if (otos != ip->ip_tos) {
1215 sum = ~ntohs(ip->ip_sum) & 0xffff;
1216 sum += (~otos & 0xffff) + ip->ip_tos;
1217 sum = (sum >> 16) + (sum & 0xffff);
1218 sum += (sum >> 16); /* add carry */
1219 ip->ip_sum = htons(~sum & 0xffff);
1220 }
1221
1222 if (!key_checktunnelsanity(sav, AF_INET,
1223 (caddr_t)&ip->ip_src, (caddr_t)&ip->ip_dst)) {
1224 ipseclog((LOG_ERR, "ipsec tunnel address mismatch "
1225 "in ESP input: %s %s\n",
1226 ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
1227 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
1228 goto bad;
1229 }
1230
1231 if (ip_doscopedroute) {
1232 bzero(&addr, sizeof(addr));
1233 ipaddr = (__typeof__(ipaddr))&addr;
1234 ipaddr->sin_family = AF_INET;
1235 ipaddr->sin_len = sizeof(*ipaddr);
1236 ipaddr->sin_addr = ip->ip_dst;
1237 }
1238 }
1239
1240 key_sa_recordxfer(sav, m);
1241 if (ipsec_addhist(m, IPPROTO_ESP, spi) != 0 ||
1242 ipsec_addhist(m, IPPROTO_IPV6, 0) != 0) {
1243 IPSEC_STAT_INCREMENT(ipsec6stat.in_nomem);
1244 goto bad;
1245 }
1246
1247 if (ip_doscopedroute || ip6_doscopedroute) {
1248 // update the receiving interface address based on the inner address
1249 ifa = ifa_ifwithaddr((struct sockaddr *)&addr);
1250 if (ifa) {
1251 m->m_pkthdr.rcvif = ifa->ifa_ifp;
1252 IFA_REMREF(ifa);
1253 }
1254 }
1255
1256 // Input via IPSec interface
1257 if (sav->sah->ipsec_if != NULL) {
1258 if (ipsec_inject_inbound_packet(sav->sah->ipsec_if, m) == 0) {
1259 m = NULL;
1260 nxt = IPPROTO_DONE;
1261 goto done;
1262 } else {
1263 goto bad;
1264 }
1265 }
1266
1267 if (sav->utun_in_fn) {
1268 if (!(sav->utun_in_fn(sav->utun_pcb, &m, PF_INET6))) {
1269 m = NULL;
1270 // we just wanna exit since packet has been completely processed
1271 goto bad;
1272 }
1273 }
1274
1275 if (proto_input(ifamily == AF_INET ? PF_INET : PF_INET6, m) != 0)
1276 goto bad;
1277 nxt = IPPROTO_DONE;
1278 } else {
1279 /*
1280 * strip off ESP header and IV.
1281 * even in m_pulldown case, we need to strip off ESP so that
1282 * we can always compute checksum for AH correctly.
1283 */
1284 size_t stripsiz;
1285 char *prvnxtp;
1286
1287 /*
1288 * Set the next header field of the previous header correctly.
1289 */
1290 prvnxtp = ip6_get_prevhdr(m, off); /* XXX */
1291 *prvnxtp = nxt;
1292
1293 stripsiz = esplen + ivlen;
1294
1295 ip6 = mtod(m, struct ip6_hdr *);
1296 if (m->m_len >= stripsiz + off) {
1297 ovbcopy((caddr_t)ip6, ((caddr_t)ip6) + stripsiz, off);
1298 m->m_data += stripsiz;
1299 m->m_len -= stripsiz;
1300 m->m_pkthdr.len -= stripsiz;
1301 } else {
1302 /*
1303 * this comes with no copy if the boundary is on
1304 * cluster
1305 */
1306 struct mbuf *n;
1307
1308 n = m_split(m, off, M_DONTWAIT);
1309 if (n == NULL) {
1310 /* m is retained by m_split */
1311 goto bad;
1312 }
1313 m_adj(n, stripsiz);
1314 /* m_cat does not update m_pkthdr.len */
1315 m->m_pkthdr.len += n->m_pkthdr.len;
1316 m_cat(m, n);
1317 }
1318
1319 #ifndef PULLDOWN_TEST
1320 /*
1321 * KAME requires that the packet to be contiguous on the
1322 * mbuf. We need to make that sure.
1323 * this kind of code should be avoided.
1324 * XXX other conditions to avoid running this part?
1325 */
1326 if (m->m_len != m->m_pkthdr.len) {
1327 struct mbuf *n = NULL;
1328 int maxlen;
1329
1330 MGETHDR(n, M_DONTWAIT, MT_HEADER); /* MAC-OK */
1331 maxlen = MHLEN;
1332 if (n)
1333 M_COPY_PKTHDR(n, m);
1334 if (n && m->m_pkthdr.len > maxlen) {
1335 MCLGET(n, M_DONTWAIT);
1336 maxlen = MCLBYTES;
1337 if ((n->m_flags & M_EXT) == 0) {
1338 m_free(n);
1339 n = NULL;
1340 }
1341 }
1342 if (!n) {
1343 printf("esp6_input: mbuf allocation failed\n");
1344 goto bad;
1345 }
1346
1347 if (m->m_pkthdr.len <= maxlen) {
1348 m_copydata(m, 0, m->m_pkthdr.len, mtod(n, caddr_t));
1349 n->m_len = m->m_pkthdr.len;
1350 n->m_pkthdr.len = m->m_pkthdr.len;
1351 n->m_next = NULL;
1352 m_freem(m);
1353 } else {
1354 m_copydata(m, 0, maxlen, mtod(n, caddr_t));
1355 n->m_len = maxlen;
1356 n->m_pkthdr.len = m->m_pkthdr.len;
1357 n->m_next = m;
1358 m_adj(m, maxlen);
1359 m->m_flags &= ~M_PKTHDR;
1360 }
1361 m = n;
1362 }
1363 #endif
1364
1365 ip6 = mtod(m, struct ip6_hdr *);
1366 ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - stripsiz);
1367
1368 key_sa_recordxfer(sav, m);
1369 if (ipsec_addhist(m, IPPROTO_ESP, spi) != 0) {
1370 IPSEC_STAT_INCREMENT(ipsec6stat.in_nomem);
1371 goto bad;
1372 }
1373
1374 /*
1375 * Set the csum valid flag, if we authenticated the
1376 * packet, the payload shouldn't be corrupt unless
1377 * it was corrupted before being signed on the other
1378 * side.
1379 */
1380 if (nxt == IPPROTO_TCP || nxt == IPPROTO_UDP) {
1381 m->m_pkthdr.csum_flags = CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
1382 m->m_pkthdr.csum_data = 0xFFFF;
1383 }
1384
1385 // Input via IPSec interface
1386 if (sav->sah->ipsec_if != NULL) {
1387 if (ipsec_inject_inbound_packet(sav->sah->ipsec_if, m) == 0) {
1388 m = NULL;
1389 nxt = IPPROTO_DONE;
1390 goto done;
1391 } else {
1392 goto bad;
1393 }
1394 }
1395
1396 if (sav->utun_in_fn) {
1397 if (!(sav->utun_in_fn(sav->utun_pcb, &m, PF_INET6))) {
1398 m = NULL;
1399 // we just wanna exit since packet has been completely processed
1400 goto bad;
1401 }
1402 }
1403 }
1404
1405 done:
1406 *offp = off;
1407 *mp = m;
1408 if (sav) {
1409 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
1410 printf("DP esp6_input call free SA:0x%llx\n",
1411 (uint64_t)VM_KERNEL_ADDRPERM(sav)));
1412 key_freesav(sav, KEY_SADB_UNLOCKED);
1413 }
1414 IPSEC_STAT_INCREMENT(ipsec6stat.in_success);
1415 return nxt;
1416
1417 bad:
1418 if (sav) {
1419 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
1420 printf("DP esp6_input call free SA:0x%llx\n",
1421 (uint64_t)VM_KERNEL_ADDRPERM(sav)));
1422 key_freesav(sav, KEY_SADB_UNLOCKED);
1423 }
1424 if (m)
1425 m_freem(m);
1426 return IPPROTO_DONE;
1427 }
1428
/*
 * esp6_ctlinput: handle a PRC_* control event (typically derived from a
 * received ICMPv6 error) for the ESP protocol over IPv6.
 *
 * Because the upper-layer headers are encrypted inside ESP, the error
 * cannot be dispatched directly to a specific upper-layer ctlinput.
 * Instead the event is fanned out to candidate PCBs via pfctlinput2(),
 * and a PRC_MSGSIZE event additionally drives path-MTU discovery when a
 * mature/dying SA matching the ICMP payload's SPI exists.
 *
 * cmd: PRC_* command code (bounds-checked against PRC_NCMDS below).
 * sa:  destination address of the offending packet; must be a
 *      full-length AF_INET6 sockaddr or the event is ignored.
 * d:   when non-NULL, a struct ip6ctlparam from icmp6 input carrying
 *      the mbuf chain, the ip6 header, and the offset of the ESP header.
 */
1429 void
1430 esp6_ctlinput(cmd, sa, d)
1431 int cmd;
1432 struct sockaddr *sa;
1433 void *d;
1434 {
1435 const struct newesp *espp;
1436 struct newesp esp;
1437 struct ip6ctlparam *ip6cp = NULL, ip6cp1;
1438 struct secasvar *sav;
1439 struct ip6_hdr *ip6;
1440 struct mbuf *m;
1441 int off;
1442 struct sockaddr_in6 *sa6_src, *sa6_dst;
1443
/* Sanity: only full-sized IPv6 sockaddrs and in-range PRC_* commands. */
1444 if (sa->sa_family != AF_INET6 ||
1445 sa->sa_len != sizeof(struct sockaddr_in6))
1446 return;
1447 if ((unsigned)cmd >= PRC_NCMDS)
1448 return;
1449
1450 /* if the parameter is from icmp6, decode it. */
1451 if (d != NULL) {
1452 ip6cp = (struct ip6ctlparam *)d;
1453 m = ip6cp->ip6c_m;
1454 ip6 = ip6cp->ip6c_ip6;
1455 off = ip6cp->ip6c_off;
1456 } else {
/* No icmp6 parameter: m/off stay unused below since ip6 is NULL. */
1457 m = NULL;
1458 ip6 = NULL;
1459 }
1460
1461 if (ip6) {
1462 /*
1463 * Notify the error to all possible sockets via pfctlinput2.
1464 * Since the upper layer information (such as protocol type,
1465 * source and destination ports) is embedded in the encrypted
1466 * data and might have been cut, we can't directly call
1467 * an upper layer ctlinput function. However, the pcbnotify
1468 * function will consider source and destination addresses
1469 * as well as the flow info value, and may be able to find
1470 * some PCB that should be notified.
1471 * Although pfctlinput2 will call esp6_ctlinput(), there is
1472 * no possibility of an infinite loop of function calls,
1473 * because we don't pass the inner IPv6 header.
1474 */
1475 bzero(&ip6cp1, sizeof(ip6cp1));
1476 ip6cp1.ip6c_src = ip6cp->ip6c_src;
1477 pfctlinput2(cmd, sa, (void *)&ip6cp1);
1478
1479 /*
1480 * Then go to special cases that need ESP header information.
1481 * XXX: We assume that when ip6 is non NULL,
1482 * M and OFF are valid.
1483 */
1484
1485 /* check if we can safely examine src and dst ports */
1486 if (m->m_pkthdr.len < off + sizeof(esp))
1487 return;
1488
/* Pull the fixed ESP header out of the mbuf chain if it is not contiguous. */
1489 if (m->m_len < off + sizeof(esp)) {
1490 /*
1491 * this should be rare case,
1492 * so we compromise on this copy...
1493 */
1494 m_copydata(m, off, sizeof(esp), (caddr_t)&esp);
1495 espp = &esp;
1496 } else
1497 espp = (struct newesp*)(void *)(mtod(m, caddr_t) + off);
1498
1499 if (cmd == PRC_MSGSIZE) {
1500 int valid = 0;
1501
1502 /*
1503 * Check to see if we have a valid SA corresponding to
1504 * the address in the ICMP message payload.
1505 */
1506 sa6_src = ip6cp->ip6c_src;
/* sa was verified above to be a full-length AF_INET6 sockaddr. */
1507 sa6_dst = (struct sockaddr_in6 *)(void *)sa;
1508 sav = key_allocsa(AF_INET6,
1509 (caddr_t)&sa6_src->sin6_addr,
1510 (caddr_t)&sa6_dst->sin6_addr,
1511 IPPROTO_ESP, espp->esp_spi);
1512 if (sav) {
1513 if (sav->state == SADB_SASTATE_MATURE ||
1514 sav->state == SADB_SASTATE_DYING)
1515 valid++;
/* key_allocsa took a reference; drop it now that state is recorded. */
1516 key_freesav(sav, KEY_SADB_UNLOCKED);
1517 }
1518
1519 /* XXX Further validation? */
1520
1521 /*
1522 * Depending on the value of "valid" and routing table
1523 * size (mtudisc_{hi,lo}wat), we will:
1524 * - recalculate the new MTU and create the
1525 * corresponding routing entry, or
1526 * - ignore the MTU change notification.
1527 */
1528 icmp6_mtudisc_update((struct ip6ctlparam *)d, valid);
1529 }
1530 } else {
1531 /* we normally notify any pcb here */
1532 }
1533 }
1534 #endif /* INET6 */