]> git.saurik.com Git - apple/xnu.git/blob - bsd/netinet6/esp_input.c
xnu-6153.141.1.tar.gz
[apple/xnu.git] / bsd / netinet6 / esp_input.c
1 /*
2 * Copyright (c) 2008-2016 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 /* $FreeBSD: src/sys/netinet6/esp_input.c,v 1.1.2.3 2001/07/03 11:01:50 ume Exp $ */
30 /* $KAME: esp_input.c,v 1.55 2001/03/23 08:08:47 itojun Exp $ */
31
32 /*
33 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. Neither the name of the project nor the names of its contributors
45 * may be used to endorse or promote products derived from this software
46 * without specific prior written permission.
47 *
48 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
49 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
52 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 * SUCH DAMAGE.
59 */
60
61 #define _IP_VHL
62
63 /*
64 * RFC1827/2406 Encapsulated Security Payload.
65 */
66
67 #include <sys/param.h>
68 #include <sys/systm.h>
69 #include <sys/malloc.h>
70 #include <sys/mbuf.h>
71 #include <sys/mcache.h>
72 #include <sys/domain.h>
73 #include <sys/protosw.h>
74 #include <sys/socket.h>
75 #include <sys/errno.h>
76 #include <sys/time.h>
77 #include <sys/kernel.h>
78 #include <sys/syslog.h>
79
80 #include <net/if.h>
81 #include <net/if_ipsec.h>
82 #include <net/route.h>
83 #include <kern/cpu_number.h>
84 #include <kern/locks.h>
85
86 #include <netinet/in.h>
87 #include <netinet/in_systm.h>
88 #include <netinet/ip.h>
89 #include <netinet/ip_var.h>
90 #include <netinet/in_var.h>
91 #include <netinet/ip_ecn.h>
92 #include <netinet/in_pcb.h>
93 #include <netinet/udp.h>
94 #include <netinet/tcp.h>
95 #include <netinet/in_tclass.h>
96 #if INET6
97 #include <netinet6/ip6_ecn.h>
98 #endif
99
100 #if INET6
101 #include <netinet/ip6.h>
102 #include <netinet6/in6_pcb.h>
103 #include <netinet6/ip6_var.h>
104 #include <netinet/icmp6.h>
105 #include <netinet6/ip6protosw.h>
106 #endif
107
108 #include <netinet6/ipsec.h>
109 #if INET6
110 #include <netinet6/ipsec6.h>
111 #endif
112 #include <netinet6/ah.h>
113 #if INET6
114 #include <netinet6/ah6.h>
115 #endif
116 #include <netinet6/esp.h>
117 #if INET6
118 #include <netinet6/esp6.h>
119 #endif
120 #include <netkey/key.h>
121 #include <netkey/keydb.h>
122 #include <netkey/key_debug.h>
123
124 #include <net/kpi_protocol.h>
125 #include <netinet/kpi_ipfilter_var.h>
126
127 #include <net/net_osdep.h>
128 #include <mach/sdt.h>
129 #include <corecrypto/cc.h>
130
131 #include <sys/kdebug.h>
132 #define DBG_LAYER_BEG NETDBG_CODE(DBG_NETIPSEC, 1)
133 #define DBG_LAYER_END NETDBG_CODE(DBG_NETIPSEC, 3)
134 #define DBG_FNC_ESPIN NETDBG_CODE(DBG_NETIPSEC, (6 << 8))
135 #define DBG_FNC_DECRYPT NETDBG_CODE(DBG_NETIPSEC, (7 << 8))
136 #define IPLEN_FLIPPED
137
138 extern lck_mtx_t *sadb_mutex;
139
140 #if INET
141 #define ESPMAXLEN \
142 (sizeof(struct esp) < sizeof(struct newesp) \
143 ? sizeof(struct newesp) : sizeof(struct esp))
144
145 static struct ip *
146 esp4_input_strip_udp_encap(struct mbuf *m, int iphlen)
147 {
148 // strip the udp header that's encapsulating ESP
149 struct ip *ip;
150 size_t stripsiz = sizeof(struct udphdr);
151
152 ip = mtod(m, __typeof__(ip));
153 ovbcopy((caddr_t)ip, (caddr_t)(((u_char *)ip) + stripsiz), iphlen);
154 m->m_data += stripsiz;
155 m->m_len -= stripsiz;
156 m->m_pkthdr.len -= stripsiz;
157 ip = mtod(m, __typeof__(ip));
158 ip->ip_len = ip->ip_len - stripsiz;
159 ip->ip_p = IPPROTO_ESP;
160 return ip;
161 }
162
163 static struct ip6_hdr *
164 esp6_input_strip_udp_encap(struct mbuf *m, int ip6hlen)
165 {
166 // strip the udp header that's encapsulating ESP
167 struct ip6_hdr *ip6;
168 size_t stripsiz = sizeof(struct udphdr);
169
170 ip6 = mtod(m, __typeof__(ip6));
171 ovbcopy((caddr_t)ip6, (caddr_t)(((u_char *)ip6) + stripsiz), ip6hlen);
172 m->m_data += stripsiz;
173 m->m_len -= stripsiz;
174 m->m_pkthdr.len -= stripsiz;
175 ip6 = mtod(m, __typeof__(ip6));
176 ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - stripsiz);
177 ip6->ip6_nxt = IPPROTO_ESP;
178 return ip6;
179 }
180
/*
 * Emit an MPKL (multipath kernel log) record for a decapsulated inner
 * TCP segment, keyed by the SA's SPI and the ESP sequence number.
 * Only fires when the global net_mpklog switch is enabled and the SA's
 * ipsec interface has IFXF_MPK_LOG set; non-TCP inner payloads and
 * unrecognized inner IP versions are silently ignored.
 */
static void
esp_input_log(struct mbuf *m, struct secasvar *sav, u_int32_t spi, u_int32_t seq)
{
	if (net_mpklog_enabled &&
	    (sav->sah->ipsec_if->if_xflags & IFXF_MPK_LOG) == IFXF_MPK_LOG) {
		struct tcphdr th = {};
		size_t iphlen = 0;
		u_int32_t proto_len = 0;
		u_int8_t proto = 0;

		/* Inspect the inner header's version nibble to pick v4 vs v6. */
		struct ip *inner_ip = mtod(m, struct ip *);
		if (IP_VHL_V(inner_ip->ip_vhl) == 4) {
			iphlen = IP_VHL_HL(inner_ip->ip_vhl) << 2;
			proto = inner_ip->ip_p;
		} else if (IP_VHL_V(inner_ip->ip_vhl) == 6) {
			struct ip6_hdr *inner_ip6 = mtod(m, struct ip6_hdr *);
			iphlen = sizeof(struct ip6_hdr);
			proto = inner_ip6->ip6_nxt;
		}

		if (proto == IPPROTO_TCP) {
			/* th stays zero-initialized if the packet is too short. */
			if ((int)(iphlen + sizeof(th)) <= m->m_pkthdr.len) {
				m_copydata(m, iphlen, sizeof(th), (u_int8_t *)&th);
			}

			/*
			 * NOTE(review): if the copy above was skipped, th_off is 0
			 * and proto_len overstates the payload length — logging
			 * only, so harmless, but worth confirming intent.
			 */
			proto_len = m->m_pkthdr.len - iphlen - (th.th_off << 2);
			MPKL_ESP_INPUT_TCP(esp_mpkl_log_object,
			    ntohl(spi), seq,
			    ntohs(th.th_sport), ntohs(th.th_dport),
			    ntohl(th.th_seq), proto_len);
		}
	}
}
214
/*
 * Legacy protosw entry point for IPv4 ESP input.  Delegates to the
 * extended variant with no requested receiving interface; any mbuf
 * the extended variant would hand back is deliberately discarded.
 */
void
esp4_input(struct mbuf *m, int off)
{
	(void)esp4_input_extended(m, off, NULL);
}
220
221 struct mbuf *
222 esp4_input_extended(struct mbuf *m, int off, ifnet_t interface)
223 {
224 struct ip *ip;
225 #if INET6
226 struct ip6_hdr *ip6;
227 #endif /* INET6 */
228 struct esp *esp;
229 struct esptail esptail;
230 u_int32_t spi;
231 u_int32_t seq;
232 struct secasvar *sav = NULL;
233 size_t taillen;
234 u_int16_t nxt;
235 const struct esp_algorithm *algo;
236 int ivlen;
237 size_t hlen;
238 size_t esplen;
239 sa_family_t ifamily;
240 struct mbuf *out_m = NULL;
241 mbuf_traffic_class_t traffic_class = 0;
242
243 KERNEL_DEBUG(DBG_FNC_ESPIN | DBG_FUNC_START, 0, 0, 0, 0, 0);
244 /* sanity check for alignment. */
245 if (off % 4 != 0 || m->m_pkthdr.len % 4 != 0) {
246 ipseclog((LOG_ERR, "IPv4 ESP input: packet alignment problem "
247 "(off=%d, pktlen=%d)\n", off, m->m_pkthdr.len));
248 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
249 goto bad;
250 }
251
252 if (m->m_len < off + ESPMAXLEN) {
253 m = m_pullup(m, off + ESPMAXLEN);
254 if (!m) {
255 ipseclog((LOG_DEBUG,
256 "IPv4 ESP input: can't pullup in esp4_input\n"));
257 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
258 goto bad;
259 }
260 }
261
262 m->m_pkthdr.csum_flags &= ~CSUM_RX_FLAGS;
263
264 /* Expect 32-bit aligned data pointer on strict-align platforms */
265 MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);
266
267 ip = mtod(m, struct ip *);
268 // expect udp-encap and esp packets only
269 if (ip->ip_p != IPPROTO_ESP &&
270 !(ip->ip_p == IPPROTO_UDP && off >= sizeof(struct udphdr))) {
271 ipseclog((LOG_DEBUG,
272 "IPv4 ESP input: invalid protocol type\n"));
273 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
274 goto bad;
275 }
276 esp = (struct esp *)(void *)(((u_int8_t *)ip) + off);
277 #ifdef _IP_VHL
278 hlen = IP_VHL_HL(ip->ip_vhl) << 2;
279 #else
280 hlen = ip->ip_hl << 2;
281 #endif
282
283 /* find the sassoc. */
284 spi = esp->esp_spi;
285
286 if ((sav = key_allocsa_extended(AF_INET,
287 (caddr_t)&ip->ip_src, (caddr_t)&ip->ip_dst,
288 IPPROTO_ESP, spi, interface)) == 0) {
289 ipseclog((LOG_WARNING,
290 "IPv4 ESP input: no key association found for spi %u (0x%08x)\n",
291 (u_int32_t)ntohl(spi), (u_int32_t)ntohl(spi)));
292 IPSEC_STAT_INCREMENT(ipsecstat.in_nosa);
293 goto bad;
294 }
295 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
296 printf("DP esp4_input called to allocate SA:0x%llx\n",
297 (uint64_t)VM_KERNEL_ADDRPERM(sav)));
298 if (sav->state != SADB_SASTATE_MATURE
299 && sav->state != SADB_SASTATE_DYING) {
300 ipseclog((LOG_DEBUG,
301 "IPv4 ESP input: non-mature/dying SA found for spi %u (0x%08x)\n",
302 (u_int32_t)ntohl(spi), (u_int32_t)ntohl(spi)));
303 IPSEC_STAT_INCREMENT(ipsecstat.in_badspi);
304 goto bad;
305 }
306 algo = esp_algorithm_lookup(sav->alg_enc);
307 if (!algo) {
308 ipseclog((LOG_DEBUG, "IPv4 ESP input: "
309 "unsupported encryption algorithm for spi %u (0x%08x)\n",
310 (u_int32_t)ntohl(spi), (u_int32_t)ntohl(spi)));
311 IPSEC_STAT_INCREMENT(ipsecstat.in_badspi);
312 goto bad;
313 }
314
315 /* check if we have proper ivlen information */
316 ivlen = sav->ivlen;
317 if (ivlen < 0) {
318 ipseclog((LOG_ERR, "inproper ivlen in IPv4 ESP input: %s %s\n",
319 ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
320 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
321 goto bad;
322 }
323
324 seq = ntohl(((struct newesp *)esp)->esp_seq);
325
326 if ((sav->flags2 & SADB_X_EXT_SA2_SEQ_PER_TRAFFIC_CLASS) ==
327 SADB_X_EXT_SA2_SEQ_PER_TRAFFIC_CLASS) {
328 u_int8_t dscp = ip->ip_tos >> IPTOS_DSCP_SHIFT;
329 traffic_class = rfc4594_dscp_to_tc(dscp);
330 }
331
332 /* Save ICV from packet for verification later */
333 size_t siz = 0;
334 unsigned char saved_icv[AH_MAXSUMSIZE];
335 if (algo->finalizedecrypt) {
336 siz = algo->icvlen;
337 m_copydata(m, m->m_pkthdr.len - siz, siz, (caddr_t) saved_icv);
338 goto delay_icv;
339 }
340
341 if (!((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay[traffic_class] != NULL &&
342 (sav->alg_auth && sav->key_auth))) {
343 goto noreplaycheck;
344 }
345
346 if (sav->alg_auth == SADB_X_AALG_NULL ||
347 sav->alg_auth == SADB_AALG_NONE) {
348 goto noreplaycheck;
349 }
350
351 /*
352 * check for sequence number.
353 */
354 if (ipsec_chkreplay(seq, sav, traffic_class)) {
355 ; /*okey*/
356 } else {
357 IPSEC_STAT_INCREMENT(ipsecstat.in_espreplay);
358 ipseclog((LOG_WARNING,
359 "replay packet in IPv4 ESP input: %s %s\n",
360 ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
361 goto bad;
362 }
363
364 /* check ICV */
365 {
366 u_char sum0[AH_MAXSUMSIZE] __attribute__((aligned(4)));
367 u_char sum[AH_MAXSUMSIZE] __attribute__((aligned(4)));
368 const struct ah_algorithm *sumalgo;
369
370 sumalgo = ah_algorithm_lookup(sav->alg_auth);
371 if (!sumalgo) {
372 goto noreplaycheck;
373 }
374 siz = (((*sumalgo->sumsiz)(sav) + 3) & ~(4 - 1));
375 if (m->m_pkthdr.len < off + ESPMAXLEN + siz) {
376 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
377 goto bad;
378 }
379 if (AH_MAXSUMSIZE < siz) {
380 ipseclog((LOG_DEBUG,
381 "internal error: AH_MAXSUMSIZE must be larger than %u\n",
382 (u_int32_t)siz));
383 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
384 goto bad;
385 }
386
387 m_copydata(m, m->m_pkthdr.len - siz, siz, (caddr_t) &sum0[0]);
388
389 if (esp_auth(m, off, m->m_pkthdr.len - off - siz, sav, sum)) {
390 ipseclog((LOG_WARNING, "auth fail in IPv4 ESP input: %s %s\n",
391 ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
392 IPSEC_STAT_INCREMENT(ipsecstat.in_espauthfail);
393 goto bad;
394 }
395
396 if (cc_cmp_safe(siz, sum0, sum)) {
397 ipseclog((LOG_WARNING, "cc_cmp fail in IPv4 ESP input: %s %s\n",
398 ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
399 IPSEC_STAT_INCREMENT(ipsecstat.in_espauthfail);
400 goto bad;
401 }
402
403 delay_icv:
404
405 /* strip off the authentication data */
406 m_adj(m, -siz);
407 ip = mtod(m, struct ip *);
408 #ifdef IPLEN_FLIPPED
409 ip->ip_len = ip->ip_len - siz;
410 #else
411 ip->ip_len = htons(ntohs(ip->ip_len) - siz);
412 #endif
413 m->m_flags |= M_AUTHIPDGM;
414 IPSEC_STAT_INCREMENT(ipsecstat.in_espauthsucc);
415 }
416
417 /*
418 * update sequence number.
419 */
420 if ((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay[traffic_class] != NULL) {
421 if (ipsec_updatereplay(seq, sav, traffic_class)) {
422 IPSEC_STAT_INCREMENT(ipsecstat.in_espreplay);
423 goto bad;
424 }
425 }
426
427 noreplaycheck:
428
429 /* process main esp header. */
430 if (sav->flags & SADB_X_EXT_OLD) {
431 /* RFC 1827 */
432 esplen = sizeof(struct esp);
433 } else {
434 /* RFC 2406 */
435 if (sav->flags & SADB_X_EXT_DERIV) {
436 esplen = sizeof(struct esp);
437 } else {
438 esplen = sizeof(struct newesp);
439 }
440 }
441
442 if (m->m_pkthdr.len < off + esplen + ivlen + sizeof(esptail)) {
443 ipseclog((LOG_WARNING,
444 "IPv4 ESP input: packet too short\n"));
445 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
446 goto bad;
447 }
448
449 if (m->m_len < off + esplen + ivlen) {
450 m = m_pullup(m, off + esplen + ivlen);
451 if (!m) {
452 ipseclog((LOG_DEBUG,
453 "IPv4 ESP input: can't pullup in esp4_input\n"));
454 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
455 goto bad;
456 }
457 }
458
459 /*
460 * pre-compute and cache intermediate key
461 */
462 if (esp_schedule(algo, sav) != 0) {
463 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
464 goto bad;
465 }
466
467 /*
468 * decrypt the packet.
469 */
470 if (!algo->decrypt) {
471 panic("internal error: no decrypt function");
472 }
473 KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_START, 0, 0, 0, 0, 0);
474 if ((*algo->decrypt)(m, off, sav, algo, ivlen)) {
475 /* m is already freed */
476 m = NULL;
477 ipseclog((LOG_ERR, "decrypt fail in IPv4 ESP input: %s\n",
478 ipsec_logsastr(sav)));
479 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
480 KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 1, 0, 0, 0, 0);
481 goto bad;
482 }
483 KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 2, 0, 0, 0, 0);
484 IPSEC_STAT_INCREMENT(ipsecstat.in_esphist[sav->alg_enc]);
485
486 m->m_flags |= M_DECRYPTED;
487
488 if (algo->finalizedecrypt) {
489 if ((*algo->finalizedecrypt)(sav, saved_icv, algo->icvlen)) {
490 ipseclog((LOG_ERR, "esp4 packet decryption ICV failure\n"));
491 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
492 KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 1, 0, 0, 0, 0);
493 goto bad;
494 }
495 }
496
497 /*
498 * find the trailer of the ESP.
499 */
500 m_copydata(m, m->m_pkthdr.len - sizeof(esptail), sizeof(esptail),
501 (caddr_t)&esptail);
502 nxt = esptail.esp_nxt;
503 taillen = esptail.esp_padlen + sizeof(esptail);
504
505 if (m->m_pkthdr.len < taillen
506 || m->m_pkthdr.len - taillen < hlen) { /*?*/
507 ipseclog((LOG_WARNING,
508 "bad pad length in IPv4 ESP input: %s %s\n",
509 ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
510 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
511 goto bad;
512 }
513
514 /* strip off the trailing pad area. */
515 m_adj(m, -taillen);
516 ip = mtod(m, struct ip *);
517 #ifdef IPLEN_FLIPPED
518 ip->ip_len = ip->ip_len - taillen;
519 #else
520 ip->ip_len = htons(ntohs(ip->ip_len) - taillen);
521 #endif
522 if (ip->ip_p == IPPROTO_UDP) {
523 // offset includes the outer ip and udp header lengths.
524 if (m->m_len < off) {
525 m = m_pullup(m, off);
526 if (!m) {
527 ipseclog((LOG_DEBUG,
528 "IPv4 ESP input: invalid udp encapsulated ESP packet length \n"));
529 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
530 goto bad;
531 }
532 ip = mtod(m, struct ip *);
533 }
534
535 // check the UDP encap header to detect changes in the source port, and then strip the header
536 off -= sizeof(struct udphdr); // off no longer includes the udphdr's size
537 // if peer is behind nat and this is the latest esp packet
538 if ((sav->flags & SADB_X_EXT_NATT_DETECTED_PEER) != 0 &&
539 (sav->flags & SADB_X_EXT_OLD) == 0 &&
540 seq && sav->replay[traffic_class] &&
541 seq >= sav->replay[traffic_class]->lastseq) {
542 struct udphdr *encap_uh = (__typeof__(encap_uh))(void *)((caddr_t)ip + off);
543 if (encap_uh->uh_sport &&
544 ntohs(encap_uh->uh_sport) != sav->remote_ike_port) {
545 sav->remote_ike_port = ntohs(encap_uh->uh_sport);
546 }
547 }
548 ip = esp4_input_strip_udp_encap(m, off);
549 esp = (struct esp *)(void *)(((u_int8_t *)ip) + off);
550 }
551
552 /* was it transmitted over the IPsec tunnel SA? */
553 if (ipsec4_tunnel_validate(m, off + esplen + ivlen, nxt, sav, &ifamily)) {
554 ifaddr_t ifa;
555 struct sockaddr_storage addr;
556
557 /*
558 * strip off all the headers that precedes ESP header.
559 * IP4 xx ESP IP4' payload -> IP4' payload
560 *
561 * XXX more sanity checks
562 * XXX relationship with gif?
563 */
564 u_int8_t tos, otos;
565 int sum;
566
567 tos = ip->ip_tos;
568 m_adj(m, off + esplen + ivlen);
569 if (ifamily == AF_INET) {
570 struct sockaddr_in *ipaddr;
571
572 if (m->m_len < sizeof(*ip)) {
573 m = m_pullup(m, sizeof(*ip));
574 if (!m) {
575 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
576 goto bad;
577 }
578 }
579 ip = mtod(m, struct ip *);
580 /* ECN consideration. */
581
582 otos = ip->ip_tos;
583 if (ip_ecn_egress(ip4_ipsec_ecn, &tos, &ip->ip_tos) == 0) {
584 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
585 goto bad;
586 }
587
588 if (otos != ip->ip_tos) {
589 sum = ~ntohs(ip->ip_sum) & 0xffff;
590 sum += (~otos & 0xffff) + ip->ip_tos;
591 sum = (sum >> 16) + (sum & 0xffff);
592 sum += (sum >> 16); /* add carry */
593 ip->ip_sum = htons(~sum & 0xffff);
594 }
595
596 if (!key_checktunnelsanity(sav, AF_INET,
597 (caddr_t)&ip->ip_src, (caddr_t)&ip->ip_dst)) {
598 ipseclog((LOG_ERR, "ipsec tunnel address mismatch "
599 "in ESP input: %s %s\n",
600 ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
601 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
602 goto bad;
603 }
604
605 bzero(&addr, sizeof(addr));
606 ipaddr = (__typeof__(ipaddr)) & addr;
607 ipaddr->sin_family = AF_INET;
608 ipaddr->sin_len = sizeof(*ipaddr);
609 ipaddr->sin_addr = ip->ip_dst;
610 #if INET6
611 } else if (ifamily == AF_INET6) {
612 struct sockaddr_in6 *ip6addr;
613
614 /*
615 * m_pullup is prohibited in KAME IPv6 input processing
616 * but there's no other way!
617 */
618 if (m->m_len < sizeof(*ip6)) {
619 m = m_pullup(m, sizeof(*ip6));
620 if (!m) {
621 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
622 goto bad;
623 }
624 }
625
626 /*
627 * Expect 32-bit aligned data pointer on strict-align
628 * platforms.
629 */
630 MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);
631
632 ip6 = mtod(m, struct ip6_hdr *);
633
634 /* ECN consideration. */
635 if (ip64_ecn_egress(ip4_ipsec_ecn, &tos, &ip6->ip6_flow) == 0) {
636 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
637 goto bad;
638 }
639
640 if (!key_checktunnelsanity(sav, AF_INET6,
641 (caddr_t)&ip6->ip6_src, (caddr_t)&ip6->ip6_dst)) {
642 ipseclog((LOG_ERR, "ipsec tunnel address mismatch "
643 "in ESP input: %s %s\n",
644 ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
645 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
646 goto bad;
647 }
648
649 bzero(&addr, sizeof(addr));
650 ip6addr = (__typeof__(ip6addr)) & addr;
651 ip6addr->sin6_family = AF_INET6;
652 ip6addr->sin6_len = sizeof(*ip6addr);
653 ip6addr->sin6_addr = ip6->ip6_dst;
654 #endif /* INET6 */
655 } else {
656 ipseclog((LOG_ERR, "ipsec tunnel unsupported address family "
657 "in ESP input\n"));
658 goto bad;
659 }
660
661 key_sa_recordxfer(sav, m);
662 if (ipsec_addhist(m, IPPROTO_ESP, spi) != 0 ||
663 ipsec_addhist(m, IPPROTO_IPV4, 0) != 0) {
664 IPSEC_STAT_INCREMENT(ipsecstat.in_nomem);
665 goto bad;
666 }
667
668 // update the receiving interface address based on the inner address
669 ifa = ifa_ifwithaddr((struct sockaddr *)&addr);
670 if (ifa) {
671 m->m_pkthdr.rcvif = ifa->ifa_ifp;
672 IFA_REMREF(ifa);
673 }
674
675 /* Clear the csum flags, they can't be valid for the inner headers */
676 m->m_pkthdr.csum_flags = 0;
677
678 // Input via IPsec interface
679 lck_mtx_lock(sadb_mutex);
680 ifnet_t ipsec_if = sav->sah->ipsec_if;
681 if (ipsec_if != NULL) {
682 // If an interface is found, add a reference count before dropping the lock
683 ifnet_reference(ipsec_if);
684 }
685 lck_mtx_unlock(sadb_mutex);
686 if (ipsec_if != NULL) {
687 esp_input_log(m, sav, spi, seq);
688 ipsec_save_wake_packet(m, ntohl(spi), seq);
689
690 // Return mbuf
691 if (interface != NULL &&
692 interface == ipsec_if) {
693 out_m = m;
694 ifnet_release(ipsec_if);
695 goto done;
696 }
697
698 errno_t inject_error = ipsec_inject_inbound_packet(ipsec_if, m);
699 ifnet_release(ipsec_if);
700
701 if (inject_error == 0) {
702 m = NULL;
703 goto done;
704 } else {
705 goto bad;
706 }
707 }
708
709 if (proto_input(ifamily == AF_INET ? PF_INET : PF_INET6, m) != 0) {
710 goto bad;
711 }
712
713 nxt = IPPROTO_DONE;
714 KERNEL_DEBUG(DBG_FNC_ESPIN | DBG_FUNC_END, 2, 0, 0, 0, 0);
715 } else {
716 /*
717 * strip off ESP header and IV.
718 * even in m_pulldown case, we need to strip off ESP so that
719 * we can always compute checksum for AH correctly.
720 */
721 size_t stripsiz;
722
723 stripsiz = esplen + ivlen;
724
725 ip = mtod(m, struct ip *);
726 ovbcopy((caddr_t)ip, (caddr_t)(((u_char *)ip) + stripsiz), off);
727 m->m_data += stripsiz;
728 m->m_len -= stripsiz;
729 m->m_pkthdr.len -= stripsiz;
730
731 ip = mtod(m, struct ip *);
732 #ifdef IPLEN_FLIPPED
733 ip->ip_len = ip->ip_len - stripsiz;
734 #else
735 ip->ip_len = htons(ntohs(ip->ip_len) - stripsiz);
736 #endif
737 ip->ip_p = nxt;
738
739 key_sa_recordxfer(sav, m);
740 if (ipsec_addhist(m, IPPROTO_ESP, spi) != 0) {
741 IPSEC_STAT_INCREMENT(ipsecstat.in_nomem);
742 goto bad;
743 }
744
745 /*
746 * Set the csum valid flag, if we authenticated the
747 * packet, the payload shouldn't be corrupt unless
748 * it was corrupted before being signed on the other
749 * side.
750 */
751 if (nxt == IPPROTO_TCP || nxt == IPPROTO_UDP) {
752 m->m_pkthdr.csum_flags = CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
753 m->m_pkthdr.csum_data = 0xFFFF;
754 _CASSERT(offsetof(struct pkthdr, csum_data) == offsetof(struct pkthdr, csum_rx_val));
755 }
756
757 if (nxt != IPPROTO_DONE) {
758 if ((ip_protox[nxt]->pr_flags & PR_LASTHDR) != 0 &&
759 ipsec4_in_reject(m, NULL)) {
760 IPSEC_STAT_INCREMENT(ipsecstat.in_polvio);
761 goto bad;
762 }
763 KERNEL_DEBUG(DBG_FNC_ESPIN | DBG_FUNC_END, 3, 0, 0, 0, 0);
764
765 /* translate encapsulated UDP port ? */
766 if ((sav->flags & SADB_X_EXT_NATT_MULTIPLEUSERS) != 0) {
767 struct udphdr *udp;
768
769 if (nxt != IPPROTO_UDP) { /* not UPD packet - drop it */
770 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
771 goto bad;
772 }
773
774 if (m->m_len < off + sizeof(struct udphdr)) {
775 m = m_pullup(m, off + sizeof(struct udphdr));
776 if (!m) {
777 ipseclog((LOG_DEBUG,
778 "IPv4 ESP input: can't pullup UDP header in esp4_input\n"));
779 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
780 goto bad;
781 }
782 ip = mtod(m, struct ip *);
783 }
784 udp = (struct udphdr *)(void *)(((u_int8_t *)ip) + off);
785
786 lck_mtx_lock(sadb_mutex);
787 if (sav->natt_encapsulated_src_port == 0) {
788 sav->natt_encapsulated_src_port = udp->uh_sport;
789 } else if (sav->natt_encapsulated_src_port != udp->uh_sport) { /* something wrong */
790 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
791 lck_mtx_unlock(sadb_mutex);
792 goto bad;
793 }
794 lck_mtx_unlock(sadb_mutex);
795 udp->uh_sport = htons(sav->remote_ike_port);
796 udp->uh_sum = 0;
797 }
798
799 DTRACE_IP6(receive, struct mbuf *, m, struct inpcb *, NULL,
800 struct ip *, ip, struct ifnet *, m->m_pkthdr.rcvif,
801 struct ip *, ip, struct ip6_hdr *, NULL);
802
803 // Input via IPsec interface legacy path
804 lck_mtx_lock(sadb_mutex);
805 ifnet_t ipsec_if = sav->sah->ipsec_if;
806 if (ipsec_if != NULL) {
807 // If an interface is found, add a reference count before dropping the lock
808 ifnet_reference(ipsec_if);
809 }
810 lck_mtx_unlock(sadb_mutex);
811 if (ipsec_if != NULL) {
812 int mlen;
813 if ((mlen = m_length2(m, NULL)) < hlen) {
814 ipseclog((LOG_DEBUG,
815 "IPv4 ESP input: decrypted packet too short %d < %zu\n",
816 mlen, hlen));
817 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
818 ifnet_release(ipsec_if);
819 goto bad;
820 }
821 ip->ip_len = htons(ip->ip_len + hlen);
822 ip->ip_off = htons(ip->ip_off);
823 ip->ip_sum = 0;
824 ip->ip_sum = ip_cksum_hdr_in(m, hlen);
825
826 esp_input_log(m, sav, spi, seq);
827 ipsec_save_wake_packet(m, ntohl(spi), seq);
828
829 // Return mbuf
830 if (interface != NULL &&
831 interface == ipsec_if) {
832 out_m = m;
833 ifnet_release(ipsec_if);
834 goto done;
835 }
836
837 errno_t inject_error = ipsec_inject_inbound_packet(ipsec_if, m);
838 ifnet_release(ipsec_if);
839
840 if (inject_error == 0) {
841 m = NULL;
842 goto done;
843 } else {
844 goto bad;
845 }
846 }
847
848 ip_proto_dispatch_in(m, off, nxt, 0);
849 } else {
850 m_freem(m);
851 }
852 m = NULL;
853 }
854
855 done:
856 if (sav) {
857 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
858 printf("DP esp4_input call free SA:0x%llx\n",
859 (uint64_t)VM_KERNEL_ADDRPERM(sav)));
860 key_freesav(sav, KEY_SADB_UNLOCKED);
861 }
862 IPSEC_STAT_INCREMENT(ipsecstat.in_success);
863 return out_m;
864 bad:
865 if (sav) {
866 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
867 printf("DP esp4_input call free SA:0x%llx\n",
868 (uint64_t)VM_KERNEL_ADDRPERM(sav)));
869 key_freesav(sav, KEY_SADB_UNLOCKED);
870 }
871 if (m) {
872 m_freem(m);
873 }
874 KERNEL_DEBUG(DBG_FNC_ESPIN | DBG_FUNC_END, 4, 0, 0, 0, 0);
875 return out_m;
876 }
877 #endif /* INET */
878
879 #if INET6
880
/*
 * Protosw entry point for IPv6 ESP input.  Delegates to the extended
 * variant with no requested receiving interface and propagates its
 * return value (next protocol / IPPROTO_DONE).
 */
int
esp6_input(struct mbuf **mp, int *offp, int proto)
{
	return esp6_input_extended(mp, offp, proto, NULL);
}
886
887 int
888 esp6_input_extended(struct mbuf **mp, int *offp, int proto, ifnet_t interface)
889 {
890 #pragma unused(proto)
891 struct mbuf *m = *mp;
892 int off = *offp;
893 struct ip *ip;
894 struct ip6_hdr *ip6;
895 struct esp *esp;
896 struct esptail esptail;
897 u_int32_t spi;
898 u_int32_t seq;
899 struct secasvar *sav = NULL;
900 size_t taillen;
901 u_int16_t nxt;
902 char *nproto;
903 const struct esp_algorithm *algo;
904 int ivlen;
905 size_t esplen;
906 sa_family_t ifamily;
907 mbuf_traffic_class_t traffic_class = 0;
908
909 /* sanity check for alignment. */
910 if (off % 4 != 0 || m->m_pkthdr.len % 4 != 0) {
911 ipseclog((LOG_ERR, "IPv6 ESP input: packet alignment problem "
912 "(off=%d, pktlen=%d)\n", off, m->m_pkthdr.len));
913 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
914 goto bad;
915 }
916
917 #ifndef PULLDOWN_TEST
918 IP6_EXTHDR_CHECK(m, off, ESPMAXLEN, {return IPPROTO_DONE;});
919 esp = (struct esp *)(void *)(mtod(m, caddr_t) + off);
920 #else
921 IP6_EXTHDR_GET(esp, struct esp *, m, off, ESPMAXLEN);
922 if (esp == NULL) {
923 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
924 return IPPROTO_DONE;
925 }
926 #endif
927 m->m_pkthdr.csum_flags &= ~CSUM_RX_FLAGS;
928
929 /* Expect 32-bit data aligned pointer on strict-align platforms */
930 MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);
931
932 ip6 = mtod(m, struct ip6_hdr *);
933
934 if (ntohs(ip6->ip6_plen) == 0) {
935 ipseclog((LOG_ERR, "IPv6 ESP input: "
936 "ESP with IPv6 jumbogram is not supported.\n"));
937 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
938 goto bad;
939 }
940
941 nproto = ip6_get_prevhdr(m, off);
942 if (nproto == NULL || (*nproto != IPPROTO_ESP &&
943 !(*nproto == IPPROTO_UDP && off >= sizeof(struct udphdr)))) {
944 ipseclog((LOG_DEBUG, "IPv6 ESP input: invalid protocol type\n"));
945 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
946 goto bad;
947 }
948
949 /* find the sassoc. */
950 spi = esp->esp_spi;
951
952 if ((sav = key_allocsa_extended(AF_INET6,
953 (caddr_t)&ip6->ip6_src, (caddr_t)&ip6->ip6_dst,
954 IPPROTO_ESP, spi, interface)) == 0) {
955 ipseclog((LOG_WARNING,
956 "IPv6 ESP input: no key association found for spi %u (0x%08x) seq %u"
957 " src %04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x"
958 " dst %04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x if %s\n",
959 (u_int32_t)ntohl(spi), (u_int32_t)ntohl(spi), ntohl(((struct newesp *)esp)->esp_seq),
960 ntohs(ip6->ip6_src.__u6_addr.__u6_addr16[0]), ntohs(ip6->ip6_src.__u6_addr.__u6_addr16[1]),
961 ntohs(ip6->ip6_src.__u6_addr.__u6_addr16[2]), ntohs(ip6->ip6_src.__u6_addr.__u6_addr16[3]),
962 ntohs(ip6->ip6_src.__u6_addr.__u6_addr16[4]), ntohs(ip6->ip6_src.__u6_addr.__u6_addr16[5]),
963 ntohs(ip6->ip6_src.__u6_addr.__u6_addr16[6]), ntohs(ip6->ip6_src.__u6_addr.__u6_addr16[7]),
964 ntohs(ip6->ip6_dst.__u6_addr.__u6_addr16[0]), ntohs(ip6->ip6_dst.__u6_addr.__u6_addr16[1]),
965 ntohs(ip6->ip6_dst.__u6_addr.__u6_addr16[2]), ntohs(ip6->ip6_dst.__u6_addr.__u6_addr16[3]),
966 ntohs(ip6->ip6_dst.__u6_addr.__u6_addr16[4]), ntohs(ip6->ip6_dst.__u6_addr.__u6_addr16[5]),
967 ntohs(ip6->ip6_dst.__u6_addr.__u6_addr16[6]), ntohs(ip6->ip6_dst.__u6_addr.__u6_addr16[7]),
968 ((interface != NULL) ? if_name(interface) : "NONE")));
969 IPSEC_STAT_INCREMENT(ipsec6stat.in_nosa);
970 goto bad;
971 }
972 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
973 printf("DP esp6_input called to allocate SA:0x%llx\n",
974 (uint64_t)VM_KERNEL_ADDRPERM(sav)));
975 if (sav->state != SADB_SASTATE_MATURE
976 && sav->state != SADB_SASTATE_DYING) {
977 ipseclog((LOG_DEBUG,
978 "IPv6 ESP input: non-mature/dying SA found for spi %u (0x%08x)\n",
979 (u_int32_t)ntohl(spi), (u_int32_t)ntohl(spi)));
980 IPSEC_STAT_INCREMENT(ipsec6stat.in_badspi);
981 goto bad;
982 }
983 algo = esp_algorithm_lookup(sav->alg_enc);
984 if (!algo) {
985 ipseclog((LOG_DEBUG, "IPv6 ESP input: "
986 "unsupported encryption algorithm for spi %u (0x%08x)\n",
987 (u_int32_t)ntohl(spi), (u_int32_t)ntohl(spi)));
988 IPSEC_STAT_INCREMENT(ipsec6stat.in_badspi);
989 goto bad;
990 }
991
992 /* check if we have proper ivlen information */
993 ivlen = sav->ivlen;
994 if (ivlen < 0) {
995 ipseclog((LOG_ERR, "inproper ivlen in IPv6 ESP input: %s %s\n",
996 ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
997 IPSEC_STAT_INCREMENT(ipsec6stat.in_badspi);
998 goto bad;
999 }
1000
1001 seq = ntohl(((struct newesp *)esp)->esp_seq);
1002
1003 if ((sav->flags2 & SADB_X_EXT_SA2_SEQ_PER_TRAFFIC_CLASS) ==
1004 SADB_X_EXT_SA2_SEQ_PER_TRAFFIC_CLASS) {
1005 u_int8_t dscp = (ntohl(ip6->ip6_flow) & IP6FLOW_DSCP_MASK) >> IP6FLOW_DSCP_SHIFT;
1006 traffic_class = rfc4594_dscp_to_tc(dscp);
1007 }
1008
1009 /* Save ICV from packet for verification later */
1010 size_t siz = 0;
1011 unsigned char saved_icv[AH_MAXSUMSIZE];
1012 if (algo->finalizedecrypt) {
1013 siz = algo->icvlen;
1014 m_copydata(m, m->m_pkthdr.len - siz, siz, (caddr_t) saved_icv);
1015 goto delay_icv;
1016 }
1017
1018 if (!((sav->flags & SADB_X_EXT_OLD) == 0 &&
1019 sav->replay[traffic_class] != NULL &&
1020 (sav->alg_auth && sav->key_auth))) {
1021 goto noreplaycheck;
1022 }
1023
1024 if (sav->alg_auth == SADB_X_AALG_NULL ||
1025 sav->alg_auth == SADB_AALG_NONE) {
1026 goto noreplaycheck;
1027 }
1028
1029 /*
1030 * check for sequence number.
1031 */
1032 if (ipsec_chkreplay(seq, sav, traffic_class)) {
1033 ; /*okey*/
1034 } else {
1035 IPSEC_STAT_INCREMENT(ipsec6stat.in_espreplay);
1036 ipseclog((LOG_WARNING,
1037 "replay packet in IPv6 ESP input: %s %s\n",
1038 ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
1039 goto bad;
1040 }
1041
1042 /* check ICV */
1043 {
1044 u_char sum0[AH_MAXSUMSIZE] __attribute__((aligned(4)));
1045 u_char sum[AH_MAXSUMSIZE] __attribute__((aligned(4)));
1046 const struct ah_algorithm *sumalgo;
1047
1048 sumalgo = ah_algorithm_lookup(sav->alg_auth);
1049 if (!sumalgo) {
1050 goto noreplaycheck;
1051 }
1052 siz = (((*sumalgo->sumsiz)(sav) + 3) & ~(4 - 1));
1053 if (m->m_pkthdr.len < off + ESPMAXLEN + siz) {
1054 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
1055 goto bad;
1056 }
1057 if (AH_MAXSUMSIZE < siz) {
1058 ipseclog((LOG_DEBUG,
1059 "internal error: AH_MAXSUMSIZE must be larger than %u\n",
1060 (u_int32_t)siz));
1061 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
1062 goto bad;
1063 }
1064
1065 m_copydata(m, m->m_pkthdr.len - siz, siz, (caddr_t) &sum0[0]);
1066
1067 if (esp_auth(m, off, m->m_pkthdr.len - off - siz, sav, sum)) {
1068 ipseclog((LOG_WARNING, "auth fail in IPv6 ESP input: %s %s\n",
1069 ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
1070 IPSEC_STAT_INCREMENT(ipsec6stat.in_espauthfail);
1071 goto bad;
1072 }
1073
1074 if (cc_cmp_safe(siz, sum0, sum)) {
1075 ipseclog((LOG_WARNING, "auth fail in IPv6 ESP input: %s %s\n",
1076 ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
1077 IPSEC_STAT_INCREMENT(ipsec6stat.in_espauthfail);
1078 goto bad;
1079 }
1080
1081 delay_icv:
1082
1083 /* strip off the authentication data */
1084 m_adj(m, -siz);
1085 ip6 = mtod(m, struct ip6_hdr *);
1086 ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - siz);
1087
1088 m->m_flags |= M_AUTHIPDGM;
1089 IPSEC_STAT_INCREMENT(ipsec6stat.in_espauthsucc);
1090 }
1091
1092 /*
1093 * update sequence number.
1094 */
1095 if ((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay[traffic_class] != NULL) {
1096 if (ipsec_updatereplay(seq, sav, traffic_class)) {
1097 IPSEC_STAT_INCREMENT(ipsec6stat.in_espreplay);
1098 goto bad;
1099 }
1100 }
1101
1102 noreplaycheck:
1103
1104 /* process main esp header. */
1105 if (sav->flags & SADB_X_EXT_OLD) {
1106 /* RFC 1827 */
1107 esplen = sizeof(struct esp);
1108 } else {
1109 /* RFC 2406 */
1110 if (sav->flags & SADB_X_EXT_DERIV) {
1111 esplen = sizeof(struct esp);
1112 } else {
1113 esplen = sizeof(struct newesp);
1114 }
1115 }
1116
1117 if (m->m_pkthdr.len < off + esplen + ivlen + sizeof(esptail)) {
1118 ipseclog((LOG_WARNING,
1119 "IPv6 ESP input: packet too short\n"));
1120 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
1121 goto bad;
1122 }
1123
1124 #ifndef PULLDOWN_TEST
1125 IP6_EXTHDR_CHECK(m, off, esplen + ivlen, return IPPROTO_DONE); /*XXX*/
1126 #else
1127 IP6_EXTHDR_GET(esp, struct esp *, m, off, esplen + ivlen);
1128 if (esp == NULL) {
1129 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
1130 m = NULL;
1131 goto bad;
1132 }
1133 #endif
1134 ip6 = mtod(m, struct ip6_hdr *); /*set it again just in case*/
1135
1136 /*
1137 * pre-compute and cache intermediate key
1138 */
1139 if (esp_schedule(algo, sav) != 0) {
1140 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
1141 goto bad;
1142 }
1143
1144 /*
1145 * decrypt the packet.
1146 */
1147 if (!algo->decrypt) {
1148 panic("internal error: no decrypt function");
1149 }
1150 if ((*algo->decrypt)(m, off, sav, algo, ivlen)) {
1151 /* m is already freed */
1152 m = NULL;
1153 ipseclog((LOG_ERR, "decrypt fail in IPv6 ESP input: %s\n",
1154 ipsec_logsastr(sav)));
1155 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
1156 goto bad;
1157 }
1158 IPSEC_STAT_INCREMENT(ipsec6stat.in_esphist[sav->alg_enc]);
1159
1160 m->m_flags |= M_DECRYPTED;
1161
1162 if (algo->finalizedecrypt) {
1163 if ((*algo->finalizedecrypt)(sav, saved_icv, algo->icvlen)) {
1164 ipseclog((LOG_ERR, "esp6 packet decryption ICV failure\n"));
1165 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
1166 KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 1, 0, 0, 0, 0);
1167 goto bad;
1168 }
1169 }
1170
1171 /*
1172 * find the trailer of the ESP.
1173 */
1174 m_copydata(m, m->m_pkthdr.len - sizeof(esptail), sizeof(esptail),
1175 (caddr_t)&esptail);
1176 nxt = esptail.esp_nxt;
1177 taillen = esptail.esp_padlen + sizeof(esptail);
1178
1179 if (m->m_pkthdr.len < taillen
1180 || m->m_pkthdr.len - taillen < sizeof(struct ip6_hdr)) { /*?*/
1181 ipseclog((LOG_WARNING,
1182 "bad pad length in IPv6 ESP input: %s %s\n",
1183 ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
1184 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
1185 goto bad;
1186 }
1187
1188 /* strip off the trailing pad area. */
1189 m_adj(m, -taillen);
1190 ip6 = mtod(m, struct ip6_hdr *);
1191 ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - taillen);
1192
1193 if (*nproto == IPPROTO_UDP) {
1194 // offset includes the outer ip and udp header lengths.
1195 if (m->m_len < off) {
1196 m = m_pullup(m, off);
1197 if (!m) {
1198 ipseclog((LOG_DEBUG,
1199 "IPv6 ESP input: invalid udp encapsulated ESP packet length\n"));
1200 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
1201 goto bad;
1202 }
1203 ip6 = mtod(m, struct ip6_hdr *);
1204 }
1205
1206 // check the UDP encap header to detect changes in the source port, and then strip the header
1207 off -= sizeof(struct udphdr); // off no longer includes the udphdr's size
1208 // if peer is behind nat and this is the latest esp packet
1209 if ((sav->flags & SADB_X_EXT_NATT_DETECTED_PEER) != 0 &&
1210 (sav->flags & SADB_X_EXT_OLD) == 0 &&
1211 seq && sav->replay[traffic_class] &&
1212 seq >= sav->replay[traffic_class]->lastseq) {
1213 struct udphdr *encap_uh = (__typeof__(encap_uh))(void *)((caddr_t)ip6 + off);
1214 if (encap_uh->uh_sport &&
1215 ntohs(encap_uh->uh_sport) != sav->remote_ike_port) {
1216 sav->remote_ike_port = ntohs(encap_uh->uh_sport);
1217 }
1218 }
1219 ip6 = esp6_input_strip_udp_encap(m, off);
1220 esp = (struct esp *)(void *)(((u_int8_t *)ip6) + off);
1221 }
1222
1223
1224 /* was it transmitted over the IPsec tunnel SA? */
1225 if (ipsec6_tunnel_validate(m, off + esplen + ivlen, nxt, sav, &ifamily)) {
1226 ifaddr_t ifa;
1227 struct sockaddr_storage addr;
1228
1229 /*
1230 * strip off all the headers that precedes ESP header.
1231 * IP6 xx ESP IP6' payload -> IP6' payload
1232 *
1233 * XXX more sanity checks
1234 * XXX relationship with gif?
1235 */
1236 u_int32_t flowinfo; /*net endian*/
1237 flowinfo = ip6->ip6_flow;
1238 m_adj(m, off + esplen + ivlen);
1239 if (ifamily == AF_INET6) {
1240 struct sockaddr_in6 *ip6addr;
1241
1242 if (m->m_len < sizeof(*ip6)) {
1243 #ifndef PULLDOWN_TEST
1244 /*
1245 * m_pullup is prohibited in KAME IPv6 input processing
1246 * but there's no other way!
1247 */
1248 #else
1249 /* okay to pullup in m_pulldown style */
1250 #endif
1251 m = m_pullup(m, sizeof(*ip6));
1252 if (!m) {
1253 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
1254 goto bad;
1255 }
1256 }
1257 ip6 = mtod(m, struct ip6_hdr *);
1258 /* ECN consideration. */
1259 if (ip6_ecn_egress(ip6_ipsec_ecn, &flowinfo, &ip6->ip6_flow) == 0) {
1260 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
1261 goto bad;
1262 }
1263 if (!key_checktunnelsanity(sav, AF_INET6,
1264 (caddr_t)&ip6->ip6_src, (caddr_t)&ip6->ip6_dst)) {
1265 ipseclog((LOG_ERR, "ipsec tunnel address mismatch "
1266 "in IPv6 ESP input: %s %s\n",
1267 ipsec6_logpacketstr(ip6, spi),
1268 ipsec_logsastr(sav)));
1269 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
1270 goto bad;
1271 }
1272
1273 bzero(&addr, sizeof(addr));
1274 ip6addr = (__typeof__(ip6addr)) & addr;
1275 ip6addr->sin6_family = AF_INET6;
1276 ip6addr->sin6_len = sizeof(*ip6addr);
1277 ip6addr->sin6_addr = ip6->ip6_dst;
1278 } else if (ifamily == AF_INET) {
1279 struct sockaddr_in *ipaddr;
1280
1281 if (m->m_len < sizeof(*ip)) {
1282 m = m_pullup(m, sizeof(*ip));
1283 if (!m) {
1284 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
1285 goto bad;
1286 }
1287 }
1288
1289 u_int8_t otos;
1290 int sum;
1291
1292 ip = mtod(m, struct ip *);
1293 otos = ip->ip_tos;
1294 /* ECN consideration. */
1295 if (ip46_ecn_egress(ip6_ipsec_ecn, &flowinfo, &ip->ip_tos) == 0) {
1296 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
1297 goto bad;
1298 }
1299
1300 if (otos != ip->ip_tos) {
1301 sum = ~ntohs(ip->ip_sum) & 0xffff;
1302 sum += (~otos & 0xffff) + ip->ip_tos;
1303 sum = (sum >> 16) + (sum & 0xffff);
1304 sum += (sum >> 16); /* add carry */
1305 ip->ip_sum = htons(~sum & 0xffff);
1306 }
1307
1308 if (!key_checktunnelsanity(sav, AF_INET,
1309 (caddr_t)&ip->ip_src, (caddr_t)&ip->ip_dst)) {
1310 ipseclog((LOG_ERR, "ipsec tunnel address mismatch "
1311 "in ESP input: %s %s\n",
1312 ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
1313 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
1314 goto bad;
1315 }
1316
1317 bzero(&addr, sizeof(addr));
1318 ipaddr = (__typeof__(ipaddr)) & addr;
1319 ipaddr->sin_family = AF_INET;
1320 ipaddr->sin_len = sizeof(*ipaddr);
1321 ipaddr->sin_addr = ip->ip_dst;
1322 }
1323
1324 key_sa_recordxfer(sav, m);
1325 if (ipsec_addhist(m, IPPROTO_ESP, spi) != 0 ||
1326 ipsec_addhist(m, IPPROTO_IPV6, 0) != 0) {
1327 IPSEC_STAT_INCREMENT(ipsec6stat.in_nomem);
1328 goto bad;
1329 }
1330
1331 // update the receiving interface address based on the inner address
1332 ifa = ifa_ifwithaddr((struct sockaddr *)&addr);
1333 if (ifa) {
1334 m->m_pkthdr.rcvif = ifa->ifa_ifp;
1335 IFA_REMREF(ifa);
1336 }
1337
1338 // Input via IPsec interface
1339 lck_mtx_lock(sadb_mutex);
1340 ifnet_t ipsec_if = sav->sah->ipsec_if;
1341 if (ipsec_if != NULL) {
1342 // If an interface is found, add a reference count before dropping the lock
1343 ifnet_reference(ipsec_if);
1344 }
1345 lck_mtx_unlock(sadb_mutex);
1346 if (ipsec_if != NULL) {
1347 esp_input_log(m, sav, spi, seq);
1348 ipsec_save_wake_packet(m, ntohl(spi), seq);
1349
1350 // Return mbuf
1351 if (interface != NULL &&
1352 interface == ipsec_if) {
1353 ifnet_release(ipsec_if);
1354 goto done;
1355 }
1356
1357 errno_t inject_error = ipsec_inject_inbound_packet(ipsec_if, m);
1358 ifnet_release(ipsec_if);
1359
1360 if (inject_error == 0) {
1361 m = NULL;
1362 nxt = IPPROTO_DONE;
1363 goto done;
1364 } else {
1365 goto bad;
1366 }
1367 }
1368
1369 if (proto_input(ifamily == AF_INET ? PF_INET : PF_INET6, m) != 0) {
1370 goto bad;
1371 }
1372 nxt = IPPROTO_DONE;
1373 } else {
1374 /*
1375 * strip off ESP header and IV.
1376 * even in m_pulldown case, we need to strip off ESP so that
1377 * we can always compute checksum for AH correctly.
1378 */
1379 size_t stripsiz;
1380 char *prvnxtp;
1381
1382 /*
1383 * Set the next header field of the previous header correctly.
1384 */
1385 prvnxtp = ip6_get_prevhdr(m, off); /* XXX */
1386 *prvnxtp = nxt;
1387
1388 stripsiz = esplen + ivlen;
1389
1390 ip6 = mtod(m, struct ip6_hdr *);
1391 if (m->m_len >= stripsiz + off) {
1392 ovbcopy((caddr_t)ip6, ((caddr_t)ip6) + stripsiz, off);
1393 m->m_data += stripsiz;
1394 m->m_len -= stripsiz;
1395 m->m_pkthdr.len -= stripsiz;
1396 } else {
1397 /*
1398 * this comes with no copy if the boundary is on
1399 * cluster
1400 */
1401 struct mbuf *n;
1402
1403 n = m_split(m, off, M_DONTWAIT);
1404 if (n == NULL) {
1405 /* m is retained by m_split */
1406 goto bad;
1407 }
1408 m_adj(n, stripsiz);
1409 /* m_cat does not update m_pkthdr.len */
1410 m->m_pkthdr.len += n->m_pkthdr.len;
1411 m_cat(m, n);
1412 }
1413
1414 #ifndef PULLDOWN_TEST
1415 /*
1416 * KAME requires that the packet to be contiguous on the
1417 * mbuf. We need to make that sure.
1418 * this kind of code should be avoided.
1419 * XXX other conditions to avoid running this part?
1420 */
1421 if (m->m_len != m->m_pkthdr.len) {
1422 struct mbuf *n = NULL;
1423 int maxlen;
1424
1425 MGETHDR(n, M_DONTWAIT, MT_HEADER); /* MAC-OK */
1426 maxlen = MHLEN;
1427 if (n) {
1428 M_COPY_PKTHDR(n, m);
1429 }
1430 if (n && m->m_pkthdr.len > maxlen) {
1431 MCLGET(n, M_DONTWAIT);
1432 maxlen = MCLBYTES;
1433 if ((n->m_flags & M_EXT) == 0) {
1434 m_free(n);
1435 n = NULL;
1436 }
1437 }
1438 if (!n) {
1439 printf("esp6_input: mbuf allocation failed\n");
1440 goto bad;
1441 }
1442
1443 if (m->m_pkthdr.len <= maxlen) {
1444 m_copydata(m, 0, m->m_pkthdr.len, mtod(n, caddr_t));
1445 n->m_len = m->m_pkthdr.len;
1446 n->m_pkthdr.len = m->m_pkthdr.len;
1447 n->m_next = NULL;
1448 m_freem(m);
1449 } else {
1450 m_copydata(m, 0, maxlen, mtod(n, caddr_t));
1451 n->m_len = maxlen;
1452 n->m_pkthdr.len = m->m_pkthdr.len;
1453 n->m_next = m;
1454 m_adj(m, maxlen);
1455 m->m_flags &= ~M_PKTHDR;
1456 }
1457 m = n;
1458 }
1459 #endif
1460 ip6 = mtod(m, struct ip6_hdr *);
1461 ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - stripsiz);
1462
1463 key_sa_recordxfer(sav, m);
1464 if (ipsec_addhist(m, IPPROTO_ESP, spi) != 0) {
1465 IPSEC_STAT_INCREMENT(ipsec6stat.in_nomem);
1466 goto bad;
1467 }
1468
1469 /*
1470 * Set the csum valid flag, if we authenticated the
1471 * packet, the payload shouldn't be corrupt unless
1472 * it was corrupted before being signed on the other
1473 * side.
1474 */
1475 if (nxt == IPPROTO_TCP || nxt == IPPROTO_UDP) {
1476 m->m_pkthdr.csum_flags = CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
1477 m->m_pkthdr.csum_data = 0xFFFF;
1478 _CASSERT(offsetof(struct pkthdr, csum_data) == offsetof(struct pkthdr, csum_rx_val));
1479 }
1480
1481 // Input via IPsec interface
1482 lck_mtx_lock(sadb_mutex);
1483 ifnet_t ipsec_if = sav->sah->ipsec_if;
1484 if (ipsec_if != NULL) {
1485 // If an interface is found, add a reference count before dropping the lock
1486 ifnet_reference(ipsec_if);
1487 }
1488 lck_mtx_unlock(sadb_mutex);
1489 if (ipsec_if != NULL) {
1490 esp_input_log(m, sav, spi, seq);
1491 ipsec_save_wake_packet(m, ntohl(spi), seq);
1492
1493 // Return mbuf
1494 if (interface != NULL &&
1495 interface == ipsec_if) {
1496 ifnet_release(ipsec_if);
1497 goto done;
1498 }
1499
1500 errno_t inject_error = ipsec_inject_inbound_packet(ipsec_if, m);
1501 ifnet_release(ipsec_if);
1502
1503 if (inject_error == 0) {
1504 m = NULL;
1505 nxt = IPPROTO_DONE;
1506 goto done;
1507 } else {
1508 goto bad;
1509 }
1510 }
1511 }
1512
1513 done:
1514 *offp = off;
1515 *mp = m;
1516 if (sav) {
1517 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
1518 printf("DP esp6_input call free SA:0x%llx\n",
1519 (uint64_t)VM_KERNEL_ADDRPERM(sav)));
1520 key_freesav(sav, KEY_SADB_UNLOCKED);
1521 }
1522 IPSEC_STAT_INCREMENT(ipsec6stat.in_success);
1523 return nxt;
1524
1525 bad:
1526 if (sav) {
1527 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
1528 printf("DP esp6_input call free SA:0x%llx\n",
1529 (uint64_t)VM_KERNEL_ADDRPERM(sav)));
1530 key_freesav(sav, KEY_SADB_UNLOCKED);
1531 }
1532 if (m) {
1533 m_freem(m);
1534 }
1535 if (interface != NULL) {
1536 *mp = NULL;
1537 }
1538 return IPPROTO_DONE;
1539 }
1540
/*
 * Control-input handler for IPv6 ESP: invoked by the ICMPv6 machinery
 * (via pfctlinput2/pr_ctlinput) when an ICMPv6 error refers to an ESP
 * packet we sent.
 *
 * cmd - PRC_* code decoded from the ICMPv6 error.
 * sa  - final destination of the offending packet (must be sockaddr_in6).
 * d   - if non-NULL, a struct ip6ctlparam describing the ICMPv6 payload
 *       (mbuf, inner IPv6 header, and offset of the ESP header within it);
 *       NULL when the notification does not come from icmp6.
 * ifp - unused.
 *
 * Because the upper-layer ports are encrypted inside ESP, we cannot
 * dispatch to an upper-layer ctlinput directly; instead we renotify via
 * pfctlinput2 with only addressing information, and specially handle
 * PRC_MSGSIZE for path-MTU discovery.
 */
void
esp6_ctlinput(int cmd, struct sockaddr *sa, void *d, __unused struct ifnet *ifp)
{
	const struct newesp *espp;
	struct newesp esp;
	struct ip6ctlparam *ip6cp = NULL, ip6cp1;
	struct secasvar *sav;
	struct ip6_hdr *ip6;
	struct mbuf *m;
	int off = 0;
	struct sockaddr_in6 *sa6_src, *sa6_dst;

	/* Sanity: notification must carry a well-formed IPv6 destination. */
	if (sa->sa_family != AF_INET6 ||
	    sa->sa_len != sizeof(struct sockaddr_in6)) {
		return;
	}
	/* Ignore PRC codes outside the table range. */
	if ((unsigned)cmd >= PRC_NCMDS) {
		return;
	}

	/* if the parameter is from icmp6, decode it. */
	if (d != NULL) {
		ip6cp = (struct ip6ctlparam *)d;
		m = ip6cp->ip6c_m;
		ip6 = ip6cp->ip6c_ip6;
		off = ip6cp->ip6c_off;
	} else {
		m = NULL;
		ip6 = NULL;
	}

	if (ip6) {
		/*
		 * Notify the error to all possible sockets via pfctlinput2.
		 * Since the upper layer information (such as protocol type,
		 * source and destination ports) is embedded in the encrypted
		 * data and might have been cut, we can't directly call
		 * an upper layer ctlinput function. However, the pcbnotify
		 * function will consider source and destination addresses
		 * as well as the flow info value, and may be able to find
		 * some PCB that should be notified.
		 * Although pfctlinput2 will call esp6_ctlinput(), there is
		 * no possibility of an infinite loop of function calls,
		 * because we don't pass the inner IPv6 header.
		 */
		bzero(&ip6cp1, sizeof(ip6cp1));
		ip6cp1.ip6c_src = ip6cp->ip6c_src;
		pfctlinput2(cmd, sa, (void *)&ip6cp1);

		/*
		 * Then go to special cases that need ESP header information.
		 * XXX: We assume that when ip6 is non NULL,
		 * M and OFF are valid.
		 */

		/* check if we can safely examine src and dst ports */
		if (m->m_pkthdr.len < off + sizeof(esp)) {
			return;
		}

		if (m->m_len < off + sizeof(esp)) {
			/*
			 * ESP header straddles mbufs: this should be a rare
			 * case, so we compromise on this copy...
			 */
			m_copydata(m, off, sizeof(esp), (caddr_t)&esp);
			espp = &esp;
		} else {
			/* Contiguous in the first mbuf; point into it directly. */
			espp = (struct newesp*)(void *)(mtod(m, caddr_t) + off);
		}

		if (cmd == PRC_MSGSIZE) {
			int valid = 0;

			/*
			 * Check to see if we have a valid SA corresponding to
			 * the address in the ICMP message payload; only then
			 * is the MTU notification trusted.
			 */
			sa6_src = ip6cp->ip6c_src;
			sa6_dst = (struct sockaddr_in6 *)(void *)sa;
			sav = key_allocsa(AF_INET6,
			    (caddr_t)&sa6_src->sin6_addr,
			    (caddr_t)&sa6_dst->sin6_addr,
			    IPPROTO_ESP, espp->esp_spi);
			if (sav) {
				if (sav->state == SADB_SASTATE_MATURE ||
				    sav->state == SADB_SASTATE_DYING) {
					valid++;
				}
				/* key_allocsa took a reference; drop it. */
				key_freesav(sav, KEY_SADB_UNLOCKED);
			}

			/* XXX Further validation? */

			/*
			 * Depending on the value of "valid" and routing table
			 * size (mtudisc_{hi,lo}wat), we will:
			 * - recalcurate the new MTU and create the
			 *   corresponding routing entry, or
			 * - ignore the MTU change notification.
			 */
			icmp6_mtudisc_update((struct ip6ctlparam *)d, valid);
		}
	} else {
		/* we normally notify any pcb here */
	}
}
1648 #endif /* INET6 */