]> git.saurik.com Git - apple/xnu.git/blob - bsd/netinet6/esp_input.c
xnu-7195.60.75.tar.gz
[apple/xnu.git] / bsd / netinet6 / esp_input.c
1 /*
2 * Copyright (c) 2008-2016 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 /* $FreeBSD: src/sys/netinet6/esp_input.c,v 1.1.2.3 2001/07/03 11:01:50 ume Exp $ */
30 /* $KAME: esp_input.c,v 1.55 2001/03/23 08:08:47 itojun Exp $ */
31
32 /*
33 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 * 1. Redistributions of source code must retain the above copyright
40 * notice, this list of conditions and the following disclaimer.
41 * 2. Redistributions in binary form must reproduce the above copyright
42 * notice, this list of conditions and the following disclaimer in the
43 * documentation and/or other materials provided with the distribution.
44 * 3. Neither the name of the project nor the names of its contributors
45 * may be used to endorse or promote products derived from this software
46 * without specific prior written permission.
47 *
48 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
49 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
50 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
51 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
52 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
53 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
54 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
55 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
56 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
57 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 * SUCH DAMAGE.
59 */
60
61 #define _IP_VHL
62
63 /*
64 * RFC1827/2406 Encapsulated Security Payload.
65 */
66
67 #include <sys/param.h>
68 #include <sys/systm.h>
69 #include <sys/malloc.h>
70 #include <sys/mbuf.h>
71 #include <sys/mcache.h>
72 #include <sys/domain.h>
73 #include <sys/protosw.h>
74 #include <sys/socket.h>
75 #include <sys/errno.h>
76 #include <sys/time.h>
77 #include <sys/kernel.h>
78 #include <sys/syslog.h>
79
80 #include <net/if.h>
81 #include <net/if_ipsec.h>
82 #include <net/route.h>
83 #include <kern/cpu_number.h>
84 #include <kern/locks.h>
85
86 #include <netinet/in.h>
87 #include <netinet/in_systm.h>
88 #include <netinet/ip.h>
89 #include <netinet/ip_var.h>
90 #include <netinet/in_var.h>
91 #include <netinet/ip_ecn.h>
92 #include <netinet/in_pcb.h>
93 #include <netinet/udp.h>
94 #include <netinet/tcp.h>
95 #include <netinet/in_tclass.h>
96 #include <netinet6/ip6_ecn.h>
97
98 #include <netinet/ip6.h>
99 #include <netinet6/in6_pcb.h>
100 #include <netinet6/ip6_var.h>
101 #include <netinet/icmp6.h>
102 #include <netinet6/ip6protosw.h>
103
104 #include <netinet6/ipsec.h>
105 #include <netinet6/ipsec6.h>
106 #include <netinet6/ah.h>
107 #include <netinet6/ah6.h>
108 #include <netinet6/esp.h>
109 #include <netinet6/esp6.h>
110 #include <netkey/key.h>
111 #include <netkey/keydb.h>
112 #include <netkey/key_debug.h>
113
114 #include <net/kpi_protocol.h>
115 #include <netinet/kpi_ipfilter_var.h>
116
117 #include <net/net_osdep.h>
118 #include <mach/sdt.h>
119 #include <corecrypto/cc.h>
120
121 #include <sys/kdebug.h>
122 #define DBG_LAYER_BEG NETDBG_CODE(DBG_NETIPSEC, 1)
123 #define DBG_LAYER_END NETDBG_CODE(DBG_NETIPSEC, 3)
124 #define DBG_FNC_ESPIN NETDBG_CODE(DBG_NETIPSEC, (6 << 8))
125 #define DBG_FNC_DECRYPT NETDBG_CODE(DBG_NETIPSEC, (7 << 8))
126 #define IPLEN_FLIPPED
127
128 extern lck_mtx_t *sadb_mutex;
129
130 #define ESPMAXLEN \
131 (sizeof(struct esp) < sizeof(struct newesp) \
132 ? sizeof(struct newesp) : sizeof(struct esp))
133
134 static struct ip *
135 esp4_input_strip_udp_encap(struct mbuf *m, int iphlen)
136 {
137 // strip the udp header that's encapsulating ESP
138 struct ip *ip;
139 u_int8_t stripsiz = (u_int8_t)sizeof(struct udphdr);
140
141 ip = mtod(m, __typeof__(ip));
142 ovbcopy((caddr_t)ip, (caddr_t)(((u_char *)ip) + stripsiz), iphlen);
143 m->m_data += stripsiz;
144 m->m_len -= stripsiz;
145 m->m_pkthdr.len -= stripsiz;
146 ip = mtod(m, __typeof__(ip));
147 ip->ip_len = ip->ip_len - stripsiz;
148 ip->ip_p = IPPROTO_ESP;
149 return ip;
150 }
151
152 static struct ip6_hdr *
153 esp6_input_strip_udp_encap(struct mbuf *m, int ip6hlen)
154 {
155 // strip the udp header that's encapsulating ESP
156 struct ip6_hdr *ip6;
157 u_int8_t stripsiz = (u_int8_t)sizeof(struct udphdr);
158
159 ip6 = mtod(m, __typeof__(ip6));
160 ovbcopy((caddr_t)ip6, (caddr_t)(((u_char *)ip6) + stripsiz), ip6hlen);
161 m->m_data += stripsiz;
162 m->m_len -= stripsiz;
163 m->m_pkthdr.len -= stripsiz;
164 ip6 = mtod(m, __typeof__(ip6));
165 ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - stripsiz);
166 ip6->ip6_nxt = IPPROTO_ESP;
167 return ip6;
168 }
169
170 static void
171 esp_input_log(struct mbuf *m, struct secasvar *sav, u_int32_t spi, u_int32_t seq)
172 {
173 if (net_mpklog_enabled &&
174 (sav->sah->ipsec_if->if_xflags & IFXF_MPK_LOG) == IFXF_MPK_LOG) {
175 struct tcphdr th = {};
176 u_int32_t proto_len = 0;
177 u_int8_t iphlen = 0;
178 u_int8_t proto = 0;
179
180 struct ip *inner_ip = mtod(m, struct ip *);
181 if (IP_VHL_V(inner_ip->ip_vhl) == 4) {
182 iphlen = (u_int8_t)(IP_VHL_HL(inner_ip->ip_vhl) << 2);
183 proto = inner_ip->ip_p;
184 } else if (IP_VHL_V(inner_ip->ip_vhl) == 6) {
185 struct ip6_hdr *inner_ip6 = mtod(m, struct ip6_hdr *);
186 iphlen = sizeof(struct ip6_hdr);
187 proto = inner_ip6->ip6_nxt;
188 }
189
190 if (proto == IPPROTO_TCP) {
191 if ((int)(iphlen + sizeof(th)) <= m->m_pkthdr.len) {
192 m_copydata(m, iphlen, sizeof(th), (u_int8_t *)&th);
193 }
194
195 proto_len = m->m_pkthdr.len - iphlen - (th.th_off << 2);
196 MPKL_ESP_INPUT_TCP(esp_mpkl_log_object,
197 ntohl(spi), seq,
198 ntohs(th.th_sport), ntohs(th.th_dport),
199 ntohl(th.th_seq), proto_len);
200 }
201 }
202 }
203
204 void
205 esp4_input(struct mbuf *m, int off)
206 {
207 (void)esp4_input_extended(m, off, NULL);
208 }
209
/*
 * Process an inbound IPv4 ESP packet at offset `off` into mbuf `m`.
 *
 * Flow: sanity/alignment checks -> SA lookup (key_allocsa_extended) ->
 * replay check + ICV verification (or deferred ICV for AEAD algorithms,
 * see the `delay_icv` label) -> decrypt -> strip ESP trailer -> strip
 * optional UDP encapsulation -> deliver either as a tunnel-mode inner
 * packet (proto_input / IPsec interface inject) or as a transport-mode
 * payload (ip_proto_dispatch_in).
 *
 * `interface`, when non-NULL, restricts SA lookup; if the SA's IPsec
 * interface equals `interface`, the processed mbuf is RETURNED to the
 * caller instead of being re-injected. In every other success path the
 * mbuf is consumed and NULL is returned; on error the mbuf is freed.
 *
 * Caller guarantees (from esp4_input/ip_input): `off` points just past
 * the outer IP (and UDP, if present) headers.
 */
struct mbuf *
esp4_input_extended(struct mbuf *m, int off, ifnet_t interface)
{
	struct ip *ip;
	struct ip6_hdr *ip6;
	struct esp *esp;
	struct esptail esptail;
	u_int32_t spi;
	u_int32_t seq;
	struct secasvar *sav = NULL;
	size_t taillen;
	u_int16_t nxt;
	const struct esp_algorithm *algo;
	int ivlen;
	size_t esplen;
	u_int8_t hlen;
	sa_family_t ifamily;
	struct mbuf *out_m = NULL;      /* set only on the return-to-caller path */
	mbuf_traffic_class_t traffic_class = 0;

	KERNEL_DEBUG(DBG_FNC_ESPIN | DBG_FUNC_START, 0, 0, 0, 0, 0);
	/* sanity check for alignment. */
	if (off % 4 != 0 || m->m_pkthdr.len % 4 != 0) {
		ipseclog((LOG_ERR, "IPv4 ESP input: packet alignment problem "
		    "(off=%d, pktlen=%d)\n", off, m->m_pkthdr.len));
		IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
		goto bad;
	}

	/* Make the fixed ESP header contiguous in the first mbuf. */
	if (m->m_len < off + ESPMAXLEN) {
		m = m_pullup(m, off + ESPMAXLEN);
		if (!m) {
			ipseclog((LOG_DEBUG,
			    "IPv4 ESP input: can't pullup in esp4_input\n"));
			IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
			goto bad;
		}
	}

	m->m_pkthdr.csum_flags &= ~CSUM_RX_FLAGS;

	/* Expect 32-bit aligned data pointer on strict-align platforms */
	MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);

	ip = mtod(m, struct ip *);
	// expect udp-encap and esp packets only
	if (ip->ip_p != IPPROTO_ESP &&
	    !(ip->ip_p == IPPROTO_UDP && off >= sizeof(struct udphdr))) {
		ipseclog((LOG_DEBUG,
		    "IPv4 ESP input: invalid protocol type\n"));
		IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
		goto bad;
	}
	esp = (struct esp *)(void *)(((u_int8_t *)ip) + off);
#ifdef _IP_VHL
	hlen = (u_int8_t)(IP_VHL_HL(ip->ip_vhl) << 2);
#else
	hlen = ip->ip_hl << 2;
#endif

	/* find the sassoc. */
	spi = esp->esp_spi;     /* kept in network byte order throughout */

	if ((sav = key_allocsa_extended(AF_INET,
	    (caddr_t)&ip->ip_src, (caddr_t)&ip->ip_dst,
	    IPPROTO_ESP, spi, interface)) == 0) {
		ipseclog((LOG_WARNING,
		    "IPv4 ESP input: no key association found for spi %u (0x%08x)\n",
		    (u_int32_t)ntohl(spi), (u_int32_t)ntohl(spi)));
		IPSEC_STAT_INCREMENT(ipsecstat.in_nosa);
		goto bad;
	}
	KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
	    printf("DP esp4_input called to allocate SA:0x%llx\n",
	    (uint64_t)VM_KERNEL_ADDRPERM(sav)));
	if (sav->state != SADB_SASTATE_MATURE
	    && sav->state != SADB_SASTATE_DYING) {
		ipseclog((LOG_DEBUG,
		    "IPv4 ESP input: non-mature/dying SA found for spi %u (0x%08x)\n",
		    (u_int32_t)ntohl(spi), (u_int32_t)ntohl(spi)));
		IPSEC_STAT_INCREMENT(ipsecstat.in_badspi);
		goto bad;
	}
	algo = esp_algorithm_lookup(sav->alg_enc);
	if (!algo) {
		ipseclog((LOG_DEBUG, "IPv4 ESP input: "
		    "unsupported encryption algorithm for spi %u (0x%08x)\n",
		    (u_int32_t)ntohl(spi), (u_int32_t)ntohl(spi)));
		IPSEC_STAT_INCREMENT(ipsecstat.in_badspi);
		goto bad;
	}

	/* check if we have proper ivlen information */
	ivlen = sav->ivlen;
	if (ivlen < 0) {
		ipseclog((LOG_ERR, "inproper ivlen in IPv4 ESP input: %s %s\n",
		    ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
		IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
		goto bad;
	}

	/* Sequence number is valid for new-format (RFC 2406/4303) ESP. */
	seq = ntohl(((struct newesp *)esp)->esp_seq);

	/* Per-traffic-class sequence spaces: derive the class from DSCP. */
	if ((sav->flags2 & SADB_X_EXT_SA2_SEQ_PER_TRAFFIC_CLASS) ==
	    SADB_X_EXT_SA2_SEQ_PER_TRAFFIC_CLASS) {
		u_int8_t dscp = ip->ip_tos >> IPTOS_DSCP_SHIFT;
		traffic_class = rfc4594_dscp_to_tc(dscp);
	}

	/* Save ICV from packet for verification later */
	size_t siz = 0;
	unsigned char saved_icv[AH_MAXSUMSIZE];
	if (algo->finalizedecrypt) {
		/*
		 * Combined-mode (AEAD) algorithm: stash the trailing ICV now
		 * and verify it after decryption via finalizedecrypt. This
		 * jumps over the separate-auth replay/ICV checks below.
		 */
		siz = algo->icvlen;
		VERIFY(siz <= USHRT_MAX);
		m_copydata(m, m->m_pkthdr.len - (u_short)siz, (u_short)siz, (caddr_t) saved_icv);
		goto delay_icv;
	}

	if (!((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay[traffic_class] != NULL &&
	    (sav->alg_auth && sav->key_auth))) {
		goto noreplaycheck;
	}

	if (sav->alg_auth == SADB_X_AALG_NULL ||
	    sav->alg_auth == SADB_AALG_NONE) {
		goto noreplaycheck;
	}

	/*
	 * check for sequence number.
	 */
	_CASSERT(MBUF_TC_MAX <= UINT8_MAX);
	if (ipsec_chkreplay(seq, sav, (u_int8_t)traffic_class)) {
		; /*okey*/
	} else {
		IPSEC_STAT_INCREMENT(ipsecstat.in_espreplay);
		ipseclog((LOG_WARNING,
		    "replay packet in IPv4 ESP input: %s %s\n",
		    ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
		goto bad;
	}

	/* check ICV */
	{
		u_char sum0[AH_MAXSUMSIZE] __attribute__((aligned(4)));
		u_char sum[AH_MAXSUMSIZE] __attribute__((aligned(4)));
		const struct ah_algorithm *sumalgo;

		sumalgo = ah_algorithm_lookup(sav->alg_auth);
		if (!sumalgo) {
			goto noreplaycheck;
		}
		/* ICV length rounded up to a 4-byte multiple. */
		siz = (((*sumalgo->sumsiz)(sav) + 3) & ~(4 - 1));
		if (m->m_pkthdr.len < off + ESPMAXLEN + siz) {
			IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
			goto bad;
		}
		if (AH_MAXSUMSIZE < siz) {
			ipseclog((LOG_DEBUG,
			    "internal error: AH_MAXSUMSIZE must be larger than %u\n",
			    (u_int32_t)siz));
			IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
			goto bad;
		}

		/* sum0 = ICV from the wire; sum = ICV recomputed locally. */
		m_copydata(m, m->m_pkthdr.len - (int)siz, (int)siz, (caddr_t) &sum0[0]);

		if (esp_auth(m, off, m->m_pkthdr.len - off - siz, sav, sum)) {
			ipseclog((LOG_WARNING, "auth fail in IPv4 ESP input: %s %s\n",
			    ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
			IPSEC_STAT_INCREMENT(ipsecstat.in_espauthfail);
			goto bad;
		}

		/* Constant-time compare to avoid a timing side channel. */
		if (cc_cmp_safe(siz, sum0, sum)) {
			ipseclog((LOG_WARNING, "cc_cmp fail in IPv4 ESP input: %s %s\n",
			    ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
			IPSEC_STAT_INCREMENT(ipsecstat.in_espauthfail);
			goto bad;
		}

delay_icv:
		/* AEAD path re-enters here with siz = algo->icvlen. */

		/* strip off the authentication data */
		m_adj(m, (int)-siz);
		ip = mtod(m, struct ip *);
#ifdef IPLEN_FLIPPED
		ip->ip_len = ip->ip_len - (u_short)siz;
#else
		ip->ip_len = htons(ntohs(ip->ip_len) - siz);
#endif
		m->m_flags |= M_AUTHIPDGM;
		IPSEC_STAT_INCREMENT(ipsecstat.in_espauthsucc);
	}

	/*
	 * update sequence number.
	 */
	if ((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay[traffic_class] != NULL) {
		if (ipsec_updatereplay(seq, sav, (u_int8_t)traffic_class)) {
			IPSEC_STAT_INCREMENT(ipsecstat.in_espreplay);
			goto bad;
		}
	}

noreplaycheck:

	/* process main esp header. */
	if (sav->flags & SADB_X_EXT_OLD) {
		/* RFC 1827 */
		esplen = sizeof(struct esp);
	} else {
		/* RFC 2406 */
		if (sav->flags & SADB_X_EXT_DERIV) {
			esplen = sizeof(struct esp);
		} else {
			esplen = sizeof(struct newesp);
		}
	}

	if (m->m_pkthdr.len < off + esplen + ivlen + sizeof(esptail)) {
		ipseclog((LOG_WARNING,
		    "IPv4 ESP input: packet too short\n"));
		IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
		goto bad;
	}

	if (m->m_len < off + esplen + ivlen) {
		m = m_pullup(m, (int)(off + esplen + ivlen));
		if (!m) {
			ipseclog((LOG_DEBUG,
			    "IPv4 ESP input: can't pullup in esp4_input\n"));
			IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
			goto bad;
		}
	}

	/*
	 * pre-compute and cache intermediate key
	 */
	if (esp_schedule(algo, sav) != 0) {
		IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
		goto bad;
	}

	/*
	 * decrypt the packet.
	 */
	if (!algo->decrypt) {
		panic("internal error: no decrypt function");
	}
	KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_START, 0, 0, 0, 0, 0);
	if ((*algo->decrypt)(m, off, sav, algo, ivlen)) {
		/* m is already freed */
		m = NULL;
		ipseclog((LOG_ERR, "decrypt fail in IPv4 ESP input: %s\n",
		    ipsec_logsastr(sav)));
		IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
		KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 1, 0, 0, 0, 0);
		goto bad;
	}
	KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 2, 0, 0, 0, 0);
	IPSEC_STAT_INCREMENT(ipsecstat.in_esphist[sav->alg_enc]);

	m->m_flags |= M_DECRYPTED;

	/* AEAD: verify the ICV that was saved before decryption. */
	if (algo->finalizedecrypt) {
		if ((*algo->finalizedecrypt)(sav, saved_icv, algo->icvlen)) {
			ipseclog((LOG_ERR, "esp4 packet decryption ICV failure\n"));
			IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
			KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 1, 0, 0, 0, 0);
			goto bad;
		}
	}

	/*
	 * find the trailer of the ESP.
	 */
	m_copydata(m, m->m_pkthdr.len - sizeof(esptail), sizeof(esptail),
	    (caddr_t)&esptail);
	nxt = esptail.esp_nxt;
	taillen = esptail.esp_padlen + sizeof(esptail);

	if (m->m_pkthdr.len < taillen
	    || m->m_pkthdr.len - taillen < hlen) {      /*?*/
		ipseclog((LOG_WARNING,
		    "bad pad length in IPv4 ESP input: %s %s\n",
		    ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
		IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
		goto bad;
	}

	/* strip off the trailing pad area. */
	m_adj(m, (int)-taillen);
	ip = mtod(m, struct ip *);
#ifdef IPLEN_FLIPPED
	ip->ip_len = ip->ip_len - (u_short)taillen;
#else
	ip->ip_len = htons(ntohs(ip->ip_len) - taillen);
#endif
	if (ip->ip_p == IPPROTO_UDP) {
		// offset includes the outer ip and udp header lengths.
		if (m->m_len < off) {
			m = m_pullup(m, off);
			if (!m) {
				ipseclog((LOG_DEBUG,
				    "IPv4 ESP input: invalid udp encapsulated ESP packet length \n"));
				IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
				goto bad;
			}
			ip = mtod(m, struct ip *);
		}

		// check the UDP encap header to detect changes in the source port, and then strip the header
		off -= sizeof(struct udphdr); // off no longer includes the udphdr's size
		// if peer is behind nat and this is the latest esp packet
		if ((sav->flags & SADB_X_EXT_NATT_DETECTED_PEER) != 0 &&
		    (sav->flags & SADB_X_EXT_OLD) == 0 &&
		    seq && sav->replay[traffic_class] &&
		    seq >= sav->replay[traffic_class]->lastseq) {
			struct udphdr *encap_uh = (__typeof__(encap_uh))(void *)((caddr_t)ip + off);
			/* Track a NAT rebinding of the peer's IKE/ESP port. */
			if (encap_uh->uh_sport &&
			    ntohs(encap_uh->uh_sport) != sav->remote_ike_port) {
				sav->remote_ike_port = ntohs(encap_uh->uh_sport);
			}
		}
		ip = esp4_input_strip_udp_encap(m, off);
		esp = (struct esp *)(void *)(((u_int8_t *)ip) + off);
	}

	/* was it transmitted over the IPsec tunnel SA? */
	if (ipsec4_tunnel_validate(m, (int)(off + esplen + ivlen), nxt, sav, &ifamily)) {
		ifaddr_t ifa;
		struct sockaddr_storage addr;

		/*
		 * strip off all the headers that precedes ESP header.
		 *   IP4 xx ESP IP4' payload -> IP4' payload
		 *
		 * XXX more sanity checks
		 * XXX relationship with gif?
		 */
		u_int8_t tos, otos;
		int sum;

		tos = ip->ip_tos;
		m_adj(m, (int)(off + esplen + ivlen));
		if (ifamily == AF_INET) {
			struct sockaddr_in *ipaddr;

			if (m->m_len < sizeof(*ip)) {
				m = m_pullup(m, sizeof(*ip));
				if (!m) {
					IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
					goto bad;
				}
			}
			ip = mtod(m, struct ip *);
			/* ECN consideration. */

			otos = ip->ip_tos;
			if (ip_ecn_egress(ip4_ipsec_ecn, &tos, &ip->ip_tos) == 0) {
				IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
				goto bad;
			}

			/* Incrementally patch the IP checksum if TOS changed. */
			if (otos != ip->ip_tos) {
				sum = ~ntohs(ip->ip_sum) & 0xffff;
				sum += (~otos & 0xffff) + ip->ip_tos;
				sum = (sum >> 16) + (sum & 0xffff);
				sum += (sum >> 16); /* add carry */
				ip->ip_sum = htons(~sum & 0xffff);
			}

			if (!key_checktunnelsanity(sav, AF_INET,
			    (caddr_t)&ip->ip_src, (caddr_t)&ip->ip_dst)) {
				ipseclog((LOG_ERR, "ipsec tunnel address mismatch "
				    "in ESP input: %s %s\n",
				    ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
				IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
				goto bad;
			}

			bzero(&addr, sizeof(addr));
			ipaddr = (__typeof__(ipaddr)) & addr;
			ipaddr->sin_family = AF_INET;
			ipaddr->sin_len = sizeof(*ipaddr);
			ipaddr->sin_addr = ip->ip_dst;
		} else if (ifamily == AF_INET6) {
			struct sockaddr_in6 *ip6addr;

			/*
			 * m_pullup is prohibited in KAME IPv6 input processing
			 * but there's no other way!
			 */
			if (m->m_len < sizeof(*ip6)) {
				m = m_pullup(m, sizeof(*ip6));
				if (!m) {
					IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
					goto bad;
				}
			}

			/*
			 * Expect 32-bit aligned data pointer on strict-align
			 * platforms.
			 */
			MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);

			ip6 = mtod(m, struct ip6_hdr *);

			/* ECN consideration. */
			if (ip64_ecn_egress(ip4_ipsec_ecn, &tos, &ip6->ip6_flow) == 0) {
				IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
				goto bad;
			}

			if (!key_checktunnelsanity(sav, AF_INET6,
			    (caddr_t)&ip6->ip6_src, (caddr_t)&ip6->ip6_dst)) {
				ipseclog((LOG_ERR, "ipsec tunnel address mismatch "
				    "in ESP input: %s %s\n",
				    ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
				IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
				goto bad;
			}

			bzero(&addr, sizeof(addr));
			ip6addr = (__typeof__(ip6addr)) & addr;
			ip6addr->sin6_family = AF_INET6;
			ip6addr->sin6_len = sizeof(*ip6addr);
			ip6addr->sin6_addr = ip6->ip6_dst;
		} else {
			ipseclog((LOG_ERR, "ipsec tunnel unsupported address family "
			    "in ESP input\n"));
			goto bad;
		}

		key_sa_recordxfer(sav, m);
		if (ipsec_addhist(m, IPPROTO_ESP, spi) != 0 ||
		    ipsec_addhist(m, IPPROTO_IPV4, 0) != 0) {
			IPSEC_STAT_INCREMENT(ipsecstat.in_nomem);
			goto bad;
		}

		// update the receiving interface address based on the inner address
		ifa = ifa_ifwithaddr((struct sockaddr *)&addr);
		if (ifa) {
			m->m_pkthdr.rcvif = ifa->ifa_ifp;
			IFA_REMREF(ifa);
		}

		/* Clear the csum flags, they can't be valid for the inner headers */
		m->m_pkthdr.csum_flags = 0;

		// Input via IPsec interface
		lck_mtx_lock(sadb_mutex);
		ifnet_t ipsec_if = sav->sah->ipsec_if;
		if (ipsec_if != NULL) {
			// If an interface is found, add a reference count before dropping the lock
			ifnet_reference(ipsec_if);
		}
		lck_mtx_unlock(sadb_mutex);
		if (ipsec_if != NULL) {
			esp_input_log(m, sav, spi, seq);
			ipsec_save_wake_packet(m, ntohl(spi), seq);

			// Return mbuf
			if (interface != NULL &&
			    interface == ipsec_if) {
				out_m = m;
				ifnet_release(ipsec_if);
				goto done;
			}

			errno_t inject_error = ipsec_inject_inbound_packet(ipsec_if, m);
			ifnet_release(ipsec_if);

			if (inject_error == 0) {
				m = NULL;
				goto done;
			} else {
				goto bad;
			}
		}

		if (proto_input(ifamily == AF_INET ? PF_INET : PF_INET6, m) != 0) {
			goto bad;
		}

		nxt = IPPROTO_DONE;
		KERNEL_DEBUG(DBG_FNC_ESPIN | DBG_FUNC_END, 2, 0, 0, 0, 0);
	} else {
		/*
		 * strip off ESP header and IV.
		 * even in m_pulldown case, we need to strip off ESP so that
		 * we can always compute checksum for AH correctly.
		 */
		size_t stripsiz;

		stripsiz = esplen + ivlen;

		ip = mtod(m, struct ip *);
		ovbcopy((caddr_t)ip, (caddr_t)(((u_char *)ip) + stripsiz), off);
		m->m_data += stripsiz;
		m->m_len -= stripsiz;
		m->m_pkthdr.len -= stripsiz;

		ip = mtod(m, struct ip *);
#ifdef IPLEN_FLIPPED
		ip->ip_len = ip->ip_len - (u_short)stripsiz;
#else
		ip->ip_len = htons(ntohs(ip->ip_len) - stripsiz);
#endif
		ip->ip_p = (u_int8_t)nxt;

		key_sa_recordxfer(sav, m);
		if (ipsec_addhist(m, IPPROTO_ESP, spi) != 0) {
			IPSEC_STAT_INCREMENT(ipsecstat.in_nomem);
			goto bad;
		}

		/*
		 * Set the csum valid flag, if we authenticated the
		 * packet, the payload shouldn't be corrupt unless
		 * it was corrupted before being signed on the other
		 * side.
		 */
		if (nxt == IPPROTO_TCP || nxt == IPPROTO_UDP) {
			m->m_pkthdr.csum_flags = CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
			m->m_pkthdr.csum_data = 0xFFFF;
			_CASSERT(offsetof(struct pkthdr, csum_data) == offsetof(struct pkthdr, csum_rx_val));
		}

		if (nxt != IPPROTO_DONE) {
			if ((ip_protox[nxt]->pr_flags & PR_LASTHDR) != 0 &&
			    ipsec4_in_reject(m, NULL)) {
				IPSEC_STAT_INCREMENT(ipsecstat.in_polvio);
				goto bad;
			}
			KERNEL_DEBUG(DBG_FNC_ESPIN | DBG_FUNC_END, 3, 0, 0, 0, 0);

			/* translate encapsulated UDP port ? */
			if ((sav->flags & SADB_X_EXT_NATT_MULTIPLEUSERS) != 0) {
				struct udphdr *udp;

				if (nxt != IPPROTO_UDP) { /* not UPD packet - drop it */
					IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
					goto bad;
				}

				if (m->m_len < off + sizeof(struct udphdr)) {
					m = m_pullup(m, off + sizeof(struct udphdr));
					if (!m) {
						ipseclog((LOG_DEBUG,
						    "IPv4 ESP input: can't pullup UDP header in esp4_input\n"));
						IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
						goto bad;
					}
					ip = mtod(m, struct ip *);
				}
				udp = (struct udphdr *)(void *)(((u_int8_t *)ip) + off);

				/* Pin the inner source port to the first one seen. */
				lck_mtx_lock(sadb_mutex);
				if (sav->natt_encapsulated_src_port == 0) {
					sav->natt_encapsulated_src_port = udp->uh_sport;
				} else if (sav->natt_encapsulated_src_port != udp->uh_sport) { /* something wrong */
					IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
					lck_mtx_unlock(sadb_mutex);
					goto bad;
				}
				lck_mtx_unlock(sadb_mutex);
				udp->uh_sport = htons(sav->remote_ike_port);
				udp->uh_sum = 0;
			}

			DTRACE_IP6(receive, struct mbuf *, m, struct inpcb *, NULL,
			    struct ip *, ip, struct ifnet *, m->m_pkthdr.rcvif,
			    struct ip *, ip, struct ip6_hdr *, NULL);

			// Input via IPsec interface legacy path
			lck_mtx_lock(sadb_mutex);
			ifnet_t ipsec_if = sav->sah->ipsec_if;
			if (ipsec_if != NULL) {
				// If an interface is found, add a reference count before dropping the lock
				ifnet_reference(ipsec_if);
			}
			lck_mtx_unlock(sadb_mutex);
			if (ipsec_if != NULL) {
				int mlen;
				if ((mlen = m_length2(m, NULL)) < hlen) {
					ipseclog((LOG_DEBUG,
					    "IPv4 ESP input: decrypted packet too short %d < %u\n",
					    mlen, hlen));
					IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
					ifnet_release(ipsec_if);
					goto bad;
				}
				/* Rebuild a wire-format header before re-injecting. */
				ip->ip_len = htons(ip->ip_len + hlen);
				ip->ip_off = htons(ip->ip_off);
				ip->ip_sum = 0;
				ip->ip_sum = ip_cksum_hdr_in(m, hlen);

				esp_input_log(m, sav, spi, seq);
				ipsec_save_wake_packet(m, ntohl(spi), seq);

				// Return mbuf
				if (interface != NULL &&
				    interface == ipsec_if) {
					out_m = m;
					ifnet_release(ipsec_if);
					goto done;
				}

				errno_t inject_error = ipsec_inject_inbound_packet(ipsec_if, m);
				ifnet_release(ipsec_if);

				if (inject_error == 0) {
					m = NULL;
					goto done;
				} else {
					goto bad;
				}
			}

			ip_proto_dispatch_in(m, off, (u_int8_t)nxt, 0);
		} else {
			m_freem(m);
		}
		m = NULL;
	}

done:
	if (sav) {
		KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
		    printf("DP esp4_input call free SA:0x%llx\n",
		    (uint64_t)VM_KERNEL_ADDRPERM(sav)));
		key_freesav(sav, KEY_SADB_UNLOCKED);
	}
	IPSEC_STAT_INCREMENT(ipsecstat.in_success);
	return out_m;
bad:
	if (sav) {
		KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
		    printf("DP esp4_input call free SA:0x%llx\n",
		    (uint64_t)VM_KERNEL_ADDRPERM(sav)));
		key_freesav(sav, KEY_SADB_UNLOCKED);
	}
	if (m) {
		m_freem(m);
	}
	KERNEL_DEBUG(DBG_FNC_ESPIN | DBG_FUNC_END, 4, 0, 0, 0, 0);
	return out_m;
}
864
865 int
866 esp6_input(struct mbuf **mp, int *offp, int proto)
867 {
868 return esp6_input_extended(mp, offp, proto, NULL);
869 }
870
871 int
872 esp6_input_extended(struct mbuf **mp, int *offp, int proto, ifnet_t interface)
873 {
874 #pragma unused(proto)
875 struct mbuf *m = *mp;
876 int off = *offp;
877 struct ip *ip;
878 struct ip6_hdr *ip6;
879 struct esp *esp;
880 struct esptail esptail;
881 u_int32_t spi;
882 u_int32_t seq;
883 struct secasvar *sav = NULL;
884 u_int16_t nxt;
885 char *nproto;
886 const struct esp_algorithm *algo;
887 int ivlen;
888 size_t esplen;
889 u_int16_t taillen;
890 sa_family_t ifamily;
891 mbuf_traffic_class_t traffic_class = 0;
892
893 /* sanity check for alignment. */
894 if (off % 4 != 0 || m->m_pkthdr.len % 4 != 0) {
895 ipseclog((LOG_ERR, "IPv6 ESP input: packet alignment problem "
896 "(off=%d, pktlen=%d)\n", off, m->m_pkthdr.len));
897 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
898 goto bad;
899 }
900
901 #ifndef PULLDOWN_TEST
902 IP6_EXTHDR_CHECK(m, off, ESPMAXLEN, {return IPPROTO_DONE;});
903 esp = (struct esp *)(void *)(mtod(m, caddr_t) + off);
904 #else
905 IP6_EXTHDR_GET(esp, struct esp *, m, off, ESPMAXLEN);
906 if (esp == NULL) {
907 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
908 return IPPROTO_DONE;
909 }
910 #endif
911 m->m_pkthdr.csum_flags &= ~CSUM_RX_FLAGS;
912
913 /* Expect 32-bit data aligned pointer on strict-align platforms */
914 MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);
915
916 ip6 = mtod(m, struct ip6_hdr *);
917
918 if (ntohs(ip6->ip6_plen) == 0) {
919 ipseclog((LOG_ERR, "IPv6 ESP input: "
920 "ESP with IPv6 jumbogram is not supported.\n"));
921 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
922 goto bad;
923 }
924
925 nproto = ip6_get_prevhdr(m, off);
926 if (nproto == NULL || (*nproto != IPPROTO_ESP &&
927 !(*nproto == IPPROTO_UDP && off >= sizeof(struct udphdr)))) {
928 ipseclog((LOG_DEBUG, "IPv6 ESP input: invalid protocol type\n"));
929 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
930 goto bad;
931 }
932
933 /* find the sassoc. */
934 spi = esp->esp_spi;
935
936 if ((sav = key_allocsa_extended(AF_INET6,
937 (caddr_t)&ip6->ip6_src, (caddr_t)&ip6->ip6_dst,
938 IPPROTO_ESP, spi, interface)) == 0) {
939 ipseclog((LOG_WARNING,
940 "IPv6 ESP input: no key association found for spi %u (0x%08x) seq %u"
941 " src %04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x"
942 " dst %04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x if %s\n",
943 (u_int32_t)ntohl(spi), (u_int32_t)ntohl(spi), ntohl(((struct newesp *)esp)->esp_seq),
944 ntohs(ip6->ip6_src.__u6_addr.__u6_addr16[0]), ntohs(ip6->ip6_src.__u6_addr.__u6_addr16[1]),
945 ntohs(ip6->ip6_src.__u6_addr.__u6_addr16[2]), ntohs(ip6->ip6_src.__u6_addr.__u6_addr16[3]),
946 ntohs(ip6->ip6_src.__u6_addr.__u6_addr16[4]), ntohs(ip6->ip6_src.__u6_addr.__u6_addr16[5]),
947 ntohs(ip6->ip6_src.__u6_addr.__u6_addr16[6]), ntohs(ip6->ip6_src.__u6_addr.__u6_addr16[7]),
948 ntohs(ip6->ip6_dst.__u6_addr.__u6_addr16[0]), ntohs(ip6->ip6_dst.__u6_addr.__u6_addr16[1]),
949 ntohs(ip6->ip6_dst.__u6_addr.__u6_addr16[2]), ntohs(ip6->ip6_dst.__u6_addr.__u6_addr16[3]),
950 ntohs(ip6->ip6_dst.__u6_addr.__u6_addr16[4]), ntohs(ip6->ip6_dst.__u6_addr.__u6_addr16[5]),
951 ntohs(ip6->ip6_dst.__u6_addr.__u6_addr16[6]), ntohs(ip6->ip6_dst.__u6_addr.__u6_addr16[7]),
952 ((interface != NULL) ? if_name(interface) : "NONE")));
953 IPSEC_STAT_INCREMENT(ipsec6stat.in_nosa);
954 goto bad;
955 }
956 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
957 printf("DP esp6_input called to allocate SA:0x%llx\n",
958 (uint64_t)VM_KERNEL_ADDRPERM(sav)));
959 if (sav->state != SADB_SASTATE_MATURE
960 && sav->state != SADB_SASTATE_DYING) {
961 ipseclog((LOG_DEBUG,
962 "IPv6 ESP input: non-mature/dying SA found for spi %u (0x%08x)\n",
963 (u_int32_t)ntohl(spi), (u_int32_t)ntohl(spi)));
964 IPSEC_STAT_INCREMENT(ipsec6stat.in_badspi);
965 goto bad;
966 }
967 algo = esp_algorithm_lookup(sav->alg_enc);
968 if (!algo) {
969 ipseclog((LOG_DEBUG, "IPv6 ESP input: "
970 "unsupported encryption algorithm for spi %u (0x%08x)\n",
971 (u_int32_t)ntohl(spi), (u_int32_t)ntohl(spi)));
972 IPSEC_STAT_INCREMENT(ipsec6stat.in_badspi);
973 goto bad;
974 }
975
976 /* check if we have proper ivlen information */
977 ivlen = sav->ivlen;
978 if (ivlen < 0) {
979 ipseclog((LOG_ERR, "inproper ivlen in IPv6 ESP input: %s %s\n",
980 ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
981 IPSEC_STAT_INCREMENT(ipsec6stat.in_badspi);
982 goto bad;
983 }
984
985 seq = ntohl(((struct newesp *)esp)->esp_seq);
986
987 if ((sav->flags2 & SADB_X_EXT_SA2_SEQ_PER_TRAFFIC_CLASS) ==
988 SADB_X_EXT_SA2_SEQ_PER_TRAFFIC_CLASS) {
989 u_int8_t dscp = (ntohl(ip6->ip6_flow) & IP6FLOW_DSCP_MASK) >> IP6FLOW_DSCP_SHIFT;
990 traffic_class = rfc4594_dscp_to_tc(dscp);
991 }
992
993 /* Save ICV from packet for verification later */
994 size_t siz = 0;
995 unsigned char saved_icv[AH_MAXSUMSIZE];
996 if (algo->finalizedecrypt) {
997 siz = algo->icvlen;
998 VERIFY(siz <= UINT16_MAX);
999 m_copydata(m, m->m_pkthdr.len - (int)siz, (int)siz, (caddr_t) saved_icv);
1000 goto delay_icv;
1001 }
1002
1003 if (!((sav->flags & SADB_X_EXT_OLD) == 0 &&
1004 sav->replay[traffic_class] != NULL &&
1005 (sav->alg_auth && sav->key_auth))) {
1006 goto noreplaycheck;
1007 }
1008
1009 if (sav->alg_auth == SADB_X_AALG_NULL ||
1010 sav->alg_auth == SADB_AALG_NONE) {
1011 goto noreplaycheck;
1012 }
1013
1014 /*
1015 * check for sequence number.
1016 */
1017 if (ipsec_chkreplay(seq, sav, (u_int8_t)traffic_class)) {
1018 ; /*okey*/
1019 } else {
1020 IPSEC_STAT_INCREMENT(ipsec6stat.in_espreplay);
1021 ipseclog((LOG_WARNING,
1022 "replay packet in IPv6 ESP input: %s %s\n",
1023 ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
1024 goto bad;
1025 }
1026
1027 /* check ICV */
1028 {
1029 u_char sum0[AH_MAXSUMSIZE] __attribute__((aligned(4)));
1030 u_char sum[AH_MAXSUMSIZE] __attribute__((aligned(4)));
1031 const struct ah_algorithm *sumalgo;
1032
1033 sumalgo = ah_algorithm_lookup(sav->alg_auth);
1034 if (!sumalgo) {
1035 goto noreplaycheck;
1036 }
1037 siz = (((*sumalgo->sumsiz)(sav) + 3) & ~(4 - 1));
1038 if (m->m_pkthdr.len < off + ESPMAXLEN + siz) {
1039 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
1040 goto bad;
1041 }
1042 if (AH_MAXSUMSIZE < siz) {
1043 ipseclog((LOG_DEBUG,
1044 "internal error: AH_MAXSUMSIZE must be larger than %u\n",
1045 (u_int32_t)siz));
1046 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
1047 goto bad;
1048 }
1049
1050 m_copydata(m, m->m_pkthdr.len - (int)siz, (int)siz, (caddr_t) &sum0[0]);
1051
1052 if (esp_auth(m, off, m->m_pkthdr.len - off - siz, sav, sum)) {
1053 ipseclog((LOG_WARNING, "auth fail in IPv6 ESP input: %s %s\n",
1054 ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
1055 IPSEC_STAT_INCREMENT(ipsec6stat.in_espauthfail);
1056 goto bad;
1057 }
1058
1059 if (cc_cmp_safe(siz, sum0, sum)) {
1060 ipseclog((LOG_WARNING, "auth fail in IPv6 ESP input: %s %s\n",
1061 ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
1062 IPSEC_STAT_INCREMENT(ipsec6stat.in_espauthfail);
1063 goto bad;
1064 }
1065
1066 delay_icv:
1067
1068 /* strip off the authentication data */
1069 m_adj(m, (int)-siz);
1070 ip6 = mtod(m, struct ip6_hdr *);
1071 ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - (u_int16_t)siz);
1072
1073 m->m_flags |= M_AUTHIPDGM;
1074 IPSEC_STAT_INCREMENT(ipsec6stat.in_espauthsucc);
1075 }
1076
1077 /*
1078 * update sequence number.
1079 */
1080 if ((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay[traffic_class] != NULL) {
1081 if (ipsec_updatereplay(seq, sav, (u_int8_t)traffic_class)) {
1082 IPSEC_STAT_INCREMENT(ipsec6stat.in_espreplay);
1083 goto bad;
1084 }
1085 }
1086
1087 noreplaycheck:
1088
1089 /* process main esp header. */
1090 if (sav->flags & SADB_X_EXT_OLD) {
1091 /* RFC 1827 */
1092 esplen = sizeof(struct esp);
1093 } else {
1094 /* RFC 2406 */
1095 if (sav->flags & SADB_X_EXT_DERIV) {
1096 esplen = sizeof(struct esp);
1097 } else {
1098 esplen = sizeof(struct newesp);
1099 }
1100 }
1101
1102 if (m->m_pkthdr.len < off + esplen + ivlen + sizeof(esptail)) {
1103 ipseclog((LOG_WARNING,
1104 "IPv6 ESP input: packet too short\n"));
1105 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
1106 goto bad;
1107 }
1108
1109 #ifndef PULLDOWN_TEST
1110 IP6_EXTHDR_CHECK(m, off, (int)(esplen + ivlen), return IPPROTO_DONE); /*XXX*/
1111 #else
1112 IP6_EXTHDR_GET(esp, struct esp *, m, off, esplen + ivlen);
1113 if (esp == NULL) {
1114 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
1115 m = NULL;
1116 goto bad;
1117 }
1118 #endif
1119 ip6 = mtod(m, struct ip6_hdr *); /*set it again just in case*/
1120
1121 /*
1122 * pre-compute and cache intermediate key
1123 */
1124 if (esp_schedule(algo, sav) != 0) {
1125 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
1126 goto bad;
1127 }
1128
1129 /*
1130 * decrypt the packet.
1131 */
1132 if (!algo->decrypt) {
1133 panic("internal error: no decrypt function");
1134 }
1135 if ((*algo->decrypt)(m, off, sav, algo, ivlen)) {
1136 /* m is already freed */
1137 m = NULL;
1138 ipseclog((LOG_ERR, "decrypt fail in IPv6 ESP input: %s\n",
1139 ipsec_logsastr(sav)));
1140 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
1141 goto bad;
1142 }
1143 IPSEC_STAT_INCREMENT(ipsec6stat.in_esphist[sav->alg_enc]);
1144
1145 m->m_flags |= M_DECRYPTED;
1146
1147 if (algo->finalizedecrypt) {
1148 if ((*algo->finalizedecrypt)(sav, saved_icv, algo->icvlen)) {
1149 ipseclog((LOG_ERR, "esp6 packet decryption ICV failure\n"));
1150 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
1151 KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 1, 0, 0, 0, 0);
1152 goto bad;
1153 }
1154 }
1155
1156 /*
1157 * find the trailer of the ESP.
1158 */
1159 m_copydata(m, m->m_pkthdr.len - sizeof(esptail), sizeof(esptail),
1160 (caddr_t)&esptail);
1161 nxt = esptail.esp_nxt;
1162 taillen = esptail.esp_padlen + sizeof(esptail);
1163
1164 if (m->m_pkthdr.len < taillen
1165 || m->m_pkthdr.len - taillen < sizeof(struct ip6_hdr)) { /*?*/
1166 ipseclog((LOG_WARNING,
1167 "bad pad length in IPv6 ESP input: %s %s\n",
1168 ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
1169 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
1170 goto bad;
1171 }
1172
1173 /* strip off the trailing pad area. */
1174 m_adj(m, -taillen);
1175 ip6 = mtod(m, struct ip6_hdr *);
1176 ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - taillen);
1177
1178 if (*nproto == IPPROTO_UDP) {
1179 // offset includes the outer ip and udp header lengths.
1180 if (m->m_len < off) {
1181 m = m_pullup(m, off);
1182 if (!m) {
1183 ipseclog((LOG_DEBUG,
1184 "IPv6 ESP input: invalid udp encapsulated ESP packet length\n"));
1185 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
1186 goto bad;
1187 }
1188 ip6 = mtod(m, struct ip6_hdr *);
1189 }
1190
1191 // check the UDP encap header to detect changes in the source port, and then strip the header
1192 off -= sizeof(struct udphdr); // off no longer includes the udphdr's size
1193 // if peer is behind nat and this is the latest esp packet
1194 if ((sav->flags & SADB_X_EXT_NATT_DETECTED_PEER) != 0 &&
1195 (sav->flags & SADB_X_EXT_OLD) == 0 &&
1196 seq && sav->replay[traffic_class] &&
1197 seq >= sav->replay[traffic_class]->lastseq) {
1198 struct udphdr *encap_uh = (__typeof__(encap_uh))(void *)((caddr_t)ip6 + off);
1199 if (encap_uh->uh_sport &&
1200 ntohs(encap_uh->uh_sport) != sav->remote_ike_port) {
1201 sav->remote_ike_port = ntohs(encap_uh->uh_sport);
1202 }
1203 }
1204 ip6 = esp6_input_strip_udp_encap(m, off);
1205 esp = (struct esp *)(void *)(((u_int8_t *)ip6) + off);
1206 }
1207
1208
1209 /* was it transmitted over the IPsec tunnel SA? */
1210 if (ipsec6_tunnel_validate(m, (int)(off + esplen + ivlen), nxt, sav, &ifamily)) {
1211 ifaddr_t ifa;
1212 struct sockaddr_storage addr;
1213
1214 /*
1215 * strip off all the headers that precedes ESP header.
1216 * IP6 xx ESP IP6' payload -> IP6' payload
1217 *
1218 * XXX more sanity checks
1219 * XXX relationship with gif?
1220 */
1221 u_int32_t flowinfo; /*net endian*/
1222 flowinfo = ip6->ip6_flow;
1223 m_adj(m, (int)(off + esplen + ivlen));
1224 if (ifamily == AF_INET6) {
1225 struct sockaddr_in6 *ip6addr;
1226
1227 if (m->m_len < sizeof(*ip6)) {
1228 #ifndef PULLDOWN_TEST
1229 /*
1230 * m_pullup is prohibited in KAME IPv6 input processing
1231 * but there's no other way!
1232 */
1233 #else
1234 /* okay to pullup in m_pulldown style */
1235 #endif
1236 m = m_pullup(m, sizeof(*ip6));
1237 if (!m) {
1238 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
1239 goto bad;
1240 }
1241 }
1242 ip6 = mtod(m, struct ip6_hdr *);
1243 /* ECN consideration. */
1244 if (ip6_ecn_egress(ip6_ipsec_ecn, &flowinfo, &ip6->ip6_flow) == 0) {
1245 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
1246 goto bad;
1247 }
1248 if (!key_checktunnelsanity(sav, AF_INET6,
1249 (caddr_t)&ip6->ip6_src, (caddr_t)&ip6->ip6_dst)) {
1250 ipseclog((LOG_ERR, "ipsec tunnel address mismatch "
1251 "in IPv6 ESP input: %s %s\n",
1252 ipsec6_logpacketstr(ip6, spi),
1253 ipsec_logsastr(sav)));
1254 IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
1255 goto bad;
1256 }
1257
1258 bzero(&addr, sizeof(addr));
1259 ip6addr = (__typeof__(ip6addr)) & addr;
1260 ip6addr->sin6_family = AF_INET6;
1261 ip6addr->sin6_len = sizeof(*ip6addr);
1262 ip6addr->sin6_addr = ip6->ip6_dst;
1263 } else if (ifamily == AF_INET) {
1264 struct sockaddr_in *ipaddr;
1265
1266 if (m->m_len < sizeof(*ip)) {
1267 m = m_pullup(m, sizeof(*ip));
1268 if (!m) {
1269 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
1270 goto bad;
1271 }
1272 }
1273
1274 u_int8_t otos;
1275 int sum;
1276
1277 ip = mtod(m, struct ip *);
1278 otos = ip->ip_tos;
1279 /* ECN consideration. */
1280 if (ip46_ecn_egress(ip6_ipsec_ecn, &flowinfo, &ip->ip_tos) == 0) {
1281 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
1282 goto bad;
1283 }
1284
1285 if (otos != ip->ip_tos) {
1286 sum = ~ntohs(ip->ip_sum) & 0xffff;
1287 sum += (~otos & 0xffff) + ip->ip_tos;
1288 sum = (sum >> 16) + (sum & 0xffff);
1289 sum += (sum >> 16); /* add carry */
1290 ip->ip_sum = htons(~sum & 0xffff);
1291 }
1292
1293 if (!key_checktunnelsanity(sav, AF_INET,
1294 (caddr_t)&ip->ip_src, (caddr_t)&ip->ip_dst)) {
1295 ipseclog((LOG_ERR, "ipsec tunnel address mismatch "
1296 "in ESP input: %s %s\n",
1297 ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
1298 IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
1299 goto bad;
1300 }
1301
1302 bzero(&addr, sizeof(addr));
1303 ipaddr = (__typeof__(ipaddr)) & addr;
1304 ipaddr->sin_family = AF_INET;
1305 ipaddr->sin_len = sizeof(*ipaddr);
1306 ipaddr->sin_addr = ip->ip_dst;
1307 }
1308
1309 key_sa_recordxfer(sav, m);
1310 if (ipsec_addhist(m, IPPROTO_ESP, spi) != 0 ||
1311 ipsec_addhist(m, IPPROTO_IPV6, 0) != 0) {
1312 IPSEC_STAT_INCREMENT(ipsec6stat.in_nomem);
1313 goto bad;
1314 }
1315
1316 // update the receiving interface address based on the inner address
1317 ifa = ifa_ifwithaddr((struct sockaddr *)&addr);
1318 if (ifa) {
1319 m->m_pkthdr.rcvif = ifa->ifa_ifp;
1320 IFA_REMREF(ifa);
1321 }
1322
1323 // Input via IPsec interface
1324 lck_mtx_lock(sadb_mutex);
1325 ifnet_t ipsec_if = sav->sah->ipsec_if;
1326 if (ipsec_if != NULL) {
1327 // If an interface is found, add a reference count before dropping the lock
1328 ifnet_reference(ipsec_if);
1329 }
1330 lck_mtx_unlock(sadb_mutex);
1331 if (ipsec_if != NULL) {
1332 esp_input_log(m, sav, spi, seq);
1333 ipsec_save_wake_packet(m, ntohl(spi), seq);
1334
1335 // Return mbuf
1336 if (interface != NULL &&
1337 interface == ipsec_if) {
1338 ifnet_release(ipsec_if);
1339 goto done;
1340 }
1341
1342 errno_t inject_error = ipsec_inject_inbound_packet(ipsec_if, m);
1343 ifnet_release(ipsec_if);
1344
1345 if (inject_error == 0) {
1346 m = NULL;
1347 nxt = IPPROTO_DONE;
1348 goto done;
1349 } else {
1350 goto bad;
1351 }
1352 }
1353
1354 if (proto_input(ifamily == AF_INET ? PF_INET : PF_INET6, m) != 0) {
1355 goto bad;
1356 }
1357 nxt = IPPROTO_DONE;
1358 } else {
1359 /*
1360 * strip off ESP header and IV.
1361 * even in m_pulldown case, we need to strip off ESP so that
1362 * we can always compute checksum for AH correctly.
1363 */
1364 u_int16_t stripsiz;
1365 char *prvnxtp;
1366
1367 /*
1368 * Set the next header field of the previous header correctly.
1369 */
1370 prvnxtp = ip6_get_prevhdr(m, off); /* XXX */
1371 *prvnxtp = (u_int8_t)nxt;
1372
1373 VERIFY(esplen + ivlen <= UINT16_MAX);
1374 stripsiz = (u_int16_t)(esplen + ivlen);
1375
1376 ip6 = mtod(m, struct ip6_hdr *);
1377 if (m->m_len >= stripsiz + off) {
1378 ovbcopy((caddr_t)ip6, ((caddr_t)ip6) + stripsiz, off);
1379 m->m_data += stripsiz;
1380 m->m_len -= stripsiz;
1381 m->m_pkthdr.len -= stripsiz;
1382 } else {
1383 /*
1384 * this comes with no copy if the boundary is on
1385 * cluster
1386 */
1387 struct mbuf *n;
1388
1389 n = m_split(m, off, M_DONTWAIT);
1390 if (n == NULL) {
1391 /* m is retained by m_split */
1392 goto bad;
1393 }
1394 m_adj(n, stripsiz);
1395 /* m_cat does not update m_pkthdr.len */
1396 m->m_pkthdr.len += n->m_pkthdr.len;
1397 m_cat(m, n);
1398 }
1399
1400 #ifndef PULLDOWN_TEST
1401 /*
1402 * KAME requires that the packet to be contiguous on the
1403 * mbuf. We need to make that sure.
1404 * this kind of code should be avoided.
1405 * XXX other conditions to avoid running this part?
1406 */
1407 if (m->m_len != m->m_pkthdr.len) {
1408 struct mbuf *n = NULL;
1409 int maxlen;
1410
1411 MGETHDR(n, M_DONTWAIT, MT_HEADER); /* MAC-OK */
1412 maxlen = MHLEN;
1413 if (n) {
1414 M_COPY_PKTHDR(n, m);
1415 }
1416 if (n && m->m_pkthdr.len > maxlen) {
1417 MCLGET(n, M_DONTWAIT);
1418 maxlen = MCLBYTES;
1419 if ((n->m_flags & M_EXT) == 0) {
1420 m_free(n);
1421 n = NULL;
1422 }
1423 }
1424 if (!n) {
1425 printf("esp6_input: mbuf allocation failed\n");
1426 goto bad;
1427 }
1428
1429 if (m->m_pkthdr.len <= maxlen) {
1430 m_copydata(m, 0, m->m_pkthdr.len, mtod(n, caddr_t));
1431 n->m_len = m->m_pkthdr.len;
1432 n->m_pkthdr.len = m->m_pkthdr.len;
1433 n->m_next = NULL;
1434 m_freem(m);
1435 } else {
1436 m_copydata(m, 0, maxlen, mtod(n, caddr_t));
1437 n->m_len = maxlen;
1438 n->m_pkthdr.len = m->m_pkthdr.len;
1439 n->m_next = m;
1440 m_adj(m, maxlen);
1441 m->m_flags &= ~M_PKTHDR;
1442 }
1443 m = n;
1444 }
1445 #endif
1446 ip6 = mtod(m, struct ip6_hdr *);
1447 ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - stripsiz);
1448
1449 key_sa_recordxfer(sav, m);
1450 if (ipsec_addhist(m, IPPROTO_ESP, spi) != 0) {
1451 IPSEC_STAT_INCREMENT(ipsec6stat.in_nomem);
1452 goto bad;
1453 }
1454
1455 /*
1456 * Set the csum valid flag, if we authenticated the
1457 * packet, the payload shouldn't be corrupt unless
1458 * it was corrupted before being signed on the other
1459 * side.
1460 */
1461 if (nxt == IPPROTO_TCP || nxt == IPPROTO_UDP) {
1462 m->m_pkthdr.csum_flags = CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
1463 m->m_pkthdr.csum_data = 0xFFFF;
1464 _CASSERT(offsetof(struct pkthdr, csum_data) == offsetof(struct pkthdr, csum_rx_val));
1465 }
1466
1467 // Input via IPsec interface
1468 lck_mtx_lock(sadb_mutex);
1469 ifnet_t ipsec_if = sav->sah->ipsec_if;
1470 if (ipsec_if != NULL) {
1471 // If an interface is found, add a reference count before dropping the lock
1472 ifnet_reference(ipsec_if);
1473 }
1474 lck_mtx_unlock(sadb_mutex);
1475 if (ipsec_if != NULL) {
1476 esp_input_log(m, sav, spi, seq);
1477 ipsec_save_wake_packet(m, ntohl(spi), seq);
1478
1479 // Return mbuf
1480 if (interface != NULL &&
1481 interface == ipsec_if) {
1482 ifnet_release(ipsec_if);
1483 goto done;
1484 }
1485
1486 errno_t inject_error = ipsec_inject_inbound_packet(ipsec_if, m);
1487 ifnet_release(ipsec_if);
1488
1489 if (inject_error == 0) {
1490 m = NULL;
1491 nxt = IPPROTO_DONE;
1492 goto done;
1493 } else {
1494 goto bad;
1495 }
1496 }
1497 }
1498
1499 done:
1500 *offp = off;
1501 *mp = m;
1502 if (sav) {
1503 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
1504 printf("DP esp6_input call free SA:0x%llx\n",
1505 (uint64_t)VM_KERNEL_ADDRPERM(sav)));
1506 key_freesav(sav, KEY_SADB_UNLOCKED);
1507 }
1508 IPSEC_STAT_INCREMENT(ipsec6stat.in_success);
1509 return nxt;
1510
1511 bad:
1512 if (sav) {
1513 KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
1514 printf("DP esp6_input call free SA:0x%llx\n",
1515 (uint64_t)VM_KERNEL_ADDRPERM(sav)));
1516 key_freesav(sav, KEY_SADB_UNLOCKED);
1517 }
1518 if (m) {
1519 m_freem(m);
1520 }
1521 if (interface != NULL) {
1522 *mp = NULL;
1523 }
1524 return IPPROTO_DONE;
1525 }
1526
/*
 * Control-input handler for IPv6 ESP: invoked by the ICMPv6 machinery
 * (via the protocol switch) when an ICMPv6 error concerning an ESP
 * packet we sent is received.
 *
 * cmd: PRC_* code translated from the ICMPv6 error type.
 * sa:  the final destination address of the offending packet
 *      (must be a full sockaddr_in6).
 * d:   when non-NULL, a struct ip6ctlparam describing the ICMPv6
 *      payload (mbuf, inner IPv6 header, offset of the ESP header).
 * ifp: unused here.
 *
 * The main job is PRC_MSGSIZE (Packet Too Big): look up the SA that
 * matches the embedded ESP SPI and, if it is live, let
 * icmp6_mtudisc_update() record the new path MTU.
 */
void
esp6_ctlinput(int cmd, struct sockaddr *sa, void *d, __unused struct ifnet *ifp)
{
	const struct newesp *espp;
	struct newesp esp;          /* local copy when the header is not contiguous */
	struct ip6ctlparam *ip6cp = NULL, ip6cp1;
	struct secasvar *sav;
	struct ip6_hdr *ip6;
	struct mbuf *m;
	int off = 0;
	struct sockaddr_in6 *sa6_src, *sa6_dst;

	/* Only a fully-formed IPv6 sockaddr is meaningful here. */
	if (sa->sa_family != AF_INET6 ||
	    sa->sa_len != sizeof(struct sockaddr_in6)) {
		return;
	}
	/* Reject PRC codes outside the known table. */
	if ((unsigned)cmd >= PRC_NCMDS) {
		return;
	}

	/* if the parameter is from icmp6, decode it. */
	if (d != NULL) {
		ip6cp = (struct ip6ctlparam *)d;
		m = ip6cp->ip6c_m;
		ip6 = ip6cp->ip6c_ip6;
		off = ip6cp->ip6c_off;
	} else {
		m = NULL;
		ip6 = NULL;
	}

	if (ip6) {
		/*
		 * Notify the error to all possible sockets via pfctlinput2.
		 * Since the upper layer information (such as protocol type,
		 * source and destination ports) is embedded in the encrypted
		 * data and might have been cut, we can't directly call
		 * an upper layer ctlinput function. However, the pcbnotify
		 * function will consider source and destination addresses
		 * as well as the flow info value, and may be able to find
		 * some PCB that should be notified.
		 * Although pfctlinput2 will call esp6_ctlinput(), there is
		 * no possibility of an infinite loop of function calls,
		 * because we don't pass the inner IPv6 header.
		 */
		bzero(&ip6cp1, sizeof(ip6cp1));
		ip6cp1.ip6c_src = ip6cp->ip6c_src;
		pfctlinput2(cmd, sa, (void *)&ip6cp1);

		/*
		 * Then go to special cases that need ESP header information.
		 * XXX: We assume that when ip6 is non NULL,
		 * M and OFF are valid.
		 */

		/* check if we can safely examine src and dst ports */
		if (m->m_pkthdr.len < off + sizeof(esp)) {
			return;
		}

		if (m->m_len < off + sizeof(esp)) {
			/*
			 * this should be rare case,
			 * so we compromise on this copy...
			 */
			m_copydata(m, off, sizeof(esp), (caddr_t)&esp);
			espp = &esp;
		} else {
			/* ESP header is contiguous; point into the mbuf. */
			espp = (struct newesp*)(void *)(mtod(m, caddr_t) + off);
		}

		if (cmd == PRC_MSGSIZE) {
			int valid = 0;

			/*
			 * Check to see if we have a valid SA corresponding to
			 * the address in the ICMP message payload.
			 */
			sa6_src = ip6cp->ip6c_src;
			sa6_dst = (struct sockaddr_in6 *)(void *)sa;
			sav = key_allocsa(AF_INET6,
			    (caddr_t)&sa6_src->sin6_addr,
			    (caddr_t)&sa6_dst->sin6_addr,
			    IPPROTO_ESP, espp->esp_spi);
			if (sav) {
				/* Only a usable (mature/dying) SA validates the report. */
				if (sav->state == SADB_SASTATE_MATURE ||
				    sav->state == SADB_SASTATE_DYING) {
					valid++;
				}
				/* Drop the reference taken by key_allocsa(). */
				key_freesav(sav, KEY_SADB_UNLOCKED);
			}

			/* XXX Further validation? */

			/*
			 * Depending on the value of "valid" and routing table
			 * size (mtudisc_{hi,lo}wat), we will:
			 * - recalculate the new MTU and create the
			 *   corresponding routing entry, or
			 * - ignore the MTU change notification.
			 */
			icmp6_mtudisc_update((struct ip6ctlparam *)d, valid);
		}
	} else {
		/* we normally notify any pcb here */
	}
}