/*
 * Copyright (c) 2008-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* $FreeBSD: src/sys/netinet6/esp_input.c,v 1.1.2.3 2001/07/03 11:01:50 ume Exp $ */
/* $KAME: esp_input.c,v 1.55 2001/03/23 08:08:47 itojun Exp $ */
/*
 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the project nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * RFC1827/2406 Encapsulated Security Payload.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mcache.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/syslog.h>

#include <net/if.h>
#include <net/if_ipsec.h>
#include <net/route.h>
#include <kern/cpu_number.h>
#include <kern/locks.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/in_var.h>
#include <netinet/ip_ecn.h>
#include <netinet/in_pcb.h>
#include <netinet/udp.h>
#include <netinet6/ip6_ecn.h>

#include <netinet/ip6.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/ip6_var.h>
#include <netinet/icmp6.h>
#include <netinet6/ip6protosw.h>

#include <netinet6/ipsec.h>
#include <netinet6/ipsec6.h>
#include <netinet6/ah.h>
#include <netinet6/ah6.h>
#include <netinet6/esp.h>
#include <netinet6/esp6.h>
#include <netkey/key.h>
#include <netkey/keydb.h>
#include <netkey/key_debug.h>

#include <net/kpi_protocol.h>
#include <netinet/kpi_ipfilter_var.h>

#include <net/net_osdep.h>
#include <mach/sdt.h>
#include <corecrypto/cc.h>

#include <sys/kdebug.h>
#define DBG_LAYER_BEG           NETDBG_CODE(DBG_NETIPSEC, 1)
#define DBG_LAYER_END           NETDBG_CODE(DBG_NETIPSEC, 3)
#define DBG_FNC_ESPIN           NETDBG_CODE(DBG_NETIPSEC, (6 << 8))
#define DBG_FNC_DECRYPT         NETDBG_CODE(DBG_NETIPSEC, (7 << 8))
#define IPLEN_FLIPPED
extern lck_mtx_t *sadb_mutex;

#define ESPMAXLEN \
    (sizeof(struct esp) < sizeof(struct newesp) \
    ? sizeof(struct newesp) : sizeof(struct esp))
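
/*
 * ESPMAXLEN is the larger of the two fixed ESP header layouts --
 * struct esp (old-style RFC 1827: SPI only) and struct newesp
 * (RFC 2406: SPI plus 32-bit sequence number) -- so pulling up
 * off + ESPMAXLEN bytes covers the header in either format.
 */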
static struct ip *
esp4_input_strip_udp_encap(struct mbuf *m, int iphlen)
{
    // strip the udp header that's encapsulating ESP
    struct ip *ip;
    size_t stripsiz = sizeof(struct udphdr);

    ip = mtod(m, __typeof__(ip));
    ovbcopy((caddr_t)ip, (caddr_t)(((u_char *)ip) + stripsiz), iphlen);
    m->m_data += stripsiz;
    m->m_len -= stripsiz;
    m->m_pkthdr.len -= stripsiz;
    ip = mtod(m, __typeof__(ip));
    ip->ip_len = ip->ip_len - stripsiz;
    ip->ip_p = IPPROTO_ESP;
    return ip;
}
static struct ip6_hdr *
esp6_input_strip_udp_encap(struct mbuf *m, int ip6hlen)
{
    // strip the udp header that's encapsulating ESP
    struct ip6_hdr *ip6;
    size_t stripsiz = sizeof(struct udphdr);

    ip6 = mtod(m, __typeof__(ip6));
    ovbcopy((caddr_t)ip6, (caddr_t)(((u_char *)ip6) + stripsiz), ip6hlen);
    m->m_data += stripsiz;
    m->m_len -= stripsiz;
    m->m_pkthdr.len -= stripsiz;
    ip6 = mtod(m, __typeof__(ip6));
    ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - stripsiz);
    ip6->ip6_nxt = IPPROTO_ESP;
    return ip6;
}
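
/*
 * Both strip helpers undo UDP encapsulation of ESP (RFC 3948-style NAT
 * traversal): the outer IP/IPv6 header is slid forward over the UDP header
 * with ovbcopy(), the mbuf data pointer and lengths are advanced by
 * sizeof(struct udphdr), and the length/next-protocol fields are rewritten
 * so the packet looks like plain ESP-in-IP from here on.  Note the IPv4
 * variant adjusts ip_len in host byte order (the IPLEN_FLIPPED convention),
 * while the IPv6 variant keeps ip6_plen in network byte order.
 */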
void
esp4_input(struct mbuf *m, int off)
{
    (void)esp4_input_extended(m, off, NULL);
}
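
/*
 * esp4_input() is the protocol-switch entry point; the _extended variant
 * additionally takes the receiving ipsec interface so the SA lookup can be
 * scoped to that interface and, when the packet belongs to that interface's
 * SA, the decapsulated mbuf can be handed back to the caller (out_m) rather
 * than re-injected.  The overall flow below: locate the SA by SPI, verify
 * the ICV, check the anti-replay window, decrypt, strip the ESP trailer,
 * undo any NAT-T UDP encapsulation, then dispatch the inner packet either
 * as a tunnel-mode payload or as a transport-mode upper layer protocol.
 */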
struct mbuf *
esp4_input_extended(struct mbuf *m, int off, ifnet_t interface)
{
    struct ip *ip;
    struct ip6_hdr *ip6;
    struct esp *esp;
    struct esptail esptail;
    u_int32_t spi;
    u_int32_t seq;
    struct secasvar *sav = NULL;
    size_t taillen;
    u_int16_t nxt;
    const struct esp_algorithm *algo;
    int ivlen;
    size_t hlen;
    size_t esplen;
    sa_family_t ifamily;
    struct mbuf *out_m = NULL;

    KERNEL_DEBUG(DBG_FNC_ESPIN | DBG_FUNC_START, 0, 0, 0, 0, 0);
    /* sanity check for alignment. */
    if (off % 4 != 0 || m->m_pkthdr.len % 4 != 0) {
        ipseclog((LOG_ERR, "IPv4 ESP input: packet alignment problem "
            "(off=%d, pktlen=%d)\n", off, m->m_pkthdr.len));
        IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
        goto bad;
    }

    if (m->m_len < off + ESPMAXLEN) {
        m = m_pullup(m, off + ESPMAXLEN);
        if (!m) {
            ipseclog((LOG_DEBUG,
                "IPv4 ESP input: can't pullup in esp4_input\n"));
            IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
            goto bad;
        }
    }

    m->m_pkthdr.csum_flags &= ~CSUM_RX_FLAGS;

    /* Expect 32-bit aligned data pointer on strict-align platforms */
    MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);
    ip = mtod(m, struct ip *);
    // expect udp-encap and esp packets only
    if (ip->ip_p != IPPROTO_ESP &&
        !(ip->ip_p == IPPROTO_UDP && off >= sizeof(struct udphdr))) {
        ipseclog((LOG_DEBUG,
            "IPv4 ESP input: invalid protocol type\n"));
        IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
        goto bad;
    }
    esp = (struct esp *)(void *)(((u_int8_t *)ip) + off);
#ifdef _IP_VHL
    hlen = IP_VHL_HL(ip->ip_vhl) << 2;
#else
    hlen = ip->ip_hl << 2;
#endif
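    /*
     * Either branch above yields the outer IPv4 header length in bytes:
     * ip_vhl/ip_hl store the header length in 32-bit words, so shifting
     * left by 2 converts words to bytes (e.g. 5 words -> 20 bytes).
     */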
    /* find the sassoc. */
    spi = esp->esp_spi;

    if ((sav = key_allocsa_extended(AF_INET,
        (caddr_t)&ip->ip_src, (caddr_t)&ip->ip_dst,
        IPPROTO_ESP, spi, interface)) == 0) {
        ipseclog((LOG_WARNING,
            "IPv4 ESP input: no key association found for spi %u\n",
            (u_int32_t)ntohl(spi)));
        IPSEC_STAT_INCREMENT(ipsecstat.in_nosa);
        goto bad;
    }
    KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
        printf("DP esp4_input called to allocate SA:0x%llx\n",
        (uint64_t)VM_KERNEL_ADDRPERM(sav)));
    if (sav->state != SADB_SASTATE_MATURE
        && sav->state != SADB_SASTATE_DYING) {
        ipseclog((LOG_DEBUG,
            "IPv4 ESP input: non-mature/dying SA found for spi %u\n",
            (u_int32_t)ntohl(spi)));
        IPSEC_STAT_INCREMENT(ipsecstat.in_badspi);
        goto bad;
    }
    algo = esp_algorithm_lookup(sav->alg_enc);
    if (!algo) {
        ipseclog((LOG_DEBUG, "IPv4 ESP input: "
            "unsupported encryption algorithm for spi %u\n",
            (u_int32_t)ntohl(spi)));
        IPSEC_STAT_INCREMENT(ipsecstat.in_badspi);
        goto bad;
    }
    /* check if we have proper ivlen information */
    ivlen = sav->ivlen;
    if (ivlen < 0) {
        ipseclog((LOG_ERR, "improper ivlen in IPv4 ESP input: %s %s\n",
            ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
        IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
        goto bad;
    }

    seq = ntohl(((struct newesp *)esp)->esp_seq);
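    /*
     * esp_seq is read through struct newesp unconditionally; for old-style
     * (RFC 1827) SAs the value is simply ignored below, since the replay
     * checks are only armed when SADB_X_EXT_OLD is clear and sav->replay
     * is configured.
     */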
    /* Save ICV from packet for verification later */
    size_t siz = 0;
    unsigned char saved_icv[AH_MAXSUMSIZE];
    if (algo->finalizedecrypt) {
        siz = algo->icvlen;
        m_copydata(m, m->m_pkthdr.len - siz, siz, (caddr_t) saved_icv);
        goto delay_icv;
    }

    if (!((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay
        && (sav->alg_auth && sav->key_auth))) {
        goto noreplaycheck;
    }

    if (sav->alg_auth == SADB_X_AALG_NULL ||
        sav->alg_auth == SADB_AALG_NONE) {
        goto noreplaycheck;
    }

    /*
     * check for sequence number.
     */
    if (ipsec_chkreplay(seq, sav)) {
        ; /* okey */
    } else {
        IPSEC_STAT_INCREMENT(ipsecstat.in_espreplay);
        ipseclog((LOG_WARNING,
            "replay packet in IPv4 ESP input: %s %s\n",
            ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
        goto bad;
    }
    /* check ICV */
    {
        u_char sum0[AH_MAXSUMSIZE] __attribute__((aligned(4)));
        u_char sum[AH_MAXSUMSIZE] __attribute__((aligned(4)));
        const struct ah_algorithm *sumalgo;

        sumalgo = ah_algorithm_lookup(sav->alg_auth);
        if (!sumalgo) {
            goto noreplaycheck;
        }
        siz = (((*sumalgo->sumsiz)(sav) + 3) & ~(4 - 1));
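        /*
         * (*sumalgo->sumsiz)(sav) is the authenticator length produced by
         * the negotiated MAC; "+ 3 & ~3" rounds it up to the next multiple
         * of 4 bytes, matching the 32-bit alignment of the ESP
         * authentication data on the wire.
         */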
        if (m->m_pkthdr.len < off + ESPMAXLEN + siz) {
            IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
            goto bad;
        }
        if (AH_MAXSUMSIZE < siz) {
            ipseclog((LOG_DEBUG,
                "internal error: AH_MAXSUMSIZE must be larger than %lu\n",
                (u_long)siz));
            IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
            goto bad;
        }

        m_copydata(m, m->m_pkthdr.len - siz, siz, (caddr_t) &sum0[0]);
        if (esp_auth(m, off, m->m_pkthdr.len - off - siz, sav, sum)) {
            ipseclog((LOG_WARNING, "auth fail in IPv4 ESP input: %s %s\n",
                ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
            IPSEC_STAT_INCREMENT(ipsecstat.in_espauthfail);
            goto bad;
        }

        if (cc_cmp_safe(siz, sum0, sum)) {
            ipseclog((LOG_WARNING, "cc_cmp fail in IPv4 ESP input: %s %s\n",
                ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
            IPSEC_STAT_INCREMENT(ipsecstat.in_espauthfail);
            goto bad;
        }
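
        /*
         * cc_cmp_safe() (corecrypto) compares the computed MAC against the
         * one carried in the packet in constant time, so a mismatching ICV
         * cannot be probed byte-by-byte through timing differences; it
         * returns nonzero on mismatch.
         */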
delay_icv:

        /* strip off the authentication data */
        m_adj(m, -siz);
        ip = mtod(m, struct ip *);
#ifdef IPLEN_FLIPPED
        ip->ip_len = ip->ip_len - siz;
#else
        ip->ip_len = htons(ntohs(ip->ip_len) - siz);
#endif
        m->m_flags |= M_AUTHIPDGM;
        IPSEC_STAT_INCREMENT(ipsecstat.in_espauthsucc);
    }

    /*
     * update sequence number.
     */
    if ((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay) {
        if (ipsec_updatereplay(seq, sav)) {
            IPSEC_STAT_INCREMENT(ipsecstat.in_espreplay);
            goto bad;
        }
    }
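    /*
     * ipsec_chkreplay() above is a read-only test against the replay
     * window; ipsec_updatereplay() is what actually advances the window
     * (and sav->replay->lastseq), and it fails the packet if the sequence
     * number turns out to be a replay after all.
     */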
noreplaycheck:

    /* process main esp header. */
    if (sav->flags & SADB_X_EXT_OLD) {
        /* RFC 1827 */
        esplen = sizeof(struct esp);
    } else {
        /* RFC 2406 */
        if (sav->flags & SADB_X_EXT_DERIV) {
            esplen = sizeof(struct esp);
        } else {
            esplen = sizeof(struct newesp);
        }
    }
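    /*
     * esplen is only the fixed part of the header: struct esp carries just
     * the SPI (old-style ESP, and the SADB_X_EXT_DERIV variant, which also
     * omits the separate sequence field), while struct newesp adds the
     * 32-bit sequence number.  The variable-length IV (ivlen) is accounted
     * for separately in the length checks and stripping below.
     */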
    if (m->m_pkthdr.len < off + esplen + ivlen + sizeof(esptail)) {
        ipseclog((LOG_WARNING,
            "IPv4 ESP input: packet too short\n"));
        IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
        goto bad;
    }

    if (m->m_len < off + esplen + ivlen) {
        m = m_pullup(m, off + esplen + ivlen);
        if (!m) {
            ipseclog((LOG_DEBUG,
                "IPv4 ESP input: can't pullup in esp4_input\n"));
            IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
            goto bad;
        }
    }

    /*
     * pre-compute and cache intermediate key
     */
    if (esp_schedule(algo, sav) != 0) {
        IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
        goto bad;
    }
    /*
     * decrypt the packet.
     */
    if (!algo->decrypt) {
        panic("internal error: no decrypt function");
    }
    KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_START, 0, 0, 0, 0, 0);
    if ((*algo->decrypt)(m, off, sav, algo, ivlen)) {
        /* m is already freed */
        m = NULL;
        ipseclog((LOG_ERR, "decrypt fail in IPv4 ESP input: %s\n",
            ipsec_logsastr(sav)));
        IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
        KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 1, 0, 0, 0, 0);
        goto bad;
    }
    KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 2, 0, 0, 0, 0);
    IPSEC_STAT_INCREMENT(ipsecstat.in_esphist[sav->alg_enc]);

    m->m_flags |= M_DECRYPTED;

    if (algo->finalizedecrypt) {
        if ((*algo->finalizedecrypt)(sav, saved_icv, algo->icvlen)) {
            ipseclog((LOG_ERR, "packet decryption ICV failure\n"));
            IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
            KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 1, 0, 0, 0, 0);
            goto bad;
        }
    }
    /*
     * find the trailer of the ESP.
     */
    m_copydata(m, m->m_pkthdr.len - sizeof(esptail), sizeof(esptail),
        (caddr_t)&esptail);
    nxt = esptail.esp_nxt;
    taillen = esptail.esp_padlen + sizeof(esptail);

    if (m->m_pkthdr.len < taillen
        || m->m_pkthdr.len - taillen < hlen) {  /*?*/
        ipseclog((LOG_WARNING,
            "bad pad length in IPv4 ESP input: %s %s\n",
            ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
        IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
        goto bad;
    }
    /* strip off the trailing pad area. */
    m_adj(m, -taillen);
    ip = mtod(m, struct ip *);
#ifdef IPLEN_FLIPPED
    ip->ip_len = ip->ip_len - taillen;
#else
    ip->ip_len = htons(ntohs(ip->ip_len) - taillen);
#endif
    if (ip->ip_p == IPPROTO_UDP) {
        // offset includes the outer ip and udp header lengths.
        if (m->m_len < off) {
            m = m_pullup(m, off);
            if (!m) {
                ipseclog((LOG_DEBUG,
                    "IPv4 ESP input: invalid udp encapsulated ESP packet length \n"));
                IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
                goto bad;
            }
        }

        // check the UDP encap header to detect changes in the source port, and then strip the header
        off -= sizeof(struct udphdr); // off no longer includes the udphdr's size
        // if peer is behind nat and this is the latest esp packet
        if ((sav->flags & SADB_X_EXT_NATT_DETECTED_PEER) != 0 &&
            (sav->flags & SADB_X_EXT_OLD) == 0 &&
            seq && sav->replay &&
            seq >= sav->replay->lastseq) {
            struct udphdr *encap_uh = (__typeof__(encap_uh))(void *)((caddr_t)ip + off);
            if (encap_uh->uh_sport &&
                ntohs(encap_uh->uh_sport) != sav->remote_ike_port) {
                sav->remote_ike_port = ntohs(encap_uh->uh_sport);
            }
        }
        ip = esp4_input_strip_udp_encap(m, off);
        esp = (struct esp *)(void *)(((u_int8_t *)ip) + off);
    }
    /* was it transmitted over the IPsec tunnel SA? */
    if (ipsec4_tunnel_validate(m, off + esplen + ivlen, nxt, sav, &ifamily)) {
        ifaddr_t ifa;
        struct sockaddr_storage addr;

        /*
         * strip off all the headers that precedes ESP header.
         *      IP4 xx ESP IP4' payload -> IP4' payload
         *
         * XXX more sanity checks
         * XXX relationship with gif?
         */
        u_int8_t tos, otos;
        int sum;

        tos = ip->ip_tos;
        m_adj(m, off + esplen + ivlen);
        if (ifamily == AF_INET) {
            struct sockaddr_in *ipaddr;

            if (m->m_len < sizeof(*ip)) {
                m = m_pullup(m, sizeof(*ip));
                if (!m) {
                    IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
                    goto bad;
                }
            }
            ip = mtod(m, struct ip *);
            /* ECN consideration. */

            otos = ip->ip_tos;
            if (ip_ecn_egress(ip4_ipsec_ecn, &tos, &ip->ip_tos) == 0) {
                IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
                goto bad;
            }

            if (otos != ip->ip_tos) {
                sum = ~ntohs(ip->ip_sum) & 0xffff;
                sum += (~otos & 0xffff) + ip->ip_tos;
                sum = (sum >> 16) + (sum & 0xffff);
                sum += (sum >> 16); /* add carry */
                ip->ip_sum = htons(~sum & 0xffff);
            }
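            /*
             * Incremental header-checksum fixup (RFC 1624 style): since
             * only the TOS byte changed during ECN egress processing, the
             * checksum is patched from the old and new byte values instead
             * of being recomputed over the whole header.
             */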
            if (!key_checktunnelsanity(sav, AF_INET,
                (caddr_t)&ip->ip_src, (caddr_t)&ip->ip_dst)) {
                ipseclog((LOG_ERR, "ipsec tunnel address mismatch "
                    "in ESP input: %s %s\n",
                    ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
                IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
                goto bad;
            }

            bzero(&addr, sizeof(addr));
            ipaddr = (__typeof__(ipaddr)) & addr;
            ipaddr->sin_family = AF_INET;
            ipaddr->sin_len = sizeof(*ipaddr);
            ipaddr->sin_addr = ip->ip_dst;
        } else if (ifamily == AF_INET6) {
            struct sockaddr_in6 *ip6addr;

            /*
             * m_pullup is prohibited in KAME IPv6 input processing
             * but there's no other way!
             */
            if (m->m_len < sizeof(*ip6)) {
                m = m_pullup(m, sizeof(*ip6));
                if (!m) {
                    IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
                    goto bad;
                }
            }

            /*
             * Expect 32-bit aligned data pointer on strict-align
             * platforms.
             */
            MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);

            ip6 = mtod(m, struct ip6_hdr *);

            /* ECN consideration. */
            if (ip64_ecn_egress(ip4_ipsec_ecn, &tos, &ip6->ip6_flow) == 0) {
                IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
                goto bad;
            }

            if (!key_checktunnelsanity(sav, AF_INET6,
                (caddr_t)&ip6->ip6_src, (caddr_t)&ip6->ip6_dst)) {
                ipseclog((LOG_ERR, "ipsec tunnel address mismatch "
                    "in ESP input: %s %s\n",
                    ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
                IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
                goto bad;
            }

            bzero(&addr, sizeof(addr));
            ip6addr = (__typeof__(ip6addr)) & addr;
            ip6addr->sin6_family = AF_INET6;
            ip6addr->sin6_len = sizeof(*ip6addr);
            ip6addr->sin6_addr = ip6->ip6_dst;
        } else {
            ipseclog((LOG_ERR, "ipsec tunnel unsupported address family "
                "in ESP input\n"));
            goto bad;
        }

        key_sa_recordxfer(sav, m);
        if (ipsec_addhist(m, IPPROTO_ESP, spi) != 0 ||
            ipsec_addhist(m, IPPROTO_IPV4, 0) != 0) {
            IPSEC_STAT_INCREMENT(ipsecstat.in_nomem);
            goto bad;
        }
        // update the receiving interface address based on the inner address
        ifa = ifa_ifwithaddr((struct sockaddr *)&addr);
        if (ifa) {
            m->m_pkthdr.rcvif = ifa->ifa_ifp;
            IFA_REMREF(ifa);
        }

        /* Clear the csum flags, they can't be valid for the inner headers */
        m->m_pkthdr.csum_flags = 0;

        // Input via IPSec interface
        if (sav->sah->ipsec_if != NULL) {
            // Return mbuf
            if (interface != NULL &&
                interface == sav->sah->ipsec_if) {
                out_m = m;
                goto done;
            }

            if (ipsec_inject_inbound_packet(sav->sah->ipsec_if, m) == 0) {
                m = NULL;
                goto done;
            } else {
                goto bad;
            }
        }

        if (proto_input(ifamily == AF_INET ? PF_INET : PF_INET6, m) != 0) {
            goto bad;
        }

        nxt = IPPROTO_DONE;
        KERNEL_DEBUG(DBG_FNC_ESPIN | DBG_FUNC_END, 2, 0, 0, 0, 0);
    } else {
        /*
         * strip off ESP header and IV.
         * even in m_pulldown case, we need to strip off ESP so that
         * we can always compute checksum for AH correctly.
         */
        size_t stripsiz;

        stripsiz = esplen + ivlen;

        ip = mtod(m, struct ip *);
        ovbcopy((caddr_t)ip, (caddr_t)(((u_char *)ip) + stripsiz), off);
        m->m_data += stripsiz;
        m->m_len -= stripsiz;
        m->m_pkthdr.len -= stripsiz;

        ip = mtod(m, struct ip *);
#ifdef IPLEN_FLIPPED
        ip->ip_len = ip->ip_len - stripsiz;
#else
        ip->ip_len = htons(ntohs(ip->ip_len) - stripsiz);
#endif
        ip->ip_p = nxt;

        key_sa_recordxfer(sav, m);
        if (ipsec_addhist(m, IPPROTO_ESP, spi) != 0) {
            IPSEC_STAT_INCREMENT(ipsecstat.in_nomem);
            goto bad;
        }
        /*
         * Set the csum valid flag, if we authenticated the
         * packet, the payload shouldn't be corrupt unless
         * it was corrupted before being signed on the other
         * side.
         */
        if (nxt == IPPROTO_TCP || nxt == IPPROTO_UDP) {
            m->m_pkthdr.csum_flags = CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
            m->m_pkthdr.csum_data = 0xFFFF;
            _CASSERT(offsetof(struct pkthdr, csum_data) == offsetof(struct pkthdr, csum_rx_val));
        }
        if (nxt != IPPROTO_DONE) {
            if ((ip_protox[nxt]->pr_flags & PR_LASTHDR) != 0 &&
                ipsec4_in_reject(m, NULL)) {
                IPSEC_STAT_INCREMENT(ipsecstat.in_polvio);
                goto bad;
            }
            KERNEL_DEBUG(DBG_FNC_ESPIN | DBG_FUNC_END, 3, 0, 0, 0, 0);
            /* translate encapsulated UDP port ? */
            if ((sav->flags & SADB_X_EXT_NATT_MULTIPLEUSERS) != 0) {
                struct udphdr *udp;

                if (nxt != IPPROTO_UDP) {       /* not UDP packet - drop it */
                    IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
                    goto bad;
                }

                if (m->m_len < off + sizeof(struct udphdr)) {
                    m = m_pullup(m, off + sizeof(struct udphdr));
                    if (!m) {
                        ipseclog((LOG_DEBUG,
                            "IPv4 ESP input: can't pullup UDP header in esp4_input\n"));
                        IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
                        goto bad;
                    }
                    ip = mtod(m, struct ip *);
                }
                udp = (struct udphdr *)(void *)(((u_int8_t *)ip) + off);

                lck_mtx_lock(sadb_mutex);
                if (sav->natt_encapsulated_src_port == 0) {
                    sav->natt_encapsulated_src_port = udp->uh_sport;
                } else if (sav->natt_encapsulated_src_port != udp->uh_sport) { /* something wrong */
                    IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
                    lck_mtx_unlock(sadb_mutex);
                    goto bad;
                }
                lck_mtx_unlock(sadb_mutex);
                udp->uh_sport = htons(sav->remote_ike_port);
                udp->uh_sum = 0;
            }
            DTRACE_IP6(receive, struct mbuf *, m, struct inpcb *, NULL,
                struct ip *, ip, struct ifnet *, m->m_pkthdr.rcvif,
                struct ip *, ip, struct ip6_hdr *, NULL);
            // Input via IPsec interface legacy path
            if (sav->sah->ipsec_if != NULL) {
                int mlen;
                if ((mlen = m_length2(m, NULL)) < hlen) {
                    ipseclog((LOG_DEBUG,
                        "IPv4 ESP input: decrypted packet too short %d < %d\n",
                        mlen, hlen));
                    IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
                    goto bad;
                }
                ip->ip_len = htons(ip->ip_len + hlen);
                ip->ip_off = htons(ip->ip_off);
                ip->ip_sum = 0;
                ip->ip_sum = ip_cksum_hdr_in(m, hlen);
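                /*
                 * ip_len/ip_off here follow the ip_input() convention (host
                 * byte order, header length excluded from ip_len), so before
                 * handing the packet to the ipsec interface they are put
                 * back into wire format: the header length is re-added, both
                 * fields are byte-swapped, and the header checksum is
                 * recomputed.
                 */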
                // Return mbuf
                if (interface != NULL &&
                    interface == sav->sah->ipsec_if) {
                    out_m = m;
                    goto done;
                }

                if (ipsec_inject_inbound_packet(sav->sah->ipsec_if, m) == 0) {
                    m = NULL;
                    goto done;
                } else {
                    goto bad;
                }
            }

            ip_proto_dispatch_in(m, off, nxt, 0);
        } else {
            m_freem(m);
        }
        m = NULL;
    }

done:
    if (sav) {
        KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
            printf("DP esp4_input call free SA:0x%llx\n",
            (uint64_t)VM_KERNEL_ADDRPERM(sav)));
        key_freesav(sav, KEY_SADB_UNLOCKED);
    }
    IPSEC_STAT_INCREMENT(ipsecstat.in_success);
    return out_m;

bad:
    if (sav) {
        KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
            printf("DP esp4_input call free SA:0x%llx\n",
            (uint64_t)VM_KERNEL_ADDRPERM(sav)));
        key_freesav(sav, KEY_SADB_UNLOCKED);
    }
    if (m) {
        m_freem(m);
    }
    KERNEL_DEBUG(DBG_FNC_ESPIN | DBG_FUNC_END, 4, 0, 0, 0, 0);
    return out_m;
}
int
esp6_input(struct mbuf **mp, int *offp, int proto)
{
    return esp6_input_extended(mp, offp, proto, NULL);
}
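
/*
 * The IPv6 entry points follow the ip6protosw convention: they return the
 * next protocol value to continue header processing with, or IPPROTO_DONE
 * once the packet has been consumed (or dropped).  esp6_input_extended
 * mirrors the IPv4 path above; IPv6-specific differences (jumbogram
 * rejection, PULLDOWN_TEST handling, ip6_plen bookkeeping) are noted inline.
 */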
int
esp6_input_extended(struct mbuf **mp, int *offp, int proto, ifnet_t interface)
{
#pragma unused(proto)
    struct mbuf *m = *mp;
    int off = *offp;
    struct ip *ip;
    struct ip6_hdr *ip6;
    struct esp *esp;
    struct esptail esptail;
    u_int32_t spi;
    u_int32_t seq;
    struct secasvar *sav = NULL;
    size_t taillen;
    u_int16_t nxt;
    char *nproto;
    const struct esp_algorithm *algo;
    int ivlen;
    size_t esplen;
    sa_family_t ifamily;

    /* sanity check for alignment. */
    if (off % 4 != 0 || m->m_pkthdr.len % 4 != 0) {
        ipseclog((LOG_ERR, "IPv6 ESP input: packet alignment problem "
            "(off=%d, pktlen=%d)\n", off, m->m_pkthdr.len));
        IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
        goto bad;
    }
#ifndef PULLDOWN_TEST
    IP6_EXTHDR_CHECK(m, off, ESPMAXLEN, {return IPPROTO_DONE;});
    esp = (struct esp *)(void *)(mtod(m, caddr_t) + off);
#else
    IP6_EXTHDR_GET(esp, struct esp *, m, off, ESPMAXLEN);
    if (esp == NULL) {
        IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
        return IPPROTO_DONE;
    }
#endif
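    /*
     * Two KAME access models: without PULLDOWN_TEST, IP6_EXTHDR_CHECK
     * guarantees the first ESPMAXLEN bytes at `off` are contiguous and the
     * header is addressed directly in the mbuf; with PULLDOWN_TEST,
     * IP6_EXTHDR_GET may return a pointer into a separate mbuf and can
     * fail, which is why only that branch needs the NULL check.
     */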
    m->m_pkthdr.csum_flags &= ~CSUM_RX_FLAGS;

    /* Expect 32-bit data aligned pointer on strict-align platforms */
    MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);

    ip6 = mtod(m, struct ip6_hdr *);

    if (ntohs(ip6->ip6_plen) == 0) {
        ipseclog((LOG_ERR, "IPv6 ESP input: "
            "ESP with IPv6 jumbogram is not supported.\n"));
        IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
        goto bad;
    }

    nproto = ip6_get_prevhdr(m, off);
    if (nproto == NULL || (*nproto != IPPROTO_ESP &&
        !(*nproto == IPPROTO_UDP && off >= sizeof(struct udphdr)))) {
        ipseclog((LOG_DEBUG, "IPv6 ESP input: invalid protocol type\n"));
        IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
        goto bad;
    }
    /* find the sassoc. */
    spi = esp->esp_spi;

    if ((sav = key_allocsa_extended(AF_INET6,
        (caddr_t)&ip6->ip6_src, (caddr_t)&ip6->ip6_dst,
        IPPROTO_ESP, spi, interface)) == 0) {
        ipseclog((LOG_WARNING,
            "IPv6 ESP input: no key association found for spi %u\n",
            (u_int32_t)ntohl(spi)));
        IPSEC_STAT_INCREMENT(ipsec6stat.in_nosa);
        goto bad;
    }
    KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
        printf("DP esp6_input called to allocate SA:0x%llx\n",
        (uint64_t)VM_KERNEL_ADDRPERM(sav)));
    if (sav->state != SADB_SASTATE_MATURE
        && sav->state != SADB_SASTATE_DYING) {
        ipseclog((LOG_DEBUG,
            "IPv6 ESP input: non-mature/dying SA found for spi %u\n",
            (u_int32_t)ntohl(spi)));
        IPSEC_STAT_INCREMENT(ipsec6stat.in_badspi);
        goto bad;
    }
    algo = esp_algorithm_lookup(sav->alg_enc);
    if (!algo) {
        ipseclog((LOG_DEBUG, "IPv6 ESP input: "
            "unsupported encryption algorithm for spi %u\n",
            (u_int32_t)ntohl(spi)));
        IPSEC_STAT_INCREMENT(ipsec6stat.in_badspi);
        goto bad;
    }

    /* check if we have proper ivlen information */
    ivlen = sav->ivlen;
    if (ivlen < 0) {
        ipseclog((LOG_ERR, "improper ivlen in IPv6 ESP input: %s %s\n",
            ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
        IPSEC_STAT_INCREMENT(ipsec6stat.in_badspi);
        goto bad;
    }

    seq = ntohl(((struct newesp *)esp)->esp_seq);
    /* Save ICV from packet for verification later */
    size_t siz = 0;
    unsigned char saved_icv[AH_MAXSUMSIZE];
    if (algo->finalizedecrypt) {
        siz = algo->icvlen;
        m_copydata(m, m->m_pkthdr.len - siz, siz, (caddr_t) saved_icv);
        goto delay_icv;
    }

    if (!((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay
        && (sav->alg_auth && sav->key_auth))) {
        goto noreplaycheck;
    }

    if (sav->alg_auth == SADB_X_AALG_NULL ||
        sav->alg_auth == SADB_AALG_NONE) {
        goto noreplaycheck;
    }

    /*
     * check for sequence number.
     */
    if (ipsec_chkreplay(seq, sav)) {
        ; /* okey */
    } else {
        IPSEC_STAT_INCREMENT(ipsec6stat.in_espreplay);
        ipseclog((LOG_WARNING,
            "replay packet in IPv6 ESP input: %s %s\n",
            ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
        goto bad;
    }
    /* check ICV */
    {
        u_char sum0[AH_MAXSUMSIZE] __attribute__((aligned(4)));
        u_char sum[AH_MAXSUMSIZE] __attribute__((aligned(4)));
        const struct ah_algorithm *sumalgo;

        sumalgo = ah_algorithm_lookup(sav->alg_auth);
        if (!sumalgo) {
            goto noreplaycheck;
        }
        siz = (((*sumalgo->sumsiz)(sav) + 3) & ~(4 - 1));
        if (m->m_pkthdr.len < off + ESPMAXLEN + siz) {
            IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
            goto bad;
        }
        if (AH_MAXSUMSIZE < siz) {
            ipseclog((LOG_DEBUG,
                "internal error: AH_MAXSUMSIZE must be larger than %lu\n",
                (u_long)siz));
            IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
            goto bad;
        }

        m_copydata(m, m->m_pkthdr.len - siz, siz, (caddr_t) &sum0[0]);

        if (esp_auth(m, off, m->m_pkthdr.len - off - siz, sav, sum)) {
            ipseclog((LOG_WARNING, "auth fail in IPv6 ESP input: %s %s\n",
                ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
            IPSEC_STAT_INCREMENT(ipsec6stat.in_espauthfail);
            goto bad;
        }

        if (cc_cmp_safe(siz, sum0, sum)) {
            ipseclog((LOG_WARNING, "auth fail in IPv6 ESP input: %s %s\n",
                ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
            IPSEC_STAT_INCREMENT(ipsec6stat.in_espauthfail);
            goto bad;
        }
delay_icv:

        /* strip off the authentication data */
        m_adj(m, -siz);
        ip6 = mtod(m, struct ip6_hdr *);
        ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - siz);
        m->m_flags |= M_AUTHIPDGM;
        IPSEC_STAT_INCREMENT(ipsec6stat.in_espauthsucc);
    }

    /*
     * update sequence number.
     */
    if ((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay) {
        if (ipsec_updatereplay(seq, sav)) {
            IPSEC_STAT_INCREMENT(ipsec6stat.in_espreplay);
            goto bad;
        }
    }
noreplaycheck:

    /* process main esp header. */
    if (sav->flags & SADB_X_EXT_OLD) {
        /* RFC 1827 */
        esplen = sizeof(struct esp);
    } else {
        /* RFC 2406 */
        if (sav->flags & SADB_X_EXT_DERIV) {
            esplen = sizeof(struct esp);
        } else {
            esplen = sizeof(struct newesp);
        }
    }

    if (m->m_pkthdr.len < off + esplen + ivlen + sizeof(esptail)) {
        ipseclog((LOG_WARNING,
            "IPv6 ESP input: packet too short\n"));
        IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
        goto bad;
    }
#ifndef PULLDOWN_TEST
    IP6_EXTHDR_CHECK(m, off, esplen + ivlen, return IPPROTO_DONE);      /*XXX*/
#else
    IP6_EXTHDR_GET(esp, struct esp *, m, off, esplen + ivlen);
    if (esp == NULL) {
        IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
        m = NULL;
        goto bad;
    }
#endif
    ip6 = mtod(m, struct ip6_hdr *);        /*set it again just in case*/

    /*
     * pre-compute and cache intermediate key
     */
    if (esp_schedule(algo, sav) != 0) {
        IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
        goto bad;
    }
    /*
     * decrypt the packet.
     */
    if (!algo->decrypt) {
        panic("internal error: no decrypt function");
    }
    if ((*algo->decrypt)(m, off, sav, algo, ivlen)) {
        /* m is already freed */
        m = NULL;
        ipseclog((LOG_ERR, "decrypt fail in IPv6 ESP input: %s\n",
            ipsec_logsastr(sav)));
        IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
        goto bad;
    }
    IPSEC_STAT_INCREMENT(ipsec6stat.in_esphist[sav->alg_enc]);

    m->m_flags |= M_DECRYPTED;

    if (algo->finalizedecrypt) {
        if ((*algo->finalizedecrypt)(sav, saved_icv, algo->icvlen)) {
            ipseclog((LOG_ERR, "packet decryption ICV failure\n"));
            IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
            KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 1, 0, 0, 0, 0);
            goto bad;
        }
    }
    /*
     * find the trailer of the ESP.
     */
    m_copydata(m, m->m_pkthdr.len - sizeof(esptail), sizeof(esptail),
        (caddr_t)&esptail);
    nxt = esptail.esp_nxt;
    taillen = esptail.esp_padlen + sizeof(esptail);

    if (m->m_pkthdr.len < taillen
        || m->m_pkthdr.len - taillen < sizeof(struct ip6_hdr)) {        /*?*/
        ipseclog((LOG_WARNING,
            "bad pad length in IPv6 ESP input: %s %s\n",
            ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
        IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
        goto bad;
    }
    /* strip off the trailing pad area. */
    m_adj(m, -taillen);
    ip6 = mtod(m, struct ip6_hdr *);
    ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - taillen);

    if (*nproto == IPPROTO_UDP) {
        // offset includes the outer ip and udp header lengths.
        if (m->m_len < off) {
            m = m_pullup(m, off);
            if (!m) {
                ipseclog((LOG_DEBUG,
                    "IPv6 ESP input: invalid udp encapsulated ESP packet length\n"));
                IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
                goto bad;
            }
        }

        // check the UDP encap header to detect changes in the source port, and then strip the header
        off -= sizeof(struct udphdr); // off no longer includes the udphdr's size
        // if peer is behind nat and this is the latest esp packet
        if ((sav->flags & SADB_X_EXT_NATT_DETECTED_PEER) != 0 &&
            (sav->flags & SADB_X_EXT_OLD) == 0 &&
            seq && sav->replay &&
            seq >= sav->replay->lastseq) {
            struct udphdr *encap_uh = (__typeof__(encap_uh))(void *)((caddr_t)ip6 + off);
            if (encap_uh->uh_sport &&
                ntohs(encap_uh->uh_sport) != sav->remote_ike_port) {
                sav->remote_ike_port = ntohs(encap_uh->uh_sport);
            }
        }
        ip6 = esp6_input_strip_udp_encap(m, off);
        esp = (struct esp *)(void *)(((u_int8_t *)ip6) + off);
    }
    /* was it transmitted over the IPsec tunnel SA? */
    if (ipsec6_tunnel_validate(m, off + esplen + ivlen, nxt, sav, &ifamily)) {
        ifaddr_t ifa;
        struct sockaddr_storage addr;

        /*
         * strip off all the headers that precedes ESP header.
         *      IP6 xx ESP IP6' payload -> IP6' payload
         *
         * XXX more sanity checks
         * XXX relationship with gif?
         */
        u_int32_t flowinfo;     /*net endian*/
        flowinfo = ip6->ip6_flow;
        m_adj(m, off + esplen + ivlen);
        if (ifamily == AF_INET6) {
            struct sockaddr_in6 *ip6addr;

            if (m->m_len < sizeof(*ip6)) {
#ifndef PULLDOWN_TEST
                /*
                 * m_pullup is prohibited in KAME IPv6 input processing
                 * but there's no other way!
                 */
#else
                /* okay to pullup in m_pulldown style */
#endif
                m = m_pullup(m, sizeof(*ip6));
                if (!m) {
                    IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
                    goto bad;
                }
            }
            ip6 = mtod(m, struct ip6_hdr *);
            /* ECN consideration. */
            if (ip6_ecn_egress(ip6_ipsec_ecn, &flowinfo, &ip6->ip6_flow) == 0) {
                IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
                goto bad;
            }
            if (!key_checktunnelsanity(sav, AF_INET6,
                (caddr_t)&ip6->ip6_src, (caddr_t)&ip6->ip6_dst)) {
                ipseclog((LOG_ERR, "ipsec tunnel address mismatch "
                    "in IPv6 ESP input: %s %s\n",
                    ipsec6_logpacketstr(ip6, spi),
                    ipsec_logsastr(sav)));
                IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
                goto bad;
            }

            bzero(&addr, sizeof(addr));
            ip6addr = (__typeof__(ip6addr)) & addr;
            ip6addr->sin6_family = AF_INET6;
            ip6addr->sin6_len = sizeof(*ip6addr);
            ip6addr->sin6_addr = ip6->ip6_dst;
        } else if (ifamily == AF_INET) {
            struct sockaddr_in *ipaddr;

            if (m->m_len < sizeof(*ip)) {
                m = m_pullup(m, sizeof(*ip));
                if (!m) {
                    IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
                    goto bad;
                }
            }

            u_int8_t otos;
            int sum;

            ip = mtod(m, struct ip *);
            otos = ip->ip_tos;
            /* ECN consideration. */
            if (ip46_ecn_egress(ip6_ipsec_ecn, &flowinfo, &ip->ip_tos) == 0) {
                IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
                goto bad;
            }

            if (otos != ip->ip_tos) {
                sum = ~ntohs(ip->ip_sum) & 0xffff;
                sum += (~otos & 0xffff) + ip->ip_tos;
                sum = (sum >> 16) + (sum & 0xffff);
                sum += (sum >> 16); /* add carry */
                ip->ip_sum = htons(~sum & 0xffff);
            }

            if (!key_checktunnelsanity(sav, AF_INET,
                (caddr_t)&ip->ip_src, (caddr_t)&ip->ip_dst)) {
                ipseclog((LOG_ERR, "ipsec tunnel address mismatch "
                    "in ESP input: %s %s\n",
                    ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
                IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
                goto bad;
            }

            bzero(&addr, sizeof(addr));
            ipaddr = (__typeof__(ipaddr)) & addr;
            ipaddr->sin_family = AF_INET;
            ipaddr->sin_len = sizeof(*ipaddr);
            ipaddr->sin_addr = ip->ip_dst;
        }

        key_sa_recordxfer(sav, m);
        if (ipsec_addhist(m, IPPROTO_ESP, spi) != 0 ||
            ipsec_addhist(m, IPPROTO_IPV6, 0) != 0) {
            IPSEC_STAT_INCREMENT(ipsec6stat.in_nomem);
            goto bad;
        }

        // update the receiving interface address based on the inner address
        ifa = ifa_ifwithaddr((struct sockaddr *)&addr);
        if (ifa) {
            m->m_pkthdr.rcvif = ifa->ifa_ifp;
            IFA_REMREF(ifa);
        }

        // Input via IPSec interface
        if (sav->sah->ipsec_if != NULL) {
            // Return mbuf
            if (interface != NULL &&
                interface == sav->sah->ipsec_if) {
                goto done;
            }

            if (ipsec_inject_inbound_packet(sav->sah->ipsec_if, m) == 0) {
                m = NULL;
                nxt = IPPROTO_DONE;
                goto done;
            } else {
                goto bad;
            }
        }

        if (proto_input(ifamily == AF_INET ? PF_INET : PF_INET6, m) != 0) {
            goto bad;
        }
        nxt = IPPROTO_DONE;
    } else {
        /*
         * strip off ESP header and IV.
         * even in m_pulldown case, we need to strip off ESP so that
         * we can always compute checksum for AH correctly.
         */
        size_t stripsiz;
        char *prvnxtp;

        /*
         * Set the next header field of the previous header correctly.
         */
        prvnxtp = ip6_get_prevhdr(m, off); /* XXX */
        *prvnxtp = nxt;

        stripsiz = esplen + ivlen;

        ip6 = mtod(m, struct ip6_hdr *);
        if (m->m_len >= stripsiz + off) {
            ovbcopy((caddr_t)ip6, ((caddr_t)ip6) + stripsiz, off);
            m->m_data += stripsiz;
            m->m_len -= stripsiz;
            m->m_pkthdr.len -= stripsiz;
        } else {
            /*
             * this comes with no copy if the boundary is on
             * cluster
             */
            struct mbuf *n;

            n = m_split(m, off, M_DONTWAIT);
            if (n == NULL) {
                /* m is retained by m_split */
                goto bad;
            }
            m_adj(n, stripsiz);
            /* m_cat does not update m_pkthdr.len */
            m->m_pkthdr.len += n->m_pkthdr.len;
            m_cat(m, n);
        }
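        /*
         * Transport-mode stripping, IPv6 flavor: when the region before the
         * ESP header is contiguous, the preceding headers are simply slid
         * forward over ESP+IV (ovbcopy path); otherwise the chain is split
         * at `off`, stripsiz bytes are trimmed from the front of the tail
         * part, and the two chains are re-joined, with m_pkthdr.len fixed
         * up by hand because m_cat() does not maintain it.
         */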
#ifndef PULLDOWN_TEST
        /*
         * KAME requires that the packet to be contiguous on the
         * mbuf.  We need to make that sure.
         * this kind of code should be avoided.
         * XXX other conditions to avoid running this part?
         */
        if (m->m_len != m->m_pkthdr.len) {
            struct mbuf *n = NULL;
            int maxlen;

            MGETHDR(n, M_DONTWAIT, MT_HEADER);  /* MAC-OK */
            maxlen = MHLEN;
            if (n) {
                M_COPY_PKTHDR(n, m);
            }
            if (n && m->m_pkthdr.len > maxlen) {
                MCLGET(n, M_DONTWAIT);
                maxlen = MCLBYTES;
                if ((n->m_flags & M_EXT) == 0) {
                    m_free(n);
                    n = NULL;
                }
            }
            if (!n) {
                printf("esp6_input: mbuf allocation failed\n");
                goto bad;
            }

            if (m->m_pkthdr.len <= maxlen) {
                m_copydata(m, 0, m->m_pkthdr.len, mtod(n, caddr_t));
                n->m_len = m->m_pkthdr.len;
                n->m_pkthdr.len = m->m_pkthdr.len;
                n->m_next = NULL;
                m_freem(m);
            } else {
                m_copydata(m, 0, maxlen, mtod(n, caddr_t));
                n->m_len = maxlen;
                n->m_pkthdr.len = m->m_pkthdr.len;
                n->m_next = m;
                m_adj(m, maxlen);
                m->m_flags &= ~M_PKTHDR;
            }
            m = n;
        }
#endif

        ip6 = mtod(m, struct ip6_hdr *);
        ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - stripsiz);
        key_sa_recordxfer(sav, m);
        if (ipsec_addhist(m, IPPROTO_ESP, spi) != 0) {
            IPSEC_STAT_INCREMENT(ipsec6stat.in_nomem);
            goto bad;
        }

        /*
         * Set the csum valid flag, if we authenticated the
         * packet, the payload shouldn't be corrupt unless
         * it was corrupted before being signed on the other
         * side.
         */
        if (nxt == IPPROTO_TCP || nxt == IPPROTO_UDP) {
            m->m_pkthdr.csum_flags = CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
            m->m_pkthdr.csum_data = 0xFFFF;
            _CASSERT(offsetof(struct pkthdr, csum_data) == offsetof(struct pkthdr, csum_rx_val));
        }
        // Input via IPSec interface
        if (sav->sah->ipsec_if != NULL) {
            // Return mbuf
            if (interface != NULL &&
                interface == sav->sah->ipsec_if) {
                goto done;
            }

            if (ipsec_inject_inbound_packet(sav->sah->ipsec_if, m) == 0) {
                m = NULL;
                nxt = IPPROTO_DONE;
                goto done;
            } else {
                goto bad;
            }
        }
    }

done:
    *offp = off;
    *mp = m;
    if (sav) {
        KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
            printf("DP esp6_input call free SA:0x%llx\n",
            (uint64_t)VM_KERNEL_ADDRPERM(sav)));
        key_freesav(sav, KEY_SADB_UNLOCKED);
    }
    IPSEC_STAT_INCREMENT(ipsec6stat.in_success);
    return nxt;

bad:
    if (sav) {
        KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
            printf("DP esp6_input call free SA:0x%llx\n",
            (uint64_t)VM_KERNEL_ADDRPERM(sav)));
        key_freesav(sav, KEY_SADB_UNLOCKED);
    }
    if (m) {
        m_freem(m);
    }
    if (interface != NULL) {
        *mp = NULL;
    }
    return IPPROTO_DONE;
}
void
esp6_ctlinput(int cmd, struct sockaddr *sa, void *d, __unused struct ifnet *ifp)
{
    const struct newesp *espp;
    struct newesp esp;
    struct ip6ctlparam *ip6cp = NULL, ip6cp1;
    struct secasvar *sav;
    struct ip6_hdr *ip6;
    struct mbuf *m;
    int off = 0;
    struct sockaddr_in6 *sa6_src, *sa6_dst;

    if (sa->sa_family != AF_INET6 ||
        sa->sa_len != sizeof(struct sockaddr_in6)) {
        return;
    }
    if ((unsigned)cmd >= PRC_NCMDS) {
        return;
    }

    /* if the parameter is from icmp6, decode it. */
    if (d != NULL) {
        ip6cp = (struct ip6ctlparam *)d;
        m = ip6cp->ip6c_m;
        ip6 = ip6cp->ip6c_ip6;
        off = ip6cp->ip6c_off;
    } else {
        m = NULL;
        ip6 = NULL;
    }

    if (ip6) {
        /*
         * Notify the error to all possible sockets via pfctlinput2.
         * Since the upper layer information (such as protocol type,
         * source and destination ports) is embedded in the encrypted
         * data and might have been cut, we can't directly call
         * an upper layer ctlinput function. However, the pcbnotify
         * function will consider source and destination addresses
         * as well as the flow info value, and may be able to find
         * some PCB that should be notified.
         * Although pfctlinput2 will call esp6_ctlinput(), there is
         * no possibility of an infinite loop of function calls,
         * because we don't pass the inner IPv6 header.
         */
        bzero(&ip6cp1, sizeof(ip6cp1));
        ip6cp1.ip6c_src = ip6cp->ip6c_src;
        pfctlinput2(cmd, sa, (void *)&ip6cp1);
        /*
         * Then go to special cases that need ESP header information.
         * XXX: We assume that when ip6 is non NULL,
         * M and OFF are valid.
         */

        /* check if we can safely examine src and dst ports */
        if (m->m_pkthdr.len < off + sizeof(esp)) {
            return;
        }

        if (m->m_len < off + sizeof(esp)) {
            /*
             * this should be rare case,
             * so we compromise on this copy...
             */
            m_copydata(m, off, sizeof(esp), (caddr_t)&esp);
            espp = &esp;
        } else {
            espp = (struct newesp *)(void *)(mtod(m, caddr_t) + off);
        }

        if (cmd == PRC_MSGSIZE) {
            int valid = 0;

            /*
             * Check to see if we have a valid SA corresponding to
             * the address in the ICMP message payload.
             */
            sa6_src = ip6cp->ip6c_src;
            sa6_dst = (struct sockaddr_in6 *)(void *)sa;
            sav = key_allocsa(AF_INET6,
                (caddr_t)&sa6_src->sin6_addr,
                (caddr_t)&sa6_dst->sin6_addr,
                IPPROTO_ESP, espp->esp_spi);
            if (sav) {
                if (sav->state == SADB_SASTATE_MATURE ||
                    sav->state == SADB_SASTATE_DYING) {
                    valid++;
                }
                key_freesav(sav, KEY_SADB_UNLOCKED);
            }

            /* XXX Further validation? */

            /*
             * Depending on the value of "valid" and routing table
             * size (mtudisc_{hi,lo}wat), we will:
             * - recalculate the new MTU and create the
             *   corresponding routing entry, or
             * - ignore the MTU change notification.
             */
            icmp6_mtudisc_update((struct ip6ctlparam *)d, valid);
        }
    } else {
        /* we normally notify any pcb here */
    }
}