/*
 * Copyright (c) 2008-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*	$FreeBSD: src/sys/netinet6/esp_input.c,v 1.1.2.3 2001/07/03 11:01:50 ume Exp $	*/
/*	$KAME: esp_input.c,v 1.55 2001/03/23 08:08:47 itojun Exp $	*/

/*
 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the project nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * RFC1827/2406 Encapsulated Security Payload.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mcache.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/syslog.h>

#include <net/if_ipsec.h>
#include <net/route.h>
#include <kern/cpu_number.h>
#include <kern/locks.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/in_var.h>
#include <netinet/ip_ecn.h>
#include <netinet/in_pcb.h>
#include <netinet/udp.h>
#include <netinet6/ip6_ecn.h>

#include <netinet/ip6.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/ip6_var.h>
#include <netinet/icmp6.h>
#include <netinet6/ip6protosw.h>

#include <netinet6/ipsec.h>
#include <netinet6/ipsec6.h>
#include <netinet6/ah.h>
#include <netinet6/ah6.h>
#include <netinet6/esp.h>
#include <netinet6/esp6.h>
#include <netkey/key.h>
#include <netkey/keydb.h>
#include <netkey/key_debug.h>

#include <net/kpi_protocol.h>
#include <netinet/kpi_ipfilter_var.h>

#include <net/net_osdep.h>

#include <mach/sdt.h>
#include <corecrypto/cc.h>

#include <sys/kdebug.h>
#define DBG_LAYER_BEG		NETDBG_CODE(DBG_NETIPSEC, 1)
#define DBG_LAYER_END		NETDBG_CODE(DBG_NETIPSEC, 3)
#define DBG_FNC_ESPIN		NETDBG_CODE(DBG_NETIPSEC, (6 << 8))
#define DBG_FNC_DECRYPT		NETDBG_CODE(DBG_NETIPSEC, (7 << 8))
#define IPLEN_FLIPPED
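/*
 * Note: IPLEN_FLIPPED indicates that by the time a packet reaches this
 * layer, ip_len has already been converted to host byte order by the IP
 * input path, so the length adjustments below can operate on ip_len
 * directly instead of going through ntohs()/htons().
 */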
extern lck_mtx_t *sadb_mutex;

#define ESPMAXLEN \
    (sizeof(struct esp) < sizeof(struct newesp) \
    ? sizeof(struct newesp) : sizeof(struct esp))
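/*
 * ESPMAXLEN is the larger of the old-style (RFC 1827) and new-style
 * (RFC 2406) ESP header sizes; it bounds how many bytes must be made
 * contiguous before the SPI and sequence number can be read.
 *
 * The two helpers below remove a NAT-T (RFC 3948) UDP encapsulation
 * header in place.  On entry the packet looks roughly like
 *
 *	[ IP/IPv6 | UDP(4500) | ESP | IV | payload | pad | padlen | nxt | ICV ]
 *
 * and on return the UDP header is gone, the length fields are fixed up,
 * and the protocol/next-header field is rewritten to IPPROTO_ESP so the
 * rest of the input path can treat the packet as plain ESP.
 */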
static struct ip *
esp4_input_strip_udp_encap(struct mbuf *m, int iphlen)
{
    // strip the udp header that's encapsulating ESP
    struct ip *ip;
    size_t stripsiz = sizeof(struct udphdr);

    ip = mtod(m, __typeof__(ip));
    ovbcopy((caddr_t)ip, (caddr_t)(((u_char *)ip) + stripsiz), iphlen);
    m->m_data += stripsiz;
    m->m_len -= stripsiz;
    m->m_pkthdr.len -= stripsiz;
    ip = mtod(m, __typeof__(ip));
    ip->ip_len = ip->ip_len - stripsiz;
    ip->ip_p = IPPROTO_ESP;
    return ip;
}
static struct ip6_hdr *
esp6_input_strip_udp_encap(struct mbuf *m, int ip6hlen)
{
    // strip the udp header that's encapsulating ESP
    struct ip6_hdr *ip6;
    size_t stripsiz = sizeof(struct udphdr);

    ip6 = mtod(m, __typeof__(ip6));
    ovbcopy((caddr_t)ip6, (caddr_t)(((u_char *)ip6) + stripsiz), ip6hlen);
    m->m_data += stripsiz;
    m->m_len -= stripsiz;
    m->m_pkthdr.len -= stripsiz;
    ip6 = mtod(m, __typeof__(ip6));
    ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - stripsiz);
    ip6->ip6_nxt = IPPROTO_ESP;
    return ip6;
}
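/*
 * Note: both strip helpers assume the outer IP/IPv6 header and the UDP
 * encapsulation header are contiguous in the first mbuf; the callers
 * m_pullup() the packet far enough before calling them.
 */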
void
esp4_input(struct mbuf *m, int off)
{
    (void)esp4_input_extended(m, off, NULL);
}
struct mbuf *
esp4_input_extended(struct mbuf *m, int off, ifnet_t interface)
{
    struct esptail esptail;
    struct secasvar *sav = NULL;
    const struct esp_algorithm *algo;
    struct mbuf *out_m = NULL;

    KERNEL_DEBUG(DBG_FNC_ESPIN | DBG_FUNC_START, 0,0,0,0,0);
    /* sanity check for alignment. */
    if (off % 4 != 0 || m->m_pkthdr.len % 4 != 0) {
        ipseclog((LOG_ERR, "IPv4 ESP input: packet alignment problem "
            "(off=%d, pktlen=%d)\n", off, m->m_pkthdr.len));
        IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
        goto bad;
    }

    if (m->m_len < off + ESPMAXLEN) {
        m = m_pullup(m, off + ESPMAXLEN);
        if (!m) {
            ipseclog((LOG_DEBUG,
                "IPv4 ESP input: can't pullup in esp4_input\n"));
            IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
            goto bad;
        }
    }

    m->m_pkthdr.csum_flags &= ~CSUM_RX_FLAGS;

    /* Expect 32-bit aligned data pointer on strict-align platforms */
    MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);
    ip = mtod(m, struct ip *);
    // expect udp-encap and esp packets only
    if (ip->ip_p != IPPROTO_ESP &&
        !(ip->ip_p == IPPROTO_UDP && off >= sizeof(struct udphdr))) {
        ipseclog((LOG_DEBUG,
            "IPv4 ESP input: invalid protocol type\n"));
        IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
        goto bad;
    }

    esp = (struct esp *)(void *)(((u_int8_t *)ip) + off);
#ifdef _IP_VHL
    hlen = IP_VHL_HL(ip->ip_vhl) << 2;
#else
    hlen = ip->ip_hl << 2;
#endif

    /* find the sassoc. */
    spi = esp->esp_spi;

    if ((sav = key_allocsa_extended(AF_INET,
        (caddr_t)&ip->ip_src, (caddr_t)&ip->ip_dst,
        IPPROTO_ESP, spi, interface)) == 0) {
        ipseclog((LOG_WARNING,
            "IPv4 ESP input: no key association found for spi %u\n",
            (u_int32_t)ntohl(spi)));
        IPSEC_STAT_INCREMENT(ipsecstat.in_nosa);
        goto bad;
    }
    KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
        printf("DP esp4_input called to allocate SA:0x%llx\n",
        (uint64_t)VM_KERNEL_ADDRPERM(sav)));
    if (sav->state != SADB_SASTATE_MATURE
        && sav->state != SADB_SASTATE_DYING) {
        ipseclog((LOG_DEBUG,
            "IPv4 ESP input: non-mature/dying SA found for spi %u\n",
            (u_int32_t)ntohl(spi)));
        IPSEC_STAT_INCREMENT(ipsecstat.in_badspi);
        goto bad;
    }
    algo = esp_algorithm_lookup(sav->alg_enc);
    if (!algo) {
        ipseclog((LOG_DEBUG, "IPv4 ESP input: "
            "unsupported encryption algorithm for spi %u\n",
            (u_int32_t)ntohl(spi)));
        IPSEC_STAT_INCREMENT(ipsecstat.in_badspi);
        goto bad;
    }

    /* check if we have proper ivlen information */
    ivlen = sav->ivlen;
    if (ivlen < 0) {
        ipseclog((LOG_ERR, "improper ivlen in IPv4 ESP input: %s %s\n",
            ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
        IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
        goto bad;
    }
    seq = ntohl(((struct newesp *)esp)->esp_seq);

    /* Save ICV from packet for verification later */
    size_t siz = 0;
    unsigned char saved_icv[AH_MAXSUMSIZE];
    if (algo->finalizedecrypt) {
        siz = algo->icvlen;
        m_copydata(m, m->m_pkthdr.len - siz, siz, (caddr_t) saved_icv);
        goto delay_icv;
    }

    if (!((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay
        && (sav->alg_auth && sav->key_auth)))
        goto noreplaycheck;

    if (sav->alg_auth == SADB_X_AALG_NULL ||
        sav->alg_auth == SADB_AALG_NONE)
        goto noreplaycheck;

    /*
     * check for sequence number.
     */
    if (ipsec_chkreplay(seq, sav))
        ; /* okay */
    else {
        IPSEC_STAT_INCREMENT(ipsecstat.in_espreplay);
        ipseclog((LOG_WARNING,
            "replay packet in IPv4 ESP input: %s %s\n",
            ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
        goto bad;
    }
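    /*
     * Note: ipsec_chkreplay() above only verifies that the sequence number
     * falls within the anti-replay window; the window itself is advanced
     * later by ipsec_updatereplay(), once the packet has been
     * authenticated, so that forged sequence numbers cannot slide the
     * window forward.
     */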
    {
        u_char sum0[AH_MAXSUMSIZE] __attribute__((aligned(4)));
        u_char sum[AH_MAXSUMSIZE] __attribute__((aligned(4)));
        const struct ah_algorithm *sumalgo;

        sumalgo = ah_algorithm_lookup(sav->alg_auth);
        if (!sumalgo)
            goto noreplaycheck;
        siz = (((*sumalgo->sumsiz)(sav) + 3) & ~(4 - 1));
        if (m->m_pkthdr.len < off + ESPMAXLEN + siz) {
            IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
            goto bad;
        }
        if (AH_MAXSUMSIZE < siz) {
            ipseclog((LOG_DEBUG,
                "internal error: AH_MAXSUMSIZE must be larger than %lu\n",
                (u_long)siz));
            IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
            goto bad;
        }

        m_copydata(m, m->m_pkthdr.len - siz, siz, (caddr_t) &sum0[0]);

        if (esp_auth(m, off, m->m_pkthdr.len - off - siz, sav, sum)) {
            ipseclog((LOG_WARNING, "auth fail in IPv4 ESP input: %s %s\n",
                ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
            IPSEC_STAT_INCREMENT(ipsecstat.in_espauthfail);
            goto bad;
        }

        if (cc_cmp_safe(siz, sum0, sum)) {
            ipseclog((LOG_WARNING, "cc_cmp fail in IPv4 ESP input: %s %s\n",
                ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
            IPSEC_STAT_INCREMENT(ipsecstat.in_espauthfail);
            goto bad;
        }

        /* strip off the authentication data */
        m_adj(m, -siz);
        ip = mtod(m, struct ip *);
#ifdef IPLEN_FLIPPED
        ip->ip_len = ip->ip_len - siz;
#else
        ip->ip_len = htons(ntohs(ip->ip_len) - siz);
#endif
        m->m_flags |= M_AUTHIPDGM;
        IPSEC_STAT_INCREMENT(ipsecstat.in_espauthsucc);
    }
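    /*
     * Note: cc_cmp_safe() above is corecrypto's constant-time comparison,
     * so the computed ICV is checked against the one carried in the packet
     * without leaking, through timing, how many leading bytes matched.
     */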
delay_icv:

    /*
     * update sequence number.
     */
    if ((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay) {
        if (ipsec_updatereplay(seq, sav)) {
            IPSEC_STAT_INCREMENT(ipsecstat.in_espreplay);
            goto bad;
        }
    }

noreplaycheck:

    /* process main esp header. */
    if (sav->flags & SADB_X_EXT_OLD) {
        /* RFC 1827 */
        esplen = sizeof(struct esp);
    } else {
        /* RFC 2406 */
        if (sav->flags & SADB_X_EXT_DERIV)
            esplen = sizeof(struct esp);
        else
            esplen = sizeof(struct newesp);
    }

    if (m->m_pkthdr.len < off + esplen + ivlen + sizeof(esptail)) {
        ipseclog((LOG_WARNING,
            "IPv4 ESP input: packet too short\n"));
        IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
        goto bad;
    }
    if (m->m_len < off + esplen + ivlen) {
        m = m_pullup(m, off + esplen + ivlen);
        if (!m) {
            ipseclog((LOG_DEBUG,
                "IPv4 ESP input: can't pullup in esp4_input\n"));
            IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
            goto bad;
        }
    }

    /*
     * pre-compute and cache intermediate key
     */
    if (esp_schedule(algo, sav) != 0) {
        IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
        goto bad;
    }

    /*
     * decrypt the packet.
     */
    if (!algo->decrypt)
        panic("internal error: no decrypt function");
    KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_START, 0,0,0,0,0);
    if ((*algo->decrypt)(m, off, sav, algo, ivlen)) {
        /* m is already freed */
        m = NULL;
        ipseclog((LOG_ERR, "decrypt fail in IPv4 ESP input: %s\n",
            ipsec_logsastr(sav)));
        IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
        KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 1,0,0,0,0);
        goto bad;
    }
    KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 2,0,0,0,0);
    IPSEC_STAT_INCREMENT(ipsecstat.in_esphist[sav->alg_enc]);

    m->m_flags |= M_DECRYPTED;

    if (algo->finalizedecrypt) {
        if ((*algo->finalizedecrypt)(sav, saved_icv, algo->icvlen)) {
            ipseclog((LOG_ERR, "packet decryption ICV failure\n"));
            IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
            KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 1,0,0,0,0);
            goto bad;
        }
    }
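    /*
     * Note: for combined-mode (AEAD) algorithms, algo->finalizedecrypt is
     * set and the ICV saved before decryption is verified here by the
     * cipher itself, instead of by the separate AH-style authenticator
     * handled earlier.
     */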
    /*
     * find the trailer of the ESP.
     */
    m_copydata(m, m->m_pkthdr.len - sizeof(esptail), sizeof(esptail),
        (caddr_t)&esptail);
    nxt = esptail.esp_nxt;
    taillen = esptail.esp_padlen + sizeof(esptail);

    if (m->m_pkthdr.len < taillen
        || m->m_pkthdr.len - taillen < hlen) {	/*?*/
        ipseclog((LOG_WARNING,
            "bad pad length in IPv4 ESP input: %s %s\n",
            ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
        IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
        goto bad;
    }

    /* strip off the trailing pad area. */
    m_adj(m, -taillen);
    ip = mtod(m, struct ip *);
#ifdef IPLEN_FLIPPED
    ip->ip_len = ip->ip_len - taillen;
#else
    ip->ip_len = htons(ntohs(ip->ip_len) - taillen);
#endif
    if (ip->ip_p == IPPROTO_UDP) {
        // offset includes the outer ip and udp header lengths.
        if (m->m_len < off) {
            m = m_pullup(m, off);
            if (!m) {
                ipseclog((LOG_DEBUG,
                    "IPv4 ESP input: invalid udp encapsulated ESP packet length\n"));
                IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
                goto bad;
            }
        }

        // check the UDP encap header to detect changes in the source port, and then strip the header
        off -= sizeof(struct udphdr); // off no longer includes the udphdr's size
        // if peer is behind nat and this is the latest esp packet
        if ((sav->flags & SADB_X_EXT_NATT_DETECTED_PEER) != 0 &&
            (sav->flags & SADB_X_EXT_OLD) == 0 &&
            seq && sav->replay &&
            seq >= sav->replay->lastseq) {
            struct udphdr *encap_uh = (__typeof__(encap_uh))(void *)((caddr_t)ip + off);
            if (encap_uh->uh_sport &&
                ntohs(encap_uh->uh_sport) != sav->remote_ike_port) {
                sav->remote_ike_port = ntohs(encap_uh->uh_sport);
            }
        }
        ip = esp4_input_strip_udp_encap(m, off);
        esp = (struct esp *)(void *)(((u_int8_t *)ip) + off);
    }
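    /*
     * Note: remote_ike_port is refreshed only for packets whose sequence
     * number is at or beyond the current replay high-water mark, so a
     * reordered or replayed older packet cannot retarget the NAT-T port
     * to a stale source port.
     */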
    /* was it transmitted over the IPsec tunnel SA? */
    if (ipsec4_tunnel_validate(m, off + esplen + ivlen, nxt, sav, &ifamily)) {
        ifaddr_t ifa;
        struct sockaddr_storage addr;

        /*
         * strip off all the headers that precede ESP header.
         *	IP4 xx ESP IP4' payload -> IP4' payload
         *
         * XXX more sanity checks
         * XXX relationship with gif?
         */
        u_int8_t tos, otos;
        int sum;

        tos = ip->ip_tos;
        m_adj(m, off + esplen + ivlen);
        if (ifamily == AF_INET) {
            struct sockaddr_in *ipaddr;

            if (m->m_len < sizeof(*ip)) {
                m = m_pullup(m, sizeof(*ip));
                if (!m) {
                    IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
                    goto bad;
                }
            }
            ip = mtod(m, struct ip *);
            /* ECN consideration. */
            otos = ip->ip_tos;
            if (ip_ecn_egress(ip4_ipsec_ecn, &tos, &ip->ip_tos) == 0) {
                IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
                goto bad;
            }

            if (otos != ip->ip_tos) {
                sum = ~ntohs(ip->ip_sum) & 0xffff;
                sum += (~otos & 0xffff) + ip->ip_tos;
                sum = (sum >> 16) + (sum & 0xffff);
                sum += (sum >> 16);  /* add carry */
                ip->ip_sum = htons(~sum & 0xffff);
            }
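            /*
             * Note: the ip_sum fix-up above is the standard incremental
             * checksum update (RFC 1624 style, HC' = ~(~HC + ~m + m'))
             * applied when ECN egress processing rewrites ip_tos, which
             * avoids recomputing the whole header checksum.
             */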
            if (!key_checktunnelsanity(sav, AF_INET,
                (caddr_t)&ip->ip_src, (caddr_t)&ip->ip_dst)) {
                ipseclog((LOG_ERR, "ipsec tunnel address mismatch "
                    "in ESP input: %s %s\n",
                    ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
                IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
                goto bad;
            }

            bzero(&addr, sizeof(addr));
            ipaddr = (__typeof__(ipaddr))&addr;
            ipaddr->sin_family = AF_INET;
            ipaddr->sin_len = sizeof(*ipaddr);
            ipaddr->sin_addr = ip->ip_dst;
        } else if (ifamily == AF_INET6) {
            struct sockaddr_in6 *ip6addr;

            /*
             * m_pullup is prohibited in KAME IPv6 input processing
             * but there's no other way!
             */
            if (m->m_len < sizeof(*ip6)) {
                m = m_pullup(m, sizeof(*ip6));
                if (!m) {
                    IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
                    goto bad;
                }
            }

            /*
             * Expect 32-bit aligned data pointer on strict-align
             * platforms.
             */
            MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);

            ip6 = mtod(m, struct ip6_hdr *);

            /* ECN consideration. */
            if (ip64_ecn_egress(ip4_ipsec_ecn, &tos, &ip6->ip6_flow) == 0) {
                IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
                goto bad;
            }
            if (!key_checktunnelsanity(sav, AF_INET6,
                (caddr_t)&ip6->ip6_src, (caddr_t)&ip6->ip6_dst)) {
                ipseclog((LOG_ERR, "ipsec tunnel address mismatch "
                    "in ESP input: %s %s\n",
                    ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
                IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
                goto bad;
            }

            bzero(&addr, sizeof(addr));
            ip6addr = (__typeof__(ip6addr))&addr;
            ip6addr->sin6_family = AF_INET6;
            ip6addr->sin6_len = sizeof(*ip6addr);
            ip6addr->sin6_addr = ip6->ip6_dst;
        } else {
            ipseclog((LOG_ERR, "ipsec tunnel unsupported address family "
                "in ESP input\n"));
            goto bad;
        }

        key_sa_recordxfer(sav, m);
        if (ipsec_addhist(m, IPPROTO_ESP, spi) != 0 ||
            ipsec_addhist(m, IPPROTO_IPV4, 0) != 0) {
            IPSEC_STAT_INCREMENT(ipsecstat.in_nomem);
            goto bad;
        }

        // update the receiving interface address based on the inner address
        ifa = ifa_ifwithaddr((struct sockaddr *)&addr);
        if (ifa) {
            m->m_pkthdr.rcvif = ifa->ifa_ifp;
            IFA_REMREF(ifa);
        }

        /* Clear the csum flags, they can't be valid for the inner headers */
        m->m_pkthdr.csum_flags = 0;

        // Input via IPSec interface
        if (sav->sah->ipsec_if != NULL) {
            // Return mbuf
            if (interface != NULL &&
                interface == sav->sah->ipsec_if) {
                out_m = m;
                goto done;
            }

            if (ipsec_inject_inbound_packet(sav->sah->ipsec_if, m) == 0) {
                m = NULL;
                goto done;
            } else {
                goto bad;
            }
        }

        if (proto_input(ifamily == AF_INET ? PF_INET : PF_INET6, m) != 0)
            goto bad;

        nxt = IPPROTO_DONE;
        KERNEL_DEBUG(DBG_FNC_ESPIN | DBG_FUNC_END, 2,0,0,0,0);
    } else {
        /*
         * strip off ESP header and IV.
         * even in m_pulldown case, we need to strip off ESP so that
         * we can always compute checksum for AH correctly.
         */
        size_t stripsiz;

        stripsiz = esplen + ivlen;

        ip = mtod(m, struct ip *);
        ovbcopy((caddr_t)ip, (caddr_t)(((u_char *)ip) + stripsiz), off);
        m->m_data += stripsiz;
        m->m_len -= stripsiz;
        m->m_pkthdr.len -= stripsiz;

        ip = mtod(m, struct ip *);
#ifdef IPLEN_FLIPPED
        ip->ip_len = ip->ip_len - stripsiz;
#else
        ip->ip_len = htons(ntohs(ip->ip_len) - stripsiz);
#endif
        ip->ip_p = nxt;

        key_sa_recordxfer(sav, m);
        if (ipsec_addhist(m, IPPROTO_ESP, spi) != 0) {
            IPSEC_STAT_INCREMENT(ipsecstat.in_nomem);
            goto bad;
        }

        /*
         * Set the csum valid flag: if we authenticated the
         * packet, the payload shouldn't be corrupt unless
         * it was corrupted before being signed on the other
         * side.
         */
        if (nxt == IPPROTO_TCP || nxt == IPPROTO_UDP) {
            m->m_pkthdr.csum_flags = CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
            m->m_pkthdr.csum_data = 0xFFFF;
            _CASSERT(offsetof(struct pkthdr, csum_data) ==
                offsetof(struct pkthdr, csum_rx_val));
        }
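        /*
         * Note: csum_data = 0xFFFF together with CSUM_DATA_VALID and
         * CSUM_PSEUDO_HDR tells the transport layer that the inner TCP/UDP
         * checksum can be treated as already verified, since the decrypted
         * payload was covered by the ESP integrity check.
         */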
        if (nxt != IPPROTO_DONE) {
            if ((ip_protox[nxt]->pr_flags & PR_LASTHDR) != 0 &&
                ipsec4_in_reject(m, NULL)) {
                IPSEC_STAT_INCREMENT(ipsecstat.in_polvio);
                goto bad;
            }
            KERNEL_DEBUG(DBG_FNC_ESPIN | DBG_FUNC_END, 3,0,0,0,0);

            /* translate encapsulated UDP port ? */
            if ((sav->flags & SADB_X_EXT_NATT_MULTIPLEUSERS) != 0) {
                struct udphdr *udp;

                if (nxt != IPPROTO_UDP) {	/* not UDP packet - drop it */
                    IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
                    goto bad;
                }

                if (m->m_len < off + sizeof(struct udphdr)) {
                    m = m_pullup(m, off + sizeof(struct udphdr));
                    if (!m) {
                        ipseclog((LOG_DEBUG,
                            "IPv4 ESP input: can't pullup UDP header in esp4_input\n"));
                        IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
                        goto bad;
                    }
                    ip = mtod(m, struct ip *);
                }
                udp = (struct udphdr *)(void *)(((u_int8_t *)ip) + off);

                lck_mtx_lock(sadb_mutex);
                if (sav->natt_encapsulated_src_port == 0) {
                    sav->natt_encapsulated_src_port = udp->uh_sport;
                } else if (sav->natt_encapsulated_src_port != udp->uh_sport) {	/* something wrong */
                    IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
                    lck_mtx_unlock(sadb_mutex);
                    goto bad;
                }
                lck_mtx_unlock(sadb_mutex);
                udp->uh_sport = htons(sav->remote_ike_port);
            }
            DTRACE_IP6(receive, struct mbuf *, m, struct inpcb *, NULL,
                struct ip *, ip, struct ifnet *, m->m_pkthdr.rcvif,
                struct ip *, ip, struct ip6_hdr *, NULL);

            // Input via IPsec interface legacy path
            if (sav->sah->ipsec_if != NULL) {
                int mlen;
                if ((mlen = m_length2(m, NULL)) < hlen) {
                    ipseclog((LOG_DEBUG,
                        "IPv4 ESP input: decrypted packet too short %d < %d\n",
                        mlen, hlen));
                    IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
                    goto bad;
                }

                ip->ip_len = htons(ip->ip_len + hlen);
                ip->ip_off = htons(ip->ip_off);
                ip->ip_sum = ip_cksum_hdr_in(m, hlen);

                // Return mbuf
                if (interface != NULL &&
                    interface == sav->sah->ipsec_if) {
                    out_m = m;
                    goto done;
                }

                if (ipsec_inject_inbound_packet(sav->sah->ipsec_if, m) == 0) {
                    m = NULL;
                    goto done;
                } else {
                    goto bad;
                }
            }

            ip_proto_dispatch_in(m, off, nxt, 0);
        } else {
            m_freem(m);
        }
        m = NULL;
    }

done:
    if (sav) {
        KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
            printf("DP esp4_input call free SA:0x%llx\n",
            (uint64_t)VM_KERNEL_ADDRPERM(sav)));
        key_freesav(sav, KEY_SADB_UNLOCKED);
    }
    IPSEC_STAT_INCREMENT(ipsecstat.in_success);
    return out_m;

bad:
    if (sav) {
        KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
            printf("DP esp4_input call free SA:0x%llx\n",
            (uint64_t)VM_KERNEL_ADDRPERM(sav)));
        key_freesav(sav, KEY_SADB_UNLOCKED);
    }
    if (m)
        m_freem(m);
    KERNEL_DEBUG(DBG_FNC_ESPIN | DBG_FUNC_END, 4,0,0,0,0);
    return out_m;
}
int
esp6_input(struct mbuf **mp, int *offp, int proto)
{
    return esp6_input_extended(mp, offp, proto, NULL);
}
int
esp6_input_extended(struct mbuf **mp, int *offp, int proto, ifnet_t interface)
{
#pragma unused(proto)
    struct mbuf *m = *mp;
    int off = *offp;
    struct esptail esptail;
    struct secasvar *sav = NULL;
    const struct esp_algorithm *algo;

    /* sanity check for alignment. */
    if (off % 4 != 0 || m->m_pkthdr.len % 4 != 0) {
        ipseclog((LOG_ERR, "IPv6 ESP input: packet alignment problem "
            "(off=%d, pktlen=%d)\n", off, m->m_pkthdr.len));
        IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
        goto bad;
    }

#ifndef PULLDOWN_TEST
    IP6_EXTHDR_CHECK(m, off, ESPMAXLEN, {return IPPROTO_DONE;});
    esp = (struct esp *)(void *)(mtod(m, caddr_t) + off);
#else
    IP6_EXTHDR_GET(esp, struct esp *, m, off, ESPMAXLEN);
    if (esp == NULL) {
        IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
        return IPPROTO_DONE;
    }
#endif
    m->m_pkthdr.csum_flags &= ~CSUM_RX_FLAGS;

    /* Expect 32-bit aligned data pointer on strict-align platforms */
    MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);

    ip6 = mtod(m, struct ip6_hdr *);

    if (ntohs(ip6->ip6_plen) == 0) {
        ipseclog((LOG_ERR, "IPv6 ESP input: "
            "ESP with IPv6 jumbogram is not supported.\n"));
        IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
        goto bad;
    }

    nproto = ip6_get_prevhdr(m, off);
    if (nproto == NULL || (*nproto != IPPROTO_ESP &&
        !(*nproto == IPPROTO_UDP && off >= sizeof(struct udphdr)))) {
        ipseclog((LOG_DEBUG, "IPv6 ESP input: invalid protocol type\n"));
        IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
        goto bad;
    }
    /* find the sassoc. */
    spi = esp->esp_spi;

    if ((sav = key_allocsa_extended(AF_INET6,
        (caddr_t)&ip6->ip6_src, (caddr_t)&ip6->ip6_dst,
        IPPROTO_ESP, spi, interface)) == 0) {
        ipseclog((LOG_WARNING,
            "IPv6 ESP input: no key association found for spi %u\n",
            (u_int32_t)ntohl(spi)));
        IPSEC_STAT_INCREMENT(ipsec6stat.in_nosa);
        goto bad;
    }
    KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
        printf("DP esp6_input called to allocate SA:0x%llx\n",
        (uint64_t)VM_KERNEL_ADDRPERM(sav)));
    if (sav->state != SADB_SASTATE_MATURE
        && sav->state != SADB_SASTATE_DYING) {
        ipseclog((LOG_DEBUG,
            "IPv6 ESP input: non-mature/dying SA found for spi %u\n",
            (u_int32_t)ntohl(spi)));
        IPSEC_STAT_INCREMENT(ipsec6stat.in_badspi);
        goto bad;
    }
    algo = esp_algorithm_lookup(sav->alg_enc);
    if (!algo) {
        ipseclog((LOG_DEBUG, "IPv6 ESP input: "
            "unsupported encryption algorithm for spi %u\n",
            (u_int32_t)ntohl(spi)));
        IPSEC_STAT_INCREMENT(ipsec6stat.in_badspi);
        goto bad;
    }

    /* check if we have proper ivlen information */
    ivlen = sav->ivlen;
    if (ivlen < 0) {
        ipseclog((LOG_ERR, "improper ivlen in IPv6 ESP input: %s %s\n",
            ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
        IPSEC_STAT_INCREMENT(ipsec6stat.in_badspi);
        goto bad;
    }

    seq = ntohl(((struct newesp *)esp)->esp_seq);

    /* Save ICV from packet for verification later */
    size_t siz = 0;
    unsigned char saved_icv[AH_MAXSUMSIZE];
    if (algo->finalizedecrypt) {
        siz = algo->icvlen;
        m_copydata(m, m->m_pkthdr.len - siz, siz, (caddr_t) saved_icv);
        goto delay_icv;
    }
    if (!((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay
        && (sav->alg_auth && sav->key_auth)))
        goto noreplaycheck;

    if (sav->alg_auth == SADB_X_AALG_NULL ||
        sav->alg_auth == SADB_AALG_NONE)
        goto noreplaycheck;

    /*
     * check for sequence number.
     */
    if (ipsec_chkreplay(seq, sav))
        ; /* okay */
    else {
        IPSEC_STAT_INCREMENT(ipsec6stat.in_espreplay);
        ipseclog((LOG_WARNING,
            "replay packet in IPv6 ESP input: %s %s\n",
            ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
        goto bad;
    }

    {
        u_char sum0[AH_MAXSUMSIZE] __attribute__((aligned(4)));
        u_char sum[AH_MAXSUMSIZE] __attribute__((aligned(4)));
        const struct ah_algorithm *sumalgo;

        sumalgo = ah_algorithm_lookup(sav->alg_auth);
        if (!sumalgo)
            goto noreplaycheck;
        siz = (((*sumalgo->sumsiz)(sav) + 3) & ~(4 - 1));
        if (m->m_pkthdr.len < off + ESPMAXLEN + siz) {
            IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
            goto bad;
        }
        if (AH_MAXSUMSIZE < siz) {
            ipseclog((LOG_DEBUG,
                "internal error: AH_MAXSUMSIZE must be larger than %lu\n",
                (u_long)siz));
            IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
            goto bad;
        }

        m_copydata(m, m->m_pkthdr.len - siz, siz, (caddr_t) &sum0[0]);

        if (esp_auth(m, off, m->m_pkthdr.len - off - siz, sav, sum)) {
            ipseclog((LOG_WARNING, "auth fail in IPv6 ESP input: %s %s\n",
                ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
            IPSEC_STAT_INCREMENT(ipsec6stat.in_espauthfail);
            goto bad;
        }

        if (cc_cmp_safe(siz, sum0, sum)) {
            ipseclog((LOG_WARNING, "auth fail in IPv6 ESP input: %s %s\n",
                ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
            IPSEC_STAT_INCREMENT(ipsec6stat.in_espauthfail);
            goto bad;
        }
        /* strip off the authentication data */
        m_adj(m, -siz);
        ip6 = mtod(m, struct ip6_hdr *);
        ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - siz);

        m->m_flags |= M_AUTHIPDGM;
        IPSEC_STAT_INCREMENT(ipsec6stat.in_espauthsucc);
    }

delay_icv:

    /*
     * update sequence number.
     */
    if ((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay) {
        if (ipsec_updatereplay(seq, sav)) {
            IPSEC_STAT_INCREMENT(ipsec6stat.in_espreplay);
            goto bad;
        }
    }

noreplaycheck:

    /* process main esp header. */
    if (sav->flags & SADB_X_EXT_OLD) {
        /* RFC 1827 */
        esplen = sizeof(struct esp);
    } else {
        /* RFC 2406 */
        if (sav->flags & SADB_X_EXT_DERIV)
            esplen = sizeof(struct esp);
        else
            esplen = sizeof(struct newesp);
    }

    if (m->m_pkthdr.len < off + esplen + ivlen + sizeof(esptail)) {
        ipseclog((LOG_WARNING,
            "IPv6 ESP input: packet too short\n"));
        IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
        goto bad;
    }
#ifndef PULLDOWN_TEST
    IP6_EXTHDR_CHECK(m, off, esplen + ivlen, return IPPROTO_DONE);	/*XXX*/
    esp = (struct esp *)(void *)(mtod(m, caddr_t) + off);
#else
    IP6_EXTHDR_GET(esp, struct esp *, m, off, esplen + ivlen);
    if (esp == NULL) {
        IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
        goto bad;
    }
#endif
    ip6 = mtod(m, struct ip6_hdr *);	/* set it again just in case */

    /*
     * pre-compute and cache intermediate key
     */
    if (esp_schedule(algo, sav) != 0) {
        IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
        goto bad;
    }

    /*
     * decrypt the packet.
     */
    if (!algo->decrypt)
        panic("internal error: no decrypt function");
    if ((*algo->decrypt)(m, off, sav, algo, ivlen)) {
        /* m is already freed */
        m = NULL;
        ipseclog((LOG_ERR, "decrypt fail in IPv6 ESP input: %s\n",
            ipsec_logsastr(sav)));
        IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
        goto bad;
    }
    IPSEC_STAT_INCREMENT(ipsec6stat.in_esphist[sav->alg_enc]);

    m->m_flags |= M_DECRYPTED;

    if (algo->finalizedecrypt) {
        if ((*algo->finalizedecrypt)(sav, saved_icv, algo->icvlen)) {
            ipseclog((LOG_ERR, "packet decryption ICV failure\n"));
            IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
            KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 1,0,0,0,0);
            goto bad;
        }
    }
    /*
     * find the trailer of the ESP.
     */
    m_copydata(m, m->m_pkthdr.len - sizeof(esptail), sizeof(esptail),
        (caddr_t)&esptail);
    nxt = esptail.esp_nxt;
    taillen = esptail.esp_padlen + sizeof(esptail);

    if (m->m_pkthdr.len < taillen
        || m->m_pkthdr.len - taillen < sizeof(struct ip6_hdr)) {	/*?*/
        ipseclog((LOG_WARNING,
            "bad pad length in IPv6 ESP input: %s %s\n",
            ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
        IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
        goto bad;
    }

    /* strip off the trailing pad area. */
    m_adj(m, -taillen);
    ip6 = mtod(m, struct ip6_hdr *);
    ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - taillen);
    if (*nproto == IPPROTO_UDP) {
        // offset includes the outer ip and udp header lengths.
        if (m->m_len < off) {
            m = m_pullup(m, off);
            if (!m) {
                ipseclog((LOG_DEBUG,
                    "IPv6 ESP input: invalid udp encapsulated ESP packet length\n"));
                IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
                goto bad;
            }
        }

        // check the UDP encap header to detect changes in the source port, and then strip the header
        off -= sizeof(struct udphdr); // off no longer includes the udphdr's size
        // if peer is behind nat and this is the latest esp packet
        if ((sav->flags & SADB_X_EXT_NATT_DETECTED_PEER) != 0 &&
            (sav->flags & SADB_X_EXT_OLD) == 0 &&
            seq && sav->replay &&
            seq >= sav->replay->lastseq) {
            struct udphdr *encap_uh = (__typeof__(encap_uh))(void *)((caddr_t)ip6 + off);
            if (encap_uh->uh_sport &&
                ntohs(encap_uh->uh_sport) != sav->remote_ike_port) {
                sav->remote_ike_port = ntohs(encap_uh->uh_sport);
            }
        }
        ip6 = esp6_input_strip_udp_encap(m, off);
        esp = (struct esp *)(void *)(((u_int8_t *)ip6) + off);
    }
    /* was it transmitted over the IPsec tunnel SA? */
    if (ipsec6_tunnel_validate(m, off + esplen + ivlen, nxt, sav, &ifamily)) {
        ifaddr_t ifa;
        struct sockaddr_storage addr;

        /*
         * strip off all the headers that precede ESP header.
         *	IP6 xx ESP IP6' payload -> IP6' payload
         *
         * XXX more sanity checks
         * XXX relationship with gif?
         */
        u_int32_t flowinfo;	/* net endian */
        flowinfo = ip6->ip6_flow;
        m_adj(m, off + esplen + ivlen);
        if (ifamily == AF_INET6) {
            struct sockaddr_in6 *ip6addr;

            if (m->m_len < sizeof(*ip6)) {
                /*
                 * m_pullup is prohibited in KAME IPv6 input processing
                 * but there's no other way!
                 */
                /* okay to pullup in m_pulldown style */
                m = m_pullup(m, sizeof(*ip6));
                if (!m) {
                    IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
                    goto bad;
                }
            }
            ip6 = mtod(m, struct ip6_hdr *);
            /* ECN consideration. */
            if (ip6_ecn_egress(ip6_ipsec_ecn, &flowinfo, &ip6->ip6_flow) == 0) {
                IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
                goto bad;
            }
            if (!key_checktunnelsanity(sav, AF_INET6,
                (caddr_t)&ip6->ip6_src, (caddr_t)&ip6->ip6_dst)) {
                ipseclog((LOG_ERR, "ipsec tunnel address mismatch "
                    "in IPv6 ESP input: %s %s\n",
                    ipsec6_logpacketstr(ip6, spi),
                    ipsec_logsastr(sav)));
                IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
                goto bad;
            }

            bzero(&addr, sizeof(addr));
            ip6addr = (__typeof__(ip6addr))&addr;
            ip6addr->sin6_family = AF_INET6;
            ip6addr->sin6_len = sizeof(*ip6addr);
            ip6addr->sin6_addr = ip6->ip6_dst;
        } else if (ifamily == AF_INET) {
            struct sockaddr_in *ipaddr;

            if (m->m_len < sizeof(*ip)) {
                m = m_pullup(m, sizeof(*ip));
                if (!m) {
                    IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
                    goto bad;
                }
            }

            u_int8_t otos;
            int sum;

            ip = mtod(m, struct ip *);
            otos = ip->ip_tos;
            /* ECN consideration. */
            if (ip46_ecn_egress(ip6_ipsec_ecn, &flowinfo, &ip->ip_tos) == 0) {
                IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
                goto bad;
            }

            if (otos != ip->ip_tos) {
                sum = ~ntohs(ip->ip_sum) & 0xffff;
                sum += (~otos & 0xffff) + ip->ip_tos;
                sum = (sum >> 16) + (sum & 0xffff);
                sum += (sum >> 16);  /* add carry */
                ip->ip_sum = htons(~sum & 0xffff);
            }

            if (!key_checktunnelsanity(sav, AF_INET,
                (caddr_t)&ip->ip_src, (caddr_t)&ip->ip_dst)) {
                ipseclog((LOG_ERR, "ipsec tunnel address mismatch "
                    "in ESP input: %s %s\n",
                    ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
                IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
                goto bad;
            }

            bzero(&addr, sizeof(addr));
            ipaddr = (__typeof__(ipaddr))&addr;
            ipaddr->sin_family = AF_INET;
            ipaddr->sin_len = sizeof(*ipaddr);
            ipaddr->sin_addr = ip->ip_dst;
        }
        key_sa_recordxfer(sav, m);
        if (ipsec_addhist(m, IPPROTO_ESP, spi) != 0 ||
            ipsec_addhist(m, IPPROTO_IPV6, 0) != 0) {
            IPSEC_STAT_INCREMENT(ipsec6stat.in_nomem);
            goto bad;
        }

        // update the receiving interface address based on the inner address
        ifa = ifa_ifwithaddr((struct sockaddr *)&addr);
        if (ifa) {
            m->m_pkthdr.rcvif = ifa->ifa_ifp;
            IFA_REMREF(ifa);
        }

        // Input via IPSec interface
        if (sav->sah->ipsec_if != NULL) {
            // Return mbuf
            if (interface != NULL &&
                interface == sav->sah->ipsec_if) {
                goto done;
            }

            if (ipsec_inject_inbound_packet(sav->sah->ipsec_if, m) == 0) {
                m = NULL;
                nxt = IPPROTO_DONE;
                goto done;
            } else {
                goto bad;
            }
        }

        if (proto_input(ifamily == AF_INET ? PF_INET : PF_INET6, m) != 0)
            goto bad;
        nxt = IPPROTO_DONE;
    } else {
        /*
         * strip off ESP header and IV.
         * even in m_pulldown case, we need to strip off ESP so that
         * we can always compute checksum for AH correctly.
         */
        size_t stripsiz;
        char *prvnxtp;

        /*
         * Set the next header field of the previous header correctly.
         */
        prvnxtp = ip6_get_prevhdr(m, off); /* XXX */
        *prvnxtp = nxt;

        stripsiz = esplen + ivlen;

        ip6 = mtod(m, struct ip6_hdr *);
        if (m->m_len >= stripsiz + off) {
            ovbcopy((caddr_t)ip6, ((caddr_t)ip6) + stripsiz, off);
            m->m_data += stripsiz;
            m->m_len -= stripsiz;
            m->m_pkthdr.len -= stripsiz;
        } else {
            /*
             * this comes with no copy if the boundary is on
             * cluster.
             */
            struct mbuf *n;

            n = m_split(m, off, M_DONTWAIT);
            if (n == NULL) {
                /* m is retained by m_split */
                goto bad;
            }
            m_adj(n, stripsiz);
            /* m_cat does not update m_pkthdr.len */
            m->m_pkthdr.len += n->m_pkthdr.len;
            m_cat(m, n);
        }
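        /*
         * Note: when the ESP header spans an mbuf boundary, the path above
         * splits the chain at the payload start, trims the ESP header and
         * IV off the second chain with m_adj(), and then re-joins the
         * chains with m_cat(); m_pkthdr.len has to be fixed up by hand
         * because m_cat() does not maintain it.
         */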
#ifndef PULLDOWN_TEST
        /*
         * KAME requires the packet to be contiguous in a single mbuf;
         * we need to make sure of that.
         * this kind of code should be avoided.
         * XXX other conditions to avoid running this part?
         */
        if (m->m_len != m->m_pkthdr.len) {
            struct mbuf *n = NULL;
            int maxlen;

            MGETHDR(n, M_DONTWAIT, MT_HEADER);	/* MAC-OK */
            maxlen = MHLEN;
            if (n)
                M_COPY_PKTHDR(n, m);
            if (n && m->m_pkthdr.len > maxlen) {
                MCLGET(n, M_DONTWAIT);
                maxlen = MCLBYTES;
                if ((n->m_flags & M_EXT) == 0) {
                    m_free(n);
                    n = NULL;
                }
            }
            if (!n) {
                printf("esp6_input: mbuf allocation failed\n");
                goto bad;
            }

            if (m->m_pkthdr.len <= maxlen) {
                m_copydata(m, 0, m->m_pkthdr.len, mtod(n, caddr_t));
                n->m_len = m->m_pkthdr.len;
                n->m_pkthdr.len = m->m_pkthdr.len;
                n->m_next = NULL;
                m_freem(m);
            } else {
                m_copydata(m, 0, maxlen, mtod(n, caddr_t));
                n->m_len = maxlen;
                n->m_pkthdr.len = m->m_pkthdr.len;
                n->m_next = m;
                m_adj(m, maxlen);
                m->m_flags &= ~M_PKTHDR;
            }
            m = n;
        }
#endif
        ip6 = mtod(m, struct ip6_hdr *);
        ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - stripsiz);

        key_sa_recordxfer(sav, m);
        if (ipsec_addhist(m, IPPROTO_ESP, spi) != 0) {
            IPSEC_STAT_INCREMENT(ipsec6stat.in_nomem);
            goto bad;
        }

        /*
         * Set the csum valid flag: if we authenticated the
         * packet, the payload shouldn't be corrupt unless
         * it was corrupted before being signed on the other
         * side.
         */
        if (nxt == IPPROTO_TCP || nxt == IPPROTO_UDP) {
            m->m_pkthdr.csum_flags = CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
            m->m_pkthdr.csum_data = 0xFFFF;
            _CASSERT(offsetof(struct pkthdr, csum_data) ==
                offsetof(struct pkthdr, csum_rx_val));
        }
        // Input via IPSec interface
        if (sav->sah->ipsec_if != NULL) {
            // Return mbuf
            if (interface != NULL &&
                interface == sav->sah->ipsec_if) {
                goto done;
            }

            if (ipsec_inject_inbound_packet(sav->sah->ipsec_if, m) == 0) {
                m = NULL;
                nxt = IPPROTO_DONE;
                goto done;
            } else {
                goto bad;
            }
        }
    }

done:
    *offp = off;
    *mp = m;
    if (sav) {
        KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
            printf("DP esp6_input call free SA:0x%llx\n",
            (uint64_t)VM_KERNEL_ADDRPERM(sav)));
        key_freesav(sav, KEY_SADB_UNLOCKED);
    }
    IPSEC_STAT_INCREMENT(ipsec6stat.in_success);
    return nxt;

bad:
    if (sav) {
        KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
            printf("DP esp6_input call free SA:0x%llx\n",
            (uint64_t)VM_KERNEL_ADDRPERM(sav)));
        key_freesav(sav, KEY_SADB_UNLOCKED);
    }
    if (m)
        m_freem(m);
    if (interface != NULL) {
        *mp = NULL;
    }
    return IPPROTO_DONE;
}
void
esp6_ctlinput(int cmd, struct sockaddr *sa, void *d, __unused struct ifnet *ifp)
{
    const struct newesp *espp;
    struct newesp esp;
    struct ip6ctlparam *ip6cp = NULL, ip6cp1;
    struct secasvar *sav;
    struct ip6_hdr *ip6;
    struct mbuf *m;
    int off = 0;
    struct sockaddr_in6 *sa6_src, *sa6_dst;

    if (sa->sa_family != AF_INET6 ||
        sa->sa_len != sizeof(struct sockaddr_in6))
        return;
    if ((unsigned)cmd >= PRC_NCMDS)
        return;

    /* if the parameter is from icmp6, decode it. */
    if (d != NULL) {
        ip6cp = (struct ip6ctlparam *)d;
        m = ip6cp->ip6c_m;
        ip6 = ip6cp->ip6c_ip6;
        off = ip6cp->ip6c_off;
    } else {
        m = NULL;
        ip6 = NULL;
    }
    if (ip6) {
        /*
         * Notify the error to all possible sockets via pfctlinput2.
         * Since the upper layer information (such as protocol type,
         * source and destination ports) is embedded in the encrypted
         * data and might have been cut, we can't directly call
         * an upper layer ctlinput function. However, the pcbnotify
         * function will consider source and destination addresses
         * as well as the flow info value, and may be able to find
         * some PCB that should be notified.
         * Although pfctlinput2 will call esp6_ctlinput(), there is
         * no possibility of an infinite loop of function calls,
         * because we don't pass the inner IPv6 header.
         */
        bzero(&ip6cp1, sizeof(ip6cp1));
        ip6cp1.ip6c_src = ip6cp->ip6c_src;
        pfctlinput2(cmd, sa, (void *)&ip6cp1);

        /*
         * Then go to special cases that need ESP header information.
         * XXX: We assume that when ip6 is non-NULL,
         * M and OFF are valid.
         */

        /* check if we can safely examine src and dst ports */
        if (m->m_pkthdr.len < off + sizeof(esp))
            return;

        if (m->m_len < off + sizeof(esp)) {
            /*
             * this should be a rare case,
             * so we compromise on this copy...
             */
            m_copydata(m, off, sizeof(esp), (caddr_t)&esp);
            espp = &esp;
        } else
            espp = (struct newesp *)(void *)(mtod(m, caddr_t) + off);
        if (cmd == PRC_MSGSIZE) {
            int valid = 0;

            /*
             * Check to see if we have a valid SA corresponding to
             * the address in the ICMP message payload.
             */
            sa6_src = ip6cp->ip6c_src;
            sa6_dst = (struct sockaddr_in6 *)(void *)sa;
            sav = key_allocsa(AF_INET6,
                (caddr_t)&sa6_src->sin6_addr,
                (caddr_t)&sa6_dst->sin6_addr,
                IPPROTO_ESP, espp->esp_spi);
            if (sav) {
                if (sav->state == SADB_SASTATE_MATURE ||
                    sav->state == SADB_SASTATE_DYING)
                    valid++;
                key_freesav(sav, KEY_SADB_UNLOCKED);
            }

            /* XXX Further validation? */

            /*
             * Depending on the value of "valid" and routing table
             * size (mtudisc_{hi,lo}wat), we will:
             * - recalculate the new MTU and create the
             *   corresponding routing entry, or
             * - ignore the MTU change notification.
             */
            icmp6_mtudisc_update((struct ip6ctlparam *)d, valid);
        }
    } else {
        /* we normally notify any pcb here */
    }
}