/*
 * Copyright (c) 2008-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* $FreeBSD: src/sys/netinet6/esp_input.c,v 1.1.2.3 2001/07/03 11:01:50 ume Exp $ */
/* $KAME: esp_input.c,v 1.55 2001/03/23 08:08:47 itojun Exp $ */

/*
 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the project nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * RFC1827/2406 Encapsulated Security Payload.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mcache.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/syslog.h>

#include <net/if.h>
#include <net/if_ipsec.h>
#include <net/route.h>
#include <kern/cpu_number.h>
#include <kern/locks.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/in_var.h>
#include <netinet/ip_ecn.h>
#include <netinet/in_pcb.h>
#include <netinet/udp.h>
#include <netinet6/ip6_ecn.h>

#include <netinet/ip6.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/ip6_var.h>
#include <netinet/icmp6.h>
#include <netinet6/ip6protosw.h>

#include <netinet6/ipsec.h>
#include <netinet6/ipsec6.h>
#include <netinet6/ah.h>
#include <netinet6/ah6.h>
#include <netinet6/esp.h>
#include <netinet6/esp6.h>
#include <netkey/key.h>
#include <netkey/keydb.h>
#include <netkey/key_debug.h>

#include <net/kpi_protocol.h>
#include <netinet/kpi_ipfilter_var.h>

#include <net/net_osdep.h>
#include <mach/sdt.h>

#include <sys/kdebug.h>
#define DBG_LAYER_BEG		NETDBG_CODE(DBG_NETIPSEC, 1)
#define DBG_LAYER_END		NETDBG_CODE(DBG_NETIPSEC, 3)
#define DBG_FNC_ESPIN		NETDBG_CODE(DBG_NETIPSEC, (6 << 8))
#define DBG_FNC_DECRYPT		NETDBG_CODE(DBG_NETIPSEC, (7 << 8))
#define IPLEN_FLIPPED

extern lck_mtx_t *sadb_mutex;

#define ESPMAXLEN \
    (sizeof(struct esp) < sizeof(struct newesp) \
    ? sizeof(struct newesp) : sizeof(struct esp))
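
/*
 * Note: ESPMAXLEN above evaluates to the larger of the two ESP header
 * layouts (struct esp for old RFC 1827 ESP, struct newesp for RFC 2406),
 * so a single pullup/length check below covers either header format.
 */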

static struct ip *
esp4_input_strip_udp_encap(struct mbuf *m, int iphlen)
{
    // strip the udp header that's encapsulating ESP
    struct ip *ip;
    size_t stripsiz = sizeof(struct udphdr);

    ip = mtod(m, __typeof__(ip));
    ovbcopy((caddr_t)ip, (caddr_t)(((u_char *)ip) + stripsiz), iphlen);
    m->m_data += stripsiz;
    m->m_len -= stripsiz;
    m->m_pkthdr.len -= stripsiz;
    ip = mtod(m, __typeof__(ip));
    ip->ip_len = ip->ip_len - stripsiz;
    ip->ip_p = IPPROTO_ESP;
    return ip;
}

static struct ip6_hdr *
esp6_input_strip_udp_encap(struct mbuf *m, int ip6hlen)
{
    // strip the udp header that's encapsulating ESP
    struct ip6_hdr *ip6;
    size_t stripsiz = sizeof(struct udphdr);

    ip6 = mtod(m, __typeof__(ip6));
    ovbcopy((caddr_t)ip6, (caddr_t)(((u_char *)ip6) + stripsiz), ip6hlen);
    m->m_data += stripsiz;
    m->m_len -= stripsiz;
    m->m_pkthdr.len -= stripsiz;
    ip6 = mtod(m, __typeof__(ip6));
    ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - stripsiz);
    ip6->ip6_nxt = IPPROTO_ESP;
    return ip6;
}
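
/*
 * Note on the two strip helpers above: they slide the outer IP/IPv6 header
 * forward by sizeof(struct udphdr) with ovbcopy(), so the UDP header used
 * for NAT-traversal encapsulation disappears and the datagram looks like a
 * plain ESP packet again.  In the IPv4 case ip_len is adjusted without a
 * byte-order conversion because, on this input path, ip_input() has already
 * flipped it to host order (see IPLEN_FLIPPED above).
 */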

void
esp4_input(struct mbuf *m, int off)
{
    struct ip *ip;
    struct ip6_hdr *ip6;
    struct esp *esp;
    struct esptail esptail;
    u_int32_t spi;
    u_int32_t seq;
    struct secasvar *sav = NULL;
    size_t taillen;
    u_int16_t nxt;
    const struct esp_algorithm *algo;
    int ivlen;
    size_t hlen;
    size_t esplen;
    sa_family_t ifamily;

    KERNEL_DEBUG(DBG_FNC_ESPIN | DBG_FUNC_START, 0,0,0,0,0);
    /* sanity check for alignment. */
    if (off % 4 != 0 || m->m_pkthdr.len % 4 != 0) {
        ipseclog((LOG_ERR, "IPv4 ESP input: packet alignment problem "
            "(off=%d, pktlen=%d)\n", off, m->m_pkthdr.len));
        IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
        goto bad;
    }

    if (m->m_len < off + ESPMAXLEN) {
        m = m_pullup(m, off + ESPMAXLEN);
        if (m == NULL) {
            ipseclog((LOG_DEBUG,
                "IPv4 ESP input: can't pullup in esp4_input\n"));
            IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
            goto bad;
        }
    }

    /* Expect 32-bit aligned data pointer on strict-align platforms */
    MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);

    ip = mtod(m, struct ip *);
    // expect udp-encap and esp packets only
    if (ip->ip_p != IPPROTO_ESP &&
        !(ip->ip_p == IPPROTO_UDP && off >= sizeof(struct udphdr))) {
        ipseclog((LOG_DEBUG,
            "IPv4 ESP input: invalid protocol type\n"));
        IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
        goto bad;
    }

    esp = (struct esp *)(void *)(((u_int8_t *)ip) + off);
#ifdef _IP_VHL
    hlen = IP_VHL_HL(ip->ip_vhl) << 2;
#else
    hlen = ip->ip_hl << 2;
#endif

    /* find the sassoc. */
    spi = esp->esp_spi;

    if ((sav = key_allocsa(AF_INET,
        (caddr_t)&ip->ip_src, (caddr_t)&ip->ip_dst,
        IPPROTO_ESP, spi)) == 0) {
        ipseclog((LOG_WARNING,
            "IPv4 ESP input: no key association found for spi %u\n",
            (u_int32_t)ntohl(spi)));
        IPSEC_STAT_INCREMENT(ipsecstat.in_nosa);
        goto bad;
    }
    KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
        printf("DP esp4_input called to allocate SA:0x%llx\n",
        (uint64_t)VM_KERNEL_ADDRPERM(sav)));
    if (sav->state != SADB_SASTATE_MATURE
        && sav->state != SADB_SASTATE_DYING) {
        ipseclog((LOG_DEBUG,
            "IPv4 ESP input: non-mature/dying SA found for spi %u\n",
            (u_int32_t)ntohl(spi)));
        IPSEC_STAT_INCREMENT(ipsecstat.in_badspi);
        goto bad;
    }
    algo = esp_algorithm_lookup(sav->alg_enc);
    if (!algo) {
        ipseclog((LOG_DEBUG, "IPv4 ESP input: "
            "unsupported encryption algorithm for spi %u\n",
            (u_int32_t)ntohl(spi)));
        IPSEC_STAT_INCREMENT(ipsecstat.in_badspi);
        goto bad;
    }

    /* check if we have proper ivlen information */
    ivlen = sav->ivlen;
    if (ivlen < 0) {
        ipseclog((LOG_ERR, "improper ivlen in IPv4 ESP input: %s %s\n",
            ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
        IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
        goto bad;
    }

    seq = ntohl(((struct newesp *)esp)->esp_seq);

    /* Save ICV from packet for verification later */
    unsigned char saved_icv[AH_MAXSUMSIZE];
    if (algo->finalizedecrypt) {
        size_t siz = algo->icvlen;
        m_copydata(m, m->m_pkthdr.len - siz, siz, (caddr_t) saved_icv);
    }
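
    /*
     * Note: for combined-mode ciphers that supply algo->finalizedecrypt,
     * the ICV carried at the end of the payload is copied aside here,
     * before decryption.  After decryption the cipher recomputes the tag
     * and it is compared against this saved copy (see below), so a forged
     * or corrupted packet is dropped even though no separate AH-style
     * authenticator is configured on the SA.
     */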

    if (!((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay
        && (sav->alg_auth && sav->key_auth)))
        goto noreplaycheck;

    if (sav->alg_auth == SADB_X_AALG_NULL ||
        sav->alg_auth == SADB_AALG_NONE)
        goto noreplaycheck;

    /*
     * check for sequence number.
     */
    if (ipsec_chkreplay(seq, sav))
        ; /* okay */
    else {
        IPSEC_STAT_INCREMENT(ipsecstat.in_espreplay);
        ipseclog((LOG_WARNING,
            "replay packet in IPv4 ESP input: %s %s\n",
            ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
        goto bad;
    }
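
    /*
     * At this point the sequence number has only been *checked* against
     * the replay window (ipsec_chkreplay); the window itself is advanced
     * by ipsec_updatereplay() further below, after the authenticator has
     * been verified, so unauthenticated packets cannot slide the window.
     */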

    /* check ICV */
    {
        u_char sum0[AH_MAXSUMSIZE] __attribute__((aligned(4)));
        u_char sum[AH_MAXSUMSIZE] __attribute__((aligned(4)));
        const struct ah_algorithm *sumalgo;
        size_t siz;

        sumalgo = ah_algorithm_lookup(sav->alg_auth);
        if (!sumalgo)
            goto noreplaycheck;
        siz = (((*sumalgo->sumsiz)(sav) + 3) & ~(4 - 1));
        if (m->m_pkthdr.len < off + ESPMAXLEN + siz) {
            IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
            goto bad;
        }
        if (AH_MAXSUMSIZE < siz) {
            ipseclog((LOG_DEBUG,
                "internal error: AH_MAXSUMSIZE must be larger than %lu\n",
                (u_long)siz));
            IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
            goto bad;
        }

        m_copydata(m, m->m_pkthdr.len - siz, siz, (caddr_t) &sum0[0]);

        if (esp_auth(m, off, m->m_pkthdr.len - off - siz, sav, sum)) {
            ipseclog((LOG_WARNING, "auth fail in IPv4 ESP input: %s %s\n",
                ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
            IPSEC_STAT_INCREMENT(ipsecstat.in_espauthfail);
            goto bad;
        }

        if (bcmp(sum0, sum, siz) != 0) {
            ipseclog((LOG_WARNING, "auth fail in IPv4 ESP input: %s %s\n",
                ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
            IPSEC_STAT_INCREMENT(ipsecstat.in_espauthfail);
            goto bad;
        }

        /* strip off the authentication data */
        m_adj(m, -siz);
        ip = mtod(m, struct ip *);
#ifdef IPLEN_FLIPPED
        ip->ip_len = ip->ip_len - siz;
#else
        ip->ip_len = htons(ntohs(ip->ip_len) - siz);
#endif
        m->m_flags |= M_AUTHIPDGM;
        IPSEC_STAT_INCREMENT(ipsecstat.in_espauthsucc);
    }

    /*
     * update sequence number.
     */
    if ((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay) {
        if (ipsec_updatereplay(seq, sav)) {
            IPSEC_STAT_INCREMENT(ipsecstat.in_espreplay);
            goto bad;
        }
    }

noreplaycheck:

    /* process main esp header. */
    if (sav->flags & SADB_X_EXT_OLD) {
        /* RFC 1827 */
        esplen = sizeof(struct esp);
    } else {
        /* RFC 2406 */
        if (sav->flags & SADB_X_EXT_DERIV)
            esplen = sizeof(struct esp);
        else
            esplen = sizeof(struct newesp);
    }

    if (m->m_pkthdr.len < off + esplen + ivlen + sizeof(esptail)) {
        ipseclog((LOG_WARNING,
            "IPv4 ESP input: packet too short\n"));
        IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
        goto bad;
    }

    if (m->m_len < off + esplen + ivlen) {
        m = m_pullup(m, off + esplen + ivlen);
        if (m == NULL) {
            ipseclog((LOG_DEBUG,
                "IPv4 ESP input: can't pullup in esp4_input\n"));
            IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
            goto bad;
        }
    }

    /*
     * pre-compute and cache intermediate key
     */
    if (esp_schedule(algo, sav) != 0) {
        IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
        goto bad;
    }

    /*
     * decrypt the packet.
     */
    if (!algo->decrypt)
        panic("internal error: no decrypt function");
    KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_START, 0,0,0,0,0);
    if ((*algo->decrypt)(m, off, sav, algo, ivlen)) {
        /* m is already freed */
        m = NULL;
        ipseclog((LOG_ERR, "decrypt fail in IPv4 ESP input: %s\n",
            ipsec_logsastr(sav)));
        IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
        KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 1,0,0,0,0);
        goto bad;
    }
    KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 2,0,0,0,0);
    IPSEC_STAT_INCREMENT(ipsecstat.in_esphist[sav->alg_enc]);

    m->m_flags |= M_DECRYPTED;

    if (algo->finalizedecrypt)
    {
        unsigned char tag[algo->icvlen];
        if ((*algo->finalizedecrypt)(sav, tag, algo->icvlen)) {
            ipseclog((LOG_ERR, "packet decryption ICV failure\n"));
            IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
            KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 1,0,0,0,0);
            goto bad;
        }
        if (memcmp(saved_icv, tag, algo->icvlen)) {
            ipseclog((LOG_ERR, "packet decryption ICV mismatch\n"));
            IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
            KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 1,0,0,0,0);
            goto bad;
        }
    }
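
    /*
     * esp_schedule() above expands the raw SA key into the cipher's key
     * schedule once and caches it on the SA, so per-packet decryption does
     * not repeat the key setup.  For combined-mode ciphers that provide
     * finalizedecrypt, the recomputed tag is compared with the ICV saved
     * before decryption; any mismatch drops the packet.
     */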

    /*
     * find the trailer of the ESP.
     */
    m_copydata(m, m->m_pkthdr.len - sizeof(esptail), sizeof(esptail),
        (caddr_t)&esptail);
    nxt = esptail.esp_nxt;
    taillen = esptail.esp_padlen + sizeof(esptail);

    if (m->m_pkthdr.len < taillen
        || m->m_pkthdr.len - taillen < hlen) {	/*?*/
        ipseclog((LOG_WARNING,
            "bad pad length in IPv4 ESP input: %s %s\n",
            ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
        IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
        goto bad;
    }

    /* strip off the trailing pad area. */
    m_adj(m, -taillen);
    ip = mtod(m, struct ip *);
#ifdef IPLEN_FLIPPED
    ip->ip_len = ip->ip_len - taillen;
#else
    ip->ip_len = htons(ntohs(ip->ip_len) - taillen);
#endif
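
    /*
     * Layout reminder: after decryption the datagram ends with the ESP
     * trailer { pad bytes, pad length, next header } (the ICV, if any, was
     * already stripped above).  esp_nxt identifies the inner protocol and
     * padlen + sizeof(esptail) bytes are trimmed with m_adj(-taillen) so
     * only the inner payload remains.
     */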

    if (ip->ip_p == IPPROTO_UDP) {
        // offset includes the outer ip and udp header lengths.
        if (m->m_len < off) {
            m = m_pullup(m, off);
            if (m == NULL) {
                ipseclog((LOG_DEBUG,
                    "IPv4 ESP input: invalid udp encapsulated ESP packet length\n"));
                IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
                goto bad;
            }
        }

        // check the UDP encap header to detect changes in the source port, and then strip the header
        off -= sizeof(struct udphdr); // off no longer includes the udphdr's size
        // if peer is behind nat and this is the latest esp packet
        if ((sav->flags & SADB_X_EXT_NATT_DETECTED_PEER) != 0 &&
            (sav->flags & SADB_X_EXT_OLD) == 0 &&
            seq && sav->replay &&
            seq >= sav->replay->lastseq) {
            struct udphdr *encap_uh = (__typeof__(encap_uh))(void *)((caddr_t)ip + off);
            if (encap_uh->uh_sport &&
                ntohs(encap_uh->uh_sport) != sav->remote_ike_port) {
                sav->remote_ike_port = ntohs(encap_uh->uh_sport);
            }
        }
        ip = esp4_input_strip_udp_encap(m, off);
        esp = (struct esp *)(void *)(((u_int8_t *)ip) + off);
    }

    /* was it transmitted over the IPsec tunnel SA? */
    if (ipsec4_tunnel_validate(m, off + esplen + ivlen, nxt, sav, &ifamily)) {
        ifaddr_t ifa;
        struct sockaddr_storage addr;

        /*
         * strip off all the headers that precedes ESP header.
         *	IP4 xx ESP IP4' payload -> IP4' payload
         *
         * XXX more sanity checks
         * XXX relationship with gif?
         */
        u_int8_t tos, otos;
        int sum;

        tos = ip->ip_tos;
        m_adj(m, off + esplen + ivlen);
        if (ifamily == AF_INET) {
            struct sockaddr_in *ipaddr;

            if (m->m_len < sizeof(*ip)) {
                m = m_pullup(m, sizeof(*ip));
                if (!m) {
                    IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
                    goto bad;
                }
            }
            ip = mtod(m, struct ip *);
            /* ECN consideration. */
            otos = ip->ip_tos;
            if (ip_ecn_egress(ip4_ipsec_ecn, &tos, &ip->ip_tos) == 0) {
                IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
                goto bad;
            }

            if (otos != ip->ip_tos) {
                sum = ~ntohs(ip->ip_sum) & 0xffff;
                sum += (~otos & 0xffff) + ip->ip_tos;
                sum = (sum >> 16) + (sum & 0xffff);
                sum += (sum >> 16);  /* add carry */
                ip->ip_sum = htons(~sum & 0xffff);
            }
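
            /*
             * The inner header checksum is patched incrementally (RFC 1624
             * style): subtract the old ToS byte, add the new one, and fold
             * the carries, instead of recomputing the whole IP header
             * checksum after the ECN merge.
             */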

            if (!key_checktunnelsanity(sav, AF_INET,
                (caddr_t)&ip->ip_src, (caddr_t)&ip->ip_dst)) {
                ipseclog((LOG_ERR, "ipsec tunnel address mismatch "
                    "in ESP input: %s %s\n",
                    ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
                IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
                goto bad;
            }

            bzero(&addr, sizeof(addr));
            ipaddr = (__typeof__(ipaddr))&addr;
            ipaddr->sin_family = AF_INET;
            ipaddr->sin_len = sizeof(*ipaddr);
            ipaddr->sin_addr = ip->ip_dst;
        } else if (ifamily == AF_INET6) {
            struct sockaddr_in6 *ip6addr;

            /*
             * m_pullup is prohibited in KAME IPv6 input processing
             * but there's no other way!
             */
            if (m->m_len < sizeof(*ip6)) {
                m = m_pullup(m, sizeof(*ip6));
                if (!m) {
                    IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
                    goto bad;
                }
            }

            /*
             * Expect 32-bit aligned data pointer on strict-align
             * platforms.
             */
            MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);

            ip6 = mtod(m, struct ip6_hdr *);

            /* ECN consideration. */
            if (ip64_ecn_egress(ip4_ipsec_ecn, &tos, &ip6->ip6_flow) == 0) {
                IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
                goto bad;
            }

            if (!key_checktunnelsanity(sav, AF_INET6,
                (caddr_t)&ip6->ip6_src, (caddr_t)&ip6->ip6_dst)) {
                ipseclog((LOG_ERR, "ipsec tunnel address mismatch "
                    "in ESP input: %s %s\n",
                    ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
                IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
                goto bad;
            }

            bzero(&addr, sizeof(addr));
            ip6addr = (__typeof__(ip6addr))&addr;
            ip6addr->sin6_family = AF_INET6;
            ip6addr->sin6_len = sizeof(*ip6addr);
            ip6addr->sin6_addr = ip6->ip6_dst;
        } else {
            ipseclog((LOG_ERR, "ipsec tunnel unsupported address family "
                "in ESP input\n"));
            goto bad;
        }

        key_sa_recordxfer(sav, m);
        if (ipsec_addhist(m, IPPROTO_ESP, spi) != 0 ||
            ipsec_addhist(m, IPPROTO_IPV4, 0) != 0) {
            IPSEC_STAT_INCREMENT(ipsecstat.in_nomem);
            goto bad;
        }

        // update the receiving interface address based on the inner address
        ifa = ifa_ifwithaddr((struct sockaddr *)&addr);
        if (ifa) {
            m->m_pkthdr.rcvif = ifa->ifa_ifp;
            IFA_REMREF(ifa);
        }

        /* Clear the csum flags, they can't be valid for the inner headers */
        m->m_pkthdr.csum_flags = 0;

        // Input via IPSec interface
        if (sav->sah->ipsec_if != NULL) {
            if (ipsec_inject_inbound_packet(sav->sah->ipsec_if, m) == 0) {
                m = NULL;
                goto done;
            } else {
                goto bad;
            }
        }

        if (proto_input(ifamily == AF_INET ? PF_INET : PF_INET6, m) != 0)
            goto bad;

        nxt = IPPROTO_DONE;
        KERNEL_DEBUG(DBG_FNC_ESPIN | DBG_FUNC_END, 2,0,0,0,0);
    } else {
        /*
         * strip off ESP header and IV.
         * even in m_pulldown case, we need to strip off ESP so that
         * we can always compute checksum for AH correctly.
         */
        size_t stripsiz;

        stripsiz = esplen + ivlen;

        ip = mtod(m, struct ip *);
        ovbcopy((caddr_t)ip, (caddr_t)(((u_char *)ip) + stripsiz), off);
        m->m_data += stripsiz;
        m->m_len -= stripsiz;
        m->m_pkthdr.len -= stripsiz;

        ip = mtod(m, struct ip *);
#ifdef IPLEN_FLIPPED
        ip->ip_len = ip->ip_len - stripsiz;
#else
        ip->ip_len = htons(ntohs(ip->ip_len) - stripsiz);
#endif
        ip->ip_p = nxt;

        key_sa_recordxfer(sav, m);
        if (ipsec_addhist(m, IPPROTO_ESP, spi) != 0) {
            IPSEC_STAT_INCREMENT(ipsecstat.in_nomem);
            goto bad;
        }

        /*
         * Set the csum valid flag, if we authenticated the
         * packet, the payload shouldn't be corrupt unless
         * it was corrupted before being signed on the other
         * side.
         */
        if (nxt == IPPROTO_TCP || nxt == IPPROTO_UDP) {
            m->m_pkthdr.csum_flags = CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
            m->m_pkthdr.csum_data = 0xFFFF;
        }
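
        /*
         * CSUM_DATA_VALID + CSUM_PSEUDO_HDR with csum_data = 0xFFFF tells
         * the TCP/UDP input path to treat the transport checksum as already
         * verified, so it is not recomputed for traffic that arrived under
         * an authenticated ESP SA.
         */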

        if (nxt != IPPROTO_DONE) {
            if ((ip_protox[nxt]->pr_flags & PR_LASTHDR) != 0 &&
                ipsec4_in_reject(m, NULL)) {
                IPSEC_STAT_INCREMENT(ipsecstat.in_polvio);
                goto bad;
            }
            KERNEL_DEBUG(DBG_FNC_ESPIN | DBG_FUNC_END, 3,0,0,0,0);

            /* translate encapsulated UDP port ? */
            if ((sav->flags & SADB_X_EXT_NATT_MULTIPLEUSERS) != 0) {
                struct udphdr *udp;

                if (nxt != IPPROTO_UDP) {	/* not UDP packet - drop it */
                    IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
                    goto bad;
                }

                if (m->m_len < off + sizeof(struct udphdr)) {
                    m = m_pullup(m, off + sizeof(struct udphdr));
                    if (m == NULL) {
                        ipseclog((LOG_DEBUG,
                            "IPv4 ESP input: can't pullup UDP header in esp4_input\n"));
                        IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
                        goto bad;
                    }
                    ip = mtod(m, struct ip *);
                }
                udp = (struct udphdr *)(void *)(((u_int8_t *)ip) + off);

                lck_mtx_lock(sadb_mutex);
                if (sav->natt_encapsulated_src_port == 0) {
                    sav->natt_encapsulated_src_port = udp->uh_sport;
                } else if (sav->natt_encapsulated_src_port != udp->uh_sport) {	/* something wrong */
                    IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
                    lck_mtx_unlock(sadb_mutex);
                    goto bad;
                }
                lck_mtx_unlock(sadb_mutex);
                udp->uh_sport = htons(sav->remote_ike_port);
                udp->uh_sum = 0;
            }
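
            /*
             * SADB_X_EXT_NATT_MULTIPLEUSERS appears to cover the case where
             * several clients sit behind the same NAT: the encapsulated UDP
             * source port is pinned to the first value seen on this SA (a
             * later change is treated as an error and dropped), and the
             * header is rewritten to the peer's IKE port before dispatch so
             * the local stack sees a consistent 4-tuple.
             */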

            DTRACE_IP6(receive, struct mbuf *, m, struct inpcb *, NULL,
                struct ip *, ip, struct ifnet *, m->m_pkthdr.rcvif,
                struct ip *, ip, struct ip6_hdr *, NULL);

            // Input via IPSec interface
            if (sav->sah->ipsec_if != NULL) {
                ip->ip_len = htons(ip->ip_len + hlen);
                ip->ip_off = htons(ip->ip_off);
                ip->ip_sum = ip_cksum_hdr_in(m, hlen);
                if (ipsec_inject_inbound_packet(sav->sah->ipsec_if, m) == 0) {
                    m = NULL;
                    goto done;
                } else {
                    goto bad;
                }
            }

            ip_proto_dispatch_in(m, off, nxt, 0);
        } else {
            m_freem(m);
        }
        m = NULL;
    }

done:
    if (sav) {
        KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
            printf("DP esp4_input call free SA:0x%llx\n",
            (uint64_t)VM_KERNEL_ADDRPERM(sav)));
        key_freesav(sav, KEY_SADB_UNLOCKED);
    }
    IPSEC_STAT_INCREMENT(ipsecstat.in_success);
    return;

bad:
    if (sav) {
        KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
            printf("DP esp4_input call free SA:0x%llx\n",
            (uint64_t)VM_KERNEL_ADDRPERM(sav)));
        key_freesav(sav, KEY_SADB_UNLOCKED);
    }
    if (m)
        m_freem(m);
    KERNEL_DEBUG(DBG_FNC_ESPIN | DBG_FUNC_END, 4,0,0,0,0);
    return;
}

int
esp6_input(struct mbuf **mp, int *offp, int proto)
{
#pragma unused(proto)
    struct mbuf *m = *mp;
    int off = *offp;
    struct ip *ip;
    struct ip6_hdr *ip6;
    struct esp *esp;
    struct esptail esptail;
    u_int32_t spi;
    u_int32_t seq;
    struct secasvar *sav = NULL;
    size_t taillen;
    u_int16_t nxt;
    char *nproto;
    const struct esp_algorithm *algo;
    int ivlen;
    size_t esplen;
    sa_family_t ifamily;

    /* sanity check for alignment. */
    if (off % 4 != 0 || m->m_pkthdr.len % 4 != 0) {
        ipseclog((LOG_ERR, "IPv6 ESP input: packet alignment problem "
            "(off=%d, pktlen=%d)\n", off, m->m_pkthdr.len));
        IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
        goto bad;
    }

#ifndef PULLDOWN_TEST
    IP6_EXTHDR_CHECK(m, off, ESPMAXLEN, {return IPPROTO_DONE;});
    esp = (struct esp *)(void *)(mtod(m, caddr_t) + off);
#else
    IP6_EXTHDR_GET(esp, struct esp *, m, off, ESPMAXLEN);
    if (esp == NULL) {
        IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
        return IPPROTO_DONE;
    }
#endif
    /* Expect 32-bit data aligned pointer on strict-align platforms */
    MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);

    ip6 = mtod(m, struct ip6_hdr *);

    if (ntohs(ip6->ip6_plen) == 0) {
        ipseclog((LOG_ERR, "IPv6 ESP input: "
            "ESP with IPv6 jumbogram is not supported.\n"));
        IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
        goto bad;
    }

    nproto = ip6_get_prevhdr(m, off);
    if (nproto == NULL || (*nproto != IPPROTO_ESP &&
        !(*nproto == IPPROTO_UDP && off >= sizeof(struct udphdr)))) {
        ipseclog((LOG_DEBUG, "IPv6 ESP input: invalid protocol type\n"));
        IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
        goto bad;
    }

    /* find the sassoc. */
    spi = esp->esp_spi;

    if ((sav = key_allocsa(AF_INET6,
        (caddr_t)&ip6->ip6_src, (caddr_t)&ip6->ip6_dst,
        IPPROTO_ESP, spi)) == 0) {
        ipseclog((LOG_WARNING,
            "IPv6 ESP input: no key association found for spi %u\n",
            (u_int32_t)ntohl(spi)));
        IPSEC_STAT_INCREMENT(ipsec6stat.in_nosa);
        goto bad;
    }
    KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
        printf("DP esp6_input called to allocate SA:0x%llx\n",
        (uint64_t)VM_KERNEL_ADDRPERM(sav)));
    if (sav->state != SADB_SASTATE_MATURE
        && sav->state != SADB_SASTATE_DYING) {
        ipseclog((LOG_DEBUG,
            "IPv6 ESP input: non-mature/dying SA found for spi %u\n",
            (u_int32_t)ntohl(spi)));
        IPSEC_STAT_INCREMENT(ipsec6stat.in_badspi);
        goto bad;
    }
    algo = esp_algorithm_lookup(sav->alg_enc);
    if (!algo) {
        ipseclog((LOG_DEBUG, "IPv6 ESP input: "
            "unsupported encryption algorithm for spi %u\n",
            (u_int32_t)ntohl(spi)));
        IPSEC_STAT_INCREMENT(ipsec6stat.in_badspi);
        goto bad;
    }

    /* check if we have proper ivlen information */
    ivlen = sav->ivlen;
    if (ivlen < 0) {
        ipseclog((LOG_ERR, "improper ivlen in IPv6 ESP input: %s %s\n",
            ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
        IPSEC_STAT_INCREMENT(ipsec6stat.in_badspi);
        goto bad;
    }

    seq = ntohl(((struct newesp *)esp)->esp_seq);

    /* Save ICV from packet for verification later */
    unsigned char saved_icv[AH_MAXSUMSIZE];
    if (algo->finalizedecrypt) {
        size_t siz = algo->icvlen;
        m_copydata(m, m->m_pkthdr.len - siz, siz, (caddr_t) saved_icv);
    }

    if (!((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay
        && (sav->alg_auth && sav->key_auth)))
        goto noreplaycheck;

    if (sav->alg_auth == SADB_X_AALG_NULL ||
        sav->alg_auth == SADB_AALG_NONE)
        goto noreplaycheck;

    /*
     * check for sequence number.
     */
    if (ipsec_chkreplay(seq, sav))
        ; /* okay */
    else {
        IPSEC_STAT_INCREMENT(ipsec6stat.in_espreplay);
        ipseclog((LOG_WARNING,
            "replay packet in IPv6 ESP input: %s %s\n",
            ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
        goto bad;
    }

    /* check ICV */
    {
        u_char sum0[AH_MAXSUMSIZE] __attribute__((aligned(4)));
        u_char sum[AH_MAXSUMSIZE] __attribute__((aligned(4)));
        const struct ah_algorithm *sumalgo;
        size_t siz;

        sumalgo = ah_algorithm_lookup(sav->alg_auth);
        if (!sumalgo)
            goto noreplaycheck;
        siz = (((*sumalgo->sumsiz)(sav) + 3) & ~(4 - 1));
        if (m->m_pkthdr.len < off + ESPMAXLEN + siz) {
            IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
            goto bad;
        }
        if (AH_MAXSUMSIZE < siz) {
            ipseclog((LOG_DEBUG,
                "internal error: AH_MAXSUMSIZE must be larger than %lu\n",
                (u_long)siz));
            IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
            goto bad;
        }

        m_copydata(m, m->m_pkthdr.len - siz, siz, (caddr_t) &sum0[0]);

        if (esp_auth(m, off, m->m_pkthdr.len - off - siz, sav, sum)) {
            ipseclog((LOG_WARNING, "auth fail in IPv6 ESP input: %s %s\n",
                ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
            IPSEC_STAT_INCREMENT(ipsec6stat.in_espauthfail);
            goto bad;
        }

        if (bcmp(sum0, sum, siz) != 0) {
            ipseclog((LOG_WARNING, "auth fail in IPv6 ESP input: %s %s\n",
                ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
            IPSEC_STAT_INCREMENT(ipsec6stat.in_espauthfail);
            goto bad;
        }

        /* strip off the authentication data */
        m_adj(m, -siz);
        ip6 = mtod(m, struct ip6_hdr *);
        ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - siz);

        m->m_flags |= M_AUTHIPDGM;
        IPSEC_STAT_INCREMENT(ipsec6stat.in_espauthsucc);
    }

    /*
     * update sequence number.
     */
    if ((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay) {
        if (ipsec_updatereplay(seq, sav)) {
            IPSEC_STAT_INCREMENT(ipsec6stat.in_espreplay);
            goto bad;
        }
    }

noreplaycheck:

    /* process main esp header. */
    if (sav->flags & SADB_X_EXT_OLD) {
        /* RFC 1827 */
        esplen = sizeof(struct esp);
    } else {
        /* RFC 2406 */
        if (sav->flags & SADB_X_EXT_DERIV)
            esplen = sizeof(struct esp);
        else
            esplen = sizeof(struct newesp);
    }

    if (m->m_pkthdr.len < off + esplen + ivlen + sizeof(esptail)) {
        ipseclog((LOG_WARNING,
            "IPv6 ESP input: packet too short\n"));
        IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
        goto bad;
    }

#ifndef PULLDOWN_TEST
    IP6_EXTHDR_CHECK(m, off, esplen + ivlen, return IPPROTO_DONE);	/*XXX*/
#else
    IP6_EXTHDR_GET(esp, struct esp *, m, off, esplen + ivlen);
    if (esp == NULL) {
        IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
        return IPPROTO_DONE;
    }
#endif

    ip6 = mtod(m, struct ip6_hdr *);	/*set it again just in case*/

    /*
     * pre-compute and cache intermediate key
     */
    if (esp_schedule(algo, sav) != 0) {
        IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
        goto bad;
    }

    /*
     * decrypt the packet.
     */
    if (!algo->decrypt)
        panic("internal error: no decrypt function");
    if ((*algo->decrypt)(m, off, sav, algo, ivlen)) {
        /* m is already freed */
        m = NULL;
        ipseclog((LOG_ERR, "decrypt fail in IPv6 ESP input: %s\n",
            ipsec_logsastr(sav)));
        IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
        goto bad;
    }
    IPSEC_STAT_INCREMENT(ipsec6stat.in_esphist[sav->alg_enc]);

    m->m_flags |= M_DECRYPTED;

    if (algo->finalizedecrypt)
    {
        unsigned char tag[algo->icvlen];
        if ((*algo->finalizedecrypt)(sav, tag, algo->icvlen)) {
            ipseclog((LOG_ERR, "packet decryption ICV failure\n"));
            IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
            KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 1,0,0,0,0);
            goto bad;
        }
        if (memcmp(saved_icv, tag, algo->icvlen)) {
            ipseclog((LOG_ERR, "packet decryption ICV mismatch\n"));
            IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
            KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 1,0,0,0,0);
            goto bad;
        }
    }

    /*
     * find the trailer of the ESP.
     */
    m_copydata(m, m->m_pkthdr.len - sizeof(esptail), sizeof(esptail),
        (caddr_t)&esptail);
    nxt = esptail.esp_nxt;
    taillen = esptail.esp_padlen + sizeof(esptail);

    if (m->m_pkthdr.len < taillen
        || m->m_pkthdr.len - taillen < sizeof(struct ip6_hdr)) {	/*?*/
        ipseclog((LOG_WARNING,
            "bad pad length in IPv6 ESP input: %s %s\n",
            ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
        IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
        goto bad;
    }

    /* strip off the trailing pad area. */
    m_adj(m, -taillen);
    ip6 = mtod(m, struct ip6_hdr *);
    ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - taillen);

    if (*nproto == IPPROTO_UDP) {
        // offset includes the outer ip and udp header lengths.
        if (m->m_len < off) {
            m = m_pullup(m, off);
            if (m == NULL) {
                ipseclog((LOG_DEBUG,
                    "IPv6 ESP input: invalid udp encapsulated ESP packet length\n"));
                IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
                goto bad;
            }
        }

        // check the UDP encap header to detect changes in the source port, and then strip the header
        off -= sizeof(struct udphdr); // off no longer includes the udphdr's size
        // if peer is behind nat and this is the latest esp packet
        if ((sav->flags & SADB_X_EXT_NATT_DETECTED_PEER) != 0 &&
            (sav->flags & SADB_X_EXT_OLD) == 0 &&
            seq && sav->replay &&
            seq >= sav->replay->lastseq) {
            struct udphdr *encap_uh = (__typeof__(encap_uh))(void *)((caddr_t)ip6 + off);
            if (encap_uh->uh_sport &&
                ntohs(encap_uh->uh_sport) != sav->remote_ike_port) {
                sav->remote_ike_port = ntohs(encap_uh->uh_sport);
            }
        }
        ip6 = esp6_input_strip_udp_encap(m, off);
        esp = (struct esp *)(void *)(((u_int8_t *)ip6) + off);
    }

    /* was it transmitted over the IPsec tunnel SA? */
    if (ipsec6_tunnel_validate(m, off + esplen + ivlen, nxt, sav, &ifamily)) {
        ifaddr_t ifa;
        struct sockaddr_storage addr;

        /*
         * strip off all the headers that precedes ESP header.
         *	IP6 xx ESP IP6' payload -> IP6' payload
         *
         * XXX more sanity checks
         * XXX relationship with gif?
         */
        u_int32_t flowinfo;	/*net endian*/
        flowinfo = ip6->ip6_flow;
        m_adj(m, off + esplen + ivlen);
        if (ifamily == AF_INET6) {
            struct sockaddr_in6 *ip6addr;

            if (m->m_len < sizeof(*ip6)) {
#ifndef PULLDOWN_TEST
                /*
                 * m_pullup is prohibited in KAME IPv6 input processing
                 * but there's no other way!
                 */
#else
                /* okay to pullup in m_pulldown style */
#endif
                m = m_pullup(m, sizeof(*ip6));
                if (!m) {
                    IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
                    goto bad;
                }
            }
            ip6 = mtod(m, struct ip6_hdr *);
            /* ECN consideration. */
            if (ip6_ecn_egress(ip6_ipsec_ecn, &flowinfo, &ip6->ip6_flow) == 0) {
                IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
                goto bad;
            }
            if (!key_checktunnelsanity(sav, AF_INET6,
                (caddr_t)&ip6->ip6_src, (caddr_t)&ip6->ip6_dst)) {
                ipseclog((LOG_ERR, "ipsec tunnel address mismatch "
                    "in IPv6 ESP input: %s %s\n",
                    ipsec6_logpacketstr(ip6, spi),
                    ipsec_logsastr(sav)));
                IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
                goto bad;
            }

            bzero(&addr, sizeof(addr));
            ip6addr = (__typeof__(ip6addr))&addr;
            ip6addr->sin6_family = AF_INET6;
            ip6addr->sin6_len = sizeof(*ip6addr);
            ip6addr->sin6_addr = ip6->ip6_dst;
        } else if (ifamily == AF_INET) {
            struct sockaddr_in *ipaddr;
            u_int8_t otos;
            int sum;

            if (m->m_len < sizeof(*ip)) {
                m = m_pullup(m, sizeof(*ip));
                if (!m) {
                    IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
                    goto bad;
                }
            }

            ip = mtod(m, struct ip *);
            otos = ip->ip_tos;
            /* ECN consideration. */
            if (ip46_ecn_egress(ip6_ipsec_ecn, &flowinfo, &ip->ip_tos) == 0) {
                IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
                goto bad;
            }

            if (otos != ip->ip_tos) {
                sum = ~ntohs(ip->ip_sum) & 0xffff;
                sum += (~otos & 0xffff) + ip->ip_tos;
                sum = (sum >> 16) + (sum & 0xffff);
                sum += (sum >> 16);  /* add carry */
                ip->ip_sum = htons(~sum & 0xffff);
            }

            if (!key_checktunnelsanity(sav, AF_INET,
                (caddr_t)&ip->ip_src, (caddr_t)&ip->ip_dst)) {
                ipseclog((LOG_ERR, "ipsec tunnel address mismatch "
                    "in ESP input: %s %s\n",
                    ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
                IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
                goto bad;
            }

            bzero(&addr, sizeof(addr));
            ipaddr = (__typeof__(ipaddr))&addr;
            ipaddr->sin_family = AF_INET;
            ipaddr->sin_len = sizeof(*ipaddr);
            ipaddr->sin_addr = ip->ip_dst;
        }

        key_sa_recordxfer(sav, m);
        if (ipsec_addhist(m, IPPROTO_ESP, spi) != 0 ||
            ipsec_addhist(m, IPPROTO_IPV6, 0) != 0) {
            IPSEC_STAT_INCREMENT(ipsec6stat.in_nomem);
            goto bad;
        }

        // update the receiving interface address based on the inner address
        ifa = ifa_ifwithaddr((struct sockaddr *)&addr);
        if (ifa) {
            m->m_pkthdr.rcvif = ifa->ifa_ifp;
            IFA_REMREF(ifa);
        }

        // Input via IPSec interface
        if (sav->sah->ipsec_if != NULL) {
            if (ipsec_inject_inbound_packet(sav->sah->ipsec_if, m) == 0) {
                m = NULL;
                nxt = IPPROTO_DONE;
                goto done;
            } else {
                goto bad;
            }
        }

        if (proto_input(ifamily == AF_INET ? PF_INET : PF_INET6, m) != 0)
            goto bad;

        nxt = IPPROTO_DONE;
    } else {
        /*
         * strip off ESP header and IV.
         * even in m_pulldown case, we need to strip off ESP so that
         * we can always compute checksum for AH correctly.
         */
        size_t stripsiz;
        char *prvnxtp;

        /*
         * Set the next header field of the previous header correctly.
         */
        prvnxtp = ip6_get_prevhdr(m, off); /* XXX */
        *prvnxtp = nxt;

        stripsiz = esplen + ivlen;

        ip6 = mtod(m, struct ip6_hdr *);
        if (m->m_len >= stripsiz + off) {
            ovbcopy((caddr_t)ip6, ((caddr_t)ip6) + stripsiz, off);
            m->m_data += stripsiz;
            m->m_len -= stripsiz;
            m->m_pkthdr.len -= stripsiz;
        } else {
            /*
             * this comes with no copy if the boundary is on
             * cluster.
             */
            struct mbuf *n;

            n = m_split(m, off, M_DONTWAIT);
            if (n == NULL) {
                /* m is retained by m_split */
                goto bad;
            }
            m_adj(n, stripsiz);
            m_cat(m, n);
            /* m_cat does not update m_pkthdr.len */
            m->m_pkthdr.len += n->m_pkthdr.len;
        }
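
        /*
         * Two strip strategies above: when the ESP header and IV sit
         * entirely inside the first mbuf, the IPv6 header is simply slid
         * forward with ovbcopy(); otherwise the chain is cut at the ESP
         * header with m_split(), stripsiz bytes are trimmed from the second
         * half with m_adj(), and the halves are rejoined with m_cat(),
         * which avoids copying payload that already sits on a cluster.
         */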

#ifndef PULLDOWN_TEST
        /*
         * KAME requires that the packet to be contiguous on the
         * mbuf.  We need to make that sure.
         * this kind of code should be avoided.
         * XXX other conditions to avoid running this part?
         */
        if (m->m_len != m->m_pkthdr.len) {
            struct mbuf *n = NULL;
            int maxlen;

            MGETHDR(n, M_DONTWAIT, MT_HEADER);	/* MAC-OK */
            maxlen = MHLEN;
            if (n)
                M_COPY_PKTHDR(n, m);
            if (n && m->m_pkthdr.len > maxlen) {
                MCLGET(n, M_DONTWAIT);
                maxlen = MCLBYTES;
                if ((n->m_flags & M_EXT) == 0) {
                    m_free(n);
                    n = NULL;
                }
            }
            if (!n) {
                printf("esp6_input: mbuf allocation failed\n");
                goto bad;
            }

            if (m->m_pkthdr.len <= maxlen) {
                m_copydata(m, 0, m->m_pkthdr.len, mtod(n, caddr_t));
                n->m_len = m->m_pkthdr.len;
                n->m_pkthdr.len = m->m_pkthdr.len;
                n->m_next = NULL;
                m_freem(m);
            } else {
                m_copydata(m, 0, maxlen, mtod(n, caddr_t));
                n->m_len = maxlen;
                n->m_pkthdr.len = m->m_pkthdr.len;
                n->m_next = m;
                m_adj(m, maxlen);
                m->m_flags &= ~M_PKTHDR;
            }
            m = n;
        }
#endif

        ip6 = mtod(m, struct ip6_hdr *);
        ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - stripsiz);

        key_sa_recordxfer(sav, m);
        if (ipsec_addhist(m, IPPROTO_ESP, spi) != 0) {
            IPSEC_STAT_INCREMENT(ipsec6stat.in_nomem);
            goto bad;
        }

        /*
         * Set the csum valid flag, if we authenticated the
         * packet, the payload shouldn't be corrupt unless
         * it was corrupted before being signed on the other
         * side.
         */
        if (nxt == IPPROTO_TCP || nxt == IPPROTO_UDP) {
            m->m_pkthdr.csum_flags = CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
            m->m_pkthdr.csum_data = 0xFFFF;
        }

        // Input via IPSec interface
        if (sav->sah->ipsec_if != NULL) {
            if (ipsec_inject_inbound_packet(sav->sah->ipsec_if, m) == 0) {
                m = NULL;
                nxt = IPPROTO_DONE;
                goto done;
            } else {
                goto bad;
            }
        }
    }

done:
    if (sav) {
        KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
            printf("DP esp6_input call free SA:0x%llx\n",
            (uint64_t)VM_KERNEL_ADDRPERM(sav)));
        key_freesav(sav, KEY_SADB_UNLOCKED);
    }
    IPSEC_STAT_INCREMENT(ipsec6stat.in_success);
    return nxt;

bad:
    if (sav) {
        KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
            printf("DP esp6_input call free SA:0x%llx\n",
            (uint64_t)VM_KERNEL_ADDRPERM(sav)));
        key_freesav(sav, KEY_SADB_UNLOCKED);
    }
    if (m)
        m_freem(m);
    return IPPROTO_DONE;
}
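
/*
 * Return-value convention for esp6_input() above: on the normal
 * transport-mode path the inner next-header value (nxt) is handed back so
 * ip6_input() keeps walking the extension-header chain, while
 * IPPROTO_DONE tells the caller the packet has been fully consumed
 * (delivered through the tunnel/ipsec-interface path or dropped on error).
 */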

void
esp6_ctlinput(int cmd, struct sockaddr *sa, void *d)
{
    const struct newesp *espp;
    struct newesp esp;
    struct ip6ctlparam *ip6cp = NULL, ip6cp1;
    struct secasvar *sav;
    struct ip6_hdr *ip6;
    struct mbuf *m;
    int off;
    struct sockaddr_in6 *sa6_src, *sa6_dst;

    if (sa->sa_family != AF_INET6 ||
        sa->sa_len != sizeof(struct sockaddr_in6))
        return;
    if ((unsigned)cmd >= PRC_NCMDS)
        return;

    /* if the parameter is from icmp6, decode it. */
    if (d != NULL) {
        ip6cp = (struct ip6ctlparam *)d;
        m = ip6cp->ip6c_m;
        ip6 = ip6cp->ip6c_ip6;
        off = ip6cp->ip6c_off;
    } else {
        m = NULL;
        ip6 = NULL;
        off = 0;
    }

    if (ip6) {
        /*
         * Notify the error to all possible sockets via pfctlinput2.
         * Since the upper layer information (such as protocol type,
         * source and destination ports) is embedded in the encrypted
         * data and might have been cut, we can't directly call
         * an upper layer ctlinput function. However, the pcbnotify
         * function will consider source and destination addresses
         * as well as the flow info value, and may be able to find
         * some PCB that should be notified.
         * Although pfctlinput2 will call esp6_ctlinput(), there is
         * no possibility of an infinite loop of function calls,
         * because we don't pass the inner IPv6 header.
         */
        bzero(&ip6cp1, sizeof(ip6cp1));
        ip6cp1.ip6c_src = ip6cp->ip6c_src;
        pfctlinput2(cmd, sa, (void *)&ip6cp1);

        /*
         * Then go to special cases that need ESP header information.
         * XXX: We assume that when ip6 is non NULL,
         * M and OFF are valid.
         */

        /* check if we can safely examine src and dst ports */
        if (m->m_pkthdr.len < off + sizeof(esp))
            return;

        if (m->m_len < off + sizeof(esp)) {
            /*
             * this should be rare case,
             * so we compromise on this copy...
             */
            m_copydata(m, off, sizeof(esp), (caddr_t)&esp);
            espp = &esp;
        } else
            espp = (struct newesp *)(void *)(mtod(m, caddr_t) + off);

        if (cmd == PRC_MSGSIZE) {
            int valid = 0;

            /*
             * Check to see if we have a valid SA corresponding to
             * the address in the ICMP message payload.
             */
            sa6_src = ip6cp->ip6c_src;
            sa6_dst = (struct sockaddr_in6 *)(void *)sa;
            sav = key_allocsa(AF_INET6,
                (caddr_t)&sa6_src->sin6_addr,
                (caddr_t)&sa6_dst->sin6_addr,
                IPPROTO_ESP, espp->esp_spi);
            if (sav) {
                if (sav->state == SADB_SASTATE_MATURE ||
                    sav->state == SADB_SASTATE_DYING)
                    valid++;
                key_freesav(sav, KEY_SADB_UNLOCKED);
            }

            /* XXX Further validation? */

            /*
             * Depending on the value of "valid" and routing table
             * size (mtudisc_{hi,lo}wat), we will:
             * - recalculate the new MTU and create the
             *   corresponding routing entry, or
             * - ignore the MTU change notification.
             */
            icmp6_mtudisc_update((struct ip6ctlparam *)d, valid);
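
            /*
             * Path-MTU note: "valid" is only set when a mature or dying SA
             * matches the addresses carried in the ICMPv6 payload, so
             * icmp6_mtudisc_update() can tell a plausible Packet-Too-Big
             * report from one naming an unknown SA and, per the comment
             * above, either pick up the new MTU or ignore the notification.
             */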
        }
    } else {
        /* we normally notify any pcb here */
    }
}