/*
 * Copyright (c) 2008-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* $FreeBSD: src/sys/netinet6/esp_input.c,v 1.1.2.3 2001/07/03 11:01:50 ume Exp $ */
/* $KAME: esp_input.c,v 1.55 2001/03/23 08:08:47 itojun Exp $ */
/*
 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the project nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * RFC1827/2406 Encapsulated Security Payload.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mcache.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/syslog.h>

#include <net/if_ipsec.h>
#include <net/route.h>
#include <kern/cpu_number.h>
#include <kern/locks.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/in_var.h>
#include <netinet/ip_ecn.h>
#include <netinet/in_pcb.h>
#include <netinet/udp.h>
#include <netinet/tcp.h>
#include <netinet/in_tclass.h>
#include <netinet6/ip6_ecn.h>

#include <netinet/ip6.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/ip6_var.h>
#include <netinet/icmp6.h>
#include <netinet6/ip6protosw.h>

#include <netinet6/ipsec.h>
#include <netinet6/ipsec6.h>
#include <netinet6/ah.h>
#include <netinet6/ah6.h>
#include <netinet6/esp.h>
#include <netinet6/esp6.h>
#include <netkey/key.h>
#include <netkey/keydb.h>
#include <netkey/key_debug.h>

#include <net/kpi_protocol.h>
#include <netinet/kpi_ipfilter_var.h>

#include <net/net_osdep.h>
#include <mach/sdt.h>
#include <corecrypto/cc.h>

#include <sys/kdebug.h>
#define DBG_LAYER_BEG           NETDBG_CODE(DBG_NETIPSEC, 1)
#define DBG_LAYER_END           NETDBG_CODE(DBG_NETIPSEC, 3)
#define DBG_FNC_ESPIN           NETDBG_CODE(DBG_NETIPSEC, (6 << 8))
#define DBG_FNC_DECRYPT         NETDBG_CODE(DBG_NETIPSEC, (7 << 8))
#define IPLEN_FLIPPED
extern lck_mtx_t *sadb_mutex;
#define ESPMAXLEN \
	(sizeof(struct esp) < sizeof(struct newesp) \
	? sizeof(struct newesp) : sizeof(struct esp))
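/*
 * ESPMAXLEN is the larger of the two ESP header layouts, the RFC 1827 style
 * <struct esp> and the RFC 2406 style <struct newesp>, so a single pullup of
 * off + ESPMAXLEN is enough to examine either header form before the matching
 * SA tells us which format the peer is using.
 */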
static struct ip *
esp4_input_strip_udp_encap(struct mbuf *m, int iphlen)
{
	// strip the udp header that's encapsulating ESP
	struct ip *ip;
	size_t stripsiz = sizeof(struct udphdr);

	ip = mtod(m, __typeof__(ip));
	ovbcopy((caddr_t)ip, (caddr_t)(((u_char *)ip) + stripsiz), iphlen);
	m->m_data += stripsiz;
	m->m_len -= stripsiz;
	m->m_pkthdr.len -= stripsiz;
	ip = mtod(m, __typeof__(ip));
	ip->ip_len = ip->ip_len - stripsiz;
	ip->ip_p = IPPROTO_ESP;
	return ip;
}
static struct ip6_hdr *
esp6_input_strip_udp_encap(struct mbuf *m, int ip6hlen)
{
	// strip the udp header that's encapsulating ESP
	struct ip6_hdr *ip6;
	size_t stripsiz = sizeof(struct udphdr);

	ip6 = mtod(m, __typeof__(ip6));
	ovbcopy((caddr_t)ip6, (caddr_t)(((u_char *)ip6) + stripsiz), ip6hlen);
	m->m_data += stripsiz;
	m->m_len -= stripsiz;
	m->m_pkthdr.len -= stripsiz;
	ip6 = mtod(m, __typeof__(ip6));
	ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - stripsiz);
	ip6->ip6_nxt = IPPROTO_ESP;
	return ip6;
}
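/*
 * The two strip helpers above handle UDP-encapsulated ESP (NAT traversal as
 * described in RFC 3948): the UDP header sitting between the outer IP/IPv6
 * header and the ESP header is removed in place, the length fields are fixed
 * up, and the next-protocol field is rewritten to IPPROTO_ESP so the rest of
 * the input path can treat the packet as ordinary ESP.
 */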
static void
esp_input_log(struct mbuf *m, struct secasvar *sav, u_int32_t spi, u_int32_t seq)
{
	if (net_mpklog_enabled &&
	    (sav->sah->ipsec_if->if_xflags & IFXF_MPK_LOG) == IFXF_MPK_LOG) {
		struct tcphdr th = {};
		size_t iphlen = 0;
		u_int32_t proto_len = 0;
		u_int8_t proto = 0;

		struct ip *inner_ip = mtod(m, struct ip *);
		if (IP_VHL_V(inner_ip->ip_vhl) == 4) {
			iphlen = IP_VHL_HL(inner_ip->ip_vhl) << 2;
			proto = inner_ip->ip_p;
		} else if (IP_VHL_V(inner_ip->ip_vhl) == 6) {
			struct ip6_hdr *inner_ip6 = mtod(m, struct ip6_hdr *);
			iphlen = sizeof(struct ip6_hdr);
			proto = inner_ip6->ip6_nxt;
		}

		if (proto == IPPROTO_TCP) {
			if ((int)(iphlen + sizeof(th)) <= m->m_pkthdr.len) {
				m_copydata(m, iphlen, sizeof(th), (u_int8_t *)&th);
			}

			proto_len = m->m_pkthdr.len - iphlen - (th.th_off << 2);
			MPKL_ESP_INPUT_TCP(esp_mpkl_log_object,
			    ntohl(spi), seq,
			    ntohs(th.th_sport), ntohs(th.th_dport),
			    ntohl(th.th_seq), proto_len);
		}
	}
}
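/*
 * esp_input_log() feeds the multipath kernel packet log: when the SA's ipsec
 * interface is flagged with IFXF_MPK_LOG, the inner header of the decrypted
 * datagram is examined and, for TCP payloads, MPKL_ESP_INPUT_TCP records the
 * flow's ports, sequence number and payload length.
 */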
void
esp4_input(struct mbuf *m, int off)
{
	(void)esp4_input_extended(m, off, NULL);
}
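/*
 * esp4_input() is the protocol-switch entry point.  The _extended variant also
 * receives the ipsec interface the packet arrived on, so the SA lookup via
 * key_allocsa_extended() can be scoped to that interface and the decrypted
 * mbuf can be handed back to the caller instead of being re-injected when the
 * packet already came in on the SA's own interface.
 */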
struct mbuf *
esp4_input_extended(struct mbuf *m, int off, ifnet_t interface)
{
	struct ip *ip;
	struct ip6_hdr *ip6;
	struct esp *esp;
	struct esptail esptail;
	u_int32_t spi;
	u_int32_t seq;
	struct secasvar *sav = NULL;
	size_t taillen;
	u_int16_t nxt;
	const struct esp_algorithm *algo;
	int ivlen;
	size_t hlen;
	size_t esplen;
	sa_family_t ifamily;
	struct mbuf *out_m = NULL;
	mbuf_traffic_class_t traffic_class = 0;

	KERNEL_DEBUG(DBG_FNC_ESPIN | DBG_FUNC_START, 0, 0, 0, 0, 0);
	/* sanity check for alignment. */
	if (off % 4 != 0 || m->m_pkthdr.len % 4 != 0) {
		ipseclog((LOG_ERR, "IPv4 ESP input: packet alignment problem "
		    "(off=%d, pktlen=%d)\n", off, m->m_pkthdr.len));
		IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
		goto bad;
	}

	if (m->m_len < off + ESPMAXLEN) {
		m = m_pullup(m, off + ESPMAXLEN);
		if (!m) {
			ipseclog((LOG_DEBUG,
			    "IPv4 ESP input: can't pullup in esp4_input\n"));
			IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
			goto bad;
		}
	}

	m->m_pkthdr.csum_flags &= ~CSUM_RX_FLAGS;

	/* Expect 32-bit aligned data pointer on strict-align platforms */
	MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);

	ip = mtod(m, struct ip *);
	// expect udp-encap and esp packets only
	if (ip->ip_p != IPPROTO_ESP &&
	    !(ip->ip_p == IPPROTO_UDP && off >= sizeof(struct udphdr))) {
		ipseclog((LOG_DEBUG,
		    "IPv4 ESP input: invalid protocol type\n"));
		IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
		goto bad;
	}
	esp = (struct esp *)(void *)(((u_int8_t *)ip) + off);
#ifdef _IP_VHL
	hlen = IP_VHL_HL(ip->ip_vhl) << 2;
#else
	hlen = ip->ip_hl << 2;
#endif

	/* find the sassoc. */
	spi = esp->esp_spi;

	if ((sav = key_allocsa_extended(AF_INET,
	    (caddr_t)&ip->ip_src, (caddr_t)&ip->ip_dst,
	    IPPROTO_ESP, spi, interface)) == 0) {
		ipseclog((LOG_WARNING,
		    "IPv4 ESP input: no key association found for spi %u (0x%08x)\n",
		    (u_int32_t)ntohl(spi), (u_int32_t)ntohl(spi)));
		IPSEC_STAT_INCREMENT(ipsecstat.in_nosa);
		goto bad;
	}
	KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
	    printf("DP esp4_input called to allocate SA:0x%llx\n",
	    (uint64_t)VM_KERNEL_ADDRPERM(sav)));
	if (sav->state != SADB_SASTATE_MATURE
	    && sav->state != SADB_SASTATE_DYING) {
		ipseclog((LOG_DEBUG,
		    "IPv4 ESP input: non-mature/dying SA found for spi %u (0x%08x)\n",
		    (u_int32_t)ntohl(spi), (u_int32_t)ntohl(spi)));
		IPSEC_STAT_INCREMENT(ipsecstat.in_badspi);
		goto bad;
	}
	algo = esp_algorithm_lookup(sav->alg_enc);
	if (!algo) {
		ipseclog((LOG_DEBUG, "IPv4 ESP input: "
		    "unsupported encryption algorithm for spi %u (0x%08x)\n",
		    (u_int32_t)ntohl(spi), (u_int32_t)ntohl(spi)));
		IPSEC_STAT_INCREMENT(ipsecstat.in_badspi);
		goto bad;
	}

	/* check if we have proper ivlen information */
	ivlen = sav->ivlen;
	if (ivlen < 0) {
		ipseclog((LOG_ERR, "inproper ivlen in IPv4 ESP input: %s %s\n",
		    ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
		IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
		goto bad;
	}

	seq = ntohl(((struct newesp *)esp)->esp_seq);

	if ((sav->flags2 & SADB_X_EXT_SA2_SEQ_PER_TRAFFIC_CLASS) ==
	    SADB_X_EXT_SA2_SEQ_PER_TRAFFIC_CLASS) {
		u_int8_t dscp = ip->ip_tos >> IPTOS_DSCP_SHIFT;
		traffic_class = rfc4594_dscp_to_tc(dscp);
	}
	/* Save ICV from packet for verification later */
	size_t siz = 0;
	unsigned char saved_icv[AH_MAXSUMSIZE];
	if (algo->finalizedecrypt) {
		siz = algo->icvlen;
		m_copydata(m, m->m_pkthdr.len - siz, siz, (caddr_t) saved_icv);
		goto delay_icv;
	}

	if (!((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay[traffic_class] != NULL &&
	    (sav->alg_auth && sav->key_auth))) {
		goto noreplaycheck;
	}

	if (sav->alg_auth == SADB_X_AALG_NULL ||
	    sav->alg_auth == SADB_AALG_NONE) {
		goto noreplaycheck;
	}

	/*
	 * check for sequence number.
	 */
	if (ipsec_chkreplay(seq, sav, traffic_class)) {
		; /*okey*/
	} else {
		IPSEC_STAT_INCREMENT(ipsecstat.in_espreplay);
		ipseclog((LOG_WARNING,
		    "replay packet in IPv4 ESP input: %s %s\n",
		    ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
		goto bad;
	}

	/* check ICV */
	{
		u_char sum0[AH_MAXSUMSIZE] __attribute__((aligned(4)));
		u_char sum[AH_MAXSUMSIZE] __attribute__((aligned(4)));
		const struct ah_algorithm *sumalgo;

		sumalgo = ah_algorithm_lookup(sav->alg_auth);
		if (!sumalgo) {
			goto noreplaycheck;
		}
		siz = (((*sumalgo->sumsiz)(sav) + 3) & ~(4 - 1));
		if (m->m_pkthdr.len < off + ESPMAXLEN + siz) {
			IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
			goto bad;
		}
		if (AH_MAXSUMSIZE < siz) {
			ipseclog((LOG_DEBUG,
			    "internal error: AH_MAXSUMSIZE must be larger than %u\n",
			    (u_int32_t)siz));
			IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
			goto bad;
		}

		m_copydata(m, m->m_pkthdr.len - siz, siz, (caddr_t) &sum0[0]);

		if (esp_auth(m, off, m->m_pkthdr.len - off - siz, sav, sum)) {
			ipseclog((LOG_WARNING, "auth fail in IPv4 ESP input: %s %s\n",
			    ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
			IPSEC_STAT_INCREMENT(ipsecstat.in_espauthfail);
			goto bad;
		}

		if (cc_cmp_safe(siz, sum0, sum)) {
			ipseclog((LOG_WARNING, "cc_cmp fail in IPv4 ESP input: %s %s\n",
			    ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
			IPSEC_STAT_INCREMENT(ipsecstat.in_espauthfail);
			goto bad;
		}

delay_icv:

		/* strip off the authentication data */
		m_adj(m, -siz);
		ip = mtod(m, struct ip *);
#ifdef IPLEN_FLIPPED
		ip->ip_len = ip->ip_len - siz;
#else
		ip->ip_len = htons(ntohs(ip->ip_len) - siz);
#endif
		m->m_flags |= M_AUTHIPDGM;
		IPSEC_STAT_INCREMENT(ipsecstat.in_espauthsucc);
	}

	/*
	 * update sequence number.
	 */
	if ((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay[traffic_class] != NULL) {
		if (ipsec_updatereplay(seq, sav, traffic_class)) {
			IPSEC_STAT_INCREMENT(ipsecstat.in_espreplay);
			goto bad;
		}
	}

noreplaycheck:
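	/*
	 * Note on the anti-replay logic above: ipsec_chkreplay() only tests the
	 * received sequence number against the per-traffic-class replay window,
	 * while ipsec_updatereplay() is what advances the window, and it runs
	 * only after the ICV has been verified, so forged sequence numbers
	 * cannot move the window.
	 */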
	/* process main esp header. */
	if (sav->flags & SADB_X_EXT_OLD) {
		/* RFC 1827 */
		esplen = sizeof(struct esp);
	} else {
		/* RFC 2406 */
		if (sav->flags & SADB_X_EXT_DERIV) {
			esplen = sizeof(struct esp);
		} else {
			esplen = sizeof(struct newesp);
		}
	}

	if (m->m_pkthdr.len < off + esplen + ivlen + sizeof(esptail)) {
		ipseclog((LOG_WARNING,
		    "IPv4 ESP input: packet too short\n"));
		IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
		goto bad;
	}

	if (m->m_len < off + esplen + ivlen) {
		m = m_pullup(m, off + esplen + ivlen);
		if (!m) {
			ipseclog((LOG_DEBUG,
			    "IPv4 ESP input: can't pullup in esp4_input\n"));
			IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
			goto bad;
		}
	}

	/*
	 * pre-compute and cache intermediate key
	 */
	if (esp_schedule(algo, sav) != 0) {
		IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
		goto bad;
	}

	/*
	 * decrypt the packet.
	 */
	if (!algo->decrypt) {
		panic("internal error: no decrypt function");
	}
	KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_START, 0, 0, 0, 0, 0);
	if ((*algo->decrypt)(m, off, sav, algo, ivlen)) {
		/* m is already freed */
		m = NULL;
		ipseclog((LOG_ERR, "decrypt fail in IPv4 ESP input: %s\n",
		    ipsec_logsastr(sav)));
		IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
		KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 1, 0, 0, 0, 0);
		goto bad;
	}
	KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 2, 0, 0, 0, 0);
	IPSEC_STAT_INCREMENT(ipsecstat.in_esphist[sav->alg_enc]);

	m->m_flags |= M_DECRYPTED;

	if (algo->finalizedecrypt) {
		if ((*algo->finalizedecrypt)(sav, saved_icv, algo->icvlen)) {
			ipseclog((LOG_ERR, "esp4 packet decryption ICV failure\n"));
			IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
			KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 1, 0, 0, 0, 0);
			goto bad;
		}
	}

	/*
	 * find the trailer of the ESP.
	 */
	m_copydata(m, m->m_pkthdr.len - sizeof(esptail), sizeof(esptail),
	    (caddr_t)&esptail);
	nxt = esptail.esp_nxt;
	taillen = esptail.esp_padlen + sizeof(esptail);

	if (m->m_pkthdr.len < taillen
	    || m->m_pkthdr.len - taillen < hlen) { /*?*/
		ipseclog((LOG_WARNING,
		    "bad pad length in IPv4 ESP input: %s %s\n",
		    ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
		IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
		goto bad;
	}
	/* strip off the trailing pad area. */
	m_adj(m, -taillen);
	ip = mtod(m, struct ip *);
#ifdef IPLEN_FLIPPED
	ip->ip_len = ip->ip_len - taillen;
#else
	ip->ip_len = htons(ntohs(ip->ip_len) - taillen);
#endif
	if (ip->ip_p == IPPROTO_UDP) {
		// offset includes the outer ip and udp header lengths.
		if (m->m_len < off) {
			m = m_pullup(m, off);
			if (!m) {
				ipseclog((LOG_DEBUG,
				    "IPv4 ESP input: invalid udp encapsulated ESP packet length \n"));
				IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
				goto bad;
			}
			ip = mtod(m, struct ip *);
		}

		// check the UDP encap header to detect changes in the source port, and then strip the header
		off -= sizeof(struct udphdr); // off no longer includes the udphdr's size
		// if peer is behind nat and this is the latest esp packet
		if ((sav->flags & SADB_X_EXT_NATT_DETECTED_PEER) != 0 &&
		    (sav->flags & SADB_X_EXT_OLD) == 0 &&
		    seq && sav->replay[traffic_class] &&
		    seq >= sav->replay[traffic_class]->lastseq) {
			struct udphdr *encap_uh = (__typeof__(encap_uh))(void *)((caddr_t)ip + off);
			if (encap_uh->uh_sport &&
			    ntohs(encap_uh->uh_sport) != sav->remote_ike_port) {
				sav->remote_ike_port = ntohs(encap_uh->uh_sport);
			}
		}
		ip = esp4_input_strip_udp_encap(m, off);
		esp = (struct esp *)(void *)(((u_int8_t *)ip) + off);
	}
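	/*
	 * For UDP-encapsulated ESP the block above also tracks NAT rebinding:
	 * if the peer is known to be behind a NAT and this packet is not older
	 * than the last accepted sequence number, a change in the encapsulating
	 * UDP source port is copied into sav->remote_ike_port before the UDP
	 * header is stripped.
	 */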
	/* was it transmitted over the IPsec tunnel SA? */
	if (ipsec4_tunnel_validate(m, off + esplen + ivlen, nxt, sav, &ifamily)) {
		ifaddr_t ifa;
		struct sockaddr_storage addr;

		/*
		 * strip off all the headers that precedes ESP header.
		 *	IP4 xx ESP IP4' payload -> IP4' payload
		 *
		 * XXX more sanity checks
		 * XXX relationship with gif?
		 */
		u_int8_t tos, otos;
		int sum;

		tos = ip->ip_tos;
		m_adj(m, off + esplen + ivlen);
		if (ifamily == AF_INET) {
			struct sockaddr_in *ipaddr;

			if (m->m_len < sizeof(*ip)) {
				m = m_pullup(m, sizeof(*ip));
				if (!m) {
					IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
					goto bad;
				}
			}
			ip = mtod(m, struct ip *);
			/* ECN consideration. */

			otos = ip->ip_tos;
			if (ip_ecn_egress(ip4_ipsec_ecn, &tos, &ip->ip_tos) == 0) {
				IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
				goto bad;
			}

			if (otos != ip->ip_tos) {
				sum = ~ntohs(ip->ip_sum) & 0xffff;
				sum += (~otos & 0xffff) + ip->ip_tos;
				sum = (sum >> 16) + (sum & 0xffff);
				sum += (sum >> 16); /* add carry */
				ip->ip_sum = htons(~sum & 0xffff);
			}

			if (!key_checktunnelsanity(sav, AF_INET,
			    (caddr_t)&ip->ip_src, (caddr_t)&ip->ip_dst)) {
				ipseclog((LOG_ERR, "ipsec tunnel address mismatch "
				    "in ESP input: %s %s\n",
				    ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
				IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
				goto bad;
			}

			bzero(&addr, sizeof(addr));
			ipaddr = (__typeof__(ipaddr)) & addr;
			ipaddr->sin_family = AF_INET;
			ipaddr->sin_len = sizeof(*ipaddr);
			ipaddr->sin_addr = ip->ip_dst;
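			/*
			 * When ECN egress processing changes the inner ToS byte,
			 * the IPv4 header checksum is patched with one's-complement
			 * arithmetic (incremental update in the style of RFC 1624)
			 * rather than being recomputed over the whole header.
			 */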
		} else if (ifamily == AF_INET6) {
			struct sockaddr_in6 *ip6addr;

			/*
			 * m_pullup is prohibited in KAME IPv6 input processing
			 * but there's no other way!
			 */
			if (m->m_len < sizeof(*ip6)) {
				m = m_pullup(m, sizeof(*ip6));
				if (!m) {
					IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
					goto bad;
				}
			}

			/*
			 * Expect 32-bit aligned data pointer on strict-align
			 * platforms.
			 */
			MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);

			ip6 = mtod(m, struct ip6_hdr *);

			/* ECN consideration. */
			if (ip64_ecn_egress(ip4_ipsec_ecn, &tos, &ip6->ip6_flow) == 0) {
				IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
				goto bad;
			}

			if (!key_checktunnelsanity(sav, AF_INET6,
			    (caddr_t)&ip6->ip6_src, (caddr_t)&ip6->ip6_dst)) {
				ipseclog((LOG_ERR, "ipsec tunnel address mismatch "
				    "in ESP input: %s %s\n",
				    ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
				IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
				goto bad;
			}

			bzero(&addr, sizeof(addr));
			ip6addr = (__typeof__(ip6addr)) & addr;
			ip6addr->sin6_family = AF_INET6;
			ip6addr->sin6_len = sizeof(*ip6addr);
			ip6addr->sin6_addr = ip6->ip6_dst;
		} else {
			ipseclog((LOG_ERR, "ipsec tunnel unsupported address family "
			    "in ESP input\n"));
			goto bad;
		}

		key_sa_recordxfer(sav, m);
		if (ipsec_addhist(m, IPPROTO_ESP, spi) != 0 ||
		    ipsec_addhist(m, IPPROTO_IPV4, 0) != 0) {
			IPSEC_STAT_INCREMENT(ipsecstat.in_nomem);
			goto bad;
		}

		// update the receiving interface address based on the inner address
		ifa = ifa_ifwithaddr((struct sockaddr *)&addr);
		if (ifa) {
			m->m_pkthdr.rcvif = ifa->ifa_ifp;
			IFA_REMREF(ifa);
		}

		/* Clear the csum flags, they can't be valid for the inner headers */
		m->m_pkthdr.csum_flags = 0;
		// Input via IPsec interface
		lck_mtx_lock(sadb_mutex);
		ifnet_t ipsec_if = sav->sah->ipsec_if;
		if (ipsec_if != NULL) {
			// If an interface is found, add a reference count before dropping the lock
			ifnet_reference(ipsec_if);
		}
		lck_mtx_unlock(sadb_mutex);
		if (ipsec_if != NULL) {
			esp_input_log(m, sav, spi, seq);
			ipsec_save_wake_packet(m, ntohl(spi), seq);

			// Return mbuf
			if (interface != NULL &&
			    interface == ipsec_if) {
				out_m = m;
				ifnet_release(ipsec_if);
				goto done;
			}

			errno_t inject_error = ipsec_inject_inbound_packet(ipsec_if, m);
			ifnet_release(ipsec_if);

			if (inject_error == 0) {
				m = NULL;
				goto done;
			} else {
				goto bad;
			}
		}

		if (proto_input(ifamily == AF_INET ? PF_INET : PF_INET6, m) != 0) {
			goto bad;
		}

		nxt = IPPROTO_DONE;
		KERNEL_DEBUG(DBG_FNC_ESPIN | DBG_FUNC_END, 2, 0, 0, 0, 0);
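		/*
		 * Tunnel-mode delivery takes one of two paths: if the SA is bound to
		 * an ipsec interface, the decrypted inner packet is logged and handed
		 * to that interface via ipsec_inject_inbound_packet() (or returned to
		 * the caller when it already arrived on that interface); otherwise it
		 * is passed straight to the stack with proto_input() for the inner
		 * address family.
		 */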
	} else {
		/*
		 * strip off ESP header and IV.
		 * even in m_pulldown case, we need to strip off ESP so that
		 * we can always compute checksum for AH correctly.
		 */
		size_t stripsiz;

		stripsiz = esplen + ivlen;

		ip = mtod(m, struct ip *);
		ovbcopy((caddr_t)ip, (caddr_t)(((u_char *)ip) + stripsiz), off);
		m->m_data += stripsiz;
		m->m_len -= stripsiz;
		m->m_pkthdr.len -= stripsiz;

		ip = mtod(m, struct ip *);
#ifdef IPLEN_FLIPPED
		ip->ip_len = ip->ip_len - stripsiz;
#else
		ip->ip_len = htons(ntohs(ip->ip_len) - stripsiz);
#endif
		ip->ip_p = nxt;

		key_sa_recordxfer(sav, m);
		if (ipsec_addhist(m, IPPROTO_ESP, spi) != 0) {
			IPSEC_STAT_INCREMENT(ipsecstat.in_nomem);
			goto bad;
		}

		/*
		 * Set the csum valid flag, if we authenticated the
		 * packet, the payload shouldn't be corrupt unless
		 * it was corrupted before being signed on the other
		 * side.
		 */
		if (nxt == IPPROTO_TCP || nxt == IPPROTO_UDP) {
			m->m_pkthdr.csum_flags = CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
			m->m_pkthdr.csum_data = 0xFFFF;
			_CASSERT(offsetof(struct pkthdr, csum_data) == offsetof(struct pkthdr, csum_rx_val));
		}

		if (nxt != IPPROTO_DONE) {
			if ((ip_protox[nxt]->pr_flags & PR_LASTHDR) != 0 &&
			    ipsec4_in_reject(m, NULL)) {
				IPSEC_STAT_INCREMENT(ipsecstat.in_polvio);
				goto bad;
			}
			KERNEL_DEBUG(DBG_FNC_ESPIN | DBG_FUNC_END, 3, 0, 0, 0, 0);

			/* translate encapsulated UDP port ? */
			if ((sav->flags & SADB_X_EXT_NATT_MULTIPLEUSERS) != 0) {
				struct udphdr *udp;

				if (nxt != IPPROTO_UDP) { /* not UPD packet - drop it */
					IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
					goto bad;
				}

				if (m->m_len < off + sizeof(struct udphdr)) {
					m = m_pullup(m, off + sizeof(struct udphdr));
					if (!m) {
						ipseclog((LOG_DEBUG,
						    "IPv4 ESP input: can't pullup UDP header in esp4_input\n"));
						IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
						goto bad;
					}
					ip = mtod(m, struct ip *);
				}
				udp = (struct udphdr *)(void *)(((u_int8_t *)ip) + off);

				lck_mtx_lock(sadb_mutex);
				if (sav->natt_encapsulated_src_port == 0) {
					sav->natt_encapsulated_src_port = udp->uh_sport;
				} else if (sav->natt_encapsulated_src_port != udp->uh_sport) { /* something wrong */
					IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
					lck_mtx_unlock(sadb_mutex);
					goto bad;
				}
				lck_mtx_unlock(sadb_mutex);
				udp->uh_sport = htons(sav->remote_ike_port);
				udp->uh_sum = 0;
			}

			DTRACE_IP6(receive, struct mbuf *, m, struct inpcb *, NULL,
			    struct ip *, ip, struct ifnet *, m->m_pkthdr.rcvif,
			    struct ip *, ip, struct ip6_hdr *, NULL);

			// Input via IPsec interface legacy path
			lck_mtx_lock(sadb_mutex);
			ifnet_t ipsec_if = sav->sah->ipsec_if;
			if (ipsec_if != NULL) {
				// If an interface is found, add a reference count before dropping the lock
				ifnet_reference(ipsec_if);
			}
			lck_mtx_unlock(sadb_mutex);
			if (ipsec_if != NULL) {
				int mlen;
				if ((mlen = m_length2(m, NULL)) < hlen) {
					ipseclog((LOG_DEBUG,
					    "IPv4 ESP input: decrypted packet too short %d < %zu\n",
					    mlen, hlen));
					IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
					ifnet_release(ipsec_if);
					goto bad;
				}
				ip->ip_len = htons(ip->ip_len + hlen);
				ip->ip_off = htons(ip->ip_off);
				ip->ip_sum = 0;
				ip->ip_sum = ip_cksum_hdr_in(m, hlen);

				esp_input_log(m, sav, spi, seq);
				ipsec_save_wake_packet(m, ntohl(spi), seq);

				// Return mbuf
				if (interface != NULL &&
				    interface == ipsec_if) {
					out_m = m;
					ifnet_release(ipsec_if);
					goto done;
				}

				errno_t inject_error = ipsec_inject_inbound_packet(ipsec_if, m);
				ifnet_release(ipsec_if);

				if (inject_error == 0) {
					m = NULL;
					goto done;
				} else {
					goto bad;
				}
			}

			ip_proto_dispatch_in(m, off, nxt, 0);
		} else {
			m_freem(m);
		}
		m = NULL;
	}
done:
	if (sav) {
		KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
		    printf("DP esp4_input call free SA:0x%llx\n",
		    (uint64_t)VM_KERNEL_ADDRPERM(sav)));
		key_freesav(sav, KEY_SADB_UNLOCKED);
	}
	IPSEC_STAT_INCREMENT(ipsecstat.in_success);
	return out_m;
bad:
	if (sav) {
		KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
		    printf("DP esp4_input call free SA:0x%llx\n",
		    (uint64_t)VM_KERNEL_ADDRPERM(sav)));
		key_freesav(sav, KEY_SADB_UNLOCKED);
	}
	if (m) {
		m_freem(m);
	}
	KERNEL_DEBUG(DBG_FNC_ESPIN | DBG_FUNC_END, 4, 0, 0, 0, 0);
	return out_m;
}
int
esp6_input(struct mbuf **mp, int *offp, int proto)
{
	return esp6_input_extended(mp, offp, proto, NULL);
}
int
esp6_input_extended(struct mbuf **mp, int *offp, int proto, ifnet_t interface)
{
#pragma unused(proto)
	struct mbuf *m = *mp;
	int off = *offp;
	struct ip *ip;
	struct ip6_hdr *ip6;
	struct esp *esp;
	struct esptail esptail;
	u_int32_t spi;
	u_int32_t seq;
	struct secasvar *sav = NULL;
	size_t taillen;
	u_int16_t nxt;
	char *nproto;
	const struct esp_algorithm *algo;
	int ivlen;
	size_t esplen;
	sa_family_t ifamily;
	mbuf_traffic_class_t traffic_class = 0;

	/* sanity check for alignment. */
	if (off % 4 != 0 || m->m_pkthdr.len % 4 != 0) {
		ipseclog((LOG_ERR, "IPv6 ESP input: packet alignment problem "
		    "(off=%d, pktlen=%d)\n", off, m->m_pkthdr.len));
		IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
		goto bad;
	}

#ifndef PULLDOWN_TEST
	IP6_EXTHDR_CHECK(m, off, ESPMAXLEN, {return IPPROTO_DONE;});
	esp = (struct esp *)(void *)(mtod(m, caddr_t) + off);
#else
	IP6_EXTHDR_GET(esp, struct esp *, m, off, ESPMAXLEN);
	if (esp == NULL) {
		IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
		return IPPROTO_DONE;
	}
#endif
	m->m_pkthdr.csum_flags &= ~CSUM_RX_FLAGS;

	/* Expect 32-bit data aligned pointer on strict-align platforms */
	MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);

	ip6 = mtod(m, struct ip6_hdr *);

	if (ntohs(ip6->ip6_plen) == 0) {
		ipseclog((LOG_ERR, "IPv6 ESP input: "
		    "ESP with IPv6 jumbogram is not supported.\n"));
		IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
		goto bad;
	}

	nproto = ip6_get_prevhdr(m, off);
	if (nproto == NULL || (*nproto != IPPROTO_ESP &&
	    !(*nproto == IPPROTO_UDP && off >= sizeof(struct udphdr)))) {
		ipseclog((LOG_DEBUG, "IPv6 ESP input: invalid protocol type\n"));
		IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
		goto bad;
	}
	/* find the sassoc. */
	spi = esp->esp_spi;

	if ((sav = key_allocsa_extended(AF_INET6,
	    (caddr_t)&ip6->ip6_src, (caddr_t)&ip6->ip6_dst,
	    IPPROTO_ESP, spi, interface)) == 0) {
		ipseclog((LOG_WARNING,
		    "IPv6 ESP input: no key association found for spi %u (0x%08x) seq %u"
		    " src %04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x"
		    " dst %04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x if %s\n",
		    (u_int32_t)ntohl(spi), (u_int32_t)ntohl(spi), ntohl(((struct newesp *)esp)->esp_seq),
		    ntohs(ip6->ip6_src.__u6_addr.__u6_addr16[0]), ntohs(ip6->ip6_src.__u6_addr.__u6_addr16[1]),
		    ntohs(ip6->ip6_src.__u6_addr.__u6_addr16[2]), ntohs(ip6->ip6_src.__u6_addr.__u6_addr16[3]),
		    ntohs(ip6->ip6_src.__u6_addr.__u6_addr16[4]), ntohs(ip6->ip6_src.__u6_addr.__u6_addr16[5]),
		    ntohs(ip6->ip6_src.__u6_addr.__u6_addr16[6]), ntohs(ip6->ip6_src.__u6_addr.__u6_addr16[7]),
		    ntohs(ip6->ip6_dst.__u6_addr.__u6_addr16[0]), ntohs(ip6->ip6_dst.__u6_addr.__u6_addr16[1]),
		    ntohs(ip6->ip6_dst.__u6_addr.__u6_addr16[2]), ntohs(ip6->ip6_dst.__u6_addr.__u6_addr16[3]),
		    ntohs(ip6->ip6_dst.__u6_addr.__u6_addr16[4]), ntohs(ip6->ip6_dst.__u6_addr.__u6_addr16[5]),
		    ntohs(ip6->ip6_dst.__u6_addr.__u6_addr16[6]), ntohs(ip6->ip6_dst.__u6_addr.__u6_addr16[7]),
		    ((interface != NULL) ? if_name(interface) : "NONE")));
		IPSEC_STAT_INCREMENT(ipsec6stat.in_nosa);
		goto bad;
	}
	KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
	    printf("DP esp6_input called to allocate SA:0x%llx\n",
	    (uint64_t)VM_KERNEL_ADDRPERM(sav)));
	if (sav->state != SADB_SASTATE_MATURE
	    && sav->state != SADB_SASTATE_DYING) {
		ipseclog((LOG_DEBUG,
		    "IPv6 ESP input: non-mature/dying SA found for spi %u (0x%08x)\n",
		    (u_int32_t)ntohl(spi), (u_int32_t)ntohl(spi)));
		IPSEC_STAT_INCREMENT(ipsec6stat.in_badspi);
		goto bad;
	}
	algo = esp_algorithm_lookup(sav->alg_enc);
	if (!algo) {
		ipseclog((LOG_DEBUG, "IPv6 ESP input: "
		    "unsupported encryption algorithm for spi %u (0x%08x)\n",
		    (u_int32_t)ntohl(spi), (u_int32_t)ntohl(spi)));
		IPSEC_STAT_INCREMENT(ipsec6stat.in_badspi);
		goto bad;
	}

	/* check if we have proper ivlen information */
	ivlen = sav->ivlen;
	if (ivlen < 0) {
		ipseclog((LOG_ERR, "inproper ivlen in IPv6 ESP input: %s %s\n",
		    ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
		IPSEC_STAT_INCREMENT(ipsec6stat.in_badspi);
		goto bad;
	}
	seq = ntohl(((struct newesp *)esp)->esp_seq);

	if ((sav->flags2 & SADB_X_EXT_SA2_SEQ_PER_TRAFFIC_CLASS) ==
	    SADB_X_EXT_SA2_SEQ_PER_TRAFFIC_CLASS) {
		u_int8_t dscp = (ntohl(ip6->ip6_flow) & IP6FLOW_DSCP_MASK) >> IP6FLOW_DSCP_SHIFT;
		traffic_class = rfc4594_dscp_to_tc(dscp);
	}

	/* Save ICV from packet for verification later */
	size_t siz = 0;
	unsigned char saved_icv[AH_MAXSUMSIZE];
	if (algo->finalizedecrypt) {
		siz = algo->icvlen;
		m_copydata(m, m->m_pkthdr.len - siz, siz, (caddr_t) saved_icv);
		goto delay_icv;
	}

	if (!((sav->flags & SADB_X_EXT_OLD) == 0 &&
	    sav->replay[traffic_class] != NULL &&
	    (sav->alg_auth && sav->key_auth))) {
		goto noreplaycheck;
	}

	if (sav->alg_auth == SADB_X_AALG_NULL ||
	    sav->alg_auth == SADB_AALG_NONE) {
		goto noreplaycheck;
	}

	/*
	 * check for sequence number.
	 */
	if (ipsec_chkreplay(seq, sav, traffic_class)) {
		; /*okey*/
	} else {
		IPSEC_STAT_INCREMENT(ipsec6stat.in_espreplay);
		ipseclog((LOG_WARNING,
		    "replay packet in IPv6 ESP input: %s %s\n",
		    ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
		goto bad;
	}
	/* check ICV */
	{
		u_char sum0[AH_MAXSUMSIZE] __attribute__((aligned(4)));
		u_char sum[AH_MAXSUMSIZE] __attribute__((aligned(4)));
		const struct ah_algorithm *sumalgo;

		sumalgo = ah_algorithm_lookup(sav->alg_auth);
		if (!sumalgo) {
			goto noreplaycheck;
		}
		siz = (((*sumalgo->sumsiz)(sav) + 3) & ~(4 - 1));
		if (m->m_pkthdr.len < off + ESPMAXLEN + siz) {
			IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
			goto bad;
		}
		if (AH_MAXSUMSIZE < siz) {
			ipseclog((LOG_DEBUG,
			    "internal error: AH_MAXSUMSIZE must be larger than %u\n",
			    (u_int32_t)siz));
			IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
			goto bad;
		}

		m_copydata(m, m->m_pkthdr.len - siz, siz, (caddr_t) &sum0[0]);

		if (esp_auth(m, off, m->m_pkthdr.len - off - siz, sav, sum)) {
			ipseclog((LOG_WARNING, "auth fail in IPv6 ESP input: %s %s\n",
			    ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
			IPSEC_STAT_INCREMENT(ipsec6stat.in_espauthfail);
			goto bad;
		}

		if (cc_cmp_safe(siz, sum0, sum)) {
			ipseclog((LOG_WARNING, "auth fail in IPv6 ESP input: %s %s\n",
			    ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
			IPSEC_STAT_INCREMENT(ipsec6stat.in_espauthfail);
			goto bad;
		}

delay_icv:

		/* strip off the authentication data */
		m_adj(m, -siz);
		ip6 = mtod(m, struct ip6_hdr *);
		ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - siz);

		m->m_flags |= M_AUTHIPDGM;
		IPSEC_STAT_INCREMENT(ipsec6stat.in_espauthsucc);
	}

	/*
	 * update sequence number.
	 */
	if ((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay[traffic_class] != NULL) {
		if (ipsec_updatereplay(seq, sav, traffic_class)) {
			IPSEC_STAT_INCREMENT(ipsec6stat.in_espreplay);
			goto bad;
		}
	}

noreplaycheck:
	/* process main esp header. */
	if (sav->flags & SADB_X_EXT_OLD) {
		/* RFC 1827 */
		esplen = sizeof(struct esp);
	} else {
		/* RFC 2406 */
		if (sav->flags & SADB_X_EXT_DERIV) {
			esplen = sizeof(struct esp);
		} else {
			esplen = sizeof(struct newesp);
		}
	}

	if (m->m_pkthdr.len < off + esplen + ivlen + sizeof(esptail)) {
		ipseclog((LOG_WARNING,
		    "IPv6 ESP input: packet too short\n"));
		IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
		goto bad;
	}

#ifndef PULLDOWN_TEST
	IP6_EXTHDR_CHECK(m, off, esplen + ivlen, return IPPROTO_DONE); /*XXX*/
#else
	IP6_EXTHDR_GET(esp, struct esp *, m, off, esplen + ivlen);
	if (esp == NULL) {
		IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
		m = NULL;
		goto bad;
	}
#endif
	ip6 = mtod(m, struct ip6_hdr *); /*set it again just in case*/

	/*
	 * pre-compute and cache intermediate key
	 */
	if (esp_schedule(algo, sav) != 0) {
		IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
		goto bad;
	}

	/*
	 * decrypt the packet.
	 */
	if (!algo->decrypt) {
		panic("internal error: no decrypt function");
	}
	if ((*algo->decrypt)(m, off, sav, algo, ivlen)) {
		/* m is already freed */
		m = NULL;
		ipseclog((LOG_ERR, "decrypt fail in IPv6 ESP input: %s\n",
		    ipsec_logsastr(sav)));
		IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
		goto bad;
	}
	IPSEC_STAT_INCREMENT(ipsec6stat.in_esphist[sav->alg_enc]);

	m->m_flags |= M_DECRYPTED;

	if (algo->finalizedecrypt) {
		if ((*algo->finalizedecrypt)(sav, saved_icv, algo->icvlen)) {
			ipseclog((LOG_ERR, "esp6 packet decryption ICV failure\n"));
			IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
			KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 1, 0, 0, 0, 0);
			goto bad;
		}
	}
	/*
	 * find the trailer of the ESP.
	 */
	m_copydata(m, m->m_pkthdr.len - sizeof(esptail), sizeof(esptail),
	    (caddr_t)&esptail);
	nxt = esptail.esp_nxt;
	taillen = esptail.esp_padlen + sizeof(esptail);

	if (m->m_pkthdr.len < taillen
	    || m->m_pkthdr.len - taillen < sizeof(struct ip6_hdr)) { /*?*/
		ipseclog((LOG_WARNING,
		    "bad pad length in IPv6 ESP input: %s %s\n",
		    ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
		IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
		goto bad;
	}

	/* strip off the trailing pad area. */
	m_adj(m, -taillen);
	ip6 = mtod(m, struct ip6_hdr *);
	ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - taillen);

	if (*nproto == IPPROTO_UDP) {
		// offset includes the outer ip and udp header lengths.
		if (m->m_len < off) {
			m = m_pullup(m, off);
			if (!m) {
				ipseclog((LOG_DEBUG,
				    "IPv6 ESP input: invalid udp encapsulated ESP packet length\n"));
				IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
				goto bad;
			}
			ip6 = mtod(m, struct ip6_hdr *);
		}

		// check the UDP encap header to detect changes in the source port, and then strip the header
		off -= sizeof(struct udphdr); // off no longer includes the udphdr's size
		// if peer is behind nat and this is the latest esp packet
		if ((sav->flags & SADB_X_EXT_NATT_DETECTED_PEER) != 0 &&
		    (sav->flags & SADB_X_EXT_OLD) == 0 &&
		    seq && sav->replay[traffic_class] &&
		    seq >= sav->replay[traffic_class]->lastseq) {
			struct udphdr *encap_uh = (__typeof__(encap_uh))(void *)((caddr_t)ip6 + off);
			if (encap_uh->uh_sport &&
			    ntohs(encap_uh->uh_sport) != sav->remote_ike_port) {
				sav->remote_ike_port = ntohs(encap_uh->uh_sport);
			}
		}
		ip6 = esp6_input_strip_udp_encap(m, off);
		esp = (struct esp *)(void *)(((u_int8_t *)ip6) + off);
	}
	/* was it transmitted over the IPsec tunnel SA? */
	if (ipsec6_tunnel_validate(m, off + esplen + ivlen, nxt, sav, &ifamily)) {
		ifaddr_t ifa;
		struct sockaddr_storage addr;

		/*
		 * strip off all the headers that precedes ESP header.
		 *	IP6 xx ESP IP6' payload -> IP6' payload
		 *
		 * XXX more sanity checks
		 * XXX relationship with gif?
		 */
		u_int32_t flowinfo; /*net endian*/
		flowinfo = ip6->ip6_flow;
		m_adj(m, off + esplen + ivlen);
		if (ifamily == AF_INET6) {
			struct sockaddr_in6 *ip6addr;

			if (m->m_len < sizeof(*ip6)) {
#ifndef PULLDOWN_TEST
				/*
				 * m_pullup is prohibited in KAME IPv6 input processing
				 * but there's no other way!
				 */
#else
				/* okay to pullup in m_pulldown style */
#endif
				m = m_pullup(m, sizeof(*ip6));
				if (!m) {
					IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
					goto bad;
				}
			}
			ip6 = mtod(m, struct ip6_hdr *);
			/* ECN consideration. */
			if (ip6_ecn_egress(ip6_ipsec_ecn, &flowinfo, &ip6->ip6_flow) == 0) {
				IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
				goto bad;
			}
			if (!key_checktunnelsanity(sav, AF_INET6,
			    (caddr_t)&ip6->ip6_src, (caddr_t)&ip6->ip6_dst)) {
				ipseclog((LOG_ERR, "ipsec tunnel address mismatch "
				    "in IPv6 ESP input: %s %s\n",
				    ipsec6_logpacketstr(ip6, spi),
				    ipsec_logsastr(sav)));
				IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
				goto bad;
			}

			bzero(&addr, sizeof(addr));
			ip6addr = (__typeof__(ip6addr)) & addr;
			ip6addr->sin6_family = AF_INET6;
			ip6addr->sin6_len = sizeof(*ip6addr);
			ip6addr->sin6_addr = ip6->ip6_dst;
		} else if (ifamily == AF_INET) {
			struct sockaddr_in *ipaddr;

			if (m->m_len < sizeof(*ip)) {
				m = m_pullup(m, sizeof(*ip));
				if (!m) {
					IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
					goto bad;
				}
			}

			u_int8_t otos;
			int sum;

			ip = mtod(m, struct ip *);
			otos = ip->ip_tos;
			/* ECN consideration. */
			if (ip46_ecn_egress(ip6_ipsec_ecn, &flowinfo, &ip->ip_tos) == 0) {
				IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
				goto bad;
			}

			if (otos != ip->ip_tos) {
				sum = ~ntohs(ip->ip_sum) & 0xffff;
				sum += (~otos & 0xffff) + ip->ip_tos;
				sum = (sum >> 16) + (sum & 0xffff);
				sum += (sum >> 16); /* add carry */
				ip->ip_sum = htons(~sum & 0xffff);
			}

			if (!key_checktunnelsanity(sav, AF_INET,
			    (caddr_t)&ip->ip_src, (caddr_t)&ip->ip_dst)) {
				ipseclog((LOG_ERR, "ipsec tunnel address mismatch "
				    "in ESP input: %s %s\n",
				    ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
				IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
				goto bad;
			}

			bzero(&addr, sizeof(addr));
			ipaddr = (__typeof__(ipaddr)) & addr;
			ipaddr->sin_family = AF_INET;
			ipaddr->sin_len = sizeof(*ipaddr);
			ipaddr->sin_addr = ip->ip_dst;
		}

		key_sa_recordxfer(sav, m);
		if (ipsec_addhist(m, IPPROTO_ESP, spi) != 0 ||
		    ipsec_addhist(m, IPPROTO_IPV6, 0) != 0) {
			IPSEC_STAT_INCREMENT(ipsec6stat.in_nomem);
			goto bad;
		}

		// update the receiving interface address based on the inner address
		ifa = ifa_ifwithaddr((struct sockaddr *)&addr);
		if (ifa) {
			m->m_pkthdr.rcvif = ifa->ifa_ifp;
			IFA_REMREF(ifa);
		}
		// Input via IPsec interface
		lck_mtx_lock(sadb_mutex);
		ifnet_t ipsec_if = sav->sah->ipsec_if;
		if (ipsec_if != NULL) {
			// If an interface is found, add a reference count before dropping the lock
			ifnet_reference(ipsec_if);
		}
		lck_mtx_unlock(sadb_mutex);
		if (ipsec_if != NULL) {
			esp_input_log(m, sav, spi, seq);
			ipsec_save_wake_packet(m, ntohl(spi), seq);

			// Return mbuf
			if (interface != NULL &&
			    interface == ipsec_if) {
				ifnet_release(ipsec_if);
				goto done;
			}

			errno_t inject_error = ipsec_inject_inbound_packet(ipsec_if, m);
			ifnet_release(ipsec_if);

			if (inject_error == 0) {
				m = NULL;
				nxt = IPPROTO_DONE;
				goto done;
			} else {
				goto bad;
			}
		}

		if (proto_input(ifamily == AF_INET ? PF_INET : PF_INET6, m) != 0) {
			goto bad;
		}
		nxt = IPPROTO_DONE;
	} else {
		/*
		 * strip off ESP header and IV.
		 * even in m_pulldown case, we need to strip off ESP so that
		 * we can always compute checksum for AH correctly.
		 */
		size_t stripsiz;
		char *prvnxtp;

		/*
		 * Set the next header field of the previous header correctly.
		 */
		prvnxtp = ip6_get_prevhdr(m, off); /* XXX */
		*prvnxtp = nxt;

		stripsiz = esplen + ivlen;

		ip6 = mtod(m, struct ip6_hdr *);
		if (m->m_len >= stripsiz + off) {
			ovbcopy((caddr_t)ip6, ((caddr_t)ip6) + stripsiz, off);
			m->m_data += stripsiz;
			m->m_len -= stripsiz;
			m->m_pkthdr.len -= stripsiz;
		} else {
			/*
			 * this comes with no copy if the boundary is on
			 * cluster.
			 */
			struct mbuf *n;

			n = m_split(m, off, M_DONTWAIT);
			if (n == NULL) {
				/* m is retained by m_split */
				goto bad;
			}
			m_adj(n, stripsiz);
			/* m_cat does not update m_pkthdr.len */
			m->m_pkthdr.len += n->m_pkthdr.len;
			m_cat(m, n);
		}

#ifndef PULLDOWN_TEST
		/*
		 * KAME requires that the packet to be contiguous on the
		 * mbuf.  We need to make that sure.
		 * this kind of code should be avoided.
		 * XXX other conditions to avoid running this part?
		 */
		if (m->m_len != m->m_pkthdr.len) {
			struct mbuf *n = NULL;
			int maxlen;

			MGETHDR(n, M_DONTWAIT, MT_HEADER); /* MAC-OK */
			maxlen = MHLEN;
			if (n) {
				M_COPY_PKTHDR(n, m);
			}
			if (n && m->m_pkthdr.len > maxlen) {
				MCLGET(n, M_DONTWAIT);
				maxlen = MCLBYTES;
				if ((n->m_flags & M_EXT) == 0) {
					m_free(n);
					n = NULL;
				}
			}
			if (!n) {
				printf("esp6_input: mbuf allocation failed\n");
				goto bad;
			}

			if (m->m_pkthdr.len <= maxlen) {
				m_copydata(m, 0, m->m_pkthdr.len, mtod(n, caddr_t));
				n->m_len = m->m_pkthdr.len;
				n->m_pkthdr.len = m->m_pkthdr.len;
				n->m_next = NULL;
				m_freem(m);
			} else {
				m_copydata(m, 0, maxlen, mtod(n, caddr_t));
				n->m_len = maxlen;
				n->m_pkthdr.len = m->m_pkthdr.len;
				n->m_next = m;
				m_adj(m, maxlen);
				m->m_flags &= ~M_PKTHDR;
			}
			m = n;
		}
#endif

		ip6 = mtod(m, struct ip6_hdr *);
		ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - stripsiz);

		key_sa_recordxfer(sav, m);
		if (ipsec_addhist(m, IPPROTO_ESP, spi) != 0) {
			IPSEC_STAT_INCREMENT(ipsec6stat.in_nomem);
			goto bad;
		}

		/*
		 * Set the csum valid flag, if we authenticated the
		 * packet, the payload shouldn't be corrupt unless
		 * it was corrupted before being signed on the other
		 * side.
		 */
		if (nxt == IPPROTO_TCP || nxt == IPPROTO_UDP) {
			m->m_pkthdr.csum_flags = CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
			m->m_pkthdr.csum_data = 0xFFFF;
			_CASSERT(offsetof(struct pkthdr, csum_data) == offsetof(struct pkthdr, csum_rx_val));
		}
		// Input via IPsec interface
		lck_mtx_lock(sadb_mutex);
		ifnet_t ipsec_if = sav->sah->ipsec_if;
		if (ipsec_if != NULL) {
			// If an interface is found, add a reference count before dropping the lock
			ifnet_reference(ipsec_if);
		}
		lck_mtx_unlock(sadb_mutex);
		if (ipsec_if != NULL) {
			esp_input_log(m, sav, spi, seq);
			ipsec_save_wake_packet(m, ntohl(spi), seq);

			// Return mbuf
			if (interface != NULL &&
			    interface == ipsec_if) {
				ifnet_release(ipsec_if);
				goto done;
			}

			errno_t inject_error = ipsec_inject_inbound_packet(ipsec_if, m);
			ifnet_release(ipsec_if);

			if (inject_error == 0) {
				m = NULL;
				nxt = IPPROTO_DONE;
				goto done;
			} else {
				goto bad;
			}
		}
	}

done:
	*offp = off;
	*mp = m;
	if (sav) {
		KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
		    printf("DP esp6_input call free SA:0x%llx\n",
		    (uint64_t)VM_KERNEL_ADDRPERM(sav)));
		key_freesav(sav, KEY_SADB_UNLOCKED);
	}
	IPSEC_STAT_INCREMENT(ipsec6stat.in_success);
	return nxt;

bad:
	if (sav) {
		KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
		    printf("DP esp6_input call free SA:0x%llx\n",
		    (uint64_t)VM_KERNEL_ADDRPERM(sav)));
		key_freesav(sav, KEY_SADB_UNLOCKED);
	}
	if (m) {
		m_freem(m);
	}
	if (interface != NULL) {
		*mp = NULL;
	}
	return IPPROTO_DONE;
}
void
esp6_ctlinput(int cmd, struct sockaddr *sa, void *d, __unused struct ifnet *ifp)
{
	const struct newesp *espp;
	struct newesp esp;
	struct ip6ctlparam *ip6cp = NULL, ip6cp1;
	struct secasvar *sav;
	struct ip6_hdr *ip6;
	struct mbuf *m;
	int off = 0;
	struct sockaddr_in6 *sa6_src, *sa6_dst;

	if (sa->sa_family != AF_INET6 ||
	    sa->sa_len != sizeof(struct sockaddr_in6)) {
		return;
	}
	if ((unsigned)cmd >= PRC_NCMDS) {
		return;
	}

	/* if the parameter is from icmp6, decode it. */
	if (d != NULL) {
		ip6cp = (struct ip6ctlparam *)d;
		m = ip6cp->ip6c_m;
		ip6 = ip6cp->ip6c_ip6;
		off = ip6cp->ip6c_off;
	} else {
		m = NULL;
		ip6 = NULL;
	}

	if (ip6 != NULL) {
		/*
		 * Notify the error to all possible sockets via pfctlinput2.
		 * Since the upper layer information (such as protocol type,
		 * source and destination ports) is embedded in the encrypted
		 * data and might have been cut, we can't directly call
		 * an upper layer ctlinput function. However, the pcbnotify
		 * function will consider source and destination addresses
		 * as well as the flow info value, and may be able to find
		 * some PCB that should be notified.
		 * Although pfctlinput2 will call esp6_ctlinput(), there is
		 * no possibility of an infinite loop of function calls,
		 * because we don't pass the inner IPv6 header.
		 */
		bzero(&ip6cp1, sizeof(ip6cp1));
		ip6cp1.ip6c_src = ip6cp->ip6c_src;
		pfctlinput2(cmd, sa, (void *)&ip6cp1);

		/*
		 * Then go to special cases that need ESP header information.
		 * XXX: We assume that when ip6 is non NULL,
		 * M and OFF are valid.
		 */

		/* check if we can safely examine src and dst ports */
		if (m->m_pkthdr.len < off + sizeof(esp)) {
			return;
		}

		if (m->m_len < off + sizeof(esp)) {
			/*
			 * this should be rare case,
			 * so we compromise on this copy...
			 */
			m_copydata(m, off, sizeof(esp), (caddr_t)&esp);
			espp = &esp;
		} else {
			espp = (struct newesp *)(void *)(mtod(m, caddr_t) + off);
		}

		if (cmd == PRC_MSGSIZE) {
			int valid = 0;

			/*
			 * Check to see if we have a valid SA corresponding to
			 * the address in the ICMP message payload.
			 */
			sa6_src = ip6cp->ip6c_src;
			sa6_dst = (struct sockaddr_in6 *)(void *)sa;
			sav = key_allocsa(AF_INET6,
			    (caddr_t)&sa6_src->sin6_addr,
			    (caddr_t)&sa6_dst->sin6_addr,
			    IPPROTO_ESP, espp->esp_spi);
			if (sav) {
				if (sav->state == SADB_SASTATE_MATURE ||
				    sav->state == SADB_SASTATE_DYING) {
					valid++;
				}
				key_freesav(sav, KEY_SADB_UNLOCKED);
			}

			/* XXX Further validation? */

			/*
			 * Depending on the value of "valid" and routing table
			 * size (mtudisc_{hi,lo}wat), we will:
			 * - recalcurate the new MTU and create the
			 *   corresponding routing entry, or
			 * - ignore the MTU change notification.
			 */
			icmp6_mtudisc_update((struct ip6ctlparam *)d, valid);
		}
	} else {
		/* we normally notify any pcb here */
	}
}