/*
 * Copyright (c) 2008-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* $FreeBSD: src/sys/netinet6/esp_input.c,v 1.1.2.3 2001/07/03 11:01:50 ume Exp $ */
/* $KAME: esp_input.c,v 1.55 2001/03/23 08:08:47 itojun Exp $ */
/*
 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the project nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * RFC1827/2406 Encapsulated Security Payload.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mcache.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/syslog.h>

#include <net/if_ipsec.h>
#include <net/route.h>
#include <kern/cpu_number.h>
#include <kern/locks.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/in_var.h>
#include <netinet/ip_ecn.h>
#include <netinet/in_pcb.h>
#include <netinet/udp.h>
#include <netinet/tcp.h>
#include <netinet/in_tclass.h>
#include <netinet6/ip6_ecn.h>

#include <netinet/ip6.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/ip6_var.h>
#include <netinet/icmp6.h>
#include <netinet6/ip6protosw.h>

#include <netinet6/ipsec.h>
#include <netinet6/ipsec6.h>
#include <netinet6/ah.h>
#include <netinet6/ah6.h>
#include <netinet6/esp.h>
#include <netinet6/esp6.h>
#include <netkey/key.h>
#include <netkey/keydb.h>
#include <netkey/key_debug.h>

#include <net/kpi_protocol.h>
#include <netinet/kpi_ipfilter_var.h>

#include <net/net_osdep.h>
#include <mach/sdt.h>
#include <corecrypto/cc.h>

#include <sys/kdebug.h>
#define DBG_LAYER_BEG           NETDBG_CODE(DBG_NETIPSEC, 1)
#define DBG_LAYER_END           NETDBG_CODE(DBG_NETIPSEC, 3)
#define DBG_FNC_ESPIN           NETDBG_CODE(DBG_NETIPSEC, (6 << 8))
#define DBG_FNC_DECRYPT         NETDBG_CODE(DBG_NETIPSEC, (7 << 8))
#define IPLEN_FLIPPED
extern lck_mtx_t *sadb_mutex;

#define ESPMAXLEN \
    (sizeof(struct esp) < sizeof(struct newesp) \
    ? sizeof(struct newesp) : sizeof(struct esp))
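/*
 * ESPMAXLEN is the larger of the two fixed ESP header layouts: the old
 * RFC 1827 header (struct esp, SPI only) and the new RFC 2406 header
 * (struct newesp, SPI plus 32-bit sequence number).  It is used below as
 * a conservative pullup length before the SA, and therefore the actual
 * header format, is known.
 */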
static struct ip *
esp4_input_strip_udp_encap(struct mbuf *m, int iphlen)
{
    // strip the udp header that's encapsulating ESP
    struct ip *ip;
    size_t stripsiz = sizeof(struct udphdr);

    ip = mtod(m, __typeof__(ip));
    ovbcopy((caddr_t)ip, (caddr_t)(((u_char *)ip) + stripsiz), iphlen);
    m->m_data += stripsiz;
    m->m_len -= stripsiz;
    m->m_pkthdr.len -= stripsiz;
    ip = mtod(m, __typeof__(ip));
    ip->ip_len = ip->ip_len - stripsiz;
    ip->ip_p = IPPROTO_ESP;
    return ip;
}
static struct ip6_hdr *
esp6_input_strip_udp_encap(struct mbuf *m, int ip6hlen)
{
    // strip the udp header that's encapsulating ESP
    struct ip6_hdr *ip6;
    size_t stripsiz = sizeof(struct udphdr);

    ip6 = mtod(m, __typeof__(ip6));
    ovbcopy((caddr_t)ip6, (caddr_t)(((u_char *)ip6) + stripsiz), ip6hlen);
    m->m_data += stripsiz;
    m->m_len -= stripsiz;
    m->m_pkthdr.len -= stripsiz;
    ip6 = mtod(m, __typeof__(ip6));
    ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - stripsiz);
    ip6->ip6_nxt = IPPROTO_ESP;
    return ip6;
}
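/*
 * The two helpers above undo UDP encapsulation of ESP (NAT traversal,
 * RFC 3948): the UDP header sitting between the IP/IPv6 header and the
 * ESP header is removed by sliding the outer header forward over it, and
 * the outer length and next-protocol fields are adjusted so the packet
 * looks like plain ESP for the rest of the input path.
 */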
static void
esp_input_log(struct mbuf *m, struct secasvar *sav, u_int32_t spi, u_int32_t seq)
{
    if (net_mpklog_enabled &&
        (sav->sah->ipsec_if->if_xflags & IFXF_MPK_LOG) == IFXF_MPK_LOG) {
        struct tcphdr th = {};
        size_t iphlen = 0;
        u_int32_t proto_len = 0;
        u_int8_t proto = 0;

        struct ip *inner_ip = mtod(m, struct ip *);
        if (IP_VHL_V(inner_ip->ip_vhl) == 4) {
            iphlen = IP_VHL_HL(inner_ip->ip_vhl) << 2;
            proto = inner_ip->ip_p;
        } else if (IP_VHL_V(inner_ip->ip_vhl) == 6) {
            struct ip6_hdr *inner_ip6 = mtod(m, struct ip6_hdr *);
            iphlen = sizeof(struct ip6_hdr);
            proto = inner_ip6->ip6_nxt;
        }

        if (proto == IPPROTO_TCP) {
            if ((int)(iphlen + sizeof(th)) <= m->m_pkthdr.len) {
                m_copydata(m, iphlen, sizeof(th), (u_int8_t *)&th);
            }

            proto_len = m->m_pkthdr.len - iphlen - (th.th_off << 2);
            MPKL_ESP_INPUT_TCP(esp_mpkl_log_object,
                ntohl(spi), seq,
                ntohs(th.th_sport), ntohs(th.th_dport),
                ntohl(th.th_seq), proto_len);
        }
    }
}
void
esp4_input(struct mbuf *m, int off)
{
    (void)esp4_input_extended(m, off, NULL);
}
struct mbuf *
esp4_input_extended(struct mbuf *m, int off, ifnet_t interface)
{
    struct ip *ip;
    struct ip6_hdr *ip6;
    struct esp *esp;
    struct esptail esptail;
    u_int32_t spi;
    u_int32_t seq;
    struct secasvar *sav = NULL;
    size_t taillen;
    u_int16_t nxt;
    const struct esp_algorithm *algo;
    int ivlen;
    size_t hlen;
    size_t esplen;
    sa_family_t ifamily;
    struct mbuf *out_m = NULL;
    mbuf_traffic_class_t traffic_class = 0;

    KERNEL_DEBUG(DBG_FNC_ESPIN | DBG_FUNC_START, 0, 0, 0, 0, 0);
    /* sanity check for alignment. */
    if (off % 4 != 0 || m->m_pkthdr.len % 4 != 0) {
        ipseclog((LOG_ERR, "IPv4 ESP input: packet alignment problem "
            "(off=%d, pktlen=%d)\n", off, m->m_pkthdr.len));
        IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
        goto bad;
    }

    if (m->m_len < off + ESPMAXLEN) {
        m = m_pullup(m, off + ESPMAXLEN);
        if (m == NULL) {
            ipseclog((LOG_DEBUG,
                "IPv4 ESP input: can't pullup in esp4_input\n"));
            IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
            goto bad;
        }
    }

    m->m_pkthdr.csum_flags &= ~CSUM_RX_FLAGS;

    /* Expect 32-bit aligned data pointer on strict-align platforms */
    MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);

    ip = mtod(m, struct ip *);
    // expect udp-encap and esp packets only
    if (ip->ip_p != IPPROTO_ESP &&
        !(ip->ip_p == IPPROTO_UDP && off >= sizeof(struct udphdr))) {
        ipseclog((LOG_DEBUG,
            "IPv4 ESP input: invalid protocol type\n"));
        IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
        goto bad;
    }
    esp = (struct esp *)(void *)(((u_int8_t *)ip) + off);
#ifdef _IP_VHL
    hlen = IP_VHL_HL(ip->ip_vhl) << 2;
#else
    hlen = ip->ip_hl << 2;
#endif
    /* find the sassoc. */
    spi = esp->esp_spi;

    if ((sav = key_allocsa_extended(AF_INET,
        (caddr_t)&ip->ip_src, (caddr_t)&ip->ip_dst,
        IPPROTO_ESP, spi, interface)) == 0) {
        ipseclog((LOG_WARNING,
            "IPv4 ESP input: no key association found for spi %u (0x%08x)\n",
            (u_int32_t)ntohl(spi), (u_int32_t)ntohl(spi)));
        IPSEC_STAT_INCREMENT(ipsecstat.in_nosa);
        goto bad;
    }
    KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
        printf("DP esp4_input called to allocate SA:0x%llx\n",
        (uint64_t)VM_KERNEL_ADDRPERM(sav)));
    if (sav->state != SADB_SASTATE_MATURE
        && sav->state != SADB_SASTATE_DYING) {
        ipseclog((LOG_DEBUG,
            "IPv4 ESP input: non-mature/dying SA found for spi %u (0x%08x)\n",
            (u_int32_t)ntohl(spi), (u_int32_t)ntohl(spi)));
        IPSEC_STAT_INCREMENT(ipsecstat.in_badspi);
        goto bad;
    }

    algo = esp_algorithm_lookup(sav->alg_enc);
    if (!algo) {
        ipseclog((LOG_DEBUG, "IPv4 ESP input: "
            "unsupported encryption algorithm for spi %u (0x%08x)\n",
            (u_int32_t)ntohl(spi), (u_int32_t)ntohl(spi)));
        IPSEC_STAT_INCREMENT(ipsecstat.in_badspi);
        goto bad;
    }

    /* check if we have proper ivlen information */
    ivlen = sav->ivlen;
    if (ivlen < 0) {
        ipseclog((LOG_ERR, "inproper ivlen in IPv4 ESP input: %s %s\n",
            ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
        IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
        goto bad;
    }
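/*
 * At this point the inbound SA has been located by (source, destination,
 * SPI) -- and, when one is given, by the receiving IPsec interface --
 * and checked for a usable state and a supported cipher.  Every failure
 * above is counted and the packet is dropped without further parsing.
 */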
    seq = ntohl(((struct newesp *)esp)->esp_seq);

    if ((sav->flags2 & SADB_X_EXT_SA2_SEQ_PER_TRAFFIC_CLASS) ==
        SADB_X_EXT_SA2_SEQ_PER_TRAFFIC_CLASS) {
        u_int8_t dscp = ip->ip_tos >> IPTOS_DSCP_SHIFT;
        traffic_class = rfc4594_dscp_to_tc(dscp);
    }

    /* Save ICV from packet for verification later */
    size_t siz = 0;
    unsigned char saved_icv[AH_MAXSUMSIZE];
    if (algo->finalizedecrypt) {
        siz = algo->icvlen;
        m_copydata(m, m->m_pkthdr.len - siz, siz, (caddr_t) saved_icv);
        goto delay_icv;
    }

    if (!((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay[traffic_class] != NULL &&
        (sav->alg_auth && sav->key_auth))) {
        goto noreplaycheck;
    }

    if (sav->alg_auth == SADB_X_AALG_NULL ||
        sav->alg_auth == SADB_AALG_NONE) {
        goto noreplaycheck;
    }

    /*
     * check for sequence number.
     */
    if (ipsec_chkreplay(seq, sav, traffic_class)) {
        ; /*okey*/
    } else {
        IPSEC_STAT_INCREMENT(ipsecstat.in_espreplay);
        ipseclog((LOG_WARNING,
            "replay packet in IPv4 ESP input: %s %s\n",
            ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
        goto bad;
    }
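/*
 * Anti-replay: the 32-bit sequence number from the (new-format) ESP
 * header is checked against the SA's sliding replay window before the
 * ICV is verified, and the window itself is only advanced further below,
 * after authentication succeeds.  When the SA uses per-traffic-class
 * sequence numbers (SADB_X_EXT_SA2_SEQ_PER_TRAFFIC_CLASS), a separate
 * window is kept per traffic class, selected by the packet's DSCP value.
 */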
    {
        u_char sum0[AH_MAXSUMSIZE] __attribute__((aligned(4)));
        u_char sum[AH_MAXSUMSIZE] __attribute__((aligned(4)));
        const struct ah_algorithm *sumalgo;

        sumalgo = ah_algorithm_lookup(sav->alg_auth);
        if (!sumalgo) {
            goto noreplaycheck;
        }
        siz = (((*sumalgo->sumsiz)(sav) + 3) & ~(4 - 1));
        if (m->m_pkthdr.len < off + ESPMAXLEN + siz) {
            IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
            goto bad;
        }
        if (AH_MAXSUMSIZE < siz) {
            ipseclog((LOG_DEBUG,
                "internal error: AH_MAXSUMSIZE must be larger than %u\n",
                (u_int32_t)siz));
            IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
            goto bad;
        }

        m_copydata(m, m->m_pkthdr.len - siz, siz, (caddr_t) &sum0[0]);

        if (esp_auth(m, off, m->m_pkthdr.len - off - siz, sav, sum)) {
            ipseclog((LOG_WARNING, "auth fail in IPv4 ESP input: %s %s\n",
                ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
            IPSEC_STAT_INCREMENT(ipsecstat.in_espauthfail);
            goto bad;
        }

        if (cc_cmp_safe(siz, sum0, sum)) {
            ipseclog((LOG_WARNING, "cc_cmp fail in IPv4 ESP input: %s %s\n",
                ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
            IPSEC_STAT_INCREMENT(ipsecstat.in_espauthfail);
            goto bad;
        }

delay_icv:

        /* strip off the authentication data */
        m_adj(m, -siz);
        ip = mtod(m, struct ip *);
#ifdef IPLEN_FLIPPED
        ip->ip_len = ip->ip_len - siz;
#else
        ip->ip_len = htons(ntohs(ip->ip_len) - siz);
#endif
        m->m_flags |= M_AUTHIPDGM;
        IPSEC_STAT_INCREMENT(ipsecstat.in_espauthsucc);
    }
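/*
 * Note that the ICV comparison above uses cc_cmp_safe(), a constant-time
 * memory compare, so that the time taken to reject a forged ICV does not
 * leak how many leading bytes of the value matched.
 */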
    /*
     * update sequence number.
     */
    if ((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay[traffic_class] != NULL) {
        if (ipsec_updatereplay(seq, sav, traffic_class)) {
            IPSEC_STAT_INCREMENT(ipsecstat.in_espreplay);
            goto bad;
        }
    }

noreplaycheck:

    /* process main esp header. */
    if (sav->flags & SADB_X_EXT_OLD) {
        /* RFC 1827 */
        esplen = sizeof(struct esp);
    } else {
        /* RFC 2406 */
        if (sav->flags & SADB_X_EXT_DERIV) {
            esplen = sizeof(struct esp);
        } else {
            esplen = sizeof(struct newesp);
        }
    }
    if (m->m_pkthdr.len < off + esplen + ivlen + sizeof(esptail)) {
        ipseclog((LOG_WARNING,
            "IPv4 ESP input: packet too short\n"));
        IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
        goto bad;
    }

    if (m->m_len < off + esplen + ivlen) {
        m = m_pullup(m, off + esplen + ivlen);
        if (m == NULL) {
            ipseclog((LOG_DEBUG,
                "IPv4 ESP input: can't pullup in esp4_input\n"));
            IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
            goto bad;
        }
    }

    /*
     * pre-compute and cache intermediate key
     */
    if (esp_schedule(algo, sav) != 0) {
        IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
        goto bad;
    }

    /*
     * decrypt the packet.
     */
    if (!algo->decrypt) {
        panic("internal error: no decrypt function");
    }
    KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_START, 0, 0, 0, 0, 0);
    if ((*algo->decrypt)(m, off, sav, algo, ivlen)) {
        /* m is already freed */
        m = NULL;
        ipseclog((LOG_ERR, "decrypt fail in IPv4 ESP input: %s\n",
            ipsec_logsastr(sav)));
        IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
        KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 1, 0, 0, 0, 0);
        goto bad;
    }
    KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 2, 0, 0, 0, 0);
    IPSEC_STAT_INCREMENT(ipsecstat.in_esphist[sav->alg_enc]);

    m->m_flags |= M_DECRYPTED;
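/*
 * The decrypt callback operates on the mbuf chain in place; on failure
 * it has already freed the chain (the "m is already freed" path above),
 * so the error path must not touch m again.  M_DECRYPTED flags the mbuf
 * as having been decrypted by IPsec so that later processing can tell it
 * has already been handled.
 */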
    if (algo->finalizedecrypt) {
        if ((*algo->finalizedecrypt)(sav, saved_icv, algo->icvlen)) {
            ipseclog((LOG_ERR, "esp4 packet decryption ICV failure\n"));
            IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
            KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 1, 0, 0, 0, 0);
            goto bad;
        }
    }
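/*
 * Combined-mode (AEAD) ciphers such as AES-GCM provide their own
 * integrity check: finalizedecrypt() verifies the ICV bytes that were
 * copied off the tail of the packet before decryption, taking the place
 * of the separate esp_auth()/cc_cmp_safe() path used above for
 * encrypt-plus-HMAC configurations.
 */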
    /*
     * find the trailer of the ESP.
     */
    m_copydata(m, m->m_pkthdr.len - sizeof(esptail), sizeof(esptail),
        (caddr_t)&esptail);
    nxt = esptail.esp_nxt;
    taillen = esptail.esp_padlen + sizeof(esptail);

    if (m->m_pkthdr.len < taillen
        || m->m_pkthdr.len - taillen < hlen) { /*?*/
        ipseclog((LOG_WARNING,
            "bad pad length in IPv4 ESP input: %s %s\n",
            ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
        IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
        goto bad;
    }

    /* strip off the trailing pad area. */
    m_adj(m, -taillen);
    ip = mtod(m, struct ip *);
#ifdef IPLEN_FLIPPED
    ip->ip_len = ip->ip_len - taillen;
#else
    ip->ip_len = htons(ntohs(ip->ip_len) - taillen);
#endif
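/*
 * ESP trailer layout (RFC 2406/4303): the encrypted payload is followed
 * by 0-255 bytes of padding, a one-byte pad length and a one-byte next
 * header value -- the two bytes of struct esptail read above -- with the
 * ICV after that (the ICV was already removed earlier in this function).
 * taillen therefore counts the padding plus the trailer itself, all of
 * which has just been trimmed from the packet.
 */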
    if (ip->ip_p == IPPROTO_UDP) {
        // offset includes the outer ip and udp header lengths.
        if (m->m_len < off) {
            m = m_pullup(m, off);
            if (m == NULL) {
                ipseclog((LOG_DEBUG,
                    "IPv4 ESP input: invalid udp encapsulated ESP packet length \n"));
                IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
                goto bad;
            }
        }

        // check the UDP encap header to detect changes in the source port, and then strip the header
        off -= sizeof(struct udphdr); // off no longer includes the udphdr's size
        // if peer is behind nat and this is the latest esp packet
        if ((sav->flags & SADB_X_EXT_NATT_DETECTED_PEER) != 0 &&
            (sav->flags & SADB_X_EXT_OLD) == 0 &&
            seq && sav->replay[traffic_class] &&
            seq >= sav->replay[traffic_class]->lastseq) {
            struct udphdr *encap_uh = (__typeof__(encap_uh))(void *)((caddr_t)ip + off);
            if (encap_uh->uh_sport &&
                ntohs(encap_uh->uh_sport) != sav->remote_ike_port) {
                sav->remote_ike_port = ntohs(encap_uh->uh_sport);
            }
        }
        ip = esp4_input_strip_udp_encap(m, off);
        esp = (struct esp *)(void *)(((u_int8_t *)ip) + off);
    }
    /* was it transmitted over the IPsec tunnel SA? */
    if (ipsec4_tunnel_validate(m, off + esplen + ivlen, nxt, sav, &ifamily)) {
        ifaddr_t ifa;
        struct sockaddr_storage addr;

        /*
         * strip off all the headers that precedes ESP header.
         *	IP4 xx ESP IP4' payload -> IP4' payload
         *
         * XXX more sanity checks
         * XXX relationship with gif?
         */
        u_int8_t tos, otos;
        int sum;

        tos = ip->ip_tos;
        m_adj(m, off + esplen + ivlen);
        if (ifamily == AF_INET) {
            struct sockaddr_in *ipaddr;

            if (m->m_len < sizeof(*ip)) {
                m = m_pullup(m, sizeof(*ip));
                if (!m) {
                    IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
                    goto bad;
                }
            }
            ip = mtod(m, struct ip *);
            /* ECN consideration. */
            otos = ip->ip_tos;
            if (ip_ecn_egress(ip4_ipsec_ecn, &tos, &ip->ip_tos) == 0) {
                IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
                goto bad;
            }

            if (otos != ip->ip_tos) {
                sum = ~ntohs(ip->ip_sum) & 0xffff;
                sum += (~otos & 0xffff) + ip->ip_tos;
                sum = (sum >> 16) + (sum & 0xffff);
                sum += (sum >> 16); /* add carry */
                ip->ip_sum = htons(~sum & 0xffff);
            }
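            /*
             * The TOS rewrite above is the ECN propagation step of
             * decapsulation: ip_ecn_egress() folds the outer header's ECN
             * bits into the inner header according to the configured mode
             * (ip4_ipsec_ecn), and when that changes the TOS byte the IPv4
             * header checksum is patched incrementally (RFC 1624 style)
             * instead of being recomputed.
             */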
            if (!key_checktunnelsanity(sav, AF_INET,
                (caddr_t)&ip->ip_src, (caddr_t)&ip->ip_dst)) {
                ipseclog((LOG_ERR, "ipsec tunnel address mismatch "
                    "in ESP input: %s %s\n",
                    ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
                IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
                goto bad;
            }

            bzero(&addr, sizeof(addr));
            ipaddr = (__typeof__(ipaddr)) & addr;
            ipaddr->sin_family = AF_INET;
            ipaddr->sin_len = sizeof(*ipaddr);
            ipaddr->sin_addr = ip->ip_dst;
        } else if (ifamily == AF_INET6) {
            struct sockaddr_in6 *ip6addr;

            /*
             * m_pullup is prohibited in KAME IPv6 input processing
             * but there's no other way!
             */
            if (m->m_len < sizeof(*ip6)) {
                m = m_pullup(m, sizeof(*ip6));
                if (!m) {
                    IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
                    goto bad;
                }
            }

            /*
             * Expect 32-bit aligned data pointer on strict-align
             * platforms
             */
            MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);

            ip6 = mtod(m, struct ip6_hdr *);

            /* ECN consideration. */
            if (ip64_ecn_egress(ip4_ipsec_ecn, &tos, &ip6->ip6_flow) == 0) {
                IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
                goto bad;
            }

            if (!key_checktunnelsanity(sav, AF_INET6,
                (caddr_t)&ip6->ip6_src, (caddr_t)&ip6->ip6_dst)) {
                ipseclog((LOG_ERR, "ipsec tunnel address mismatch "
                    "in ESP input: %s %s\n",
                    ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
                IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
                goto bad;
            }

            bzero(&addr, sizeof(addr));
            ip6addr = (__typeof__(ip6addr)) & addr;
            ip6addr->sin6_family = AF_INET6;
            ip6addr->sin6_len = sizeof(*ip6addr);
            ip6addr->sin6_addr = ip6->ip6_dst;
        } else {
            ipseclog((LOG_ERR, "ipsec tunnel unsupported address family "
                "in ESP input\n"));
            goto bad;
        }
        key_sa_recordxfer(sav, m);
        if (ipsec_addhist(m, IPPROTO_ESP, spi) != 0 ||
            ipsec_addhist(m, IPPROTO_IPV4, 0) != 0) {
            IPSEC_STAT_INCREMENT(ipsecstat.in_nomem);
            goto bad;
        }

        // update the receiving interface address based on the inner address
        ifa = ifa_ifwithaddr((struct sockaddr *)&addr);
        if (ifa) {
            m->m_pkthdr.rcvif = ifa->ifa_ifp;
            IFA_REMREF(ifa);
        }

        /* Clear the csum flags, they can't be valid for the inner headers */
        m->m_pkthdr.csum_flags = 0;
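        /*
         * Delivery of the decapsulated inner packet: if the SA is bound
         * to an IPsec interface, the packet is logged, saved via
         * ipsec_save_wake_packet() and handed to that interface with
         * ipsec_inject_inbound_packet(); otherwise it is re-queued at the
         * IP layer with proto_input().  The mbuf is handed back to the
         * caller (out_m) only when it already arrived on the matching
         * IPsec interface.
         */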
        // Input via IPsec interface
        lck_mtx_lock(sadb_mutex);
        ifnet_t ipsec_if = sav->sah->ipsec_if;
        if (ipsec_if != NULL) {
            // If an interface is found, add a reference count before dropping the lock
            ifnet_reference(ipsec_if);
        }
        lck_mtx_unlock(sadb_mutex);
        if (ipsec_if != NULL) {
            esp_input_log(m, sav, spi, seq);
            ipsec_save_wake_packet(m, ntohl(spi), seq);

            if (interface != NULL &&
                interface == ipsec_if) {
                out_m = m;
                ifnet_release(ipsec_if);
                goto done;
            }

            errno_t inject_error = ipsec_inject_inbound_packet(ipsec_if, m);
            ifnet_release(ipsec_if);

            if (inject_error == 0) {
                m = NULL;
                goto done;
            } else {
                goto bad;
            }
        }

        if (proto_input(ifamily == AF_INET ? PF_INET : PF_INET6, m) != 0) {
            goto bad;
        }

        nxt = IPPROTO_DONE;
        KERNEL_DEBUG(DBG_FNC_ESPIN | DBG_FUNC_END, 2, 0, 0, 0, 0);
    } else {
        /*
         * strip off ESP header and IV.
         * even in m_pulldown case, we need to strip off ESP so that
         * we can always compute checksum for AH correctly.
         */
        size_t stripsiz;

        stripsiz = esplen + ivlen;

        ip = mtod(m, struct ip *);
        ovbcopy((caddr_t)ip, (caddr_t)(((u_char *)ip) + stripsiz), off);
        m->m_data += stripsiz;
        m->m_len -= stripsiz;
        m->m_pkthdr.len -= stripsiz;

        ip = mtod(m, struct ip *);
#ifdef IPLEN_FLIPPED
        ip->ip_len = ip->ip_len - stripsiz;
#else
        ip->ip_len = htons(ntohs(ip->ip_len) - stripsiz);
#endif
        ip->ip_p = nxt;

        key_sa_recordxfer(sav, m);
        if (ipsec_addhist(m, IPPROTO_ESP, spi) != 0) {
            IPSEC_STAT_INCREMENT(ipsecstat.in_nomem);
            goto bad;
        }

        /*
         * Set the csum valid flag, if we authenticated the
         * packet, the payload shouldn't be corrupt unless
         * it was corrupted before being signed on the other
         * side.
         */
        if (nxt == IPPROTO_TCP || nxt == IPPROTO_UDP) {
            m->m_pkthdr.csum_flags = CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
            m->m_pkthdr.csum_data = 0xFFFF;
            _CASSERT(offsetof(struct pkthdr, csum_data) == offsetof(struct pkthdr, csum_rx_val));
        }
        if (nxt != IPPROTO_DONE) {
            if ((ip_protox[nxt]->pr_flags & PR_LASTHDR) != 0 &&
                ipsec4_in_reject(m, NULL)) {
                IPSEC_STAT_INCREMENT(ipsecstat.in_polvio);
                goto bad;
            }
            KERNEL_DEBUG(DBG_FNC_ESPIN | DBG_FUNC_END, 3, 0, 0, 0, 0);

            /* translate encapsulated UDP port ? */
            if ((sav->flags & SADB_X_EXT_NATT_MULTIPLEUSERS) != 0) {
                struct udphdr *udp;

                if (nxt != IPPROTO_UDP) { /* not UPD packet - drop it */
                    IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
                    goto bad;
                }

                if (m->m_len < off + sizeof(struct udphdr)) {
                    m = m_pullup(m, off + sizeof(struct udphdr));
                    if (m == NULL) {
                        ipseclog((LOG_DEBUG,
                            "IPv4 ESP input: can't pullup UDP header in esp4_input\n"));
                        IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
                        goto bad;
                    }
                    ip = mtod(m, struct ip *);
                }

                udp = (struct udphdr *)(void *)(((u_int8_t *)ip) + off);

                lck_mtx_lock(sadb_mutex);
                if (sav->natt_encapsulated_src_port == 0) {
                    sav->natt_encapsulated_src_port = udp->uh_sport;
                } else if (sav->natt_encapsulated_src_port != udp->uh_sport) { /* something wrong */
                    IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
                    lck_mtx_unlock(sadb_mutex);
                    goto bad;
                }
                lck_mtx_unlock(sadb_mutex);
                udp->uh_sport = htons(sav->remote_ike_port);
            }

            DTRACE_IP6(receive, struct mbuf *, m, struct inpcb *, NULL,
                struct ip *, ip, struct ifnet *, m->m_pkthdr.rcvif,
                struct ip *, ip, struct ip6_hdr *, NULL);

            // Input via IPsec interface legacy path
            lck_mtx_lock(sadb_mutex);
            ifnet_t ipsec_if = sav->sah->ipsec_if;
            if (ipsec_if != NULL) {
                // If an interface is found, add a reference count before dropping the lock
                ifnet_reference(ipsec_if);
            }
            lck_mtx_unlock(sadb_mutex);
            if (ipsec_if != NULL) {
                size_t mlen;
                if ((mlen = m_length2(m, NULL)) < hlen) {
                    ipseclog((LOG_DEBUG,
                        "IPv4 ESP input: decrypted packet too short %d < %zu\n",
                        mlen, hlen));
                    IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
                    ifnet_release(ipsec_if);
                    goto bad;
                }
                ip->ip_len = htons(ip->ip_len + hlen);
                ip->ip_off = htons(ip->ip_off);
                ip->ip_sum = ip_cksum_hdr_in(m, hlen);

                esp_input_log(m, sav, spi, seq);
                ipsec_save_wake_packet(m, ntohl(spi), seq);

                if (interface != NULL &&
                    interface == ipsec_if) {
                    out_m = m;
                    ifnet_release(ipsec_if);
                    goto done;
                }

                errno_t inject_error = ipsec_inject_inbound_packet(ipsec_if, m);
                ifnet_release(ipsec_if);

                if (inject_error == 0) {
                    m = NULL;
                    goto done;
                } else {
                    goto bad;
                }
            }

            ip_proto_dispatch_in(m, off, nxt, 0);
        } else {
            m_freem(m);
        }
        m = NULL;
    }

done:
    if (sav) {
        KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
            printf("DP esp4_input call free SA:0x%llx\n",
            (uint64_t)VM_KERNEL_ADDRPERM(sav)));
        key_freesav(sav, KEY_SADB_UNLOCKED);
    }
    IPSEC_STAT_INCREMENT(ipsecstat.in_success);
    return out_m;

bad:
    if (sav) {
        KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
            printf("DP esp4_input call free SA:0x%llx\n",
            (uint64_t)VM_KERNEL_ADDRPERM(sav)));
        key_freesav(sav, KEY_SADB_UNLOCKED);
    }
    if (m) {
        m_freem(m);
    }
    KERNEL_DEBUG(DBG_FNC_ESPIN | DBG_FUNC_END, 4, 0, 0, 0, 0);
    return out_m;
}
int
esp6_input(struct mbuf **mp, int *offp, int proto)
{
    return esp6_input_extended(mp, offp, proto, NULL);
}
int
esp6_input_extended(struct mbuf **mp, int *offp, int proto, ifnet_t interface)
{
#pragma unused(proto)
    struct mbuf *m = *mp;
    int off = *offp;
    struct ip *ip;
    struct ip6_hdr *ip6;
    struct esp *esp;
    struct esptail esptail;
    u_int32_t spi;
    u_int32_t seq;
    struct secasvar *sav = NULL;
    size_t taillen;
    u_int16_t nxt;
    char *nproto;
    const struct esp_algorithm *algo;
    int ivlen;
    size_t esplen;
    sa_family_t ifamily;
    mbuf_traffic_class_t traffic_class = 0;

    /* sanity check for alignment. */
    if (off % 4 != 0 || m->m_pkthdr.len % 4 != 0) {
        ipseclog((LOG_ERR, "IPv6 ESP input: packet alignment problem "
            "(off=%d, pktlen=%d)\n", off, m->m_pkthdr.len));
        IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
        goto bad;
    }

#ifndef PULLDOWN_TEST
    IP6_EXTHDR_CHECK(m, off, ESPMAXLEN, {return IPPROTO_DONE;});
    esp = (struct esp *)(void *)(mtod(m, caddr_t) + off);
#else
    IP6_EXTHDR_GET(esp, struct esp *, m, off, ESPMAXLEN);
    if (esp == NULL) {
        IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
        return IPPROTO_DONE;
    }
#endif
    m->m_pkthdr.csum_flags &= ~CSUM_RX_FLAGS;

    /* Expect 32-bit data aligned pointer on strict-align platforms */
    MBUF_STRICT_DATA_ALIGNMENT_CHECK_32(m);

    ip6 = mtod(m, struct ip6_hdr *);

    if (ntohs(ip6->ip6_plen) == 0) {
        ipseclog((LOG_ERR, "IPv6 ESP input: "
            "ESP with IPv6 jumbogram is not supported.\n"));
        IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
        goto bad;
    }
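    /*
     * A zero ip6_plen means the real payload length is carried in a
     * Jumbo Payload option (RFC 2675).  Jumbograms are simply rejected
     * above rather than parsed.
     */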
    nproto = ip6_get_prevhdr(m, off);
    if (nproto == NULL || (*nproto != IPPROTO_ESP &&
        !(*nproto == IPPROTO_UDP && off >= sizeof(struct udphdr)))) {
        ipseclog((LOG_DEBUG, "IPv6 ESP input: invalid protocol type\n"));
        IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
        goto bad;
    }

    /* find the sassoc. */
    spi = esp->esp_spi;

    if ((sav = key_allocsa_extended(AF_INET6,
        (caddr_t)&ip6->ip6_src, (caddr_t)&ip6->ip6_dst,
        IPPROTO_ESP, spi, interface)) == 0) {
        ipseclog((LOG_WARNING,
            "IPv6 ESP input: no key association found for spi %u (0x%08x) seq %u"
            " src %04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x"
            " dst %04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x if %s\n",
            (u_int32_t)ntohl(spi), (u_int32_t)ntohl(spi), ntohl(((struct newesp *)esp)->esp_seq),
            ntohs(ip6->ip6_src.__u6_addr.__u6_addr16[0]), ntohs(ip6->ip6_src.__u6_addr.__u6_addr16[1]),
            ntohs(ip6->ip6_src.__u6_addr.__u6_addr16[2]), ntohs(ip6->ip6_src.__u6_addr.__u6_addr16[3]),
            ntohs(ip6->ip6_src.__u6_addr.__u6_addr16[4]), ntohs(ip6->ip6_src.__u6_addr.__u6_addr16[5]),
            ntohs(ip6->ip6_src.__u6_addr.__u6_addr16[6]), ntohs(ip6->ip6_src.__u6_addr.__u6_addr16[7]),
            ntohs(ip6->ip6_dst.__u6_addr.__u6_addr16[0]), ntohs(ip6->ip6_dst.__u6_addr.__u6_addr16[1]),
            ntohs(ip6->ip6_dst.__u6_addr.__u6_addr16[2]), ntohs(ip6->ip6_dst.__u6_addr.__u6_addr16[3]),
            ntohs(ip6->ip6_dst.__u6_addr.__u6_addr16[4]), ntohs(ip6->ip6_dst.__u6_addr.__u6_addr16[5]),
            ntohs(ip6->ip6_dst.__u6_addr.__u6_addr16[6]), ntohs(ip6->ip6_dst.__u6_addr.__u6_addr16[7]),
            ((interface != NULL) ? if_name(interface) : "NONE")));
        IPSEC_STAT_INCREMENT(ipsec6stat.in_nosa);
        goto bad;
    }
    KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
        printf("DP esp6_input called to allocate SA:0x%llx\n",
        (uint64_t)VM_KERNEL_ADDRPERM(sav)));
    if (sav->state != SADB_SASTATE_MATURE
        && sav->state != SADB_SASTATE_DYING) {
        ipseclog((LOG_DEBUG,
            "IPv6 ESP input: non-mature/dying SA found for spi %u (0x%08x)\n",
            (u_int32_t)ntohl(spi), (u_int32_t)ntohl(spi)));
        IPSEC_STAT_INCREMENT(ipsec6stat.in_badspi);
        goto bad;
    }

    algo = esp_algorithm_lookup(sav->alg_enc);
    if (!algo) {
        ipseclog((LOG_DEBUG, "IPv6 ESP input: "
            "unsupported encryption algorithm for spi %u (0x%08x)\n",
            (u_int32_t)ntohl(spi), (u_int32_t)ntohl(spi)));
        IPSEC_STAT_INCREMENT(ipsec6stat.in_badspi);
        goto bad;
    }

    /* check if we have proper ivlen information */
    ivlen = sav->ivlen;
    if (ivlen < 0) {
        ipseclog((LOG_ERR, "inproper ivlen in IPv6 ESP input: %s %s\n",
            ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
        IPSEC_STAT_INCREMENT(ipsec6stat.in_badspi);
        goto bad;
    }
    seq = ntohl(((struct newesp *)esp)->esp_seq);

    if ((sav->flags2 & SADB_X_EXT_SA2_SEQ_PER_TRAFFIC_CLASS) ==
        SADB_X_EXT_SA2_SEQ_PER_TRAFFIC_CLASS) {
        u_int8_t dscp = (ntohl(ip6->ip6_flow) & IP6FLOW_DSCP_MASK) >> IP6FLOW_DSCP_SHIFT;
        traffic_class = rfc4594_dscp_to_tc(dscp);
    }

    /* Save ICV from packet for verification later */
    size_t siz = 0;
    unsigned char saved_icv[AH_MAXSUMSIZE];
    if (algo->finalizedecrypt) {
        siz = algo->icvlen;
        m_copydata(m, m->m_pkthdr.len - siz, siz, (caddr_t) saved_icv);
        goto delay_icv;
    }

    if (!((sav->flags & SADB_X_EXT_OLD) == 0 &&
        sav->replay[traffic_class] != NULL &&
        (sav->alg_auth && sav->key_auth))) {
        goto noreplaycheck;
    }

    if (sav->alg_auth == SADB_X_AALG_NULL ||
        sav->alg_auth == SADB_AALG_NONE) {
        goto noreplaycheck;
    }

    /*
     * check for sequence number.
     */
    if (ipsec_chkreplay(seq, sav, traffic_class)) {
        ; /*okey*/
    } else {
        IPSEC_STAT_INCREMENT(ipsec6stat.in_espreplay);
        ipseclog((LOG_WARNING,
            "replay packet in IPv6 ESP input: %s %s\n",
            ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
        goto bad;
    }
    {
        u_char sum0[AH_MAXSUMSIZE] __attribute__((aligned(4)));
        u_char sum[AH_MAXSUMSIZE] __attribute__((aligned(4)));
        const struct ah_algorithm *sumalgo;

        sumalgo = ah_algorithm_lookup(sav->alg_auth);
        if (!sumalgo) {
            goto noreplaycheck;
        }
        siz = (((*sumalgo->sumsiz)(sav) + 3) & ~(4 - 1));
        if (m->m_pkthdr.len < off + ESPMAXLEN + siz) {
            IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
            goto bad;
        }
        if (AH_MAXSUMSIZE < siz) {
            ipseclog((LOG_DEBUG,
                "internal error: AH_MAXSUMSIZE must be larger than %u\n",
                (u_int32_t)siz));
            IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
            goto bad;
        }

        m_copydata(m, m->m_pkthdr.len - siz, siz, (caddr_t) &sum0[0]);

        if (esp_auth(m, off, m->m_pkthdr.len - off - siz, sav, sum)) {
            ipseclog((LOG_WARNING, "auth fail in IPv6 ESP input: %s %s\n",
                ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
            IPSEC_STAT_INCREMENT(ipsec6stat.in_espauthfail);
            goto bad;
        }

        if (cc_cmp_safe(siz, sum0, sum)) {
            ipseclog((LOG_WARNING, "auth fail in IPv6 ESP input: %s %s\n",
                ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
            IPSEC_STAT_INCREMENT(ipsec6stat.in_espauthfail);
            goto bad;
        }

delay_icv:

        /* strip off the authentication data */
        m_adj(m, -siz);
        ip6 = mtod(m, struct ip6_hdr *);
        ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - siz);
        m->m_flags |= M_AUTHIPDGM;
        IPSEC_STAT_INCREMENT(ipsec6stat.in_espauthsucc);
    }

    /*
     * update sequence number.
     */
    if ((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay[traffic_class] != NULL) {
        if (ipsec_updatereplay(seq, sav, traffic_class)) {
            IPSEC_STAT_INCREMENT(ipsec6stat.in_espreplay);
            goto bad;
        }
    }
noreplaycheck:

    /* process main esp header. */
    if (sav->flags & SADB_X_EXT_OLD) {
        /* RFC 1827 */
        esplen = sizeof(struct esp);
    } else {
        /* RFC 2406 */
        if (sav->flags & SADB_X_EXT_DERIV) {
            esplen = sizeof(struct esp);
        } else {
            esplen = sizeof(struct newesp);
        }
    }

    if (m->m_pkthdr.len < off + esplen + ivlen + sizeof(esptail)) {
        ipseclog((LOG_WARNING,
            "IPv6 ESP input: packet too short\n"));
        IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
        goto bad;
    }

#ifndef PULLDOWN_TEST
    IP6_EXTHDR_CHECK(m, off, esplen + ivlen, return IPPROTO_DONE); /*XXX*/
#else
    IP6_EXTHDR_GET(esp, struct esp *, m, off, esplen + ivlen);
    if (esp == NULL) {
        IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
        m = NULL;
        goto bad;
    }
#endif
    ip6 = mtod(m, struct ip6_hdr *); /*set it again just in case*/
    /*
     * pre-compute and cache intermediate key
     */
    if (esp_schedule(algo, sav) != 0) {
        IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
        goto bad;
    }

    /*
     * decrypt the packet.
     */
    if (!algo->decrypt) {
        panic("internal error: no decrypt function");
    }
    if ((*algo->decrypt)(m, off, sav, algo, ivlen)) {
        /* m is already freed */
        m = NULL;
        ipseclog((LOG_ERR, "decrypt fail in IPv6 ESP input: %s\n",
            ipsec_logsastr(sav)));
        IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
        goto bad;
    }
    IPSEC_STAT_INCREMENT(ipsec6stat.in_esphist[sav->alg_enc]);

    m->m_flags |= M_DECRYPTED;

    if (algo->finalizedecrypt) {
        if ((*algo->finalizedecrypt)(sav, saved_icv, algo->icvlen)) {
            ipseclog((LOG_ERR, "esp6 packet decryption ICV failure\n"));
            IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
            KERNEL_DEBUG(DBG_FNC_DECRYPT | DBG_FUNC_END, 1, 0, 0, 0, 0);
            goto bad;
        }
    }
    /*
     * find the trailer of the ESP.
     */
    m_copydata(m, m->m_pkthdr.len - sizeof(esptail), sizeof(esptail),
        (caddr_t)&esptail);
    nxt = esptail.esp_nxt;
    taillen = esptail.esp_padlen + sizeof(esptail);

    if (m->m_pkthdr.len < taillen
        || m->m_pkthdr.len - taillen < sizeof(struct ip6_hdr)) { /*?*/
        ipseclog((LOG_WARNING,
            "bad pad length in IPv6 ESP input: %s %s\n",
            ipsec6_logpacketstr(ip6, spi), ipsec_logsastr(sav)));
        IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
        goto bad;
    }

    /* strip off the trailing pad area. */
    m_adj(m, -taillen);
    ip6 = mtod(m, struct ip6_hdr *);
    ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - taillen);
    if (*nproto == IPPROTO_UDP) {
        // offset includes the outer ip and udp header lengths.
        if (m->m_len < off) {
            m = m_pullup(m, off);
            if (m == NULL) {
                ipseclog((LOG_DEBUG,
                    "IPv6 ESP input: invalid udp encapsulated ESP packet length\n"));
                IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
                goto bad;
            }
        }

        // check the UDP encap header to detect changes in the source port, and then strip the header
        off -= sizeof(struct udphdr); // off no longer includes the udphdr's size
        // if peer is behind nat and this is the latest esp packet
        if ((sav->flags & SADB_X_EXT_NATT_DETECTED_PEER) != 0 &&
            (sav->flags & SADB_X_EXT_OLD) == 0 &&
            seq && sav->replay[traffic_class] &&
            seq >= sav->replay[traffic_class]->lastseq) {
            struct udphdr *encap_uh = (__typeof__(encap_uh))(void *)((caddr_t)ip6 + off);
            if (encap_uh->uh_sport &&
                ntohs(encap_uh->uh_sport) != sav->remote_ike_port) {
                sav->remote_ike_port = ntohs(encap_uh->uh_sport);
            }
        }
        ip6 = esp6_input_strip_udp_encap(m, off);
        esp = (struct esp *)(void *)(((u_int8_t *)ip6) + off);
    }
    /* was it transmitted over the IPsec tunnel SA? */
    if (ipsec6_tunnel_validate(m, off + esplen + ivlen, nxt, sav, &ifamily)) {
        ifaddr_t ifa;
        struct sockaddr_storage addr;

        /*
         * strip off all the headers that precedes ESP header.
         *	IP6 xx ESP IP6' payload -> IP6' payload
         *
         * XXX more sanity checks
         * XXX relationship with gif?
         */
        u_int32_t flowinfo; /*net endian*/
        flowinfo = ip6->ip6_flow;
        m_adj(m, off + esplen + ivlen);
        if (ifamily == AF_INET6) {
            struct sockaddr_in6 *ip6addr;

            if (m->m_len < sizeof(*ip6)) {
#ifndef PULLDOWN_TEST
                /*
                 * m_pullup is prohibited in KAME IPv6 input processing
                 * but there's no other way!
                 */
#else
                /* okay to pullup in m_pulldown style */
#endif
                m = m_pullup(m, sizeof(*ip6));
                if (!m) {
                    IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
                    goto bad;
                }
            }
            ip6 = mtod(m, struct ip6_hdr *);
            /* ECN consideration. */
            if (ip6_ecn_egress(ip6_ipsec_ecn, &flowinfo, &ip6->ip6_flow) == 0) {
                IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
                goto bad;
            }
            if (!key_checktunnelsanity(sav, AF_INET6,
                (caddr_t)&ip6->ip6_src, (caddr_t)&ip6->ip6_dst)) {
                ipseclog((LOG_ERR, "ipsec tunnel address mismatch "
                    "in IPv6 ESP input: %s %s\n",
                    ipsec6_logpacketstr(ip6, spi),
                    ipsec_logsastr(sav)));
                IPSEC_STAT_INCREMENT(ipsec6stat.in_inval);
                goto bad;
            }

            bzero(&addr, sizeof(addr));
            ip6addr = (__typeof__(ip6addr)) & addr;
            ip6addr->sin6_family = AF_INET6;
            ip6addr->sin6_len = sizeof(*ip6addr);
            ip6addr->sin6_addr = ip6->ip6_dst;
        } else if (ifamily == AF_INET) {
            struct sockaddr_in *ipaddr;
            u_int8_t otos;
            int sum;

            if (m->m_len < sizeof(*ip)) {
                m = m_pullup(m, sizeof(*ip));
                if (!m) {
                    IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
                    goto bad;
                }
            }

            ip = mtod(m, struct ip *);
            otos = ip->ip_tos;
            /* ECN consideration. */
            if (ip46_ecn_egress(ip6_ipsec_ecn, &flowinfo, &ip->ip_tos) == 0) {
                IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
                goto bad;
            }

            if (otos != ip->ip_tos) {
                sum = ~ntohs(ip->ip_sum) & 0xffff;
                sum += (~otos & 0xffff) + ip->ip_tos;
                sum = (sum >> 16) + (sum & 0xffff);
                sum += (sum >> 16); /* add carry */
                ip->ip_sum = htons(~sum & 0xffff);
            }

            if (!key_checktunnelsanity(sav, AF_INET,
                (caddr_t)&ip->ip_src, (caddr_t)&ip->ip_dst)) {
                ipseclog((LOG_ERR, "ipsec tunnel address mismatch "
                    "in ESP input: %s %s\n",
                    ipsec4_logpacketstr(ip, spi), ipsec_logsastr(sav)));
                IPSEC_STAT_INCREMENT(ipsecstat.in_inval);
                goto bad;
            }

            bzero(&addr, sizeof(addr));
            ipaddr = (__typeof__(ipaddr)) & addr;
            ipaddr->sin_family = AF_INET;
            ipaddr->sin_len = sizeof(*ipaddr);
            ipaddr->sin_addr = ip->ip_dst;
        }

        key_sa_recordxfer(sav, m);
        if (ipsec_addhist(m, IPPROTO_ESP, spi) != 0 ||
            ipsec_addhist(m, IPPROTO_IPV6, 0) != 0) {
            IPSEC_STAT_INCREMENT(ipsec6stat.in_nomem);
            goto bad;
        }

        // update the receiving interface address based on the inner address
        ifa = ifa_ifwithaddr((struct sockaddr *)&addr);
        if (ifa) {
            m->m_pkthdr.rcvif = ifa->ifa_ifp;
            IFA_REMREF(ifa);
        }
        // Input via IPsec interface
        lck_mtx_lock(sadb_mutex);
        ifnet_t ipsec_if = sav->sah->ipsec_if;
        if (ipsec_if != NULL) {
            // If an interface is found, add a reference count before dropping the lock
            ifnet_reference(ipsec_if);
        }
        lck_mtx_unlock(sadb_mutex);
        if (ipsec_if != NULL) {
            esp_input_log(m, sav, spi, seq);
            ipsec_save_wake_packet(m, ntohl(spi), seq);

            if (interface != NULL &&
                interface == ipsec_if) {
                ifnet_release(ipsec_if);
                goto done;
            }

            errno_t inject_error = ipsec_inject_inbound_packet(ipsec_if, m);
            ifnet_release(ipsec_if);

            if (inject_error == 0) {
                m = NULL;
                nxt = IPPROTO_DONE;
                goto done;
            } else {
                goto bad;
            }
        }

        if (proto_input(ifamily == AF_INET ? PF_INET : PF_INET6, m) != 0) {
            goto bad;
        }
        nxt = IPPROTO_DONE;
    } else {
        /*
         * strip off ESP header and IV.
         * even in m_pulldown case, we need to strip off ESP so that
         * we can always compute checksum for AH correctly.
         */
        size_t stripsiz;
        char *prvnxtp;

        /*
         * Set the next header field of the previous header correctly.
         */
        prvnxtp = ip6_get_prevhdr(m, off); /* XXX */
        *prvnxtp = nxt;

        stripsiz = esplen + ivlen;
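        /*
         * Two ways to strip the ESP header and IV in transport mode:
         * when the preceding headers plus the stripped region fit in the
         * first mbuf, the headers are simply slid forward over it
         * (ovbcopy); otherwise the chain is split at the ESP header, the
         * ESP header and IV are trimmed from the second half, and the
         * halves are concatenated again, avoiding a copy of the payload.
         */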
        ip6 = mtod(m, struct ip6_hdr *);
        if (m->m_len >= stripsiz + off) {
            ovbcopy((caddr_t)ip6, ((caddr_t)ip6) + stripsiz, off);
            m->m_data += stripsiz;
            m->m_len -= stripsiz;
            m->m_pkthdr.len -= stripsiz;
        } else {
            /*
             * this comes with no copy if the boundary is on
             * cluster
             */
            struct mbuf *n;

            n = m_split(m, off, M_DONTWAIT);
            if (n == NULL) {
                /* m is retained by m_split */
                goto bad;
            }
            m_adj(n, stripsiz);
            /* m_cat does not update m_pkthdr.len */
            m->m_pkthdr.len += n->m_pkthdr.len;
            m_cat(m, n);
        }
#ifndef PULLDOWN_TEST
        /*
         * KAME requires that the packet to be contiguous on the
         * mbuf.  We need to make that sure.
         * this kind of code should be avoided.
         * XXX other conditions to avoid running this part?
         */
        if (m->m_len != m->m_pkthdr.len) {
            struct mbuf *n = NULL;
            int maxlen;

            MGETHDR(n, M_DONTWAIT, MT_HEADER); /* MAC-OK */
            maxlen = MHLEN;
            if (n) {
                M_COPY_PKTHDR(n, m);
            }
            if (n && m->m_pkthdr.len > maxlen) {
                MCLGET(n, M_DONTWAIT);
                maxlen = MCLBYTES;
                if ((n->m_flags & M_EXT) == 0) {
                    m_free(n);
                    n = NULL;
                }
            }
            if (!n) {
                printf("esp6_input: mbuf allocation failed\n");
                goto bad;
            }

            if (m->m_pkthdr.len <= maxlen) {
                m_copydata(m, 0, m->m_pkthdr.len, mtod(n, caddr_t));
                n->m_len = m->m_pkthdr.len;
                n->m_pkthdr.len = m->m_pkthdr.len;
                n->m_next = NULL;
                m_freem(m);
            } else {
                m_copydata(m, 0, maxlen, mtod(n, caddr_t));
                n->m_len = maxlen;
                n->m_pkthdr.len = m->m_pkthdr.len;
                n->m_next = m;
                m_adj(m, maxlen);
                m->m_flags &= ~M_PKTHDR;
            }
            m = n;
        }
#endif

        ip6 = mtod(m, struct ip6_hdr *);
        ip6->ip6_plen = htons(ntohs(ip6->ip6_plen) - stripsiz);
        key_sa_recordxfer(sav, m);
        if (ipsec_addhist(m, IPPROTO_ESP, spi) != 0) {
            IPSEC_STAT_INCREMENT(ipsec6stat.in_nomem);
            goto bad;
        }

        /*
         * Set the csum valid flag, if we authenticated the
         * packet, the payload shouldn't be corrupt unless
         * it was corrupted before being signed on the other
         * side.
         */
        if (nxt == IPPROTO_TCP || nxt == IPPROTO_UDP) {
            m->m_pkthdr.csum_flags = CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
            m->m_pkthdr.csum_data = 0xFFFF;
            _CASSERT(offsetof(struct pkthdr, csum_data) == offsetof(struct pkthdr, csum_rx_val));
        }
        // Input via IPsec interface
        lck_mtx_lock(sadb_mutex);
        ifnet_t ipsec_if = sav->sah->ipsec_if;
        if (ipsec_if != NULL) {
            // If an interface is found, add a reference count before dropping the lock
            ifnet_reference(ipsec_if);
        }
        lck_mtx_unlock(sadb_mutex);
        if (ipsec_if != NULL) {
            esp_input_log(m, sav, spi, seq);
            ipsec_save_wake_packet(m, ntohl(spi), seq);

            if (interface != NULL &&
                interface == ipsec_if) {
                ifnet_release(ipsec_if);
                goto done;
            }

            errno_t inject_error = ipsec_inject_inbound_packet(ipsec_if, m);
            ifnet_release(ipsec_if);

            if (inject_error == 0) {
                m = NULL;
                nxt = IPPROTO_DONE;
                goto done;
            } else {
                goto bad;
            }
        }
    }

done:
    *offp = off;
    *mp = m;
    if (sav) {
        KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
            printf("DP esp6_input call free SA:0x%llx\n",
            (uint64_t)VM_KERNEL_ADDRPERM(sav)));
        key_freesav(sav, KEY_SADB_UNLOCKED);
    }
    IPSEC_STAT_INCREMENT(ipsec6stat.in_success);
    return nxt;

bad:
    if (sav) {
        KEYDEBUG(KEYDEBUG_IPSEC_STAMP,
            printf("DP esp6_input call free SA:0x%llx\n",
            (uint64_t)VM_KERNEL_ADDRPERM(sav)));
        key_freesav(sav, KEY_SADB_UNLOCKED);
    }
    if (m) {
        m_freem(m);
    }
    if (interface != NULL) {
        *mp = NULL;
    }
    return IPPROTO_DONE;
}
void
esp6_ctlinput(int cmd, struct sockaddr *sa, void *d, __unused struct ifnet *ifp)
{
    const struct newesp *espp;
    struct newesp esp;
    struct ip6ctlparam *ip6cp = NULL, ip6cp1;
    struct secasvar *sav;
    struct ip6_hdr *ip6;
    struct mbuf *m;
    int off = 0;
    struct sockaddr_in6 *sa6_src, *sa6_dst;

    if (sa->sa_family != AF_INET6 ||
        sa->sa_len != sizeof(struct sockaddr_in6)) {
        return;
    }
    if ((unsigned)cmd >= PRC_NCMDS) {
        return;
    }

    /* if the parameter is from icmp6, decode it. */
    if (d != NULL) {
        ip6cp = (struct ip6ctlparam *)d;
        m = ip6cp->ip6c_m;
        ip6 = ip6cp->ip6c_ip6;
        off = ip6cp->ip6c_off;
    } else {
        m = NULL;
        ip6 = NULL;
    }
    if (ip6 != NULL) {
        /*
         * Notify the error to all possible sockets via pfctlinput2.
         * Since the upper layer information (such as protocol type,
         * source and destination ports) is embedded in the encrypted
         * data and might have been cut, we can't directly call
         * an upper layer ctlinput function. However, the pcbnotify
         * function will consider source and destination addresses
         * as well as the flow info value, and may be able to find
         * some PCB that should be notified.
         * Although pfctlinput2 will call esp6_ctlinput(), there is
         * no possibility of an infinite loop of function calls,
         * because we don't pass the inner IPv6 header.
         */
        bzero(&ip6cp1, sizeof(ip6cp1));
        ip6cp1.ip6c_src = ip6cp->ip6c_src;
        pfctlinput2(cmd, sa, (void *)&ip6cp1);

        /*
         * Then go to special cases that need ESP header information.
         * XXX: We assume that when ip6 is non NULL,
         * M and OFF are valid.
         */

        /* check if we can safely examine src and dst ports */
        if (m->m_pkthdr.len < off + sizeof(esp)) {
            return;
        }

        if (m->m_len < off + sizeof(esp)) {
            /*
             * this should be rare case,
             * so we compromise on this copy...
             */
            m_copydata(m, off, sizeof(esp), (caddr_t)&esp);
            espp = &esp;
        } else {
            espp = (struct newesp *)(void *)(mtod(m, caddr_t) + off);
        }

        if (cmd == PRC_MSGSIZE) {
            int valid = 0;

            /*
             * Check to see if we have a valid SA corresponding to
             * the address in the ICMP message payload.
             */
            sa6_src = ip6cp->ip6c_src;
            sa6_dst = (struct sockaddr_in6 *)(void *)sa;
            sav = key_allocsa(AF_INET6,
                (caddr_t)&sa6_src->sin6_addr,
                (caddr_t)&sa6_dst->sin6_addr,
                IPPROTO_ESP, espp->esp_spi);
            if (sav) {
                if (sav->state == SADB_SASTATE_MATURE ||
                    sav->state == SADB_SASTATE_DYING) {
                    valid++;
                }
                key_freesav(sav, KEY_SADB_UNLOCKED);
            }

            /* XXX Further validation? */

            /*
             * Depending on the value of "valid" and routing table
             * size (mtudisc_{hi,lo}wat), we will:
             * - recalcurate the new MTU and create the
             *   corresponding routing entry, or
             * - ignore the MTU change notification.
             */
            icmp6_mtudisc_update((struct ip6ctlparam *)d, valid);
        }
    } else {
        /* we normally notify any pcb here */
    }
}