/*
 * Copyright (c) 2008-2017 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* $FreeBSD: src/sys/netinet6/esp_output.c,v 1.1.2.3 2002/04/28 05:40:26 suz Exp $ */
/* $KAME: esp_output.c,v 1.44 2001/07/26 06:53:15 jinmei Exp $ */
/*
 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the project nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
/*
 * RFC1827/2406 Encapsulated Security Payload.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/syslog.h>

#include <net/route.h>
#include <net/multi_layer_pkt_log.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_var.h>
#include <netinet/udp.h> /* for nat traversal */
#include <netinet/tcp.h>
#include <netinet/in_tclass.h>

#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#include <netinet/icmp6.h>

#include <netinet6/ipsec.h>
#include <netinet6/ipsec6.h>
#include <netinet6/ah.h>
#include <netinet6/ah6.h>
#include <netinet6/esp.h>
#include <netinet6/esp6.h>
#include <netkey/key.h>
#include <netkey/keydb.h>

#include <net/net_osdep.h>

#include <sys/kdebug.h>
#define DBG_LAYER_BEG           NETDBG_CODE(DBG_NETIPSEC, 1)
#define DBG_LAYER_END           NETDBG_CODE(DBG_NETIPSEC, 3)
#define DBG_FNC_ESPOUT          NETDBG_CODE(DBG_NETIPSEC, (4 << 8))
#define DBG_FNC_ENCRYPT         NETDBG_CODE(DBG_NETIPSEC, (5 << 8))
static int esp_output(struct mbuf *, u_char *, struct mbuf *,
    int, struct secasvar *sav);

extern int esp_udp_encap_port;
extern u_int64_t natt_now;

extern lck_mtx_t *sadb_mutex;
/*
 * compute ESP header size.
 */
size_t
esp_hdrsiz(__unused struct ipsecrequest *isr)
{
    panic("esp_hdrsiz: NULL was passed.\n");

    lck_mtx_lock(sadb_mutex);
    struct secasvar *sav;
    const struct esp_algorithm *algo;
    const struct ah_algorithm *aalgo;

    /*%%%% this needs to change - no sav in ipsecrequest any more */
    if (isr->saidx.proto != IPPROTO_ESP) {
        panic("unsupported mode passed to esp_hdrsiz");

    if (sav->state != SADB_SASTATE_MATURE
        && sav->state != SADB_SASTATE_DYING) {

    /* we need transport mode ESP. */
    algo = esp_algorithm_lookup(sav->alg_enc);

    if (algo->padbound) {
        maxpad = algo->padbound;
    maxpad += 1;    /* maximum 'extendsiz' is padbound + 1, see esp_output */

    if (sav->flags & SADB_X_EXT_OLD) {
        hdrsiz = sizeof(struct esp) + ivlen + maxpad;

        aalgo = ah_algorithm_lookup(sav->alg_auth);
        if (aalgo && sav->replay[0] != NULL && sav->key_auth) {
            authlen = (aalgo->sumsiz)(sav);
        hdrsiz = sizeof(struct newesp) + ivlen + maxpad + authlen;

    /*
     * If the security association indicates that NATT is required,
     * add the size of the NATT encapsulation header:
     */
    if ((sav->flags & SADB_X_EXT_NATT) != 0) {
        hdrsiz += sizeof(struct udphdr) + 4;

    lck_mtx_unlock(sadb_mutex);

    lck_mtx_unlock(sadb_mutex);
    /*
     * sizeof(struct newesp) > sizeof(struct esp). (8)
     * esp_max_ivlen() = max ivlen for CBC mode
     * 17 = (maximum padding length without random padding length)
     *      + (Pad Length field) + (Next Header field).
     * 64 = maximum ICV we support.
     * sizeof(struct udphdr) in case NAT traversal is used
     */
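    /*
     * NB: as a rough worked example, with the 8-byte struct newesp, a
     * 16-byte worst-case IV from esp_max_ivlen() (an assumption here,
     * based on the largest CBC block size), 17 bytes of padding plus the
     * Pad Length and Next Header octets, a 64-byte maximum ICV
     * (AH_MAXSUMSIZE) and an 8-byte UDP header for NAT traversal, this
     * pessimistic estimate comes to about 8 + 16 + 17 + 64 + 8 = 113
     * bytes of per-packet overhead.
     */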
    return sizeof(struct newesp) + esp_max_ivlen() + 17 + AH_MAXSUMSIZE
           + sizeof(struct udphdr);
/*
 * Modify the packet so that the payload is encrypted.
 * The mbuf (m) must start with IPv4 or IPv6 header.
 * On failure, free the given mbuf and return NULL.
 *
 *      IP ......... payload
 * during the encryption:
 *      m   nexthdrp mprev md
 *      IP ............... esp iv payload pad padlen nxthdr
 *                         <--><-><------><--------------->
 *                         esplen plen    extendsiz
 *      <-----------------> espoff
 */
static int
esp_output(struct mbuf *m, u_char *nexthdrp, struct mbuf *md, int af,
    struct secasvar *sav)
{
    struct esptail *esptail;
    const struct esp_algorithm *algo;
    struct tcphdr th = {};
    size_t inner_payload_len = 0;
    u_int8_t inner_protocol = 0;
    size_t plen;            /* payload length to be encrypted */
    size_t esphlen;         /* sizeof(struct esp/newesp) + ivlen */
    struct ipsecstat *stat;
    struct udphdr *udp = NULL;
    int udp_encapsulate = (sav->flags & SADB_X_EXT_NATT && (af == AF_INET || af == AF_INET6) &&
        ((esp_udp_encap_port & 0xFFFF) != 0 || sav->natt_encapsulated_src_port != 0));
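    /*
     * NB: when udp_encapsulate is set, the ESP packet is wrapped in a UDP
     * header for NAT traversal (RFC 3948 style, typically UDP port 4500)
     * so it can cross NAT devices; the destination port is taken from
     * sav->remote_ike_port and the source port from esp_udp_encap_port or
     * sav->natt_encapsulated_src_port further down in this function.
     */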
    KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_START, sav->ivlen, 0, 0, 0, 0);

        ipseclog((LOG_ERR, "esp_output: unsupported af %d\n", af));
        KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_END, 1, 0, 0, 0, 0);
        return 0;       /* no change at all */

    mbuf_traffic_class_t traffic_class = 0;
    if ((sav->flags2 & SADB_X_EXT_SA2_SEQ_PER_TRAFFIC_CLASS) ==
        SADB_X_EXT_SA2_SEQ_PER_TRAFFIC_CLASS) {

            struct ip *ip = mtod(m, struct ip *);
            dscp = ip->ip_tos >> IPTOS_DSCP_SHIFT;

            struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);
            dscp = (ntohl(ip6->ip6_flow) & IP6FLOW_DSCP_MASK) >> IP6FLOW_DSCP_SHIFT;

            panic("esp_output: should not reach here");

        traffic_class = rfc4594_dscp_to_tc(dscp);
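    /*
     * NB: with SADB_X_EXT_SA2_SEQ_PER_TRAFFIC_CLASS the SA keeps a separate
     * sequence counter and replay state per traffic class (sav->replay[] is
     * indexed by traffic_class below); presumably this keeps QoS reordering
     * between classes from tripping the receiver's anti-replay window.
     */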
    /* some sanity check */
    if ((sav->flags & SADB_X_EXT_OLD) == 0 && sav->replay[traffic_class] == NULL) {

            ip = mtod(m, struct ip *);
            ipseclog((LOG_DEBUG, "esp4_output: internal error: "
                "sav->replay is null: %x->%x, SPI=%u\n",
                (u_int32_t)ntohl(ip->ip_src.s_addr),
                (u_int32_t)ntohl(ip->ip_dst.s_addr),
                (u_int32_t)ntohl(sav->spi)));
            IPSEC_STAT_INCREMENT(ipsecstat.out_inval);

            ipseclog((LOG_DEBUG, "esp6_output: internal error: "
                "sav->replay is null: SPI=%u\n",
                (u_int32_t)ntohl(sav->spi)));
            IPSEC_STAT_INCREMENT(ipsec6stat.out_inval);

            panic("esp_output: should not reach here");

        KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_END, 2, 0, 0, 0, 0);

    algo = esp_algorithm_lookup(sav->alg_enc);

        ipseclog((LOG_ERR, "esp_output: unsupported algorithm: "
            "SPI=%u\n", (u_int32_t)ntohl(sav->spi)));
        KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_END, 3, 0, 0, 0, 0);

        panic("invalid ivlen");
    /*
     * XXX inserts ESP header right after IPv4 header.  should
     * chase the header chain.
     * XXX sequential number
     */
    struct ip *ip = NULL;
    struct ip6_hdr *ip6 = NULL;
    size_t esplen;          /* sizeof(struct esp/newesp) */
    size_t hlen = 0;        /* ip header len */

    if (sav->flags & SADB_X_EXT_OLD) {
        esplen = sizeof(struct esp);

        if (sav->flags & SADB_X_EXT_DERIV) {
            esplen = sizeof(struct esp);

        esplen = sizeof(struct newesp);

    esphlen = esplen + ivlen;

    for (mprev = m; mprev && mprev->m_next != md; mprev = mprev->m_next) {

    if (mprev == NULL || mprev->m_next != md) {
        ipseclog((LOG_DEBUG, "esp%d_output: md is not in chain\n",

        KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_END, 4, 0, 0, 0, 0);

    for (n = md; n; n = n->m_next) {

        ip = mtod(m, struct ip *);
        hlen = IP_VHL_HL(ip->ip_vhl) << 2;
        hlen = ip->ip_hl << 2;

        ip6 = mtod(m, struct ip6_hdr *);
    /* grab info for packet logging */
    struct secashead *sah = sav->sah;
    if (net_mpklog_enabled &&
        sah != NULL && sah->ipsec_if != NULL) {
        ifnet_t ifp = sah->ipsec_if;

        if ((ifp->if_xflags & IFXF_MPK_LOG) == IFXF_MPK_LOG) {
            if (sav->sah->saidx.mode == IPSEC_MODE_TUNNEL) {
                struct ip *inner_ip = mtod(md, struct ip *);
                if (IP_VHL_V(inner_ip->ip_vhl) == IPVERSION) {
                    iphlen = IP_VHL_HL(inner_ip->ip_vhl) << 2;
                    iphlen = inner_ip->ip_hl << 2;
                    inner_protocol = inner_ip->ip_p;
                } else if (IP_VHL_V(inner_ip->ip_vhl) == IPV6_VERSION) {
                    struct ip6_hdr *inner_ip6 = mtod(md, struct ip6_hdr *);
                    iphlen = sizeof(struct ip6_hdr);
                    inner_protocol = inner_ip6->ip6_nxt;

                if (inner_protocol == IPPROTO_TCP) {
                    if ((int)(iphlen + sizeof(th)) <=
                        (m->m_pkthdr.len - m->m_len)) {
                        m_copydata(md, (int)iphlen, sizeof(th), (u_int8_t *)&th);

                    inner_payload_len = m->m_pkthdr.len - m->m_len - iphlen - (th.th_off << 2);

                inner_protocol = ip->ip_p;
            } else if (af == AF_INET6) {
                inner_protocol = ip6->ip6_nxt;

            if (inner_protocol == IPPROTO_TCP) {
                if ((int)(iphlen + sizeof(th)) <=

                    m_copydata(m, (int)iphlen, sizeof(th), (u_int8_t *)&th);

                inner_payload_len = m->m_pkthdr.len - iphlen - (th.th_off << 2);
    /* make the packet over-writable */
    mprev->m_next = NULL;
    if ((md = ipsec_copypkt(md)) == NULL) {
    /*
     * Translate UDP source port back to its original value.
     * SADB_X_EXT_NATT_MULTIPLEUSERS is only set for transport mode.
     */
    if ((sav->flags & SADB_X_EXT_NATT_MULTIPLEUSERS) != 0) {
        /* if not UDP - drop it */
        if (ip->ip_p != IPPROTO_UDP) {
            IPSEC_STAT_INCREMENT(ipsecstat.out_inval);

        udp = mtod(md, struct udphdr *);

        /* if src port not set in sav - find it */
        if (sav->natt_encapsulated_src_port == 0) {
            if (key_natt_get_translated_port(sav) == 0) {

        if (sav->remote_ike_port == htons(udp->uh_dport)) {
            /* translate UDP port */
            udp->uh_dport = sav->natt_encapsulated_src_port;
            udp->uh_sum = 0;        /* don't need checksum with ESP auth */

            /* drop the packet - can't translate the port */
            IPSEC_STAT_INCREMENT(ipsecstat.out_inval);
    espoff = m->m_pkthdr.len - plen;

    if (udp_encapsulate) {
        esphlen += sizeof(struct udphdr);
        espoff += sizeof(struct udphdr);
    /*
     * grow the mbuf to accommodate ESP header.
     * before: IP ... payload
     * after:  IP ... [UDP] ESP IV payload
     */
    if (M_LEADINGSPACE(md) < esphlen || (md->m_flags & M_EXT) != 0) {
        MGET(n, M_DONTWAIT, MT_DATA);

        VERIFY(esphlen <= INT32_MAX);
        n->m_len = (int)esphlen;

        m->m_pkthdr.len += esphlen;
        if (udp_encapsulate) {
            udp = mtod(n, struct udphdr *);
            esp = (struct esp *)(void *)((caddr_t)udp + sizeof(struct udphdr));

            esp = mtod(n, struct esp *);

        md->m_len += esphlen;
        md->m_data -= esphlen;
        m->m_pkthdr.len += esphlen;
        esp = mtod(md, struct esp *);
        if (udp_encapsulate) {
            udp = mtod(md, struct udphdr *);
            esp = (struct esp *)(void *)((caddr_t)udp + sizeof(struct udphdr));

            esp = mtod(md, struct esp *);
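    /*
     * NB: the two branches above reflect two placement strategies for the
     * [UDP]/ESP/IV header block: if md has no leading space, or its data
     * lives in an external cluster that might be shared, a fresh mbuf n is
     * allocated and linked in front of md; otherwise the header is carved
     * out of md's own leading space by moving m_data back by esphlen bytes.
     */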
        if (esphlen < (IP_MAXPACKET - ntohs(ip->ip_len))) {
            ip->ip_len = htons(ntohs(ip->ip_len) + (u_short)esphlen);

                "IPv4 ESP output: size exceeds limit\n"));
            IPSEC_STAT_INCREMENT(ipsecstat.out_inval);

        /* total packet length will be computed in ip6_output() */
    /* initialize esp header. */
    if ((sav->flags & SADB_X_EXT_OLD) == 0) {

        nesp = (struct newesp *)esp;
        if (sav->replay[traffic_class]->count == sav->replay[traffic_class]->lastseq) {
            if ((sav->flags & SADB_X_EXT_CYCSEQ) == 0) {
                /* XXX Is it noisy ? */
                ipseclog((LOG_WARNING,
                    "replay counter overflowed. %s\n",
                    ipsec_logsastr(sav)));
                IPSEC_STAT_INCREMENT(stat->out_inval);

                KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_END, 5, 0, 0, 0, 0);

        lck_mtx_lock(sadb_mutex);
        sav->replay[traffic_class]->count++;
        lck_mtx_unlock(sadb_mutex);

        /*
         * XXX sequence number must not be cycled, if the SA is
         * installed by IKE daemon.
         */
        nesp->esp_seq = htonl(sav->replay[traffic_class]->count);
        seq = sav->replay[traffic_class]->count;
    /*
     * find the last mbuf.  make some room for ESP trailer.
     */
    struct ip *ip = NULL;

    if (algo->padbound) {
        padbound = algo->padbound;

    /* ESP packet, including nxthdr field, must be length of 4n */

    extendsiz = padbound - (plen % padbound);
    if (extendsiz == 1) {
        extendsiz = padbound + 1;
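    /*
     * NB: extendsiz covers the pad bytes plus the trailing Pad Length and
     * Next Header octets, so it must be at least 2 while bringing plen up
     * to a multiple of padbound.  For example, with padbound = 4 and
     * plen = 7: 4 - (7 % 4) = 1 leaves no room for the two trailer octets,
     * so it becomes padbound + 1 = 5, and 7 + 5 = 12 stays a multiple of 4.
     */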
        randpadmax = ip4_esp_randpad;

        randpadmax = ip6_esp_randpad;

    if (randpadmax < 0 || plen + extendsiz >= randpadmax) {

        randpadmax = (int)((randpadmax / padbound) * padbound);
        pad = (randpadmax - plen + extendsiz) / padbound;

            pad = (random() % pad) * padbound;

    /*
     * make sure we do not pad too much.
     * MLEN limitation comes from the trailer attachment
     * 256 limitation comes from sequential padding.
     * also, the 1-octet length field in the ESP trailer imposes a
     * limitation (but it is less strict than sequential padding,
     * as the length field does not count the last 2 octets).
     */
    if (extendsiz + pad <= MLEN && extendsiz + pad < 256) {
    /*
     * if M_EXT, the external mbuf data may be shared among
     * two consecutive TCP packets, and it may be unsafe to use the
     */
    if (!(n->m_flags & M_EXT) && extendsiz < M_TRAILINGSPACE(n)) {
        extend = mtod(n, u_char *) + n->m_len;
        n->m_len += (int)extendsiz;
        m->m_pkthdr.len += extendsiz;

        MGET(nn, M_DONTWAIT, MT_DATA);

            ipseclog((LOG_DEBUG, "esp%d_output: can't alloc mbuf",

        extend = mtod(nn, u_char *);
        VERIFY(extendsiz <= INT_MAX);
        nn->m_len = (int)extendsiz;

        m->m_pkthdr.len += extendsiz;
    switch (sav->flags & SADB_X_EXT_PMASK) {
    case SADB_X_EXT_PRAND:
        key_randomfill(extend, extendsiz);

    case SADB_X_EXT_PZERO:
        bzero(extend, extendsiz);

    case SADB_X_EXT_PSEQ:
        for (i = 0; i < extendsiz; i++) {
            extend[i] = (i + 1) & 0xff;
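    /*
     * NB: the three padding policies fill the pad area with random data
     * (SADB_X_EXT_PRAND), zeros (SADB_X_EXT_PZERO), or the monotonically
     * increasing series 1, 2, 3, ... (SADB_X_EXT_PSEQ); the last matches
     * the default self-describing padding described in RFC 2406/4303.
     */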
    if (udp_encapsulate) {
        *nexthdrp = IPPROTO_UDP;

        /* Fill out the UDP header */
        if (sav->natt_encapsulated_src_port != 0) {
            udp->uh_sport = (u_short)sav->natt_encapsulated_src_port;

            udp->uh_sport = htons((u_short)esp_udp_encap_port);

        udp->uh_dport = htons(sav->remote_ike_port);
        // udp->uh_len set later, after all length tweaks are complete

        /* Update last sent so we know if we need to send keepalive */
        sav->natt_last_activity = natt_now;

        *nexthdrp = IPPROTO_ESP;
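    /*
     * NB: recording natt_last_activity here presumably lets the NAT-T
     * keepalive logic (driven elsewhere off natt_now) skip sending
     * keepalive packets while ESP traffic is already flowing, keeping the
     * NAT binding toward the peer open without extra packets.
     */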
    /* initialize esp trailer. */
    esptail = (struct esptail *)
        (mtod(n, u_int8_t *) + n->m_len - sizeof(struct esptail));
    esptail->esp_nxt = nxt;
    VERIFY((extendsiz - 2) <= UINT8_MAX);
    esptail->esp_padlen = (u_int8_t)(extendsiz - 2);
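    /*
     * NB: the Pad Length field counts only the pad bytes themselves; the
     * Pad Length and Next Header octets are excluded (hence extendsiz - 2),
     * matching the ESP trailer layout in RFC 2406/4303.
     */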
    /* modify IP header (for ESP header part only) */

        ip = mtod(m, struct ip *);
        if (extendsiz < (IP_MAXPACKET - ntohs(ip->ip_len))) {
            ip->ip_len = htons(ntohs(ip->ip_len) + (u_short)extendsiz);

                "IPv4 ESP output: size exceeds limit\n"));
            IPSEC_STAT_INCREMENT(ipsecstat.out_inval);

        /* total packet length will be computed in ip6_output() */
    /*
     * pre-compute and cache intermediate key
     */
    error = esp_schedule(algo, sav);

        IPSEC_STAT_INCREMENT(stat->out_inval);

    /*
     * encrypt the packet, based on security association
     * and the algorithm specified.
     */
    if (!algo->encrypt) {
        panic("internal error: no encrypt function");

    KERNEL_DEBUG(DBG_FNC_ENCRYPT | DBG_FUNC_START, 0, 0, 0, 0, 0);
    if ((*algo->encrypt)(m, espoff, plen + extendsiz, sav, algo, ivlen)) {
        /* m is already freed */
        ipseclog((LOG_ERR, "packet encryption failure\n"));
        IPSEC_STAT_INCREMENT(stat->out_inval);

        KERNEL_DEBUG(DBG_FNC_ENCRYPT | DBG_FUNC_END, 1, error, 0, 0, 0);

    KERNEL_DEBUG(DBG_FNC_ENCRYPT | DBG_FUNC_END, 2, 0, 0, 0, 0);
    /*
     * calculate ICV if required.
     */
    u_char authbuf[AH_MAXSUMSIZE] __attribute__((aligned(4)));

    if (algo->finalizeencrypt) {

        if ((*algo->finalizeencrypt)(sav, authbuf, siz)) {
            ipseclog((LOG_ERR, "packet encryption ICV failure\n"));
            IPSEC_STAT_INCREMENT(stat->out_inval);

            KERNEL_DEBUG(DBG_FNC_ENCRYPT | DBG_FUNC_END, 1, error, 0, 0, 0);
    if (!sav->replay[traffic_class]) {

    if (!sav->key_auth) {

    if (sav->key_auth == SADB_AALG_NONE) {

        const struct ah_algorithm *aalgo;

        aalgo = ah_algorithm_lookup(sav->alg_auth);

        siz = ((aalgo->sumsiz)(sav) + 3) & ~(4 - 1);
        if (AH_MAXSUMSIZE < siz) {
            panic("assertion failed for AH_MAXSUMSIZE");
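        /*
         * NB: the expression above rounds the authenticator size up to the
         * next multiple of 4; for example, the 12-byte ICV of HMAC-SHA1-96
         * stays 12, while a 10-byte value would round up to 12.  The panic
         * guards against an ICV larger than authbuf (AH_MAXSUMSIZE bytes).
         */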
        if (esp_auth(m, espoff, m->m_pkthdr.len - espoff, sav, authbuf)) {
            ipseclog((LOG_ERR, "ESP checksum generation failure\n"));

            IPSEC_STAT_INCREMENT(stat->out_inval);

        if (!(n->m_flags & M_EXT) && siz < M_TRAILINGSPACE(n)) { /* XXX */

            m->m_pkthdr.len += siz;
            p = mtod(n, u_char *) + n->m_len - siz;

            MGET(nn, M_DONTWAIT, MT_DATA);

                ipseclog((LOG_DEBUG, "can't alloc mbuf in esp%d_output",

            nn->m_len = (int)siz;

            m->m_pkthdr.len += siz;
            p = mtod(nn, u_char *);

        bcopy(authbuf, p, siz);
        /* modify IP header (for ESP header part only) */

            ip = mtod(m, struct ip *);
            if (siz < (IP_MAXPACKET - ntohs(ip->ip_len))) {
                ip->ip_len = htons(ntohs(ip->ip_len) + (u_short)siz);

                    "IPv4 ESP output: size exceeds limit\n"));
                IPSEC_STAT_INCREMENT(ipsecstat.out_inval);

            /* total packet length will be computed in ip6_output() */
    if (udp_encapsulate) {

            ip = mtod(m, struct ip *);
            udp->uh_ulen = htons((u_int16_t)(ntohs(ip->ip_len) - (IP_VHL_HL(ip->ip_vhl) << 2)));

            ip6 = mtod(m, struct ip6_hdr *);
            VERIFY((plen + siz + extendsiz + esphlen) <= UINT16_MAX);
            udp->uh_ulen = htons((u_int16_t)(plen + siz + extendsiz + esphlen));
            udp->uh_sum = in6_pseudo(&ip6->ip6_src, &ip6->ip6_dst, htonl(ntohs(udp->uh_ulen) + IPPROTO_UDP));
            m->m_pkthdr.csum_flags = (CSUM_UDPIPV6 | CSUM_ZERO_INVERT);
            m->m_pkthdr.csum_data = offsetof(struct udphdr, uh_sum);
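    /*
     * NB: in the IPv6 NAT-T case, uh_sum is seeded with the pseudo-header
     * sum and CSUM_UDPIPV6 | CSUM_ZERO_INVERT is set so the remaining UDP
     * checksum work can be finished later in the output path; IPv6 requires
     * a valid, non-zero UDP checksum, whereas the IPv4 paths above can rely
     * on the checksum being optional (cf. the earlier uh_sum = 0).
     */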
    if (net_mpklog_enabled && sav->sah != NULL &&
        sav->sah->ipsec_if != NULL &&
        (sav->sah->ipsec_if->if_xflags & IFXF_MPK_LOG) &&
        inner_protocol == IPPROTO_TCP) {
        MPKL_ESP_OUTPUT_TCP(esp_mpkl_log_object,
            ntohs(th.th_sport), ntohs(th.th_dport),
            ntohl(th.th_seq), ntohl(th.th_ack),
            inner_payload_len, th.th_flags);

    lck_mtx_lock(sadb_mutex);

        "NULL mbuf after encryption in esp%d_output", afnumber));

    stat->out_esphist[sav->alg_enc]++;
    lck_mtx_unlock(sadb_mutex);
    key_sa_recordxfer(sav, m);
    KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_END, 6, 0, 0, 0, 0);

    KERNEL_DEBUG(DBG_FNC_ESPOUT | DBG_FUNC_END, 7, error, 0, 0, 0);
int
esp4_output(struct mbuf *m, struct secasvar *sav)
{
    struct ip *ip;

    if (m->m_len < sizeof(struct ip)) {
        ipseclog((LOG_DEBUG, "esp4_output: first mbuf too short\n"));

    ip = mtod(m, struct ip *);
    /* XXX assumes that m->m_next points to payload */
    return esp_output(m, &ip->ip_p, m->m_next, AF_INET, sav);
int
esp6_output(struct mbuf *m, u_char *nexthdrp, struct mbuf *md,
    struct secasvar *sav)
{
    if (m->m_len < sizeof(struct ip6_hdr)) {
        ipseclog((LOG_DEBUG, "esp6_output: first mbuf too short\n"));

    return esp_output(m, nexthdrp, md, AF_INET6, sav);