/*
 * Copyright (c) 2008-2016 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/*	$FreeBSD: src/sys/netinet6/esp_rijndael.c,v 1.1.2.1 2001/07/03 11:01:50 ume Exp $	*/
/*	$KAME: esp_rijndael.c,v 1.4 2001/03/02 05:53:05 itojun Exp $	*/
/*
 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the project nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/socket.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/mbuf.h>
#include <sys/mcache.h>

#include <kern/locks.h>

#include <net/if.h>
#include <net/route.h>

#include <netinet6/ipsec.h>
#include <netinet6/esp.h>
#include <netinet6/esp_rijndael.h>

#include <libkern/crypto/aes.h>

#include <netkey/key.h>

#include <net/net_osdep.h>

#define MAX_REALIGN_LEN		2000
#define AES_BLOCKLEN		16
#define ESP_GCM_SALT_LEN	4	// RFC 4106 Section 4
#define ESP_GCM_IVLEN		8
#define ESP_GCM_ALIGN		16
extern lck_mtx_t *sadb_mutex;

typedef struct {
	ccgcm_ctx *decrypt;
	ccgcm_ctx *encrypt;
	ccgcm_ctx ctxt[0];
} aes_gcm_ctx;
size_t
esp_aes_schedlen(
	__unused const struct esp_algorithm *algo)
{
	return sizeof(aes_ctx);
}
int
esp_aes_schedule(
	__unused const struct esp_algorithm *algo,
	struct secasvar *sav)
{
	LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_OWNED);
	aes_ctx *ctx = (aes_ctx *)sav->sched;

	aes_decrypt_key((const unsigned char *) _KEYBUF(sav->key_enc), _KEYLEN(sav->key_enc), &ctx->decrypt);
	aes_encrypt_key((const unsigned char *) _KEYBUF(sav->key_enc), _KEYLEN(sav->key_enc), &ctx->encrypt);

	return 0;
}
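/*
 * Illustrative sketch (not part of the original file): sav->sched is sized
 * by esp_aes_schedlen() and holds a single aes_ctx, i.e. both the decrypt
 * and encrypt key schedules, expanded once per SA so that per-packet work
 * never re-runs AES key setup. A hypothetical caller would look roughly
 * like this (allocator name is an assumption, not the real call site):
 */
#if 0	/* sketch only */
	sav->sched = some_kernel_alloc(esp_aes_schedlen(algo));	/* one aes_ctx */
	if (esp_aes_schedule(algo, sav) != 0) {
		/* key setup failed; tear down the SA */
	}
#endif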
/*
 * The following 2 functions decrypt or encrypt the contents of
 * the mbuf chain passed in, keeping the IP and ESP headers in place,
 * along with the IV.
 * The code attempts to call the crypto code with the largest chunk
 * of data it can, based on the amount of source data in
 * the current source mbuf and the space remaining in the current
 * destination mbuf.  The crypto code requires data to be a multiple
 * of 16 bytes.  A separate buffer is used when a 16-byte block spans
 * mbufs.
 *
 * m = mbuf chain
 * off = offset to ESP header
 *
 * local vars for source:
 * soff = offset from beginning of the chain to the head of the
 *        current mbuf
 * scut = last mbuf that contains headers to be retained
 * scutoff = offset to end of the headers in scut
 * s = the current mbuf
 * sn = current offset to data in s (next source data to process)
 *
 * local vars for dest:
 * d0 = head of the dest mbuf chain
 * d = current dest mbuf
 * dn = current offset in d (next location to store result)
 */
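/*
 * Illustrative sketch (not part of the original file): the shape of the
 * chunking loop described above, reduced to its skeleton. Per iteration the
 * chunk is the largest run of whole 16-byte blocks that fits both the
 * current source mbuf and the current destination mbuf; a block that
 * straddles two source mbufs is first gathered into a small stack buffer.
 * crypt_blocks() is a stand-in for aes_encrypt_cbc()/aes_decrypt_cbc().
 */
#if 0	/* sketch only, uses the local variable names defined above */
	while (soff < m->m_pkthdr.len) {
		if (sn + AES_BLOCKLEN <= s->m_len) {
			sp = mtod(s, u_int8_t *) + sn;		/* contiguous run */
			len = (s->m_len - sn) & ~(AES_BLOCKLEN - 1);
		} else {
			m_copydata(s, sn, AES_BLOCKLEN, (caddr_t)sbuf);
			sp = sbuf;				/* block spans mbufs */
			len = AES_BLOCKLEN;
		}
		if (len > d->m_len - dn) {
			len = d->m_len - dn;			/* clamp to dest space */
		}
		crypt_blocks(sp, len, dptr + dn);
		sn += len;
		dn += len;
	}
#endif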
int
esp_cbc_decrypt_aes(
	struct mbuf *m,
	size_t off,
	struct secasvar *sav,
	const struct esp_algorithm *algo,
	int ivlen)
{
	struct mbuf *s;
	struct mbuf *d, *d0, *dp;
	int soff;	/* offset from the head of chain, to head of this mbuf */
	int sn, dn;	/* offset from the head of the mbuf, to meat */
	size_t ivoff, bodyoff;
	u_int8_t iv[AES_BLOCKLEN] __attribute__((aligned(4))), *dptr;
	u_int8_t sbuf[AES_BLOCKLEN] __attribute__((aligned(4))), *sp, *sp_unaligned, *sp_aligned = NULL;
	struct mbuf *scut;
	int scutoff;
	int i, len;

	if (ivlen != AES_BLOCKLEN) {
		ipseclog((LOG_ERR, "esp_cbc_decrypt %s: "
		    "unsupported ivlen %d\n", algo->name, ivlen));
		m_freem(m);
		return EINVAL;
	}
	/* assumes blocklen == padbound */

	if (sav->flags & SADB_X_EXT_OLD) {
		/* RFC 1827 */
		ivoff = off + sizeof(struct esp);
		bodyoff = off + sizeof(struct esp) + ivlen;
	} else {
		/* RFC 2406 */
		ivoff = off + sizeof(struct newesp);
		bodyoff = off + sizeof(struct newesp) + ivlen;
	}
	if (m->m_pkthdr.len < bodyoff) {
		ipseclog((LOG_ERR, "esp_cbc_decrypt %s: bad len %d/%u\n",
		    algo->name, m->m_pkthdr.len, (u_int32_t)bodyoff));
		m_freem(m);
		return EINVAL;
	}
	if ((m->m_pkthdr.len - bodyoff) % AES_BLOCKLEN) {
		ipseclog((LOG_ERR, "esp_cbc_decrypt %s: "
		    "payload length must be multiple of %d\n",
		    algo->name, AES_BLOCKLEN));
		m_freem(m);
		return EINVAL;
	}

	VERIFY(ivoff <= INT_MAX);

	/* grab iv */
	m_copydata(m, (int)ivoff, ivlen, (caddr_t) iv);

	s = m;
	soff = sn = dn = 0;
	d = d0 = dp = NULL;
	sp = dptr = NULL;

	/* skip header/IV offset */
	while (soff < bodyoff) {
		if (soff + s->m_len > bodyoff) {
			sn = (int)(bodyoff - soff);
			break;
		}
		soff += s->m_len;
		s = s->m_next;
	}
	scut = s;
	scutoff = sn;

	/* skip over empty mbuf */
	while (s && s->m_len == 0) {
		s = s->m_next;
	}

	while (soff < m->m_pkthdr.len) {
		/* source */
		if (sn + AES_BLOCKLEN <= s->m_len) {
			/* body is continuous */
			sp = mtod(s, u_int8_t *) + sn;
			len = s->m_len - sn;
			len -= len % AES_BLOCKLEN; // full blocks only
		} else {
			/* body is non-continuous */
			m_copydata(s, sn, AES_BLOCKLEN, (caddr_t) sbuf);
			sp = sbuf;
			len = AES_BLOCKLEN; // 1 block only in sbuf
		}

		/* destination */
		if (!d || dn + AES_BLOCKLEN > d->m_len) {
			if (d) {
				dp = d;
			}
			MGET(d, M_DONTWAIT, MT_DATA);
			i = m->m_pkthdr.len - (soff + sn);
			if (d && i > MLEN) {
				MCLGET(d, M_DONTWAIT);
				if ((d->m_flags & M_EXT) == 0) {
					d = m_mbigget(d, M_DONTWAIT);
					if ((d->m_flags & M_EXT) == 0) {
						m_free(d);
						d = NULL;
					}
				}
			}
			if (!d) {
				m_freem(m);
				if (d0) {
					m_freem(d0);
				}
				return ENOBUFS;
			}
			if (!d0) {
				d0 = d;
			}
			if (dp) {
				dp->m_next = d;
			}

			// try to make mbuf data aligned
			if (!IPSEC_IS_P2ALIGNED(d->m_data)) {
				m_adj(d, IPSEC_GET_P2UNALIGNED_OFS(d->m_data));
			}

			d->m_len = (int)M_TRAILINGSPACE(d);
			d->m_len -= d->m_len % AES_BLOCKLEN;
			if (d->m_len > i) {
				d->m_len = i;
			}
			dptr = mtod(d, u_int8_t *);
			dn = 0;
		}

		/* adjust len if greater than space available in dest */
		if (len > d->m_len - dn) {
			len = d->m_len - dn;
		}

		/* decrypt */
		// check input pointer alignment and use a separate aligned buffer (if sp is unaligned on 4-byte boundary).
		if (IPSEC_IS_P2ALIGNED(sp)) {
			sp_unaligned = NULL;
		} else {
			sp_unaligned = sp;
			if (len > MAX_REALIGN_LEN) {
				m_freem(m);
				if (d0 != NULL) {
					m_freem(d0);
				}
				if (sp_aligned != NULL) {
					FREE(sp_aligned, M_SECA);
					sp_aligned = NULL;
				}
				return ENOBUFS;
			}
			if (sp_aligned == NULL) {
				sp_aligned = (u_int8_t *)_MALLOC(MAX_REALIGN_LEN, M_SECA, M_DONTWAIT);
				if (sp_aligned == NULL) {
					m_freem(m);
					if (d0 != NULL) {
						m_freem(d0);
					}
					return ENOMEM;
				}
			}
			sp = sp_aligned;
			memcpy(sp, sp_unaligned, len);
		}
		// no need to check output pointer alignment
		aes_decrypt_cbc(sp, iv, len >> 4, dptr + dn,
		    (aes_decrypt_ctx *)(&(((aes_ctx *)sav->sched)->decrypt)));

		// update unaligned pointers
		if (!IPSEC_IS_P2ALIGNED(sp_unaligned)) {
			sp = sp_unaligned;
		}

		/* update offsets */
		sn += len;
		dn += len;

		/* next iv */
		bcopy(sp + len - AES_BLOCKLEN, iv, AES_BLOCKLEN);

		/* find the next source block */
		while (s && sn >= s->m_len) {
			sn -= s->m_len;
			soff += s->m_len;
			s = s->m_next;
		}
	}

	/* free un-needed source mbufs and add dest mbufs to chain */
	m_freem(scut->m_next);
	scut->m_len = scutoff;
	scut->m_next = d0;

	/* free the aligned scratch buffer, if any */
	if (sp_aligned != NULL) {
		FREE(sp_aligned, M_SECA);
		sp_aligned = NULL;
	}

	/* just in case */
	bzero(iv, sizeof(iv));
	bzero(sbuf, sizeof(sbuf));

	return 0;
}
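/*
 * Illustrative note (not part of the original file): CBC decryption chains
 * across chunks because P[i] = D(C[i]) XOR C[i-1]. The bcopy in the loop
 * above therefore saves the last *ciphertext* block of each chunk as the IV
 * for the next call, which is what lets the payload be decrypted piecewise:
 *
 *	aes_decrypt_cbc(sp, iv, len >> 4, dptr + dn, ...);
 *	bcopy(sp + len - AES_BLOCKLEN, iv, AES_BLOCKLEN);
 */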
int
esp_cbc_encrypt_aes(
	struct mbuf *m,
	size_t off,
	__unused size_t plen,
	struct secasvar *sav,
	const struct esp_algorithm *algo,
	int ivlen)
{
	struct mbuf *s;
	struct mbuf *d, *d0, *dp;
	int soff;	/* offset from the head of chain, to head of this mbuf */
	int sn, dn;	/* offset from the head of the mbuf, to meat */
	size_t ivoff, bodyoff;
	u_int8_t *ivp, *dptr, *ivp_unaligned;
	u_int8_t sbuf[AES_BLOCKLEN] __attribute__((aligned(4))), *sp, *sp_unaligned, *sp_aligned = NULL;
	u_int8_t ivp_aligned_buf[AES_BLOCKLEN] __attribute__((aligned(4)));
	struct mbuf *scut;
	int scutoff;
	int i, len;

	if (ivlen != AES_BLOCKLEN) {
		ipseclog((LOG_ERR, "esp_cbc_encrypt %s: "
		    "unsupported ivlen %d\n", algo->name, ivlen));
		m_freem(m);
		return EINVAL;
	}
	/* assumes blocklen == padbound */

	if (sav->flags & SADB_X_EXT_OLD) {
		/* RFC 1827 */
		ivoff = off + sizeof(struct esp);
		bodyoff = off + sizeof(struct esp) + ivlen;
	} else {
		/* RFC 2406 */
		ivoff = off + sizeof(struct newesp);
		bodyoff = off + sizeof(struct newesp) + ivlen;
	}

	VERIFY(ivoff <= INT_MAX);

	/* put iv into the packet */
	m_copyback(m, (int)ivoff, ivlen, sav->iv);
	ivp = (u_int8_t *) sav->iv;

	if (m->m_pkthdr.len < bodyoff) {
		ipseclog((LOG_ERR, "esp_cbc_encrypt %s: bad len %d/%u\n",
		    algo->name, m->m_pkthdr.len, (u_int32_t)bodyoff));
		m_freem(m);
		return EINVAL;
	}
	if ((m->m_pkthdr.len - bodyoff) % AES_BLOCKLEN) {
		ipseclog((LOG_ERR, "esp_cbc_encrypt %s: "
		    "payload length must be multiple of %d\n",
		    algo->name, AES_BLOCKLEN));
		m_freem(m);
		return EINVAL;
	}

	s = m;
	soff = sn = dn = 0;
	d = d0 = dp = NULL;
	sp = dptr = NULL;

	/* skip headers/IV */
	while (soff < bodyoff) {
		if (soff + s->m_len > bodyoff) {
			sn = (int)(bodyoff - soff);
			break;
		}
		soff += s->m_len;
		s = s->m_next;
	}
	scut = s;
	scutoff = sn;

	/* skip over empty mbuf */
	while (s && s->m_len == 0) {
		s = s->m_next;
	}

	while (soff < m->m_pkthdr.len) {
		/* source */
		if (sn + AES_BLOCKLEN <= s->m_len) {
			/* body is continuous */
			sp = mtod(s, u_int8_t *) + sn;
			len = s->m_len - sn;
			len -= len % AES_BLOCKLEN; // full blocks only
		} else {
			/* body is non-continuous */
			m_copydata(s, sn, AES_BLOCKLEN, (caddr_t) sbuf);
			sp = sbuf;
			len = AES_BLOCKLEN; // 1 block only in sbuf
		}

		/* destination */
		if (!d || dn + AES_BLOCKLEN > d->m_len) {
			if (d) {
				dp = d;
			}
			MGET(d, M_DONTWAIT, MT_DATA);
			i = m->m_pkthdr.len - (soff + sn);
			if (d && i > MLEN) {
				MCLGET(d, M_DONTWAIT);
				if ((d->m_flags & M_EXT) == 0) {
					d = m_mbigget(d, M_DONTWAIT);
					if ((d->m_flags & M_EXT) == 0) {
						m_free(d);
						d = NULL;
					}
				}
			}
			if (!d) {
				m_freem(m);
				if (d0) {
					m_freem(d0);
				}
				return ENOBUFS;
			}
			if (!d0) {
				d0 = d;
			}
			if (dp) {
				dp->m_next = d;
			}

			// try to make mbuf data aligned
			if (!IPSEC_IS_P2ALIGNED(d->m_data)) {
				m_adj(d, IPSEC_GET_P2UNALIGNED_OFS(d->m_data));
			}

			d->m_len = (int)M_TRAILINGSPACE(d);
			d->m_len -= d->m_len % AES_BLOCKLEN;
			if (d->m_len > i) {
				d->m_len = i;
			}
			dptr = mtod(d, u_int8_t *);
			dn = 0;
		}

		/* adjust len if greater than space available */
		if (len > d->m_len - dn) {
			len = d->m_len - dn;
		}

		/* encrypt */
		// check input pointer alignment and use a separate aligned buffer (if sp is not aligned on 4-byte boundary).
		if (IPSEC_IS_P2ALIGNED(sp)) {
			sp_unaligned = NULL;
		} else {
			sp_unaligned = sp;
			if (len > MAX_REALIGN_LEN) {
				m_freem(m);
				if (d0 != NULL) {
					m_freem(d0);
				}
				if (sp_aligned != NULL) {
					FREE(sp_aligned, M_SECA);
					sp_aligned = NULL;
				}
				return ENOBUFS;
			}
			if (sp_aligned == NULL) {
				sp_aligned = (u_int8_t *)_MALLOC(MAX_REALIGN_LEN, M_SECA, M_DONTWAIT);
				if (sp_aligned == NULL) {
					m_freem(m);
					if (d0 != NULL) {
						m_freem(d0);
					}
					return ENOMEM;
				}
			}
			sp = sp_aligned;
			memcpy(sp, sp_unaligned, len);
		}
		// check ivp pointer alignment and use a separate aligned buffer (if ivp is not aligned on 4-byte boundary).
		if (IPSEC_IS_P2ALIGNED(ivp)) {
			ivp_unaligned = NULL;
		} else {
			ivp_unaligned = ivp;
			ivp = ivp_aligned_buf;
			memcpy(ivp, ivp_unaligned, AES_BLOCKLEN);
		}
		// no need to check output pointer alignment
		aes_encrypt_cbc(sp, ivp, len >> 4, dptr + dn,
		    (aes_encrypt_ctx *)(&(((aes_ctx *)sav->sched)->encrypt)));

		// update unaligned pointers
		if (!IPSEC_IS_P2ALIGNED(sp_unaligned)) {
			sp = sp_unaligned;
		}
		if (!IPSEC_IS_P2ALIGNED(ivp_unaligned)) {
			ivp = ivp_unaligned;
		}

		/* update offsets */
		sn += len;
		dn += len;

		/* next iv */
		ivp = dptr + dn - AES_BLOCKLEN; // last block encrypted

		/* find the next source block and skip empty mbufs */
		while (s && sn >= s->m_len) {
			sn -= s->m_len;
			soff += s->m_len;
			s = s->m_next;
		}
	}

	/* free un-needed source mbufs and add dest mbufs to chain */
	m_freem(scut->m_next);
	scut->m_len = scutoff;
	scut->m_next = d0;

	/* free the aligned scratch buffer, if any */
	if (sp_aligned != NULL) {
		FREE(sp_aligned, M_SECA);
		sp_aligned = NULL;
	}

	/* just in case */
	bzero(sbuf, sizeof(sbuf));
	key_sa_stir_iv(sav);

	return 0;
}
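/*
 * Illustrative sketch (not part of the original file): the realignment dance
 * both CBC routines use. The AES primitives want 4-byte-aligned input, so an
 * unaligned source pointer is bounced once through a lazily allocated
 * MAX_REALIGN_LEN scratch buffer that is reused across loop iterations and
 * freed only after the whole chain has been processed.
 */
#if 0	/* sketch only, error handling elided */
	if (!IPSEC_IS_P2ALIGNED(sp)) {
		sp_unaligned = sp;
		if (sp_aligned == NULL) {
			sp_aligned = (u_int8_t *)_MALLOC(MAX_REALIGN_LEN,
			    M_SECA, M_DONTWAIT);
		}
		sp = sp_aligned;
		memcpy(sp, sp_unaligned, len);	/* crypto reads the aligned copy */
	}
#endif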
size_t
esp_gcm_schedlen(
	__unused const struct esp_algorithm *algo)
{
	return sizeof(aes_gcm_ctx) + aes_decrypt_get_ctx_size_gcm() + aes_encrypt_get_ctx_size_gcm() + ESP_GCM_ALIGN;
}
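/*
 * Illustrative note (not part of the original file): the schedule buffer is
 * over-allocated by ESP_GCM_ALIGN so that P2ROUNDUP(sav->sched, ESP_GCM_ALIGN)
 * can carve out an aligned aes_gcm_ctx, with the corecrypto decrypt context
 * first in ctxt[] and the encrypt context immediately after it:
 *
 *	sav->sched: [pad][aes_gcm_ctx][decrypt ccgcm_ctx][encrypt ccgcm_ctx]
 */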
int
esp_gcm_schedule( __unused const struct esp_algorithm *algo,
    struct secasvar *sav)
{
	LCK_MTX_ASSERT(sadb_mutex, LCK_MTX_ASSERT_OWNED);
	aes_gcm_ctx *ctx = (aes_gcm_ctx *)P2ROUNDUP(sav->sched, ESP_GCM_ALIGN);
	u_int ivlen = sav->ivlen;
	unsigned char nonce[ESP_GCM_SALT_LEN + ivlen];
	int rc;

	ctx->decrypt = &ctx->ctxt[0];
	ctx->encrypt = &ctx->ctxt[aes_decrypt_get_ctx_size_gcm() / sizeof(ccgcm_ctx)];

	rc = aes_decrypt_key_gcm((const unsigned char *) _KEYBUF(sav->key_enc), _KEYLEN(sav->key_enc) - ESP_GCM_SALT_LEN, ctx->decrypt);
	if (rc) {
		return rc;
	}

	bzero(nonce, ESP_GCM_SALT_LEN + ivlen);
	memcpy(nonce, _KEYBUF(sav->key_enc) + _KEYLEN(sav->key_enc) - ESP_GCM_SALT_LEN, ESP_GCM_SALT_LEN);
	memcpy(nonce + ESP_GCM_SALT_LEN, sav->iv, ivlen);

	rc = aes_encrypt_key_with_iv_gcm((const unsigned char *) _KEYBUF(sav->key_enc), _KEYLEN(sav->key_enc) - ESP_GCM_SALT_LEN, nonce, ctx->encrypt);
	bzero(nonce, ESP_GCM_SALT_LEN + ivlen);
	if (rc) {
		return rc;
	}

	rc = aes_encrypt_reset_gcm(ctx->encrypt);
	if (rc) {
		return rc;
	}

	return rc;
}
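/*
 * Illustrative note (not part of the original file): the RFC 4106 nonce
 * built above is the 4-byte salt taken from the tail of the negotiated key
 * followed by the 8-byte per-packet IV, 12 bytes total:
 *
 *	nonce[0..3]  = salt (last ESP_GCM_SALT_LEN bytes of key_enc)
 *	nonce[4..11] = IV   (ESP_GCM_IVLEN bytes, advanced per packet)
 */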
int
esp_gcm_encrypt_finalize(struct secasvar *sav,
    unsigned char *tag, size_t tag_bytes)
{
	aes_gcm_ctx *ctx = (aes_gcm_ctx *)P2ROUNDUP(sav->sched, ESP_GCM_ALIGN);
	return aes_encrypt_finalize_gcm(tag, tag_bytes, ctx->encrypt);
}

int
esp_gcm_decrypt_finalize(struct secasvar *sav,
    unsigned char *tag, size_t tag_bytes)
{
	aes_gcm_ctx *ctx = (aes_gcm_ctx *)P2ROUNDUP(sav->sched, ESP_GCM_ALIGN);
	return aes_decrypt_finalize_gcm(tag, tag_bytes, ctx->decrypt);
}
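/*
 * Illustrative sketch (not part of the original file): how a hypothetical
 * caller would finish a GCM pass. On the encrypt side the computed ICV is
 * written into tag; on the decrypt side the caller is assumed (per the
 * corecrypto GCM API) to learn of an authentication mismatch from a nonzero
 * return or a subsequent tag comparison.
 */
#if 0	/* sketch only; 16-byte tag chosen for illustration */
	unsigned char tag[16];
	if (esp_gcm_encrypt_finalize(sav, tag, sizeof(tag)) != 0) {
		/* finalize failed; drop the packet */
	}
	/* receiver side: tag holds the ICV carried in the packet */
	if (esp_gcm_decrypt_finalize(sav, tag, sizeof(tag)) != 0) {
		/* authentication failure; drop the packet */
	}
#endif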
int
esp_gcm_encrypt_aes(
	struct mbuf *m,
	size_t off,
	__unused size_t plen,
	struct secasvar *sav,
	const struct esp_algorithm *algo __unused,
	int ivlen)
{
	struct mbuf *s;
	struct mbuf *d, *d0, *dp;
	int soff;	/* offset from the head of chain, to head of this mbuf */
	int sn, dn;	/* offset from the head of the mbuf, to meat */
	size_t ivoff, bodyoff;
	u_int8_t *dptr, *sp, *sp_unaligned, *sp_aligned = NULL;
	aes_gcm_ctx *ctx;
	struct mbuf *scut;
	int scutoff;
	int i, len;
	unsigned char nonce[ESP_GCM_SALT_LEN + ivlen];

	if (ivlen != ESP_GCM_IVLEN) {
		ipseclog((LOG_ERR, "%s: unsupported ivlen %d\n", __FUNCTION__, ivlen));
		m_freem(m);
		return EINVAL;
	}

	if (sav->flags & SADB_X_EXT_OLD) {
		/* RFC 1827 */
		ivoff = off + sizeof(struct esp);
		bodyoff = off + sizeof(struct esp) + ivlen;
	} else {
		/* RFC 2406 */
		ivoff = off + sizeof(struct newesp);
		bodyoff = off + sizeof(struct newesp) + ivlen;
	}

	bzero(nonce, ESP_GCM_SALT_LEN + ivlen);
	/* generate new iv */
	ctx = (aes_gcm_ctx *)P2ROUNDUP(sav->sched, ESP_GCM_ALIGN);

	if (aes_encrypt_reset_gcm(ctx->encrypt)) {
		ipseclog((LOG_ERR, "%s: gcm reset failure\n", __FUNCTION__));
		m_freem(m);
		return EINVAL;
	}

	if (aes_encrypt_inc_iv_gcm((unsigned char *)nonce, ctx->encrypt)) {
		ipseclog((LOG_ERR, "%s: iv generation failure\n", __FUNCTION__));
		m_freem(m);
		return EINVAL;
	}

	VERIFY(ivoff <= INT_MAX);

	/*
	 * The IV is now generated within corecrypto and
	 * is provided to ESP using aes_encrypt_inc_iv_gcm().
	 * This makes sav->iv redundant; it is no longer
	 * used in GCM operations. But we still copy the IV
	 * back to sav->iv to ensure that any future code reading
	 * this value will get the latest IV.
	 */
	memcpy(sav->iv, (nonce + ESP_GCM_SALT_LEN), ivlen);
	m_copyback(m, (int)ivoff, ivlen, sav->iv);
	bzero(nonce, ESP_GCM_SALT_LEN + ivlen);

	if (m->m_pkthdr.len < bodyoff) {
		ipseclog((LOG_ERR, "%s: bad len %d/%u\n", __FUNCTION__,
		    m->m_pkthdr.len, (u_int32_t)bodyoff));
		m_freem(m);
		return EINVAL;
	}

	VERIFY(off <= INT_MAX);

	/* Set Additional Authentication Data */
	if (!(sav->flags & SADB_X_EXT_OLD)) {
		struct newesp esp;
		m_copydata(m, (int)off, sizeof(esp), (caddr_t) &esp);
		if (aes_encrypt_aad_gcm((unsigned char*)&esp, sizeof(esp), ctx->encrypt)) {
			ipseclog((LOG_ERR, "%s: packet encryption AAD failure\n", __FUNCTION__));
			m_freem(m);
			return EINVAL;
		}
	}

	s = m;
	soff = sn = dn = 0;
	d = d0 = dp = NULL;
	sp = dptr = NULL;

	/* skip headers/IV */
	while (soff < bodyoff) {
		if (soff + s->m_len > bodyoff) {
			sn = (int)(bodyoff - soff);
			break;
		}
		soff += s->m_len;
		s = s->m_next;
	}
	scut = s;
	scutoff = sn;

	/* skip over empty mbuf */
	while (s && s->m_len == 0) {
		s = s->m_next;
	}

	while (soff < m->m_pkthdr.len) {
		/* source */
		sp = mtod(s, u_int8_t *) + sn;
		len = s->m_len - sn;

		/* destination */
		if (!d || (dn + len > d->m_len)) {
			if (d) {
				dp = d;
			}
			MGET(d, M_DONTWAIT, MT_DATA);
			i = m->m_pkthdr.len - (soff + sn);
			if (d && i > MLEN) {
				MCLGET(d, M_DONTWAIT);
				if ((d->m_flags & M_EXT) == 0) {
					d = m_mbigget(d, M_DONTWAIT);
					if ((d->m_flags & M_EXT) == 0) {
						m_free(d);
						d = NULL;
					}
				}
			}
			if (!d) {
				m_freem(m);
				if (d0) {
					m_freem(d0);
				}
				return ENOBUFS;
			}
			if (!d0) {
				d0 = d;
			}
			if (dp) {
				dp->m_next = d;
			}

			// try to make mbuf data aligned
			if (!IPSEC_IS_P2ALIGNED(d->m_data)) {
				m_adj(d, IPSEC_GET_P2UNALIGNED_OFS(d->m_data));
			}

			d->m_len = (int)M_TRAILINGSPACE(d);
			if (d->m_len > i) {
				d->m_len = i;
			}
			dptr = mtod(d, u_int8_t *);
			dn = 0;
		}

		/* adjust len if greater than space available */
		if (len > d->m_len - dn) {
			len = d->m_len - dn;
		}

		/* encrypt */
		// check input pointer alignment and use a separate aligned buffer (if sp is not aligned on 4-byte boundary).
		if (IPSEC_IS_P2ALIGNED(sp)) {
			sp_unaligned = NULL;
		} else {
			sp_unaligned = sp;
			if (len > MAX_REALIGN_LEN) {
				m_freem(m);
				if (d0 != NULL) {
					m_freem(d0);
				}
				if (sp_aligned != NULL) {
					FREE(sp_aligned, M_SECA);
					sp_aligned = NULL;
				}
				return ENOBUFS;
			}
			if (sp_aligned == NULL) {
				sp_aligned = (u_int8_t *)_MALLOC(MAX_REALIGN_LEN, M_SECA, M_DONTWAIT);
				if (sp_aligned == NULL) {
					m_freem(m);
					if (d0 != NULL) {
						m_freem(d0);
					}
					return ENOMEM;
				}
			}
			sp = sp_aligned;
			memcpy(sp, sp_unaligned, len);
		}

		if (aes_encrypt_gcm(sp, len, dptr + dn, ctx->encrypt)) {
			ipseclog((LOG_ERR, "%s: failed to encrypt\n", __FUNCTION__));
			m_freem(m);
			if (d0 != NULL) {
				m_freem(d0);
			}
			if (sp_aligned != NULL) {
				FREE(sp_aligned, M_SECA);
				sp_aligned = NULL;
			}
			return EINVAL;
		}

		// update unaligned pointers
		if (!IPSEC_IS_P2ALIGNED(sp_unaligned)) {
			sp = sp_unaligned;
		}

		/* update offsets */
		sn += len;
		dn += len;

		/* find the next source block and skip empty mbufs */
		while (s && sn >= s->m_len) {
			sn -= s->m_len;
			soff += s->m_len;
			s = s->m_next;
		}
	}

	/* free un-needed source mbufs and add dest mbufs to chain */
	m_freem(scut->m_next);
	scut->m_len = scutoff;
	scut->m_next = d0;

	/* free the aligned scratch buffer, if any */
	if (sp_aligned != NULL) {
		FREE(sp_aligned, M_SECA);
		sp_aligned = NULL;
	}

	return 0;
}
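/*
 * Illustrative note (not part of the original file): for RFC 4106 the
 * additional authenticated data is the plaintext ESP header, i.e. the SPI
 * and sequence number that precede the IV. That is why the code above copies
 * sizeof(struct newesp) bytes at the ESP offset and feeds them to
 * aes_encrypt_aad_gcm() before any payload is processed.
 */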
int
esp_gcm_decrypt_aes(
	struct mbuf *m,
	size_t off,
	struct secasvar *sav,
	const struct esp_algorithm *algo __unused,
	int ivlen)
{
	struct mbuf *s;
	struct mbuf *d, *d0, *dp;
	int soff;	/* offset from the head of chain, to head of this mbuf */
	int sn, dn;	/* offset from the head of the mbuf, to meat */
	size_t ivoff, bodyoff;
	u_int8_t iv[ESP_GCM_IVLEN] __attribute__((aligned(4))), *dptr;
	u_int8_t *sp, *sp_unaligned, *sp_aligned = NULL;
	aes_gcm_ctx *ctx;
	struct mbuf *scut;
	int scutoff;
	int i, len;
	unsigned char nonce[ESP_GCM_SALT_LEN + ivlen];

	if (ivlen != ESP_GCM_IVLEN) {
		ipseclog((LOG_ERR, "%s: unsupported ivlen %d\n", __FUNCTION__, ivlen));
		m_freem(m);
		return EINVAL;
	}

	if (sav->flags & SADB_X_EXT_OLD) {
		/* RFC 1827 */
		ivoff = off + sizeof(struct esp);
		bodyoff = off + sizeof(struct esp) + ivlen;
	} else {
		/* RFC 2406 */
		ivoff = off + sizeof(struct newesp);
		bodyoff = off + sizeof(struct newesp) + ivlen;
	}

	if (m->m_pkthdr.len < bodyoff) {
		ipseclog((LOG_ERR, "%s: bad len %d/%u\n", __FUNCTION__,
		    m->m_pkthdr.len, (u_int32_t)bodyoff));
		m_freem(m);
		return EINVAL;
	}

	VERIFY(ivoff <= INT_MAX);

	/* grab iv */
	m_copydata(m, (int)ivoff, ivlen, (caddr_t) iv);

	/* Set IV: salt from the SA followed by the packet IV */
	memcpy(nonce, _KEYBUF(sav->key_enc) + _KEYLEN(sav->key_enc) - ESP_GCM_SALT_LEN, ESP_GCM_SALT_LEN);
	memcpy(nonce + ESP_GCM_SALT_LEN, iv, ivlen);

	ctx = (aes_gcm_ctx *)P2ROUNDUP(sav->sched, ESP_GCM_ALIGN);
	if (aes_decrypt_set_iv_gcm(nonce, sizeof(nonce), ctx->decrypt)) {
		ipseclog((LOG_ERR, "%s: failed to set IV\n", __FUNCTION__));
		m_freem(m);
		bzero(nonce, sizeof(nonce));
		return EINVAL;
	}
	bzero(nonce, sizeof(nonce));

	VERIFY(off <= INT_MAX);

	/* Set Additional Authentication Data */
	if (!(sav->flags & SADB_X_EXT_OLD)) {
		struct newesp esp;
		m_copydata(m, (int)off, sizeof(esp), (caddr_t) &esp);
		if (aes_decrypt_aad_gcm((unsigned char*)&esp, sizeof(esp), ctx->decrypt)) {
			ipseclog((LOG_ERR, "%s: packet decryption AAD failure\n", __FUNCTION__));
			m_freem(m);
			return EINVAL;
		}
	}

	s = m;
	soff = sn = dn = 0;
	d = d0 = dp = NULL;
	sp = dptr = NULL;

	/* skip header/IV offset */
	while (soff < bodyoff) {
		if (soff + s->m_len > bodyoff) {
			sn = (int)(bodyoff - soff);
			break;
		}
		soff += s->m_len;
		s = s->m_next;
	}
	scut = s;
	scutoff = sn;

	/* skip over empty mbuf */
	while (s && s->m_len == 0) {
		s = s->m_next;
	}

	while (soff < m->m_pkthdr.len) {
		/* source */
		sp = mtod(s, u_int8_t *) + sn;
		len = s->m_len - sn;

		/* destination */
		if (!d || (dn + len > d->m_len)) {
			if (d) {
				dp = d;
			}
			MGET(d, M_DONTWAIT, MT_DATA);
			i = m->m_pkthdr.len - (soff + sn);
			if (d && i > MLEN) {
				MCLGET(d, M_DONTWAIT);
				if ((d->m_flags & M_EXT) == 0) {
					d = m_mbigget(d, M_DONTWAIT);
					if ((d->m_flags & M_EXT) == 0) {
						m_free(d);
						d = NULL;
					}
				}
			}
			if (!d) {
				m_freem(m);
				if (d0) {
					m_freem(d0);
				}
				return ENOBUFS;
			}
			if (!d0) {
				d0 = d;
			}
			if (dp) {
				dp->m_next = d;
			}

			// try to make mbuf data aligned
			if (!IPSEC_IS_P2ALIGNED(d->m_data)) {
				m_adj(d, IPSEC_GET_P2UNALIGNED_OFS(d->m_data));
			}

			d->m_len = (int)M_TRAILINGSPACE(d);
			if (d->m_len > i) {
				d->m_len = i;
			}
			dptr = mtod(d, u_int8_t *);
			dn = 0;
		}

		/* adjust len if greater than space available in dest */
		if (len > d->m_len - dn) {
			len = d->m_len - dn;
		}

		/* decrypt */
		// check input pointer alignment and use a separate aligned buffer (if sp is unaligned on 4-byte boundary).
		if (IPSEC_IS_P2ALIGNED(sp)) {
			sp_unaligned = NULL;
		} else {
			sp_unaligned = sp;
			if (len > MAX_REALIGN_LEN) {
				m_freem(m);
				if (d0 != NULL) {
					m_freem(d0);
				}
				if (sp_aligned != NULL) {
					FREE(sp_aligned, M_SECA);
					sp_aligned = NULL;
				}
				return ENOBUFS;
			}
			if (sp_aligned == NULL) {
				sp_aligned = (u_int8_t *)_MALLOC(MAX_REALIGN_LEN, M_SECA, M_DONTWAIT);
				if (sp_aligned == NULL) {
					m_freem(m);
					if (d0 != NULL) {
						m_freem(d0);
					}
					return ENOMEM;
				}
			}
			sp = sp_aligned;
			memcpy(sp, sp_unaligned, len);
		}
		// no need to check output pointer alignment

		if (aes_decrypt_gcm(sp, len, dptr + dn, ctx->decrypt)) {
			ipseclog((LOG_ERR, "%s: failed to decrypt\n", __FUNCTION__));
			m_freem(m);
			if (d0 != NULL) {
				m_freem(d0);
			}
			if (sp_aligned != NULL) {
				FREE(sp_aligned, M_SECA);
				sp_aligned = NULL;
			}
			return EINVAL;
		}

		// update unaligned pointers
		if (!IPSEC_IS_P2ALIGNED(sp_unaligned)) {
			sp = sp_unaligned;
		}

		/* update offsets */
		sn += len;
		dn += len;

		/* find the next source block */
		while (s && sn >= s->m_len) {
			sn -= s->m_len;
			soff += s->m_len;
			s = s->m_next;
		}
	}

	/* free un-needed source mbufs and add dest mbufs to chain */
	m_freem(scut->m_next);
	scut->m_len = scutoff;
	scut->m_next = d0;

	/* free the aligned scratch buffer, if any */
	if (sp_aligned != NULL) {
		FREE(sp_aligned, M_SECA);
		sp_aligned = NULL;
	}

	/* just in case */
	bzero(iv, sizeof(iv));

	return 0;
}
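/*
 * Illustrative sketch (not part of the original file): the per-packet GCM
 * decrypt sequence this file implements, in call order. All calls below are
 * the ones used above; only the surrounding control flow is elided.
 */
#if 0	/* sketch only */
	aes_decrypt_set_iv_gcm(nonce, sizeof(nonce), ctx->decrypt);	/* salt || IV */
	aes_decrypt_aad_gcm((unsigned char *)&esp, sizeof(esp), ctx->decrypt);	/* SPI + seq */
	aes_decrypt_gcm(sp, len, dptr + dn, ctx->decrypt);	/* ciphertext -> plaintext */
	esp_gcm_decrypt_finalize(sav, tag, tag_bytes);		/* check the ICV */
#endif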