/* bsd/netinet6/esp_rijndael.c */
/*
 * Copyright (c) 2008 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
/* $FreeBSD: src/sys/netinet6/esp_rijndael.c,v 1.1.2.1 2001/07/03 11:01:50 ume Exp $ */
/* $KAME: esp_rijndael.c,v 1.4 2001/03/02 05:53:05 itojun Exp $ */
/*
 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the project nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/socket.h>
#include <sys/queue.h>
#include <sys/syslog.h>
#include <sys/mbuf.h>
#include <sys/mcache.h>

#include <kern/locks.h>

#include <net/route.h>

#include <netinet6/ipsec.h>
#include <netinet6/esp.h>
#include <netinet6/esp_rijndael.h>

#include <libkern/crypto/aes.h>

#include <netkey/key.h>

#include <net/net_osdep.h>

#define MAX_REALIGN_LEN 2000    /* max length copied into a temporary aligned buffer */
#define AES_BLOCKLEN 16         /* AES cipher block size in bytes */
extern lck_mtx_t *sadb_mutex;
int
esp_aes_schedlen(
    __unused const struct esp_algorithm *algo)
{
    return sizeof(aes_ctx);
}
int
esp_aes_schedule(
    __unused const struct esp_algorithm *algo,
    struct secasvar *sav)
{
    lck_mtx_assert(sadb_mutex, LCK_MTX_ASSERT_OWNED);
    aes_ctx *ctx = (aes_ctx *)sav->sched;

    /* expand the SA's encryption key into decrypt and encrypt schedules */
    aes_decrypt_key((const unsigned char *) _KEYBUF(sav->key_enc), _KEYLEN(sav->key_enc), &ctx->decrypt);
    aes_encrypt_key((const unsigned char *) _KEYBUF(sav->key_enc), _KEYLEN(sav->key_enc), &ctx->encrypt);

    return 0;
}
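/*
 * esp_aes_schedlen() and esp_aes_schedule() back the schedlen/schedule hooks
 * of this cipher's esp_algorithm entry: the former reports how much space the
 * key engine should reserve for sav->sched, and the latter fills that space
 * with an aes_ctx carrying both expanded key schedules, so the per-packet
 * encrypt/decrypt paths below can reuse them without re-expanding the raw key.
 */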
/*
 * The following 2 functions decrypt or encrypt the contents of
 * the mbuf chain passed in, keeping the IP and ESP headers in place,
 * along with the IV.
 * The code attempts to call the crypto code with the largest chunk
 * of data it can based on the amount of source data in
 * the current source mbuf and the space remaining in the current
 * destination mbuf.  The crypto code requires data to be a multiple
 * of 16 bytes.  A separate buffer is used when a 16 byte block spans
 * mbufs.
 *
 * m = mbuf chain
 * off = offset to ESP header
 *
 * local vars for source:
 * soff = offset from beginning of the chain to the head of the
 *        current source mbuf
 * scut = last mbuf that contains headers to be retained
 * scutoff = offset to end of the headers in scut
 * s = the current mbuf
 * sn = current offset to data in s (next source data to process)
 *
 * local vars for dest:
 * d = current destination mbuf
 * dn = current offset in d (next location to store result)
 */
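/*
 * A minimal userland-style sketch of the chunking strategy described above,
 * kept out of the build with #if 0: it walks an array of segments (standing
 * in for an mbuf chain), hands a cipher stand-in the largest contiguous run
 * of whole 16-byte blocks it can, and gathers a block into a small bounce
 * buffer when it straddles segments.  The names segment, process_blocks and
 * chunk_walk are hypothetical and exist only for this illustration; the real
 * work below is done against mbufs with aes_decrypt_cbc/aes_encrypt_cbc.
 */
#if 0
/* Illustrative only; not part of esp_rijndael.c proper. */
#include <stdint.h>
#include <stddef.h>
#include <string.h>

#define EXAMPLE_BLOCKLEN 16

struct segment {
    const uint8_t *data;
    size_t len;
};

/* Cipher stand-in: consume 'nblocks' contiguous 16-byte blocks at 'in'. */
static void
process_blocks(const uint8_t *in, size_t nblocks)
{
    (void)in;
    (void)nblocks;
}

static void
chunk_walk(const struct segment *seg, size_t nseg)
{
    uint8_t bounce[EXAMPLE_BLOCKLEN];   /* plays the role of sbuf */
    size_t si = 0, sn = 0;              /* current segment and offset within it */

    while (si < nseg) {
        size_t avail = seg[si].len - sn;

        if (avail >= EXAMPLE_BLOCKLEN) {
            /* whole blocks are contiguous: pass as many as possible at once */
            size_t nblocks = avail / EXAMPLE_BLOCKLEN;
            process_blocks(seg[si].data + sn, nblocks);
            sn += nblocks * EXAMPLE_BLOCKLEN;
        } else {
            /* a block spans segments: gather 16 bytes into the bounce buffer */
            size_t got = 0;
            while (got < EXAMPLE_BLOCKLEN && si < nseg) {
                size_t take = seg[si].len - sn;
                if (take > EXAMPLE_BLOCKLEN - got)
                    take = EXAMPLE_BLOCKLEN - got;
                memcpy(bounce + got, seg[si].data + sn, take);
                got += take;
                sn += take;
                if (sn == seg[si].len) {
                    si++;
                    sn = 0;
                }
            }
            if (got == EXAMPLE_BLOCKLEN)
                process_blocks(bounce, 1);
        }

        /* advance past any exhausted (or empty) segments */
        while (si < nseg && sn >= seg[si].len) {
            sn -= seg[si].len;
            si++;
        }
    }
}
#endif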
int
esp_cbc_decrypt_aes(m, off, sav, algo, ivlen)
    struct mbuf *m;
    size_t off;
    struct secasvar *sav;
    const struct esp_algorithm *algo;
    int ivlen;
{
    struct mbuf *s;
    struct mbuf *d, *d0, *dp;
    int soff;   /* offset from the head of chain, to head of this mbuf */
    int sn, dn; /* offset from the head of the mbuf, to meat */
    size_t ivoff, bodyoff;
    u_int8_t iv[AES_BLOCKLEN] __attribute__((aligned(4))), *dptr;
    u_int8_t sbuf[AES_BLOCKLEN] __attribute__((aligned(4))), *sp, *sp_unaligned, *sp_aligned = NULL;
    struct mbuf *scut;
    int scutoff;
    int i, len;
    if (ivlen != AES_BLOCKLEN) {
        ipseclog((LOG_ERR, "esp_cbc_decrypt %s: "
            "unsupported ivlen %d\n", algo->name, ivlen));
        m_freem(m);
        return EINVAL;
    }
    if (sav->flags & SADB_X_EXT_OLD) {
        /* RFC 1827 */
        ivoff = off + sizeof(struct esp);
        bodyoff = off + sizeof(struct esp) + ivlen;
    } else {
        /* RFC 2406 */
        ivoff = off + sizeof(struct newesp);
        bodyoff = off + sizeof(struct newesp) + ivlen;
    }
    if (m->m_pkthdr.len < bodyoff) {
        ipseclog((LOG_ERR, "esp_cbc_decrypt %s: bad len %d/%lu\n",
            algo->name, m->m_pkthdr.len, (u_int32_t)bodyoff));
        m_freem(m);
        return EINVAL;
    }
    if ((m->m_pkthdr.len - bodyoff) % AES_BLOCKLEN) {
        ipseclog((LOG_ERR, "esp_cbc_decrypt %s: "
            "payload length must be multiple of %d\n",
            algo->name, AES_BLOCKLEN));
        m_freem(m);
        return EINVAL;
    }
    /* grab the iv from the packet */
    m_copydata(m, ivoff, ivlen, (caddr_t) iv);
    s = m;
    soff = sn = dn = 0;
    d = d0 = dp = NULL;

    /* skip header/IV offset */
    while (soff < bodyoff) {
        if (soff + s->m_len > bodyoff) {
            sn = bodyoff - soff;
            break;
        }

        soff += s->m_len;
        s = s->m_next;
    }
    scut = s;
    scutoff = sn;

    /* skip over empty mbuf */
    while (s && s->m_len == 0)
        s = s->m_next;

    while (soff < m->m_pkthdr.len) {
        if (sn + AES_BLOCKLEN <= s->m_len) {
            /* body is continuous */
            sp = mtod(s, u_int8_t *) + sn;
            len = s->m_len - sn;
            len -= len % AES_BLOCKLEN;  // full blocks only
        } else {
            /* body is non-continuous */
            m_copydata(s, sn, AES_BLOCKLEN, (caddr_t) sbuf);
            sp = sbuf;
            len = AES_BLOCKLEN;         // 1 block only in sbuf
        }
        /* get a new destination mbuf when the current one is out of block-sized space */
        if (!d || dn + AES_BLOCKLEN > d->m_len) {
            if (d)
                dp = d;
            MGET(d, M_DONTWAIT, MT_DATA);
            i = m->m_pkthdr.len - (soff + sn);
            if (d && i > MLEN) {
                MCLGET(d, M_DONTWAIT);
                if ((d->m_flags & M_EXT) == 0) {
                    d = m_mbigget(d, M_DONTWAIT);
                    if ((d->m_flags & M_EXT) == 0) {
                        m_free(d);
                        d = NULL;
                    }
                }
            }
            if (!d) {
                m_freem(m);
                if (d0)
                    m_freem(d0);
                return ENOBUFS;
            }
            if (!d0)
                d0 = d;
            if (dp)
                dp->m_next = d;

            // try to make mbuf data aligned
            if (!IPSEC_IS_P2ALIGNED(d->m_data)) {
                m_adj(d, IPSEC_GET_P2UNALIGNED_OFS(d->m_data));
            }

            d->m_len = M_TRAILINGSPACE(d);
            d->m_len -= d->m_len % AES_BLOCKLEN;
            if (d->m_len > i)
                d->m_len = i;
            dptr = mtod(d, u_int8_t *);
            dn = 0;
        }
        /* adjust len if greater than space available in dest */
        if (len > d->m_len - dn)
            len = d->m_len - dn;

        // check input pointer alignment and use a separate aligned buffer (if sp is unaligned on 4-byte boundary).
        if (IPSEC_IS_P2ALIGNED(sp)) {
            sp_unaligned = NULL;
        } else {
            sp_unaligned = sp;
            if (len > MAX_REALIGN_LEN) {
                return ENOBUFS;
            }
            if (sp_aligned == NULL) {
                sp_aligned = (u_int8_t *)_MALLOC(MAX_REALIGN_LEN, M_SECA, M_DONTWAIT);
                if (sp_aligned == NULL)
                    return ENOMEM;
            }
            sp = sp_aligned;
            memcpy(sp, sp_unaligned, len);
        }
        // no need to check output pointer alignment
        aes_decrypt_cbc(sp, iv, len >> 4, dptr + dn,
            (aes_decrypt_ctx *)(&(((aes_ctx *)sav->sched)->decrypt)));

        // update unaligned pointers
        if (!IPSEC_IS_P2ALIGNED(sp_unaligned)) {
            sp = sp_unaligned;
        }

        /* update offsets */
        sn += len;
        dn += len;

        // next iv: the last ciphertext block just consumed
        bcopy(sp + len - AES_BLOCKLEN, iv, AES_BLOCKLEN);
        /* find the next source block */
        while (s && sn >= s->m_len) {
            sn -= s->m_len;
            soff += s->m_len;
            s = s->m_next;
        }
    }

    /* free un-needed source mbufs and add dest mbufs to chain */
    m_freem(scut->m_next);
    scut->m_len = scutoff;
    scut->m_next = d0;

    // free the temporary realignment buffer, if one was allocated
    if (sp_aligned != NULL) {
        FREE(sp_aligned, M_SECA);
        sp_aligned = NULL;
    }

    /* just in case */
    bzero(iv, sizeof(iv));
    bzero(sbuf, sizeof(sbuf));

    return 0;
}
int
esp_cbc_encrypt_aes(
    struct mbuf *m,
    size_t off,
    __unused size_t plen,
    struct secasvar *sav,
    const struct esp_algorithm *algo,
    int ivlen)
{
    struct mbuf *s;
    struct mbuf *d, *d0, *dp;
    int soff;   /* offset from the head of chain, to head of this mbuf */
    int sn, dn; /* offset from the head of the mbuf, to meat */
    size_t ivoff, bodyoff;
    u_int8_t *ivp, *dptr, *ivp_unaligned;
    u_int8_t sbuf[AES_BLOCKLEN] __attribute__((aligned(4))), *sp, *sp_unaligned, *sp_aligned = NULL;
    u_int8_t ivp_aligned_buf[AES_BLOCKLEN] __attribute__((aligned(4)));
    struct mbuf *scut;
    int scutoff;
    int i, len;
    if (ivlen != AES_BLOCKLEN) {
        ipseclog((LOG_ERR, "esp_cbc_encrypt %s: "
            "unsupported ivlen %d\n", algo->name, ivlen));
        m_freem(m);
        return EINVAL;
    }
    if (sav->flags & SADB_X_EXT_OLD) {
        /* RFC 1827 */
        ivoff = off + sizeof(struct esp);
        bodyoff = off + sizeof(struct esp) + ivlen;
    } else {
        /* RFC 2406 */
        ivoff = off + sizeof(struct newesp);
        bodyoff = off + sizeof(struct newesp) + ivlen;
    }
    /* put iv into the packet */
    m_copyback(m, ivoff, ivlen, sav->iv);
    ivp = (u_int8_t *) sav->iv;
    if (m->m_pkthdr.len < bodyoff) {
        ipseclog((LOG_ERR, "esp_cbc_encrypt %s: bad len %d/%lu\n",
            algo->name, m->m_pkthdr.len, (u_int32_t)bodyoff));
        m_freem(m);
        return EINVAL;
    }
    if ((m->m_pkthdr.len - bodyoff) % AES_BLOCKLEN) {
        ipseclog((LOG_ERR, "esp_cbc_encrypt %s: "
            "payload length must be multiple of %lu\n",
            algo->name, AES_BLOCKLEN));
        m_freem(m);
        return EINVAL;
    }
    s = m;
    soff = sn = dn = 0;
    d = d0 = dp = NULL;

    /* skip headers/IV */
    while (soff < bodyoff) {
        if (soff + s->m_len > bodyoff) {
            sn = bodyoff - soff;
            break;
        }

        soff += s->m_len;
        s = s->m_next;
    }
    scut = s;
    scutoff = sn;

    /* skip over empty mbuf */
    while (s && s->m_len == 0)
        s = s->m_next;

    while (soff < m->m_pkthdr.len) {
        if (sn + AES_BLOCKLEN <= s->m_len) {
            /* body is continuous */
            sp = mtod(s, u_int8_t *) + sn;
            len = s->m_len - sn;
            len -= len % AES_BLOCKLEN;  // full blocks only
        } else {
            /* body is non-continuous */
            m_copydata(s, sn, AES_BLOCKLEN, (caddr_t) sbuf);
            sp = sbuf;
            len = AES_BLOCKLEN;         // 1 block only in sbuf
        }
        /* get a new destination mbuf when the current one is out of block-sized space */
        if (!d || dn + AES_BLOCKLEN > d->m_len) {
            if (d)
                dp = d;
            MGET(d, M_DONTWAIT, MT_DATA);
            i = m->m_pkthdr.len - (soff + sn);
            if (d && i > MLEN) {
                MCLGET(d, M_DONTWAIT);
                if ((d->m_flags & M_EXT) == 0) {
                    d = m_mbigget(d, M_DONTWAIT);
                    if ((d->m_flags & M_EXT) == 0) {
                        m_free(d);
                        d = NULL;
                    }
                }
            }
            if (!d) {
                m_freem(m);
                if (d0)
                    m_freem(d0);
                return ENOBUFS;
            }
            if (!d0)
                d0 = d;
            if (dp)
                dp->m_next = d;

            // try to make mbuf data aligned
            if (!IPSEC_IS_P2ALIGNED(d->m_data)) {
                m_adj(d, IPSEC_GET_P2UNALIGNED_OFS(d->m_data));
            }

            d->m_len = M_TRAILINGSPACE(d);
            d->m_len -= d->m_len % AES_BLOCKLEN;
            if (d->m_len > i)
                d->m_len = i;
            dptr = mtod(d, u_int8_t *);
            dn = 0;
        }
        /* adjust len if greater than space available */
        if (len > d->m_len - dn)
            len = d->m_len - dn;

        // check input pointer alignment and use a separate aligned buffer (if sp is not aligned on 4-byte boundary).
        if (IPSEC_IS_P2ALIGNED(sp)) {
            sp_unaligned = NULL;
        } else {
            sp_unaligned = sp;
            if (len > MAX_REALIGN_LEN) {
                return ENOBUFS;
            }
            if (sp_aligned == NULL) {
                sp_aligned = (u_int8_t *)_MALLOC(MAX_REALIGN_LEN, M_SECA, M_DONTWAIT);
                if (sp_aligned == NULL)
                    return ENOMEM;
            }
            sp = sp_aligned;
            memcpy(sp, sp_unaligned, len);
        }
        // check ivp pointer alignment and use a separate aligned buffer (if ivp is not aligned on 4-byte boundary).
        if (IPSEC_IS_P2ALIGNED(ivp)) {
            ivp_unaligned = NULL;
        } else {
            ivp_unaligned = ivp;
            ivp = ivp_aligned_buf;
            memcpy(ivp, ivp_unaligned, AES_BLOCKLEN);
        }
        // no need to check output pointer alignment
        aes_encrypt_cbc(sp, ivp, len >> 4, dptr + dn,
            (aes_encrypt_ctx *)(&(((aes_ctx *)sav->sched)->encrypt)));

        // update unaligned pointers
        if (!IPSEC_IS_P2ALIGNED(sp_unaligned)) {
            sp = sp_unaligned;
        }
        if (!IPSEC_IS_P2ALIGNED(ivp_unaligned)) {
            ivp = ivp_unaligned;
        }

        /* update offsets */
        sn += len;
        dn += len;

        /* next iv */
        ivp = dptr + dn - AES_BLOCKLEN;     // last block encrypted
        /* find the next source block and skip empty mbufs */
        while (s && sn >= s->m_len) {
            sn -= s->m_len;
            soff += s->m_len;
            s = s->m_next;
        }
    }

    /* free un-needed source mbufs and add dest mbufs to chain */
    m_freem(scut->m_next);
    scut->m_len = scutoff;
    scut->m_next = d0;

    // free the temporary realignment buffer, if one was allocated
    if (sp_aligned != NULL) {
        FREE(sp_aligned, M_SECA);
        sp_aligned = NULL;
    }

    /* just in case */
    bzero(sbuf, sizeof(sbuf));

    return 0;
}