/* bsd/crypto/aes/i386/aes_modes.c */
/*
 ---------------------------------------------------------------------------
 Copyright (c) 2003, Dr Brian Gladman, Worcester, UK.   All rights reserved.

 LICENSE TERMS

 The free distribution and use of this software in both source and binary
 form is allowed (with or without changes) provided that:

   1. distributions of this source code include the above copyright
      notice, this list of conditions and the following disclaimer;

   2. distributions in binary form include the above copyright
      notice, this list of conditions and the following disclaimer
      in the documentation and/or other associated materials;

   3. the copyright holder's name is not used to endorse products
      built using this software without specific written permission.

 ALTERNATIVELY, provided that this notice is retained in full, this product
 may be distributed under the terms of the GNU General Public License (GPL),
 in which case the provisions of the GPL apply INSTEAD OF those given above.

 DISCLAIMER

 This software is provided 'as is' with no explicit or implied warranties
 in respect of its properties, including, but not limited to, correctness
 and/or fitness for purpose.
 ---------------------------------------------------------------------------

 These subroutines implement multiple block AES modes for ECB, CBC, CFB,
 OFB and CTR encryption. The code provides support for the VIA Advanced
 Cryptography Engine (ACE).

 NOTE: In the following subroutines, the AES contexts (ctx) must be
 16 byte aligned if VIA ACE is being used.
*/

#include <kern/assert.h>

#include "aesopt.h"     /* Gladman AES configuration, types and macros */

#if defined( AES_MODES )
#if defined(__cplusplus)
extern "C"
{
#endif

#if defined( _MSC_VER ) && ( _MSC_VER > 800 )
#pragma intrinsic(memcpy)
#define in_line __inline
#else
#define in_line
#endif

#define BFR_BLOCKS      8   /* blocks handled per VIA ACE buffered call */

/* These values are used to detect long word alignment in order to */
/* speed up some buffer operations. This facility may not work on  */
/* some machines so this define can be commented out if necessary  */

#define FAST_BUFFER_OPERATIONS
#pragma warning( disable : 4311 4312 )

#define lp08(x)         ((uint_8t*)(x))
#define lp32(x)         ((uint_32t*)(x))
#define addr_mod_04(x)  ((unsigned long)(x) & 3)
#define addr_mod_16(x)  ((unsigned long)(x) & 15)
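
/* Illustrative note (not part of the original source): addr_mod_16()
   masks the low four address bits, so it is zero exactly when a pointer
   is 16 byte aligned, and addr_mod_04() does the same for long word
   alignment. A minimal sketch, never compiled into the build:         */
#if 0
static void alignment_example(void)
{
    unsigned char *p = (unsigned char*)0x1004;  /* hypothetical address */

    assert(addr_mod_16(p) == 4);    /* 4 bytes past a 16 byte boundary */
    assert(addr_mod_04(p) == 0);    /* but long word (4 byte) aligned  */
}
#endif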

#if defined( USE_VIA_ACE_IF_PRESENT )

aligned_array(unsigned long,    enc_gen_table, 12, 16) = NEH_ENC_GEN_DATA;
aligned_array(unsigned long,   enc_load_table, 12, 16) = NEH_ENC_LOAD_DATA;
aligned_array(unsigned long, enc_hybrid_table, 12, 16) = NEH_ENC_HYBRID_DATA;
aligned_array(unsigned long,    dec_gen_table, 12, 16) = NEH_DEC_GEN_DATA;
aligned_array(unsigned long,   dec_load_table, 12, 16) = NEH_DEC_LOAD_DATA;
aligned_array(unsigned long, dec_hybrid_table, 12, 16) = NEH_DEC_HYBRID_DATA;

/* NOTE: These control word macros must only be used after */
/* a key has been set up because they depend on key size   */

#if NEH_KEY_TYPE == NEH_LOAD
#define kd_adr(c)   ((uint_8t*)(c)->ks)
#elif NEH_KEY_TYPE == NEH_GENERATE
#define kd_adr(c)   ((uint_8t*)(c)->ks + (c)->inf.b[0])
#else
#define kd_adr(c)   ((uint_8t*)(c)->ks + ((c)->inf.b[0] == 160 ? 160 : 0))
#endif
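
/* Illustrative note (not part of the original source): inf.b[0] holds
   16 times the round count (160, 192 or 224 for 128, 192 and 256 bit
   keys), so the comparison against 160 above singles out 128 bit keys,
   and via_cwd() below recovers the key length in bits as
   2 * inf.b[0] - 192.                                                 */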

#else

#define aligned_array(type, name, no, stride) type name[no]
#define aligned_auto(type, name, no, stride)  type name[no]

#endif

#if defined( _MSC_VER ) && _MSC_VER > 1200

#define via_cwd(cwd, ty, dir, len) \
    unsigned long* cwd = (dir##_##ty##_table + ((len - 128) >> 4))

#else

#define via_cwd(cwd, ty, dir, len)              \
    aligned_auto(unsigned long, cwd, 4, 16);    \
    cwd[1] = cwd[2] = cwd[3] = 0;               \
    cwd[0] = neh_##dir##_##ty##_key(len)

#endif
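
/* Illustrative note (not part of the original source): on the non-MSC
   path, via_cwd(cwd, hybrid, enc, 128) pastes tokens to produce

       aligned_auto(unsigned long, cwd, 4, 16);
       cwd[1] = cwd[2] = cwd[3] = 0;
       cwd[0] = neh_enc_hybrid_key(128);

   i.e. a 16 byte aligned four-word VIA ACE control word whose first
   word encodes the cipher direction, key handling and key length.   */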

/* implemented in case of wrong call for fixed tables */

aes_rval aes_mode_reset(aes_encrypt_ctx ctx[1])
{
    return 0;   /* assumed: nothing to reset, report success */
}

aes_rval aes_ecb_encrypt(const unsigned char *ibuf, unsigned char *obuf,
                    int len, const aes_encrypt_ctx ctx[1])
{   int nb = len >> 4;      /* number of whole 16 byte blocks */

    if(len & (AES_BLOCK_SIZE - 1))
        return 1;

#if defined( USE_VIA_ACE_IF_PRESENT )

    if(ctx->inf.b[1] == 0xff)               /* VIA ACE path enabled for this key */
    {   uint_8t *ksp = (uint_8t*)(ctx->ks);
        via_cwd(cwd, hybrid, enc, 2 * ctx->inf.b[0] - 192);

        if(addr_mod_16(ctx))                /* ACE requires a 16 byte aligned context */
            return 1;

        if(!addr_mod_16(ibuf) && !addr_mod_16(obuf))
        {
            via_ecb_op5(ksp, cwd, ibuf, obuf, nb);
        }
        else    /* misaligned buffers go through an aligned bounce buffer */
        {   aligned_auto(uint_8t, buf, BFR_BLOCKS * AES_BLOCK_SIZE, 16);
            uint_8t *ip, *op;

            while(nb)
            {
                int m = (nb > BFR_BLOCKS ? BFR_BLOCKS : nb);

                ip = (addr_mod_16(ibuf) ? buf : (uint_8t*)ibuf);
                op = (addr_mod_16(obuf) ? buf : obuf);

                if(ip != ibuf)
                    memcpy(buf, ibuf, m * AES_BLOCK_SIZE);

                via_ecb_op5(ksp, cwd, ip, op, m);

                if(op != obuf)
                    memcpy(obuf, buf, m * AES_BLOCK_SIZE);

                ibuf += m * AES_BLOCK_SIZE;
                obuf += m * AES_BLOCK_SIZE;
                nb -= m;
            }
        }

        return 0;
    }

#endif

#if !defined( ASSUME_VIA_ACE_PRESENT )
    while(nb--)
    {
        aes_encrypt(ibuf, obuf, ctx);
        ibuf += AES_BLOCK_SIZE;
        obuf += AES_BLOCK_SIZE;
    }
#endif

    return 0;
}
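
/* Illustrative usage (not part of the original source; the function and
   parameter names are hypothetical). The length passed to
   aes_ecb_encrypt() is in bytes and must be a multiple of
   AES_BLOCK_SIZE:                                                      */
#if 0
static int ecb_example(const unsigned char key[16],
                       const unsigned char *pt, unsigned char *ct, int len)
{
    aes_encrypt_ctx cx[1];

    aes_encrypt_key128(key, cx);                /* schedule a 128 bit key */
    return aes_ecb_encrypt(pt, ct, len, cx);    /* 0 on success           */
}
#endif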

aes_rval aes_ecb_decrypt(const unsigned char *ibuf, unsigned char *obuf,
                    int len, const aes_decrypt_ctx ctx[1])
{   int nb = len >> 4;      /* number of whole 16 byte blocks */

    if(len & (AES_BLOCK_SIZE - 1))
        return 1;

#if defined( USE_VIA_ACE_IF_PRESENT )

    if(ctx->inf.b[1] == 0xff)               /* VIA ACE path enabled for this key */
    {   uint_8t *ksp = kd_adr(ctx);
        via_cwd(cwd, hybrid, dec, 2 * ctx->inf.b[0] - 192);

        if(addr_mod_16(ctx))                /* ACE requires a 16 byte aligned context */
            return 1;

        if(!addr_mod_16(ibuf) && !addr_mod_16(obuf))
        {
            via_ecb_op5(ksp, cwd, ibuf, obuf, nb);
        }
        else    /* misaligned buffers go through an aligned bounce buffer */
        {   aligned_auto(uint_8t, buf, BFR_BLOCKS * AES_BLOCK_SIZE, 16);
            uint_8t *ip, *op;

            while(nb)
            {
                int m = (nb > BFR_BLOCKS ? BFR_BLOCKS : nb);

                ip = (addr_mod_16(ibuf) ? buf : (uint_8t*)ibuf);
                op = (addr_mod_16(obuf) ? buf : obuf);

                if(ip != ibuf)
                    memcpy(buf, ibuf, m * AES_BLOCK_SIZE);

                via_ecb_op5(ksp, cwd, ip, op, m);

                if(op != obuf)
                    memcpy(obuf, buf, m * AES_BLOCK_SIZE);

                ibuf += m * AES_BLOCK_SIZE;
                obuf += m * AES_BLOCK_SIZE;
                nb -= m;
            }
        }

        return 0;
    }

#endif

#if !defined( ASSUME_VIA_ACE_PRESENT )
    while(nb--)
    {
        aes_decrypt(ibuf, obuf, ctx);
        ibuf += AES_BLOCK_SIZE;
        obuf += AES_BLOCK_SIZE;
    }
#endif

    return 0;
}

aes_rval aes_cbc_encrypt(const unsigned char *ibuf, unsigned char *obuf,
                    int len, unsigned char *iv, const aes_encrypt_ctx ctx[1])
{   int nb = len >> 4;      /* number of whole 16 byte blocks */

    if(len & (AES_BLOCK_SIZE - 1))
        return 1;

#if defined( USE_VIA_ACE_IF_PRESENT )

    if(ctx->inf.b[1] == 0xff)               /* VIA ACE path enabled for this key */
    {   uint_8t *ksp = (uint_8t*)(ctx->ks), *ivp = iv;
        aligned_auto(uint_8t, liv, AES_BLOCK_SIZE, 16);
        via_cwd(cwd, hybrid, enc, 2 * ctx->inf.b[0] - 192);

        if(addr_mod_16(ctx))                /* ACE requires a 16 byte aligned context */
            return 1;

        if(addr_mod_16(iv))                 /* ensure an aligned iv */
        {
            ivp = liv;
            memcpy(liv, iv, AES_BLOCK_SIZE);
        }

        if(!addr_mod_16(ibuf) && !addr_mod_16(obuf) && !addr_mod_16(iv))
        {
            via_cbc_op7(ksp, cwd, ibuf, obuf, nb, ivp, ivp);
        }
        else    /* misaligned buffers go through an aligned bounce buffer */
        {   aligned_auto(uint_8t, buf, BFR_BLOCKS * AES_BLOCK_SIZE, 16);
            uint_8t *ip, *op;

            while(nb)
            {
                int m = (nb > BFR_BLOCKS ? BFR_BLOCKS : nb);

                ip = (addr_mod_16(ibuf) ? buf : (uint_8t*)ibuf);
                op = (addr_mod_16(obuf) ? buf : obuf);

                if(ip != ibuf)
                    memcpy(buf, ibuf, m * AES_BLOCK_SIZE);

                via_cbc_op7(ksp, cwd, ip, op, m, ivp, ivp);

                if(op != obuf)
                    memcpy(obuf, buf, m * AES_BLOCK_SIZE);

                ibuf += m * AES_BLOCK_SIZE;
                obuf += m * AES_BLOCK_SIZE;
                nb -= m;
            }
        }

        if(iv != ivp)                       /* copy back the updated iv if it was relocated */
            memcpy(iv, ivp, AES_BLOCK_SIZE);

        return 0;
    }

#endif

#if !defined( ASSUME_VIA_ACE_PRESENT )
# ifdef FAST_BUFFER_OPERATIONS
    if(!addr_mod_04(ibuf) && !addr_mod_04(iv))
        while(nb--)
        {   /* xor the plaintext into the iv a long word at a time */
            lp32(iv)[0] ^= lp32(ibuf)[0];
            lp32(iv)[1] ^= lp32(ibuf)[1];
            lp32(iv)[2] ^= lp32(ibuf)[2];
            lp32(iv)[3] ^= lp32(ibuf)[3];
            aes_encrypt(iv, iv, ctx);
            memcpy(obuf, iv, AES_BLOCK_SIZE);
            ibuf += AES_BLOCK_SIZE;
            obuf += AES_BLOCK_SIZE;
        }
    else
# endif
        while(nb--)
        {   /* xor the plaintext into the iv a byte at a time */
            iv[ 0] ^= ibuf[ 0]; iv[ 1] ^= ibuf[ 1];
            iv[ 2] ^= ibuf[ 2]; iv[ 3] ^= ibuf[ 3];
            iv[ 4] ^= ibuf[ 4]; iv[ 5] ^= ibuf[ 5];
            iv[ 6] ^= ibuf[ 6]; iv[ 7] ^= ibuf[ 7];
            iv[ 8] ^= ibuf[ 8]; iv[ 9] ^= ibuf[ 9];
            iv[10] ^= ibuf[10]; iv[11] ^= ibuf[11];
            iv[12] ^= ibuf[12]; iv[13] ^= ibuf[13];
            iv[14] ^= ibuf[14]; iv[15] ^= ibuf[15];
            aes_encrypt(iv, iv, ctx);
            memcpy(obuf, iv, AES_BLOCK_SIZE);
            ibuf += AES_BLOCK_SIZE;
            obuf += AES_BLOCK_SIZE;
        }
#endif

    return 0;
}
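
/* Illustrative note (not part of the original source): both loops above
   implement the CBC recurrence C[i] = E_K(P[i] ^ C[i-1]), with iv holding
   C[i-1] (initially the caller's IV), so chaining carries across calls.  */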

aes_rval aes_encrypt_cbc(const unsigned char *in_blk, const unsigned char *in_iv, unsigned int num_blk,
                    unsigned char *out_blk, const aes_encrypt_ctx cx[1])
{
    unsigned char tmp_iv[16];
    int i;

    for (i = 0; i < 16; i++)
        tmp_iv[i] = *(in_iv + i);

    return aes_cbc_encrypt(in_blk, out_blk, num_blk << 4, tmp_iv, cx);
}
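
/* Illustrative note (not part of the original source): the wrapper copies
   the caller's IV into tmp_iv because aes_cbc_encrypt() updates the iv it
   is handed in place, and num_blk << 4 converts the block count into the
   byte length that aes_cbc_encrypt() expects.                            */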

aes_rval aes_cbc_decrypt(const unsigned char *ibuf, unsigned char *obuf,
                    int len, unsigned char *iv, const aes_decrypt_ctx ctx[1])
{   unsigned char tmp[AES_BLOCK_SIZE];
    int nb = len >> 4;      /* number of whole 16 byte blocks */

    if(len & (AES_BLOCK_SIZE - 1))
        return 1;

#if defined( USE_VIA_ACE_IF_PRESENT )

    if(ctx->inf.b[1] == 0xff)               /* VIA ACE path enabled for this key */
    {   uint_8t *ksp = kd_adr(ctx), *ivp = iv;
        aligned_auto(uint_8t, liv, AES_BLOCK_SIZE, 16);
        via_cwd(cwd, hybrid, dec, 2 * ctx->inf.b[0] - 192);

        if(addr_mod_16(ctx))                /* ACE requires a 16 byte aligned context */
            return 1;

        if(addr_mod_16(iv))                 /* ensure an aligned iv */
        {
            ivp = liv;
            memcpy(liv, iv, AES_BLOCK_SIZE);
        }

        if(!addr_mod_16(ibuf) && !addr_mod_16(obuf) && !addr_mod_16(iv))
        {
            via_cbc_op6(ksp, cwd, ibuf, obuf, nb, ivp);
        }
        else    /* misaligned buffers go through an aligned bounce buffer */
        {   aligned_auto(uint_8t, buf, BFR_BLOCKS * AES_BLOCK_SIZE, 16);
            uint_8t *ip, *op;

            while(nb)
            {
                int m = (nb > BFR_BLOCKS ? BFR_BLOCKS : nb);

                ip = (addr_mod_16(ibuf) ? buf : (uint_8t*)ibuf);
                op = (addr_mod_16(obuf) ? buf : obuf);

                if(ip != ibuf)
                    memcpy(buf, ibuf, m * AES_BLOCK_SIZE);

                via_cbc_op6(ksp, cwd, ip, op, m, ivp);

                if(op != obuf)
                    memcpy(obuf, buf, m * AES_BLOCK_SIZE);

                ibuf += m * AES_BLOCK_SIZE;
                obuf += m * AES_BLOCK_SIZE;
                nb -= m;
            }
        }

        if(iv != ivp)                       /* copy back the updated iv if it was relocated */
            memcpy(iv, ivp, AES_BLOCK_SIZE);

        return 0;
    }

#endif

#if !defined( ASSUME_VIA_ACE_PRESENT )
# ifdef FAST_BUFFER_OPERATIONS
    if(!addr_mod_04(obuf) && !addr_mod_04(iv))
        while(nb--)
        {   /* save the ciphertext first: obuf may alias ibuf */
            memcpy(tmp, ibuf, AES_BLOCK_SIZE);
            aes_decrypt(ibuf, obuf, ctx);
            lp32(obuf)[0] ^= lp32(iv)[0];
            lp32(obuf)[1] ^= lp32(iv)[1];
            lp32(obuf)[2] ^= lp32(iv)[2];
            lp32(obuf)[3] ^= lp32(iv)[3];
            memcpy(iv, tmp, AES_BLOCK_SIZE);
            ibuf += AES_BLOCK_SIZE;
            obuf += AES_BLOCK_SIZE;
        }
    else
# endif
        while(nb--)
        {   /* save the ciphertext first: obuf may alias ibuf */
            memcpy(tmp, ibuf, AES_BLOCK_SIZE);
            aes_decrypt(ibuf, obuf, ctx);
            obuf[ 0] ^= iv[ 0]; obuf[ 1] ^= iv[ 1];
            obuf[ 2] ^= iv[ 2]; obuf[ 3] ^= iv[ 3];
            obuf[ 4] ^= iv[ 4]; obuf[ 5] ^= iv[ 5];
            obuf[ 6] ^= iv[ 6]; obuf[ 7] ^= iv[ 7];
            obuf[ 8] ^= iv[ 8]; obuf[ 9] ^= iv[ 9];
            obuf[10] ^= iv[10]; obuf[11] ^= iv[11];
            obuf[12] ^= iv[12]; obuf[13] ^= iv[13];
            obuf[14] ^= iv[14]; obuf[15] ^= iv[15];
            memcpy(iv, tmp, AES_BLOCK_SIZE);
            ibuf += AES_BLOCK_SIZE;
            obuf += AES_BLOCK_SIZE;
        }
#endif

    return 0;
}
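
/* Illustrative note (not part of the original source): the loops above
   implement P[i] = D_K(C[i]) ^ C[i-1]; tmp preserves C[i] before the
   decrypt so that iv can still be updated when obuf overwrites ibuf.  */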

aes_rval aes_decrypt_cbc(const unsigned char *in_blk, const unsigned char *in_iv, unsigned int num_blk,
                    unsigned char *out_blk, const aes_decrypt_ctx cx[1])
{
    unsigned char tmp_iv[16];
    int i;

    for (i = 0; i < 16; i++)
        tmp_iv[i] = *(in_iv + i);

    return aes_cbc_decrypt(in_blk, out_blk, num_blk << 4, tmp_iv, cx);
}
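
/* Illustrative round trip (not part of the original source; the function
   and parameter names are hypothetical). A minimal sketch, never compiled
   into the build:                                                        */
#if 0
static int cbc_round_trip(const unsigned char key[16], const unsigned char iv[16],
                          const unsigned char *pt, unsigned char *ct,
                          unsigned char *out, unsigned int nblk)
{
    aes_encrypt_ctx ex[1];
    aes_decrypt_ctx dx[1];

    aes_encrypt_key128(key, ex);
    aes_decrypt_key128(key, dx);
    aes_encrypt_cbc(pt, iv, nblk, ct, ex);          /* nblk 16 byte blocks  */
    return aes_decrypt_cbc(ct, iv, nblk, out, dx);  /* out now matches pt   */
}
#endif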

#if defined(__cplusplus)
}
#endif
#endif