/*
 ---------------------------------------------------------------------------
 Copyright (c) 2003, Dr Brian Gladman, Worcester, UK. All rights reserved.

 LICENSE TERMS

 The free distribution and use of this software in both source and binary
 form is allowed (with or without changes) provided that:

   1. distributions of this source code include the above copyright
      notice, this list of conditions and the following disclaimer;

   2. distributions in binary form include the above copyright
      notice, this list of conditions and the following disclaimer
      in the documentation and/or other associated materials;

   3. the copyright holder's name is not used to endorse products
      built using this software without specific written permission.

 ALTERNATIVELY, provided that this notice is retained in full, this product
 may be distributed under the terms of the GNU General Public License (GPL),
 in which case the provisions of the GPL apply INSTEAD OF those given above.

 DISCLAIMER

 This software is provided 'as is' with no explicit or implied warranties
 in respect of its properties, including, but not limited to, correctness
 and/or fitness for purpose.
 ---------------------------------------------------------------------------
 Issue 31/01/2006

 These subroutines implement multiple block AES modes for ECB, CBC, CFB,
 OFB and CTR encryption. The code provides support for the VIA Advanced
 Cryptography Engine (ACE).

 NOTE: In the following subroutines, the AES contexts (ctx) must be
 16-byte aligned if the VIA ACE is being used.
*/
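
/*
 Editorial sketch (not in the original source): one way a caller can meet
 the 16-byte alignment requirement above is a compiler alignment
 attribute (GCC/Clang syntax shown; the variable name is hypothetical):

    static aes_encrypt_ctx ace_ctx[1] __attribute__((aligned(16)));

 Heap allocations would instead need an aligned allocator, since plain
 malloc() only guarantees alignment suitable for standard types.
*/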

//#include <memory.h>
#include <kern/assert.h>

#include "aesopt.h"

#if defined( AES_MODES )
#if defined(__cplusplus)
extern "C"
{
#endif

#if defined( _MSC_VER ) && ( _MSC_VER > 800 )
#pragma intrinsic(memcpy)
#define in_line __inline
#else
#define in_line
#endif

#define BFR_BLOCKS 8

/* These values are used to detect long word alignment in order to */
/* speed up some buffer operations. This facility may not work on  */
/* some machines so this define can be commented out if necessary  */

#define FAST_BUFFER_OPERATIONS
#pragma warning( disable : 4311 4312 )

#define lp08(x)         ((uint_8t*)(x))
#define lp32(x)         ((uint_32t*)(x))
#define addr_mod_04(x)  ((unsigned long)(x) & 3)
#define addr_mod_16(x)  ((unsigned long)(x) & 15)
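
/* Worked example (editorial): a buffer at address 0x1003 gives        */
/* addr_mod_04(0x1003) == 3, so the word-at-a-time paths below fall    */
/* back to byte operations, while an address such as 0x2af0 gives      */
/* addr_mod_16(0x2af0) == 0 and qualifies for the VIA ACE paths.       */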

#if defined( USE_VIA_ACE_IF_PRESENT )

#include "via_ace.h"

#pragma pack(16)

aligned_array(unsigned long, enc_gen_table, 12, 16) = NEH_ENC_GEN_DATA;
aligned_array(unsigned long, enc_load_table, 12, 16) = NEH_ENC_LOAD_DATA;
aligned_array(unsigned long, enc_hybrid_table, 12, 16) = NEH_ENC_HYBRID_DATA;
aligned_array(unsigned long, dec_gen_table, 12, 16) = NEH_DEC_GEN_DATA;
aligned_array(unsigned long, dec_load_table, 12, 16) = NEH_DEC_LOAD_DATA;
aligned_array(unsigned long, dec_hybrid_table, 12, 16) = NEH_DEC_HYBRID_DATA;

/* NOTE: These control word macros must only be used after */
/* a key has been set up because they depend on key size   */

#if NEH_KEY_TYPE == NEH_LOAD
#define kd_adr(c)   ((uint_8t*)(c)->ks)
#elif NEH_KEY_TYPE == NEH_GENERATE
#define kd_adr(c)   ((uint_8t*)(c)->ks + (c)->inf.b[0])
#else
#define kd_adr(c)   ((uint_8t*)(c)->ks + ((c)->inf.b[0] == 160 ? 160 : 0))
#endif

#else

#define aligned_array(type, name, no, stride) type name[no]
#define aligned_auto(type, name, no, stride)  type name[no]

#endif

#if defined( _MSC_VER ) && _MSC_VER > 1200

#define via_cwd(cwd, ty, dir, len) unsigned long* cwd = (dir##_##ty##_table + ((len - 128) >> 4))

#else

#define via_cwd(cwd, ty, dir, len)            \
    aligned_auto(unsigned long, cwd, 4, 16);  \
    cwd[1] = cwd[2] = cwd[3] = 0;             \
    cwd[0] = neh_##dir##_##ty##_key(len)

#endif

/* a null version, provided in case gen_tabs() is wrongly called when fixed tables are in use */
void gen_tabs(void)
{
}

aes_rval aes_mode_reset(aes_encrypt_ctx ctx[1])
{
    ctx->inf.b[2] = 0;
    return 0;
}

aes_rval aes_ecb_encrypt(const unsigned char *ibuf, unsigned char *obuf,
                    int len, const aes_encrypt_ctx ctx[1])
{   int nb = len >> 4;

    if(len & (AES_BLOCK_SIZE - 1))
        return 1;

#if defined( USE_VIA_ACE_IF_PRESENT )

    if(ctx->inf.b[1] == 0xff)
    {   uint_8t *ksp = (uint_8t*)(ctx->ks);
        via_cwd(cwd, hybrid, enc, 2 * ctx->inf.b[0] - 192);

        if(addr_mod_16(ctx))
            return 1;

        if(!addr_mod_16(ibuf) && !addr_mod_16(obuf))
        {
            via_ecb_op5(ksp,cwd,ibuf,obuf,nb);
        }
        else
        {   aligned_auto(uint_8t, buf, BFR_BLOCKS * AES_BLOCK_SIZE, 16);
            uint_8t *ip, *op;

            while(nb)
            {
                int m = (nb > BFR_BLOCKS ? BFR_BLOCKS : nb);

                ip = (addr_mod_16(ibuf) ? buf : (uint_8t*)ibuf);
                op = (addr_mod_16(obuf) ? buf : obuf);

                if(ip != ibuf)
                    memcpy(buf, ibuf, m * AES_BLOCK_SIZE);

                via_ecb_op5(ksp,cwd,ip,op,m);

                if(op != obuf)
                    memcpy(obuf, buf, m * AES_BLOCK_SIZE);

                ibuf += m * AES_BLOCK_SIZE;
                obuf += m * AES_BLOCK_SIZE;
                nb -= m;
            }
        }

        return 0;
    }

#endif

#if !defined( ASSUME_VIA_ACE_PRESENT )
    while(nb--)
    {
        aes_encrypt(ibuf, obuf, ctx);
        ibuf += AES_BLOCK_SIZE;
        obuf += AES_BLOCK_SIZE;
    }
#endif
    return 0;
}

aes_rval aes_ecb_decrypt(const unsigned char *ibuf, unsigned char *obuf,
                    int len, const aes_decrypt_ctx ctx[1])
{   int nb = len >> 4;

    if(len & (AES_BLOCK_SIZE - 1))
        return 1;

#if defined( USE_VIA_ACE_IF_PRESENT )

    if(ctx->inf.b[1] == 0xff)
    {   uint_8t *ksp = kd_adr(ctx);
        via_cwd(cwd, hybrid, dec, 2 * ctx->inf.b[0] - 192);

        if(addr_mod_16(ctx))
            return 1;

        if(!addr_mod_16(ibuf) && !addr_mod_16(obuf))
        {
            via_ecb_op5(ksp,cwd,ibuf,obuf,nb);
        }
        else
        {   aligned_auto(uint_8t, buf, BFR_BLOCKS * AES_BLOCK_SIZE, 16);
            uint_8t *ip, *op;

            while(nb)
            {
                int m = (nb > BFR_BLOCKS ? BFR_BLOCKS : nb);

                ip = (addr_mod_16(ibuf) ? buf : (uint_8t*)ibuf);
                op = (addr_mod_16(obuf) ? buf : obuf);

                if(ip != ibuf)
                    memcpy(buf, ibuf, m * AES_BLOCK_SIZE);

                via_ecb_op5(ksp,cwd,ip,op,m);

                if(op != obuf)
                    memcpy(obuf, buf, m * AES_BLOCK_SIZE);

                ibuf += m * AES_BLOCK_SIZE;
                obuf += m * AES_BLOCK_SIZE;
                nb -= m;
            }
        }

        return 0;
    }

#endif

#if !defined( ASSUME_VIA_ACE_PRESENT )
    while(nb--)
    {
        aes_decrypt(ibuf, obuf, ctx);
        ibuf += AES_BLOCK_SIZE;
        obuf += AES_BLOCK_SIZE;
    }
#endif
    return 0;
}
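
/*
 Usage sketch (editorial, not in the original source): an ECB round trip
 over two blocks. The key schedule calls are assumed to come from the
 accompanying AES key code; buffer and key names are hypothetical.

    static aes_encrypt_ctx ectx[1] __attribute__((aligned(16)));
    static aes_decrypt_ctx dctx[1] __attribute__((aligned(16)));
    unsigned char key[16] = { 0 };          // hypothetical 128-bit key
    unsigned char pt[2 * AES_BLOCK_SIZE];   // len must be a multiple of
    unsigned char ct[2 * AES_BLOCK_SIZE];   // AES_BLOCK_SIZE (16 bytes)

    aes_encrypt_key128(key, ectx);
    aes_decrypt_key128(key, dctx);
    aes_ecb_encrypt(pt, ct, sizeof(pt), ectx);   // returns 0 on success,
    aes_ecb_decrypt(ct, pt, sizeof(ct), dctx);   // 1 on a bad length
*/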

aes_rval aes_cbc_encrypt(const unsigned char *ibuf, unsigned char *obuf,
                    int len, unsigned char *iv, const aes_encrypt_ctx ctx[1])
{   int nb = len >> 4;

    if(len & (AES_BLOCK_SIZE - 1))
        return 1;

#if defined( USE_VIA_ACE_IF_PRESENT )

    if(ctx->inf.b[1] == 0xff)
    {   uint_8t *ksp = (uint_8t*)(ctx->ks), *ivp = iv;
        aligned_auto(uint_8t, liv, AES_BLOCK_SIZE, 16);
        via_cwd(cwd, hybrid, enc, 2 * ctx->inf.b[0] - 192);

        if(addr_mod_16(ctx))
            return 1;

        if(addr_mod_16(iv))   /* ensure an aligned iv */
        {
            ivp = liv;
            memcpy(liv, iv, AES_BLOCK_SIZE);
        }

        if(!addr_mod_16(ibuf) && !addr_mod_16(obuf) && !addr_mod_16(iv))
        {
            via_cbc_op7(ksp,cwd,ibuf,obuf,nb,ivp,ivp);
        }
        else
        {   aligned_auto(uint_8t, buf, BFR_BLOCKS * AES_BLOCK_SIZE, 16);
            uint_8t *ip, *op;

            while(nb)
            {
                int m = (nb > BFR_BLOCKS ? BFR_BLOCKS : nb);

                ip = (addr_mod_16(ibuf) ? buf : (uint_8t*)ibuf);
                op = (addr_mod_16(obuf) ? buf : obuf);

                if(ip != ibuf)
                    memcpy(buf, ibuf, m * AES_BLOCK_SIZE);

                via_cbc_op7(ksp,cwd,ip,op,m,ivp,ivp);

                if(op != obuf)
                    memcpy(obuf, buf, m * AES_BLOCK_SIZE);

                ibuf += m * AES_BLOCK_SIZE;
                obuf += m * AES_BLOCK_SIZE;
                nb -= m;
            }
        }

        if(iv != ivp)
            memcpy(iv, ivp, AES_BLOCK_SIZE);

        return 0;
    }

#endif

#if !defined( ASSUME_VIA_ACE_PRESENT )
# ifdef FAST_BUFFER_OPERATIONS
    if(!addr_mod_04(ibuf) && !addr_mod_04(iv))
        while(nb--)
        {
            lp32(iv)[0] ^= lp32(ibuf)[0];
            lp32(iv)[1] ^= lp32(ibuf)[1];
            lp32(iv)[2] ^= lp32(ibuf)[2];
            lp32(iv)[3] ^= lp32(ibuf)[3];
            aes_encrypt(iv, iv, ctx);
            memcpy(obuf, iv, AES_BLOCK_SIZE);
            ibuf += AES_BLOCK_SIZE;
            obuf += AES_BLOCK_SIZE;
        }
    else
# endif
        while(nb--)
        {
            iv[ 0] ^= ibuf[ 0]; iv[ 1] ^= ibuf[ 1];
            iv[ 2] ^= ibuf[ 2]; iv[ 3] ^= ibuf[ 3];
            iv[ 4] ^= ibuf[ 4]; iv[ 5] ^= ibuf[ 5];
            iv[ 6] ^= ibuf[ 6]; iv[ 7] ^= ibuf[ 7];
            iv[ 8] ^= ibuf[ 8]; iv[ 9] ^= ibuf[ 9];
            iv[10] ^= ibuf[10]; iv[11] ^= ibuf[11];
            iv[12] ^= ibuf[12]; iv[13] ^= ibuf[13];
            iv[14] ^= ibuf[14]; iv[15] ^= ibuf[15];
            aes_encrypt(iv, iv, ctx);
            memcpy(obuf, iv, AES_BLOCK_SIZE);
            ibuf += AES_BLOCK_SIZE;
            obuf += AES_BLOCK_SIZE;
        }
#endif
    return 0;
}

aes_rval aes_encrypt_cbc(const unsigned char *in_blk, const unsigned char *in_iv, unsigned int num_blk,
                    unsigned char *out_blk, const aes_encrypt_ctx cx[1])
{
    unsigned char tmp_iv[16];
    int i;

    for (i = 0; i < 16; i++)
        tmp_iv[i] = *(in_iv + i);

    return aes_cbc_encrypt(in_blk, out_blk, num_blk << 4, tmp_iv, cx);
}
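
/*
 Editorial note: aes_cbc_encrypt() updates iv in place, so the chaining
 carries across successive calls on one message; this wrapper copies
 in_iv into a local buffer first, so the caller's IV is left untouched
 and each call stands alone. An illustrative chained use of the lower
 level routine, with hypothetical names:

    unsigned char iv[AES_BLOCK_SIZE];            // set to the initial IV
    aes_cbc_encrypt(part1, out1, len1, iv, cx);  // iv now holds the last
    aes_cbc_encrypt(part2, out2, len2, iv, cx);  // ciphertext block
*/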

aes_rval aes_cbc_decrypt(const unsigned char *ibuf, unsigned char *obuf,
                    int len, unsigned char *iv, const aes_decrypt_ctx ctx[1])
{   unsigned char tmp[AES_BLOCK_SIZE];
    int nb = len >> 4;

    if(len & (AES_BLOCK_SIZE - 1))
        return 1;

#if defined( USE_VIA_ACE_IF_PRESENT )

    if(ctx->inf.b[1] == 0xff)
    {   uint_8t *ksp = kd_adr(ctx), *ivp = iv;
        aligned_auto(uint_8t, liv, AES_BLOCK_SIZE, 16);
        via_cwd(cwd, hybrid, dec, 2 * ctx->inf.b[0] - 192);

        if(addr_mod_16(ctx))
            return 1;

        if(addr_mod_16(iv))   /* ensure an aligned iv */
        {
            ivp = liv;
            memcpy(liv, iv, AES_BLOCK_SIZE);
        }

        if(!addr_mod_16(ibuf) && !addr_mod_16(obuf) && !addr_mod_16(iv))
        {
            via_cbc_op6(ksp,cwd,ibuf,obuf,nb,ivp);
        }
        else
        {   aligned_auto(uint_8t, buf, BFR_BLOCKS * AES_BLOCK_SIZE, 16);
            uint_8t *ip, *op;

            while(nb)
            {
                int m = (nb > BFR_BLOCKS ? BFR_BLOCKS : nb);

                ip = (addr_mod_16(ibuf) ? buf : (uint_8t*)ibuf);
                op = (addr_mod_16(obuf) ? buf : obuf);

                if(ip != ibuf)
                    memcpy(buf, ibuf, m * AES_BLOCK_SIZE);

                via_cbc_op6(ksp,cwd,ip,op,m,ivp);

                if(op != obuf)
                    memcpy(obuf, buf, m * AES_BLOCK_SIZE);

                ibuf += m * AES_BLOCK_SIZE;
                obuf += m * AES_BLOCK_SIZE;
                nb -= m;
            }
        }

        if(iv != ivp)
            memcpy(iv, ivp, AES_BLOCK_SIZE);

        return 0;
    }
#endif

#if !defined( ASSUME_VIA_ACE_PRESENT )
# ifdef FAST_BUFFER_OPERATIONS
    if(!addr_mod_04(obuf) && !addr_mod_04(iv))
        while(nb--)
        {
            memcpy(tmp, ibuf, AES_BLOCK_SIZE);
            aes_decrypt(ibuf, obuf, ctx);
            lp32(obuf)[0] ^= lp32(iv)[0];
            lp32(obuf)[1] ^= lp32(iv)[1];
            lp32(obuf)[2] ^= lp32(iv)[2];
            lp32(obuf)[3] ^= lp32(iv)[3];
            memcpy(iv, tmp, AES_BLOCK_SIZE);
            ibuf += AES_BLOCK_SIZE;
            obuf += AES_BLOCK_SIZE;
        }
    else
# endif
        while(nb--)
        {
            memcpy(tmp, ibuf, AES_BLOCK_SIZE);
            aes_decrypt(ibuf, obuf, ctx);
            obuf[ 0] ^= iv[ 0]; obuf[ 1] ^= iv[ 1];
            obuf[ 2] ^= iv[ 2]; obuf[ 3] ^= iv[ 3];
            obuf[ 4] ^= iv[ 4]; obuf[ 5] ^= iv[ 5];
            obuf[ 6] ^= iv[ 6]; obuf[ 7] ^= iv[ 7];
            obuf[ 8] ^= iv[ 8]; obuf[ 9] ^= iv[ 9];
            obuf[10] ^= iv[10]; obuf[11] ^= iv[11];
            obuf[12] ^= iv[12]; obuf[13] ^= iv[13];
            obuf[14] ^= iv[14]; obuf[15] ^= iv[15];
            memcpy(iv, tmp, AES_BLOCK_SIZE);
            ibuf += AES_BLOCK_SIZE;
            obuf += AES_BLOCK_SIZE;
        }
#endif
    return 0;
}

aes_rval aes_decrypt_cbc(const unsigned char *in_blk, const unsigned char *in_iv, unsigned int num_blk,
                    unsigned char *out_blk, const aes_decrypt_ctx cx[1])
{
    unsigned char tmp_iv[16];
    int i;

    for (i = 0; i < 16; i++)
        tmp_iv[i] = *(in_iv + i);

    return aes_cbc_decrypt(in_blk, out_blk, num_blk << 4, tmp_iv, cx);
}
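
/*
 Usage sketch (editorial, not in the original source): a CBC round trip
 through the two wrappers. Key schedule calls are assumed to come from
 the accompanying AES key code; names are hypothetical.

    static aes_encrypt_ctx ecx[1] __attribute__((aligned(16)));
    static aes_decrypt_ctx dcx[1] __attribute__((aligned(16)));
    unsigned char key[16] = { 0 }, iv[AES_BLOCK_SIZE] = { 0 };
    unsigned char pt[4 * AES_BLOCK_SIZE], ct[4 * AES_BLOCK_SIZE];

    aes_encrypt_key128(key, ecx);
    aes_decrypt_key128(key, dcx);
    aes_encrypt_cbc(pt, iv, 4, ct, ecx);   // num_blk counts 16-byte blocks
    aes_decrypt_cbc(ct, iv, 4, pt, dcx);   // same IV; these wrappers do
                                           // not modify the caller's iv
*/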

#if defined(__cplusplus)
}
#endif
#endif