]> git.saurik.com Git - apple/xnu.git/blob - bsd/crypto/aes/i386/aes_x86_v2.s
xnu-1456.1.26.tar.gz
[apple/xnu.git] / bsd / crypto / aes / i386 / aes_x86_v2.s
1 /*
2 * Copyright (c) 2000-2006 Apple Computer, Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28
29 /*
30 * ---------------------------------------------------------------------------
31 * Copyright (c) 2002, Dr Brian Gladman, Worcester, UK. All rights reserved.
32 *
33 * LICENSE TERMS
34 *
35 * The free distribution and use of this software in both source and binary
36 * form is allowed (with or without changes) provided that:
37 *
38 * 1. distributions of this source code include the above copyright
39 * notice, this list of conditions and the following disclaimer;
40 *
41 * 2. distributions in binary form include the above copyright
42 * notice, this list of conditions and the following disclaimer
43 * in the documentation and/or other associated materials;
44 *
45 * 3. the copyright holder's name is not used to endorse products
46 * built using this software without specific written permission.
47 *
48 * ALTERNATIVELY, provided that this notice is retained in full, this product
49 * may be distributed under the terms of the GNU General Public License (GPL),
50 * in which case the provisions of the GPL apply INSTEAD OF those given above.
51 *
52 * DISCLAIMER
53 *
54 * This software is provided 'as is' with no explicit or implied warranties
55 * in respect of its properties, including, but not limited to, correctness
56 * and/or fitness for purpose.
57 * ---------------------------------------------------------------------------
58 * Issue 31/01/2006
59 *
60 * This code requires either ASM_X86_V2 or ASM_X86_V2C to be set in aesopt.h
61 * and the same define to be set here as well. If AES_V2C is set this file
62 * requires the C files aeskey.c and aestab.c for support.
63 *
64 * This is a full assembler implementation covering encryption, decryption and
65 * key scheduling. It uses 2k bytes of tables but its encryption and decryption
66 * performance is very close to that obtained using large tables. Key schedule
67 * expansion is slower for both encryption and decryption but this is likely to
68 * be offset by the much smaller load that this version places on the processor
69 * cache. I acknowledge the contribution made by Daniel Bernstein to aspects of
70 * the design of the AES round function used here.
71 *
72 * This code provides the standard AES block size (128 bits, 16 bytes) and the
73 * three standard AES key sizes (128, 192 and 256 bits). It has the same call
74 * interface as my C implementation. The ebx, esi, edi and ebp registers are
75 * preserved across calls but eax, ecx and edx and the artihmetic status flags
76 * are not.
77 */
78
79 #include <mach/i386/asm.h>
80
81 #define AES_128 /* define if AES with 128 bit keys is needed */
82 #define AES_192 /* define if AES with 192 bit keys is needed */
83 #define AES_256 /* define if AES with 256 bit keys is needed */
84 #define AES_VAR /* define if a variable key size is needed */
85 #define ENCRYPTION /* define if encryption is needed */
86 #define DECRYPTION /* define if decryption is needed */
87 #define AES_REV_DKS /* define if key decryption schedule is reversed */
88
89 #ifndef ASM_X86_V2C
90 #define ENCRYPTION_KEY_SCHEDULE /* define if enc. key expansion is needed */
91 #define DECRYPTION_KEY_SCHEDULE /* define if dec. key expansion is needed */
92 #endif
93
94 /*
95 * The encryption key schedule has the following in memory layout where N is the
96 * number of rounds (10, 12 or 14):
97 *
98 * lo: | input key (round 0) | ; each round is four 32-bit words
99 * | encryption round 1 |
100 * | encryption round 2 |
101 * ....
102 * | encryption round N-1 |
103 * hi: | encryption round N |
104 *
105 * The decryption key schedule is normally set up so that it has the same
106 * layout as above by actually reversing the order of the encryption key
107 * schedule in memory (this happens when AES_REV_DKS is set):
108 *
109 * lo: | decryption round 0 | = | encryption round N |
110 * | decryption round 1 | = INV_MIX_COL[ | encryption round N-1 | ]
111 * | decryption round 2 | = INV_MIX_COL[ | encryption round N-2 | ]
112 * .... ....
113 * | decryption round N-1 | = INV_MIX_COL[ | encryption round 1 | ]
114 * hi: | decryption round N | = | input key (round 0) |
115 *
116 * with rounds except the first and last modified using inv_mix_column()
117 * But if AES_REV_DKS is NOT set the order of keys is left as it is for
118 * encryption so that it has to be accessed in reverse when used for
119 * decryption (although the inverse mix column modifications are done)
120 *
121 * lo: | decryption round 0 | = | input key (round 0) |
122 * | decryption round 1 | = INV_MIX_COL[ | encryption round 1 | ]
123 * | decryption round 2 | = INV_MIX_COL[ | encryption round 2 | ]
124 * .... ....
125 * | decryption round N-1 | = INV_MIX_COL[ | encryption round N-1 | ]
126 * hi: | decryption round N | = | encryption round N |
127 *
128 * This layout is faster when the assembler key scheduling provided here
129 * is used.
130 */
131
132 /* End of user defines */
133
134 #ifdef AES_VAR
135 #ifndef AES_128
136 #define AES_128
137 #endif
138 #ifndef AES_192
139 #define AES_192
140 #endif
141 #ifndef AES_256
142 #define AES_256
143 #endif
144 #endif
145
146 #ifdef AES_VAR
147 #define KS_LENGTH 60
148 #else
149 #ifdef AES_256
150 #define KS_LENGTH 60
151 #else
152 #ifdef AES_192
153 #define KS_LENGTH 52
154 #else
155 #define KS_LENGTH 44
156 #endif
157 #endif
158 #endif
159
160 /*
161 * These macros implement stack based local variables
162 */
163 #define save(r1) \
164 movl %r1, (%esp);
165
166 #define restore(r1) \
167 movl (%esp), %r1;
168
169 #define do_call(f, n) \
170 call EXT(f); \
171 addl $(n), %esp;
172
173 /*
174 * finite field multiplies by {02}, {04} and {08}
175 */
176 #define f2(x) ((x<<1)^(((x>>7)&1)*0x11b))
177 #define f4(x) ((x<<2)^(((x>>6)&1)*0x11b)^(((x>>6)&2)*0x11b))
178 #define f8(x) ((x<<3)^(((x>>5)&1)*0x11b)^(((x>>5)&2)*0x11b)^(((x>>5)&4)*0x11b))
179
180 /*
181 * finite field multiplies required in table generation
182 */
183 #define f3(x) (f2(x) ^ x)
184 #define f9(x) (f8(x) ^ x)
185 #define fb(x) (f8(x) ^ f2(x) ^ x)
186 #define fd(x) (f8(x) ^ f4(x) ^ x)
187 #define fe(x) (f8(x) ^ f4(x) ^ f2(x))
188
189 #define etab_0(x) enc_tab+4(,x,8)
190 #define etab_1(x) enc_tab+3(,x,8)
191 #define etab_2(x) enc_tab+2(,x,8)
192 #define etab_3(x) enc_tab+1(,x,8)
193
194 #define etab_b(x) etab_3(x)
195
196 #define btab_0(x) enc_tab+6(,x,8)
197 #define btab_1(x) enc_tab+5(,x,8)
198 #define btab_2(x) enc_tab+4(,x,8)
199 #define btab_3(x) enc_tab+3(,x,8)
200
201 /*
202 * ROUND FUNCTION. Build column[2] on ESI and column[3] on EDI that have the
203 * round keys pre-loaded. Build column[0] in EBP and column[1] in EBX.
204 *
205 * Input:
206 *
207 * EAX column[0]
208 * EBX column[1]
209 * ECX column[2]
210 * EDX column[3]
211 * ESI column key[round][2]
212 * EDI column key[round][3]
213 * EBP scratch
214 *
215 * Output:
216 *
217 * EBP column[0] unkeyed
218 * EBX column[1] unkeyed
219 * ESI column[2] keyed
220 * EDI column[3] keyed
221 * EAX scratch
222 * ECX scratch
223 * EDX scratch
224 */
225 #define rnd_fun(m1, m2) \
226 roll $16, %ebx; \
227 \
228 ## m1 ## _zo(esi, cl, 0, ebp); \
229 m1(esi, dh, 1, ebp); \
230 m1(esi, bh, 3, ebp); \
231 ## m1 ## _zo(edi, dl, 0, ebp); \
232 m1(edi, ah, 1, ebp); \
233 m1(edi, bl, 2, ebp); \
234 ## m2 ## _zo(ebp, al, 0, ebp); \
235 \
236 shrl $16, %ebx; \
237 andl $0xffff0000, %eax; \
238 orl %ebx, %eax; \
239 shrl $16, %edx; \
240 \
241 m1(ebp, ah, 1, ebx); \
242 m1(ebp, dh, 3, ebx); \
243 m2(ebx, dl, 2, ebx); \
244 m1(ebx, ch, 1, edx); \
245 ## m1 ## _zo(ebx, al, 0, edx); \
246 \
247 shrl $16, %eax; \
248 shrl $16, %ecx; \
249 \
250 m1(ebp, cl, 2, edx); \
251 m1(edi, ch, 3, edx); \
252 m1(esi, al, 2, edx); \
253 m1(ebx, ah, 3, edx)
254
255 /*
256 * Basic MOV and XOR Operations for normal rounds
257 */
258 #define nr_xor_zo nr_xor
259 #define nr_xor(r1, r2, r3, r4) \
260 movzbl %r2, %r4; \
261 xorl etab_ ## r3(%r4), %r1;
262
263 #define nr_mov_zo nr_mov
264 #define nr_mov(r1, r2, r3, r4) \
265 movzbl %r2, %r4; \
266 movl etab_ ## r3(%r4), %r1;
267
268 /*
269 * Basic MOV and XOR Operations for last round
270 */
271
272 #if 1
273
274 #define lr_xor_zo(r1, r2, r3, r4) \
275 movzbl %r2, %r4; \
276 movzbl etab_b(%r4), %r4; \
277 xor %r4, %r1;
278
279 #define lr_xor(r1, r2, r3, r4) \
280 movzbl %r2, %r4; \
281 movzbl etab_b(%r4), %r4; \
282 shll $(8*r3), %r4; \
283 xor %r4, %r1;
284
285 #define lr_mov_zo(r1, r2, r3, r4) \
286 movzbl %r2, %r4; \
287 movzbl etab_b(%r4), %r1;
288
289 #define lr_mov(r1, r2, r3, r4) \
290 movzbl %r2, %r4; \
291 movzbl etab_b(%r4), %r1; \
292 shll $(8*r3), %r1;
293
294 #else /* less effective but worth leaving as an option */
295
296 #define lr_xor_zo lr_xor
297 #define lr_xor(r1, r2, r3, r4) \
298 movzbl %r2, %r4; \
299 mov btab_ ## r3(%r4), %r4; \
300 andl $(0x000000ff << 8 * r3), %r4; \
301 xor %r4, %r1;
302
303 #define lr_mov_zo lr_mov
304 #define lr_mov(r1, r2, r3, r4) \
305 movzbl %r2, %r4; \
306 mov btab_ ## r3(%r4), %r1; \
307 andl $(0x000000ff << 8 * r3), %r1;
308
309 #endif
310
311 /*
312 * Apply S-Box to the 4 bytes in a 32-bit word and rotate left 3 byte positions
313 *
314 * r1 : output is xored into this register
315 * r2 : input: a => eax, b => ebx, c => ecx, d => edx
316 * r3 : scratch register
317 */
318
319 #define l3s_col(r1, r2, r3) \
320 lr_xor_zo(r1, ## r2 ## h, 0, r3); \
321 lr_xor(r1, ## r2 ## l, 3, r3); \
322 shrl $16, %e ## r2 ## x; \
323 lr_xor(r1, ## r2 ## h, 2, r3); \
324 lr_xor(r1, ## r2 ## l, 1, r3);
325
326 /*
327 * offsets to parameters
328 */
329 #define in_blk 4 /* input byte array address parameter */
330 #define out_blk 8 /* output byte array address parameter */
331 #define ctx 12 /* AES context structure */
332 #define stk_spc 20 /* stack space */
333
334 #ifdef ENCRYPTION
335
336 #define ENCRYPTION_TABLE
337
338 #define enc_round \
339 addl $16, %ebp; \
340 save(ebp); \
341 movl 8(%ebp), %esi; \
342 movl 12(%ebp), %edi; \
343 \
344 rnd_fun(nr_xor, nr_mov); \
345 \
346 movl %ebp, %eax; \
347 movl %esi, %ecx; \
348 movl %edi, %edx; \
349 restore(ebp); \
350 xorl (%ebp), %eax; \
351 xorl 4(%ebp), %ebx;
352
353 #define enc_last_round \
354 addl $16, %ebp; \
355 save(ebp); \
356 movl 8(%ebp), %esi; \
357 movl 12(%ebp), %edi; \
358 \
359 rnd_fun(lr_xor, lr_mov); \
360 \
361 movl %ebp, %eax; \
362 restore(ebp); \
363 xorl (%ebp), %eax; \
364 xorl 4(%ebp), %ebx;
365
366 .section __TEXT, __text
367
368 /*
369 * AES Encryption Subroutine
370 */
371 Entry(aes_encrypt)
372
373 subl $stk_spc, %esp
374 movl %ebp, 16(%esp)
375 movl %ebx, 12(%esp)
376 movl %esi, 8(%esp)
377 movl %edi, 4(%esp)
378
379 movl in_blk+stk_spc(%esp), %esi /* input pointer */
380 movl (%esi), %eax
381 movl 4(%esi), %ebx
382 movl 8(%esi), %ecx
383 movl 12(%esi), %edx
384
385 movl ctx+stk_spc(%esp), %ebp /* key pointer */
386 movzbl 4*KS_LENGTH(%ebp), %edi
387 xorl (%ebp), %eax
388 xorl 4(%ebp), %ebx
389 xorl 8(%ebp), %ecx
390 xorl 12(%ebp), %edx
391
392 /*
393 * determine the number of rounds
394 */
395 cmpl $10*16, %edi
396 je aes_encrypt.3
397 cmpl $12*16, %edi
398 je aes_encrypt.2
399 cmpl $14*16, %edi
400 je aes_encrypt.1
401 movl $-1, %eax
402 jmp aes_encrypt.5
403
404 aes_encrypt.1:
405 enc_round
406 enc_round
407 aes_encrypt.2:
408 enc_round
409 enc_round
410 aes_encrypt.3:
411 enc_round
412 enc_round
413 enc_round
414 enc_round
415 enc_round
416 enc_round
417 enc_round
418 enc_round
419 enc_round
420 enc_last_round
421
422 movl out_blk+stk_spc(%esp), %edx
423 movl %eax, (%edx)
424 movl %ebx, 4(%edx)
425 movl %esi, 8(%edx)
426 movl %edi, 12(%edx)
427 xorl %eax, %eax
428
429 aes_encrypt.5:
430 movl 16(%esp), %ebp
431 movl 12(%esp), %ebx
432 movl 8(%esp), %esi
433 movl 4(%esp), %edi
434 addl $stk_spc, %esp
435 ret
436
437 #endif
438
439 /*
440 * For r2 == 16, or r2 == 24 && r1 == 7, or r2 ==32 && r1 == 6
441 */
442 #define f_key(r1, r2, rc_val) \
443 l3s_col(esi, a, ebx); \
444 xorl $rc_val, %esi; \
445 \
446 movl %esi, r1*r2(%ebp); \
447 xorl %esi, %edi; \
448 movl %edi, r1*r2+4(%ebp); \
449 xorl %edi, %ecx; \
450 movl %ecx, r1*r2+8(%ebp); \
451 xorl %ecx, %edx; \
452 movl %edx, r1*r2+12(%ebp); \
453 movl %edx, %eax;
454
455 /*
456 * For r2 == 24 && r1 == 0 to 6
457 */
458 #define f_key_24(r1, r2, rc_val) \
459 f_key(r1, r2, rc_val); \
460 \
461 xorl r1*r2+16-r2(%ebp), %eax; \
462 movl %eax, r1*r2+16(%ebp); \
463 xorl r1*r2+20-r2(%ebp), %eax; \
464 movl %eax, r1*r2+20(%ebp);
465
466 /*
467 * For r2 ==32 && r1 == 0 to 5
468 */
469 #define f_key_32(r1, r2, rc_val) \
470 f_key(r1, r2, rc_val); \
471 \
472 roll $8, %eax; \
473 pushl %edx; \
474 movl r1*r2+16-r2(%ebp), %edx; \
475 l3s_col(edx, a, ebx); \
476 movl %edx, %eax; \
477 popl %edx; \
478 movl %eax, r1*r2+16(%ebp); \
479 xorl r1*r2+20-r2(%ebp), %eax; \
480 movl %eax, r1*r2+20(%ebp); \
481 xorl r1*r2+24-r2(%ebp), %eax; \
482 movl %eax, r1*r2+24(%ebp); \
483 xorl r1*r2+28-r2(%ebp), %eax; \
484 movl %eax, r1*r2+28(%ebp);
485
486 #ifdef ENCRYPTION_KEY_SCHEDULE
487
488 #ifdef AES_128
489
490 #ifndef ENCRYPTION_TABLE
491 #define ENCRYPTION_TABLE
492 #endif
493
494 Entry(aes_encrypt_key128)
495
496 pushl %ebp
497 pushl %ebx
498 pushl %esi
499 pushl %edi
500
501 movl 24(%esp), %ebp
502 movl $10*16, 4*KS_LENGTH(%ebp)
503 movl 20(%esp), %ebx
504
505 movl (%ebx), %esi
506 movl %esi, (%ebp)
507 movl 4(%ebx), %edi
508 movl %edi, 4(%ebp)
509 movl 8(%ebx), %ecx
510 movl %ecx, 8(%ebp)
511 movl 12(%ebx), %edx
512 movl %edx, 12(%ebp)
513 addl $16, %ebp
514 movl %edx, %eax
515
516 f_key(0, 16, 1)
517 f_key(1, 16, 2)
518 f_key(2, 16, 4)
519 f_key(3, 16, 8)
520 f_key(4, 16, 16)
521 f_key(5, 16, 32)
522 f_key(6, 16, 64)
523 f_key(7, 16, 128)
524 f_key(8, 16, 27)
525 f_key(9, 16, 54)
526
527 popl %edi
528 popl %esi
529 popl %ebx
530 popl %ebp
531 xorl %eax, %eax
532 ret
533
534 #endif
535
536 #ifdef AES_192
537
538 #ifndef ENCRYPTION_TABLE
539 #define ENCRYPTION_TABLE
540 #endif
541
542 Entry(aes_encrypt_key192)
543
544 pushl %ebp
545 pushl %ebx
546 pushl %esi
547 pushl %edi
548
549 movl 24(%esp), %ebp
550 movl $12*16, 4*KS_LENGTH(%ebp)
551 movl 20(%esp), %ebx
552
553 movl (%ebx), %esi
554 movl %esi, (%ebp)
555 movl 4(%ebx), %edi
556 movl %edi, 4(%ebp)
557 movl 8(%ebx), %ecx
558 movl %ecx, 8(%ebp)
559 movl 12(%ebx), %edx
560 movl %edx, 12(%ebp)
561 movl 16(%ebx), %eax
562 movl %eax, 16(%ebp)
563 movl 20(%ebx), %eax
564 movl %eax, 20(%ebp)
565 addl $24, %ebp
566
567 f_key_24(0, 24, 1)
568 f_key_24(1, 24, 2)
569 f_key_24(2, 24, 4)
570 f_key_24(3, 24, 8)
571 f_key_24(4, 24, 16)
572 f_key_24(5, 24, 32)
573 f_key_24(6, 24, 64)
574 f_key(7, 24, 128)
575
576 popl %edi
577 popl %esi
578 popl %ebx
579 popl %ebp
580 xorl %eax, %eax
581 ret
582
583 #endif
584
585 #ifdef AES_256
586
587 #ifndef ENCRYPTION_TABLE
588 #define ENCRYPTION_TABLE
589 #endif
590
591 Entry(aes_encrypt_key256)
592
593 pushl %ebp
594 pushl %ebx
595 pushl %esi
596 pushl %edi
597
598 movl 24(%esp), %ebp
599 movl $14*16, 4*KS_LENGTH(%ebp)
600 movl 20(%esp), %ebx
601
602 movl (%ebx), %esi
603 movl %esi, (%ebp)
604 movl 4(%ebx), %edi
605 movl %edi, 4(%ebp)
606 movl 8(%ebx), %ecx
607 movl %ecx, 8(%ebp)
608 movl 12(%ebx), %edx
609 movl %edx, 12(%ebp)
610 movl 16(%ebx), %eax
611 movl %eax, 16(%ebp)
612 movl 20(%ebx), %eax
613 movl %eax, 20(%ebp)
614 movl 24(%ebx), %eax
615 movl %eax, 24(%ebp)
616 movl 28(%ebx), %eax
617 movl %eax, 28(%ebp)
618 addl $32, %ebp
619
620 f_key_32(0, 32, 1)
621 f_key_32(1, 32, 2)
622 f_key_32(2, 32, 4)
623 f_key_32(3, 32, 8)
624 f_key_32(4, 32, 16)
625 f_key_32(5, 32, 32)
626 f_key(6, 32, 64)
627
628 popl %edi
629 popl %esi
630 popl %ebx
631 popl %ebp
632 xorl %eax, %eax
633 ret
634
635 #endif
636
637 #ifdef AES_VAR
638
639 #ifndef ENCRYPTION_TABLE
640 #define ENCRYPTION_TABLE
641 #endif
642
643 Entry(aes_encrypt_key)
644
645 movl 4(%esp), %ecx
646 movl 8(%esp), %eax
647 movl 12(%esp), %edx
648 pushl %edx
649 pushl %ecx
650
651 cmpl $16, %eax
652 je aes_encrypt_key.1
653 cmpl $128, %eax
654 je aes_encrypt_key.1
655
656 cmpl $24, %eax
657 je aes_encrypt_key.2
658 cmpl $192, %eax
659 je aes_encrypt_key.2
660
661 cmpl $32, %eax
662 je aes_encrypt_key.3
663 cmpl $256, %eax
664 je aes_encrypt_key.3
665 movl $-1, %eax
666 addl $8, %esp
667 ret
668
669 aes_encrypt_key.1:
670 do_call(aes_encrypt_key128, 8)
671 ret
672 aes_encrypt_key.2:
673 do_call(aes_encrypt_key192, 8)
674 ret
675 aes_encrypt_key.3:
676 do_call(aes_encrypt_key256, 8)
677 ret
678
679 #endif
680
681 #endif
682
683 #ifdef ENCRYPTION_TABLE
684
685 # S-box data - 256 entries
686
687 .section __DATA, __data
688 .align ALIGN
689
690 #define u8(x) 0, x, x, f3(x), f2(x), x, x, f3(x)
691
692 enc_tab:
693 .byte u8(0x63),u8(0x7c),u8(0x77),u8(0x7b),u8(0xf2),u8(0x6b),u8(0x6f),u8(0xc5)
694 .byte u8(0x30),u8(0x01),u8(0x67),u8(0x2b),u8(0xfe),u8(0xd7),u8(0xab),u8(0x76)
695 .byte u8(0xca),u8(0x82),u8(0xc9),u8(0x7d),u8(0xfa),u8(0x59),u8(0x47),u8(0xf0)
696 .byte u8(0xad),u8(0xd4),u8(0xa2),u8(0xaf),u8(0x9c),u8(0xa4),u8(0x72),u8(0xc0)
697 .byte u8(0xb7),u8(0xfd),u8(0x93),u8(0x26),u8(0x36),u8(0x3f),u8(0xf7),u8(0xcc)
698 .byte u8(0x34),u8(0xa5),u8(0xe5),u8(0xf1),u8(0x71),u8(0xd8),u8(0x31),u8(0x15)
699 .byte u8(0x04),u8(0xc7),u8(0x23),u8(0xc3),u8(0x18),u8(0x96),u8(0x05),u8(0x9a)
700 .byte u8(0x07),u8(0x12),u8(0x80),u8(0xe2),u8(0xeb),u8(0x27),u8(0xb2),u8(0x75)
701 .byte u8(0x09),u8(0x83),u8(0x2c),u8(0x1a),u8(0x1b),u8(0x6e),u8(0x5a),u8(0xa0)
702 .byte u8(0x52),u8(0x3b),u8(0xd6),u8(0xb3),u8(0x29),u8(0xe3),u8(0x2f),u8(0x84)
703 .byte u8(0x53),u8(0xd1),u8(0x00),u8(0xed),u8(0x20),u8(0xfc),u8(0xb1),u8(0x5b)
704 .byte u8(0x6a),u8(0xcb),u8(0xbe),u8(0x39),u8(0x4a),u8(0x4c),u8(0x58),u8(0xcf)
705 .byte u8(0xd0),u8(0xef),u8(0xaa),u8(0xfb),u8(0x43),u8(0x4d),u8(0x33),u8(0x85)
706 .byte u8(0x45),u8(0xf9),u8(0x02),u8(0x7f),u8(0x50),u8(0x3c),u8(0x9f),u8(0xa8)
707 .byte u8(0x51),u8(0xa3),u8(0x40),u8(0x8f),u8(0x92),u8(0x9d),u8(0x38),u8(0xf5)
708 .byte u8(0xbc),u8(0xb6),u8(0xda),u8(0x21),u8(0x10),u8(0xff),u8(0xf3),u8(0xd2)
709 .byte u8(0xcd),u8(0x0c),u8(0x13),u8(0xec),u8(0x5f),u8(0x97),u8(0x44),u8(0x17)
710 .byte u8(0xc4),u8(0xa7),u8(0x7e),u8(0x3d),u8(0x64),u8(0x5d),u8(0x19),u8(0x73)
711 .byte u8(0x60),u8(0x81),u8(0x4f),u8(0xdc),u8(0x22),u8(0x2a),u8(0x90),u8(0x88)
712 .byte u8(0x46),u8(0xee),u8(0xb8),u8(0x14),u8(0xde),u8(0x5e),u8(0x0b),u8(0xdb)
713 .byte u8(0xe0),u8(0x32),u8(0x3a),u8(0x0a),u8(0x49),u8(0x06),u8(0x24),u8(0x5c)
714 .byte u8(0xc2),u8(0xd3),u8(0xac),u8(0x62),u8(0x91),u8(0x95),u8(0xe4),u8(0x79)
715 .byte u8(0xe7),u8(0xc8),u8(0x37),u8(0x6d),u8(0x8d),u8(0xd5),u8(0x4e),u8(0xa9)
716 .byte u8(0x6c),u8(0x56),u8(0xf4),u8(0xea),u8(0x65),u8(0x7a),u8(0xae),u8(0x08)
717 .byte u8(0xba),u8(0x78),u8(0x25),u8(0x2e),u8(0x1c),u8(0xa6),u8(0xb4),u8(0xc6)
718 .byte u8(0xe8),u8(0xdd),u8(0x74),u8(0x1f),u8(0x4b),u8(0xbd),u8(0x8b),u8(0x8a)
719 .byte u8(0x70),u8(0x3e),u8(0xb5),u8(0x66),u8(0x48),u8(0x03),u8(0xf6),u8(0x0e)
720 .byte u8(0x61),u8(0x35),u8(0x57),u8(0xb9),u8(0x86),u8(0xc1),u8(0x1d),u8(0x9e)
721 .byte u8(0xe1),u8(0xf8),u8(0x98),u8(0x11),u8(0x69),u8(0xd9),u8(0x8e),u8(0x94)
722 .byte u8(0x9b),u8(0x1e),u8(0x87),u8(0xe9),u8(0xce),u8(0x55),u8(0x28),u8(0xdf)
723 .byte u8(0x8c),u8(0xa1),u8(0x89),u8(0x0d),u8(0xbf),u8(0xe6),u8(0x42),u8(0x68)
724 .byte u8(0x41),u8(0x99),u8(0x2d),u8(0x0f),u8(0xb0),u8(0x54),u8(0xbb),u8(0x16)
725
726 #endif
727
728 #ifdef DECRYPTION
729
730 #define DECRYPTION_TABLE
731
732 #define dtab_0(x) dec_tab(,x,8)
733 #define dtab_1(x) dec_tab+3(,x,8)
734 #define dtab_2(x) dec_tab+2(,x,8)
735 #define dtab_3(x) dec_tab+1(,x,8)
736 #define dtab_x(x) dec_tab+7(,x,8)
737
738 #define irn_fun(m1, m2) \
739 roll $16, %eax; \
740 \
741 ## m1 ## _zo(esi, cl, 0, ebp); \
742 m1(esi, bh, 1, ebp); \
743 m1(esi, al, 2, ebp); \
744 ## m1 ## _zo(edi, dl, 0, ebp); \
745 m1(edi, ch, 1, ebp); \
746 m1(edi, ah, 3, ebp); \
747 ## m2 ## _zo(ebp, bl, 0, ebp); \
748 \
749 shrl $16, %eax; \
750 andl $0xffff0000, %ebx; \
751 orl %eax, %ebx; \
752 shrl $16, %ecx; \
753 \
754 m1(ebp, bh, 1, eax); \
755 m1(ebp, ch, 3, eax); \
756 m2(eax, cl, 2, ecx); \
757 ## m1 ## _zo(eax, bl, 0, ecx); \
758 m1(eax, dh, 1, ecx); \
759 \
760 shrl $16, %ebx; \
761 shrl $16, %edx; \
762 \
763 m1(esi, dh, 3, ecx); \
764 m1(ebp, dl, 2, ecx); \
765 m1(eax, bh, 3, ecx); \
766 m1(edi, bl, 2, ecx);
767
768 /*
769 * Basic MOV and XOR Operations for normal rounds
770 */
771 #define ni_xor_zo ni_xor
772 #define ni_xor(r1, r2, r3, r4) \
773 movzbl %r2, %r4; \
774 xorl dtab_ ## r3 ## (%r4), %r1;
775
776 #define ni_mov_zo ni_mov
777 #define ni_mov(r1, r2, r3, r4) \
778 movzbl %r2, %r4; \
779 movl dtab_ ## r3 ## (%r4), %r1;
780
781 /*
782 * Basic MOV and XOR Operations for last round
783 */
784
785 #define li_xor_zo(r1, r2, r3, r4) \
786 movzbl %r2, %r4; \
787 movzbl dtab_x(%r4), %r4; \
788 xor %r4, %r1;
789
790 #define li_xor(r1, r2, r3, r4) \
791 movzbl %r2, %r4; \
792 movzbl dtab_x(%r4), %r4; \
793 shll $(8*r3), %r4; \
794 xor %r4, %r1;
795
796 #define li_mov_zo(r1, r2, r3, r4) \
797 movzbl %r2, %r4; \
798 movzbl dtab_x(%r4), %r1;
799
800 #define li_mov(r1, r2, r3, r4) \
801 movzbl %r2, %r4; \
802 movzbl dtab_x(%r4), %r1; \
803 shl $(8*r3), %r1;
804
805 #ifdef AES_REV_DKS
806
807 #define dec_round \
808 addl $16, %ebp; \
809 save(ebp); \
810 movl 8(%ebp), %esi; \
811 movl 12(%ebp), %edi; \
812 \
813 irn_fun(ni_xor, ni_mov); \
814 \
815 movl %ebp, %ebx; \
816 movl %esi, %ecx; \
817 movl %edi, %edx; \
818 restore(ebp); \
819 xorl (%ebp), %eax; \
820 xorl 4(%ebp), %ebx;
821
822 #define dec_last_round \
823 addl $16, %ebp; \
824 save(ebp); \
825 movl 8(%ebp), %esi; \
826 movl 12(%ebp), %edi; \
827 \
828 irn_fun(li_xor, li_mov); \
829 \
830 movl %ebp, %ebx; \
831 restore(ebp); \
832 xorl (%ebp), %eax; \
833 xorl 4(%ebp), %ebx;
834
835 #else
836
837 #define dec_round \
838 subl $16, %ebp; \
839 save(ebp); \
840 movl 8(%ebp), %esi; \
841 movl 12(%ebp), %edi; \
842 \
843 irn_fun(ni_xor, ni_mov); \
844 \
845 movl %ebp, %ebx; \
846 movl %esi, %ecx; \
847 movl %edi, %edx; \
848 restore(ebp); \
849 xorl (%ebp), %eax; \
850 xorl 4(%ebp), %ebx;
851
852 #define dec_last_round \
853 subl $16, %ebp; \
854 save(ebp); \
855 movl 8(%ebp), %esi; \
856 movl 12(%ebp), %edi; \
857 \
858 irn_fun(li_xor, li_mov); \
859 \
860 movl %ebp, %ebx; \
861 restore(ebp); \
862 xorl (%ebp), %eax; \
863 xorl 4(%ebp), %ebx;
864
865 #endif /* AES_REV_DKS */
866
867 .section __TEXT, __text
868
869 /*
870 * AES Decryption Subroutine
871 */
872 Entry(aes_decrypt)
873
874 subl $stk_spc, %esp
875 movl %ebp, 16(%esp)
876 movl %ebx, 12(%esp)
877 movl %esi, 8(%esp)
878 movl %edi, 4(%esp)
879
880 /*
881 * input four columns and xor in first round key
882 */
883 movl in_blk+stk_spc(%esp), %esi /* input pointer */
884 movl (%esi), %eax
885 movl 4(%esi), %ebx
886 movl 8(%esi), %ecx
887 movl 12(%esi), %edx
888 leal 16(%esi), %esi
889
890 movl ctx+stk_spc(%esp), %ebp /* key pointer */
891 movzbl 4*KS_LENGTH(%ebp), %edi
892 #ifndef AES_REV_DKS /* if decryption key schedule is not reversed */
893 leal (%ebp,%edi), %ebp /* we have to access it from the top down */
894 #endif
895 xorl (%ebp), %eax /* key schedule */
896 xorl 4(%ebp), %ebx
897 xorl 8(%ebp), %ecx
898 xorl 12(%ebp), %edx
899
900 /*
901 * determine the number of rounds
902 */
903 cmpl $10*16, %edi
904 je aes_decrypt.3
905 cmpl $12*16, %edi
906 je aes_decrypt.2
907 cmpl $14*16, %edi
908 je aes_decrypt.1
909 movl $-1, %eax
910 jmp aes_decrypt.5
911
912 aes_decrypt.1:
913 dec_round
914 dec_round
915 aes_decrypt.2:
916 dec_round
917 dec_round
918 aes_decrypt.3:
919 dec_round
920 dec_round
921 dec_round
922 dec_round
923 dec_round
924 dec_round
925 dec_round
926 dec_round
927 dec_round
928 dec_last_round
929
930 /*
931 * move final values to the output array.
932 */
933 movl out_blk+stk_spc(%esp), %ebp
934 movl %eax, (%ebp)
935 movl %ebx, 4(%ebp)
936 movl %esi, 8(%ebp)
937 movl %edi, 12(%ebp)
938 xorl %eax, %eax
939
940 aes_decrypt.5:
941 movl 16(%esp), %ebp
942 movl 12(%esp), %ebx
943 movl 8(%esp), %esi
944 movl 4(%esp), %edi
945 addl $stk_spc, %esp
946 ret
947
948 #endif
949
950 #define inv_mix_col \
951 movzbl %dl, %ebx; \
952 movzbl etab_b(%ebx), %ebx; \
953 movl dtab_0(%ebx), %eax; \
954 movzbl %dh, %ebx; \
955 shrl $16, %edx; \
956 movzbl etab_b(%ebx), %ebx; \
957 xorl dtab_1(%ebx), %eax; \
958 movzbl %dl, %ebx; \
959 movzbl etab_b(%ebx), %ebx; \
960 xorl dtab_2(%ebx), %eax; \
961 movzbl %dh, %ebx; \
962 movzbl etab_b(%ebx), %ebx; \
963 xorl dtab_3(%ebx), %eax;
964
965 #ifdef DECRYPTION_KEY_SCHEDULE
966
967 #ifdef AES_128
968
969 #ifndef DECRYPTION_TABLE
970 #define DECRYPTION_TABLE
971 #endif
972
973 Entry(aes_decrypt_key128)
974
975 pushl %ebp
976 pushl %ebx
977 pushl %esi
978 pushl %edi
979 movl 24(%esp), %eax /* context */
980 movl 20(%esp), %edx /* key */
981 pushl %eax
982 pushl %edx
983 do_call(aes_encrypt_key128, 8)
984 movl $10*16, %eax
985 movl 24(%esp), %esi /* pointer to first round key */
986 leal (%esi,%eax), %edi /* pointer to last round key */
987 addl $32, %esi
988 /* the inverse mix column transformation */
989 movl -16(%esi), %edx /* needs to be applied to all round keys */
990 inv_mix_col
991 movl %eax, -16(%esi) /* transforming the four sub-keys in the */
992 movl -12(%esi), %edx /* second round key */
993 inv_mix_col
994 movl %eax, -12(%esi) /* transformations for subsequent rounds */
995 movl -8(%esi), %edx /* can then be made more efficient by */
996 inv_mix_col
997 movl %eax, -8(%esi) /* in the encryption round key ek[r]: */
998 movl -4(%esi), %edx
999 inv_mix_col
1000 movl %eax, -4(%esi) /* where n is 1..3. Hence the corresponding */
1001
1002 aes_decrypt_key128.0:
1003 movl (%esi), %edx /* subkeys in the decryption round key dk[r] */
1004 inv_mix_col
1005 movl %eax, (%esi) /* GF(256): */
1006 xorl -12(%esi), %eax
1007 movl %eax, 4(%esi) /* dk[r][n] = dk[r][n-1] ^ dk[r-1][n] */
1008 xorl -8(%esi), %eax
1009 movl %eax, 8(%esi) /* So we only need one inverse mix column */
1010 xorl -4(%esi), %eax /* operation (n = 0) for each four word cycle */
1011 movl %eax, 12(%esi) /* in the expanded key. */
1012 addl $16, %esi
1013 cmpl %esi, %edi
1014 jg aes_decrypt_key128.0
1015 jmp dec_end
1016
1017 #endif
1018
1019 #ifdef AES_192
1020
1021 #ifndef DECRYPTION_TABLE
1022 #define DECRYPTION_TABLE
1023 #endif
1024
1025 Entry(aes_decrypt_key192)
1026
1027 pushl %ebp
1028 pushl %ebx
1029 pushl %esi
1030 pushl %edi
1031 movl 24(%esp), %eax /* context */
1032 movl 20(%esp), %edx /* key */
1033 pushl %eax
1034 pushl %edx
1035 do_call(aes_encrypt_key192, 8)
1036 movl $12*16, %eax
1037 movl 24(%esp), %esi /* first round key */
1038 leal (%esi,%eax), %edi /* last round key */
1039 addl $48, %esi /* the first 6 words are the key, of */
1040 /* which the top 2 words are part of */
1041 movl -32(%esi), %edx /* the second round key and hence */
1042 inv_mix_col
1043 movl %eax, -32(%esi) /* need to do a further six values prior */
1044 movl -28(%esi), %edx /* to using a more efficient technique */
1045 inv_mix_col
1046 movl %eax, -28(%esi)
1047 /* dk[r][n] = dk[r][n-1] ^ dk[r-1][n] */
1048 movl -24(%esi), %edx
1049 inv_mix_col
1050 movl %eax, -24(%esi) /* cycle is now 6 words long */
1051 movl -20(%esi), %edx
1052 inv_mix_col
1053 movl %eax, -20(%esi)
1054 movl -16(%esi), %edx
1055 inv_mix_col
1056 movl %eax, -16(%esi)
1057 movl -12(%esi), %edx
1058 inv_mix_col
1059 movl %eax, -12(%esi)
1060 movl -8(%esi), %edx
1061 inv_mix_col
1062 movl %eax, -8(%esi)
1063 movl -4(%esi), %edx
1064 inv_mix_col
1065 movl %eax, -4(%esi)
1066
1067 aes_decrypt_key192.0:
1068 movl (%esi), %edx /* expanded key is 13 * 4 = 44 32-bit words */
1069 inv_mix_col
1070 movl %eax, (%esi) /* using inv_mix_col. We have already done 8 */
1071 xorl -20(%esi), %eax /* of these so 36 are left - hence we need */
1072 movl %eax, 4(%esi) /* exactly 6 loops of six here */
1073 xorl -16(%esi), %eax
1074 movl %eax, 8(%esi)
1075 xorl -12(%esi), %eax
1076 movl %eax, 12(%esi)
1077 xorl -8(%esi), %eax
1078 movl %eax, 16(%esi)
1079 xorl -4(%esi), %eax
1080 movl %eax, 20(%esi)
1081 addl $24, %esi
1082 cmpl %esi, %edi
1083 jg aes_decrypt_key192.0
1084 jmp dec_end
1085
1086 #endif
1087
1088 #ifdef AES_256
1089
1090 #ifndef DECRYPTION_TABLE
1091 #define DECRYPTION_TABLE
1092 #endif
1093
1094 Entry(aes_decrypt_key256)
1095
1096 pushl %ebp
1097 pushl %ebx
1098 pushl %esi
1099 pushl %edi
1100 movl 24(%esp), %eax
1101 movl 20(%esp), %edx
1102 pushl %eax
1103 pushl %edx
1104 do_call(aes_encrypt_key256, 8)
1105 movl $14*16, %eax
1106 movl 24(%esp), %esi
1107 leal (%esi,%eax), %edi
1108 addl $64, %esi
1109
1110 movl -48(%esi), %edx /* the primary key is 8 words, of which */
1111 inv_mix_col
1112 movl %eax, -48(%esi)
1113 movl -44(%esi), %edx
1114 inv_mix_col
1115 movl %eax, -44(%esi)
1116 movl -40(%esi), %edx
1117 inv_mix_col
1118 movl %eax, -40(%esi)
1119 movl -36(%esi), %edx
1120 inv_mix_col
1121 movl %eax, -36(%esi)
1122
1123 movl -32(%esi), %edx /* the encryption key expansion cycle is */
1124 inv_mix_col
1125 movl %eax, -32(%esi) /* start by doing one complete block */
1126 movl -28(%esi), %edx
1127 inv_mix_col
1128 movl %eax, -28(%esi)
1129 movl -24(%esi), %edx
1130 inv_mix_col
1131 movl %eax, -24(%esi)
1132 movl -20(%esi), %edx
1133 inv_mix_col
1134 movl %eax, -20(%esi)
1135 movl -16(%esi), %edx
1136 inv_mix_col
1137 movl %eax, -16(%esi)
1138 movl -12(%esi), %edx
1139 inv_mix_col
1140 movl %eax, -12(%esi)
1141 movl -8(%esi), %edx
1142 inv_mix_col
1143 movl %eax, -8(%esi)
1144 movl -4(%esi), %edx
1145 inv_mix_col
1146 movl %eax, -4(%esi)
1147
1148 aes_decrypt_key256.0:
1149 movl (%esi), %edx /* we can now speed up the remaining */
1150 inv_mix_col
1151 movl %eax, (%esi) /* outlined earlier. But note that */
1152 xorl -28(%esi), %eax /* there is one extra inverse mix */
1153 movl %eax, 4(%esi) /* column operation as the 256 bit */
1154 xorl -24(%esi), %eax /* key has an extra non-linear step */
1155 movl %eax, 8(%esi) /* for the midway element. */
1156 xorl -20(%esi), %eax
1157 movl %eax, 12(%esi) /* the expanded key is 15 * 4 = 60 */
1158 movl 16(%esi), %edx /* 32-bit words of which 52 need to */
1159 inv_mix_col
1160 movl %eax, 16(%esi) /* 12 so 40 are left - which means */
1161 xorl -12(%esi), %eax /* that we need exactly 5 loops of 8 */
1162 movl %eax, 20(%esi)
1163 xorl -8(%esi), %eax
1164 movl %eax, 24(%esi)
1165 xorl -4(%esi), %eax
1166 movl %eax, 28(%esi)
1167 addl $32, %esi
1168 cmpl %esi, %edi
1169 jg aes_decrypt_key256.0
1170
1171 #endif
1172
1173 dec_end:
1174
1175 #ifdef AES_REV_DKS
1176
1177 movl 24(%esp), %esi /* this reverses the order of the */
1178 dec_end.1:
1179 movl (%esi), %eax /* round keys if required */
1180 movl 4(%esi), %ebx
1181 movl (%edi), %ebp
1182 movl 4(%edi), %edx
1183 movl %ebp, (%esi)
1184 movl %edx, 4(%esi)
1185 movl %eax, (%edi)
1186 movl %ebx, 4(%edi)
1187
1188 movl 8(%esi), %eax
1189 movl 12(%esi), %ebx
1190 movl 8(%edi), %ebp
1191 movl 12(%edi), %edx
1192 movl %ebp, 8(%esi)
1193 movl %edx, 12(%esi)
1194 movl %eax, 8(%edi)
1195 movl %ebx, 12(%edi)
1196
1197 addl $16, %esi
1198 subl $16, %edi
1199 cmpl %esi, %edi
1200 jg dec_end.1
1201
1202 #endif
1203
1204 popl %edi
1205 popl %esi
1206 popl %ebx
1207 popl %ebp
1208 xorl %eax, %eax
1209 ret
1210
1211 #ifdef AES_VAR
1212
1213 Entry(aes_decrypt_key)
1214
1215 movl 4(%esp), %ecx
1216 movl 8(%esp), %eax
1217 movl 12(%esp), %edx
1218 pushl %edx
1219 pushl %ecx
1220
1221 cmpl $16, %eax
1222 je aes_decrypt_key.1
1223 cmpl $128, %eax
1224 je aes_decrypt_key.1
1225
1226 cmpl $24, %eax
1227 je aes_decrypt_key.2
1228 cmpl $192, %eax
1229 je aes_decrypt_key.2
1230
1231 cmpl $32, %eax
1232 je aes_decrypt_key.3
1233 cmpl $256, %eax
1234 je aes_decrypt_key.3
1235 movl $-1, %eax
1236 addl $8, %esp
1237 ret
1238
1239 aes_decrypt_key.1:
1240 do_call(aes_decrypt_key128, 8)
1241 ret
1242 aes_decrypt_key.2:
1243 do_call(aes_decrypt_key192, 8)
1244 ret
1245 aes_decrypt_key.3:
1246 do_call(aes_decrypt_key256, 8)
1247 ret
1248
1249 #endif
1250
1251 #endif
1252
1253 #ifdef DECRYPTION_TABLE
1254
1255 /*
1256 * Inverse S-box data - 256 entries
1257 */
1258
1259 .section __DATA, __data
1260 .align ALIGN
1261
1262 #define v8(x) fe(x), f9(x), fd(x), fb(x), fe(x), f9(x), fd(x), x
1263
1264 dec_tab:
1265 .byte v8(0x52),v8(0x09),v8(0x6a),v8(0xd5),v8(0x30),v8(0x36),v8(0xa5),v8(0x38)
1266 .byte v8(0xbf),v8(0x40),v8(0xa3),v8(0x9e),v8(0x81),v8(0xf3),v8(0xd7),v8(0xfb)
1267 .byte v8(0x7c),v8(0xe3),v8(0x39),v8(0x82),v8(0x9b),v8(0x2f),v8(0xff),v8(0x87)
1268 .byte v8(0x34),v8(0x8e),v8(0x43),v8(0x44),v8(0xc4),v8(0xde),v8(0xe9),v8(0xcb)
1269 .byte v8(0x54),v8(0x7b),v8(0x94),v8(0x32),v8(0xa6),v8(0xc2),v8(0x23),v8(0x3d)
1270 .byte v8(0xee),v8(0x4c),v8(0x95),v8(0x0b),v8(0x42),v8(0xfa),v8(0xc3),v8(0x4e)
1271 .byte v8(0x08),v8(0x2e),v8(0xa1),v8(0x66),v8(0x28),v8(0xd9),v8(0x24),v8(0xb2)
1272 .byte v8(0x76),v8(0x5b),v8(0xa2),v8(0x49),v8(0x6d),v8(0x8b),v8(0xd1),v8(0x25)
1273 .byte v8(0x72),v8(0xf8),v8(0xf6),v8(0x64),v8(0x86),v8(0x68),v8(0x98),v8(0x16)
1274 .byte v8(0xd4),v8(0xa4),v8(0x5c),v8(0xcc),v8(0x5d),v8(0x65),v8(0xb6),v8(0x92)
1275 .byte v8(0x6c),v8(0x70),v8(0x48),v8(0x50),v8(0xfd),v8(0xed),v8(0xb9),v8(0xda)
1276 .byte v8(0x5e),v8(0x15),v8(0x46),v8(0x57),v8(0xa7),v8(0x8d),v8(0x9d),v8(0x84)
1277 .byte v8(0x90),v8(0xd8),v8(0xab),v8(0x00),v8(0x8c),v8(0xbc),v8(0xd3),v8(0x0a)
1278 .byte v8(0xf7),v8(0xe4),v8(0x58),v8(0x05),v8(0xb8),v8(0xb3),v8(0x45),v8(0x06)
1279 .byte v8(0xd0),v8(0x2c),v8(0x1e),v8(0x8f),v8(0xca),v8(0x3f),v8(0x0f),v8(0x02)
1280 .byte v8(0xc1),v8(0xaf),v8(0xbd),v8(0x03),v8(0x01),v8(0x13),v8(0x8a),v8(0x6b)
1281 .byte v8(0x3a),v8(0x91),v8(0x11),v8(0x41),v8(0x4f),v8(0x67),v8(0xdc),v8(0xea)
1282 .byte v8(0x97),v8(0xf2),v8(0xcf),v8(0xce),v8(0xf0),v8(0xb4),v8(0xe6),v8(0x73)
1283 .byte v8(0x96),v8(0xac),v8(0x74),v8(0x22),v8(0xe7),v8(0xad),v8(0x35),v8(0x85)
1284 .byte v8(0xe2),v8(0xf9),v8(0x37),v8(0xe8),v8(0x1c),v8(0x75),v8(0xdf),v8(0x6e)
1285 .byte v8(0x47),v8(0xf1),v8(0x1a),v8(0x71),v8(0x1d),v8(0x29),v8(0xc5),v8(0x89)
1286 .byte v8(0x6f),v8(0xb7),v8(0x62),v8(0x0e),v8(0xaa),v8(0x18),v8(0xbe),v8(0x1b)
1287 .byte v8(0xfc),v8(0x56),v8(0x3e),v8(0x4b),v8(0xc6),v8(0xd2),v8(0x79),v8(0x20)
1288 .byte v8(0x9a),v8(0xdb),v8(0xc0),v8(0xfe),v8(0x78),v8(0xcd),v8(0x5a),v8(0xf4)
1289 .byte v8(0x1f),v8(0xdd),v8(0xa8),v8(0x33),v8(0x88),v8(0x07),v8(0xc7),v8(0x31)
1290 .byte v8(0xb1),v8(0x12),v8(0x10),v8(0x59),v8(0x27),v8(0x80),v8(0xec),v8(0x5f)
1291 .byte v8(0x60),v8(0x51),v8(0x7f),v8(0xa9),v8(0x19),v8(0xb5),v8(0x4a),v8(0x0d)
1292 .byte v8(0x2d),v8(0xe5),v8(0x7a),v8(0x9f),v8(0x93),v8(0xc9),v8(0x9c),v8(0xef)
1293 .byte v8(0xa0),v8(0xe0),v8(0x3b),v8(0x4d),v8(0xae),v8(0x2a),v8(0xf5),v8(0xb0)
1294 .byte v8(0xc8),v8(0xeb),v8(0xbb),v8(0x3c),v8(0x83),v8(0x53),v8(0x99),v8(0x61)
1295 .byte v8(0x17),v8(0x2b),v8(0x04),v8(0x7e),v8(0xba),v8(0x77),v8(0xd6),v8(0x26)
1296 .byte v8(0xe1),v8(0x69),v8(0x14),v8(0x63),v8(0x55),v8(0x21),v8(0x0c),v8(0x7d)
1297
1298 #endif