]> git.saurik.com Git - apple/xnu.git/blob - EXTERNAL_HEADERS/corecrypto/cc_priv.h
xnu-6153.41.3.tar.gz
[apple/xnu.git] / EXTERNAL_HEADERS / corecrypto / cc_priv.h
1 /*
2 * cc_priv.h
3 * corecrypto
4 *
5 * Created on 12/01/2010
6 *
7 * Copyright (c) 2010,2011,2012,2014,2015 Apple Inc. All rights reserved.
8 *
9 */
10
11 #ifndef _CORECRYPTO_CC_PRIV_H_
12 #define _CORECRYPTO_CC_PRIV_H_
13
14 #include <corecrypto/cc.h>
15 #include <stdint.h>
16
17 // Fork handlers for the stateful components of corecrypto.
18 void cc_atfork_prepare(void);
19 void cc_atfork_parent(void);
20 void cc_atfork_child(void);
21
/* Portability shim: compilers without __has_builtin get a version that
 * always answers "no", so feature tests below degrade gracefully. */
#ifndef __has_builtin
#define __has_builtin(x) 0
#endif

/* Strip const from a pointer without a compiler warning (BSD idiom);
 * the round-trip through uintptr_t avoids a direct const-discarding cast. */
#ifndef __DECONST
#define __DECONST(type, var) ((type)(uintptr_t)(const void *)(var))
#endif
29
30 /* defines the following macros :
31
32 CC_ARRAY_LEN: returns the number of elements in an array
33
34 CC_STORE32_BE : store 32 bit value in big endian in unaligned buffer.
35 CC_STORE32_LE : store 32 bit value in little endian in unaligned buffer.
36 CC_STORE64_BE : store 64 bit value in big endian in unaligned buffer.
37 CC_STORE64_LE : store 64 bit value in little endian in unaligned buffer.
38
39 CC_LOAD32_BE : load 32 bit value in big endian from unaligned buffer.
40 CC_LOAD32_LE : load 32 bit value in little endian from unaligned buffer.
41 CC_LOAD64_BE : load 64 bit value in big endian from unaligned buffer.
42 CC_LOAD64_LE : load 64 bit value in little endian from unaligned buffer.
43
44 CC_ROR : Rotate Right 32 bits. Rotate count can be a variable.
45 CC_ROL : Rotate Left 32 bits. Rotate count can be a variable.
46 CC_RORc : Rotate Right 32 bits. Rotate count must be a constant.
47 CC_ROLc : Rotate Left 32 bits. Rotate count must be a constant.
48
49 CC_ROR64 : Rotate Right 64 bits. Rotate count can be a variable.
50 CC_ROL64 : Rotate Left 64 bits. Rotate count can be a variable.
51 CC_ROR64c : Rotate Right 64 bits. Rotate count must be a constant.
52 CC_ROL64c : Rotate Left 64 bits. Rotate count must be a constant.
53
 54         CC_BSWAP32 : byte swap a 32 bits variable.
55
56 CC_H2BE32 : convert a 32 bits value between host and big endian order.
57 CC_H2LE32 : convert a 32 bits value between host and little endian order.
58
59 CC_BSWAP64 : byte swap a 64 bits variable
60
61 CC_READ_LE32 : read a 32 bits little endian value
62
63 CC_WRITE_LE32 : write a 32 bits little endian value
64 CC_WRITE_LE64 : write a 64 bits little endian value
65
66 CC_H2BE64 : convert a 64 bits value between host and big endian order
67 CC_H2LE64 : convert a 64 bits value between host and little endian order
68
69 */
70
// <rdar://problem/40683103> RTKitOSPlatform should replace CC_MEMCPY with memcpy
// Legacy uppercase aliases for the checked memory primitives below.
#define CC_MEMCPY(D,S,L) cc_memcpy((D),(S),(L))
#define CC_MEMMOVE(D,S,L) cc_memmove((D),(S),(L))
#define CC_MEMSET(D,V,L) cc_memset((D),(V),(L))

/*
 * Where the compiler provides the object-size-checked builtins (and we are
 * not building for RTKit), route cc_memcpy/cc_memmove/cc_memset through them
 * so overlong writes trap against __builtin_object_size(dst, 1).
 * cc_memcpy_nochk checks against mode 0 (the whole enclosing object) for
 * callers that intentionally span subobject boundaries.
 */
#if __has_builtin(__builtin___memcpy_chk) && !CC_RTKIT
#define cc_memcpy(dst, src, len) __builtin___memcpy_chk((dst), (src), (len), __builtin_object_size((dst), 1))
#define cc_memcpy_nochk(dst, src, len) __builtin___memcpy_chk((dst), (src), (len), __builtin_object_size((dst), 0))
#else
#define cc_memcpy(dst, src, len) memcpy((dst), (src), (len))
#define cc_memcpy_nochk(dst, src, len) memcpy((dst), (src), (len))
#endif

#if __has_builtin(__builtin___memmove_chk) && !CC_RTKIT
#define cc_memmove(dst, src, len) __builtin___memmove_chk((dst), (src), (len), __builtin_object_size((dst), 1))
#else
#define cc_memmove(dst, src, len) memmove((dst), (src), (len))
#endif

#if __has_builtin(__builtin___memset_chk) && !CC_RTKIT
#define cc_memset(dst, val, len) __builtin___memset_chk((dst), (val), (len), __builtin_object_size((dst), 1))
#else
#define cc_memset(dst, val, len) memset((dst), (val), (len))
#endif
95
/* Element count of a true array. Only valid on arrays: a pointer (including
 * an array function parameter, which decays) gives a meaningless result. */
#define CC_ARRAY_LEN(x) (sizeof(x) / sizeof((x)[0]))
97
98 // MARK: - Loads and Store
99
100 // MARK: -- 32 bits - little endian
101
102 // MARK: --- Default version
103
/*
 * CC_STORE32_LE: store the 32-bit value v into buffer p, least-significant
 * byte first. Byte-at-a-time, so p may be unaligned.
 */
#define CC_STORE32_LE(v, p) do { \
    ((unsigned char *)(p))[3] = (unsigned char)(((v)>>24)&255); \
    ((unsigned char *)(p))[2] = (unsigned char)(((v)>>16)&255); \
    ((unsigned char *)(p))[1] = (unsigned char)(((v)>>8)&255); \
    ((unsigned char *)(p))[0] = (unsigned char)((v)&255); \
} while(0)

/*
 * CC_LOAD32_LE: read an unaligned little-endian 32-bit value from buffer p
 * into the lvalue v.
 */
#define CC_LOAD32_LE(v, p) do { \
    v = ((uint32_t)(((const unsigned char *)(p))[3] & 255)<<24) | \
        ((uint32_t)(((const unsigned char *)(p))[2] & 255)<<16) | \
        ((uint32_t)(((const unsigned char *)(p))[1] & 255)<<8) | \
        ((uint32_t)(((const unsigned char *)(p))[0] & 255)); \
} while(0)
117
// MARK: -- 64 bits - little endian

/* CC_STORE64_LE: store the 64-bit value v into buffer p, least-significant
 * byte first (unaligned-safe). */
#define CC_STORE64_LE(v, p) do { \
    ((unsigned char *)(p))[7] = (unsigned char)(((v)>>56)&255); \
    ((unsigned char *)(p))[6] = (unsigned char)(((v)>>48)&255); \
    ((unsigned char *)(p))[5] = (unsigned char)(((v)>>40)&255); \
    ((unsigned char *)(p))[4] = (unsigned char)(((v)>>32)&255); \
    ((unsigned char *)(p))[3] = (unsigned char)(((v)>>24)&255); \
    ((unsigned char *)(p))[2] = (unsigned char)(((v)>>16)&255); \
    ((unsigned char *)(p))[1] = (unsigned char)(((v)>>8)&255); \
    ((unsigned char *)(p))[0] = (unsigned char)((v)&255); \
} while(0)

/* CC_LOAD64_LE: read an unaligned little-endian 64-bit value from buffer p
 * into the lvalue v. */
#define CC_LOAD64_LE(v, p) do { \
    v = (((uint64_t)(((const unsigned char *)(p))[7] & 255))<<56) | \
        (((uint64_t)(((const unsigned char *)(p))[6] & 255))<<48) | \
        (((uint64_t)(((const unsigned char *)(p))[5] & 255))<<40) | \
        (((uint64_t)(((const unsigned char *)(p))[4] & 255))<<32) | \
        (((uint64_t)(((const unsigned char *)(p))[3] & 255))<<24) | \
        (((uint64_t)(((const unsigned char *)(p))[2] & 255))<<16) | \
        (((uint64_t)(((const unsigned char *)(p))[1] & 255))<<8) | \
        (((uint64_t)(((const unsigned char *)(p))[0] & 255))); \
} while(0)
141
// MARK: -- 32 bits - big endian
// MARK: --- intel version

#if (defined(__i386__) || defined(__x86_64__)) && !defined(_MSC_VER)

/* Byte-swap in a register, store, then swap back so the source register is
 * left unchanged.
 * NOTE(review): the asm writes memory through %1 but declares no "memory"
 * clobber; it relies on __volatile__ for ordering. Kept as shipped —
 * confirm before reusing this pattern elsewhere. */
#define CC_STORE32_BE(x, y) \
    __asm__ __volatile__ ( \
        "bswapl %0 \n\t" \
        "movl %0,(%1)\n\t" \
        "bswapl %0 \n\t" \
        ::"r"(x), "r"(y))

/* Load 32 bits from (y) and byte-swap into x. */
#define CC_LOAD32_BE(x, y) \
    __asm__ __volatile__ ( \
        "movl (%1),%0\n\t" \
        "bswapl %0\n\t" \
        :"=r"(x): "r"(y))

#else
// MARK: --- default version

/* Portable big-endian 32-bit store: byte-at-a-time, unaligned-safe. */
#define CC_STORE32_BE(x, y) do { \
    ((unsigned char *)(y))[0] = (unsigned char)(((x)>>24)&255); \
    ((unsigned char *)(y))[1] = (unsigned char)(((x)>>16)&255); \
    ((unsigned char *)(y))[2] = (unsigned char)(((x)>>8)&255); \
    ((unsigned char *)(y))[3] = (unsigned char)((x)&255); \
} while(0)

/* Portable big-endian 32-bit load into the lvalue x. */
#define CC_LOAD32_BE(x, y) do { \
    x = ((uint32_t)(((const unsigned char *)(y))[0] & 255)<<24) | \
        ((uint32_t)(((const unsigned char *)(y))[1] & 255)<<16) | \
        ((uint32_t)(((const unsigned char *)(y))[2] & 255)<<8) | \
        ((uint32_t)(((const unsigned char *)(y))[3] & 255)); \
} while(0)

#endif
177
// MARK: -- 64 bits - big endian

// MARK: --- intel 64 bits version

#if defined(__x86_64__) && !defined (_MSC_VER)

/* Byte-swap in a register, store, then swap back so the source register is
 * left unchanged. See the 32-bit note about the absent "memory" clobber. */
#define CC_STORE64_BE(x, y) \
    __asm__ __volatile__ ( \
        "bswapq %0 \n\t" \
        "movq %0,(%1)\n\t" \
        "bswapq %0 \n\t" \
        ::"r"(x), "r"(y))

/* Load 64 bits from (y) and byte-swap into x. */
#define CC_LOAD64_BE(x, y) \
    __asm__ __volatile__ ( \
        "movq (%1),%0\n\t" \
        "bswapq %0\n\t" \
        :"=r"(x): "r"(y))

#else

// MARK: --- default version

/* Portable big-endian 64-bit store: byte-at-a-time, unaligned-safe. */
#define CC_STORE64_BE(x, y) do { \
    ((unsigned char *)(y))[0] = (unsigned char)(((x)>>56)&255); \
    ((unsigned char *)(y))[1] = (unsigned char)(((x)>>48)&255); \
    ((unsigned char *)(y))[2] = (unsigned char)(((x)>>40)&255); \
    ((unsigned char *)(y))[3] = (unsigned char)(((x)>>32)&255); \
    ((unsigned char *)(y))[4] = (unsigned char)(((x)>>24)&255); \
    ((unsigned char *)(y))[5] = (unsigned char)(((x)>>16)&255); \
    ((unsigned char *)(y))[6] = (unsigned char)(((x)>>8)&255); \
    ((unsigned char *)(y))[7] = (unsigned char)((x)&255); \
} while(0)

/* Portable big-endian 64-bit load into the lvalue x. */
#define CC_LOAD64_BE(x, y) do { \
    x = (((uint64_t)(((const unsigned char *)(y))[0] & 255))<<56) | \
        (((uint64_t)(((const unsigned char *)(y))[1] & 255))<<48) | \
        (((uint64_t)(((const unsigned char *)(y))[2] & 255))<<40) | \
        (((uint64_t)(((const unsigned char *)(y))[3] & 255))<<32) | \
        (((uint64_t)(((const unsigned char *)(y))[4] & 255))<<24) | \
        (((uint64_t)(((const unsigned char *)(y))[5] & 255))<<16) | \
        (((uint64_t)(((const unsigned char *)(y))[6] & 255))<<8) | \
        (((uint64_t)(((const unsigned char *)(y))[7] & 255))); \
} while(0)

#endif
224
225 // MARK: - 32-bit Rotates
226
227 #if defined(_MSC_VER)
228 // MARK: -- MSVC version
229
230 #include <stdlib.h>
231 #if !defined(__clang__)
232 #pragma intrinsic(_lrotr,_lrotl)
233 #endif
234 #define CC_ROR(x,n) _lrotr(x,n)
235 #define CC_ROL(x,n) _lrotl(x,n)
236 #define CC_RORc(x,n) _lrotr(x,n)
237 #define CC_ROLc(x,n) _lrotl(x,n)
238
239 #elif (defined(__i386__) || defined(__x86_64__))
240 // MARK: -- intel asm version
241
242 CC_INLINE uint32_t CC_ROL(uint32_t word, int i)
243 {
244 __asm__ ("roll %%cl,%0"
245 :"=r" (word)
246 :"0" (word),"c" (i));
247 return word;
248 }
249
250 CC_INLINE uint32_t CC_ROR(uint32_t word, int i)
251 {
252 __asm__ ("rorl %%cl,%0"
253 :"=r" (word)
254 :"0" (word),"c" (i));
255 return word;
256 }
257
258 /* Need to be a macro here, because 'i' is an immediate (constant) */
259 #define CC_ROLc(word, i) \
260 ({ uint32_t _word=(word); \
261 __asm__ __volatile__ ("roll %2,%0" \
262 :"=r" (_word) \
263 :"0" (_word),"I" (i)); \
264 _word; \
265 })
266
267
268 #define CC_RORc(word, i) \
269 ({ uint32_t _word=(word); \
270 __asm__ __volatile__ ("rorl %2,%0" \
271 :"=r" (_word) \
272 :"0" (_word),"I" (i)); \
273 _word; \
274 })
275
276 #else
277
278 // MARK: -- default version
279
280 CC_INLINE uint32_t CC_ROL(uint32_t word, int i)
281 {
282 return ( (word<<(i&31)) | (word>>(32-(i&31))) );
283 }
284
285 CC_INLINE uint32_t CC_ROR(uint32_t word, int i)
286 {
287 return ( (word>>(i&31)) | (word<<(32-(i&31))) );
288 }
289
290 #define CC_ROLc(x, y) CC_ROL(x, y)
291 #define CC_RORc(x, y) CC_ROR(x, y)
292
293 #endif
294
295 // MARK: - 64 bits rotates
296
297 #if defined(__x86_64__) && !defined(_MSC_VER) //clang _MSVC doesn't support GNU-style inline assembly
298 // MARK: -- intel 64 asm version
299
300 CC_INLINE uint64_t CC_ROL64(uint64_t word, int i)
301 {
302 __asm__("rolq %%cl,%0"
303 :"=r" (word)
304 :"0" (word),"c" (i));
305 return word;
306 }
307
308 CC_INLINE uint64_t CC_ROR64(uint64_t word, int i)
309 {
310 __asm__("rorq %%cl,%0"
311 :"=r" (word)
312 :"0" (word),"c" (i));
313 return word;
314 }
315
316 /* Need to be a macro here, because 'i' is an immediate (constant) */
317 #define CC_ROL64c(word, i) \
318 ({ \
319 uint64_t _word=(word); \
320 __asm__("rolq %2,%0" \
321 :"=r" (_word) \
322 :"0" (_word),"J" (i)); \
323 _word; \
324 })
325
326 #define CC_ROR64c(word, i) \
327 ({ \
328 uint64_t _word=(word); \
329 __asm__("rorq %2,%0" \
330 :"=r" (_word) \
331 :"0" (_word),"J" (i)); \
332 _word; \
333 })
334
335
336 #else /* Not x86_64 */
337
338 // MARK: -- default C version
339
340 CC_INLINE uint64_t CC_ROL64(uint64_t word, int i)
341 {
342 return ( (word<<(i&63)) | (word>>(64-(i&63))) );
343 }
344
345 CC_INLINE uint64_t CC_ROR64(uint64_t word, int i)
346 {
347 return ( (word>>(i&63)) | (word<<(64-(i&63))) );
348 }
349
350 #define CC_ROL64c(x, y) CC_ROL64(x, y)
351 #define CC_ROR64c(x, y) CC_ROR64(x, y)
352
353 #endif
354
355
// MARK: - Byte Swaps

/* CC_BSWAP32: reverse the byte order of a 32-bit value. Prefer the compiler
 * builtin; otherwise fall back to explicit mask-and-shift. */
#if __has_builtin(__builtin_bswap32)
#define CC_BSWAP32(x) __builtin_bswap32(x)
#else
CC_INLINE uint32_t CC_BSWAP32(uint32_t x)
{
    return
        ((x & 0xff000000) >> 24) |
        ((x & 0x00ff0000) >>  8) |
        ((x & 0x0000ff00) <<  8) |
        ((x & 0x000000ff) << 24);
}
#endif

/* CC_BSWAP64: reverse the byte order of a 64-bit value. */
#if __has_builtin(__builtin_bswap64)
#define CC_BSWAP64(x) __builtin_bswap64(x)
#else
CC_INLINE uint64_t CC_BSWAP64(uint64_t x)
{
    return
        ((x & 0xff00000000000000ULL) >> 56) |
        ((x & 0x00ff000000000000ULL) >> 40) |
        ((x & 0x0000ff0000000000ULL) >> 24) |
        ((x & 0x000000ff00000000ULL) >>  8) |
        ((x & 0x00000000ff000000ULL) <<  8) |
        ((x & 0x0000000000ff0000ULL) << 24) |
        ((x & 0x000000000000ff00ULL) << 40) |
        ((x & 0x00000000000000ffULL) << 56);
}
#endif
387
/* Host <-> big/little endian conversions, keyed off __LITTLE_ENDIAN__:
 * the matching direction is the identity, the other is a byte swap. */
#ifdef __LITTLE_ENDIAN__
#define CC_H2BE32(x) CC_BSWAP32(x)
#define CC_H2LE32(x) (x)
#define CC_H2BE64(x) CC_BSWAP64(x)
#define CC_H2LE64(x) (x)
#else
#define CC_H2BE32(x) (x)
#define CC_H2LE32(x) CC_BSWAP32(x)
#define CC_H2BE64(x) (x)
#define CC_H2LE64(x) CC_BSWAP64(x)
#endif
399
/* CC_READ_LE32: assemble a 32-bit value from 4 little-endian bytes at ptr
 * (unaligned-safe; ptr is only read). */
#define CC_READ_LE32(ptr) \
( (uint32_t)( \
    ((uint32_t)((const uint8_t *)(ptr))[0]) | \
    (((uint32_t)((const uint8_t *)(ptr))[1]) << 8) | \
    (((uint32_t)((const uint8_t *)(ptr))[2]) << 16) | \
    (((uint32_t)((const uint8_t *)(ptr))[3]) << 24)))

/* CC_WRITE_LE32: emit x as 4 little-endian bytes at ptr (unaligned-safe). */
#define CC_WRITE_LE32(ptr, x) \
    do { \
        ((uint8_t *)(ptr))[0] = (uint8_t)( (x)        & 0xFF); \
        ((uint8_t *)(ptr))[1] = (uint8_t)(((x) >>  8) & 0xFF); \
        ((uint8_t *)(ptr))[2] = (uint8_t)(((x) >> 16) & 0xFF); \
        ((uint8_t *)(ptr))[3] = (uint8_t)(((x) >> 24) & 0xFF); \
    } while(0)

/* CC_WRITE_LE64: emit x as 8 little-endian bytes at ptr (unaligned-safe). */
#define CC_WRITE_LE64(ptr, x) \
    do { \
        ((uint8_t *)(ptr))[0] = (uint8_t)( (x)        & 0xFF); \
        ((uint8_t *)(ptr))[1] = (uint8_t)(((x) >>  8) & 0xFF); \
        ((uint8_t *)(ptr))[2] = (uint8_t)(((x) >> 16) & 0xFF); \
        ((uint8_t *)(ptr))[3] = (uint8_t)(((x) >> 24) & 0xFF); \
        ((uint8_t *)(ptr))[4] = (uint8_t)(((x) >> 32) & 0xFF); \
        ((uint8_t *)(ptr))[5] = (uint8_t)(((x) >> 40) & 0xFF); \
        ((uint8_t *)(ptr))[6] = (uint8_t)(((x) >> 48) & 0xFF); \
        ((uint8_t *)(ptr))[7] = (uint8_t)(((x) >> 56) & 0xFF); \
    } while(0)
426
/* extract a byte portably: cc_byte(x, n) yields byte n (0 = least
 * significant) of x. MSVC gets an explicit cast to silence truncation
 * warnings; elsewhere the mask keeps the result in 0..255. */
#ifdef _MSC_VER
#define cc_byte(x, n) ((unsigned char)((x) >> (8 * (n))))
#else
#define cc_byte(x, n) (((x) >> (8 * (n))) & 255)
#endif
433
434 /* Count leading zeros (for nonzero inputs) */
435
436 /*
437 * On i386 and x86_64, we know clang and GCC will generate BSR for
438 * __builtin_clzl. This instruction IS NOT constant time on all micro-
439 * architectures, but it *is* constant time on all micro-architectures that
440 * have been used by Apple, and we expect that to continue to be the case.
441 *
442 * When building for x86_64h with clang, this produces LZCNT, which is exactly
443 * what we want.
444 *
445 * On arm and arm64, we know that clang and GCC generate the constant-time CLZ
446 * instruction from __builtin_clzl( ).
447 */
448
449 #if defined(_WIN32)
450 /* We use the Windows implementations below. */
451 #elif defined(__x86_64__) || defined(__i386__) || defined(__arm64__) || defined(__arm__)
452 /* We use a thought-to-be-good version of __builtin_clz. */
453 #elif defined __GNUC__
454 #warning Using __builtin_clz() on an unknown architecture; it may not be constant-time.
455 /* If you find yourself seeing this warning, file a radar for someone to
456 * check whether or not __builtin_clz() generates a constant-time
457 * implementation on the architecture you are targeting. If it does, append
458  * the name of that architecture to the list of "safe" architectures above. */
459 #endif
460
461
#if defined(_WIN32)

#include <windows.h>
#include <intrin.h>

/* Count leading zeros of a nonzero 64-bit value via _BitScanReverse64,
 * which reports the index of the highest set bit.
 * NOTE(review): _BitScanReverse64 is only available on 64-bit Windows
 * targets — confirm no 32-bit Windows build reaches this path. */
CC_INLINE CC_CONST unsigned clz64_win(uint64_t value)
{
    DWORD leading_zero;
    _BitScanReverse64(&leading_zero, value);
    return 63 - leading_zero;
}

/* Count leading zeros of a nonzero 32-bit value via _BitScanReverse. */
CC_INLINE CC_CONST unsigned clz32_win(uint32_t value)
{
    DWORD leading_zero;
    _BitScanReverse(&leading_zero, value);
    return 31 - leading_zero;
}

#endif
483
484 CC_INLINE CC_CONST unsigned cc_clz32_fallback(uint32_t data)
485 {
486 unsigned int b = 0;
487 unsigned int bit = 0;
488 // Work from LSB to MSB
489 for (int i = 0; i < 32; i++) {
490 bit = (data >> i) & 1;
491 // If the bit is 0, update the "leading bits are zero" counter "b".
492 b += (1 - bit);
493 /* If the bit is 0, (bit - 1) is 0xffff... therefore b is retained.
494 * If the bit is 1, (bit - 1) is 0 therefore b is set to 0.
495 */
496 b &= (bit - 1);
497 }
498 return b;
499 }
500
501 CC_INLINE CC_CONST unsigned cc_clz64_fallback(uint64_t data)
502 {
503 unsigned int b = 0;
504 unsigned int bit = 0;
505 // Work from LSB to MSB
506 for (int i = 0; i < 64; i++) {
507 bit = (data >> i) & 1;
508 // If the bit is 0, update the "leading bits are zero" counter.
509 b += (1 - bit);
510 /* If the bit is 0, (bit - 1) is 0xffff... therefore b is retained.
511 * If the bit is 1, (bit - 1) is 0 therefore b is set to 0.
512 */
513 b &= (bit - 1);
514 }
515 return b;
516 }
517
518 /*!
519 @function cc_clz32
520 @abstract Count leading zeros of a nonzero 32-bit value
521
522 @param data A nonzero 32-bit value
523
524 @result Count of leading zeros of @p data
525
526 @discussion @p data is assumed to be nonzero.
527 */
528 CC_INLINE CC_CONST unsigned cc_clz32(uint32_t data) {
529 #if defined(_WIN32)
530 return clz32_win(data);
531 #elif defined(__x86_64__) || defined(__i386__) || defined(__arm64__) || defined(__arm__) || defined(__GNUC__)
532 cc_static_assert(sizeof(unsigned) == 4, "clz relies on an unsigned int being 4 bytes");
533 return (unsigned)__builtin_clz(data);
534 #else
535 return cc_clz32_fallback(data);
536 #endif
537 }
538
539 /*!
540 @function cc_clz64
541 @abstract Count leading zeros of a nonzero 64-bit value
542
543 @param data A nonzero 64-bit value
544
545 @result Count of leading zeros of @p data
546
547 @discussion @p data is assumed to be nonzero.
548 */
549 CC_INLINE CC_CONST unsigned cc_clz64(uint64_t data) {
550 #if defined(_WIN32)
551 return clz64_win(data);
552 #elif defined(__x86_64__) || defined(__i386__) || defined(__arm64__) || defined(__arm__) || defined(__GNUC__)
553 return (unsigned)__builtin_clzll(data);
554 #else
555 return cc_clz64_fallback(data);
556 #endif
557 }
558
/* HEAVISIDE_STEP (shifted by one)
   function f(x): x->0, when x=0
                  x->1, when x>0
   Can also be seen as a bitwise operation:
      f(x): x -> y
        y[0]=(OR x[i]) for all i (all bits)
        y[i]=0 for all i>0
   Runs in constant time (log2(<bitsize of x>)).
   Useful for constant-time checks.
   Fixed relative to the original: the argument s is parenthesized (an
   unparenthesized cast binds tighter than most operators passed in an
   expression argument), and the body is wrapped in do/while(0) so the
   macro is safe inside if/else without braces. */
#define CC_HEAVISIDE_STEP(r, s) do {                              \
    const uint64_t _s = (uint64_t)(s);                            \
    /* Fold the high word into the low word: _t == 0 iff s == 0. */ \
    const uint64_t _t = (_s & 0xffffffff) | (_s >> 32);           \
    /* 0xffffffff + _t carries into bit 32 exactly when _t != 0. */ \
    r = (__typeof__(r))((0xffffffff + _t) >> 32);                 \
} while (0)
574
/* Return 1 if x mod 4 is 1, 2 or 3 (i.e. either of the low two bits is set),
   0 otherwise. Fixed relative to the original: every use of the macro
   argument is parenthesized — the old body expanded x bare, so an argument
   like (a|b) associated with >> and | incorrectly. */
#define CC_CARRY_2BITS(x) ((((x) >> 1) | (x)) & 0x1)
/* Return 1 if x mod 8 is nonzero (OR of the low three bits), 0 otherwise. */
#define CC_CARRY_3BITS(x) ((((x) >> 2) | ((x) >> 1) | (x)) & 0x1)
578
/* Largest representable power of two for the type of x (i.e. a 1 in the top
 * bit position). Note: for signed types this sets the sign bit. */
#define MAX_POWER_OF_2(x) ((__typeof__(x))1 << (8 * sizeof(x) - 1))
/* Integer ceiling of a/b; a + b - 1 must not overflow a's type. */
#define cc_ceiling(a,b) (((a) + ((b) - 1)) / (b))
/* Number of bytes needed to hold x bits. */
#define CC_BITLEN_TO_BYTELEN(x) cc_ceiling((x), 8)
583
/*!
 @brief cc_muxp(s, a, b) is equivalent to z = s ? a : b, but it executes in constant time
 @param a input pointer
 @param b input pointer
 @param s The selection parameter s must be 0 or 1. if s is integer 1 a is returned. If s is integer 0, b is returned. Otherwise, the output is undefined.
 @return Returns a, if s is 1 and b if s is 0
 */
void *cc_muxp(int s, const void *a, const void *b);

/*!
 @brief CC_MUXU(r, s, a, b) is equivalent to r = s ? a : b, but executes in constant time
 @param a Input a
 @param b Input b
 @param s Selection parameter s. Must be 0 or 1.
 @param r Output, set to a if s=1, or b if s=0.
 @discussion Fixed relative to the original: the body is wrapped in
 do/while(0) so the macro expands to a single statement and is safe in an
 unbraced if/else. s=1 makes _cond all-zero (selecting a); s=0 makes _cond
 all-one (selecting b), with no data-dependent branch.
 */
#define CC_MUXU(r, s, a, b) do {                                        \
    __typeof__(r) _cond = ((__typeof__(r))(s) - (__typeof__(r))1);      \
    r = (~_cond & (a)) | (_cond & (b));                                 \
} while (0)
605
606 #define CC_PROVIDES_ABORT (!(CC_USE_SEPROM || CC_USE_S3 || CC_BASEBAND || CC_EFI || CC_IBOOT || CC_RTKITROM))
607
608 /*!
609 @function cc_abort
610 @abstract Abort execution unconditionally
611 */
612 CC_NORETURN
613 void cc_abort(const char *msg);
614
615 /*!
616 @function cc_try_abort
617 @abstract Abort execution iff the platform provides a function like @p abort() or @p panic()
618
619 @discussion If the platform does not provide a means to abort execution, this function does nothing; therefore, callers should return an error code after calling this function.
620 */
621 #if CC_PROVIDES_ABORT
622
623 #pragma clang diagnostic push
624 #pragma clang diagnostic ignored "-Wmissing-noreturn"
625
626 CC_INLINE
627 void cc_try_abort(const char *msg)
628 {
629 cc_abort(msg);
630 }
631
632 #pragma clang diagnostic pop
633
634 #else
635
636 CC_INLINE
637 void cc_try_abort(CC_UNUSED const char *msg)
638 {
639
640 }
641
642 #endif
643
644 /*
645 Unfortunately, since we export this symbol, this declaration needs
646 to be in a public header to satisfy TAPI.
647
648 See fipspost_trace_priv.h for more details.
649 */
650 extern const void *fipspost_trace_vtable;
651
652 #endif /* _CORECRYPTO_CC_PRIV_H_ */