]> git.saurik.com Git - apple/xnu.git/blob - EXTERNAL_HEADERS/corecrypto/cc_priv.h
xnu-3248.50.21.tar.gz
[apple/xnu.git] / EXTERNAL_HEADERS / corecrypto / cc_priv.h
1 /*
2 * cc_priv.h
3 * corecrypto
4 *
5 * Created on 12/01/2010
6 *
7 * Copyright (c) 2010,2011,2012,2014,2015 Apple Inc. All rights reserved.
8 *
9 */
10
11 #ifndef _CORECRYPTO_CC_PRIV_H_
12 #define _CORECRYPTO_CC_PRIV_H_
13
14 #include <corecrypto/cc.h>
15 #include <stdint.h>
16
17 /* defines the following macros :
18
19 CC_MEMCPY : optimized memcpy.
20 CC_MEMMOVE : optimized memmove.
21 CC_MEMSET : optimized memset.
22 CC_BZERO : optimized bzero,
23
24 CC_STORE32_BE : store 32 bit value in big endian in unaligned buffer.
25 CC_STORE32_LE : store 32 bit value in little endian in unaligned buffer.
26 CC_STORE64_BE : store 64 bit value in big endian in unaligned buffer.
27 CC_STORE64_LE : store 64 bit value in little endian in unaligned buffer.
28
29 CC_LOAD32_BE : load 32 bit value in big endian from unaligned buffer.
30 CC_LOAD32_LE : load 32 bit value in little endian from unaligned buffer.
31 CC_LOAD64_BE : load 64 bit value in big endian from unaligned buffer.
32 CC_LOAD64_LE : load 64 bit value in little endian from unaligned buffer.
33
34 CC_ROR : Rotate Right 32 bits. Rotate count can be a variable.
35 CC_ROL : Rotate Left 32 bits. Rotate count can be a variable.
36 CC_RORc : Rotate Right 32 bits. Rotate count must be a constant.
37 CC_ROLc : Rotate Left 32 bits. Rotate count must be a constant.
38
39 CC_ROR64 : Rotate Right 64 bits. Rotate count can be a variable.
40 CC_ROL64 : Rotate Left 64 bits. Rotate count can be a variable.
41 CC_ROR64c : Rotate Right 64 bits. Rotate count must be a constant.
42 CC_ROL64c : Rotate Left 64 bits. Rotate count must be a constant.
43
44 CC_BSWAP : byte swap a 32 bits variable.
45
46 CC_H2BE32 : convert a 32 bits value between host and big endian order.
47 CC_H2LE32 : convert a 32 bits value between host and little endian order.
48
49 The following are not defined yet... define them if needed.
50
51 CC_BSWAPc : byte swap a 32 bits constant
52
53 CC_BSWAP64 : byte swap a 64 bits variable
54 CC_BSWAP64c : byte swap a 64 bits constant
55
56 CC_READ_LE32 : read a 32 bits little endian value
57 CC_READ_LE64 : read a 64 bits little endian value
58 CC_READ_BE32 : read a 32 bits big endian value
59 CC_READ_BE64 : read a 64 bits big endian value
60
61 CC_WRITE_LE32 : write a 32 bits little endian value
62 CC_WRITE_LE64 : write a 64 bits little endian value
63 CC_WRITE_BE32 : write a 32 bits big endian value
64 CC_WRITE_BE64 : write a 64 bits big endian value
65
66 CC_H2BE64 : convert a 64 bits value between host and big endian order
67 CC_H2LE64 : convert a 64 bits value between host and little endian order
68
69 */
70
/* TODO: optimized versions */

/* Thin pass-throughs to the libc memory primitives, kept as macros so an
   optimized implementation can be substituted later without touching call
   sites.  D = destination, S = source, V = fill byte, L = length in bytes. */
#define CC_MEMCPY(D,S,L) memcpy((D),(S),(L))
/* Like CC_MEMCPY, but D and S may overlap. */
#define CC_MEMMOVE(D,S,L) memmove((D),(S),(L))
#define CC_MEMSET(D,V,L) memset((D),(V),(L))
/* NOTE(review): deprecated upstream — a plain memset-to-zero can be elided
   by the optimizer when the buffer is dead, so do not rely on this to wipe
   secrets; use a guaranteed-clear primitive instead. */
#define CC_BZERO(D,L) memset((D),0,(L)) // Deprecated, DO NOT USE
76
77
78 // MARK: - Loads and Store
79
80 // MARK: -- 32 bits - little endian
81
82 // MARK: --- Default version
83
/* CC_STORE32_LE(x, y): store the low 32 bits of x into the 4 bytes at y,
   least-significant byte first (little endian).  y may be unaligned.
   x and y are evaluated once per byte — avoid side effects. */
#define CC_STORE32_LE(x, y) do { \
    ((unsigned char *)(y))[3] = (unsigned char)(((x) >> 24) & 0xFF); \
    ((unsigned char *)(y))[2] = (unsigned char)(((x) >> 16) & 0xFF); \
    ((unsigned char *)(y))[1] = (unsigned char)(((x) >>  8) & 0xFF); \
    ((unsigned char *)(y))[0] = (unsigned char)( (x)        & 0xFF); \
} while(0)
90
/* CC_LOAD32_LE(x, y): assemble a uint32_t into lvalue x from the 4 bytes
   at y, treating y as little endian.  y may be unaligned. */
#define CC_LOAD32_LE(x, y) do { \
    x = ((uint32_t)(((const unsigned char *)(y))[0] & 0xFF)      ) | \
        ((uint32_t)(((const unsigned char *)(y))[1] & 0xFF) <<  8) | \
        ((uint32_t)(((const unsigned char *)(y))[2] & 0xFF) << 16) | \
        ((uint32_t)(((const unsigned char *)(y))[3] & 0xFF) << 24); \
} while(0)
97
98 // MARK: -- 64 bits - little endian
99
/* CC_STORE64_LE(x, y): store the 64-bit value x into the 8 bytes at y,
   least-significant byte first (little endian).  y may be unaligned.
   x and y are evaluated once per byte — avoid side effects. */
#define CC_STORE64_LE(x, y) do { \
    ((unsigned char *)(y))[7] = (unsigned char)(((x) >> 56) & 0xFF); \
    ((unsigned char *)(y))[6] = (unsigned char)(((x) >> 48) & 0xFF); \
    ((unsigned char *)(y))[5] = (unsigned char)(((x) >> 40) & 0xFF); \
    ((unsigned char *)(y))[4] = (unsigned char)(((x) >> 32) & 0xFF); \
    ((unsigned char *)(y))[3] = (unsigned char)(((x) >> 24) & 0xFF); \
    ((unsigned char *)(y))[2] = (unsigned char)(((x) >> 16) & 0xFF); \
    ((unsigned char *)(y))[1] = (unsigned char)(((x) >>  8) & 0xFF); \
    ((unsigned char *)(y))[0] = (unsigned char)( (x)        & 0xFF); \
} while(0)
110
/* CC_LOAD64_LE(x, y): assemble a uint64_t into lvalue x from the 8 bytes
   at y, treating y as little endian.  y may be unaligned. */
#define CC_LOAD64_LE(x, y) do { \
    x = ((uint64_t)(((const unsigned char *)(y))[0] & 0xFF)      ) | \
        ((uint64_t)(((const unsigned char *)(y))[1] & 0xFF) <<  8) | \
        ((uint64_t)(((const unsigned char *)(y))[2] & 0xFF) << 16) | \
        ((uint64_t)(((const unsigned char *)(y))[3] & 0xFF) << 24) | \
        ((uint64_t)(((const unsigned char *)(y))[4] & 0xFF) << 32) | \
        ((uint64_t)(((const unsigned char *)(y))[5] & 0xFF) << 40) | \
        ((uint64_t)(((const unsigned char *)(y))[6] & 0xFF) << 48) | \
        ((uint64_t)(((const unsigned char *)(y))[7] & 0xFF) << 56); \
} while(0)
121
// MARK: -- 32 bits - big endian
// MARK: --- portable version
//
// BUGFIX(review): the former i386/x86_64 inline-asm variants modified an
// input-only operand ("bswapl %0" on an "r"(x) input) and stored/loaded
// through a pointer without a "memory" clobber or memory operand — both
// violate the GCC extended-asm contract and can miscompile.  The byte-wise
// C form below is well defined for any alignment; modern compilers
// recognize the pattern and emit a single bswap/mov (rev on ARM) at -O2.

/* CC_STORE32_BE(x, y): store the low 32 bits of x into the 4 bytes at y,
   most-significant byte first (big endian).  y may be unaligned. */
#define CC_STORE32_BE(x, y) do { \
    ((unsigned char *)(y))[0] = (unsigned char)(((x)>>24)&255); \
    ((unsigned char *)(y))[1] = (unsigned char)(((x)>>16)&255); \
    ((unsigned char *)(y))[2] = (unsigned char)(((x)>>8)&255); \
    ((unsigned char *)(y))[3] = (unsigned char)((x)&255); \
} while(0)

/* CC_LOAD32_BE(x, y): assemble a uint32_t into lvalue x from the 4 bytes
   at y, treating y as big endian.  y may be unaligned. */
#define CC_LOAD32_BE(x, y) do { \
    x = ((uint32_t)(((const unsigned char *)(y))[0] & 255)<<24) | \
        ((uint32_t)(((const unsigned char *)(y))[1] & 255)<<16) | \
        ((uint32_t)(((const unsigned char *)(y))[2] & 255)<<8) | \
        ((uint32_t)(((const unsigned char *)(y))[3] & 255)); \
} while(0)
157
// MARK: -- 64 bits - big endian
// MARK: --- portable version
//
// BUGFIX(review): the former x86_64 inline-asm variants modified an
// input-only operand ("bswapq %0" on an "r"(x) input) and stored/loaded
// through a pointer without a "memory" clobber or memory operand — both
// violate the GCC extended-asm contract and can miscompile.  The byte-wise
// C form below is well defined for any alignment; modern compilers
// recognize the pattern and emit a single bswap/mov (rev on ARM) at -O2.

/* CC_STORE64_BE(x, y): store the 64-bit value x into the 8 bytes at y,
   most-significant byte first (big endian).  y may be unaligned. */
#define CC_STORE64_BE(x, y) do { \
    ((unsigned char *)(y))[0] = (unsigned char)(((x)>>56)&255); \
    ((unsigned char *)(y))[1] = (unsigned char)(((x)>>48)&255); \
    ((unsigned char *)(y))[2] = (unsigned char)(((x)>>40)&255); \
    ((unsigned char *)(y))[3] = (unsigned char)(((x)>>32)&255); \
    ((unsigned char *)(y))[4] = (unsigned char)(((x)>>24)&255); \
    ((unsigned char *)(y))[5] = (unsigned char)(((x)>>16)&255); \
    ((unsigned char *)(y))[6] = (unsigned char)(((x)>>8)&255); \
    ((unsigned char *)(y))[7] = (unsigned char)((x)&255); \
} while(0)

/* CC_LOAD64_BE(x, y): assemble a uint64_t into lvalue x from the 8 bytes
   at y, treating y as big endian.  y may be unaligned. */
#define CC_LOAD64_BE(x, y) do { \
    x = (((uint64_t)(((const unsigned char *)(y))[0] & 255))<<56) | \
        (((uint64_t)(((const unsigned char *)(y))[1] & 255))<<48) | \
        (((uint64_t)(((const unsigned char *)(y))[2] & 255))<<40) | \
        (((uint64_t)(((const unsigned char *)(y))[3] & 255))<<32) | \
        (((uint64_t)(((const unsigned char *)(y))[4] & 255))<<24) | \
        (((uint64_t)(((const unsigned char *)(y))[5] & 255))<<16) | \
        (((uint64_t)(((const unsigned char *)(y))[6] & 255))<<8) | \
        (((uint64_t)(((const unsigned char *)(y))[7] & 255))); \
} while(0)
204
205 // MARK: - 32-bit Rotates
206
#if defined(_MSC_VER)
// MARK: -- MSVC version

#include <stdlib.h>
#pragma intrinsic(_lrotr,_lrotl)
#define CC_ROR(x,n) _lrotr(x,n)
#define CC_ROL(x,n) _lrotl(x,n)
#define CC_RORc(x,n) _lrotr(x,n)
#define CC_ROLc(x,n) _lrotl(x,n)

#elif (defined(__i386__) || defined(__x86_64__))
// MARK: -- intel asm version

static inline uint32_t CC_ROL(uint32_t word, int i)
{
    __asm__ ("roll %%cl,%0"
             :"=r" (word)
             :"0" (word),"c" (i));
    return word;
}

static inline uint32_t CC_ROR(uint32_t word, int i)
{
    __asm__ ("rorl %%cl,%0"
             :"=r" (word)
             :"0" (word),"c" (i));
    return word;
}

/* Need to be a macro here, because 'i' is an immediate (constant) */
#define CC_ROLc(word, i)       \
({  uint32_t _word=(word);     \
    __asm__ __volatile__ ("roll %2,%0" \
                          :"=r" (_word) \
                          :"0" (_word),"I" (i)); \
    _word; \
})

#define CC_RORc(word, i)       \
({  uint32_t _word=(word);     \
    __asm__ __volatile__ ("rorl %2,%0" \
                          :"=r" (_word) \
                          :"0" (_word),"I" (i)); \
    _word; \
})

#else

// MARK: -- default version

/* Rotate left by i (reduced mod 32).
   BUGFIX(review): the old form computed word >> (32 - (i & 31)), which is
   a shift by 32 — undefined behavior in C — whenever i is a multiple of
   32.  Reducing the complementary count mod 32 as well keeps every shift
   in [0,31] and yields word for i % 32 == 0, matching the hardware rotate
   instructions used by the other branches. */
static inline uint32_t CC_ROL(uint32_t word, int i)
{
    return (word << ((unsigned)i & 31)) | (word >> ((0u - (unsigned)i) & 31));
}

/* Rotate right by i (reduced mod 32); see CC_ROL for the UB note. */
static inline uint32_t CC_ROR(uint32_t word, int i)
{
    return (word >> ((unsigned)i & 31)) | (word << ((0u - (unsigned)i) & 31));
}

/* In portable C the constant-count variants are the same function. */
#define CC_ROLc(x, y) CC_ROL(x, y)
#define CC_RORc(x, y) CC_ROR(x, y)

#endif
272
273 // MARK: - 64 bits rotates
274
#if defined(__x86_64__)
// MARK: -- intel 64 asm version

static inline uint64_t CC_ROL64(uint64_t word, int i)
{
    __asm__("rolq %%cl,%0"
            :"=r" (word)
            :"0" (word),"c" (i));
    return word;
}

static inline uint64_t CC_ROR64(uint64_t word, int i)
{
    __asm__("rorq %%cl,%0"
            :"=r" (word)
            :"0" (word),"c" (i));
    return word;
}

/* Need to be a macro here, because 'i' is an immediate (constant) */
#define CC_ROL64c(word, i) \
({ \
    uint64_t _word=(word); \
    __asm__("rolq %2,%0" \
            :"=r" (_word) \
            :"0" (_word),"J" (i)); \
    _word; \
})

#define CC_ROR64c(word, i) \
({ \
    uint64_t _word=(word); \
    __asm__("rorq %2,%0" \
            :"=r" (_word) \
            :"0" (_word),"J" (i)); \
    _word; \
})


#else /* Not x86_64 */

// MARK: -- default C version

/* Rotate left by i (reduced mod 64).
   BUGFIX(review): the old form computed word >> (64 - (i & 63)), which is
   a shift by 64 — undefined behavior in C — whenever i is a multiple of
   64.  Reducing the complementary count mod 64 as well keeps every shift
   in [0,63] and yields word for i % 64 == 0, matching the x86_64 rotate
   instructions used by the asm branch. */
static inline uint64_t CC_ROL64(uint64_t word, int i)
{
    return (word << ((unsigned)i & 63)) | (word >> ((0u - (unsigned)i) & 63));
}

/* Rotate right by i (reduced mod 64); see CC_ROL64 for the UB note. */
static inline uint64_t CC_ROR64(uint64_t word, int i)
{
    return (word >> ((unsigned)i & 63)) | (word << ((0u - (unsigned)i) & 63));
}

/* In portable C the constant-count variants are the same function. */
#define CC_ROL64c(x, y) CC_ROL64(x, y)
#define CC_ROR64c(x, y) CC_ROR64(x, y)

#endif
332
333
334 // MARK: - Byte Swaps
335
/* Reverse the byte order of a 32-bit value. */
static inline uint32_t CC_BSWAP(uint32_t x)
{
    uint32_t swapped;
    swapped  =  x >> 24;                 /* byte 3 -> byte 0 */
    swapped |= (x >> 8) & 0x0000FF00;    /* byte 2 -> byte 1 */
    swapped |= (x << 8) & 0x00FF0000;    /* byte 1 -> byte 2 */
    swapped |=  x << 24;                 /* byte 0 -> byte 3 */
    return swapped;
}
345
/* Reverse the byte order of a 64-bit value.  Pure integer expression, so
   it remains usable in constant initializers; the argument is evaluated
   several times — avoid side effects. */
#define CC_BSWAP64(x) \
    ((uint64_t)((((uint64_t)(x) >> 56) & 0x00000000000000ffULL) | \
                (((uint64_t)(x) >> 40) & 0x000000000000ff00ULL) | \
                (((uint64_t)(x) >> 24) & 0x0000000000ff0000ULL) | \
                (((uint64_t)(x) >>  8) & 0x00000000ff000000ULL) | \
                (((uint64_t)(x) <<  8) & 0x000000ff00000000ULL) | \
                (((uint64_t)(x) << 24) & 0x0000ff0000000000ULL) | \
                (((uint64_t)(x) << 40) & 0x00ff000000000000ULL) | \
                (((uint64_t)(x) << 56) & 0xff00000000000000ULL)))
355
/* Host <-> big/little-endian 32-bit conversion.  Byte swapping is its own
   inverse, so the same macro converts in either direction. */
#ifdef __LITTLE_ENDIAN__
#define CC_H2BE32(x) CC_BSWAP(x)
#define CC_H2LE32(x) (x)
#else
/* NOTE(review): assumes big-endian whenever __LITTLE_ENDIAN__ is not
   defined — a toolchain defining neither endianness macro falls through
   to this branch. */
#define CC_H2BE32(x) (x)
#define CC_H2LE32(x) CC_BSWAP(x)
#endif
363
364
/* extract a byte portably */
/* cc_byte(x, n): byte n of x, where n == 0 is the least-significant byte.
   The MSVC variant truncates via the cast; the generic variant masks —
   both yield the same 0..255 value. */
#ifdef _MSC_VER
#define cc_byte(x, n) ((unsigned char)((x) >> (8 * (n))))
#else
#define cc_byte(x, n) (((x) >> (8 * (n))) & 255)
#endif
371
/* HEAVISIDE_STEP (shifted by one)
   f(x) = 0 when x == 0
   f(x) = 1 when x  > 0
   Seen bitwise: result bit 0 is the OR of every bit of x; all other bits
   are 0.  Runs in a fixed number of steps (log2 of the bit size of x)
   with no data-dependent branches, which makes it useful for
   constant-time checks.
   The argument must be an unsigned integer lvalue; it is replaced in
   place by the 0/1 result.
   BUGFIX(review): macro arguments are now parenthesized and the bodies
   wrapped in do { } while(0), so the macros expand correctly for
   compound-expression arguments and inside unbraced if/else statements. */
#define HEAVISIDE_STEP_UINT64(x) do {uint64_t _t; \
    _t=((((uint64_t)(x))>>32) | (uint64_t)(x)); \
    _t=(0xFFFFFFFF + (_t & 0xFFFFFFFF)); \
    (x)=_t >> 32;} while(0)

#define HEAVISIDE_STEP_UINT32(x) do {uint32_t _t; \
    _t=((((uint32_t)(x))>>16) | (uint32_t)(x)); \
    _t=(0xFFFF + (_t & 0xFFFF)); \
    (x)=_t >> 16;} while(0)

#define HEAVISIDE_STEP_UINT16(x) do {uint16_t _t; \
    _t=((((uint16_t)(x))>>8) | (uint16_t)(x)); \
    _t=(0xFF + (_t & 0xFF)); \
    (x)=_t >> 8;} while(0)

#define HEAVISIDE_STEP_UINT8(x) do {uint8_t _t; \
    _t=((((uint8_t)(x))>>4) | (uint8_t)(x)); \
    _t=((_t>>2) | _t); \
    _t=((_t>>1) | _t); \
    (x)=_t & 0x1;} while(0)

/* Dispatch on the operand's width; sizeof is a compile-time constant, so
   the dead branches are eliminated by the compiler. */
#define CC_HEAVISIDE_STEP(x) do { \
    if (sizeof(x) == 1)      {HEAVISIDE_STEP_UINT8(x);}  \
    else if (sizeof(x) == 2) {HEAVISIDE_STEP_UINT16(x);} \
    else if (sizeof(x) == 4) {HEAVISIDE_STEP_UINT32(x);} \
    else if (sizeof(x) == 8) {HEAVISIDE_STEP_UINT64(x);} \
    else {(x)=(((x)==0)?0:1);} \
} while(0)
410
/* CC_CARRY_2BITS(x): 1 if x mod 4 is 1, 2 or 3; 0 otherwise.
   CC_CARRY_3BITS(x): same idea for x mod 8 (any of the low 3 bits set).
   BUGFIX(review): macro arguments are now parenthesized so expressions
   such as CC_CARRY_2BITS(a | b) expand with the intended precedence. */
#define CC_CARRY_2BITS(x) ((((x) >> 1) | (x)) & 0x1)
#define CC_CARRY_3BITS(x) ((((x) >> 2) | ((x) >> 1) | (x)) & 0x1)

/* Set a variable to the biggest power of 2 which can be represented in
   its type.  NOTE(review): for a signed operand this shifts into the
   sign bit; intended for unsigned types. */
#define MAX_POWER_OF_2(x) ((__typeof__(x))1 << (8 * sizeof(x) - 1))

/* Integer division rounding up; b must be nonzero and (a + b - 1) must
   not overflow. */
#define cc_ceiling(a,b)  (((a) + ((b) - 1)) / (b))
/* Number of bytes needed to hold x bits. */
#define CC_BITLEN_TO_BYTELEN(x) cc_ceiling((x), 8)
420
421 #endif /* _CORECRYPTO_CC_PRIV_H_ */