[redis.git] / deps / jemalloc.orig / include / jemalloc / internal / mb.h (jemalloc updated to 3.0.0)
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

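/*
 * mb_write() provides a store (write) memory barrier: stores issued before
 * the call become visible to other processors before stores issued after it.
 *
 * Illustrative sketch only (not part of the original header); the names
 * "data", "ready", and writer() below are hypothetical:
 *
 *        static int data;
 *        static volatile int ready;
 *
 *        static void
 *        writer(void)
 *        {
 *
 *                data = 42;
 *                mb_write();     // Order the data store before the flag store.
 *                ready = 1;
 *        }
 *
 * A reader that observes ready != 0 (with a matching read barrier where the
 * architecture requires one) can then safely read data.
 */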
#ifndef JEMALLOC_ENABLE_INLINE
void mb_write(void);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MB_C_))
#ifdef __i386__
/*
 * According to the Intel Architecture Software Developer's Manual, current
 * processors execute instructions in order from the perspective of other
 * processors in a multiprocessor system, but 1) Intel reserves the right to
 * change that, and 2) the compiler's optimizer could re-order instructions if
 * there weren't some form of barrier.  Therefore, even if running on an
 * architecture that does not need memory barriers (everything through at least
 * i686), an "optimizer barrier" is necessary.
 */
JEMALLOC_INLINE void
mb_write(void)
{

#  if 0
        /*
         * This is a true memory barrier: cpuid (executed here with eax zeroed)
         * is a serializing instruction, and the surrounding pusha/popa
         * preserve the registers it clobbers.
         */
        asm volatile ("pusha;"
            "xor %%eax,%%eax;"
            "cpuid;"
            "popa;"
            : /* Outputs. */
            : /* Inputs. */
            : "memory" /* Clobbers. */
            );
#else
        /*
         * This is hopefully enough to keep the compiler from reordering
         * instructions around this one.
         */
        asm volatile ("nop;"
            : /* Outputs. */
            : /* Inputs. */
            : "memory" /* Clobbers. */
            );
#endif
}
#elif (defined(__amd64__) || defined(__x86_64__))
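/*
 * "sfence" orders all stores issued before it ahead of any store issued
 * after it; that store-ordering guarantee is what mb_write() provides here.
 */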
JEMALLOC_INLINE void
mb_write(void)
{

        asm volatile ("sfence"
            : /* Outputs. */
            : /* Inputs. */
            : "memory" /* Clobbers. */
            );
}
#elif defined(__powerpc__)
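/*
 * "eieio" (Enforce In-order Execution of I/O) orders stores issued before it
 * ahead of stores issued after it, so it serves as a store barrier here.
 */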
JEMALLOC_INLINE void
mb_write(void)
{

        asm volatile ("eieio"
            : /* Outputs. */
            : /* Inputs. */
            : "memory" /* Clobbers. */
            );
}
#elif defined(__sparc64__)
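/*
 * "membar #StoreStore" guarantees that all stores issued before it complete
 * before any store issued after it becomes visible.
 */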
JEMALLOC_INLINE void
mb_write(void)
{

        asm volatile ("membar #StoreStore"
            : /* Outputs. */
            : /* Inputs. */
            : "memory" /* Clobbers. */
            );
}
#else
/*
 * This is much slower than a simple memory barrier, but the acquire/release
 * semantics of locking and then unlocking a mutex imply the store ordering
 * that mb_write() requires.
 */
JEMALLOC_INLINE void
mb_write(void)
{
        malloc_mutex_t mtx;

        malloc_mutex_init(&mtx);
        malloc_mutex_lock(&mtx);
        malloc_mutex_unlock(&mtx);
}
#endif
#endif

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/