#define JEMALLOC_CHUNK_DSS_C_
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/
/* Data. */

/*
 * Protects sbrk() calls.  This avoids malloc races among threads, though it
 * does not protect against races with threads that call sbrk() directly.
 */
static malloc_mutex_t dss_mtx;

/* Base address of the DSS. */
static void *dss_base;
/* Current end of the DSS, or ((void *)-1) if the DSS is exhausted. */
static void *dss_prev;
/* Current upper limit on DSS addresses. */
static void *dss_max;

/******************************************************************************/

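/*
 * Fallback stub for platforms that do not provide sbrk(); it exists so that
 * the code below still compiles when JEMALLOC_HAVE_SBRK is undefined, and
 * routes any call through not_implemented() before returning NULL.
 */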
#ifndef JEMALLOC_HAVE_SBRK
static void *
sbrk(intptr_t increment)
{

        not_implemented();

        return (NULL);
}
#endif

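/*
 * Attempt to allocate a chunk-aligned region of the requested size by
 * extending the data segment (DSS) via sbrk().  Any chunk-aligned padding
 * created while satisfying a larger alignment is handed back to
 * chunk_dealloc() so it can be reused, and the region is zeroed if *zero is
 * true.  Returns NULL if the DSS is exhausted, the request would wrap the
 * address space, or sbrk() fails.
 */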
void *
chunk_alloc_dss(size_t size, size_t alignment, bool *zero)
{
        void *ret;

        cassert(config_dss);
        assert(size > 0 && (size & chunksize_mask) == 0);
        assert(alignment > 0 && (alignment & chunksize_mask) == 0);

        /*
         * sbrk() uses a signed increment argument, so take care not to
         * interpret a huge allocation request as a negative increment.
         */
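        /*
         * On typical platforms size_t and intptr_t have the same width, so a
         * request of (SIZE_MAX / 2) + 1 bytes or more would appear negative
         * here and is rejected up front.
         */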
        if ((intptr_t)size < 0)
                return (NULL);

        malloc_mutex_lock(&dss_mtx);
        if (dss_prev != (void *)-1) {
                size_t gap_size, cpad_size;
                void *cpad, *dss_next;
                intptr_t incr;

                /*
                 * The loop is necessary to recover from races with other
                 * threads that are using the DSS for something other than
                 * malloc.
                 */
                do {
                        /* Get the current end of the DSS. */
                        dss_max = sbrk(0);
                        /*
                         * Calculate how much padding is necessary to
                         * chunk-align the end of the DSS.
                         */
                        gap_size = (chunksize - CHUNK_ADDR2OFFSET(dss_max)) &
                            chunksize_mask;
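                        /*
                         * If dss_max is already chunk-aligned, the offset is
                         * zero and the mask reduces gap_size to zero;
                         * otherwise gap_size is the distance up to the next
                         * chunk boundary.
                         */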
                        /*
                         * Compute how much chunk-aligned pad space (if any) is
                         * necessary to satisfy alignment.  This space can be
                         * recycled for later use.
                         */
                        cpad = (void *)((uintptr_t)dss_max + gap_size);
                        ret = (void *)ALIGNMENT_CEILING((uintptr_t)dss_max,
                            alignment);
                        cpad_size = (uintptr_t)ret - (uintptr_t)cpad;
                        dss_next = (void *)((uintptr_t)ret + size);
                        if ((uintptr_t)ret < (uintptr_t)dss_max ||
                            (uintptr_t)dss_next < (uintptr_t)dss_max) {
                                /* Wrap-around. */
                                malloc_mutex_unlock(&dss_mtx);
                                return (NULL);
                        }
                        incr = gap_size + cpad_size + size;
                        dss_prev = sbrk(incr);
                        if (dss_prev == dss_max) {
                                /* Success. */
                                dss_max = dss_next;
                                malloc_mutex_unlock(&dss_mtx);
                                if (cpad_size != 0)
                                        chunk_dealloc(cpad, cpad_size, true);
                                if (*zero) {
                                        VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
                                        memset(ret, 0, size);
                                }
                                return (ret);
                        }
                } while (dss_prev != (void *)-1);
        }
        malloc_mutex_unlock(&dss_mtx);

        return (NULL);
}

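/*
 * Return true iff chunk lies within the address range that has been allocated
 * from the DSS, i.e. [dss_base, dss_max).
 */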
bool
chunk_in_dss(void *chunk)
{
        bool ret;

        cassert(config_dss);

        malloc_mutex_lock(&dss_mtx);
        if ((uintptr_t)chunk >= (uintptr_t)dss_base
            && (uintptr_t)chunk < (uintptr_t)dss_max)
                ret = true;
        else
                ret = false;
        malloc_mutex_unlock(&dss_mtx);

        return (ret);
}

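/*
 * One-time initialization: create dss_mtx and record the initial program
 * break as the base, previous end, and current limit of the DSS.  Returns
 * true on error (mutex initialization failure).
 */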
bool
chunk_dss_boot(void)
{

        cassert(config_dss);

        if (malloc_mutex_init(&dss_mtx))
                return (true);
        dss_base = sbrk(0);
        dss_prev = dss_base;
        dss_max = dss_base;

        return (false);
}

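/*
 * Fork hooks.  The prefork hook acquires dss_mtx so that the DSS state is
 * consistent across fork(), and the postfork hooks restore the mutex in the
 * parent and in the child.
 */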
void
chunk_dss_prefork(void)
{

        if (config_dss)
                malloc_mutex_prefork(&dss_mtx);
}

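/* Post-fork hook for the parent process: release dss_mtx. */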
void
chunk_dss_postfork_parent(void)
{

        if (config_dss)
                malloc_mutex_postfork_parent(&dss_mtx);
}

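/* Post-fork hook for the child process: restore dss_mtx to a usable state. */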
void
chunk_dss_postfork_child(void)
{

        if (config_dss)
                malloc_mutex_postfork_child(&dss_mtx);
}

/******************************************************************************/