#define JEMALLOC_CHUNK_DSS_C_
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/
/* Data. */

const char *dss_prec_names[] = {
	"disabled",
	"primary",
	"secondary",
	"N/A"
};

/* Current dss precedence default, used when creating new arenas. */
static dss_prec_t dss_prec_default = DSS_PREC_DEFAULT;

/*
 * Protects sbrk() calls.  This avoids malloc races among threads, though it
 * does not protect against races with threads that call sbrk() directly.
 */
static malloc_mutex_t dss_mtx;

/* Base address of the DSS. */
static void *dss_base;
/* Current end of the DSS, or ((void *)-1) if the DSS is exhausted. */
static void *dss_prev;
/* Current upper limit on DSS addresses. */
static void *dss_max;

/******************************************************************************/

#ifndef JEMALLOC_HAVE_SBRK
static void *
sbrk(intptr_t increment)
{

	not_implemented();

	return (NULL);
}
#endif

dss_prec_t
chunk_dss_prec_get(void)
{
	dss_prec_t ret;

	if (config_dss == false)
		return (dss_prec_disabled);
	malloc_mutex_lock(&dss_mtx);
	ret = dss_prec_default;
	malloc_mutex_unlock(&dss_mtx);
	return (ret);
}

bool
chunk_dss_prec_set(dss_prec_t dss_prec)
{

	if (config_dss == false)
		return (true);
	malloc_mutex_lock(&dss_mtx);
	dss_prec_default = dss_prec;
	malloc_mutex_unlock(&dss_mtx);
	return (false);
}
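
/*
 * Usage sketch (hypothetical caller, not part of this file): make newly
 * created arenas try the DSS before mmap().  chunk_dss_prec_set() returns
 * false on success, true when the DSS is unsupported (config_dss == false).
 *
 *	if (chunk_dss_prec_set(dss_prec_primary))
 *		... fall back to mmap-only chunk allocation ...
 */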

void *
chunk_alloc_dss(size_t size, size_t alignment, bool *zero)
{
	void *ret;

	cassert(config_dss);
	assert(size > 0 && (size & chunksize_mask) == 0);
	assert(alignment > 0 && (alignment & chunksize_mask) == 0);

	/*
	 * sbrk() uses a signed increment argument, so take care not to
	 * interpret a huge allocation request as a negative increment.
	 */
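	/*
	 * For example (hypothetical, 64-bit): size == (size_t)1 << 63 casts
	 * to INTPTR_MIN, so passing it through to sbrk() would request that
	 * the DSS shrink rather than grow.
	 */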
	if ((intptr_t)size < 0)
		return (NULL);

	malloc_mutex_lock(&dss_mtx);
	if (dss_prev != (void *)-1) {
		size_t gap_size, cpad_size;
		void *cpad, *dss_next;
		intptr_t incr;

		/*
		 * The loop is necessary to recover from races with other
		 * threads that are using the DSS for something other than
		 * malloc.
		 */
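		/*
		 * Concretely: if another thread calls sbrk() directly between
		 * our sbrk(0) probe and the sbrk(incr) below, sbrk(incr)
		 * returns a previous break other than dss_max, the success
		 * test fails, and the loop retries with a fresh probe.
		 */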
		do {
			/* Get the current end of the DSS. */
			dss_max = sbrk(0);
			/*
			 * Calculate how much padding is necessary to
			 * chunk-align the end of the DSS.
			 */
			gap_size = (chunksize - CHUNK_ADDR2OFFSET(dss_max)) &
			    chunksize_mask;
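			/*
			 * Worked example (assumed values): with chunksize ==
			 * 4 MiB (0x400000) and dss_max == 0x1234000, the
			 * offset within the chunk is 0x234000, so gap_size ==
			 * (0x400000 - 0x234000) & 0x3fffff == 0x1cc000, and
			 * dss_max + gap_size == 0x1400000 is chunk-aligned.
			 */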
			/*
			 * Compute how much chunk-aligned pad space (if any) is
			 * necessary to satisfy alignment.  This space can be
			 * recycled for later use.
			 */
			cpad = (void *)((uintptr_t)dss_max + gap_size);
			ret = (void *)ALIGNMENT_CEILING((uintptr_t)dss_max,
			    alignment);
			cpad_size = (uintptr_t)ret - (uintptr_t)cpad;
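			/*
			 * Continuing the example above with alignment ==
			 * 8 MiB (0x800000): ret == 0x1800000 (dss_max rounded
			 * up to the alignment), cpad == 0x1400000, and
			 * cpad_size == 0x400000; that one-chunk pad is handed
			 * to chunk_unmap() below for recycling.
			 */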
			dss_next = (void *)((uintptr_t)ret + size);
			if ((uintptr_t)ret < (uintptr_t)dss_max ||
			    (uintptr_t)dss_next < (uintptr_t)dss_max) {
				/* Wrap-around. */
				malloc_mutex_unlock(&dss_mtx);
				return (NULL);
			}
			incr = gap_size + cpad_size + size;
			dss_prev = sbrk(incr);
			if (dss_prev == dss_max) {
				/* Success. */
				dss_max = dss_next;
				malloc_mutex_unlock(&dss_mtx);
				if (cpad_size != 0)
					chunk_unmap(cpad, cpad_size);
				if (*zero) {
					VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
					memset(ret, 0, size);
				}
				return (ret);
			}
		} while (dss_prev != (void *)-1);
	}
	malloc_mutex_unlock(&dss_mtx);

	return (NULL);
}

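/*
 * Usage sketch (hypothetical, fallback name assumed): allocating one zeroed
 * chunk from the DSS.  On success ret is chunk-aligned and size bytes long;
 * NULL means the DSS is exhausted or the request could not be satisfied.
 *
 *	bool zero = true;
 *	void *ret = chunk_alloc_dss(chunksize, chunksize, &zero);
 *	if (ret == NULL)
 *		... fall back to mmap-based chunk allocation ...
 */
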
bool
chunk_in_dss(void *chunk)
{
	bool ret;

	cassert(config_dss);

	malloc_mutex_lock(&dss_mtx);
	if ((uintptr_t)chunk >= (uintptr_t)dss_base
	    && (uintptr_t)chunk < (uintptr_t)dss_max)
		ret = true;
	else
		ret = false;
	malloc_mutex_unlock(&dss_mtx);

	return (ret);
}
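
/*
 * For instance, a chunk returned by chunk_alloc_dss() above satisfies
 * dss_base <= chunk < dss_max and reports true here, while an mmap()ed
 * chunk falls outside that range and reports false.
 */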

bool
chunk_dss_boot(void)
{

	cassert(config_dss);

	if (malloc_mutex_init(&dss_mtx))
		return (true);
	dss_base = sbrk(0);
	dss_prev = dss_base;
	dss_max = dss_base;

	return (false);
}
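
/*
 * Note (assumed calling context): jemalloc initialization is expected to
 * call chunk_dss_boot() once, before any chunk_alloc_dss() call, so that
 * dss_base/dss_prev/dss_max start at the current program break.
 */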

void
chunk_dss_prefork(void)
{

	if (config_dss)
		malloc_mutex_prefork(&dss_mtx);
}

void
chunk_dss_postfork_parent(void)
{

	if (config_dss)
		malloc_mutex_postfork_parent(&dss_mtx);
}

void
chunk_dss_postfork_child(void)
{

	if (config_dss)
		malloc_mutex_postfork_child(&dss_mtx);
}

/******************************************************************************/