#include "jemalloc/internal/jemalloc_internal.h"
#ifndef JEMALLOC_ZONE
# error "This source file is for zones on Darwin (OS X)."
#endif
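
/*
 * This file implements Darwin's malloc zone interface on top of jemalloc.
 * Two pieces are involved: a plain zone built by create_zone(), which
 * forwards every operation to jemalloc, and an "ozone" overlay installed by
 * szone2ozone() over the default scalable zone, so that existing szone
 * allocations can be drained while new allocations come from jemalloc.
 */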

/******************************************************************************/
/* Data. */

static malloc_zone_t zone, szone;
static struct malloc_introspection_t zone_introspect, ozone_introspect;

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static size_t zone_size(malloc_zone_t *zone, void *ptr);
static void *zone_malloc(malloc_zone_t *zone, size_t size);
static void *zone_calloc(malloc_zone_t *zone, size_t num, size_t size);
static void *zone_valloc(malloc_zone_t *zone, size_t size);
static void zone_free(malloc_zone_t *zone, void *ptr);
static void *zone_realloc(malloc_zone_t *zone, void *ptr, size_t size);
#if (JEMALLOC_ZONE_VERSION >= 6)
static void *zone_memalign(malloc_zone_t *zone, size_t alignment,
    size_t size);
static void zone_free_definite_size(malloc_zone_t *zone, void *ptr,
    size_t size);
#endif
static void *zone_destroy(malloc_zone_t *zone);
static size_t zone_good_size(malloc_zone_t *zone, size_t size);
static void zone_force_lock(malloc_zone_t *zone);
static void zone_force_unlock(malloc_zone_t *zone);
static size_t ozone_size(malloc_zone_t *zone, void *ptr);
static void ozone_free(malloc_zone_t *zone, void *ptr);
static void *ozone_realloc(malloc_zone_t *zone, void *ptr, size_t size);
static unsigned ozone_batch_malloc(malloc_zone_t *zone, size_t size,
    void **results, unsigned num_requested);
static void ozone_batch_free(malloc_zone_t *zone, void **to_be_freed,
    unsigned num);
#if (JEMALLOC_ZONE_VERSION >= 6)
static void ozone_free_definite_size(malloc_zone_t *zone, void *ptr,
    size_t size);
#endif
static void ozone_force_lock(malloc_zone_t *zone);
static void ozone_force_unlock(malloc_zone_t *zone);

/******************************************************************************/
/*
 * Functions.
 */

static size_t
zone_size(malloc_zone_t *zone, void *ptr)
{

    /*
     * There appear to be places within Darwin (such as setenv(3)) that
     * cause calls to this function with pointers that *no* zone owns. If
     * we knew that all pointers were owned by *some* zone, we could split
     * our zone into two parts, and use one as the default allocator and
     * the other as the default deallocator/reallocator. Since that will
     * not work in practice, we must check all pointers to ensure that they
     * reside within a mapped chunk before determining size.
     */
    return (ivsalloc(ptr));
}

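/*
 * Most of the zone entry points below are thin wrappers that forward
 * directly to the corresponding jemalloc functions; the JEMALLOC_P() macro
 * applies jemalloc's public symbol name mangling.
 */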
static void *
zone_malloc(malloc_zone_t *zone, size_t size)
{

    return (JEMALLOC_P(malloc)(size));
}

static void *
zone_calloc(malloc_zone_t *zone, size_t num, size_t size)
{

    return (JEMALLOC_P(calloc)(num, size));
}

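/*
 * valloc(3) semantics: the allocation is page-aligned. posix_memalign()'s
 * return value is deliberately ignored; on failure ret is left NULL, which
 * is what the zone interface expects an allocation function to return.
 */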
static void *
zone_valloc(malloc_zone_t *zone, size_t size)
{
    void *ret = NULL; /* Assignment avoids useless compiler warning. */

    JEMALLOC_P(posix_memalign)(&ret, PAGE_SIZE, size);

    return (ret);
}

static void
zone_free(malloc_zone_t *zone, void *ptr)
{

    JEMALLOC_P(free)(ptr);
}

static void *
zone_realloc(malloc_zone_t *zone, void *ptr, size_t size)
{

    return (JEMALLOC_P(realloc)(ptr, size));
}

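/*
 * memalign and free_definite_size were added in version 6 of the zone
 * interface (circa Mac OS X 10.6), so they are only compiled in when the
 * headers being built against are new enough to declare them.
 */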
#if (JEMALLOC_ZONE_VERSION >= 6)
static void *
zone_memalign(malloc_zone_t *zone, size_t alignment, size_t size)
{
    void *ret = NULL; /* Assignment avoids useless compiler warning. */

    JEMALLOC_P(posix_memalign)(&ret, alignment, size);

    return (ret);
}

static void
zone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size)
{

    assert(ivsalloc(ptr) == size);
    JEMALLOC_P(free)(ptr);
}
#endif

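/*
 * Neither the statically allocated jemalloc zone nor the overlaid default
 * zone is ever torn down, so a destroy call indicates a bug in the caller.
 */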
static void *
zone_destroy(malloc_zone_t *zone)
{

    /* This function should never be called. */
    assert(false);
    return (NULL);
}

static size_t
zone_good_size(malloc_zone_t *zone, size_t size)
{
    size_t ret;
    void *p;

    /*
     * Actually create an object of the appropriate size, then find out
     * how large it could have been without moving up to the next size
     * class.
     */
    p = JEMALLOC_P(malloc)(size);
    if (p != NULL) {
        ret = isalloc(p);
        JEMALLOC_P(free)(p);
    } else
        ret = size;

    return (ret);
}

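/*
 * force_lock/force_unlock are invoked around fork(2) so that the allocator's
 * locks are in a consistent state in the child; they map directly onto
 * jemalloc's prefork/postfork handlers.
 */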
static void
zone_force_lock(malloc_zone_t *zone)
{

    if (isthreaded)
        jemalloc_prefork();
}

static void
zone_force_unlock(malloc_zone_t *zone)
{

    if (isthreaded)
        jemalloc_postfork();
}

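/*
 * Build the jemalloc zone and hand it back to the caller for registration.
 * A sketch of the intended use (the registration call itself lives in
 * jemalloc's Darwin initialization code, not in this file):
 *
 *     malloc_zone_register(create_zone());
 */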
malloc_zone_t *
create_zone(void)
{

    zone.size = (void *)zone_size;
    zone.malloc = (void *)zone_malloc;
    zone.calloc = (void *)zone_calloc;
    zone.valloc = (void *)zone_valloc;
    zone.free = (void *)zone_free;
    zone.realloc = (void *)zone_realloc;
    zone.destroy = (void *)zone_destroy;
    zone.zone_name = "jemalloc_zone";
    zone.batch_malloc = NULL;
    zone.batch_free = NULL;
    zone.introspect = &zone_introspect;
    zone.version = JEMALLOC_ZONE_VERSION;
#if (JEMALLOC_ZONE_VERSION >= 6)
    zone.memalign = zone_memalign;
    zone.free_definite_size = zone_free_definite_size;
#endif

    zone_introspect.enumerator = NULL;
    zone_introspect.good_size = (void *)zone_good_size;
    zone_introspect.check = NULL;
    zone_introspect.print = NULL;
    zone_introspect.log = NULL;
    zone_introspect.force_lock = (void *)zone_force_lock;
    zone_introspect.force_unlock = (void *)zone_force_unlock;
    zone_introspect.statistics = NULL;
#if (JEMALLOC_ZONE_VERSION >= 6)
    zone_introspect.zone_locked = NULL;
#endif

    return (&zone);
}

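/*
 * The ozone functions below service the overlaid default zone. Each one must
 * first decide who owns the pointer: ivsalloc() returns a nonzero size only
 * for pointers residing in a chunk mapped by jemalloc, so a zero result
 * means the allocation (if any) belongs to the stashed szone.
 */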
static size_t
ozone_size(malloc_zone_t *zone, void *ptr)
{
    size_t ret;

    ret = ivsalloc(ptr);
    if (ret == 0)
        ret = szone.size(zone, ptr);

    return (ret);
}

static void
ozone_free(malloc_zone_t *zone, void *ptr)
{

    if (ivsalloc(ptr) != 0)
        JEMALLOC_P(free)(ptr);
    else {
        size_t size = szone.size(zone, ptr);
        if (size != 0)
            (szone.free)(zone, ptr);
    }
}

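/*
 * Reallocation is also the migration path from the szone into jemalloc: when
 * the old allocation belongs to the szone, a new jemalloc block is
 * allocated, min(oldsize, size) bytes are copied over, and the szone copy is
 * freed.
 */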
static void *
ozone_realloc(malloc_zone_t *zone, void *ptr, size_t size)
{
    size_t oldsize;

    if (ptr == NULL)
        return (JEMALLOC_P(malloc)(size));

    oldsize = ivsalloc(ptr);
    if (oldsize != 0)
        return (JEMALLOC_P(realloc)(ptr, size));
    else {
        oldsize = szone.size(zone, ptr);
        if (oldsize == 0)
            return (JEMALLOC_P(malloc)(size));
        else {
            void *ret = JEMALLOC_P(malloc)(size);
            if (ret != NULL) {
                memcpy(ret, ptr, (oldsize < size) ? oldsize :
                    size);
                (szone.free)(zone, ptr);
            }
            return (ret);
        }
    }
}

static unsigned
ozone_batch_malloc(malloc_zone_t *zone, size_t size, void **results,
    unsigned num_requested)
{

    /* Don't bother implementing this interface, since it isn't required. */
    return (0);
}

static void
ozone_batch_free(malloc_zone_t *zone, void **to_be_freed, unsigned num)
{
    unsigned i;

    for (i = 0; i < num; i++)
        ozone_free(zone, to_be_freed[i]);
}

#if (JEMALLOC_ZONE_VERSION >= 6)
static void
ozone_free_definite_size(malloc_zone_t *zone, void *ptr, size_t size)
{

    if (ivsalloc(ptr) != 0) {
        assert(ivsalloc(ptr) == size);
        JEMALLOC_P(free)(ptr);
    } else {
        assert(size == szone.size(zone, ptr));
        szone.free_definite_size(zone, ptr, size);
    }
}
#endif

static void
ozone_force_lock(malloc_zone_t *zone)
{

    /* jemalloc locking is taken care of by the normal jemalloc zone. */
    szone.introspect->force_lock(zone);
}

static void
ozone_force_unlock(malloc_zone_t *zone)
{

    /* jemalloc locking is taken care of by the normal jemalloc zone. */
    szone.introspect->force_unlock(zone);
}

/*
 * Overlay the default scalable zone (szone) such that existing allocations
 * are drained, and further allocations come from jemalloc. This is necessary
 * because Core Foundation directly accesses and uses the szone before the
 * jemalloc library is even loaded.
 */
void
szone2ozone(malloc_zone_t *zone)
{

    /*
     * Stash a copy of the original szone so that we can call its
     * functions as needed. Note that internally, the szone stores its
     * bookkeeping data structures immediately following the malloc_zone_t
     * header, so when calling szone functions, we need to pass a pointer
     * to the original zone structure.
     */
    memcpy(&szone, zone, sizeof(malloc_zone_t));

    zone->size = (void *)ozone_size;
    zone->malloc = (void *)zone_malloc;
    zone->calloc = (void *)zone_calloc;
    zone->valloc = (void *)zone_valloc;
    zone->free = (void *)ozone_free;
    zone->realloc = (void *)ozone_realloc;
    zone->destroy = (void *)zone_destroy;
    zone->zone_name = "jemalloc_ozone";
    zone->batch_malloc = ozone_batch_malloc;
    zone->batch_free = ozone_batch_free;
    zone->introspect = &ozone_introspect;
    zone->version = JEMALLOC_ZONE_VERSION;
#if (JEMALLOC_ZONE_VERSION >= 6)
    zone->memalign = zone_memalign;
    zone->free_definite_size = ozone_free_definite_size;
#endif

    ozone_introspect.enumerator = NULL;
    ozone_introspect.good_size = (void *)zone_good_size;
    ozone_introspect.check = NULL;
    ozone_introspect.print = NULL;
    ozone_introspect.log = NULL;
    ozone_introspect.force_lock = (void *)ozone_force_lock;
    ozone_introspect.force_unlock = (void *)ozone_force_unlock;
    ozone_introspect.statistics = NULL;
#if (JEMALLOC_ZONE_VERSION >= 6)
    ozone_introspect.zone_locked = NULL;
#endif
}
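
/*
 * A minimal sketch of how the overlay is applied (the actual call is made
 * from jemalloc's Darwin bootstrap code, not in this file):
 *
 *     szone2ozone(malloc_default_zone());
 *
 * malloc_default_zone() is the Darwin API returning the default scalable
 * zone that this function is designed to overlay.
 */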