/* deps/jemalloc/src/base.c */
#define	JEMALLOC_BASE_C_
#include "jemalloc/internal/jemalloc_internal.h"
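/*
 * base.c implements jemalloc's "base" allocator: a mutex-protected bump
 * allocator that carves internal metadata out of dedicated chunks.  Memory
 * obtained here backs allocator bookkeeping only; nothing allocated from
 * these pages is ever unmapped.
 */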
/******************************************************************************/
/* Data. */

malloc_mutex_t	base_mtx;

/*
 * Current pages that are being used for internal memory allocations.  These
 * pages are carved up in cacheline-size quanta, so that there is no chance of
 * false cache line sharing.
 */
static void		*base_pages;
static void		*base_next_addr;
static void		*base_past_addr; /* Addr immediately past base_pages. */
static extent_node_t	*base_nodes;
/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static bool	base_pages_alloc(size_t minsize);

/******************************************************************************/
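/*
 * Acquire a new span of pages to carve base allocations from.  The request
 * is rounded up to a multiple of the chunk size via CHUNK_CEILING(); the
 * `true` argument to chunk_alloc() flags this as an internal (base)
 * allocation in this jemalloc version.  Returns true on failure.
 */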
static bool
base_pages_alloc(size_t minsize)
{
	size_t csize;
	bool zero;

	assert(minsize != 0);
	csize = CHUNK_CEILING(minsize);
	zero = false;
	base_pages = chunk_alloc(csize, true, &zero);
	if (base_pages == NULL)
		return (true);
	base_next_addr = base_pages;
	base_past_addr = (void *)((uintptr_t)base_pages + csize);

	return (false);
}
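/*
 * Bump-allocate size bytes (rounded up to a cacheline multiple) from the
 * current base pages, growing them via base_pages_alloc() when the request
 * does not fit.  Returns NULL only if acquiring new pages fails; base
 * allocations are never individually freed.
 */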
void *
base_alloc(size_t size)
{
	void *ret;
	size_t csize;

	/* Round size up to nearest multiple of the cacheline size. */
	csize = CACHELINE_CEILING(size);

	malloc_mutex_lock(&base_mtx);
	/* Make sure there's enough space for the allocation. */
	if ((uintptr_t)base_next_addr + csize > (uintptr_t)base_past_addr) {
		if (base_pages_alloc(csize)) {
			malloc_mutex_unlock(&base_mtx);
			return (NULL);
		}
	}
	/* Allocate. */
	ret = base_next_addr;
	base_next_addr = (void *)((uintptr_t)base_next_addr + csize);
	malloc_mutex_unlock(&base_mtx);

	return (ret);
}
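/*
 * Allocate an extent_node_t, preferring the free list maintained by
 * base_node_dealloc().  The list link is stored in the first word of each
 * unused node, hence the *(extent_node_t **) reinterpretation when popping
 * the list head.
 */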
extent_node_t *
base_node_alloc(void)
{
	extent_node_t *ret;

	malloc_mutex_lock(&base_mtx);
	if (base_nodes != NULL) {
		ret = base_nodes;
		base_nodes = *(extent_node_t **)ret;
		malloc_mutex_unlock(&base_mtx);
	} else {
		malloc_mutex_unlock(&base_mtx);
		ret = (extent_node_t *)base_alloc(sizeof(extent_node_t));
	}

	return (ret);
}
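/*
 * Return an unused extent_node_t by threading the free-list pointer through
 * the node's own storage; the node is recycled, never released.
 */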
void
base_node_dealloc(extent_node_t *node)
{

	malloc_mutex_lock(&base_mtx);
	*(extent_node_t **)node = base_nodes;
	base_nodes = node;
	malloc_mutex_unlock(&base_mtx);
}
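/*
 * One-time initialization during jemalloc bootstrap; returns true if the
 * base mutex cannot be initialized.
 */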
bool
base_boot(void)
{

	base_nodes = NULL;
	if (malloc_mutex_init(&base_mtx))
		return (true);

	return (false);
}
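/*
 * Usage sketch (illustrative only, not part of the original file): callers
 * such as the huge-allocation code obtain recyclable metadata nodes roughly
 * like this:
 *
 *	extent_node_t *node = base_node_alloc();
 *	if (node == NULL)
 *		return (true);		-- propagate allocation failure
 *	...
 *	base_node_dealloc(node);	-- recycled via the free list
 */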