/* deps/jemalloc/src/base.c */
#define JEMALLOC_BASE_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

static malloc_mutex_t base_mtx;

/*
 * Current pages that are being used for internal memory allocations. These
 * pages are carved up in cacheline-size quanta, so that there is no chance of
 * false cache line sharing.
 */
static void *base_pages;
static void *base_next_addr;
static void *base_past_addr; /* Addr immediately past base_pages. */
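/*
 * Free list of extent nodes that were carved out of base pages and later
 * returned via base_node_dealloc(); the first word of each free node is
 * reused as the link to the next one.
 */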
static extent_node_t *base_nodes;

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static bool base_pages_alloc(size_t minsize);

/******************************************************************************/

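/*
 * Acquire another chunk of at least minsize bytes to carve base allocations
 * from. Returns true on error, false on success.
 */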
static bool
base_pages_alloc(size_t minsize)
{
	size_t csize;
	bool zero;

	assert(minsize != 0);
	csize = CHUNK_CEILING(minsize);
	zero = false;
	base_pages = chunk_alloc(csize, chunksize, true, &zero);
	if (base_pages == NULL)
		return (true);
	base_next_addr = base_pages;
	base_past_addr = (void *)((uintptr_t)base_pages + csize);

	return (false);
}

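/*
 * Allocate size bytes of internal memory, rounded up to a cacheline multiple.
 * Base allocations are permanent: there is no corresponding free, so the
 * backing chunks live for the lifetime of the process.
 */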
void *
base_alloc(size_t size)
{
	void *ret;
	size_t csize;

	/* Round size up to nearest multiple of the cacheline size. */
	csize = CACHELINE_CEILING(size);

	malloc_mutex_lock(&base_mtx);
	/* Make sure there's enough space for the allocation. */
	if ((uintptr_t)base_next_addr + csize > (uintptr_t)base_past_addr) {
		if (base_pages_alloc(csize)) {
			malloc_mutex_unlock(&base_mtx);
			return (NULL);
		}
	}
	/* Allocate. */
	ret = base_next_addr;
	base_next_addr = (void *)((uintptr_t)base_next_addr + csize);
	malloc_mutex_unlock(&base_mtx);

	return (ret);
}

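/*
 * Zeroed wrapper around base_alloc(). Callers are expected to pass operands
 * whose product cannot overflow size_t; no overflow check is performed here.
 */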
void *
base_calloc(size_t number, size_t size)
{
	void *ret = base_alloc(number * size);

	if (ret != NULL)
		memset(ret, 0, number * size);

	return (ret);
}

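/*
 * Allocate an extent node, preferring to recycle one from the base_nodes free
 * list before falling back to base_alloc().
 */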
extent_node_t *
base_node_alloc(void)
{
	extent_node_t *ret;

	malloc_mutex_lock(&base_mtx);
	if (base_nodes != NULL) {
		ret = base_nodes;
		base_nodes = *(extent_node_t **)ret;
		malloc_mutex_unlock(&base_mtx);
	} else {
		malloc_mutex_unlock(&base_mtx);
		ret = (extent_node_t *)base_alloc(sizeof(extent_node_t));
	}

	return (ret);
}

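/*
 * Push the node onto the base_nodes free list for later reuse; base memory is
 * never actually released.
 */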
void
base_node_dealloc(extent_node_t *node)
{

	malloc_mutex_lock(&base_mtx);
	*(extent_node_t **)node = base_nodes;
	base_nodes = node;
	malloc_mutex_unlock(&base_mtx);
}

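/* One-time initialization. Returns true on error, false on success. */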
bool
base_boot(void)
{

	base_nodes = NULL;
	if (malloc_mutex_init(&base_mtx))
		return (true);

	return (false);
}

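/*
 * Fork-safety hooks: base_prefork() acquires base_mtx before fork(), and the
 * postfork functions release it in the parent and child, so that the child
 * never inherits a mutex locked by another thread.
 */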
void
base_prefork(void)
{

	malloc_mutex_prefork(&base_mtx);
}

void
base_postfork_parent(void)
{

	malloc_mutex_postfork_parent(&base_mtx);
}

void
base_postfork_child(void)
{

	malloc_mutex_postfork_child(&base_mtx);
}