deps/jemalloc/src/base.c
#define JEMALLOC_BASE_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

static malloc_mutex_t base_mtx;

/*
 * Current pages that are being used for internal memory allocations. These
 * pages are carved up in cacheline-size quanta, so that there is no chance of
 * false cache line sharing.
 */
static void *base_pages;
static void *base_next_addr;
static void *base_past_addr; /* Addr immediately past base_pages. */
static extent_node_t *base_nodes;

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static bool base_pages_alloc(size_t minsize);

/******************************************************************************/

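/*
 * Map another chunk's worth of pages (minsize rounded up to a chunk boundary)
 * for future internal allocations.  Returns true on error.
 */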
static bool
base_pages_alloc(size_t minsize)
{
	size_t csize;
	bool zero;

	assert(minsize != 0);
	csize = CHUNK_CEILING(minsize);
	zero = false;
	base_pages = chunk_alloc(csize, chunksize, true, &zero);
	if (base_pages == NULL)
		return (true);
	base_next_addr = base_pages;
	base_past_addr = (void *)((uintptr_t)base_pages + csize);

	return (false);
}

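/*
 * Return a cacheline-aligned block of at least size bytes.  Base allocations
 * are never freed; NULL is returned only if new pages cannot be mapped.
 */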
void *
base_alloc(size_t size)
{
	void *ret;
	size_t csize;

	/* Round size up to nearest multiple of the cacheline size. */
	csize = CACHELINE_CEILING(size);

	malloc_mutex_lock(&base_mtx);
	/* Make sure there's enough space for the allocation. */
	if ((uintptr_t)base_next_addr + csize > (uintptr_t)base_past_addr) {
		if (base_pages_alloc(csize)) {
			malloc_mutex_unlock(&base_mtx);
			return (NULL);
		}
	}
	/* Allocate. */
	ret = base_next_addr;
	base_next_addr = (void *)((uintptr_t)base_next_addr + csize);
	malloc_mutex_unlock(&base_mtx);

	return (ret);
}

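/* Zeroed variant of base_alloc(). */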
void *
base_calloc(size_t number, size_t size)
{
	void *ret = base_alloc(number * size);

	if (ret != NULL)
		memset(ret, 0, number * size);

	return (ret);
}

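/*
 * Allocate an extent node, preferring the free list of previously deallocated
 * nodes over carving new space out of the base pages.
 */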
extent_node_t *
base_node_alloc(void)
{
	extent_node_t *ret;

	malloc_mutex_lock(&base_mtx);
	if (base_nodes != NULL) {
		ret = base_nodes;
		base_nodes = *(extent_node_t **)ret;
		malloc_mutex_unlock(&base_mtx);
	} else {
		malloc_mutex_unlock(&base_mtx);
		ret = (extent_node_t *)base_alloc(sizeof(extent_node_t));
	}

	return (ret);
}

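/*
 * Stash the node at the head of the free list for reuse; its memory holds the
 * list link, since base allocations are never returned to the system.
 */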
void
base_node_dealloc(extent_node_t *node)
{

	malloc_mutex_lock(&base_mtx);
	*(extent_node_t **)node = base_nodes;
	base_nodes = node;
	malloc_mutex_unlock(&base_mtx);
}

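/* Initialize base allocator state; returns true on error. */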
bool
base_boot(void)
{

	base_nodes = NULL;
	if (malloc_mutex_init(&base_mtx))
		return (true);

	return (false);
}

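/*
 * Fork hooks: hold base_mtx across fork() so that the base allocator is in a
 * consistent state in both the parent and the child.
 */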
void
base_prefork(void)
{

	malloc_mutex_prefork(&base_mtx);
}

void
base_postfork_parent(void)
{

	malloc_mutex_postfork_parent(&base_mtx);
}

void
base_postfork_child(void)
{

	malloc_mutex_postfork_child(&base_mtx);
}
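/*
 * Illustrative sketch, not part of the original file: how another jemalloc
 * module might consume this API.  example_meta_t and example_meta_new() are
 * hypothetical names used only for the example; the block is guarded by
 * #if 0 so it does not affect the build.
 */
#if 0
typedef struct example_meta_s {
	size_t count;
} example_meta_t;

static example_meta_t *
example_meta_new(void)
{
	/* Base allocations are cacheline-aligned, zeroed here, never freed. */
	example_meta_t *meta = (example_meta_t *)base_calloc(1,
	    sizeof(example_meta_t));

	if (meta != NULL) {
		/* Extent nodes are recycled through an internal free list. */
		extent_node_t *node = base_node_alloc();
		if (node != NULL)
			base_node_dealloc(node);
	}

	return (meta);
}
#endif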