#define JEMALLOC_BASE_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

static malloc_mutex_t base_mtx;

/*
 * Current pages that are being used for internal memory allocations.  These
 * pages are carved up in cacheline-size quanta, so that there is no chance of
 * false cache line sharing.
 */
static void *base_pages;
static void *base_next_addr;
static void *base_past_addr; /* Addr immediately past base_pages. */
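/*
 * LIFO free list of extent nodes handed back via base_node_dealloc(),
 * linked through the nodes' own first words.
 */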
static extent_node_t *base_nodes;

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static bool base_pages_alloc(size_t minsize);

/******************************************************************************/

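/*
 * Allocate at least minsize bytes of fresh, chunk-aligned pages to carve
 * future base allocations from.  Returns true on error, false on success
 * (jemalloc's usual boolean-error convention).
 */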
static bool
base_pages_alloc(size_t minsize)
{
	size_t csize;
	bool zero;

	assert(minsize != 0);
	csize = CHUNK_CEILING(minsize);
	zero = false;
	base_pages = chunk_alloc(csize, chunksize, true, &zero,
	    chunk_dss_prec_get());
	if (base_pages == NULL)
		return (true);
	base_next_addr = base_pages;
	base_past_addr = (void *)((uintptr_t)base_pages + csize);

	return (false);
}

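/*
 * Bump-allocate size bytes (rounded up to a cacheline multiple, so distinct
 * allocations never share a cache line) out of the current base pages,
 * growing them via base_pages_alloc() as needed.  Returns NULL on error.
 * There is intentionally no corresponding deallocation routine.
 */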
void *
base_alloc(size_t size)
{
	void *ret;
	size_t csize;

	/* Round size up to nearest multiple of the cacheline size. */
	csize = CACHELINE_CEILING(size);

	malloc_mutex_lock(&base_mtx);
	/* Make sure there's enough space for the allocation. */
	if ((uintptr_t)base_next_addr + csize > (uintptr_t)base_past_addr) {
		if (base_pages_alloc(csize)) {
			malloc_mutex_unlock(&base_mtx);
			return (NULL);
		}
	}
	/* Allocate. */
	ret = base_next_addr;
	base_next_addr = (void *)((uintptr_t)base_next_addr + csize);
	malloc_mutex_unlock(&base_mtx);

	return (ret);
}

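/*
 * Zeroed variant of base_alloc().  Note that number * size is not checked
 * for overflow; callers are presumed to pass small, trusted values.
 */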
void *
base_calloc(size_t number, size_t size)
{
	void *ret = base_alloc(number * size);

	if (ret != NULL)
		memset(ret, 0, number * size);

	return (ret);
}

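/*
 * Allocate an extent node, preferentially recycling one from the free list
 * before falling back to base_alloc().
 */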
extent_node_t *
base_node_alloc(void)
{
	extent_node_t *ret;

	malloc_mutex_lock(&base_mtx);
	if (base_nodes != NULL) {
		ret = base_nodes;
		base_nodes = *(extent_node_t **)ret;
		malloc_mutex_unlock(&base_mtx);
	} else {
		malloc_mutex_unlock(&base_mtx);
		ret = (extent_node_t *)base_alloc(sizeof(extent_node_t));
	}

	return (ret);
}

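/*
 * Push a no-longer-needed node onto the free list.  Its memory is recycled
 * for future nodes rather than being returned to the system.
 */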
void
base_node_dealloc(extent_node_t *node)
{

	malloc_mutex_lock(&base_mtx);
	*(extent_node_t **)node = base_nodes;
	base_nodes = node;
	malloc_mutex_unlock(&base_mtx);
}

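/* One-time module initialization.  Returns true on error, false on success. */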
bool
base_boot(void)
{

	base_nodes = NULL;
	if (malloc_mutex_init(&base_mtx))
		return (true);

	return (false);
}

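/*
 * Fork hooks: acquire base_mtx before fork() and release (or, in the child,
 * reinitialize) it afterward, so that a child process never inherits a mutex
 * locked by a thread that does not exist in the child.
 */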
void
base_prefork(void)
{

	malloc_mutex_prefork(&base_mtx);
}

void
base_postfork_parent(void)
{

	malloc_mutex_postfork_parent(&base_mtx);
}

void
base_postfork_child(void)
{

	malloc_mutex_postfork_child(&base_mtx);
}