#define JEMALLOC_BASE_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

malloc_mutex_t base_mtx;

/*
 * Current pages that are being used for internal memory allocations.  These
 * pages are carved up in cacheline-size quanta, so that there is no chance of
 * false cache line sharing.
 */
static void *base_pages;
static void *base_next_addr;
static void *base_past_addr; /* Addr immediately past base_pages. */
static extent_node_t *base_nodes;

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static bool base_pages_alloc(size_t minsize);

/******************************************************************************/

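/*
 * Map a new chunk of pages from which to carve base allocations.  minsize is
 * rounded up to a multiple of the chunk size.  Returns true on error.
 */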
static bool
base_pages_alloc(size_t minsize)
{
	size_t csize;
	bool zero;

	assert(minsize != 0);
	csize = CHUNK_CEILING(minsize);
	zero = false;
	base_pages = chunk_alloc(csize, true, &zero);
	if (base_pages == NULL)
		return (true);
	base_next_addr = base_pages;
	base_past_addr = (void *)((uintptr_t)base_pages + csize);

	return (false);
}

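/*
 * Allocate size bytes of internal memory, rounded up to a multiple of the
 * cacheline size so that base allocations never share a cache line.  Returns
 * NULL if additional base pages cannot be mapped.  There is no function for
 * deallocating base memory; only extent nodes are recycled (see
 * base_node_dealloc()).
 */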
void *
base_alloc(size_t size)
{
	void *ret;
	size_t csize;

	/* Round size up to nearest multiple of the cacheline size. */
	csize = CACHELINE_CEILING(size);

	malloc_mutex_lock(&base_mtx);
	/* Make sure there's enough space for the allocation. */
	if ((uintptr_t)base_next_addr + csize > (uintptr_t)base_past_addr) {
		if (base_pages_alloc(csize)) {
			malloc_mutex_unlock(&base_mtx);
			return (NULL);
		}
	}
	/* Allocate. */
	ret = base_next_addr;
	base_next_addr = (void *)((uintptr_t)base_next_addr + csize);
	malloc_mutex_unlock(&base_mtx);

	return (ret);
}

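/*
 * Allocate an extent_node_t.  Recycled nodes are kept on the base_nodes free
 * list, where each free node's first word points to the next free node; if
 * the list is empty, fall back to base_alloc().
 */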
extent_node_t *
base_node_alloc(void)
{
	extent_node_t *ret;

	malloc_mutex_lock(&base_mtx);
	if (base_nodes != NULL) {
		ret = base_nodes;
		base_nodes = *(extent_node_t **)ret;
		malloc_mutex_unlock(&base_mtx);
	} else {
		malloc_mutex_unlock(&base_mtx);
		ret = (extent_node_t *)base_alloc(sizeof(extent_node_t));
	}

	return (ret);
}

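/*
 * Stash an unused extent_node_t on the base_nodes free list by overwriting
 * its first word with the current list head.
 */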
void
base_node_dealloc(extent_node_t *node)
{

	malloc_mutex_lock(&base_mtx);
	*(extent_node_t **)node = base_nodes;
	base_nodes = node;
	malloc_mutex_unlock(&base_mtx);
}

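/*
 * Initialize the base allocator's state.  Returns true if mutex
 * initialization fails.
 */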
bool
base_boot(void)
{

	base_nodes = NULL;
	if (malloc_mutex_init(&base_mtx))
		return (true);

	return (false);
}