/*
 * This radix tree implementation is tailored to the singular purpose of
 * tracking which chunks are currently owned by jemalloc.  This functionality
 * is mandatory for OS X, where jemalloc must be able to respond to object
 * ownership queries.
 *
 *******************************************************************************
 */
#ifdef JEMALLOC_H_TYPES

typedef struct rtree_s rtree_t;

/*
 * Size of each radix tree node (must be a power of 2).  This impacts tree
 * depth.
 */
#if (LG_SIZEOF_PTR == 2)
# define RTREE_NODESIZE (1U << 14)
#else
# define RTREE_NODESIZE CACHELINE
#endif
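
/*
 * Illustrative arithmetic (editorial note, not from the source; assumes a
 * 64-byte CACHELINE): on a 32-bit system each 16 KiB node holds
 * 16384 / sizeof(void *) == 4096 slots, so each level consumes 12 key bits;
 * on a 64-bit system a cacheline-sized node holds 64 / 8 == 8 slots, i.e.
 * 3 key bits per level, trading a deeper tree for cache-friendly nodes.
 */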

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

struct rtree_s {
	malloc_mutex_t	mutex;
	void		**root;
	unsigned	height;
	unsigned	level2bits[1]; /* Dynamically sized. */
};
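
/*
 * Editorial note (inferred; the constructor lives in rtree.c, which is not
 * shown here): level2bits is a pre-C99 "struct hack" flexible array.  A
 * plausible allocation in rtree_new() would reserve one slot per level:
 *
 *	rtree = (rtree_t *)base_alloc(offsetof(rtree_t, level2bits) +
 *	    (sizeof(unsigned) * height));
 *
 * after which level2bits[i] records how many key bits level i consumes.
 */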

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

/*
 * Create an rtree for keys of which only the high-order "bits" bits are
 * significant; the low-order bits are ignored by rtree_get() and rtree_set().
 */
rtree_t	*rtree_new(unsigned bits);
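
/*
 * Hypothetical construction sketch (editorial illustration, not part of this
 * header): a chunk-ownership map needs one key bit for every address bit
 * above the chunk size, e.g.
 *
 *	chunks_rtree = rtree_new((unsigned)((ZU(1) << (LG_SIZEOF_PTR+3)) -
 *	    opt_lg_chunk));
 *
 * which on a 64-bit system with 4 MiB chunks (opt_lg_chunk == 22) yields
 * rtree_new(42).
 */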

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#ifndef JEMALLOC_ENABLE_INLINE
/*
 * rtree_get_locked() is only generated for debug builds (see the
 * JEMALLOC_DEBUG block below), so its prototype is guarded the same way.
 */
#ifdef JEMALLOC_DEBUG
void	*rtree_get_locked(rtree_t *rtree, uintptr_t key);
#endif
void	*rtree_get(rtree_t *rtree, uintptr_t key);
bool	rtree_set(rtree_t *rtree, uintptr_t key, void *val);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_RTREE_C_))
#define RTREE_GET_GENERATE(f) \
/* The least significant bits of the key are ignored. */ \
JEMALLOC_INLINE void * \
f(rtree_t *rtree, uintptr_t key) \
{ \
	void *ret; \
	uintptr_t subkey; \
	unsigned i, lshift, height, bits; \
	void **node, **child; \
\
	RTREE_LOCK(&rtree->mutex); \
	for (i = lshift = 0, height = rtree->height, node = rtree->root; \
	    i < height - 1; \
	    i++, lshift += bits, node = child) { \
		bits = rtree->level2bits[i]; \
		subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) - \
		    bits); \
		child = (void**)node[subkey]; \
		if (child == NULL) { \
			RTREE_UNLOCK(&rtree->mutex); \
			return (NULL); \
		} \
	} \
\
	/* \
	 * node is a leaf, so it contains values rather than node \
	 * pointers. \
	 */ \
	bits = rtree->level2bits[i]; \
	subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) - \
	    bits); \
	ret = node[subkey]; \
	RTREE_UNLOCK(&rtree->mutex); \
\
	RTREE_GET_VALIDATE \
	return (ret); \
}
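
/*
 * Worked example (editorial illustration): assume 64-bit pointers, so
 * (ZU(1) << (LG_SIZEOF_PTR+3)) == 64.  With height == 2 and
 * level2bits == {3, 3}, a lookup consumes the key's top six bits:
 *
 *	level 0: subkey = (key << 0) >> (64 - 3)   -- key bits 63..61
 *	level 1: subkey = (key << 3) >> (64 - 3)   -- key bits 60..58
 *
 * Bits 57..0 never reach a subkey, which is why "the least significant
 * bits of the key are ignored" above.
 */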

#ifdef JEMALLOC_DEBUG
# define RTREE_LOCK(l)		malloc_mutex_lock(l)
# define RTREE_UNLOCK(l)	malloc_mutex_unlock(l)
# define RTREE_GET_VALIDATE
RTREE_GET_GENERATE(rtree_get_locked)
# undef RTREE_LOCK
# undef RTREE_UNLOCK
# undef RTREE_GET_VALIDATE
#endif

#define RTREE_LOCK(l)
#define RTREE_UNLOCK(l)
#ifdef JEMALLOC_DEBUG
/*
 * Suppose that it were possible for a jemalloc-allocated chunk to be
 * munmap()ped, followed by a different allocator in another thread re-using
 * overlapping virtual memory, all without invalidating the cached rtree
 * value.  The result would be a false positive (the rtree would claim that
 * jemalloc owns memory that it had actually discarded).  This scenario
 * seems impossible, but the following assertion is a prudent sanity check.
 */
# define RTREE_GET_VALIDATE \
	assert(rtree_get_locked(rtree, key) == ret);
#else
# define RTREE_GET_VALIDATE
#endif
RTREE_GET_GENERATE(rtree_get)
#undef RTREE_LOCK
#undef RTREE_UNLOCK
#undef RTREE_GET_VALIDATE

/*
 * Insert val under key (again keyed on the high-order bits); returns true
 * if allocating an intermediate node failed, false on success.
 */
JEMALLOC_INLINE bool
rtree_set(rtree_t *rtree, uintptr_t key, void *val)
{
	uintptr_t subkey;
	unsigned i, lshift, height, bits;
	void **node, **child;

	malloc_mutex_lock(&rtree->mutex);
	for (i = lshift = 0, height = rtree->height, node = rtree->root;
	    i < height - 1;
	    i++, lshift += bits, node = child) {
		bits = rtree->level2bits[i];
		subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) -
		    bits);
		child = (void**)node[subkey];
		if (child == NULL) {
			/* Lazily create the next level's node. */
			child = (void**)base_alloc(sizeof(void *) <<
			    rtree->level2bits[i+1]);
			if (child == NULL) {
				malloc_mutex_unlock(&rtree->mutex);
				return (true);
			}
			memset(child, 0, sizeof(void *) <<
			    rtree->level2bits[i+1]);
			node[subkey] = child;
		}
	}

	/* node is a leaf, so it contains values rather than node pointers. */
	bits = rtree->level2bits[i];
	subkey = (key << lshift) >> ((ZU(1) << (LG_SIZEOF_PTR+3)) - bits);
	node[subkey] = val;
	malloc_mutex_unlock(&rtree->mutex);

	return (false);
}
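
/*
 * End-to-end usage sketch (editorial illustration; chunk_rtree and the
 * 4 MiB chunk size are hypothetical):
 *
 *	rtree_t *chunk_rtree = rtree_new(64 - 22);  // 64-bit, 4 MiB chunks
 *	void *chunk = ...;                          // chunk-aligned address
 *	if (rtree_set(chunk_rtree, (uintptr_t)chunk, chunk))
 *		abort();            // true == base_alloc() failure
 *	assert(rtree_get(chunk_rtree, (uintptr_t)chunk) == chunk);
 *	rtree_set(chunk_rtree, (uintptr_t)chunk, NULL);  // ownership dropped
 */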
#endif

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/