redis.git: deps/jemalloc/src/chunk_mmap.c
#define JEMALLOC_CHUNK_MMAP_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Function prototypes for non-inline static functions. */
static void *pages_map(void *addr, size_t size);
static void pages_unmap(void *addr, size_t size);
static void *chunk_alloc_mmap_slow(size_t size, size_t alignment,
    bool *zero);

/******************************************************************************/
static void *
pages_map(void *addr, size_t size)
{
	void *ret;

	assert(size != 0);

#ifdef _WIN32
	/*
	 * If VirtualAlloc can't allocate at the given address when one is
	 * given, it fails and returns NULL.
	 */
	ret = VirtualAlloc(addr, size, MEM_COMMIT | MEM_RESERVE,
	    PAGE_READWRITE);
#else
	/*
	 * We don't use MAP_FIXED here, because it can cause the *replacement*
	 * of existing mappings, and we only want to create new mappings.
	 */
	ret = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON,
	    -1, 0);
	assert(ret != NULL);

	if (ret == MAP_FAILED)
		ret = NULL;
	else if (addr != NULL && ret != addr) {
		/*
		 * We succeeded in mapping memory, but not in the right place.
		 */
		if (munmap(ret, size) == -1) {
			char buf[BUFERROR_BUF];

			buferror(buf, sizeof(buf));
			malloc_printf("<jemalloc>: Error in munmap(): %s\n",
			    buf);
			if (opt_abort)
				abort();
		}
		ret = NULL;
	}
#endif
	assert(ret == NULL || (addr == NULL && ret != addr)
	    || (addr != NULL && ret == addr));
	return (ret);
}
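The non-MAP_FIXED hint behaviour that pages_map() relies on is easy to observe in isolation. Below is a minimal standalone sketch, not part of jemalloc: the helper name try_map_at_hint and the hint address are made up here, and it assumes a POSIX system where MAP_ANON is available. It asks the kernel for a specific address and, exactly as pages_map() does, hands the mapping back when the kernel places it elsewhere.

#include <stddef.h>
#include <stdio.h>
#include <sys/mman.h>

/* Hypothetical helper: map `size` bytes, preferring `hint`, but never
 * replacing an existing mapping (no MAP_FIXED). Returns NULL unless the
 * kernel honoured the hint. */
static void *
try_map_at_hint(void *hint, size_t size)
{
	void *p = mmap(hint, size, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANON, -1, 0);

	if (p == MAP_FAILED)
		return (NULL);
	if (hint != NULL && p != hint) {
		/* Mapped, but somewhere else; undo it, as pages_map() does. */
		munmap(p, size);
		return (NULL);
	}
	return (p);
}

int
main(void)
{
	void *p = try_map_at_hint((void *)0x700000000000, 1 << 20);

	printf("hint honoured: %s\n", p != NULL ? "yes" : "no");
	return (0);
}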
static void
pages_unmap(void *addr, size_t size)
{

#ifdef _WIN32
	if (VirtualFree(addr, 0, MEM_RELEASE) == 0)
#else
	if (munmap(addr, size) == -1)
#endif
	{
		char buf[BUFERROR_BUF];

		buferror(buf, sizeof(buf));
		malloc_printf("<jemalloc>: Error in "
#ifdef _WIN32
		              "VirtualFree"
#else
		              "munmap"
#endif
		              "(): %s\n", buf);
		if (opt_abort)
			abort();
	}
}
static void *
pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size)
{
	void *ret = (void *)((uintptr_t)addr + leadsize);

	assert(alloc_size >= leadsize + size);
#ifdef _WIN32
	{
		void *new_addr;

		pages_unmap(addr, alloc_size);
		new_addr = pages_map(ret, size);
		if (new_addr == ret)
			return (ret);
		if (new_addr)
			pages_unmap(new_addr, size);
		return (NULL);
	}
#else
	{
		size_t trailsize = alloc_size - leadsize - size;

		if (leadsize != 0)
			pages_unmap(addr, leadsize);
		if (trailsize != 0)
			pages_unmap((void *)((uintptr_t)ret + size), trailsize);
		return (ret);
	}
#endif
}
void
pages_purge(void *addr, size_t length)
{

#ifdef _WIN32
	VirtualAlloc(addr, length, MEM_RESET, PAGE_READWRITE);
#else
#  ifdef JEMALLOC_PURGE_MADVISE_DONTNEED
#    define JEMALLOC_MADV_PURGE MADV_DONTNEED
#  elif defined(JEMALLOC_PURGE_MADVISE_FREE)
#    define JEMALLOC_MADV_PURGE MADV_FREE
#  else
#    error "No method defined for purging unused dirty pages."
#  endif
	madvise(addr, length, JEMALLOC_MADV_PURGE);
#endif
}
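The effect of the madvise() purge path can be demonstrated on its own. A minimal sketch follows, not part of jemalloc; it assumes Linux semantics for MADV_DONTNEED on anonymous memory, where the mapping stays valid but the dirtied physical pages are dropped and read back as zeroes.

#include <string.h>
#include <stdio.h>
#include <sys/mman.h>

int
main(void)
{
	size_t len = 8 * 4096;
	unsigned char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANON, -1, 0);

	if (p == MAP_FAILED)
		return (1);
	memset(p, 0xab, len);		/* dirty the pages */

	/* Purge: the mapping stays usable, but the kernel may reclaim the
	 * physical pages; on Linux the next read returns zeroes. */
	madvise(p, len, MADV_DONTNEED);
	printf("first byte after purge: %u\n", p[0]);

	munmap(p, len);
	return (0);
}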
static void *
chunk_alloc_mmap_slow(size_t size, size_t alignment, bool *zero)
{
	void *ret, *pages;
	size_t alloc_size, leadsize;

	alloc_size = size + alignment - PAGE;
	/* Beware size_t wrap-around. */
	if (alloc_size < size)
		return (NULL);
	do {
		pages = pages_map(NULL, alloc_size);
		if (pages == NULL)
			return (NULL);
		leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment) -
		    (uintptr_t)pages;
		ret = pages_trim(pages, alloc_size, leadsize, size);
	} while (ret == NULL);

	assert(ret != NULL);
	*zero = true;
	return (ret);
}
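The "beware size_t wrap-around" guard above rejects requests whose over-sized allocation would overflow. A tiny standalone sketch with deliberately absurd, made-up sizes shows why comparing the sum against one of its operands is enough:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	size_t page = 4096;
	size_t size = SIZE_MAX - (1 << 20);	/* absurdly large request */
	size_t alignment = 4 << 20;
	size_t alloc_size = size + alignment - page;	/* wraps around */

	/* Same guard as chunk_alloc_mmap_slow(): a wrapped sum is smaller
	 * than the original size, so the request is rejected. */
	printf("overflowed: %s\n", alloc_size < size ? "yes" : "no");
	return (0);
}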
void *
chunk_alloc_mmap(size_t size, size_t alignment, bool *zero)
{
	void *ret;
	size_t offset;

	/*
	 * Ideally, there would be a way to specify alignment to mmap() (like
	 * NetBSD has), but in the absence of such a feature, we have to work
	 * hard to efficiently create aligned mappings.  The reliable, but
	 * slow method is to create a mapping that is over-sized, then trim
	 * the excess.  However, that always results in one or two calls to
	 * pages_unmap().
	 *
	 * Optimistically try mapping precisely the right amount before falling
	 * back to the slow method, with the expectation that the optimistic
	 * approach works most of the time.
	 */

	assert(alignment != 0);
	assert((alignment & chunksize_mask) == 0);

	ret = pages_map(NULL, size);
	if (ret == NULL)
		return (NULL);
	offset = ALIGNMENT_ADDR2OFFSET(ret, alignment);
	if (offset != 0) {
		pages_unmap(ret, size);
		return (chunk_alloc_mmap_slow(size, alignment, zero));
	}

	assert(ret != NULL);
	*zero = true;
	return (ret);
}
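The comment above describes the whole strategy: try an exact-size mapping first, and only over-allocate and trim when the kernel's placement is not already aligned. Below is a hedged, self-contained sketch of that same idea without jemalloc internals; the function name aligned_mmap is invented here, it over-allocates by a full `alignment` instead of jemalloc's `alignment - PAGE`, and it assumes a POSIX mmap().

#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>

/* Hypothetical standalone version of the optimistic-then-slow strategy. */
static void *
aligned_mmap(size_t size, size_t alignment)
{
	void *p = mmap(NULL, size, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANON, -1, 0);

	if (p == MAP_FAILED)
		return (NULL);
	if (((uintptr_t)p & (alignment - 1)) == 0)
		return (p);		/* optimistic path: already aligned */

	/* Slow path: map size + alignment bytes and trim the excess. */
	munmap(p, size);
	size_t alloc_size = size + alignment;
	char *base = mmap(NULL, alloc_size, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANON, -1, 0);
	if (base == MAP_FAILED)
		return (NULL);
	uintptr_t lead = (-(uintptr_t)base) & (alignment - 1);
	if (lead != 0)
		munmap(base, lead);			/* trim leading excess */
	size_t trail = alloc_size - lead - size;
	if (trail != 0)
		munmap(base + lead + size, trail);	/* trim trailing excess */
	return (base + lead);
}

int
main(void)
{
	void *chunk = aligned_mmap(4 << 20, 4 << 20);

	printf("chunk %p aligned: %s\n", chunk,
	    ((uintptr_t)chunk & ((4 << 20) - 1)) == 0 ? "yes" : "no");
	return (0);
}

As in the original, the fast path costs a single mmap() when it succeeds, while the fallback always pays for one extra mapping plus one or two unmaps.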
bool
chunk_dealloc_mmap(void *chunk, size_t size)
{

	if (config_munmap)
		pages_unmap(chunk, size);

	return (config_munmap == false);
}