/******************************************************************************/
/* Data. */
-#ifdef JEMALLOC_STATS
uint64_t huge_nmalloc;
uint64_t huge_ndalloc;
size_t huge_allocated;
-#endif
malloc_mutex_t huge_mtx;
void *
huge_malloc(size_t size, bool zero)
+{
+
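+	/* Huge allocations with no explicit alignment are chunk aligned. */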
+ return (huge_palloc(size, chunksize, zero));
+}
+
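+/*
+ * Allocate a huge region with the specified alignment; huge_malloc() routes
+ * through here with chunksize alignment.
+ */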
+void *
+huge_palloc(size_t size, size_t alignment, bool zero)
{
void *ret;
size_t csize;
extent_node_t *node;
+ bool is_zeroed;
/* Allocate one or more contiguous chunks for this request. */
if (node == NULL)
return (NULL);
- ret = chunk_alloc(csize, false, &zero);
+ /*
+ * Copy zero into is_zeroed and pass the copy to chunk_alloc(), so that
+ * it is possible to make correct junk/zero fill decisions below.
+ */
+ is_zeroed = zero;
+ ret = chunk_alloc(csize, alignment, false, &is_zeroed);
if (ret == NULL) {
base_node_dealloc(node);
return (NULL);
malloc_mutex_lock(&huge_mtx);
extent_tree_ad_insert(&huge, node);
-#ifdef JEMALLOC_STATS
- stats_cactive_add(csize);
- huge_nmalloc++;
- huge_allocated += csize;
-#endif
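+	/*
+	 * config_stats is a compile-time constant, so when stats support is
+	 * disabled the compiler can discard this block entirely.
+	 */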
+ if (config_stats) {
+ stats_cactive_add(csize);
+ huge_nmalloc++;
+ huge_allocated += csize;
+ }
malloc_mutex_unlock(&huge_mtx);
-#ifdef JEMALLOC_FILL
- if (zero == false) {
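+	/* Zeroing is skipped when chunk_alloc() already returned zeroed memory. */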
+ if (config_fill && zero == false) {
if (opt_junk)
memset(ret, 0xa5, csize);
- else if (opt_zero)
+ else if (opt_zero && is_zeroed == false)
memset(ret, 0, csize);
}
-#endif
-
- return (ret);
-}
-
-/* Only handles large allocations that require more than chunk alignment. */
-void *
-huge_palloc(size_t size, size_t alignment, bool zero)
-{
- void *ret;
- size_t alloc_size, chunk_size, offset;
- extent_node_t *node;
-
- /*
- * This allocation requires alignment that is even larger than chunk
- * alignment. This means that huge_malloc() isn't good enough.
- *
- * Allocate almost twice as many chunks as are demanded by the size or
- * alignment, in order to assure the alignment can be achieved, then
- * unmap leading and trailing chunks.
- */
- assert(alignment > chunksize);
-
- chunk_size = CHUNK_CEILING(size);
-
- if (size >= alignment)
- alloc_size = chunk_size + alignment - chunksize;
- else
- alloc_size = (alignment << 1) - chunksize;
-
- /* Allocate an extent node with which to track the chunk. */
- node = base_node_alloc();
- if (node == NULL)
- return (NULL);
-
- ret = chunk_alloc(alloc_size, false, &zero);
- if (ret == NULL) {
- base_node_dealloc(node);
- return (NULL);
- }
-
- offset = (uintptr_t)ret & (alignment - 1);
- assert((offset & chunksize_mask) == 0);
- assert(offset < alloc_size);
- if (offset == 0) {
- /* Trim trailing space. */
- chunk_dealloc((void *)((uintptr_t)ret + chunk_size), alloc_size
- - chunk_size, true);
- } else {
- size_t trailsize;
-
- /* Trim leading space. */
- chunk_dealloc(ret, alignment - offset, true);
-
- ret = (void *)((uintptr_t)ret + (alignment - offset));
-
- trailsize = alloc_size - (alignment - offset) - chunk_size;
- if (trailsize != 0) {
- /* Trim trailing space. */
- assert(trailsize < alloc_size);
- chunk_dealloc((void *)((uintptr_t)ret + chunk_size),
- trailsize, true);
- }
- }
-
- /* Insert node into huge. */
- node->addr = ret;
- node->size = chunk_size;
-
- malloc_mutex_lock(&huge_mtx);
- extent_tree_ad_insert(&huge, node);
-#ifdef JEMALLOC_STATS
- stats_cactive_add(chunk_size);
- huge_nmalloc++;
- huge_allocated += chunk_size;
-#endif
- malloc_mutex_unlock(&huge_mtx);
-
-#ifdef JEMALLOC_FILL
- if (zero == false) {
- if (opt_junk)
- memset(ret, 0xa5, chunk_size);
- else if (opt_zero)
- memset(ret, 0, chunk_size);
- }
-#endif
return (ret);
}
&& CHUNK_CEILING(oldsize) >= CHUNK_CEILING(size)
&& CHUNK_CEILING(oldsize) <= CHUNK_CEILING(size+extra)) {
assert(CHUNK_CEILING(oldsize) == oldsize);
-#ifdef JEMALLOC_FILL
- if (opt_junk && size < oldsize) {
+ if (config_fill && opt_junk && size < oldsize) {
memset((void *)((uintptr_t)ptr + size), 0x5a,
oldsize - size);
}
-#endif
return (ptr);
}
*/
copysize = (size < oldsize) ? size : oldsize;
+#ifdef JEMALLOC_MREMAP
/*
* Use mremap(2) if this is a huge-->huge reallocation, and neither the
- * source nor the destination are in swap or dss.
+	 * source nor the destination is in dss.
*/
-#ifdef JEMALLOC_MREMAP_FIXED
- if (oldsize >= chunksize
-# ifdef JEMALLOC_SWAP
- && (swap_enabled == false || (chunk_in_swap(ptr) == false &&
- chunk_in_swap(ret) == false))
-# endif
-# ifdef JEMALLOC_DSS
- && chunk_in_dss(ptr) == false && chunk_in_dss(ret) == false
-# endif
- ) {
+ if (oldsize >= chunksize && (config_dss == false || (chunk_in_dss(ptr)
+ == false && chunk_in_dss(ret) == false))) {
size_t newsize = huge_salloc(ret);
/*
*/
char buf[BUFERROR_BUF];
- buferror(errno, buf, sizeof(buf));
- malloc_write("<jemalloc>: Error in mremap(): ");
- malloc_write(buf);
- malloc_write("\n");
+ buferror(buf, sizeof(buf));
+ malloc_printf("<jemalloc>: Error in mremap(): %s\n",
+ buf);
if (opt_abort)
abort();
memcpy(ret, ptr, copysize);
#endif
{
memcpy(ret, ptr, copysize);
- idalloc(ptr);
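+		/*
+		 * iqalloc() rather than idalloc(), so that the old region is
+		 * quarantined when quarantine support is enabled.
+		 */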
+ iqalloc(ptr);
}
return (ret);
}
assert(node->addr == ptr);
extent_tree_ad_remove(&huge, node);
-#ifdef JEMALLOC_STATS
- stats_cactive_sub(node->size);
- huge_ndalloc++;
- huge_allocated -= node->size;
-#endif
+ if (config_stats) {
+ stats_cactive_sub(node->size);
+ huge_ndalloc++;
+ huge_allocated -= node->size;
+ }
malloc_mutex_unlock(&huge_mtx);
- if (unmap) {
- /* Unmap chunk. */
-#ifdef JEMALLOC_FILL
-#if (defined(JEMALLOC_SWAP) || defined(JEMALLOC_DSS))
- if (opt_junk)
- memset(node->addr, 0x5a, node->size);
-#endif
-#endif
- }
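+	/*
+	 * Junk fill here is only useful if the memory may be recycled rather
+	 * than unmapped; the config_dss check reflects that DSS chunks can
+	 * never be unmapped.
+	 */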
+ if (unmap && config_fill && config_dss && opt_junk)
+ memset(node->addr, 0x5a, node->size);
chunk_dealloc(node->addr, node->size, unmap);
return (ret);
}
-#ifdef JEMALLOC_PROF
prof_ctx_t *
huge_prof_ctx_get(const void *ptr)
{
malloc_mutex_unlock(&huge_mtx);
}
-#endif
bool
huge_boot(void)
return (true);
extent_tree_ad_new(&huge);
-#ifdef JEMALLOC_STATS
- huge_nmalloc = 0;
- huge_ndalloc = 0;
- huge_allocated = 0;
-#endif
+ if (config_stats) {
+ huge_nmalloc = 0;
+ huge_ndalloc = 0;
+ huge_allocated = 0;
+ }
return (false);
}
+
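+/*
+ * Fork hooks: hold huge_mtx across fork(2) so that the child does not inherit
+ * a mutex locked by some other thread.
+ */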
+void
+huge_prefork(void)
+{
+
+ malloc_mutex_prefork(&huge_mtx);
+}
+
+void
+huge_postfork_parent(void)
+{
+
+ malloc_mutex_postfork_parent(&huge_mtx);
+}
+
+void
+huge_postfork_child(void)
+{
+
+ malloc_mutex_postfork_child(&huge_mtx);
+}