X-Git-Url: https://git.saurik.com/redis.git/blobdiff_plain/aaada3f962a9f87fb239e55e3d26c1e794d411d5..6c9897f6cfdd3d9dbf73d09a9f8a9c5706020717:/src/zmalloc.c

diff --git a/src/zmalloc.c b/src/zmalloc.c
index 5c1b5e9a..1f8c7f08 100644
--- a/src/zmalloc.c
+++ b/src/zmalloc.c
@@ -30,35 +30,76 @@
 #include <stdio.h>
 #include <stdlib.h>
+
+/* This function provide us access to the original libc free(). This is useful
+ * for instance to free results obtained by backtrace_symbols(). We need
+ * to define this function before including zmalloc.h that may shadow the
+ * free implementation if we use jemalloc or another non standard allocator. */
+void zlibc_free(void *ptr) {
+    free(ptr);
+}
+
 #include <string.h>
 #include <pthread.h>
 #include "config.h"
+#include "zmalloc.h"
 
-#if defined(__sun)
-#define PREFIX_SIZE sizeof(long long)
+#ifdef HAVE_MALLOC_SIZE
+#define PREFIX_SIZE (0)
+#else
+#if defined(__sun) || defined(__sparc) || defined(__sparc__)
+#define PREFIX_SIZE (sizeof(long long))
 #else
-#define PREFIX_SIZE sizeof(size_t)
+#define PREFIX_SIZE (sizeof(size_t))
+#endif
 #endif
 
-#define increment_used_memory(__n) do { \
+/* Explicitly override malloc/free etc when using tcmalloc. */
+#if defined(USE_TCMALLOC)
+#define malloc(size) tc_malloc(size)
+#define calloc(count,size) tc_calloc(count,size)
+#define realloc(ptr,size) tc_realloc(ptr,size)
+#define free(ptr) tc_free(ptr)
+#elif defined(USE_JEMALLOC)
+#define malloc(size) je_malloc(size)
+#define calloc(count,size) je_calloc(count,size)
+#define realloc(ptr,size) je_realloc(ptr,size)
+#define free(ptr) je_free(ptr)
+#endif
+
+#ifdef HAVE_ATOMIC
+#define update_zmalloc_stat_add(__n) __sync_add_and_fetch(&used_memory, (__n))
+#define update_zmalloc_stat_sub(__n) __sync_sub_and_fetch(&used_memory, (__n))
+#else
+#define update_zmalloc_stat_add(__n) do { \
+    pthread_mutex_lock(&used_memory_mutex); \
+    used_memory += (__n); \
+    pthread_mutex_unlock(&used_memory_mutex); \
+} while(0)
+
+#define update_zmalloc_stat_sub(__n) do { \
+    pthread_mutex_lock(&used_memory_mutex); \
+    used_memory -= (__n); \
+    pthread_mutex_unlock(&used_memory_mutex); \
+} while(0)
+
+#endif
+
+#define update_zmalloc_stat_alloc(__n) do { \
     size_t _n = (__n); \
     if (_n&(sizeof(long)-1)) _n += sizeof(long)-(_n&(sizeof(long)-1)); \
     if (zmalloc_thread_safe) { \
-        pthread_mutex_lock(&used_memory_mutex); \
-        used_memory += _n; \
-        pthread_mutex_unlock(&used_memory_mutex); \
+        update_zmalloc_stat_add(_n); \
     } else { \
         used_memory += _n; \
     } \
 } while(0)
 
-#define decrement_used_memory(__n) do { \
+#define update_zmalloc_stat_free(__n) do { \
     size_t _n = (__n); \
     if (_n&(sizeof(long)-1)) _n += sizeof(long)-(_n&(sizeof(long)-1)); \
     if (zmalloc_thread_safe) { \
-        pthread_mutex_lock(&used_memory_mutex); \
-        used_memory -= _n; \
-        pthread_mutex_unlock(&used_memory_mutex); \
+        update_zmalloc_stat_sub(_n); \
     } else { \
         used_memory -= _n; \
     } \
@@ -68,23 +109,25 @@ static size_t used_memory = 0;
 static int zmalloc_thread_safe = 0;
 pthread_mutex_t used_memory_mutex = PTHREAD_MUTEX_INITIALIZER;
 
-static void zmalloc_oom(size_t size) {
+static void zmalloc_default_oom(size_t size) {
     fprintf(stderr, "zmalloc: Out of memory trying to allocate %zu bytes\n",
         size);
     fflush(stderr);
     abort();
 }
 
+static void (*zmalloc_oom_handler)(size_t) = zmalloc_default_oom;
+
 void *zmalloc(size_t size) {
     void *ptr = malloc(size+PREFIX_SIZE);
 
-    if (!ptr) zmalloc_oom(size);
+    if (!ptr) zmalloc_oom_handler(size);
 #ifdef HAVE_MALLOC_SIZE
-    increment_used_memory(redis_malloc_size(ptr));
+    update_zmalloc_stat_alloc(zmalloc_size(ptr));
     return ptr;
 #else
     *((size_t*)ptr) = size;
-    increment_used_memory(size+PREFIX_SIZE);
+    update_zmalloc_stat_alloc(size+PREFIX_SIZE);
     return (char*)ptr+PREFIX_SIZE;
 #endif
 }
@@ -92,13 +135,13 @@ void *zmalloc(size_t size) {
 void *zcalloc(size_t size) {
     void *ptr = calloc(1, size+PREFIX_SIZE);
 
-    if (!ptr) zmalloc_oom(size);
+    if (!ptr) zmalloc_oom_handler(size);
 #ifdef HAVE_MALLOC_SIZE
-    increment_used_memory(redis_malloc_size(ptr));
+    update_zmalloc_stat_alloc(zmalloc_size(ptr));
     return ptr;
 #else
     *((size_t*)ptr) = size;
-    increment_used_memory(size+PREFIX_SIZE);
+    update_zmalloc_stat_alloc(size+PREFIX_SIZE);
     return (char*)ptr+PREFIX_SIZE;
 #endif
 }
@@ -112,26 +155,40 @@ void *zrealloc(void *ptr, size_t size) {
 
     if (ptr == NULL) return zmalloc(size);
 #ifdef HAVE_MALLOC_SIZE
-    oldsize = redis_malloc_size(ptr);
+    oldsize = zmalloc_size(ptr);
     newptr = realloc(ptr,size);
-    if (!newptr) zmalloc_oom(size);
+    if (!newptr) zmalloc_oom_handler(size);
 
-    decrement_used_memory(oldsize);
-    increment_used_memory(redis_malloc_size(newptr));
+    update_zmalloc_stat_free(oldsize);
+    update_zmalloc_stat_alloc(zmalloc_size(newptr));
     return newptr;
 #else
     realptr = (char*)ptr-PREFIX_SIZE;
     oldsize = *((size_t*)realptr);
     newptr = realloc(realptr,size+PREFIX_SIZE);
-    if (!newptr) zmalloc_oom(size);
+    if (!newptr) zmalloc_oom_handler(size);
 
     *((size_t*)newptr) = size;
-    decrement_used_memory(oldsize);
-    increment_used_memory(size);
+    update_zmalloc_stat_free(oldsize);
+    update_zmalloc_stat_alloc(size);
     return (char*)newptr+PREFIX_SIZE;
 #endif
 }
 
+/* Provide zmalloc_size() for systems where this function is not provided by
+ * malloc itself, given that in that case we store an header with this
+ * information as the first bytes of every allocation. */
+#ifndef HAVE_MALLOC_SIZE
+size_t zmalloc_size(void *ptr) {
+    void *realptr = (char*)ptr-PREFIX_SIZE;
+    size_t size = *((size_t*)realptr);
+    /* Assume at least that all the allocations are padded at sizeof(long) by
+     * the underlying allocator. */
+    if (size&(sizeof(long)-1)) size += sizeof(long)-(size&(sizeof(long)-1));
+    return size+PREFIX_SIZE;
+}
+#endif
+
 void zfree(void *ptr) {
 #ifndef HAVE_MALLOC_SIZE
     void *realptr;
@@ -140,12 +197,12 @@ void zfree(void *ptr) {
 
     if (ptr == NULL) return;
 #ifdef HAVE_MALLOC_SIZE
-    decrement_used_memory(redis_malloc_size(ptr));
+    update_zmalloc_stat_free(zmalloc_size(ptr));
     free(ptr);
 #else
     realptr = (char*)ptr-PREFIX_SIZE;
     oldsize = *((size_t*)realptr);
-    decrement_used_memory(oldsize+PREFIX_SIZE);
+    update_zmalloc_stat_free(oldsize+PREFIX_SIZE);
     free(realptr);
 #endif
 }
@@ -161,12 +218,134 @@ char *zstrdup(const char *s) {
 size_t zmalloc_used_memory(void) {
     size_t um;
 
-    if (zmalloc_thread_safe) pthread_mutex_lock(&used_memory_mutex);
-    um = used_memory;
-    if (zmalloc_thread_safe) pthread_mutex_unlock(&used_memory_mutex);
+    if (zmalloc_thread_safe) {
+#ifdef HAVE_ATOMIC
+        um = __sync_add_and_fetch(&used_memory, 0);
+#else
+        pthread_mutex_lock(&used_memory_mutex);
+        um = used_memory;
+        pthread_mutex_unlock(&used_memory_mutex);
+#endif
+    }
+    else {
+        um = used_memory;
+    }
+
     return um;
 }
 
 void zmalloc_enable_thread_safeness(void) {
     zmalloc_thread_safe = 1;
 }
+
+void zmalloc_set_oom_handler(void (*oom_handler)(size_t)) {
+    zmalloc_oom_handler = oom_handler;
+}
+
+/* Get the RSS information in an OS-specific way.
+ *
+ * WARNING: the function zmalloc_get_rss() is not designed to be fast
+ * and may not be called in the busy loops where Redis tries to release
+ * memory expiring or swapping out objects.
+ *
+ * For this kind of "fast RSS reporting" usages use instead the
+ * function RedisEstimateRSS() that is a much faster (and less precise)
+ * version of the funciton. */
+
+#if defined(HAVE_PROCFS)
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+
+size_t zmalloc_get_rss(void) {
+    int page = sysconf(_SC_PAGESIZE);
+    size_t rss;
+    char buf[4096];
+    char filename[256];
+    int fd, count;
+    char *p, *x;
+
+    snprintf(filename,256,"/proc/%d/stat",getpid());
+    if ((fd = open(filename,O_RDONLY)) == -1) return 0;
+    if (read(fd,buf,4096) <= 0) {
+        close(fd);
+        return 0;
+    }
+    close(fd);
+
+    p = buf;
+    count = 23; /* RSS is the 24th field in /proc/<pid>/stat */
+    while(p && count--) {
+        p = strchr(p,' ');
+        if (p) p++;
+    }
+    if (!p) return 0;
+    x = strchr(p,' ');
+    if (!x) return 0;
+    *x = '\0';
+
+    rss = strtoll(p,NULL,10);
+    rss *= page;
+    return rss;
+}
+#elif defined(HAVE_TASKINFO)
+#include <unistd.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/types.h>
+#include <sys/sysctl.h>
+#include <mach/task.h>
+#include <mach/mach_init.h>
+
+size_t zmalloc_get_rss(void) {
+    task_t task = MACH_PORT_NULL;
+    struct task_basic_info t_info;
+    mach_msg_type_number_t t_info_count = TASK_BASIC_INFO_COUNT;
+
+    if (task_for_pid(current_task(), getpid(), &task) != KERN_SUCCESS)
+        return 0;
+    task_info(task, TASK_BASIC_INFO, (task_info_t)&t_info, &t_info_count);
+
+    return t_info.resident_size;
+}
+#else
+size_t zmalloc_get_rss(void) {
+    /* If we can't get the RSS in an OS-specific way for this system just
+     * return the memory usage we estimated in zmalloc()..
+     *
+     * Fragmentation will appear to be always 1 (no fragmentation)
+     * of course... */
+    return zmalloc_used_memory();
+}
+#endif
+
+/* Fragmentation = RSS / allocated-bytes */
+float zmalloc_get_fragmentation_ratio(void) {
+    return (float)zmalloc_get_rss()/zmalloc_used_memory();
+}
+
+#if defined(HAVE_PROCFS)
+size_t zmalloc_get_private_dirty(void) {
+    char line[1024];
+    size_t pd = 0;
+    FILE *fp = fopen("/proc/self/smaps","r");
+
+    if (!fp) return 0;
+    while(fgets(line,sizeof(line),fp) != NULL) {
+        if (strncmp(line,"Private_Dirty:",14) == 0) {
+            char *p = strchr(line,'k');
+            if (p) {
+                *p = '\0';
+                pd += strtol(line+14,NULL,10) * 1024;
+            }
+        }
+    }
+    fclose(fp);
+    return pd;
+}
+#else
+size_t zmalloc_get_private_dirty(void) {
+    return 0;
+}
+#endif
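A few usage notes on the interfaces this patch introduces follow. The snippets are minimal editorial sketches written against the public zmalloc.h API, not code taken from the patch itself.

The new zlibc_free() exists because, once malloc/free are redefined to the tc_*/je_* symbols above, there is no direct way left to release memory that libc itself allocated, such as the array returned by backtrace_symbols(). A sketch of that use case (it assumes zmalloc.h declares zlibc_free(), as the comment in the patch implies it must be visible to callers):

    /* Memory returned by backtrace_symbols() comes from libc's malloc, so it
     * must go back to libc's free(), not to a tcmalloc/jemalloc free(). */
    #include <execinfo.h>
    #include <stdio.h>
    #include "zmalloc.h"

    void print_backtrace(void) {
        void *frames[32];
        char **symbols;
        int i, n;

        n = backtrace(frames, 32);
        symbols = backtrace_symbols(frames, n);
        if (symbols == NULL) return;
        for (i = 0; i < n; i++) printf("%s\n", symbols[i]);
        zlibc_free(symbols);   /* libc-allocated, so use the libc free wrapper */
    }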
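Every allocation is either measured through the allocator's own malloc_size facility (HAVE_MALLOC_SIZE, PREFIX_SIZE of 0) or carries a PREFIX_SIZE header holding the requested size, which is what zmalloc_size() reads back. From the caller's side the net effect is that used memory is tracked. A minimal sketch, assuming zmalloc.h exposes these functions; the printed values depend on PREFIX_SIZE and the sizeof(long) rounding, so they are illustrative only:

    /* Observe zmalloc's bookkeeping from the caller side. */
    #include <stdio.h>
    #include "zmalloc.h"

    int main(void) {
        size_t before = zmalloc_used_memory();
        char *p = zmalloc(100);                 /* 100 bytes + header, rounded */

        printf("usable size: %zu\n", zmalloc_size(p));            /* >= 100 */
        printf("tracked delta: %zu\n", zmalloc_used_memory() - before);

        zfree(p);                               /* accounting drops back */
        printf("after free: %zu\n", zmalloc_used_memory() - before);
        return 0;
    }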
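The hard-coded zmalloc_oom() is replaced by a pluggable handler installed with zmalloc_set_oom_handler(). Note that zmalloc()/zcalloc()/zrealloc() do not re-check the pointer after calling the handler, so a custom handler must not return unless it can somehow make memory available; in practice it should log and terminate. A sketch, assuming the companion zmalloc.h change declares the setter (the message text here is invented for illustration):

    #include <stdio.h>
    #include <stdlib.h>
    #include "zmalloc.h"

    /* Custom out-of-memory handler: log and exit instead of abort(). */
    static void my_oom_handler(size_t size) {
        fprintf(stderr, "allocation of %zu bytes failed, shutting down\n", size);
        exit(1);
    }

    int main(void) {
        zmalloc_set_oom_handler(my_oom_handler);
        /* ... use zmalloc()/zrealloc() as usual ... */
        return 0;
    }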
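Finally, the RSS helpers added at the bottom make it possible to compare what the allocator has handed out with what the operating system keeps resident; zmalloc_get_fragmentation_ratio() is simply RSS divided by the tracked bytes. A sketch of reading these numbers, assuming the getters are declared in zmalloc.h; on platforms with neither HAVE_PROCFS nor HAVE_TASKINFO, zmalloc_get_rss() falls back to zmalloc_used_memory() and the ratio degenerates to 1.0:

    #include <stdio.h>
    #include "zmalloc.h"

    int main(void) {
        void *p = zmalloc(1024*1024);   /* make sure something is tracked */

        printf("rss=%zu used=%zu fragmentation=%.2f\n",
               zmalloc_get_rss(),
               zmalloc_used_memory(),
               zmalloc_get_fragmentation_ratio());

        zfree(p);
        return 0;
    }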