From: Salvatore Sanfilippo
Date: Mon, 23 Apr 2012 09:07:21 +0000 (-0700)
Subject: Merge pull request #461 from schlenk/unstable
X-Git-Url: https://git.saurik.com/redis.git/commitdiff_plain/c22e0eecd5624d498ec6ee10f6b449ec5ff28c8e?hp=875944a23f37916a419bcf4b69e3b255ef093cde

Merge pull request #461 from schlenk/unstable

Replace some unnecessary calls to echo and cat in tests
---
diff --git a/README b/README
index bba2439c..1c3f5746 100644
--- a/README
+++ b/README
@@ -7,6 +7,13 @@ documentation at http://redis.io
 Building Redis
 --------------
 
+Redis can be compiled and used on Linux, OSX, OpenBSD, NetBSD, FreeBSD.
+We support big endian and little endian architectures.
+
+It may compile on Solaris derived systems (for instance SmartOS) but our
+support for this platform is "best effort" and Redis is not guaranteed to
+work as well as in Linux, OSX, and *BSD there.
+
 It is as simple as:
 
     % make
diff --git a/redis.conf b/redis.conf
index d7d29303..ed0e2f0e 100644
--- a/redis.conf
+++ b/redis.conf
@@ -298,21 +298,23 @@ slave-read-only yes
 
 ############################## APPEND ONLY MODE ###############################
 
-# By default Redis asynchronously dumps the dataset on disk. If you can live
-# with the idea that the latest records will be lost if something like a crash
-# happens this is the preferred way to run Redis. If instead you care a lot
-# about your data and don't want to that a single record can get lost you should
-# enable the append only mode: when this mode is enabled Redis will append
-# every write operation received in the file appendonly.aof. This file will
-# be read on startup in order to rebuild the full dataset in memory.
-#
-# Note that you can have both the async dumps and the append only file if you
-# like (you have to comment the "save" statements above to disable the dumps).
-# Still if append only mode is enabled Redis will load the data from the
-# log file at startup ignoring the dump.rdb file.
-#
-# IMPORTANT: Check the BGREWRITEAOF to check how to rewrite the append
-# log file in background when it gets too big.
+# By default Redis asynchronously dumps the dataset on disk. This mode is
+# good enough in many applications, but an issue with the Redis process or
+# a power outage may result into a few minutes of writes lost (depending on
+# the configured save points).
+#
+# The Append Only File is an alternative persistence mode that provides
+# much better durability. For instance using the default data fsync policy
+# (see later in the config file) Redis can lose just one second of writes in a
+# dramatic event like a server power outage, or a single write if something
+# wrong with the Redis process itself happens, but the operating system is
+# still running correctly.
+#
+# AOF and RDB persistence can be enabled at the same time without problems.
+# If the AOF is enabled on startup Redis will load the AOF, that is the file
+# with the better durability guarantees.
+#
+# Please check http://redis.io/topics/persistence for more information.
 
 appendonly no
 
@@ -327,7 +329,7 @@ appendonly no
 #
 # no: don't fsync, just let the OS flush the data when it wants. Faster.
 # always: fsync after every write to the append only log . Slow, Safest.
-# everysec: fsync only if one second passed since the last fsync. Compromise.
+# everysec: fsync only one time every second. Compromise.
 #
 # The default is "everysec" that's usually the right compromise between
 # speed and data safety. It's up to you to understand if you can relax this to
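The "everysec" policy touched by the hunk above boils down to letting the OS buffer the appended commands and issuing at most one fsync(2) per second, so a crash loses roughly the last second of writes. The standalone sketch below only illustrates that idea; it is not the aof.c implementation, and the file name and loop are invented for the example:

    /* Rough illustration of the "everysec" fsync policy (not Redis source). */
    #include <fcntl.h>
    #include <string.h>
    #include <time.h>
    #include <unistd.h>

    int main(void) {
        int fd = open("appendonly.demo", O_WRONLY|O_CREAT|O_APPEND, 0644);
        time_t last_fsync = 0;

        if (fd == -1) return 1;
        for (int i = 0; i < 10; i++) {
            const char *entry = "SET key value\r\n";  /* stand-in for an AOF record */
            write(fd, entry, strlen(entry));          /* lands in the OS page cache */
            if (time(NULL) != last_fsync) {           /* at most one fsync per second */
                fsync(fd);
                last_fsync = time(NULL);
            }
            sleep(1);
        }
        close(fd);
        return 0;
    }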
@@ -337,6 +339,9 @@ appendonly no
 # or on the contrary, use "always" that's very slow but a bit safer than
 # everysec.
 #
+# More details please check the following article:
+# http://antirez.com/post/redis-persistence-demystified.html
+#
 # If unsure, use "everysec".
 
 # appendfsync always
@@ -442,7 +447,7 @@ slowlog-log-slower-than 10000
 
 # There is no limit to this length. Just be aware that it will consume memory.
 # You can reclaim memory used by the slow log with SLOWLOG RESET.
-slowlog-max-len 1024
+slowlog-max-len 128
 
 ############################### ADVANCED CONFIG ###############################
 
diff --git a/src/.gitignore b/src/.gitignore
new file mode 100644
index 00000000..aee7aacf
--- /dev/null
+++ b/src/.gitignore
@@ -0,0 +1,5 @@
+*.gcda
+*.gcno
+*.gcov
+redis.info
+lcov-html
diff --git a/src/Makefile b/src/Makefile
index 7d858561..a0913688 100644
--- a/src/Makefile
+++ b/src/Makefile
@@ -2,12 +2,12 @@
 # Copyright (C) 2009 Salvatore Sanfilippo
 # This file is released under the BSD license, see the COPYING file
 #
-# The Makefile composes the final REDIS_CFLAGS and REDIS_LDFLAGS using
+# The Makefile composes the final FINAL_CFLAGS and FINAL_LDFLAGS using
 # what is needed for Redis plus the standard CFLAGS and LDFLAGS passed.
 # However when building the dependencies (Jemalloc, Lua, Hiredis, ...)
 # CFLAGS and LDFLAGS are propagated to the dependencies, so to pass
-# flags only to be used when compiling / linking Redis itself ADD_CFLAGS
-# and ADD_LDFLAGS are used instead (this is the case of 'make gcov').
+# flags only to be used when compiling / linking Redis itself REDIS_CFLAGS
+# and REDIS_LDFLAGS are used instead (this is the case of 'make gcov').
 #
 # Dependencies are stored in the Makefile.dep file. To rebuild this file
 # Just use 'make dep', but this is only needed by developers.
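After the renaming documented in the comment above, flags meant only for the Redis binaries go through REDIS_CFLAGS / REDIS_LDFLAGS, while plain CFLAGS / LDFLAGS are also propagated to the bundled dependencies. Typical invocations look like this (the -D value is a placeholder, not a flag the tree defines):

    % make                                  # regular build
    % make REDIS_CFLAGS="-DMY_TEST_FLAG"    # extra flags for Redis only
    % make CFLAGS="-O0"                     # also reaches Jemalloc, Lua, Hiredis

The 'make gcov' target further down uses exactly this pattern, re-invoking make with REDIS_CFLAGS and REDIS_LDFLAGS set to the coverage flags.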
@@ -17,30 +17,16 @@ uname_S := $(shell sh -c 'uname -s 2>/dev/null || echo not')
 OPTIMIZATION?=-O2
 DEPENDENCY_TARGETS=hiredis linenoise lua
 
+# Default settings
 STD= -std=c99 -pedantic
 WARN= -Wall
 OPT= $(OPTIMIZATION)
 
-ifeq ($(uname_S),SunOS)
- REDIS_CFLAGS= $(STD) $(WARN) $(OPT) $(DEBUG) $(CFLAGS) $(ADD_CFLAGS) -D__EXTENSIONS__ -D_XPG6
- REDIS_LDFLAGS= $(LDFLAGS) $(ADD_LDFLAGS)
- REDIS_LIBS= $(LIBS) -ldl -lnsl -lsocket -lm -lpthread
- DEBUG= -g -ggdb
-else
- REDIS_CFLAGS= $(STD) $(WARN) $(OPT) $(DEBUG) $(CFLAGS) $(ADD_CFLAGS)
- REDIS_LDFLAGS= $(LDFLAGS) $(ADD_LDFLAGS)
- REDIS_LIBS= $(LIBS) -lm -pthread
- DEBUG= -g -rdynamic -ggdb
-endif
-
-# Include paths to dependencies
-REDIS_CFLAGS+= -I../deps/hiredis -I../deps/linenoise -I../deps/lua/src
-
 # Default allocator
 ifeq ($(uname_S),Linux)
- MALLOC?=jemalloc
+ MALLOC=jemalloc
 else
- MALLOC?=libc
+ MALLOC=libc
 endif
 
 # Backwards compatibility for selecting an allocator
@@ -56,24 +42,42 @@ ifeq ($(USE_JEMALLOC),yes)
   MALLOC=jemalloc
 endif
 
+# Override default settings if possible
+-include .make-settings
+
+ifeq ($(uname_S),SunOS)
+ FINAL_CFLAGS= $(STD) $(WARN) $(OPT) $(DEBUG) $(CFLAGS) $(REDIS_CFLAGS) -D__EXTENSIONS__ -D_XPG6
+ FINAL_LDFLAGS= $(LDFLAGS) $(REDIS_LDFLAGS)
+ FINAL_LIBS= -ldl -lnsl -lsocket -lm -lpthread
+ DEBUG= -g -ggdb
+else
+ FINAL_CFLAGS= $(STD) $(WARN) $(OPT) $(DEBUG) $(CFLAGS) $(REDIS_CFLAGS)
+ FINAL_LDFLAGS= $(LDFLAGS) $(REDIS_LDFLAGS)
+ FINAL_LIBS= -lm -pthread
+ DEBUG= -g -rdynamic -ggdb
+endif
+
+# Include paths to dependencies
+FINAL_CFLAGS+= -I../deps/hiredis -I../deps/linenoise -I../deps/lua/src
+
 ifeq ($(MALLOC),tcmalloc)
- REDIS_CFLAGS+= -DUSE_TCMALLOC
- REDIS_LIBS+= -ltcmalloc
+ FINAL_CFLAGS+= -DUSE_TCMALLOC
+ FINAL_LIBS+= -ltcmalloc
 endif
 
 ifeq ($(MALLOC),tcmalloc_minimal)
- REDIS_CFLAGS+= -DUSE_TCMALLOC
- REDIS_LIBS+= -ltcmalloc_minimal
+ FINAL_CFLAGS+= -DUSE_TCMALLOC
+ FINAL_LIBS+= -ltcmalloc_minimal
 endif
 
 ifeq ($(MALLOC),jemalloc)
   DEPENDENCY_TARGETS+= jemalloc
- REDIS_CFLAGS+= -DUSE_JEMALLOC -I../deps/jemalloc/include
- REDIS_LIBS+= ../deps/jemalloc/lib/libjemalloc.a -ldl
+ FINAL_CFLAGS+= -DUSE_JEMALLOC -I../deps/jemalloc/include
+ FINAL_LIBS+= ../deps/jemalloc/lib/libjemalloc.a -ldl
 endif
 
-REDIS_CC=$(QUIET_CC)$(CC) $(REDIS_CFLAGS)
-REDIS_LD=$(QUIET_LINK)$(CC) $(REDIS_LDFLAGS)
+REDIS_CC=$(QUIET_CC)$(CC) $(FINAL_CFLAGS)
+REDIS_LD=$(QUIET_LINK)$(CC) $(FINAL_LDFLAGS)
 
 PREFIX= /usr/local
 INSTALL_BIN= $(PREFIX)/bin
@@ -117,51 +121,53 @@ dep:
 
 .PHONY: dep
 
+persist-settings: distclean
+	echo STD=$(STD) >> .make-settings
+	echo WARN=$(WARN) >> .make-settings
+	echo OPT=$(OPT) >> .make-settings
+	echo MALLOC=$(MALLOC) >> .make-settings
+	echo CFLAGS=$(CFLAGS) >> .make-settings
+	echo LDFLAGS=$(LDFLAGS) >> .make-settings
+	echo REDIS_CFLAGS=$(REDIS_CFLAGS) >> .make-settings
+	echo REDIS_LDFLAGS=$(REDIS_LDFLAGS) >> .make-settings
+	echo PREV_FINAL_CFLAGS=$(FINAL_CFLAGS) >> .make-settings
+	echo PREV_FINAL_LDFLAGS=$(FINAL_LDFLAGS) >> .make-settings
+	-(cd ../deps && $(MAKE) $(DEPENDENCY_TARGETS))
+
+.PHONY: persist-settings
+
 # Prerequisites target
 .make-prerequisites:
 	@touch $@
 
-# Clean local objects and build dependencies when REDIS_CFLAGS is different
-ifneq ($(shell sh -c '[ -f .make-cflags ] && cat .make-cflags || echo none'), $(REDIS_CFLAGS))
-.make-cflags: clean
-	-(cd ../deps && $(MAKE) $(DEPENDENCY_TARGETS))
-	-(echo "$(REDIS_CFLAGS)" > .make-cflags)
-.make-prerequisites: .make-cflags
-endif
-
-# Clean local objects when REDIS_LDFLAGS is different
-ifneq ($(shell sh -c '[ -f .make-ldflags ] && cat .make-ldflags || echo none'), $(REDIS_LDFLAGS))
-.make-ldflags: clean
-	-(echo "$(REDIS_LDFLAGS)" > .make-ldflags)
-.make-prerequisites: .make-ldflags
+# Clean everything, persist settings and build dependencies if anything changed
+ifneq ($(strip $(PREV_FINAL_CFLAGS)), $(strip $(FINAL_CFLAGS)))
+.make-prerequisites: persist-settings
 endif
 
-# Clean local objects when MALLOC is different
-ifneq ($(shell sh -c '[ -f .make-malloc ] && cat .make-malloc || echo none'), $(MALLOC))
-.make-malloc: clean
-	-(echo "$(MALLOC)" > .make-malloc)
-.make-prerequisites: .make-malloc
+ifneq ($(strip $(PREV_FINAL_LDFLAGS)), $(strip $(FINAL_LDFLAGS)))
+.make-prerequisites: persist-settings
 endif
 
 # redis-server
 $(REDIS_SERVER_NAME): $(REDIS_SERVER_OBJ)
-	$(REDIS_LD) -o $@ $^ ../deps/lua/src/liblua.a $(REDIS_LIBS)
+	$(REDIS_LD) -o $@ $^ ../deps/lua/src/liblua.a $(FINAL_LIBS)
 
 # redis-cli
 $(REDIS_CLI_NAME): $(REDIS_CLI_OBJ)
-	$(REDIS_LD) -o $@ $^ ../deps/hiredis/libhiredis.a ../deps/linenoise/linenoise.o $(REDIS_LIBS)
+	$(REDIS_LD) -o $@ $^ ../deps/hiredis/libhiredis.a ../deps/linenoise/linenoise.o $(FINAL_LIBS)
 
 # redis-benchmark
 $(REDIS_BENCHMARK_NAME): $(REDIS_BENCHMARK_OBJ)
-	$(REDIS_LD) -o $@ $^ ../deps/hiredis/libhiredis.a $(REDIS_LIBS)
+	$(REDIS_LD) -o $@ $^ ../deps/hiredis/libhiredis.a $(FINAL_LIBS)
 
 # redis-check-dump
 $(REDIS_CHECK_DUMP_NAME): $(REDIS_CHECK_DUMP_OBJ)
-	$(REDIS_LD) -o $@ $^ $(REDIS_LIBS)
+	$(REDIS_LD) -o $@ $^ $(FINAL_LIBS)
 
 # redis-check-aof
 $(REDIS_CHECK_AOF_NAME): $(REDIS_CHECK_AOF_OBJ)
-	$(REDIS_LD) -o $@ $^ $(REDIS_LIBS)
+	$(REDIS_LD) -o $@ $^ $(FINAL_LIBS)
 
 # Because the jemalloc.h header is generated as a part of the jemalloc build,
 # building it should complete before building any other object. Instead of
@@ -184,7 +190,7 @@ test: $(REDIS_SERVER_NAME) $(REDIS_CHECK_AOF_NAME)
 	@(cd ..; ./runtest)
 
 lcov:
-	$(MAKE) clean gcov
+	$(MAKE) gcov
 	@(set -e; cd ..; ./runtest --clients 1)
 	@geninfo -o redis.info .
 	@genhtml --legend -o lcov-html redis.info
@@ -198,10 +204,10 @@ bench: $(REDIS_BENCHMARK_NAME)
 	@echo ""
 	@echo "WARNING: if it fails under Linux you probably need to install libc6-dev-i386"
 	@echo ""
-	$(MAKE) CFLAGS="$(CFLAGS) -m32" LDFLAGS="$(LDFLAGS) -m32"
+	$(MAKE) CFLAGS="-m32" LDFLAGS="-m32"
 
 gcov:
-	$(MAKE) ADD_CFLAGS="-fprofile-arcs -ftest-coverage -DCOVERAGE_TEST" ADD_LDFLAGS="-fprofile-arcs -ftest-coverage"
+	$(MAKE) REDIS_CFLAGS="-fprofile-arcs -ftest-coverage -DCOVERAGE_TEST" REDIS_LDFLAGS="-fprofile-arcs -ftest-coverage"
 
 noopt:
	$(MAKE) OPT="-O0"
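The persist-settings machinery above works by dumping the effective build settings into .make-settings (one of the files listed in the new src/.gitignore) and re-reading it on the next run through '-include', which silently skips a missing file on the very first build. When the freshly computed FINAL_CFLAGS or FINAL_LDFLAGS differ from the recorded PREV_FINAL_* values, .make-prerequisites depends on persist-settings, which in turn runs distclean and rebuilds the bundled dependencies with the new flags. On a Linux box the generated file would look roughly like the sketch below; the exact values are an assumption and vary with platform, allocator and the flags passed on the command line:

    STD=-std=c99 -pedantic
    WARN=-Wall
    OPT=-O2
    MALLOC=jemalloc
    CFLAGS=
    LDFLAGS=
    REDIS_CFLAGS=
    REDIS_LDFLAGS=
    PREV_FINAL_CFLAGS=-std=c99 -pedantic -Wall -O2 -g -rdynamic -ggdb -I../deps/hiredis -I../deps/linenoise -I../deps/lua/src -DUSE_JEMALLOC -I../deps/jemalloc/include
    PREV_FINAL_LDFLAGS=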
diff --git a/src/config.c b/src/config.c
index 9899dfbc..6f98e5e5 100644
--- a/src/config.c
+++ b/src/config.c
@@ -155,6 +155,9 @@ void loadServerConfigFromString(char *config) {
             loadServerConfig(argv[1],NULL);
         } else if (!strcasecmp(argv[0],"maxclients") && argc == 2) {
             server.maxclients = atoi(argv[1]);
+            if (server.maxclients < 1) {
+                err = "Invalid max clients limit"; goto loaderr;
+            }
         } else if (!strcasecmp(argv[0],"maxmemory") && argc == 2) {
             server.maxmemory = memtoll(argv[1],NULL);
         } else if (!strcasecmp(argv[0],"maxmemory-policy") && argc == 2) {
diff --git a/src/dict.c b/src/dict.c
index 6fd4584e..e6668082 100644
--- a/src/dict.c
+++ b/src/dict.c
@@ -116,8 +116,8 @@ unsigned int dictGenCaseHashFunction(const unsigned char *buf, int len) {
 
 /* ----------------------------- API implementation ------------------------- */
 
-/* Reset an hashtable already initialized with ht_init().
- * NOTE: This function should only called by ht_destroy(). */
+/* Reset a hash table already initialized with ht_init().
+ * NOTE: This function should only be called by ht_destroy(). */
 static void _dictReset(dictht *ht)
 {
     ht->table = NULL;
@@ -162,18 +162,18 @@ int dictResize(dict *d)
     return dictExpand(d, minimal);
 }
 
-/* Expand or create the hashtable */
+/* Expand or create the hash table */
 int dictExpand(dict *d, unsigned long size)
 {
-    dictht n; /* the new hashtable */
+    dictht n; /* the new hash table */
     unsigned long realsize = _dictNextPower(size);
 
     /* the size is invalid if it is smaller than the number of
-     * elements already inside the hashtable */
+     * elements already inside the hash table */
     if (dictIsRehashing(d) || d->ht[0].used > size)
         return DICT_ERR;
 
-    /* Allocate the new hashtable and initialize all pointers to NULL */
+    /* Allocate the new hash table and initialize all pointers to NULL */
     n.size = realsize;
     n.sizemask = realsize-1;
     n.table = zcalloc(realsize*sizeof(dictEntry*));
@@ -280,7 +280,7 @@ int dictAdd(dict *d, void *key, void *val)
  * a value returns the dictEntry structure to the user, that will make
  * sure to fill the value field as he wishes.
  *
- * This function is also directly expoed to user API to be called
+ * This function is also directly exposed to the user API to be called
  * mainly in order to store non-pointers inside the hash value, example:
 *
 * entry = dictAddRaw(dict,mykey);
@@ -607,7 +607,7 @@ static int _dictKeyIndex(dict *d, const void *key)
     unsigned int h, idx, table;
     dictEntry *he;
 
-    /* Expand the hashtable if needed */
+    /* Expand the hash table if needed */
     if (_dictExpandIfNeeded(d) == DICT_ERR)
         return -1;
     /* Compute the key hash value */
@@ -633,6 +633,21 @@ void dictEmpty(dict *d) {
     d->iterators = 0;
 }
 
+void dictEnableResize(void) {
+    dict_can_resize = 1;
+}
+
+void dictDisableResize(void) {
+    dict_can_resize = 0;
+}
+
+#if 0
+
+/* The following is code that we don't use for Redis currently, but that is part
+of the library. */
+
+/* ----------------------- Debugging ------------------------*/
+
 #define DICT_STATS_VECTLEN 50
 static void _dictPrintStatsHt(dictht *ht) {
     unsigned long i, slots = 0, chainlen, maxchainlen = 0;
@@ -686,20 +701,6 @@ void dictPrintStats(dict *d) {
     }
 }
 
-void dictEnableResize(void) {
-    dict_can_resize = 1;
-}
-
-void dictDisableResize(void) {
-    dict_can_resize = 0;
-}
-
-#if 0
-
-/* The following are just example hash table types implementations.
- * Not useful for Redis so they are commented out.
- */
-
 /* ----------------------- StringCopy Hash Table Type ------------------------*/
 
 static unsigned int _dictStringCopyHTHashFunction(const void *key)
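dictExpand() above sizes the new table with _dictNextPower() and then relies on n.sizemask = realsize-1, which only works if realsize is a power of two (so that hash & sizemask can replace a modulo). The helper below is a guess at what _dictNextPower() does, based on that contract; it is a standalone illustration rather than a copy of dict.c, and the minimum size of 4 is an assumption:

    #include <stdio.h>

    /* Round a requested size up to the next power of two, starting at 4. */
    static unsigned long next_power(unsigned long size) {
        unsigned long i = 4;
        while (i < size) i *= 2;
        return i;
    }

    int main(void) {
        printf("%lu %lu %lu\n", next_power(5), next_power(64), next_power(1000));
        return 0;   /* prints "8 64 1024", i.e. sizemasks 7, 63 and 1023 */
    }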
diff --git a/src/rdb.c b/src/rdb.c
index 8ffd2c28..f9ad9f94 100644
--- a/src/rdb.c
+++ b/src/rdb.c
@@ -798,7 +798,7 @@ robj *rdbLoadObject(int rdbtype, rio *rdb) {
             }
 
             /* This will also be called when the set was just converted
-             * to regular hashtable encoded set */
+             * to a regular hash table encoded set */
             if (o->encoding == REDIS_ENCODING_HT) {
                 dictAdd((dict*)o->ptr,ele,NULL);
             } else {
diff --git a/src/redis-cli.c b/src/redis-cli.c
index bdaa3964..3e0c5895 100644
--- a/src/redis-cli.c
+++ b/src/redis-cli.c
@@ -69,6 +69,7 @@ static struct config {
     int cluster_mode;
     int cluster_reissue_command;
     int slave_mode;
+    int bigkeys;
     int stdinarg; /* get last arg from stdin. (-x option) */
     char *auth;
     int output; /* output mode, see OUTPUT_* defines */
@@ -655,6 +656,8 @@ static int parseOptions(int argc, char **argv) {
             config.latency_mode = 1;
         } else if (!strcmp(argv[i],"--slave")) {
             config.slave_mode = 1;
+        } else if (!strcmp(argv[i],"--bigkeys")) {
+            config.bigkeys = 1;
         } else if (!strcmp(argv[i],"--eval") && !lastarg) {
             config.eval = argv[++i];
         } else if (!strcmp(argv[i],"-c")) {
@@ -711,6 +714,7 @@ static void usage() {
 " --raw Use raw formatting for replies (default when STDOUT is not a tty)\n"
 " --latency Enter a special mode continuously sampling latency.\n"
 " --slave Simulate a slave showing commands received from the master.\n"
+" --bigkeys Sample Redis keys looking for big keys.\n"
 " --eval Send an EVAL command using the Lua script at .\n"
 " --help Output this help and exit\n"
 " --version Output version and exit\n"
@@ -964,6 +968,87 @@ static void slaveMode(void) {
     while (cliReadReply(0) == REDIS_OK);
 }
 
+#define TYPE_STRING 0
+#define TYPE_LIST 1
+#define TYPE_SET 2
+#define TYPE_HASH 3
+#define TYPE_ZSET 4
+
+static void findBigKeys(void) {
+    unsigned long long biggest[5] = {0,0,0,0,0};
+    unsigned long long samples = 0;
+    redisReply *reply1, *reply2, *reply3;
+    char *sizecmd, *typename[] = {"string","list","set","hash","zset"};
+    int type;
+
+    printf("\n# Press ctrl+c when you have had enough of it... :)\n");
+    printf("# You can use -i 0.1 to sleep 0.1 sec every 100 sampled keys\n");
+    printf("# in order to reduce server load (usually not needed).\n\n");
+    while(1) {
+        /* Sample with RANDOMKEY */
+        reply1 = redisCommand(context,"RANDOMKEY");
+        if (reply1 == NULL) {
+            fprintf(stderr,"\nI/O error\n");
+            exit(1);
+        } else if (reply1->type == REDIS_REPLY_ERROR) {
+            fprintf(stderr, "RANDOMKEY error: %s\n",
+                reply1->str);
+            exit(1);
+        }
+        /* Get the key type */
+        reply2 = redisCommand(context,"TYPE %s",reply1->str);
+        assert(reply2 && reply2->type == REDIS_REPLY_STATUS);
+        samples++;
+
+        /* Get the key "size" */
+        if (!strcmp(reply2->str,"string")) {
+            sizecmd = "STRLEN";
+            type = TYPE_STRING;
+        } else if (!strcmp(reply2->str,"list")) {
+            sizecmd = "LLEN";
+            type = TYPE_LIST;
+        } else if (!strcmp(reply2->str,"set")) {
+            sizecmd = "SCARD";
+            type = TYPE_SET;
+        } else if (!strcmp(reply2->str,"hash")) {
+            sizecmd = "HLEN";
+            type = TYPE_HASH;
+        } else if (!strcmp(reply2->str,"zset")) {
+            sizecmd = "ZCARD";
+            type = TYPE_ZSET;
+        } else if (!strcmp(reply2->str,"none")) {
+            freeReplyObject(reply1);
+            freeReplyObject(reply2);
+            freeReplyObject(reply3);
+            continue;
+        } else {
+            fprintf(stderr, "Unknown key type '%s' for key '%s'\n",
+                reply2->str, reply1->str);
+            exit(1);
+        }
+
+        reply3 = redisCommand(context,"%s %s", sizecmd, reply1->str);
+        if (reply3 && reply3->type == REDIS_REPLY_INTEGER) {
+            if (biggest[type] < reply3->integer) {
+                printf("[%6s] %s | biggest so far with size %llu\n",
+                    typename[type], reply1->str,
+                    (unsigned long long) reply3->integer);
+                biggest[type] = reply3->integer;
+            }
+        }
+
+        if ((samples % 1000000) == 0)
+            printf("(%llu keys sampled)\n", samples);
+
+        if ((samples % 100) == 0 && config.interval)
+            usleep(config.interval);
+
+        freeReplyObject(reply1);
+        freeReplyObject(reply2);
+        if (reply3) freeReplyObject(reply3);
+    }
+}
+
 int main(int argc, char **argv) {
     int firstarg;
 
@@ -979,6 +1064,8 @@ int main(int argc, char **argv) {
     config.pubsub_mode = 0;
     config.latency_mode = 0;
     config.cluster_mode = 0;
+    config.slave_mode = 0;
+    config.bigkeys = 0;
     config.stdinarg = 0;
     config.auth = NULL;
     config.eval = NULL;
@@ -1005,6 +1092,12 @@ int main(int argc, char **argv) {
         slaveMode();
     }
 
+    /* Find big keys */
+    if (config.bigkeys) {
+        cliConnect(0);
+        findBigKeys();
+    }
+
     /* Start interactive mode when no command is provided */
     if (argc == 0 && !config.eval) {
         /* Note that in repl mode we don't abort on connection error.
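The new redis-cli mode added above samples the keyspace with RANDOMKEY, asks TYPE for each sampled key, measures it with the matching length command (STRLEN, LLEN, SCARD, HLEN or ZCARD) and prints a key whenever it is the biggest seen so far for its type. Usage is simply:

    % redis-cli --bigkeys
    % redis-cli -i 0.1 --bigkeys    # sleep 0.1 sec every 100 sampled keys

The -i option is the existing interval flag reused here, as the banner printed by findBigKeys() suggests; sampling runs until interrupted with Ctrl+C.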
diff --git a/src/redis.c b/src/redis.c
index 7da9c545..cb4883cc 100644
--- a/src/redis.c
+++ b/src/redis.c
@@ -1153,7 +1153,6 @@ void adjustOpenFilesLimit(void) {
     rlim_t maxfiles = server.maxclients+32;
     struct rlimit limit;
 
-    if (maxfiles < 1024) maxfiles = 1024;
     if (getrlimit(RLIMIT_NOFILE,&limit) == -1) {
         redisLog(REDIS_WARNING,"Unable to obtain the current NOFILE limit (%s), assuming 1024 and setting the max clients configuration accordingly.",
             strerror(errno));
@@ -1600,7 +1599,7 @@ int processCommand(redisClient *c) {
 
     /* Lua script too slow? Only allow SHUTDOWN NOSAVE and SCRIPT KILL. */
     if (server.lua_timedout &&
-        !(c->cmd->proc != shutdownCommand &&
+        !(c->cmd->proc == shutdownCommand &&
           c->argc == 2 &&
           tolower(((char*)c->argv[1]->ptr)[0]) == 'n') &&
         !(c->cmd->proc == scriptCommand &&
diff --git a/src/redis.h b/src/redis.h
index 42a9b581..3b9d4e78 100644
--- a/src/redis.h
+++ b/src/redis.h
@@ -52,7 +52,7 @@
 #define REDIS_AOF_REWRITE_MIN_SIZE (1024*1024)
 #define REDIS_AOF_REWRITE_ITEMS_PER_CMD 64
 #define REDIS_SLOWLOG_LOG_SLOWER_THAN 10000
-#define REDIS_SLOWLOG_MAX_LEN 64
+#define REDIS_SLOWLOG_MAX_LEN 128
 #define REDIS_MAX_CLIENTS 10000
 #define REDIS_REPL_TIMEOUT 60
diff --git a/src/scripting.c b/src/scripting.c
index 4c7de33b..a5f5683e 100644
--- a/src/scripting.c
+++ b/src/scripting.c
@@ -412,6 +412,13 @@ void luaLoadLibraries(lua_State *lua) {
 #endif
 }
 
+/* Remove a functions that we don't want to expose to the Redis scripting
+ * environment. */
+void luaRemoveUnsupportedFunctions(lua_State *lua) {
+    lua_pushnil(lua);
+    lua_setglobal(lua,"loadfile");
+}
+
 /* This function installs metamethods in the global table _G that prevent
  * the creation of globals accidentally.
 *
@@ -455,7 +462,9 @@ void scriptingEnableGlobalsProtection(lua_State *lua) {
  * See scriptingReset() for more information. */
 void scriptingInit(void) {
     lua_State *lua = lua_open();
+
     luaLoadLibraries(lua);
+    luaRemoveUnsupportedFunctions(lua);
 
     /* Initialize a dictionary we use to map SHAs to scripts.
      * This is useful for replication, as we need to replicate EVALSHA
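Two related scripting changes sit above: processCommand() now really lets only SHUTDOWN NOSAVE and SCRIPT KILL through while a Lua script has run past its time limit (the previous '!=' inverted that test), and scriptingInit() strips loadfile from the script environment right after loading the Lua libraries. The same pushnil/setglobal pattern can hide any other global; the sketch below is an illustration against the stock Lua 5.1 C API, and the dofile line is a hypothetical extra rather than part of this commit:

    #include <lua.h>
    #include <lauxlib.h>
    #include <lualib.h>

    /* Assign nil to _G[name] so scripts can no longer reach it. */
    static void removeGlobal(lua_State *lua, const char *name) {
        lua_pushnil(lua);
        lua_setglobal(lua, name);
    }

    int main(void) {
        lua_State *lua = luaL_newstate();
        luaL_openlibs(lua);
        removeGlobal(lua, "loadfile");   /* what luaRemoveUnsupportedFunctions() does */
        removeGlobal(lua, "dofile");     /* hypothetical further hardening */
        lua_close(lua);
        return 0;
    }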
diff --git a/src/slowlog.c b/src/slowlog.c
index cfd66dc6..53c44a01 100644
--- a/src/slowlog.c
+++ b/src/slowlog.c
@@ -16,13 +16,36 @@
  * this function. */
 slowlogEntry *slowlogCreateEntry(robj **argv, int argc, long long duration) {
     slowlogEntry *se = zmalloc(sizeof(*se));
-    int j;
+    int j, slargc = argc;
+
+    if (slargc > SLOWLOG_ENTRY_MAX_ARGC) slargc = SLOWLOG_ENTRY_MAX_ARGC;
+    se->argc = slargc;
+    se->argv = zmalloc(sizeof(robj*)*slargc);
+    for (j = 0; j < slargc; j++) {
+        /* Logging too many arguments is a useless memory waste, so we stop
+         * at SLOWLOG_ENTRY_MAX_ARGC, but use the last argument to specify
+         * how many remaining arguments there were in the original command. */
+        if (slargc != argc && j == slargc-1) {
+            se->argv[j] = createObject(REDIS_STRING,
+                sdscatprintf(sdsempty(),"... (%d more arguments)",
+                argc-slargc+1));
+        } else {
+            /* Trim too long strings as well... */
+            if (argv[j]->type == REDIS_STRING &&
+                argv[j]->encoding == REDIS_ENCODING_RAW &&
+                sdslen(argv[j]->ptr) > SLOWLOG_ENTRY_MAX_STRING)
+            {
+                sds s = sdsnewlen(argv[j]->ptr, SLOWLOG_ENTRY_MAX_STRING);
 
-    se->argc = argc;
-    se->argv = zmalloc(sizeof(robj*)*argc);
-    for (j = 0; j < argc; j++) {
-        se->argv[j] = argv[j];
-        incrRefCount(argv[j]);
+                s = sdscatprintf(s,"... (%lu more bytes)",
+                    (unsigned long)
+                    sdslen(argv[j]->ptr) - SLOWLOG_ENTRY_MAX_STRING);
+                se->argv[j] = createObject(REDIS_STRING,s);
+            } else {
+                se->argv[j] = argv[j];
+                incrRefCount(argv[j]);
+            }
+        }
     }
     se->time = time(NULL);
     se->duration = duration;
diff --git a/src/slowlog.h b/src/slowlog.h
index bad770db..bcc961cc 100644
--- a/src/slowlog.h
+++ b/src/slowlog.h
@@ -1,3 +1,6 @@
+#define SLOWLOG_ENTRY_MAX_ARGC 32
+#define SLOWLOG_ENTRY_MAX_STRING 128
+
 /* This structure defines an entry inside the slow log list */
 typedef struct slowlogEntry {
     robj **argv;
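slowlogCreateEntry() above now caps what gets stored: at most SLOWLOG_ENTRY_MAX_ARGC arguments, with the last kept slot replaced by a "... (N more arguments)" note, and any raw string longer than SLOWLOG_ENTRY_MAX_STRING bytes cut down with a "... (N more bytes)" suffix. Below is a stripped-down sketch of the argument-count rule, written outside of Redis' object machinery (plain C strings instead of robj, illustration only):

    #include <stdio.h>

    #define MAX_ARGC 32   /* mirrors SLOWLOG_ENTRY_MAX_ARGC */

    static void print_trimmed(char **argv, int argc) {
        int keep = (argc > MAX_ARGC) ? MAX_ARGC : argc;
        for (int j = 0; j < keep; j++) {
            if (keep != argc && j == keep-1)
                printf("... (%d more arguments)\n", argc-keep+1);
            else
                printf("%s\n", argv[j]);
        }
    }

    int main(void) {
        char *argv[] = {"sadd", "set", "a", "b", "c"};
        print_trimmed(argv, 5);   /* short commands are stored untouched */
        return 0;
    }

For a 33-argument SADD this logic keeps the first 31 arguments plus "... (2 more arguments)", which is exactly the reply shape the new slowlog.tcl test further down checks for.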
diff --git a/src/sort.c b/src/sort.c
index ff655c7e..c1ed5517 100644
--- a/src/sort.c
+++ b/src/sort.c
@@ -28,7 +28,7 @@ redisSortOperation *createSortOperation(int type, robj *pattern) {
 robj *lookupKeyByPattern(redisDb *db, robj *pattern, robj *subst) {
     char *p, *f, *k;
     sds spat, ssub;
-    robj *keyobj, *fieldobj, *o;
+    robj *keyobj, *fieldobj = NULL, *o;
     int prefixlen, sublen, postfixlen, fieldlen;
 
     /* If the pattern is "#" return the substitution object itself in order
@@ -76,7 +76,7 @@ robj *lookupKeyByPattern(redisDb *db, robj *pattern, robj *subst) {
     o = lookupKeyRead(db,keyobj);
     if (o == NULL) goto noobj;
 
-    if (fieldlen > 0) {
+    if (fieldobj) {
         if (o->type != REDIS_HASH) goto noobj;
 
         /* Retrieve value from hash by the field name. This operation
@@ -90,7 +90,7 @@ robj *lookupKeyByPattern(redisDb *db, robj *pattern, robj *subst) {
         incrRefCount(o);
     }
     decrRefCount(keyobj);
-    if (fieldlen) decrRefCount(fieldobj);
+    if (fieldobj) decrRefCount(fieldobj);
     return o;
 
 noobj:
diff --git a/src/t_list.c b/src/t_list.c
index 6a16a632..ca03916b 100644
--- a/src/t_list.c
+++ b/src/t_list.c
@@ -699,6 +699,8 @@ void rpoplpushCommand(redisClient *c) {
         checkType(c,sobj,REDIS_LIST)) return;
 
     if (listTypeLength(sobj) == 0) {
+        /* This may only happen after loading very old RDB files. Recent
+         * versions of Redis delete keys of empty lists. */
         addReply(c,shared.nullbulk);
     } else {
         robj *dobj = lookupKeyWrite(c->db,c->argv[2]);
diff --git a/src/t_set.c b/src/t_set.c
index 3cf1cf00..df8ade47 100644
--- a/src/t_set.c
+++ b/src/t_set.c
@@ -185,7 +185,7 @@ unsigned long setTypeSize(robj *subject) {
 }
 
 /* Convert the set to specified encoding. The resulting dict (when converting
- * to a hashtable) is presized to hold the number of elements in the original
+ * to a hash table) is presized to hold the number of elements in the original
 * set. */
 void setTypeConvert(robj *setobj, int enc) {
     setTypeIterator *si;
diff --git a/src/zipmap.c b/src/zipmap.c
index 1f11fd42..d9b7c8b3 100644
--- a/src/zipmap.c
+++ b/src/zipmap.c
@@ -52,15 +52,15 @@
  * lengths are encoded in a single value or in a 5 bytes value.
  * If the first byte value (as an unsigned 8 bit value) is between 0 and
  * 252, it's a single-byte length. If it is 253 then a four bytes unsigned
- * integer follows (in the host byte ordering). A value fo 255 is used to
+ * integer follows (in the host byte ordering). A value of 255 is used to
  * signal the end of the hash. The special value 254 is used to mark
  * empty space that can be used to add new key/value pairs.
 *
- * is the number of free unused bytes
- * after the string, resulting from modification of values associated to a
- * key (for instance if "foo" is set to "bar', and later "foo" will be se to
- * "hi", I'll have a free byte to use if the value will enlarge again later,
- * or even in order to add a key/value pair if it fits.
+ * is the number of free unused bytes after the string, resulting
+ * from modification of values associated to a key. For instance if "foo"
+ * is set to "bar", and later "foo" will be set to "hi", it will have a
+ * free byte to use if the value will enlarge again later, or even in
+ * order to add a key/value pair if it fits.
 *
 * is always an unsigned 8 bit number, because if after an
 * update operation there are more than a few free bytes, the zipmap will be
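The zipmap header comment above fully specifies the length encoding, so a decoder is easy to sketch. The constants and the function below are illustrative names, not the zipmap.c API; per the comment, a first byte in 0..252 is the length itself, 253 announces a 4-byte host-order length, 254 marks reusable free space and 255 terminates the map:

    #include <stdint.h>
    #include <string.h>

    #define ZM_4BYTELEN 253   /* next 4 bytes hold the length (host order) */
    #define ZM_FREE     254   /* marks empty space inside the map */
    #define ZM_END      255   /* end of the zipmap */

    /* Decode a length prefix at p; *hdrlen gets the number of header bytes
     * consumed. Callers are expected to check for ZM_FREE / ZM_END first. */
    static uint32_t zm_decode_len(const unsigned char *p, unsigned int *hdrlen) {
        if (p[0] < ZM_4BYTELEN) {
            *hdrlen = 1;
            return p[0];
        }
        uint32_t len;
        memcpy(&len, p+1, sizeof(len));
        *hdrlen = 1 + (unsigned int)sizeof(len);
        return len;
    }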
diff --git a/tests/assets/default.conf b/tests/assets/default.conf
index 976852e9..1b234504 100644
--- a/tests/assets/default.conf
+++ b/tests/assets/default.conf
@@ -294,7 +294,7 @@ no-appendfsync-on-rewrite no
 
 ############################### ADVANCED CONFIG ###############################
 
 # Hashes are encoded in a special way (much more memory efficient) when they
-# have at max a given numer of elements, and the biggest element does not
+# have at max a given number of elements, and the biggest element does not
 # exceed a given threshold. You can configure this limits with the following
 # configuration directives.
 hash-max-ziplist-entries 64
@@ -317,7 +317,7 @@ set-max-intset-entries 512
 # order to help rehashing the main Redis hash table (the one mapping top-level
 # keys to values). The hash table implementation redis uses (see dict.c)
 # performs a lazy rehashing: the more operation you run into an hash table
-# that is rhashing, the more rehashing "steps" are performed, so if the
+# that is rehashing, the more rehashing "steps" are performed, so if the
 # server is idle the rehashing is never complete and some more memory is used
 # by the hash table.
 #
diff --git a/tests/unit/limits.tcl b/tests/unit/limits.tcl
index f622e1b9..b37ea9b0 100644
--- a/tests/unit/limits.tcl
+++ b/tests/unit/limits.tcl
@@ -4,7 +4,9 @@ start_server {tags {"limits"} overrides {maxclients 10}} {
         catch {
             while {$c < 50} {
                 incr c
-                redis_deferring_client
+                set rd [redis_deferring_client]
+                $rd ping
+                $rd read
                 after 100
             }
         } e
diff --git a/tests/unit/protocol.tcl b/tests/unit/protocol.tcl
index 3110d3d7..1700e489 100644
--- a/tests/unit/protocol.tcl
+++ b/tests/unit/protocol.tcl
@@ -68,7 +68,7 @@ start_server {tags {"protocol"}} {
         puts -nonewline $s $seq
         set payload [string repeat A 1024]"\n"
         set test_start [clock seconds]
-        set test_time_limit 5
+        set test_time_limit 30
        while 1 {
            if {[catch {
                puts -nonewline $s payload
diff --git a/tests/unit/scripting.tcl b/tests/unit/scripting.tcl
index 009c1347..a60c65b4 100644
--- a/tests/unit/scripting.tcl
+++ b/tests/unit/scripting.tcl
@@ -251,6 +251,64 @@ start_server {tags {"scripting"}} {
         lappend res [r eval $decr_if_gt 1 foo 2]
         set res
     } {4 3 2 2 2}
+
+    test {Scripting engine resets PRNG at every script execution} {
+        set rand1 [r eval {return tostring(math.random())} 0]
+        set rand2 [r eval {return tostring(math.random())} 0]
+        assert_equal $rand1 $rand2
+    }
+
+    test {Scripting engine PRNG can be seeded correctly} {
+        set rand1 [r eval {
+            math.randomseed(ARGV[1]); return tostring(math.random())
+        } 0 10]
+        set rand2 [r eval {
+            math.randomseed(ARGV[1]); return tostring(math.random())
+        } 0 10]
+        set rand3 [r eval {
+            math.randomseed(ARGV[1]); return tostring(math.random())
+        } 0 20]
+        assert_equal $rand1 $rand2
+        assert {$rand2 ne $rand3}
+    }
+}
+
+# Start a new server since the last test in this stanza will kill the
+# instance at all.
+start_server {tags {"scripting"}} {
+    test {Timedout read-only scripts can be killed by SCRIPT KILL} {
+        set rd [redis_deferring_client]
+        r config set lua-time-limit 10
+        $rd eval {while true do end} 0
+        after 200
+        catch {r ping} e
+        assert_match {BUSY*} $e
+        r script kill
+        assert_equal [r ping] "PONG"
+    }
+
+    test {Timedout scripts that modified data can't be killed by SCRIPT KILL} {
+        set rd [redis_deferring_client]
+        r config set lua-time-limit 10
+        $rd eval {redis.call('set','x','y'); while true do end} 0
+        after 200
+        catch {r ping} e
+        assert_match {BUSY*} $e
+        catch {r script kill} e
+        assert_match {ERR*} $e
+        catch {r ping} e
+        assert_match {BUSY*} $e
+    }
+
+    test {SHUTDOWN NOSAVE can kill a timedout script anyway} {
+        # The server sould be still unresponding to normal commands.
+        catch {r ping} e
+        assert_match {BUSY*} $e
+        catch {r shutdown nosave}
+        # Make sure the server was killed
+        catch {set rd [redis_deferring_client]} e
+        assert_match {*connection refused*} $e
+    }
 }
 
 start_server {tags {"scripting repl"}} {
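The PRNG behaviour pinned down by the new scripting tests above is easy to check by hand from redis-cli; the scripts are the same ones the tests use, and the two unseeded calls return the same value because the engine resets the PRNG before every script execution:

    % redis-cli EVAL "return tostring(math.random())" 0
    % redis-cli EVAL "return tostring(math.random())" 0

Seeding with math.randomseed(ARGV[1]), as in the second test, is the way to get different but reproducible sequences per call.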
diff --git a/tests/unit/slowlog.tcl b/tests/unit/slowlog.tcl
index 55a71e98..2216e925 100644
--- a/tests/unit/slowlog.tcl
+++ b/tests/unit/slowlog.tcl
@@ -38,4 +38,21 @@ start_server {tags {"slowlog"} overrides {slowlog-log-slower-than 1000000}} {
         assert_equal [expr {[lindex $e 2] > 100000}] 1
         assert_equal [lindex $e 3] {debug sleep 0.2}
     }
+
+    test {SLOWLOG - commands with too many arguments are trimmed} {
+        r config set slowlog-log-slower-than 0
+        r slowlog reset
+        r sadd set 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33
+        set e [lindex [r slowlog get] 0]
+        lindex $e 3
+    } {sadd set 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 {... (2 more arguments)}}
+
+    test {SLOWLOG - too long arguments are trimmed} {
+        r config set slowlog-log-slower-than 0
+        r slowlog reset
+        set arg [string repeat A 129]
+        r sadd set foo $arg
+        set e [lindex [r slowlog get] 0]
+        lindex $e 3
+    } {sadd set foo {AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA... (1 more bytes)}}
 }
diff --git a/tests/unit/type/hash.tcl b/tests/unit/type/hash.tcl
index 47e10caa..950805d1 100644
--- a/tests/unit/type/hash.tcl
+++ b/tests/unit/type/hash.tcl
@@ -395,4 +395,28 @@ start_server {tags {"hash"}} {
         r hset hash kkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkk b
         r hget hash kkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkk
     } {b}
+
+    foreach size {10 512} {
+        test "Hash fuzzing - $size fields" {
+            for {set times 0} {$times < 10} {incr times} {
+                catch {unset hash}
+                array set hash {}
+                r del hash
+
+                # Create
+                for {set j 0} {$j < $size} {incr j} {
+                    set field [randomValue]
+                    set value [randomValue]
+                    r hset hash $field $value
+                    set hash($field) $value
+                }
+
+                # Verify
+                foreach {k v} [array get hash] {
+                    assert_equal $v [r hget hash $k]
+                }
+                assert_equal [array size hash] [r hlen hash]
+            }
+        }
+    }
 }
diff --git a/tests/unit/type/list.tcl b/tests/unit/type/list.tcl
index 970e3ee7..85dde569 100644
--- a/tests/unit/type/list.tcl
+++ b/tests/unit/type/list.tcl
@@ -7,7 +7,7 @@ start_server {
 } {
     source "tests/unit/type/list-common.tcl"
 
-    test {LPUSH, RPUSH, LLENGTH, LINDEX - ziplist} {
+    test {LPUSH, RPUSH, LLENGTH, LINDEX, LPOP - ziplist} {
         # first lpush then rpush
         assert_equal 1 [r lpush myziplist1 a]
         assert_equal 2 [r rpush myziplist1 b]
@@ -16,6 +16,9 @@ start_server {
         assert_equal a [r lindex myziplist1 0]
         assert_equal b [r lindex myziplist1 1]
         assert_equal c [r lindex myziplist1 2]
+        assert_equal {} [r lindex myziplist2 3]
+        assert_equal c [r rpop myziplist1]
+        assert_equal a [r lpop myziplist1]
         assert_encoding ziplist myziplist1
 
         # first rpush then lpush
@@ -26,10 +29,13 @@ start_server {
         assert_equal c [r lindex myziplist2 0]
         assert_equal b [r lindex myziplist2 1]
         assert_equal a [r lindex myziplist2 2]
+        assert_equal {} [r lindex myziplist2 3]
+        assert_equal a [r rpop myziplist2]
+        assert_equal c [r lpop myziplist2]
         assert_encoding ziplist myziplist2
     }
 
-    test {LPUSH, RPUSH, LLENGTH, LINDEX - regular list} {
+    test {LPUSH, RPUSH, LLENGTH, LINDEX, LPOP - regular list} {
         # first lpush then rpush
         assert_equal 1 [r lpush mylist1 $largevalue(linkedlist)]
         assert_encoding linkedlist mylist1
@@ -39,6 +45,9 @@ start_server {
         assert_equal $largevalue(linkedlist) [r lindex mylist1 0]
         assert_equal b [r lindex mylist1 1]
         assert_equal c [r lindex mylist1 2]
+        assert_equal {} [r lindex mylist1 3]
+        assert_equal c [r rpop mylist1]
+        assert_equal $largevalue(linkedlist) [r lpop mylist1]
 
         # first rpush then lpush
         assert_equal 1 [r rpush mylist2 $largevalue(linkedlist)]
@@ -49,8 +58,15 @@ start_server {
         assert_equal c [r lindex mylist2 0]
         assert_equal b [r lindex mylist2 1]
         assert_equal $largevalue(linkedlist) [r lindex mylist2 2]
+        assert_equal {} [r lindex mylist2 3]
+        assert_equal $largevalue(linkedlist) [r rpop mylist2]
+        assert_equal c [r lpop mylist2]
     }
 
+    test {R/LPOP against empty list} {
+        r lpop non-existing-list
+    } {}
+
     test {Variadic RPUSH/LPUSH} {
         r del mylist
         assert_equal 4 [r lpush mylist a b c d]
@@ -396,6 +412,11 @@ start_server {
         }
     }
 
+    test {LINSERT raise error on bad syntax} {
+        catch {[r linsert xlist aft3r aa 42]} e
+        set e
+    } {*ERR*syntax*error*}
+
     test {LPUSHX, RPUSHX convert from ziplist to list} {
         set large $largevalue(linkedlist)
 
diff --git a/tests/unit/type/set.tcl b/tests/unit/type/set.tcl
index bdd1f9bf..f4f28373 100644
--- a/tests/unit/type/set.tcl
+++ b/tests/unit/type/set.tcl
@@ -206,6 +206,13 @@ start_server {
         }
     }
 
+    test "SDIFF with first set empty" {
+        r del set1 set2 set3
+        r sadd set2 1 2 3 4
+        r sadd set3 a b c d
+        r sdiff set1 set2 set3
+    } {}
+
     test "SINTER against non-set should throw error" {
         r set key1 x
         assert_error "ERR*wrong kind*" {r sinter key1 noset}
@@ -216,6 +223,23 @@
         assert_error "ERR*wrong kind*" {r sunion key1 noset}
     }
 
+    test "SINTER should handle non existing key as empty" {
+        r del set1 set2 set3
+        r sadd set1 a b c
+        r sadd set2 b c d
+        r sinter set1 set2 set3
+    } {}
+
+    test "SINTER with same integer elements but different encoding" {
+        r del set1 set2
+        r sadd set1 1 2 3
+        r sadd set2 1 2 3 a
+        r srem set2 a
+        assert_encoding intset set1
+        assert_encoding hashtable set2
+        lsort [r sinter set1 set2]
+    } {1 2 3}
+
     test "SINTERSTORE against non existing keys should delete dstkey" {
         r set setres xxx
         assert_equal 0 [r sinterstore setres foo111 bar222]
@@ -317,6 +341,13 @@ start_server {
         assert_error "ERR*wrong kind*" {r smove myset2 x foo}
     }
 
+    test "SMOVE with identical source and destination" {
+        r del set
+        r sadd set a b c
+        r smove set set b
+        lsort [r smembers set]
+    } {a b c}
+
     tags {slow} {
         test {intsets implementation stress testing} {
            for {set j 0} {$j < 20} {incr j} {