Building Redis
--------------
+Redis can be compiled and used on Linux, OSX, OpenBSD, NetBSD, and FreeBSD.
+We support big endian and little endian architectures.
+
+It may compile on Solaris derived systems (for instance SmartOS) but our
+support for this platform is "best effort" and Redis is not guaranteed to
+work there as well as on Linux, OSX, and *BSD.
+
It is as simple as:
% make
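
A few build variants are driven by the Makefile; for instance MALLOC=libc
forces the libc allocator instead of the default, while the noopt and gcov
targets build without optimizations and with coverage instrumentation
respectively:

% make MALLOC=libc
% make noopt
% make gcov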
############################## APPEND ONLY MODE ###############################
-# By default Redis asynchronously dumps the dataset on disk. If you can live
-# with the idea that the latest records will be lost if something like a crash
-# happens this is the preferred way to run Redis. If instead you care a lot
-# about your data and don't want to that a single record can get lost you should
-# enable the append only mode: when this mode is enabled Redis will append
-# every write operation received in the file appendonly.aof. This file will
-# be read on startup in order to rebuild the full dataset in memory.
-#
-# Note that you can have both the async dumps and the append only file if you
-# like (you have to comment the "save" statements above to disable the dumps).
-# Still if append only mode is enabled Redis will load the data from the
-# log file at startup ignoring the dump.rdb file.
-#
-# IMPORTANT: Check the BGREWRITEAOF to check how to rewrite the append
-# log file in background when it gets too big.
+# By default Redis asynchronously dumps the dataset on disk. This mode is
+# good enough in many applications, but an issue with the Redis process or
+# a power outage may result in a few minutes of writes lost (depending on
+# the configured save points).
+#
+# The Append Only File is an alternative persistence mode that provides
+# much better durability. For instance using the default data fsync policy
+# (see later in the config file) Redis can lose just one second of writes in a
+# dramatic event like a server power outage, or a single write if something
+# goes wrong with the Redis process itself while the operating system is
+# still running correctly.
+#
+# AOF and RDB persistence can be enabled at the same time without problems.
+# If the AOF is enabled, on startup Redis will load the AOF, that is, the
+# file with the better durability guarantees.
+#
+# Please check http://redis.io/topics/persistence for more information.
appendonly no
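#
# A minimal sketch of a more durable setup, using only the directives
# documented in this section (enable the AOF and keep the default fsync
# policy):
#
#   appendonly yes
#   appendfsync everysec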
#
# no: don't fsync, just let the OS flush the data when it wants. Faster.
# always: fsync after every write to the append only log. Slow, Safest.
-# everysec: fsync only if one second passed since the last fsync. Compromise.
+# everysec: fsync only once every second. Compromise.
#
# The default is "everysec" that's usually the right compromise between
# speed and data safety. It's up to you to understand if you can relax this to
# "no", letting the operating system flush the output buffer when it wants,
# or on the contrary, use "always" that's very slow but a bit safer than
# everysec.
#
+# For more details, please check the following article:
+# http://antirez.com/post/redis-persistence-demystified.html
+#
# If unsure, use "everysec".
# appendfsync always
# There is no limit to this length. Just be aware that it will consume memory.
# You can reclaim memory used by the slow log with SLOWLOG RESET.
-slowlog-max-len 1024
+slowlog-max-len 128
############################### ADVANCED CONFIG ###############################
--- /dev/null
+*.gcda
+*.gcno
+*.gcov
+redis.info
+lcov-html
# Copyright (C) 2009 Salvatore Sanfilippo <antirez at gmail dot com>
# This file is released under the BSD license, see the COPYING file
#
-# The Makefile composes the final REDIS_CFLAGS and REDIS_LDFLAGS using
+# The Makefile composes the final FINAL_CFLAGS and FINAL_LDFLAGS using
# what is needed for Redis plus the standard CFLAGS and LDFLAGS passed.
# However when building the dependencies (Jemalloc, Lua, Hiredis, ...)
# CFLAGS and LDFLAGS are propagated to the dependencies, so to pass
-# flags only to be used when compiling / linking Redis itself ADD_CFLAGS
-# and ADD_LDFLAGS are used instead (this is the case of 'make gcov').
+# flags only to be used when compiling / linking Redis itself REDIS_CFLAGS
+# and REDIS_LDFLAGS are used instead (this is the case of 'make gcov').
#
# Dependencies are stored in the Makefile.dep file. To rebuild this file
# just use 'make dep', but this is only needed by developers.
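#
# For example, the 'make gcov' target below passes coverage flags to Redis
# only, leaving the dependencies untouched:
#
#   make REDIS_CFLAGS="-fprofile-arcs -ftest-coverage -DCOVERAGE_TEST" REDIS_LDFLAGS="-fprofile-arcs -ftest-coverage"
#
# whereas CFLAGS and LDFLAGS given on the command line also reach the
# dependencies.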
OPTIMIZATION?=-O2
DEPENDENCY_TARGETS=hiredis linenoise lua
+# Default settings
STD= -std=c99 -pedantic
WARN= -Wall
OPT= $(OPTIMIZATION)
-ifeq ($(uname_S),SunOS)
- REDIS_CFLAGS= $(STD) $(WARN) $(OPT) $(DEBUG) $(CFLAGS) $(ADD_CFLAGS) -D__EXTENSIONS__ -D_XPG6
- REDIS_LDFLAGS= $(LDFLAGS) $(ADD_LDFLAGS)
- REDIS_LIBS= $(LIBS) -ldl -lnsl -lsocket -lm -lpthread
- DEBUG= -g -ggdb
-else
- REDIS_CFLAGS= $(STD) $(WARN) $(OPT) $(DEBUG) $(CFLAGS) $(ADD_CFLAGS)
- REDIS_LDFLAGS= $(LDFLAGS) $(ADD_LDFLAGS)
- REDIS_LIBS= $(LIBS) -lm -pthread
- DEBUG= -g -rdynamic -ggdb
-endif
-
-# Include paths to dependencies
-REDIS_CFLAGS+= -I../deps/hiredis -I../deps/linenoise -I../deps/lua/src
-
# Default allocator
ifeq ($(uname_S),Linux)
- MALLOC?=jemalloc
+ MALLOC=jemalloc
else
- MALLOC?=libc
+ MALLOC=libc
endif
# Backwards compatibility for selecting an allocator
ifeq ($(USE_JEMALLOC),yes)
 MALLOC=jemalloc
endif
+# Override default settings if possible
+-include .make-settings
+
+ifeq ($(uname_S),SunOS)
+ FINAL_CFLAGS= $(STD) $(WARN) $(OPT) $(DEBUG) $(CFLAGS) $(REDIS_CFLAGS) -D__EXTENSIONS__ -D_XPG6
+ FINAL_LDFLAGS= $(LDFLAGS) $(REDIS_LDFLAGS)
+ FINAL_LIBS= -ldl -lnsl -lsocket -lm -lpthread
+ DEBUG= -g -ggdb
+else
+ FINAL_CFLAGS= $(STD) $(WARN) $(OPT) $(DEBUG) $(CFLAGS) $(REDIS_CFLAGS)
+ FINAL_LDFLAGS= $(LDFLAGS) $(REDIS_LDFLAGS)
+ FINAL_LIBS= -lm -pthread
+ DEBUG= -g -rdynamic -ggdb
+endif
+
+# Include paths to dependencies
+FINAL_CFLAGS+= -I../deps/hiredis -I../deps/linenoise -I../deps/lua/src
+
ifeq ($(MALLOC),tcmalloc)
- REDIS_CFLAGS+= -DUSE_TCMALLOC
- REDIS_LIBS+= -ltcmalloc
+ FINAL_CFLAGS+= -DUSE_TCMALLOC
+ FINAL_LIBS+= -ltcmalloc
endif
ifeq ($(MALLOC),tcmalloc_minimal)
- REDIS_CFLAGS+= -DUSE_TCMALLOC
- REDIS_LIBS+= -ltcmalloc_minimal
+ FINAL_CFLAGS+= -DUSE_TCMALLOC
+ FINAL_LIBS+= -ltcmalloc_minimal
endif
ifeq ($(MALLOC),jemalloc)
DEPENDENCY_TARGETS+= jemalloc
- REDIS_CFLAGS+= -DUSE_JEMALLOC -I../deps/jemalloc/include
- REDIS_LIBS+= ../deps/jemalloc/lib/libjemalloc.a -ldl
+ FINAL_CFLAGS+= -DUSE_JEMALLOC -I../deps/jemalloc/include
+ FINAL_LIBS+= ../deps/jemalloc/lib/libjemalloc.a -ldl
endif
-REDIS_CC=$(QUIET_CC)$(CC) $(REDIS_CFLAGS)
-REDIS_LD=$(QUIET_LINK)$(CC) $(REDIS_LDFLAGS)
+REDIS_CC=$(QUIET_CC)$(CC) $(FINAL_CFLAGS)
+REDIS_LD=$(QUIET_LINK)$(CC) $(FINAL_LDFLAGS)
PREFIX= /usr/local
INSTALL_BIN= $(PREFIX)/bin
.PHONY: dep
+persist-settings: distclean
+ echo STD=$(STD) >> .make-settings
+ echo WARN=$(WARN) >> .make-settings
+ echo OPT=$(OPT) >> .make-settings
+ echo MALLOC=$(MALLOC) >> .make-settings
+ echo CFLAGS=$(CFLAGS) >> .make-settings
+ echo LDFLAGS=$(LDFLAGS) >> .make-settings
+ echo REDIS_CFLAGS=$(REDIS_CFLAGS) >> .make-settings
+ echo REDIS_LDFLAGS=$(REDIS_LDFLAGS) >> .make-settings
+ echo PREV_FINAL_CFLAGS=$(FINAL_CFLAGS) >> .make-settings
+ echo PREV_FINAL_LDFLAGS=$(FINAL_LDFLAGS) >> .make-settings
+ -(cd ../deps && $(MAKE) $(DEPENDENCY_TARGETS))
+
+.PHONY: persist-settings
+
# Prerequisites target
.make-prerequisites:
@touch $@
-# Clean local objects and build dependencies when REDIS_CFLAGS is different
-ifneq ($(shell sh -c '[ -f .make-cflags ] && cat .make-cflags || echo none'), $(REDIS_CFLAGS))
-.make-cflags: clean
- -(cd ../deps && $(MAKE) $(DEPENDENCY_TARGETS))
- -(echo "$(REDIS_CFLAGS)" > .make-cflags)
-.make-prerequisites: .make-cflags
-endif
-
-# Clean local objects when REDIS_LDFLAGS is different
-ifneq ($(shell sh -c '[ -f .make-ldflags ] && cat .make-ldflags || echo none'), $(REDIS_LDFLAGS))
-.make-ldflags: clean
- -(echo "$(REDIS_LDFLAGS)" > .make-ldflags)
-.make-prerequisites: .make-ldflags
+# Clean everything, persist settings and build dependencies if anything changed
+ifneq ($(strip $(PREV_FINAL_CFLAGS)), $(strip $(FINAL_CFLAGS)))
+.make-prerequisites: persist-settings
endif
-# Clean local objects when MALLOC is different
-ifneq ($(shell sh -c '[ -f .make-malloc ] && cat .make-malloc || echo none'), $(MALLOC))
-.make-malloc: clean
- -(echo "$(MALLOC)" > .make-malloc)
-.make-prerequisites: .make-malloc
+ifneq ($(strip $(PREV_FINAL_LDFLAGS)), $(strip $(FINAL_LDFLAGS)))
+.make-prerequisites: persist-settings
endif
# redis-server
$(REDIS_SERVER_NAME): $(REDIS_SERVER_OBJ)
- $(REDIS_LD) -o $@ $^ ../deps/lua/src/liblua.a $(REDIS_LIBS)
+ $(REDIS_LD) -o $@ $^ ../deps/lua/src/liblua.a $(FINAL_LIBS)
# redis-cli
$(REDIS_CLI_NAME): $(REDIS_CLI_OBJ)
- $(REDIS_LD) -o $@ $^ ../deps/hiredis/libhiredis.a ../deps/linenoise/linenoise.o $(REDIS_LIBS)
+ $(REDIS_LD) -o $@ $^ ../deps/hiredis/libhiredis.a ../deps/linenoise/linenoise.o $(FINAL_LIBS)
# redis-benchmark
$(REDIS_BENCHMARK_NAME): $(REDIS_BENCHMARK_OBJ)
- $(REDIS_LD) -o $@ $^ ../deps/hiredis/libhiredis.a $(REDIS_LIBS)
+ $(REDIS_LD) -o $@ $^ ../deps/hiredis/libhiredis.a $(FINAL_LIBS)
# redis-check-dump
$(REDIS_CHECK_DUMP_NAME): $(REDIS_CHECK_DUMP_OBJ)
- $(REDIS_LD) -o $@ $^ $(REDIS_LIBS)
+ $(REDIS_LD) -o $@ $^ $(FINAL_LIBS)
# redis-check-aof
$(REDIS_CHECK_AOF_NAME): $(REDIS_CHECK_AOF_OBJ)
- $(REDIS_LD) -o $@ $^ $(REDIS_LIBS)
+ $(REDIS_LD) -o $@ $^ $(FINAL_LIBS)
# Because the jemalloc.h header is generated as a part of the jemalloc build,
# building it should complete before building any other object. Instead of
@(cd ..; ./runtest)
lcov:
- $(MAKE) clean gcov
+ $(MAKE) gcov
@(set -e; cd ..; ./runtest --clients 1)
@geninfo -o redis.info .
@genhtml --legend -o lcov-html redis.info
@echo ""
@echo "WARNING: if it fails under Linux you probably need to install libc6-dev-i386"
@echo ""
- $(MAKE) CFLAGS="$(CFLAGS) -m32" LDFLAGS="$(LDFLAGS) -m32"
+ $(MAKE) CFLAGS="-m32" LDFLAGS="-m32"
gcov:
- $(MAKE) ADD_CFLAGS="-fprofile-arcs -ftest-coverage -DCOVERAGE_TEST" ADD_LDFLAGS="-fprofile-arcs -ftest-coverage"
+ $(MAKE) REDIS_CFLAGS="-fprofile-arcs -ftest-coverage -DCOVERAGE_TEST" REDIS_LDFLAGS="-fprofile-arcs -ftest-coverage"
noopt:
$(MAKE) OPT="-O0"
loadServerConfig(argv[1],NULL);
} else if (!strcasecmp(argv[0],"maxclients") && argc == 2) {
server.maxclients = atoi(argv[1]);
+ if (server.maxclients < 1) {
+ err = "Invalid max clients limit"; goto loaderr;
+ }
} else if (!strcasecmp(argv[0],"maxmemory") && argc == 2) {
server.maxmemory = memtoll(argv[1],NULL);
} else if (!strcasecmp(argv[0],"maxmemory-policy") && argc == 2) {
/* ----------------------------- API implementation ------------------------- */
-/* Reset an hashtable already initialized with ht_init().
- * NOTE: This function should only called by ht_destroy(). */
+/* Reset a hash table already initialized with ht_init().
+ * NOTE: This function should only be called by ht_destroy(). */
static void _dictReset(dictht *ht)
{
ht->table = NULL;
return dictExpand(d, minimal);
}
-/* Expand or create the hashtable */
+/* Expand or create the hash table */
int dictExpand(dict *d, unsigned long size)
{
- dictht n; /* the new hashtable */
+ dictht n; /* the new hash table */
unsigned long realsize = _dictNextPower(size);
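 /* _dictNextPower() returns the first power of two >= size, so that
  * sizemask = realsize-1 can be ANDed with a key hash to select a bucket. */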
/* the size is invalid if it is smaller than the number of
- * elements already inside the hashtable */
+ * elements already inside the hash table */
if (dictIsRehashing(d) || d->ht[0].used > size)
return DICT_ERR;
- /* Allocate the new hashtable and initialize all pointers to NULL */
+ /* Allocate the new hash table and initialize all pointers to NULL */
n.size = realsize;
n.sizemask = realsize-1;
n.table = zcalloc(realsize*sizeof(dictEntry*));
* a value returns the dictEntry structure to the user, that will make
* sure to fill the value field as he wishes.
*
- * This function is also directly expoed to user API to be called
+ * This function is also directly exposed to the user API to be called
* mainly in order to store non-pointers inside the hash value, example:
*
* entry = dictAddRaw(dict,mykey);
unsigned int h, idx, table;
dictEntry *he;
- /* Expand the hashtable if needed */
+ /* Expand the hash table if needed */
if (_dictExpandIfNeeded(d) == DICT_ERR)
return -1;
/* Compute the key hash value */
d->iterators = 0;
}
+void dictEnableResize(void) {
+ dict_can_resize = 1;
+}
+
+void dictDisableResize(void) {
+ dict_can_resize = 0;
+}
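+
+/* Note: disabling resize is just a hint, typically used while a child
+ * process is saving (RDB dump or AOF rewrite), since rehashing moves a lot
+ * of memory around and hurts copy-on-write; the table is still expanded if
+ * it becomes far too small for the number of elements it holds. */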
+
+#if 0
+
+/* The following is code that we don't use for Redis currently, but that is
+ * part of the library. */
+
+/* ----------------------- Debugging ------------------------*/
+
#define DICT_STATS_VECTLEN 50
static void _dictPrintStatsHt(dictht *ht) {
unsigned long i, slots = 0, chainlen, maxchainlen = 0;
}
}
-void dictEnableResize(void) {
- dict_can_resize = 1;
-}
-
-void dictDisableResize(void) {
- dict_can_resize = 0;
-}
-
-#if 0
-
-/* The following are just example hash table types implementations.
- * Not useful for Redis so they are commented out.
- */
-
/* ----------------------- StringCopy Hash Table Type ------------------------*/
static unsigned int _dictStringCopyHTHashFunction(const void *key)
}
/* This will also be called when the set was just converted
- * to regular hashtable encoded set */
+ * to a regular hash table encoded set */
if (o->encoding == REDIS_ENCODING_HT) {
dictAdd((dict*)o->ptr,ele,NULL);
} else {
int cluster_mode;
int cluster_reissue_command;
int slave_mode;
+ int bigkeys;
int stdinarg; /* get last arg from stdin. (-x option) */
char *auth;
int output; /* output mode, see OUTPUT_* defines */
config.latency_mode = 1;
} else if (!strcmp(argv[i],"--slave")) {
config.slave_mode = 1;
+ } else if (!strcmp(argv[i],"--bigkeys")) {
+ config.bigkeys = 1;
} else if (!strcmp(argv[i],"--eval") && !lastarg) {
config.eval = argv[++i];
} else if (!strcmp(argv[i],"-c")) {
" --raw Use raw formatting for replies (default when STDOUT is not a tty)\n"
" --latency Enter a special mode continuously sampling latency.\n"
" --slave Simulate a slave showing commands received from the master.\n"
+" --bigkeys Sample Redis keys looking for big keys.\n"
" --eval <file> Send an EVAL command using the Lua script at <file>.\n"
" --help Output this help and exit\n"
" --version Output version and exit\n"
while (cliReadReply(0) == REDIS_OK);
}
+#define TYPE_STRING 0
+#define TYPE_LIST 1
+#define TYPE_SET 2
+#define TYPE_HASH 3
+#define TYPE_ZSET 4
+
+static void findBigKeys(void) {
+ unsigned long long biggest[5] = {0,0,0,0,0};
+ unsigned long long samples = 0;
+ redisReply *reply1, *reply2, *reply3;
+ char *sizecmd, *typename[] = {"string","list","set","hash","zset"};
+ int type;
+
+ printf("\n# Press ctrl+c when you have had enough of it... :)\n");
+ printf("# You can use -i 0.1 to sleep 0.1 sec every 100 sampled keys\n");
+ printf("# in order to reduce server load (usually not needed).\n\n");
+ while(1) {
+ /* Sample with RANDOMKEY */
+ reply1 = redisCommand(context,"RANDOMKEY");
+ if (reply1 == NULL) {
+ fprintf(stderr,"\nI/O error\n");
+ exit(1);
+ } else if (reply1->type == REDIS_REPLY_ERROR) {
+ fprintf(stderr, "RANDOMKEY error: %s\n",
+ reply1->str);
+ exit(1);
+ }
+ /* Get the key type */
+ reply2 = redisCommand(context,"TYPE %s",reply1->str);
+ assert(reply2 && reply2->type == REDIS_REPLY_STATUS);
+ samples++;
+
+ /* Get the key "size" */
+ if (!strcmp(reply2->str,"string")) {
+ sizecmd = "STRLEN";
+ type = TYPE_STRING;
+ } else if (!strcmp(reply2->str,"list")) {
+ sizecmd = "LLEN";
+ type = TYPE_LIST;
+ } else if (!strcmp(reply2->str,"set")) {
+ sizecmd = "SCARD";
+ type = TYPE_SET;
+ } else if (!strcmp(reply2->str,"hash")) {
+ sizecmd = "HLEN";
+ type = TYPE_HASH;
+ } else if (!strcmp(reply2->str,"zset")) {
+ sizecmd = "ZCARD";
+ type = TYPE_ZSET;
+ } else if (!strcmp(reply2->str,"none")) {
+ /* The key expired or was removed between RANDOMKEY and TYPE: just
+ * skip it. Note that reply3 has not been assigned for this key, so
+ * it must not be freed here. */
+ freeReplyObject(reply1);
+ freeReplyObject(reply2);
+ continue;
+ } else {
+ fprintf(stderr, "Unknown key type '%s' for key '%s'\n",
+ reply2->str, reply1->str);
+ exit(1);
+ }
+
+ reply3 = redisCommand(context,"%s %s", sizecmd, reply1->str);
+ if (reply3 && reply3->type == REDIS_REPLY_INTEGER) {
+ if (biggest[type] < reply3->integer) {
+ printf("[%6s] %s | biggest so far with size %llu\n",
+ typename[type], reply1->str,
+ (unsigned long long) reply3->integer);
+ biggest[type] = reply3->integer;
+ }
+ }
+
+ if ((samples % 1000000) == 0)
+ printf("(%llu keys sampled)\n", samples);
+
+ if ((samples % 100) == 0 && config.interval)
+ usleep(config.interval);
+
+ freeReplyObject(reply1);
+ freeReplyObject(reply2);
+ if (reply3) freeReplyObject(reply3);
+ }
+}
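+
+/* Usage sketch (hypothetical key name shown; the --bigkeys option is wired
+ * up in main() below):
+ *
+ *   $ redis-cli --bigkeys
+ *   [  list] mylist | biggest so far with size 42
+ *
+ * Since sampling uses RANDOMKEY, the scan is probabilistic: the keys printed
+ * are only the biggest among those sampled so far, and the loop runs until
+ * interrupted with ctrl+c. */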
+
int main(int argc, char **argv) {
int firstarg;
config.pubsub_mode = 0;
config.latency_mode = 0;
config.cluster_mode = 0;
+ config.slave_mode = 0;
+ config.bigkeys = 0;
config.stdinarg = 0;
config.auth = NULL;
config.eval = NULL;
slaveMode();
}
+ /* Find big keys */
+ if (config.bigkeys) {
+ cliConnect(0);
+ findBigKeys();
+ }
+
/* Start interactive mode when no command is provided */
if (argc == 0 && !config.eval) {
/* Note that in repl mode we don't abort on connection error.
rlim_t maxfiles = server.maxclients+32;
struct rlimit limit;
- if (maxfiles < 1024) maxfiles = 1024;
if (getrlimit(RLIMIT_NOFILE,&limit) == -1) {
redisLog(REDIS_WARNING,"Unable to obtain the current NOFILE limit (%s), assuming 1024 and setting the max clients configuration accordingly.",
strerror(errno));
/* Lua script too slow? Only allow SHUTDOWN NOSAVE and SCRIPT KILL. */
if (server.lua_timedout &&
- !(c->cmd->proc != shutdownCommand &&
+ !(c->cmd->proc == shutdownCommand &&
c->argc == 2 &&
tolower(((char*)c->argv[1]->ptr)[0]) == 'n') &&
!(c->cmd->proc == scriptCommand &&
#define REDIS_AOF_REWRITE_MIN_SIZE (1024*1024)
#define REDIS_AOF_REWRITE_ITEMS_PER_CMD 64
#define REDIS_SLOWLOG_LOG_SLOWER_THAN 10000
-#define REDIS_SLOWLOG_MAX_LEN 64
+#define REDIS_SLOWLOG_MAX_LEN 128
#define REDIS_MAX_CLIENTS 10000
#define REDIS_REPL_TIMEOUT 60
#endif
}
+/* Remove functions that we don't want to expose to the Redis scripting
+ * environment. */
+void luaRemoveUnsupportedFunctions(lua_State *lua) {
+ lua_pushnil(lua);
+ lua_setglobal(lua,"loadfile");
+}
+
/* This function installs metamethods in the global table _G that prevent
* the creation of globals accidentally.
*
* See scriptingReset() for more information. */
void scriptingInit(void) {
lua_State *lua = lua_open();
+
luaLoadLibraries(lua);
+ luaRemoveUnsupportedFunctions(lua);
/* Initialize a dictionary we use to map SHAs to scripts.
* This is useful for replication, as we need to replicate EVALSHA
* this function. */
slowlogEntry *slowlogCreateEntry(robj **argv, int argc, long long duration) {
slowlogEntry *se = zmalloc(sizeof(*se));
- int j;
+ int j, slargc = argc;
+
+ if (slargc > SLOWLOG_ENTRY_MAX_ARGC) slargc = SLOWLOG_ENTRY_MAX_ARGC;
+ se->argc = slargc;
+ se->argv = zmalloc(sizeof(robj*)*slargc);
+ for (j = 0; j < slargc; j++) {
+ /* Logging too many arguments is a useless memory waste, so we stop
+ * at SLOWLOG_ENTRY_MAX_ARGC, but use the last argument to specify
+ * how many remaining arguments there were in the original command. */
+ if (slargc != argc && j == slargc-1) {
+ se->argv[j] = createObject(REDIS_STRING,
+ sdscatprintf(sdsempty(),"... (%d more arguments)",
+ argc-slargc+1));
+ } else {
+ /* Trim too long strings as well... */
+ if (argv[j]->type == REDIS_STRING &&
+ argv[j]->encoding == REDIS_ENCODING_RAW &&
+ sdslen(argv[j]->ptr) > SLOWLOG_ENTRY_MAX_STRING)
+ {
+ sds s = sdsnewlen(argv[j]->ptr, SLOWLOG_ENTRY_MAX_STRING);
- se->argc = argc;
- se->argv = zmalloc(sizeof(robj*)*argc);
- for (j = 0; j < argc; j++) {
- se->argv[j] = argv[j];
- incrRefCount(argv[j]);
+ s = sdscatprintf(s,"... (%lu more bytes)",
+ (unsigned long)
+ sdslen(argv[j]->ptr) - SLOWLOG_ENTRY_MAX_STRING);
+ se->argv[j] = createObject(REDIS_STRING,s);
+ } else {
+ se->argv[j] = argv[j];
+ incrRefCount(argv[j]);
+ }
+ }
}
se->time = time(NULL);
se->duration = duration;
+#define SLOWLOG_ENTRY_MAX_ARGC 32
+#define SLOWLOG_ENTRY_MAX_STRING 128
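+
+/* Example: with the defaults above, a command called with 33 arguments is
+ * logged as 31 arguments plus a final "... (2 more arguments)" entry, and
+ * any argument longer than 128 bytes is truncated and suffixed with
+ * "... (N more bytes)" (see slowlogCreateEntry() in slowlog.c). */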
+
/* This structure defines an entry inside the slow log list */
typedef struct slowlogEntry {
robj **argv;
robj *lookupKeyByPattern(redisDb *db, robj *pattern, robj *subst) {
char *p, *f, *k;
sds spat, ssub;
- robj *keyobj, *fieldobj, *o;
+ robj *keyobj, *fieldobj = NULL, *o;
int prefixlen, sublen, postfixlen, fieldlen;
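 /* fieldobj is non-NULL only when the pattern dereferences a hash field
  * (the "->" syntax); the checks below use it, rather than fieldlen, to
  * decide whether the value must be looked up inside a hash. */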
/* If the pattern is "#" return the substitution object itself in order
o = lookupKeyRead(db,keyobj);
if (o == NULL) goto noobj;
- if (fieldlen > 0) {
+ if (fieldobj) {
if (o->type != REDIS_HASH) goto noobj;
/* Retrieve value from hash by the field name. This operation
incrRefCount(o);
}
decrRefCount(keyobj);
- if (fieldlen) decrRefCount(fieldobj);
+ if (fieldobj) decrRefCount(fieldobj);
return o;
noobj:
checkType(c,sobj,REDIS_LIST)) return;
if (listTypeLength(sobj) == 0) {
+ /* This may only happen after loading very old RDB files. Recent
+ * versions of Redis delete keys of empty lists. */
addReply(c,shared.nullbulk);
} else {
robj *dobj = lookupKeyWrite(c->db,c->argv[2]);
}
/* Convert the set to the specified encoding. The resulting dict (when converting
- * to a hashtable) is presized to hold the number of elements in the original
+ * to a hash table) is presized to hold the number of elements in the original
* set. */
void setTypeConvert(robj *setobj, int enc) {
setTypeIterator *si;
* <len> lengths are encoded in a single value or in a 5 bytes value.
* If the first byte value (as an unsigned 8 bit value) is between 0 and
* 252, it's a single-byte length. If it is 253 then a four bytes unsigned
- * integer follows (in the host byte ordering). A value fo 255 is used to
+ * integer follows (in the host byte ordering). A value of 255 is used to
* signal the end of the hash. The special value 254 is used to mark
* empty space that can be used to add new key/value pairs.
*
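 * For example, a 3 byte long key is encoded with the single length byte 3,
 * while a 300 byte long key is encoded as the byte 253 followed by the value
 * 300 as a four byte unsigned integer in host byte order.
 *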
- * <free> is the number of free unused bytes
- * after the string, resulting from modification of values associated to a
- * key (for instance if "foo" is set to "bar', and later "foo" will be se to
- * "hi", I'll have a free byte to use if the value will enlarge again later,
- * or even in order to add a key/value pair if it fits.
+ * <free> is the number of free unused bytes after the string, resulting
+ * from modification of values associated with a key. For instance if "foo"
+ * is set to "bar", and later "foo" is set to "hi", there will be a free
+ * byte to use if the value enlarges again later, or even in order to add
+ * a key/value pair if it fits.
*
* <free> is always an unsigned 8 bit number, because if after an
* update operation there are more than a few free bytes, the zipmap will be
############################### ADVANCED CONFIG ###############################
# Hashes are encoded in a special way (much more memory efficient) when they
-# have at max a given numer of elements, and the biggest element does not
+# have at max a given number of elements, and the biggest element does not
# exceed a given threshold. You can configure these limits with the following
# configuration directives.
hash-max-ziplist-entries 64
# order to help rehashing the main Redis hash table (the one mapping top-level
# keys to values). The hash table implementation Redis uses (see dict.c)
# performs a lazy rehashing: the more operations you run against a hash table
-# that is rhashing, the more rehashing "steps" are performed, so if the
+# that is rehashing, the more rehashing "steps" are performed, so if the
# server is idle the rehashing is never complete and some more memory is used
# by the hash table.
#
catch {
while {$c < 50} {
incr c
- redis_deferring_client
+ set rd [redis_deferring_client]
+ $rd ping
+ $rd read
after 100
}
} e
puts -nonewline $s $seq
set payload [string repeat A 1024]"\n"
set test_start [clock seconds]
- set test_time_limit 5
+ set test_time_limit 30
while 1 {
if {[catch {
puts -nonewline $s $payload
lappend res [r eval $decr_if_gt 1 foo 2]
set res
} {4 3 2 2 2}
+
+ test {Scripting engine resets PRNG at every script execution} {
+ set rand1 [r eval {return tostring(math.random())} 0]
+ set rand2 [r eval {return tostring(math.random())} 0]
+ assert_equal $rand1 $rand2
+ }
+
+ test {Scripting engine PRNG can be seeded correctly} {
+ set rand1 [r eval {
+ math.randomseed(ARGV[1]); return tostring(math.random())
+ } 0 10]
+ set rand2 [r eval {
+ math.randomseed(ARGV[1]); return tostring(math.random())
+ } 0 10]
+ set rand3 [r eval {
+ math.randomseed(ARGV[1]); return tostring(math.random())
+ } 0 20]
+ assert_equal $rand1 $rand2
+ assert {$rand2 ne $rand3}
+ }
+}
+
+# Start a new server since the last test in this stanza will kill the
+# instance.
+start_server {tags {"scripting"}} {
+ test {Timedout read-only scripts can be killed by SCRIPT KILL} {
+ set rd [redis_deferring_client]
+ r config set lua-time-limit 10
+ $rd eval {while true do end} 0
+ after 200
+ catch {r ping} e
+ assert_match {BUSY*} $e
+ r script kill
+ assert_equal [r ping] "PONG"
+ }
+
+ test {Timedout scripts that modified data can't be killed by SCRIPT KILL} {
+ set rd [redis_deferring_client]
+ r config set lua-time-limit 10
+ $rd eval {redis.call('set','x','y'); while true do end} 0
+ after 200
+ catch {r ping} e
+ assert_match {BUSY*} $e
+ catch {r script kill} e
+ assert_match {ERR*} $e
+ catch {r ping} e
+ assert_match {BUSY*} $e
+ }
+
+ test {SHUTDOWN NOSAVE can kill a timedout script anyway} {
+ # The server should still be unresponsive to normal commands.
+ catch {r ping} e
+ assert_match {BUSY*} $e
+ catch {r shutdown nosave}
+ # Make sure the server was killed
+ catch {set rd [redis_deferring_client]} e
+ assert_match {*connection refused*} $e
+ }
}
start_server {tags {"scripting repl"}} {
assert_equal [expr {[lindex $e 2] > 100000}] 1
assert_equal [lindex $e 3] {debug sleep 0.2}
}
+
+ test {SLOWLOG - commands with too many arguments are trimmed} {
+ r config set slowlog-log-slower-than 0
+ r slowlog reset
+ r sadd set 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33
+ set e [lindex [r slowlog get] 0]
+ lindex $e 3
+ } {sadd set 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 {... (2 more arguments)}}
+
+ test {SLOWLOG - too long arguments are trimmed} {
+ r config set slowlog-log-slower-than 0
+ r slowlog reset
+ set arg [string repeat A 129]
+ r sadd set foo $arg
+ set e [lindex [r slowlog get] 0]
+ lindex $e 3
+ } {sadd set foo {AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA... (1 more bytes)}}
}
r hset hash kkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkk b
r hget hash kkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkkk
} {b}
+
+ foreach size {10 512} {
+ test "Hash fuzzing - $size fields" {
+ for {set times 0} {$times < 10} {incr times} {
+ catch {unset hash}
+ array set hash {}
+ r del hash
+
+ # Create
+ for {set j 0} {$j < $size} {incr j} {
+ set field [randomValue]
+ set value [randomValue]
+ r hset hash $field $value
+ set hash($field) $value
+ }
+
+ # Verify
+ foreach {k v} [array get hash] {
+ assert_equal $v [r hget hash $k]
+ }
+ assert_equal [array size hash] [r hlen hash]
+ }
+ }
+ }
}
} {
source "tests/unit/type/list-common.tcl"
- test {LPUSH, RPUSH, LLENGTH, LINDEX - ziplist} {
+ test {LPUSH, RPUSH, LLENGTH, LINDEX, LPOP - ziplist} {
# first lpush then rpush
assert_equal 1 [r lpush myziplist1 a]
assert_equal 2 [r rpush myziplist1 b]
assert_equal a [r lindex myziplist1 0]
assert_equal b [r lindex myziplist1 1]
assert_equal c [r lindex myziplist1 2]
+ assert_equal {} [r lindex myziplist1 3]
+ assert_equal c [r rpop myziplist1]
+ assert_equal a [r lpop myziplist1]
assert_encoding ziplist myziplist1
# first rpush then lpush
assert_equal c [r lindex myziplist2 0]
assert_equal b [r lindex myziplist2 1]
assert_equal a [r lindex myziplist2 2]
+ assert_equal {} [r lindex myziplist2 3]
+ assert_equal a [r rpop myziplist2]
+ assert_equal c [r lpop myziplist2]
assert_encoding ziplist myziplist2
}
- test {LPUSH, RPUSH, LLENGTH, LINDEX - regular list} {
+ test {LPUSH, RPUSH, LLENGTH, LINDEX, LPOP - regular list} {
# first lpush then rpush
assert_equal 1 [r lpush mylist1 $largevalue(linkedlist)]
assert_encoding linkedlist mylist1
assert_equal $largevalue(linkedlist) [r lindex mylist1 0]
assert_equal b [r lindex mylist1 1]
assert_equal c [r lindex mylist1 2]
+ assert_equal {} [r lindex mylist1 3]
+ assert_equal c [r rpop mylist1]
+ assert_equal $largevalue(linkedlist) [r lpop mylist1]
# first rpush then lpush
assert_equal 1 [r rpush mylist2 $largevalue(linkedlist)]
assert_equal c [r lindex mylist2 0]
assert_equal b [r lindex mylist2 1]
assert_equal $largevalue(linkedlist) [r lindex mylist2 2]
+ assert_equal {} [r lindex mylist2 3]
+ assert_equal $largevalue(linkedlist) [r rpop mylist2]
+ assert_equal c [r lpop mylist2]
}
+ test {R/LPOP against empty list} {
+ r lpop non-existing-list
+ } {}
+
test {Variadic RPUSH/LPUSH} {
r del mylist
assert_equal 4 [r lpush mylist a b c d]
}
}
+ test {LINSERT raise error on bad syntax} {
+ catch {[r linsert xlist aft3r aa 42]} e
+ set e
+ } {*ERR*syntax*error*}
+
test {LPUSHX, RPUSHX convert from ziplist to list} {
set large $largevalue(linkedlist)
}
}
+ test "SDIFF with first set empty" {
+ r del set1 set2 set3
+ r sadd set2 1 2 3 4
+ r sadd set3 a b c d
+ r sdiff set1 set2 set3
+ } {}
+
test "SINTER against non-set should throw error" {
r set key1 x
assert_error "ERR*wrong kind*" {r sinter key1 noset}
assert_error "ERR*wrong kind*" {r sunion key1 noset}
}
+ test "SINTER should handle non existing key as empty" {
+ r del set1 set2 set3
+ r sadd set1 a b c
+ r sadd set2 b c d
+ r sinter set1 set2 set3
+ } {}
+
+ test "SINTER with same integer elements but different encoding" {
+ r del set1 set2
+ r sadd set1 1 2 3
+ r sadd set2 1 2 3 a
+ r srem set2 a
+ assert_encoding intset set1
+ assert_encoding hashtable set2
+ lsort [r sinter set1 set2]
+ } {1 2 3}
+
test "SINTERSTORE against non existing keys should delete dstkey" {
r set setres xxx
assert_equal 0 [r sinterstore setres foo111 bar222]
assert_error "ERR*wrong kind*" {r smove myset2 x foo}
}
+ test "SMOVE with identical source and destination" {
+ r del set
+ r sadd set a b c
+ r smove set set b
+ lsort [r smembers set]
+ } {a b c}
+
tags {slow} {
test {intsets implementation stress testing} {
for {set j 0} {$j < 20} {incr j} {