#
slave-serve-stale-data yes
+# You can configure a slave instance to accept writes or not. Writing against
+# a slave instance may be useful to store some ephemeral data (because data
+# written on a slave will be easily deleted after resync with the master) but
+# may also cause problems if clients are writing to it because of a
+# misconfiguration.
+#
+# Since Redis 2.6 by default slaves are read-only.
+#
+# Note: read only slaves are not designed to be exposed to untrusted clients
+# on the internet. It's just a protection layer against misuse of the instance.
+# Still a read only slave exports by default all the administrative commands
+# such as CONFIG, DEBUG, and so forth. To a limited extent you can improve
+# security of read only slaves using 'rename-command' to shadow all the
+# administrative / dangerous commands.
+slave-read-only yes
+
# Slaves send PINGs to server in a predefined interval. It's possible to change
# this interval with the repl_ping_slave_period option. The default value is 10
# seconds.
QUIET_LINK = @printf ' %b %b\n' $(LINKCOLOR)LINK$(ENDCOLOR) $(BINCOLOR)$@$(ENDCOLOR);
endif
-OBJ = adlist.o ae.o anet.o dict.o redis.o sds.o zmalloc.o lzf_c.o lzf_d.o pqsort.o zipmap.o sha1.o ziplist.o release.o networking.o util.o object.o db.o replication.o rdb.o t_string.o t_list.o t_set.o t_zset.o t_hash.o config.o aof.o pubsub.o multi.o debug.o sort.o intset.o syncio.o cluster.o crc16.o endianconv.o slowlog.o scripting.o bio.o rio.o rand.o
+OBJ = adlist.o ae.o anet.o dict.o redis.o sds.o zmalloc.o lzf_c.o lzf_d.o pqsort.o zipmap.o sha1.o ziplist.o release.o networking.o util.o object.o db.o replication.o rdb.o t_string.o t_list.o t_set.o t_zset.o t_hash.o config.o aof.o pubsub.o multi.o debug.o sort.o intset.o syncio.o cluster.o crc16.o endianconv.o slowlog.o scripting.o bio.o rio.o rand.o memtest.o
BENCHOBJ = ae.o anet.o redis-benchmark.o sds.o adlist.o zmalloc.o
CLIOBJ = anet.o sds.o adlist.o redis-cli.o zmalloc.o release.o
CHECKDUMPOBJ = redis-check-dump.o lzf_c.o lzf_d.o
/* Called when the user switches from "appendonly no" to "appendonly yes"
* at runtime using the CONFIG command. */
int startAppendOnly(void) {
- server.aof_last_fsync = time(NULL);
+ server.aof_last_fsync = server.unixtime;
server.aof_fd = open(server.aof_filename,O_WRONLY|O_APPEND|O_CREAT,0644);
redisAssert(server.aof_state == REDIS_AOF_OFF);
if (server.aof_fd == -1) {
}
/* Otherwise fall through, and go write since we can't wait
* over two seconds. */
+ server.aof_delayed_fsync++;
redisLog(REDIS_NOTICE,"Asynchronous AOF fsync is taking too long (disk is busy?). Writing the AOF buffer without waiting for fsync to complete, this may slow down Redis.");
}
}
if ((server.repl_serve_stale_data = yesnotoi(argv[1])) == -1) {
err = "argument must be 'yes' or 'no'"; goto loaderr;
}
+ } else if (!strcasecmp(argv[0],"slave-read-only") && argc == 2) {
+ if ((server.repl_slave_ro = yesnotoi(argv[1])) == -1) {
+ err = "argument must be 'yes' or 'no'"; goto loaderr;
+ }
} else if (!strcasecmp(argv[0],"rdbcompression") && argc == 2) {
if ((server.rdb_compression = yesnotoi(argv[1])) == -1) {
err = "argument must be 'yes' or 'no'"; goto loaderr;
if (yn == -1) goto badfmt;
server.repl_serve_stale_data = yn;
+ } else if (!strcasecmp(c->argv[2]->ptr,"slave-read-only")) {
+ int yn = yesnotoi(o->ptr);
+
+ if (yn == -1) goto badfmt;
+ server.repl_slave_ro = yn;
} else if (!strcasecmp(c->argv[2]->ptr,"dir")) {
if (chdir((char*)o->ptr) == -1) {
addReplyErrorFormat(c,"Changing directory: %s", strerror(errno));
server.aof_no_fsync_on_rewrite);
config_get_bool_field("slave-serve-stale-data",
server.repl_serve_stale_data);
+ config_get_bool_field("slave-read-only",
+ server.repl_slave_ro);
config_get_bool_field("stop-writes-on-bgsave-error",
server.stop_writes_on_bgsave_err);
config_get_bool_field("daemonize", server.daemonize);
server.stat_numcommands = 0;
server.stat_numconnections = 0;
server.stat_expiredkeys = 0;
+ server.stat_rejected_conn = 0;
+ server.stat_fork_time = 0;
+ server.aof_delayed_fsync = 0;
resetCommandTableStats();
addReply(c,shared.ok);
} else {
* C-level DB API
*----------------------------------------------------------------------------*/
-/* Important notes on lookup and disk store.
- *
- * When disk store is enabled on lookup we can have different cases.
- *
- * a) The key is in memory:
- * - If the key is not in IO_SAVEINPROG state we can access it.
- * As if it's just IO_SAVE this means we have the key in the IO queue
- * but can't be accessed by the IO thread (it requires to be
- * translated into an IO Job by the cache cron function.)
- * - If the key is in IO_SAVEINPROG we can't touch the key and have
- * to blocking wait completion of operations.
- * b) The key is not in memory:
- * - If it's marked as non existing on disk as well (negative cache)
- * we don't need to perform the disk access.
- * - if the key MAY EXIST, but is not in memory, and it is marked as IO_SAVE
- * then the key can only be a deleted one. As IO_SAVE keys are never
- * evicted (dirty state), so the only possibility is that key was deleted.
- * - if the key MAY EXIST we need to blocking load it.
- * We check that the key is not in IO_SAVEINPROG state before accessing
- * the disk object. If it is in this state, we wait.
- */
-
robj *lookupKey(redisDb *db, robj *key) {
dictEntry *de = dictFind(db->dict,key->ptr);
if (de) {
}
}
-/* Empty the whole database.
- * If diskstore is enabled this function will just flush the in-memory cache. */
long long emptyDb() {
int j;
long long removed = 0;
* that is, 0 if we think the key should be still valid, 1 if
* we think the key is expired at this time. */
if (server.masterhost != NULL) {
- return time(NULL) > when;
+ return mstime() > when;
}
/* Return when this key has not expired */
#endif
}
-void sigsegvHandler(int sig, siginfo_t *info, void *secret) {
+/* Logs the stack trace using the backtrace() call. */
+void logStackTrace(ucontext_t *uc) {
void *trace[100];
-    char **messages = NULL;
int i, trace_size = 0;
-    ucontext_t *uc = (ucontext_t*) secret;
-    sds infostring, clients;
-    struct sigaction act;
-    REDIS_NOTUSED(info);
-
-    bugReportStart();
-    redisLog(REDIS_WARNING,
-        " Redis %s crashed by signal: %d", REDIS_VERSION, sig);
-    redisLog(REDIS_WARNING,
-        " Failed assertion: %s (%s:%d)", server.assert_failed,
-        server.assert_file, server.assert_line);
+    char **messages = NULL;
/* Generate the stack trace */
trace_size = backtrace(trace, 100);
redisLog(REDIS_WARNING, "--- STACK TRACE");
+/* NOTE(review): as visible in this hunk, 'messages' is never assigned
+ * (expected messages = backtrace_symbols(trace, trace_size) after the
+ * backtrace() call); if that line is not present as unshown context,
+ * the loop below dereferences NULL. Verify against the full file. */
for (i=1; i<trace_size; ++i)
redisLog(REDIS_WARNING,"%s", messages[i]);
+}
+
+/* Log information about the "current" client, that is, the client that is
+ * currently being served by Redis. May be NULL if Redis is not serving a
+ * client right now. */
+/* Log information about the "current" client, that is, the client that is
+ * currently being served by Redis. May be NULL if Redis is not serving a
+ * client right now. */
+void logCurrentClient(void) {
+    if (server.current_client == NULL) return;
+
+    redisClient *cc = server.current_client;
+    sds client;
+    int j;
+
+    redisLog(REDIS_WARNING, "--- CURRENT CLIENT INFO");
+    client = getClientInfoString(cc);
+    redisLog(REDIS_WARNING,"client: %s", client);
+    sdsfree(client);
+    /* Log every argument of the command being executed. */
+    for (j = 0; j < cc->argc; j++) {
+        robj *decoded;
+
+        decoded = getDecodedObject(cc->argv[j]);
+        redisLog(REDIS_WARNING,"argv[%d]: '%s'", j, (char*)decoded->ptr);
+        decrRefCount(decoded);
+    }
+    /* Check if the first argument, usually a key, is found inside the
+     * selected DB, and if so print info about the associated object.
+     * Note: argv[1] only exists when argc >= 2; testing argc >= 1 (as the
+     * previous code did) reads one element past the argument vector for
+     * argument-less commands. */
+    if (cc->argc >= 2) {
+        robj *val, *key;
+        dictEntry *de;
+
+        key = getDecodedObject(cc->argv[1]);
+        de = dictFind(cc->db->dict, key->ptr);
+        if (de) {
+            val = dictGetVal(de);
+            redisLog(REDIS_WARNING,"key '%s' found in DB containing the following object:", key->ptr);
+            redisLogObjectDebugInfo(val);
+        }
+        decrRefCount(key);
+    }
+}
+
+void sigsegvHandler(int sig, siginfo_t *info, void *secret) {
+ ucontext_t *uc = (ucontext_t*) secret;
+ sds infostring, clients;
+ struct sigaction act;
+ REDIS_NOTUSED(info);
+
+ bugReportStart();
+ redisLog(REDIS_WARNING,
+ " Redis %s crashed by signal: %d", REDIS_VERSION, sig);
+ redisLog(REDIS_WARNING,
+ " Failed assertion: %s (%s:%d)", server.assert_failed,
+ server.assert_file, server.assert_line);
+
+ /* Log the stack trace */
+ logStackTrace(uc);
/* Log INFO and CLIENT LIST */
redisLog(REDIS_WARNING, "--- INFO OUTPUT");
redisLog(REDIS_WARNING, "--- CLIENT LIST OUTPUT");
clients = getAllClientsInfoString();
redisLogRaw(REDIS_WARNING, clients);
- /* Don't sdsfree() strings to avoid a crash. Memory may be corrupted. */
-
- /* Log CURRENT CLIENT info */
- if (server.current_client) {
- redisClient *cc = server.current_client;
- sds client;
- int j;
+ sdsfree(infostring);
+ sdsfree(clients);
- redisLog(REDIS_WARNING, "--- CURRENT CLIENT INFO");
- client = getClientInfoString(cc);
- redisLog(REDIS_WARNING,"client: %s", client);
- /* Missing sdsfree(client) to avoid crash if memory is corrupted. */
- for (j = 0; j < cc->argc; j++) {
- robj *decoded;
-
- decoded = getDecodedObject(cc->argv[j]);
- redisLog(REDIS_WARNING,"argv[%d]: '%s'", j, (char*)decoded->ptr);
- decrRefCount(decoded);
- }
- /* Check if the first argument, usually a key, is found inside the
- * selected DB, and if so print info about the associated object. */
- if (cc->argc >= 1) {
- robj *val, *key;
- dictEntry *de;
-
- key = getDecodedObject(cc->argv[1]);
- de = dictFind(cc->db->dict, key->ptr);
- if (de) {
- val = dictGetVal(de);
- redisLog(REDIS_WARNING,"key '%s' found in DB containing the following object:", key->ptr);
- redisLogObjectDebugInfo(val);
- }
- decrRefCount(key);
- }
- }
+ /* Log the current client */
+ logCurrentClient();
/* Log dump of processor registers */
logRegisters(uc);
redisLog(REDIS_WARNING,
"\n=== REDIS BUG REPORT END. Make sure to include from START to END. ===\n\n"
-" Please report the crash opening an issue on github:\n\n"
-" http://github.com/antirez/redis/issues\n\n"
+" Please report the crash opening an issue on github:\n\n"
+" http://github.com/antirez/redis/issues\n\n"
+" Suspect RAM error? Use redis-server --test-memory to verify it.\n\n"
);
/* free(messages); Don't call free() with possibly corrupted memory. */
if (server.daemonize) unlink(server.pidfile);
--- /dev/null
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <assert.h>
+#include <limits.h>
+#include <errno.h>
+#include <termios.h>
+#include <sys/ioctl.h>
+
+#if (ULONG_MAX == 4294967295UL)
+#define MEMTEST_32BIT
+#elif (ULONG_MAX == 18446744073709551615ULL)
+#define MEMTEST_64BIT
+#else
+#error "ULONG_MAX value not supported."
+#endif
+
+/* Alternating-bit fill patterns sized to the platform word. The previous
+ * version had the branches swapped: MEMTEST_32BIT defined 64-bit constants
+ * (which cannot fit a 32-bit unsigned long) and vice versa. */
+#ifdef MEMTEST_32BIT
+#define ULONG_ONEZERO 0xaaaaaaaaUL
+#define ULONG_ZEROONE 0x55555555UL
+#else
+#define ULONG_ONEZERO 0xaaaaaaaaaaaaaaaaUL
+#define ULONG_ZEROONE 0x5555555555555555UL
+#endif
+
+/* Progress-bar state shared by the memtest_* functions below. 'ws' is
+ * filled by memtest() via ioctl(TIOCGWINSZ) before any test runs. */
+static struct winsize ws;
+size_t progress_printed; /* Printed chars in screen-wide progress bar. */
+size_t progress_full; /* How many chars to write to fill the progress bar. */
+
+/* Clear the screen, print the advice footer and the test title, and reset
+ * the progress-bar counters for a new test pass. */
+void memtest_progress_start(char *title, int pass) {
+    int j;
+
+    printf("\x1b[H\x1b[2J"); /* Cursor home, clear screen. */
+    /* Fill with dots. */
+    for (j = 0; j < ws.ws_col*(ws.ws_row-2); j++) printf(".");
+    printf("Please keep the test running several minutes per GB of memory.\n");
+    printf("Also check http://www.memtest86.com/ and http://pyropus.ca/software/memtester/");
+    printf("\x1b[H\x1b[2K"); /* Cursor home, clear current line. */
+    printf("%s [%d]\n", title, pass); /* Print title. */
+    progress_printed = 0;
+    progress_full = ws.ws_col*(ws.ws_row-3);
+    fflush(stdout);
+}
+
+/* Clear the screen when a test pass is done. */
+void memtest_progress_end(void) {
+    printf("\x1b[H\x1b[2J"); /* Cursor home, clear screen. */
+}
+
+/* Advance the progress bar to curr/size of progress_full characters,
+ * printing 'c' for each newly filled cell. Callers invoke this
+ * periodically with monotonically increasing 'curr'. */
+void memtest_progress_step(size_t curr, size_t size, char c) {
+    size_t chars = (curr*progress_full)/size, j;
+
+    for (j = 0; j < chars-progress_printed; j++) {
+        printf("%c",c);
+        progress_printed++;
+    }
+    fflush(stdout);
+}
+
+/* Test that addressing is fine. Every location is populated with its own
+ * address, and finally verified. This test is very fast but may detect
+ * ASAP big issues with the memory subsystem. */
+void memtest_addressing(unsigned long *l, size_t bytes) {
+    unsigned long words = bytes/sizeof(unsigned long);
+    unsigned long j, *p;
+
+    /* Fill: store in every word its own address. */
+    p = l;
+    for (j = 0; j < words; j++) {
+        *p = (unsigned long)p;
+        p++;
+        /* The whole test counts as words*2 progress steps (fill+verify),
+         * updated every 64K words to keep output cheap. */
+        if ((j & 0xffff) == 0) memtest_progress_step(j,words*2,'A');
+    }
+    /* Test: verify every word still contains its own address. */
+    p = l;
+    for (j = 0; j < words; j++) {
+        if (*p != (unsigned long)p) {
+            printf("\n*** MEMORY ADDRESSING ERROR: %p contains %lu\n",
+                (void*) p, *p);
+            exit(1);
+        }
+        p++;
+        if ((j & 0xffff) == 0) memtest_progress_step(j+words,words*2,'A');
+    }
+}
+
+/* Fill words stepping a single page at every write, so we continue to
+ * touch all the pages in the smallest amount of time reducing the
+ * effectiveness of caches, and making it hard for the OS to transfer
+ * pages on the swap.
+ *
+ * The region is logically split in two halves: each random word is
+ * written to the same offset in both halves (l1/l2), so that
+ * memtest_compare() can later verify the two halves still match. */
+void memtest_fill_random(unsigned long *l, size_t bytes) {
+    unsigned long step = 4096/sizeof(unsigned long);
+    unsigned long words = bytes/sizeof(unsigned long)/2;
+    unsigned long iwords = words/step; /* words per iteration */
+    unsigned long off, w, *l1, *l2;
+
+    /* Callers pass page-aligned sizes; the page-stepping logic relies
+     * on it. */
+    assert((bytes & 4095) == 0);
+    for (off = 0; off < step; off++) {
+        l1 = l+off;
+        l2 = l1+words;
+        for (w = 0; w < iwords; w++) {
+#ifdef MEMTEST_32BIT
+            *l1 = *l2 = ((unsigned long) (rand()&0xffff)) |
+                        (((unsigned long) (rand()&0xffff)) << 16);
+#else
+            *l1 = *l2 = ((unsigned long) (rand()&0xffff)) |
+                        (((unsigned long) (rand()&0xffff)) << 16) |
+                        (((unsigned long) (rand()&0xffff)) << 32) |
+                        (((unsigned long) (rand()&0xffff)) << 48);
+#endif
+            l1 += step;
+            l2 += step;
+            if ((w & 0xffff) == 0)
+                memtest_progress_step(w+iwords*off,words,'R');
+        }
+    }
+}
+
+/* Like memtest_fill_random() but uses the two specified values to fill
+ * memory, in an alternated way (v1|v2|v1|v2|...), printing 'sym' in the
+ * progress bar. As in memtest_fill_random() every word is mirrored in
+ * the second half of the region so memtest_compare() can verify it. */
+void memtest_fill_value(unsigned long *l, size_t bytes, unsigned long v1,
+                        unsigned long v2, char sym)
+{
+    unsigned long step = 4096/sizeof(unsigned long);
+    unsigned long words = bytes/sizeof(unsigned long)/2;
+    unsigned long iwords = words/step; /* words per iteration */
+    unsigned long off, w, *l1, *l2, v;
+
+    assert((bytes & 4095) == 0);
+    for (off = 0; off < step; off++) {
+        l1 = l+off;
+        l2 = l1+words;
+        /* Alternate the two fill values based on the page offset. */
+        v = (off & 1) ? v2 : v1;
+        for (w = 0; w < iwords; w++) {
+            /* Fix: store the requested pattern. The previous body was a
+             * copy-paste of memtest_fill_random(): it filled with rand()
+             * and never used v/v1/v2, making the solid and checkerboard
+             * passes identical to the random pass. */
+            *l1 = *l2 = v;
+            l1 += step;
+            l2 += step;
+            if ((w & 0xffff) == 0)
+                memtest_progress_step(w+iwords*off,words,sym);
+        }
+    }
+}
+
+/* Verify that the two halves of the region still hold identical data,
+ * as arranged by memtest_fill_random()/memtest_fill_value(). On the
+ * first mismatch print both addresses/values and exit(1). */
+void memtest_compare(unsigned long *l, size_t bytes) {
+    unsigned long words = bytes/sizeof(unsigned long)/2;
+    unsigned long w, *l1, *l2;
+
+    assert((bytes & 4095) == 0);
+    l1 = l;
+    l2 = l1+words;
+    for (w = 0; w < words; w++) {
+        if (*l1 != *l2) {
+            printf("\n*** MEMORY ERROR DETECTED: %p != %p (%lu vs %lu)\n",
+                            (void*)l1, (void*)l2, *l1, *l2);
+            exit(1);
+        }
+        l1 ++;
+        l2 ++;
+        if ((w & 0xffff) == 0) memtest_progress_step(w,words,'=');
+    }
+}
+
+/* Run memtest_compare() 'times' times, each run as its own progress-bar
+ * screen labeled "Compare" for the given pass number. */
+void memtest_compare_times(unsigned long *m, size_t bytes, int pass, int times) {
+    int j;
+
+    for (j = 0; j < times; j++) {
+        memtest_progress_start("Compare",pass);
+        memtest_compare(m,bytes);
+        memtest_progress_end();
+    }
+}
+
+/* Allocate 'megabytes' MB and run the full battery of tests on it
+ * (addressing, random fill, solid fill, checkerboard fill, each followed
+ * by four compare rounds), repeating until 'passes' passes are done.
+ * Exits with an error if the buffer cannot be allocated. */
+void memtest_test(size_t megabytes, int passes) {
+    size_t bytes = megabytes*1024*1024;
+    unsigned long *m = malloc(bytes);
+    int pass = 0;
+
+    if (m == NULL) {
+        fprintf(stderr,"Unable to allocate %zu megabytes: %s",
+            megabytes, strerror(errno));
+        exit(1);
+    }
+    while (pass != passes) {
+        pass++;
+
+        memtest_progress_start("Addressing test",pass);
+        memtest_addressing(m,bytes);
+        memtest_progress_end();
+
+        memtest_progress_start("Random fill",pass);
+        memtest_fill_random(m,bytes);
+        memtest_progress_end();
+        memtest_compare_times(m,bytes,pass,4);
+
+        memtest_progress_start("Solid fill",pass);
+        memtest_fill_value(m,bytes,0,(unsigned long)-1,'S');
+        memtest_progress_end();
+        memtest_compare_times(m,bytes,pass,4);
+
+        memtest_progress_start("Checkerboard fill",pass);
+        memtest_fill_value(m,bytes,ULONG_ONEZERO,ULONG_ZEROONE,'C');
+        memtest_progress_end();
+        memtest_compare_times(m,bytes,pass,4);
+    }
+}
+
+/* Entry point for --test-memory: query the terminal size for the
+ * progress bar (falling back to 80x20 when stdout is not a tty),
+ * run the test battery, print closing advice and exit(0).
+ * Never returns. */
+void memtest(size_t megabytes, int passes) {
+    if (ioctl(1, TIOCGWINSZ, &ws) == -1) {
+        ws.ws_col = 80;
+        ws.ws_row = 20;
+    }
+    memtest_test(megabytes,passes);
+    printf("\nYour memory passed this test.\n");
+    printf("Please if you are still in doubt use the following two tools:\n");
+    printf("1) memtest86: http://www.memtest86.com/\n");
+    printf("2) memtester: http://pyropus.ca/software/memtester/\n");
+    exit(0);
+}
c->mstate.count++;
}
+/* Reset the MULTI/EXEC state of a client: discard the queued commands,
+ * unwatch all the watched keys, and clear the transaction flags.
+ * Factored out so that both DISCARD and other callers can reuse it. */
+void discardTransaction(redisClient *c) {
+    freeClientMultiState(c);
+    initClientMultiState(c);
+    c->flags &= ~(REDIS_MULTI|REDIS_DIRTY_CAS); /* was ';;' — stray semicolon removed */
+    unwatchAllKeys(c);
+}
+
void multiCommand(redisClient *c) {
if (c->flags & REDIS_MULTI) {
addReplyError(c,"MULTI calls can not be nested");
addReplyError(c,"DISCARD without MULTI");
return;
}
-
- freeClientMultiState(c);
- initClientMultiState(c);
- c->flags &= ~(REDIS_MULTI|REDIS_DIRTY_CAS);;
- unwatchAllKeys(c);
+ discardTransaction(c);
addReply(c,shared.ok);
}
c->bulklen = -1;
c->sentlen = 0;
c->flags = 0;
- c->ctime = c->lastinteraction = time(NULL);
+ c->ctime = c->lastinteraction = server.unixtime;
c->authenticated = 0;
c->replstate = REDIS_REPL_NONE;
c->reply = listCreate();
if (c->flags & REDIS_MASTER) {
server.master = NULL;
server.repl_state = REDIS_REPL_CONNECT;
- server.repl_down_since = time(NULL);
+ server.repl_down_since = server.unixtime;
/* Since we lost the connection with the master, we should also
* close the connection with all our slaves if we have any, so
* when we'll resync with the master the other slaves will sync again
return;
}
}
- if (totwritten > 0) c->lastinteraction = time(NULL);
+ if (totwritten > 0) c->lastinteraction = server.unixtime;
if (c->bufpos == 0 && listLength(c->reply) == 0) {
c->sentlen = 0;
aeDeleteFileEvent(server.el,c->fd,AE_WRITABLE);
}
if (nread) {
sdsIncrLen(c->querybuf,nread);
- c->lastinteraction = time(NULL);
+ c->lastinteraction = server.unixtime;
} else {
server.current_client = NULL;
return;
sds getClientInfoString(redisClient *client) {
char ip[32], flags[16], events[3], *p;
int port;
- time_t now = time(NULL);
int emask;
anetPeerToString(client->fd,ip,&port);
return sdscatprintf(sdsempty(),
"addr=%s:%d fd=%d age=%ld idle=%ld flags=%s db=%d sub=%d psub=%d qbuf=%lu qbuf-free=%lu obl=%lu oll=%lu omem=%lu events=%s cmd=%s",
ip,port,client->fd,
- (long)(now - client->ctime),
- (long)(now - client->lastinteraction),
+ (long)(server.unixtime - client->ctime),
+ (long)(server.unixtime - client->lastinteraction),
flags,
client->db->id,
(int) dictSize(client->pubsub_channels),
* that is "non surprising" for the user (that is, most small decimal
* numbers will be represented in a way that when converted back into
* a string are exactly the same as what the user typed.) */
- len = snprintf(buf,sizeof(buf),"%.17Lg", value);
+ len = snprintf(buf,sizeof(buf),"%.17Lf", value);
+ /* Now remove trailing zeroes after the '.' */
+ if (strchr(buf,'.') != NULL) {
+ char *p = buf+len-1;
+ while(*p == '0') {
+ p--;
+ len--;
+ }
+ if (*p == '.') len--;
+ }
return createStringObject(buf,len);
}
/* Ok, this object can be encoded...
*
- * Can I use a shared object? Only if the object is inside a given
- * range and if the back end in use is in-memory. For disk store every
- * object in memory used as value should be independent.
+ * Can I use a shared object? Only if the object is inside a given range
*
* Note that we also avoid using shared integers when maxmemory is used
* because every object needs to have a private LRU field for the LRU
/* Add pair to ziplist */
o->ptr = ziplistPush(o->ptr, field->ptr, sdslen(field->ptr), ZIPLIST_TAIL);
o->ptr = ziplistPush(o->ptr, value->ptr, sdslen(value->ptr), ZIPLIST_TAIL);
-
/* Convert to hash table if size threshold is exceeded */
if (sdslen(field->ptr) > server.hash_max_ziplist_value ||
sdslen(value->ptr) > server.hash_max_ziplist_value)
{
+ decrRefCount(field);
+ decrRefCount(value);
hashTypeConvert(o, REDIS_ENCODING_HT);
break;
}
+ decrRefCount(field);
+ decrRefCount(value);
}
/* Load remaining fields and values into the hash table */
static int cliReadReply(int output_raw_strings) {
void *_reply;
redisReply *reply;
- sds out;
+ sds out = NULL;
int output = 1;
if (redisGetReply(context,&_reply) != REDIS_OK) {
reply = (redisReply*)_reply;
- /* Check if we need to connect to a different node and reissue the request. */
+ /* Check if we need to connect to a different node and reissue the
+ * request. */
if (config.cluster_mode && reply->type == REDIS_REPLY_ERROR &&
(!strncmp(reply->str,"MOVED",5) || !strcmp(reply->str,"ASK")))
{
unsigned long long payload;
/* Send the SYNC command. */
- write(fd,"SYNC\r\n",6);
+ if (write(fd,"SYNC\r\n",6) != 6) {
+ fprintf(stderr,"Error writing to master\n");
+ exit(1);
+ }
/* Read $<payload>\r\n, making sure to read just up to "\n" */
p = buf;
{"lastsave",lastsaveCommand,1,"r",0,NULL,0,0,0,0,0},
{"type",typeCommand,2,"r",0,NULL,1,1,1,0,0},
{"multi",multiCommand,1,"rs",0,NULL,0,0,0,0,0},
- {"exec",execCommand,1,"wms",0,NULL,0,0,0,0,0},
+ {"exec",execCommand,1,"s",0,NULL,0,0,0,0,0},
{"discard",discardCommand,1,"rs",0,NULL,0,0,0,0,0},
{"sync",syncCommand,1,"ars",0,NULL,0,0,0,0,0},
{"flushdb",flushdbCommand,1,"w",0,NULL,0,0,0,0,0},
{"pttl",pttlCommand,2,"r",0,NULL,1,1,1,0,0},
{"persist",persistCommand,2,"w",0,NULL,1,1,1,0,0},
{"slaveof",slaveofCommand,3,"aws",0,NULL,0,0,0,0,0},
- {"debug",debugCommand,-2,"aws",0,NULL,0,0,0,0,0},
+ {"debug",debugCommand,-2,"as",0,NULL,0,0,0,0,0},
{"config",configCommand,-2,"ar",0,NULL,0,0,0,0,0},
{"subscribe",subscribeCommand,-2,"rps",0,NULL,0,0,0,0,0},
{"unsubscribe",unsubscribeCommand,-1,"rps",0,NULL,0,0,0,0,0},
{"psubscribe",psubscribeCommand,-2,"rps",0,NULL,0,0,0,0,0},
{"punsubscribe",punsubscribeCommand,-1,"rps",0,NULL,0,0,0,0,0},
- {"publish",publishCommand,3,"rpf",0,NULL,0,0,0,0,0},
+ {"publish",publishCommand,3,"pf",0,NULL,0,0,0,0,0},
{"watch",watchCommand,-2,"rs",0,noPreloadGetKeys,1,-1,1,0,0},
{"unwatch",unwatchCommand,1,"rs",0,NULL,0,0,0,0,0},
{"cluster",clusterCommand,-2,"ar",0,NULL,0,0,0,0,0},
{"dump",dumpCommand,2,"ar",0,NULL,1,1,1,0,0},
{"object",objectCommand,-2,"r",0,NULL,2,2,2,0,0},
{"client",clientCommand,-2,"ar",0,NULL,0,0,0,0,0},
- {"eval",evalCommand,-3,"wms",0,zunionInterGetKeys,0,0,0,0,0},
- {"evalsha",evalShaCommand,-3,"wms",0,zunionInterGetKeys,0,0,0,0,0},
+ {"eval",evalCommand,-3,"s",0,zunionInterGetKeys,0,0,0,0,0},
+ {"evalsha",evalShaCommand,-3,"s",0,zunionInterGetKeys,0,0,0,0,0},
{"slowlog",slowlogCommand,-2,"r",0,NULL,0,0,0,0,0},
{"script",scriptCommand,-2,"ras",0,NULL,0,0,0,0,0},
{"time",timeCommand,1,"rR",0,NULL,0,0,0,0,0}
}
void updateLRUClock(void) {
- server.lruclock = (time(NULL)/REDIS_LRU_CLOCK_RESOLUTION) &
+ server.lruclock = (server.unixtime/REDIS_LRU_CLOCK_RESOLUTION) &
REDIS_LRU_CLOCK_MAX;
}
return sum / REDIS_OPS_SEC_SAMPLES;
}
-void clientsCronHandleTimeout(redisClient *c) {
+/* Check for timeouts. Returns non-zero if the client was terminated */
+int clientsCronHandleTimeout(redisClient *c) {
time_t now = server.unixtime;
if (server.maxidletime &&
{
redisLog(REDIS_VERBOSE,"Closing idle client");
freeClient(c);
+ return 1;
} else if (c->flags & REDIS_BLOCKED) {
if (c->bpop.timeout != 0 && c->bpop.timeout < now) {
addReply(c,shared.nullmultibulk);
unblockClientWaitingData(c);
}
}
+ return 0;
}
/* The client query buffer is an sds.c string that can end with a lot of
- * free space not used, this function reclaims space if needed. */
-void clientsCronResizeQueryBuffer(redisClient *c) {
+ * free space not used, this function reclaims space if needed.
+ *
+ * The function always returns 0 as it never terminates the client. */
+int clientsCronResizeQueryBuffer(redisClient *c) {
size_t querybuf_size = sdsAllocSize(c->querybuf);
time_t idletime = server.unixtime - c->lastinteraction;
/* Reset the peak again to capture the peak memory usage in the next
* cycle. */
c->querybuf_peak = 0;
+ return 0;
}
void clientsCron(void) {
listRotate(server.clients);
head = listFirst(server.clients);
c = listNodeValue(head);
- clientsCronHandleTimeout(c);
- clientsCronResizeQueryBuffer(c);
+ /* The following functions do different service checks on the client.
+ * The protocol is that they return non-zero if the client was
+ * terminated. */
+ if (clientsCronHandleTimeout(c)) continue;
+ if (clientsCronResizeQueryBuffer(c)) continue;
}
}
updateDictResizePolicy();
}
} else {
- time_t now = time(NULL);
-
/* If there is not a background saving/rewrite in progress check if
* we have to save/rewrite now */
for (j = 0; j < server.saveparamslen; j++) {
struct saveparam *sp = server.saveparams+j;
if (server.dirty >= sp->changes &&
- now-server.lastsave > sp->seconds) {
+ server.unixtime-server.lastsave > sp->seconds) {
redisLog(REDIS_NOTICE,"%d changes in %d seconds. Saving...",
sp->changes, sp->seconds);
rdbSaveBackground(server.rdb_filename);
shared.slowscripterr = createObject(REDIS_STRING,sdsnew(
"-BUSY Redis is busy running a script. You can only call SCRIPT KILL or SHUTDOWN NOSAVE.\r\n"));
shared.bgsaveerr = createObject(REDIS_STRING,sdsnew(
- "-MISCONF Redis is configured to save RDB snapshots, but is currently not able to persist on disk. Write commands are disabled. Please check Redis logs for details about the error.\r\n"));
+ "-MISCONF Redis is configured to save RDB snapshots, but is currently not able to persist on disk. Commands that may modify the data set are disabled. Please check Redis logs for details about the error.\r\n"));
+ shared.roslaveerr = createObject(REDIS_STRING,sdsnew(
+ "-READONLY You can't write against a read only slave.\r\n"));
+ shared.oomerr = createObject(REDIS_STRING,sdsnew(
+ "-OOM command not allowed when used memory > 'maxmemory'.\r\n"));
shared.space = createObject(REDIS_STRING,sdsnew(" "));
shared.colon = createObject(REDIS_STRING,sdsnew(":"));
shared.plus = createObject(REDIS_STRING,sdsnew("+"));
server.aof_rewrite_base_size = 0;
server.aof_rewrite_scheduled = 0;
server.aof_last_fsync = time(NULL);
+ server.aof_delayed_fsync = 0;
server.aof_fd = -1;
server.aof_selected_db = -1; /* Make sure the first time will not match */
server.aof_flush_postponed_start = 0;
server.repl_state = REDIS_REPL_NONE;
server.repl_syncio_timeout = REDIS_REPL_SYNCIO_TIMEOUT;
server.repl_serve_stale_data = 1;
+ server.repl_slave_ro = 1;
server.repl_down_since = -1;
/* Client output buffer limits */
if (server.maxmemory) {
int retval = freeMemoryIfNeeded();
if ((c->cmd->flags & REDIS_CMD_DENYOOM) && retval == REDIS_ERR) {
- addReplyError(c,
- "command not allowed when used memory > 'maxmemory'");
+ addReply(c, shared.oomerr);
return REDIS_OK;
}
}
return REDIS_OK;
}
+    /* Don't accept write commands if this is a read only slave. But
+ * accept write commands if this is our master. */
+ if (server.masterhost && server.repl_slave_ro &&
+ !(c->flags & REDIS_MASTER) &&
+ c->cmd->flags & REDIS_CMD_WRITE)
+ {
+ addReply(c, shared.roslaveerr);
+ return REDIS_OK;
+ }
+
/* Only allow SUBSCRIBE and UNSUBSCRIBE in the context of Pub/Sub */
if ((dictSize(c->pubsub_channels) > 0 || listLength(c->pubsub_patterns) > 0)
&&
* on memory corruption problems. */
sds genRedisInfoString(char *section) {
sds info = sdsempty();
- time_t uptime = time(NULL)-server.stat_starttime;
+ time_t uptime = server.unixtime-server.stat_starttime;
int j, numcommands;
struct rusage self_ru, c_ru;
unsigned long lol, bib;
"aof_base_size:%lld\r\n"
"aof_pending_rewrite:%d\r\n"
"aof_buffer_length:%zu\r\n"
- "aof_pending_bio_fsync:%llu\r\n",
+ "aof_pending_bio_fsync:%llu\r\n"
+ "aof_delayed_fsync:%lu\r\n",
(long long) server.aof_current_size,
(long long) server.aof_rewrite_base_size,
server.aof_rewrite_scheduled,
sdslen(server.aof_buf),
- bioPendingJobsOfType(REDIS_BIO_AOF_FSYNC));
+ bioPendingJobsOfType(REDIS_BIO_AOF_FSYNC),
+ server.aof_delayed_fsync);
}
if (server.loading) {
perc = ((double)server.loading_loaded_bytes /
server.loading_total_bytes) * 100;
- elapsed = time(NULL)-server.loading_start_time;
+ elapsed = server.unixtime-server.loading_start_time;
if (elapsed == 0) {
eta = 1; /* A fake 1 second figure if we don't have
enough info */
(server.repl_state == REDIS_REPL_CONNECTED) ?
"up" : "down",
server.master ?
- ((int)(time(NULL)-server.master->lastinteraction)) : -1,
+ ((int)(server.unixtime-server.master->lastinteraction)) : -1,
server.repl_state == REDIS_REPL_TRANSFER
);
"master_sync_left_bytes:%ld\r\n"
"master_sync_last_io_seconds_ago:%d\r\n"
,(long)server.repl_transfer_left,
- (int)(time(NULL)-server.repl_transfer_lastio)
+ (int)(server.unixtime-server.repl_transfer_lastio)
);
}
if (server.repl_state != REDIS_REPL_CONNECTED) {
info = sdscatprintf(info,
"master_link_down_since_seconds:%ld\r\n",
- (long)time(NULL)-server.repl_down_since);
+ (long)server.unixtime-server.repl_down_since);
}
}
info = sdscatprintf(info,
}
void version() {
- printf("Redis server version %s (%s:%d)\n", REDIS_VERSION,
- redisGitSHA1(), atoi(redisGitDirty()) > 0);
+ printf("Redis server v=%s sha=%s:%d malloc=%s\n", REDIS_VERSION,
+ redisGitSHA1(), atoi(redisGitDirty()) > 0, ZMALLOC_LIB);
exit(0);
}
fprintf(stderr,"Usage: ./redis-server [/path/to/redis.conf] [options]\n");
fprintf(stderr," ./redis-server - (read config from stdin)\n");
fprintf(stderr," ./redis-server -v or --version\n");
- fprintf(stderr," ./redis-server -h or --help\n\n");
+ fprintf(stderr," ./redis-server -h or --help\n");
+ fprintf(stderr," ./redis-server --test-memory <megabytes>\n\n");
fprintf(stderr,"Examples:\n");
fprintf(stderr," ./redis-server (run the server with default conf)\n");
fprintf(stderr," ./redis-server /etc/redis/6379.conf\n");
return;
}
+void memtest(size_t megabytes, int passes);
+
int main(int argc, char **argv) {
long long start;
struct timeval tv;
strcmp(argv[1], "--version") == 0) version();
if (strcmp(argv[1], "--help") == 0 ||
strcmp(argv[1], "-h") == 0) usage();
+ if (strcmp(argv[1], "--test-memory") == 0) {
+ if (argc == 3) {
+ memtest(atoi(argv[2]),50);
+ exit(0);
+ } else {
+ fprintf(stderr,"Please specify the amount of memory to test in megabytes.\n");
+ fprintf(stderr,"Example: ./redis-server --test-memory 4096\n\n");
+ exit(1);
+ }
+ }
+
/* First argument is the config file name? */
if (argv[j][0] != '-' || argv[j][1] != '-')
configfile = argv[j++];
*colon, *nullbulk, *nullmultibulk, *queued,
*emptymultibulk, *wrongtypeerr, *nokeyerr, *syntaxerr, *sameobjecterr,
*outofrangeerr, *noscripterr, *loadingerr, *slowscripterr, *bgsaveerr,
- *plus, *select0, *select1, *select2, *select3, *select4,
- *select5, *select6, *select7, *select8, *select9,
+ *roslaveerr, *oomerr, *plus, *select0, *select1, *select2, *select3,
+ *select4, *select5, *select6, *select7, *select8, *select9,
*messagebulk, *pmessagebulk, *subscribebulk, *unsubscribebulk,
*psubscribebulk, *punsubscribebulk, *del, *rpop, *lpop,
*integers[REDIS_SHARED_INTEGERS],
int aof_selected_db; /* Currently selected DB in AOF */
time_t aof_flush_postponed_start; /* UNIX time of postponed AOF flush */
time_t aof_last_fsync; /* UNIX time of last fsync() */
+ unsigned long aof_delayed_fsync; /* delayed AOF fsync() counter */
/* RDB persistence */
long long dirty; /* Changes to DB from the last save */
long long dirty_before_bgsave; /* Used to restore dirty on failed BGSAVE */
char *repl_transfer_tmpfile; /* Slave-> master SYNC temp file name */
time_t repl_transfer_lastio; /* Unix time of the latest read, for timeout */
int repl_serve_stale_data; /* Serve stale data when link is down? */
+ int repl_slave_ro; /* Slave is read only? */
time_t repl_down_since; /* Unix time at which link with master went down */
/* Limits */
unsigned int maxclients; /* Max number of simultaneous clients */
extern dictType clusterNodesDictType;
extern dictType dbDictType;
extern double R_Zero, R_PosInf, R_NegInf, R_Nan;
-dictType hashDictType;
+extern dictType hashDictType;
/*-----------------------------------------------------------------------------
* Functions prototypes
void queueMultiCommand(redisClient *c);
void touchWatchedKey(redisDb *db, robj *key);
void touchWatchedKeysOnFlush(int dbid);
+void discardTransaction(redisClient *c);
/* Redis object implementation */
void decrRefCount(void *o);
/* At this stage just a newline works as a PING in order to take
* the connection live. So we refresh our last interaction
* timestamp. */
- server.repl_transfer_lastio = time(NULL);
+ server.repl_transfer_lastio = server.unixtime;
return;
} else if (buf[0] != '$') {
redisLog(REDIS_WARNING,"Bad protocol from MASTER, the first byte is not '$', are you sure the host and port are right?");
replicationAbortSyncTransfer();
return;
}
- server.repl_transfer_lastio = time(NULL);
+ server.repl_transfer_lastio = server.unixtime;
if (write(server.repl_transfer_fd,buf,nread) != nread) {
redisLog(REDIS_WARNING,"Write error or short write writing to the DB dump file needed for MASTER <-> SLAVE synchrnonization: %s", strerror(errno));
goto error;
/* Prepare a suitable temp file for bulk transfer */
while(maxtries--) {
snprintf(tmpfile,256,
- "temp-%d.%ld.rdb",(int)time(NULL),(long int)getpid());
+ "temp-%d.%ld.rdb",(int)server.unixtime,(long int)getpid());
dfd = open(tmpfile,O_CREAT|O_WRONLY|O_EXCL,0644);
if (dfd != -1) break;
sleep(1);
server.repl_state = REDIS_REPL_TRANSFER;
server.repl_transfer_left = -1;
server.repl_transfer_fd = dfd;
- server.repl_transfer_lastio = time(NULL);
+ server.repl_transfer_lastio = server.unixtime;
server.repl_transfer_tmpfile = zstrdup(tmpfile);
return;
return REDIS_ERR;
}
- server.repl_transfer_lastio = time(NULL);
+ server.repl_transfer_lastio = server.unixtime;
server.repl_transfer_s = fd;
server.repl_state = REDIS_REPL_CONNECTING;
return REDIS_OK;
goto cleanup;
}
+ /* There are commands that are not allowed inside scripts. */
if (cmd->flags & REDIS_CMD_NOSCRIPT) {
luaPushError(lua, "This Redis command is not allowed from scripts");
goto cleanup;
}
- if (cmd->flags & REDIS_CMD_WRITE && server.lua_random_dirty) {
- luaPushError(lua,
- "Write commands not allowed after non deterministic commands");
- goto cleanup;
+ /* Write commands are forbidden against read-only slaves, or if a
+ * command marked as non-deterministic was already called in the context
+ * of this script. */
+ if (cmd->flags & REDIS_CMD_WRITE) {
+ if (server.lua_random_dirty) {
+ luaPushError(lua,
+ "Write commands not allowed after non deterministic commands");
+ goto cleanup;
+ } else if (server.masterhost && server.repl_slave_ro &&
+ !(server.lua_caller->flags & REDIS_MASTER))
+ {
+ luaPushError(lua, shared.roslaveerr->ptr);
+ goto cleanup;
+ } else if (server.stop_writes_on_bgsave_err &&
+ server.saveparamslen > 0 &&
+ server.lastbgsave_status == REDIS_ERR)
+ {
+ luaPushError(lua, shared.bgsaveerr->ptr);
+ goto cleanup;
+ }
+ }
+
+ /* If we reached the memory limit configured via maxmemory, commands that
+ * could enlarge the memory usage are not allowed, but only if this is the
+ * first write in the context of this script, otherwise we can't stop
+ * in the middle. */
+ if (server.maxmemory && server.lua_write_dirty == 0 &&
+ (cmd->flags & REDIS_CMD_DENYOOM))
+ {
+ if (freeMemoryIfNeeded() == REDIS_ERR) {
+ luaPushError(lua, shared.oomerr->ptr);
+ goto cleanup;
+ }
}
if (cmd->flags & REDIS_CMD_RANDOM) server.lua_random_dirty = 1;
}
/* Add an element, discard the old if the key already exists.
- * Return 0 on insert and 1 on update. */
+ * Return 0 on insert and 1 on update.
+ * This function will take care of incrementing the reference count of the
+ * retained fields and value objects. */
int hashTypeSet(robj *o, robj *field, robj *value) {
int update = 0;
zl = ziplistPush(zl, field->ptr, sdslen(field->ptr), ZIPLIST_TAIL);
zl = ziplistPush(zl, value->ptr, sdslen(value->ptr), ZIPLIST_TAIL);
}
-
o->ptr = zl;
-
decrRefCount(field);
decrRefCount(value);
/* Check if the ziplist needs to be converted to a hash table */
- if (hashTypeLength(o) > server.hash_max_ziplist_entries) {
+ if (hashTypeLength(o) > server.hash_max_ziplist_entries)
hashTypeConvert(o, REDIS_ENCODING_HT);
- }
-
} else if (o->encoding == REDIS_ENCODING_HT) {
if (dictReplace(o->ptr, field, value)) { /* Insert */
incrRefCount(field);
} else { /* Update */
update = 1;
}
-
incrRefCount(value);
-
} else {
redisPanic("Unknown hash encoding");
}
-
return update;
}
void hincrbyfloatCommand(redisClient *c) {
double long value, incr;
- robj *o, *current, *new;
+ robj *o, *current, *new, *aux;
if (getLongDoubleFromObjectOrReply(c,c->argv[3],&incr,NULL) != REDIS_OK) return;
if ((o = hashTypeLookupWriteOrCreate(c,c->argv[1])) == NULL) return;
hashTypeTryObjectEncoding(o,&c->argv[2],NULL);
hashTypeSet(o,c->argv[2],new);
addReplyBulk(c,new);
- decrRefCount(new);
signalModifiedKey(c->db,c->argv[1]);
server.dirty++;
+
+ /* Always replicate HINCRBYFLOAT as an HSET command with the final value
+     * in order to make sure that differences in float precision or formatting
+ * will not create differences in replicas or after an AOF restart. */
+ aux = createStringObject("HSET",4);
+ rewriteClientCommandArgument(c,0,aux);
+ decrRefCount(aux);
+ rewriteClientCommandArgument(c,3,new);
+ decrRefCount(new);
}
static void addHashFieldToReply(redisClient *c, robj *o, robj *field) {
return REDIS_ERR;
}
- if (tval > 0) tval += time(NULL);
+ if (tval > 0) tval += server.unixtime;
*timeout = tval;
return REDIS_OK;
} else if ((prevlensize) == 5) { \
assert(sizeof((prevlensize)) == 4); \
memcpy(&(prevlen), ((char*)(ptr)) + 1, 4); \
- memrev32ifbe(&len); \
+ memrev32ifbe(&prevlen); \
} \
} while(0);
-set server_path [tmpdir "server.convert-zipmap-hash-on-load"]
-
# Copy RDB with zipmap encoded hash to server path
-exec cp tests/assets/hash-zipmap.rdb $server_path
+set server_path [tmpdir "server.convert-zipmap-hash-on-load"]
+exec cp -f tests/assets/hash-zipmap.rdb $server_path
start_server [list overrides [list "dir" $server_path "dbfilename" "hash-zipmap.rdb"]] {
test "RDB load zipmap hash: converts to ziplist" {
r select 0
}
}
+exec cp -f tests/assets/hash-zipmap.rdb $server_path
start_server [list overrides [list "dir" $server_path "dbfilename" "hash-zipmap.rdb" "hash-max-ziplist-entries" 1]] {
test "RDB load zipmap hash: converts to hash table when hash-max-ziplist-entries is exceeded" {
r select 0
}
}
+exec cp -f tests/assets/hash-zipmap.rdb $server_path
start_server [list overrides [list "dir" $server_path "dbfilename" "hash-zipmap.rdb" "hash-max-ziplist-value" 1]] {
test "RDB load zipmap hash: converts to hash table when hash-max-ziplist-value is exceeded" {
r select 0
--- /dev/null
+set server_path [tmpdir "server.rdb-encoding-test"]
+
+# Copy RDB with different encodings in server path
+exec cp tests/assets/encodings.rdb $server_path
+
+start_server [list overrides [list "dir" $server_path "dbfilename" "encodings.rdb"]] {
+ test "RDB encoding loading test" {
+ r select 0
+ csvdump r
+ } {"compressible","string","aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
+"hash","hash","a","1","aa","10","aaa","100","b","2","bb","20","bbb","200","c","3","cc","30","ccc","300","ddd","400","eee","5000000000",
+"hash_zipped","hash","a","1","b","2","c","3",
+"list","list","1","2","3","a","b","c","100000","6000000000","1","2","3","a","b","c","100000","6000000000","1","2","3","a","b","c","100000","6000000000",
+"list_zipped","list","1","2","3","a","b","c","100000","6000000000",
+"number","string","10"
+"set","set","1","100000","2","3","6000000000","a","b","c",
+"set_zipped_1","set","1","2","3","4",
+"set_zipped_2","set","100000","200000","300000","400000",
+"set_zipped_3","set","1000000000","2000000000","3000000000","4000000000","5000000000","6000000000",
+"string","string","Hello World"
+"zset","zset","a","1","b","2","c","3","aa","10","bb","20","cc","30","aaa","100","bbb","200","ccc","300","aaaa","1000","cccc","123456789","bbbb","5000000000",
+"zset_zipped","zset","a","1","b","2","c","3",
+}
+}
+
- {return -code error [redis_read_line $fd]}
$ {redis_bulk_read $fd}
* {redis_multi_bulk_read $fd}
- default {return -code error "Bad protocol, $type as reply type byte"}
+ default {return -code error "Bad protocol, '$type' as reply type byte"}
}
}
set stderr [format "%s/%s" [dict get $config "dir"] "stderr"]
if {$::valgrind} {
- exec valgrind --suppressions=src/valgrind.sup src/redis-server $config_file > $stdout 2> $stderr &
+ exec valgrind --suppressions=src/valgrind.sup --show-reachable=no --show-possibly-lost=no --leak-check=full src/redis-server $config_file > $stdout 2> $stderr &
} else {
exec src/redis-server $config_file > $stdout 2> $stderr &
}
integration/replication-2
integration/replication-3
integration/aof
+ integration/rdb
+ integration/convert-zipmap-hash-on-load
unit/pubsub
unit/slowlog
unit/scripting
# Redis configuration file example
-# Note on units: when memory size is needed, it is possible to specifiy
+# Note on units: when memory size is needed, it is possible to specify
# it in the usual form of 1k 5GB 4M and so forth:
#
# 1k => 1000 bytes
# on a unix socket when not specified.
#
# unixsocket /tmp/redis.sock
+# unixsocketperm 755
# Close the connection after a client is idle for N seconds (0 to disable)
-timeout 300
+timeout 0
# Set server verbosity to 'debug'
# it can be one of:
# verbose (many rarely useful info, but not a mess like the debug level)
# notice (moderately verbose, what you want in production probably)
# warning (only very important / critical messages are logged)
-loglevel verbose
+loglevel notice
# Specify the log file name. Also 'stdout' can be used to force
# Redis to log on the standard output. Note that if you use standard
# after 60 sec if at least 10000 keys changed
#
# Note: you can disable saving at all commenting all the "save" lines.
+#
+# It is also possible to remove all the previously configured save
+# points by adding a save directive with a single empty string argument
+# like in the following example:
+#
+# save ""
save 900 1
save 300 10
save 60 10000
+# By default Redis will stop accepting writes if RDB snapshots are enabled
+# (at least one save point) and the latest background save failed.
+# This will make the user aware (in a hard way) that data is not persisting
+# on disk properly, otherwise chances are that no one will notice and some
+# disaster will happen.
+#
+# If the background saving process will start working again Redis will
+# automatically allow writes again.
+#
+# However if you have setup your proper monitoring of the Redis server
+# and persistence, you may want to disable this feature so that Redis will
+# continue to work as usually even if there are problems with disk,
+# permissions, and so forth.
+stop-writes-on-bgsave-error yes
+
# Compress string objects using LZF when dump .rdb databases?
# For default that's set to 'yes' as it's almost always a win.
# If you want to save some CPU in the saving child set it to 'no' but
# is still in progress, the slave can act in two different ways:
#
# 1) if slave-serve-stale-data is set to 'yes' (the default) the slave will
-# still reply to client requests, possibly with out of data data, or the
+# still reply to client requests, possibly with out of date data, or the
# data set may just be empty if this is the first synchronization.
#
# 2) if slave-serve-stale data is set to 'no' the slave will reply with
#
slave-serve-stale-data yes
+# Slaves send PINGs to server in a predefined interval. It's possible to change
+# this interval with the repl_ping_slave_period option. The default value is 10
+# seconds.
+#
+# repl-ping-slave-period 10
+
+# The following option sets a timeout for both Bulk transfer I/O timeout and
+# master data or ping response timeout. The default value is 60 seconds.
+#
+# It is important to make sure that this value is greater than the value
+# specified for repl-ping-slave-period otherwise a timeout will be detected
+# every time there is low traffic between the master and the slave.
+#
+# repl-timeout 60
+
################################## SECURITY ###################################
# Require clients to issue AUTH <PASSWORD> before processing any other
# Command renaming.
#
-# It is possilbe to change the name of dangerous commands in a shared
+# It is possible to change the name of dangerous commands in a shared
# environment. For instance the CONFIG command may be renamed into something
# of hard to guess so that it will be still available for internal-use
# tools but not available for general clients.
#
# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52
#
-# It is also possilbe to completely kill a command renaming it into
+# It is also possible to completely kill a command renaming it into
# an empty string:
#
# rename-command CONFIG ""
################################### LIMITS ####################################
-# Set the max number of connected clients at the same time. By default there
-# is no limit, and it's up to the number of file descriptors the Redis process
-# is able to open. The special value '0' means no limits.
+# Set the max number of connected clients at the same time. By default
+# this limit is set to 10000 clients, however if the Redis server is not
+# able to configure the process file limit to allow for the specified limit
+# the max number of allowed clients is set to the current file limit
+# minus 32 (as Redis reserves a few file descriptors for internal uses).
+#
# Once the limit is reached Redis will close all the new connections sending
# an error 'max number of clients reached'.
#
-# maxclients 128
+# maxclients 10000
# Don't use more memory than the specified amount of bytes.
-# When the memory limit is reached Redis will try to remove keys with an
-# EXPIRE set. It will try to start freeing keys that are going to expire
-# in little time and preserve keys with a longer time to live.
-# Redis will also try to remove objects from free lists if possible.
-#
-# If all this fails, Redis will start to reply with errors to commands
-# that will use more memory, like SET, LPUSH, and so on, and will continue
-# to reply to most read-only commands like GET.
-#
-# WARNING: maxmemory can be a good idea mainly if you want to use Redis as a
-# 'state' server or cache, not as a real DB. When Redis is used as a real
-# database the memory usage will grow over the weeks, it will be obvious if
-# it is going to use too much memory in the long run, and you'll have the time
-# to upgrade. With maxmemory after the limit is reached you'll start to get
-# errors for write operations, and this may even lead to DB inconsistency.
+# When the memory limit is reached Redis will try to remove keys
+# according to the eviction policy selected (see maxmemory-policy).
+#
+# If Redis can't remove keys according to the policy, or if the policy is
+# set to 'noeviction', Redis will start to reply with errors to commands
+# that would use more memory, like SET, LPUSH, and so on, and will continue
+# to reply to read-only commands like GET.
+#
+# This option is usually useful when using Redis as an LRU cache, or to set
+# a hard memory limit for an instance (using the 'noeviction' policy).
+#
+# WARNING: If you have slaves attached to an instance with maxmemory on,
+# the size of the output buffers needed to feed the slaves are subtracted
+# from the used memory count, so that network problems / resyncs will
+# not trigger a loop where keys are evicted, and in turn the output
+# buffer of slaves is full with DELs of keys evicted triggering the deletion
+# of more keys, and so forth until the database is completely emptied.
+#
+# In short... if you have slaves attached it is suggested that you set a lower
+# limit for maxmemory so that there is some free RAM on the system for slave
+# output buffers (but this is not needed if the policy is 'noeviction').
#
# maxmemory <bytes>
# volatile-lru -> remove the key with an expire set using an LRU algorithm
# allkeys-lru -> remove any key accordingly to the LRU algorithm
# volatile-random -> remove a random key with an expire set
-# allkeys->random -> remove a random key, any key
+# allkeys-random -> remove a random key, any key
# volatile-ttl -> remove the key with the nearest expire time (minor TTL)
# noeviction -> don't expire at all, just return an error on write operations
#
#
# The default is "everysec" that's usually the right compromise between
# speed and data safety. It's up to you to understand if you can relax this to
-# "no" that will will let the operating system flush the output buffer when
+# "no" that will let the operating system flush the output buffer when
# it wants, for better performances (but if you can live with the idea of
# some data loss consider the default persistence mode that's snapshotting),
# or on the contrary, use "always" that's very slow but a bit safer than
# BGSAVE or BGREWRITEAOF is in progress.
#
# This means that while another child is saving the durability of Redis is
-# the same as "appendfsync none", that in pratical terms means that it is
+# the same as "appendfsync none", that in practical terms means that it is
# possible to lost up to 30 seconds of log in the worst scenario (with the
# default Linux settings).
#
# is useful to avoid rewriting the AOF file even if the percentage increase
# is reached but it is still pretty small.
#
-# Specify a precentage of zero in order to disable the automatic AOF
+# Specify a percentage of zero in order to disable the automatic AOF
# rewrite feature.
auto-aof-rewrite-percentage 100
################################ LUA SCRIPTING ###############################
# Max execution time of a Lua script in milliseconds.
-# This prevents that a programming error generating an infinite loop will block
-# your server forever. Set it to 0 or a negative value for unlimited execution.
-#lua-time-limit 60000
+#
+# If the maximum execution time is reached Redis will log that a script is
+# still in execution after the maximum allowed time and will start to
+# reply to queries with an error.
+#
+# When a long running script exceeds the maximum execution time only the
+# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be
+# used to stop a script that has not yet called write commands. The second
+# is the only way to shut down the server in case a write command was
+# already issued by the script but the user doesn't want to wait for the
+# natural termination of the script.
+#
+# Set it to 0 or a negative value for unlimited execution without warnings.
+lua-time-limit 5000
+
+################################ REDIS CLUSTER ###############################
+#
+# Normal Redis instances can't be part of a Redis Cluster, only nodes that are
+# started as cluster nodes can. In order to start a Redis instance as a
+# cluster node enable the cluster support uncommenting the following:
+#
+# cluster-enabled yes
+
+# Every cluster node has a cluster configuration file. This file is not
+# intended to be edited by hand. It is created and updated by Redis nodes.
+# Every Redis Cluster node requires a different cluster configuration file.
+# Make sure that instances running in the same system do not have
+# overlapping cluster configuration file names.
+#
+# cluster-config-file nodes-6379.conf
+
+# In order to setup your cluster make sure to read the documentation
+# available at http://redis.io web site.
################################## SLOW LOG ###################################
############################### ADVANCED CONFIG ###############################
-# Hashes are encoded in a special way (much more memory efficient) when they
-# have at max a given numer of elements, and the biggest element does not
-# exceed a given threshold. You can configure this limits with the following
-# configuration directives.
-hash-max-zipmap-entries 512
-hash-max-zipmap-value 64
+# Hashes are encoded using a memory efficient data structure when they have a
+# small number of entries, and the biggest entry does not exceed a given
+# threshold. These thresholds can be configured using the following directives.
+hash-max-ziplist-entries 512
+hash-max-ziplist-value 64
# Similarly to hashes, small lists are also encoded in a special way in order
# to save a lot of space. The special representation is only used when
# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in
# order to help rehashing the main Redis hash table (the one mapping top-level
-# keys to values). The hash table implementation redis uses (see dict.c)
+# keys to values). The hash table implementation Redis uses (see dict.c)
# performs a lazy rehashing: the more operation you run into an hash table
-# that is rhashing, the more rehashing "steps" are performed, so if the
+# that is rehashing, the more rehashing "steps" are performed, so if the
# server is idle the rehashing is never complete and some more memory is used
# by the hash table.
#
# want to free memory asap when possible.
activerehashing yes
+# The client output buffer limits can be used to force disconnection of clients
+# that are not reading data from the server fast enough for some reason (a
+# common reason is that a Pub/Sub client can't consume messages as fast as the
+# publisher can produce them).
+#
+# The limit can be set differently for the three different classes of clients:
+#
+# normal -> normal clients
+# slave -> slave clients and MONITOR clients
+# pubsub -> clients subscribed to at least one pubsub channel or pattern
+#
+# The syntax of every client-output-buffer-limit directive is the following:
+#
+# client-output-buffer-limit <class> <hard limit> <soft limit> <soft seconds>
+#
+# A client is immediately disconnected once the hard limit is reached, or if
+# the soft limit is reached and remains reached for the specified number of
+# seconds (continuously).
+# So for instance if the hard limit is 32 megabytes and the soft limit is
+# 16 megabytes / 10 seconds, the client will get disconnected immediately
+# if the size of the output buffers reaches 32 megabytes, but will also get
+# disconnected if the client reaches 16 megabytes and continuously overcomes
+# the limit for 10 seconds.
+#
+# By default normal clients are not limited because they don't receive data
+# without asking (in a push way), but just after a request, so only
+# asynchronous clients may create a scenario where data is requested faster
+# than it can be read.
+#
+# Instead there is a default limit for pubsub and slave clients, since
+# subscribers and slaves receive data in a push fashion.
+#
+# Both the hard or the soft limit can be disabled just setting it to zero.
+client-output-buffer-limit normal 0 0 0
+client-output-buffer-limit slave 256mb 64mb 60
+client-output-buffer-limit pubsub 32mb 8mb 60
+
################################## INCLUDES ###################################
# Include one or more other config files here. This is useful if you
-# have a standard template that goes to all redis server but also need
+# have a standard template that goes to all Redis servers but also need
# to customize a few per-server settings. Include files can include
# other files, so use this wisely.
#