* Initialization
* -------------------------------------------------------------------------- */
-void clusterGetRandomName(char *p) {
- FILE *fp = fopen("/dev/urandom","r");
- char *charset = "0123456789abcdef";
- int j;
-
- if (fp == NULL || fread(p,REDIS_CLUSTER_NAMELEN,1,fp) == 0) {
- for (j = 0; j < REDIS_CLUSTER_NAMELEN; j++)
- p[j] = rand();
- }
- for (j = 0; j < REDIS_CLUSTER_NAMELEN; j++)
- p[j] = charset[p[j] & 0x0F];
- fclose(fp);
-}
-
int clusterLoadConfig(char *filename) {
FILE *fp = fopen(filename,"r");
char *line;
if (nodename)
memcpy(node->name, nodename, REDIS_CLUSTER_NAMELEN);
else
- clusterGetRandomName(node->name);
+ getRandomHexChars(node->name, REDIS_CLUSTER_NAMELEN);
node->flags = flags;
memset(node->slots,0,sizeof(node->slots));
node->numslaves = 0;
de = dictFind(server.cluster.nodes,s);
sdsfree(s);
if (de == NULL) return NULL;
- return dictGetEntryVal(de);
+ return dictGetVal(de);
}
/* This is only used after the handshake. When we connect a given IP/PORT
* time PONG figure if it is newer than our figure.
* Note that it's not a problem if we have a PING already
* in progress against this node. */
- if (node->pong_received < ntohl(g->pong_received)) {
+ if (node->pong_received < (signed) ntohl(g->pong_received)) {
redisLog(REDIS_DEBUG,"Node pong_received updated by gossip");
node->pong_received = ntohl(g->pong_received);
}
/* Update the node address to the IP address that can be extracted
* from link->fd, and at the specified port. */
void nodeUpdateAddress(clusterNode *node, clusterLink *link, int port) {
+ /* TODO */
}
/* When this function is called, there is a packet to process starting
uint16_t type = ntohs(hdr->type);
clusterNode *sender;
- redisLog(REDIS_DEBUG,"--- packet to process %lu bytes (%lu) ---",
- (unsigned long) totlen, sdslen(link->rcvbuf));
+ redisLog(REDIS_DEBUG,"--- Processing packet of type %d, %lu bytes",
+ type, (unsigned long) totlen);
+
+ /* Perform sanity checks */
if (totlen < 8) return 1;
if (totlen > sdslen(link->rcvbuf)) return 1;
if (type == CLUSTERMSG_TYPE_PING || type == CLUSTERMSG_TYPE_PONG ||
explen += sizeof(clusterMsgDataFail);
if (totlen != explen) return 1;
}
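+    /* PUBLISH packets have a variable length: the fixed clusterMsgDataPublish
+     * fields are followed by channel_len bytes of channel name and
+     * message_len bytes of message stored in bulk_data. */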
+ if (type == CLUSTERMSG_TYPE_PUBLISH) {
+ uint32_t explen = sizeof(clusterMsg)-sizeof(union clusterMsgData);
+ explen += sizeof(clusterMsgDataPublish) +
+ ntohl(hdr->data.publish.msg.channel_len) +
+ ntohl(hdr->data.publish.msg.message_len);
+ if (totlen != explen) return 1;
+ }
+
+ /* Ready to process the packet. Dispatch by type. */
sender = clusterLookupNode(hdr->sender);
if (type == CLUSTERMSG_TYPE_PING || type == CLUSTERMSG_TYPE_MEET) {
int update_config = 0;
}
}
/* Update our info about the node */
- link->node->pong_received = time(NULL);
+ if (link->node) link->node->pong_received = time(NULL);
/* Update master/slave info */
if (sender) {
clusterUpdateState();
clusterSaveConfigOrDie();
}
+ } else if (type == CLUSTERMSG_TYPE_PUBLISH) {
+ robj *channel, *message;
+ uint32_t channel_len, message_len;
+
+        /* Don't bother creating useless objects if there are no
+         * Pub/Sub subscribers. */
+ if (dictSize(server.pubsub_channels) || listLength(server.pubsub_patterns)) {
+ channel_len = ntohl(hdr->data.publish.msg.channel_len);
+ message_len = ntohl(hdr->data.publish.msg.message_len);
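+            /* The channel name and the message body are stored back to back
+             * in bulk_data: the channel takes the first channel_len bytes,
+             * the message the following message_len bytes. */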
+ channel = createStringObject(
+ (char*)hdr->data.publish.msg.bulk_data,channel_len);
+ message = createStringObject(
+ (char*)hdr->data.publish.msg.bulk_data+channel_len, message_len);
+ pubsubPublishMessage(channel,message);
+ decrRefCount(channel);
+ decrRefCount(message);
+ }
} else {
- redisLog(REDIS_NOTICE,"Received unknown packet type: %d", type);
+ redisLog(REDIS_WARNING,"Received unknown packet type: %d", type);
}
return 1;
}
link->sndbuf = sdscatlen(link->sndbuf, msg, msglen);
}
+/* Send a message to all the nodes with a reliable link */
+void clusterBroadcastMessage(void *buf, size_t len) {
+ dictIterator *di;
+ dictEntry *de;
+
+ di = dictGetIterator(server.cluster.nodes);
+ while((de = dictNext(di)) != NULL) {
+ clusterNode *node = dictGetVal(de);
+
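+        /* Skip ourselves, nodes in NOADDR state, and nodes we currently
+         * have no link to. */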
+ if (!node->link) continue;
+ if (node->flags & (REDIS_NODE_MYSELF|REDIS_NODE_NOADDR)) continue;
+ clusterSendMessage(node->link,buf,len);
+ }
+ dictReleaseIterator(di);
+}
+
/* Build the message header */
void clusterBuildMessageHdr(clusterMsg *hdr, int type) {
- int totlen;
+ int totlen = 0;
memset(hdr,0,sizeof(*hdr));
hdr->type = htons(type);
/* Populate the gossip fields */
while(freshnodes > 0 && gossipcount < 3) {
struct dictEntry *de = dictGetRandomKey(server.cluster.nodes);
- clusterNode *this = dictGetEntryVal(de);
+ clusterNode *this = dictGetVal(de);
clusterMsgDataGossip *gossip;
int j;
clusterSendMessage(link,buf,totlen);
}
-/* Send a message to all the nodes with a reliable link */
-void clusterBroadcastMessage(void *buf, size_t len) {
- dictIterator *di;
- dictEntry *de;
+/* Send a PUBLISH message.
+ *
+ * If link is NULL, then the message is broadcast to the whole cluster. */
+void clusterSendPublish(clusterLink *link, robj *channel, robj *message) {
+ unsigned char buf[4096], *payload;
+ clusterMsg *hdr = (clusterMsg*) buf;
+ uint32_t totlen;
+ uint32_t channel_len, message_len;
- di = dictGetIterator(server.cluster.nodes);
- while((de = dictNext(di)) != NULL) {
- clusterNode *node = dictGetEntryVal(de);
+ channel = getDecodedObject(channel);
+ message = getDecodedObject(message);
+ channel_len = sdslen(channel->ptr);
+ message_len = sdslen(message->ptr);
- if (!node->link) continue;
- if (node->flags & (REDIS_NODE_MYSELF|REDIS_NODE_NOADDR)) continue;
- clusterSendMessage(node->link,buf,len);
+ clusterBuildMessageHdr(hdr,CLUSTERMSG_TYPE_PUBLISH);
+ totlen = sizeof(clusterMsg)-sizeof(union clusterMsgData);
+ totlen += sizeof(clusterMsgDataPublish) + channel_len + message_len;
+
+ hdr->data.publish.msg.channel_len = htonl(channel_len);
+ hdr->data.publish.msg.message_len = htonl(message_len);
+ hdr->totlen = htonl(totlen);
+
+ /* Try to use the local buffer if possible */
+ if (totlen < sizeof(buf)) {
+ payload = buf;
+ } else {
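+        /* The message does not fit into the local buffer: allocate a
+         * buffer on the heap and copy the header built so far into it. */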
+        payload = zmalloc(totlen);
+        memcpy(payload,hdr,sizeof(*hdr));
+        hdr = (clusterMsg*) payload;
}
- dictReleaseIterator(di);
+ memcpy(hdr->data.publish.msg.bulk_data,channel->ptr,sdslen(channel->ptr));
+ memcpy(hdr->data.publish.msg.bulk_data+sdslen(channel->ptr),
+ message->ptr,sdslen(message->ptr));
+
+ if (link)
+ clusterSendMessage(link,payload,totlen);
+ else
+ clusterBroadcastMessage(payload,totlen);
+
+ decrRefCount(channel);
+ decrRefCount(message);
+ if (payload != buf) zfree(payload);
}
/* Send a FAIL message to all the nodes we are able to contact.
clusterBroadcastMessage(buf,ntohl(hdr->totlen));
}
+/* -----------------------------------------------------------------------------
+ * CLUSTER Pub/Sub support
+ *
+ * For now we do very little, just propagating PUBLISH messages across the
+ * whole cluster. In the future we'll try to get smarter and avoid propagating
+ * those messages to hosts without receivers for a given channel.
+ * -------------------------------------------------------------------------- */
+void clusterPropagatePublish(robj *channel, robj *message) {
+ clusterSendPublish(NULL, channel, message);
+}
+
/* -----------------------------------------------------------------------------
* CLUSTER cron job
* -------------------------------------------------------------------------- */
/* Check if we have disconnected nodes and reestablish the connection. */
di = dictGetIterator(server.cluster.nodes);
while((de = dictNext(di)) != NULL) {
- clusterNode *node = dictGetEntryVal(de);
+ clusterNode *node = dictGetVal(de);
if (node->flags & (REDIS_NODE_MYSELF|REDIS_NODE_NOADDR)) continue;
if (node->link == NULL) {
* the oldest ping_sent time */
for (j = 0; j < 5; j++) {
de = dictGetRandomKey(server.cluster.nodes);
- clusterNode *this = dictGetEntryVal(de);
+ clusterNode *this = dictGetVal(de);
if (this->link == NULL) continue;
if (this->flags & (REDIS_NODE_MYSELF|REDIS_NODE_HANDSHAKE)) continue;
/* Iterate nodes to check if we need to flag something as failing */
di = dictGetIterator(server.cluster.nodes);
while((de = dictNext(di)) != NULL) {
- clusterNode *node = dictGetEntryVal(de);
+ clusterNode *node = dictGetVal(de);
int delay;
if (node->flags &
di = dictGetIterator(server.cluster.nodes);
while((de = dictNext(di)) != NULL) {
- clusterNode *node = dictGetEntryVal(de);
+ clusterNode *node = dictGetVal(de);
/* Node coordinates */
ci = sdscatprintf(ci,"%.40s %s:%d ",
ci = sdscatprintf(ci,"%ld %ld %s",
(long) node->ping_sent,
(long) node->pong_received,
- node->link ? "connected" : "disconnected");
+ (node->link || node->flags & REDIS_NODE_MYSELF) ?
+ "connected" : "disconnected");
/* Slots served by this instance */
start = -1;
addReplyBulk(c,o);
decrRefCount(o);
} else if ((!strcasecmp(c->argv[1]->ptr,"addslots") ||
- !strcasecmp(c->argv[1]->ptr,"delslots")) && c->argc >= 3) {
+ !strcasecmp(c->argv[1]->ptr,"delslots")) && c->argc >= 3)
+ {
+ /* CLUSTER ADDSLOTS <slot> [slot] ... */
+ /* CLUSTER DELSLOTS <slot> [slot] ... */
int j, slot;
unsigned char *slots = zmalloc(REDIS_CLUSTER_SLOTS);
int del = !strcasecmp(c->argv[1]->ptr,"delslots");
}
for (j = 0; j < REDIS_CLUSTER_SLOTS; j++) {
if (slots[j]) {
- int retval = del ? clusterDelSlot(j) :
- clusterAddSlot(server.cluster.myself,j);
-
- redisAssert(retval == REDIS_OK);
+ int retval;
+
+                /* If this slot was set as importing, we can clear this
+                 * state as we are now the real owner of the slot. */
+ if (server.cluster.importing_slots_from[j])
+ server.cluster.importing_slots_from[j] = NULL;
+
+ retval = del ? clusterDelSlot(j) :
+ clusterAddSlot(server.cluster.myself,j);
+ redisAssertWithInfo(c,NULL,retval == REDIS_OK);
}
}
zfree(slots);
clusterSaveConfigOrDie();
addReply(c,shared.ok);
} else if (!strcasecmp(c->argv[1]->ptr,"setslot") && c->argc >= 4) {
- /* SETSLOT 10 MIGRATING <instance ID> */
- /* SETSLOT 10 IMPORTING <instance ID> */
+ /* SETSLOT 10 MIGRATING <node ID> */
+ /* SETSLOT 10 IMPORTING <node ID> */
/* SETSLOT 10 STABLE */
+ /* SETSLOT 10 NODE <node ID> */
int slot;
clusterNode *n;
/* CLUSTER SETSLOT <SLOT> STABLE */
server.cluster.importing_slots_from[slot] = NULL;
server.cluster.migrating_slots_to[slot] = NULL;
- } else if (!strcasecmp(c->argv[3]->ptr,"node") && c->argc == 4) {
+ } else if (!strcasecmp(c->argv[3]->ptr,"node") && c->argc == 5) {
/* CLUSTER SETSLOT <SLOT> NODE <NODE ID> */
clusterNode *n = clusterLookupNode(c->argv[4]->ptr);
keys = zmalloc(sizeof(robj*)*1);
numkeys = GetKeysInSlot(slot, keys, 1);
zfree(keys);
- if (numkeys == 0) {
+ if (numkeys != 0) {
addReplyErrorFormat(c, "Can't assign hashslot %d to a different node while I still hold keys for this hash slot.", slot);
return;
}
}
+ /* If this node was the slot owner and the slot was marked as
+ * migrating, assigning the slot to another node will clear
+         * the migrating status. */
+ if (server.cluster.slots[slot] == server.cluster.myself &&
+ server.cluster.migrating_slots_to[slot])
+ server.cluster.migrating_slots_to[slot] = NULL;
+
+ /* If this node was importing this slot, assigning the slot to
+ * itself also clears the importing status. */
+ if (n == server.cluster.myself && server.cluster.importing_slots_from[slot])
+ server.cluster.importing_slots_from[slot] = NULL;
+
clusterDelSlot(slot);
clusterAddSlot(n,slot);
} else {
/* RESTORE key ttl serialized-value */
void restoreCommand(redisClient *c) {
- FILE *fp;
- char buf[64];
- robj *o;
- unsigned char *data;
long ttl;
+ rio payload;
+ int type;
+ robj *obj;
/* Make sure this key does not already exist here... */
- if (dbExists(c->db,c->argv[1])) {
+ if (lookupKeyWrite(c->db,c->argv[1]) != NULL) {
addReplyError(c,"Target key name is busy.");
return;
}
return;
}
- /* rdbLoadObject() only works against file descriptors so we need to
- * dump the serialized object into a file and reload. */
- snprintf(buf,sizeof(buf),"redis-restore-%d.tmp",getpid());
- fp = fopen(buf,"w+");
- if (!fp) {
- redisLog(REDIS_WARNING,"Can't open tmp file for RESTORE: %s",
- strerror(errno));
- addReplyErrorFormat(c,"RESTORE failed, tmp file creation error: %s",
- strerror(errno));
- return;
- }
- unlink(buf);
-
- /* Write the actual data and rewind the file */
- data = (unsigned char*) c->argv[3]->ptr;
- if (fwrite(data+1,sdslen((sds)data)-1,1,fp) != 1) {
- redisLog(REDIS_WARNING,"Can't write against tmp file for RESTORE: %s",
- strerror(errno));
- addReplyError(c,"RESTORE failed, tmp file I/O error.");
- fclose(fp);
- return;
- }
- rewind(fp);
-
- /* Finally create the object from the serialized dump and
- * store it at the specified key. */
- if ((data[0] > 4 && data[0] < 9) ||
- data[0] > 11 ||
- (o = rdbLoadObject(data[0],fp)) == NULL)
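+    /* Deserialize the payload in the third argument directly from memory:
+     * it is a type byte followed by the rdb-serialized object, the same
+     * format produced by DUMP. */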
+ rioInitWithBuffer(&payload,c->argv[3]->ptr);
+ if (((type = rdbLoadObjectType(&payload)) == -1) ||
+ ((obj = rdbLoadObject(type,&payload)) == NULL))
{
- addReplyError(c,"Bad data format.");
- fclose(fp);
+ addReplyError(c,"Bad data format");
return;
}
- fclose(fp);
/* Create the key and set the TTL if any */
- dbAdd(c->db,c->argv[1],o);
+ dbAdd(c->db,c->argv[1],obj);
if (ttl) setExpire(c->db,c->argv[1],time(NULL)+ttl);
+ signalModifiedKey(c->db,c->argv[1]);
addReply(c,shared.ok);
+ server.dirty++;
}
/* MIGRATE host port key dbid timeout */
int fd;
long timeout;
long dbid;
- char buf[64];
- FILE *fp;
time_t ttl;
robj *o;
- unsigned char type;
- off_t payload_len;
+ rio cmd, payload;
/* Sanity check */
if (getLongFromObjectOrReply(c,c->argv[5],&timeout,NULL) != REDIS_OK)
* nothing to migrate (for instance the key expired in the meantime), but
* we include such information in the reply string. */
if ((o = lookupKeyRead(c->db,c->argv[3])) == NULL) {
- addReplySds(c,sdsnew("+NOKEY"));
+ addReplySds(c,sdsnew("+NOKEY\r\n"));
return;
}
return;
}
- /* Create temp file */
- snprintf(buf,sizeof(buf),"redis-migrate-%d.tmp",getpid());
- fp = fopen(buf,"w+");
- if (!fp) {
- redisLog(REDIS_WARNING,"Can't open tmp file for MIGRATE: %s",
- strerror(errno));
- addReplyErrorFormat(c,"MIGRATE failed, tmp file creation error: %s.",
- strerror(errno));
- return;
- }
- unlink(buf);
-
- /* Build the SELECT + RESTORE query writing it in our temp file. */
- if (fwriteBulkCount(fp,'*',2) == 0) goto file_wr_err;
- if (fwriteBulkString(fp,"SELECT",6) == 0) goto file_wr_err;
- if (fwriteBulkLongLong(fp,dbid) == 0) goto file_wr_err;
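+    /* Build the SELECT + RESTORE commands in the Redis protocol (multi bulk)
+     * format into an in-memory rio buffer instead of a temporary file. */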
+ rioInitWithBuffer(&cmd,sdsempty());
+ redisAssertWithInfo(c,NULL,rioWriteBulkCount(&cmd,'*',2));
+ redisAssertWithInfo(c,NULL,rioWriteBulkString(&cmd,"SELECT",6));
+ redisAssertWithInfo(c,NULL,rioWriteBulkLongLong(&cmd,dbid));
ttl = getExpire(c->db,c->argv[3]);
- type = o->type;
- if (fwriteBulkCount(fp,'*',4) == 0) goto file_wr_err;
- if (fwriteBulkString(fp,"RESTORE",7) == 0) goto file_wr_err;
- if (fwriteBulkObject(fp,c->argv[3]) == 0) goto file_wr_err;
- if (fwriteBulkLongLong(fp, (ttl == -1) ? 0 : ttl) == 0) goto file_wr_err;
+ redisAssertWithInfo(c,NULL,rioWriteBulkCount(&cmd,'*',4));
+ redisAssertWithInfo(c,NULL,rioWriteBulkString(&cmd,"RESTORE",7));
+ redisAssertWithInfo(c,NULL,c->argv[3]->encoding == REDIS_ENCODING_RAW);
+ redisAssertWithInfo(c,NULL,rioWriteBulkString(&cmd,c->argv[3]->ptr,sdslen(c->argv[3]->ptr)));
+ redisAssertWithInfo(c,NULL,rioWriteBulkLongLong(&cmd,(ttl == -1) ? 0 : ttl));
    /* Finally the last argument that is the serialized object payload
- * in the form: <type><rdb-serailized-object>. */
- payload_len = rdbSavedObjectLen(o);
- if (fwriteBulkCount(fp,'$',payload_len+1) == 0) goto file_wr_err;
- if (fwrite(&type,1,1,fp) == 0) goto file_wr_err;
- if (rdbSaveObject(fp,o) == -1) goto file_wr_err;
- if (fwrite("\r\n",2,1,fp) == 0) goto file_wr_err;
-
- /* Tranfer the query to the other node */
- rewind(fp);
+ * in the form: <type><rdb-serialized-object>. */
+ rioInitWithBuffer(&payload,sdsempty());
+ redisAssertWithInfo(c,NULL,rdbSaveObjectType(&payload,o));
+ redisAssertWithInfo(c,NULL,rdbSaveObject(&payload,o) != -1);
+ redisAssertWithInfo(c,NULL,rioWriteBulkString(&cmd,payload.io.buffer.ptr,sdslen(payload.io.buffer.ptr)));
+ sdsfree(payload.io.buffer.ptr);
+
+    /* Transfer the query to the other node in 64K chunks. */
{
- char buf[4096];
- size_t nread;
-
- while ((nread = fread(buf,1,sizeof(buf),fp)) != 0) {
- int nwritten;
-
- nwritten = syncWrite(fd,buf,nread,timeout);
- if (nwritten != (signed)nread) goto socket_wr_err;
+ sds buf = cmd.io.buffer.ptr;
+ size_t pos = 0, towrite;
+ int nwritten = 0;
+
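+        /* syncWrite() is expected to write the whole chunk within the
+         * timeout, so a short write is treated as an error. */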
+ while ((towrite = sdslen(buf)-pos) > 0) {
+ towrite = (towrite > (64*1024) ? (64*1024) : towrite);
+            nwritten = syncWrite(fd,buf+pos,towrite,timeout);
+ if (nwritten != (signed)towrite) goto socket_wr_err;
+ pos += nwritten;
}
- if (ferror(fp)) goto file_rd_err;
}
- /* Read back the reply */
+ /* Read back the reply. */
{
char buf1[1024];
char buf2[1024];
if (syncReadLine(fd, buf1, sizeof(buf1), timeout) <= 0)
goto socket_rd_err;
if (syncReadLine(fd, buf2, sizeof(buf2), timeout) <= 0)
- goto socket_rd_err;
+ goto socket_rd_err;
if (buf1[0] == '-' || buf2[0] == '-') {
addReplyErrorFormat(c,"Target instance replied with error: %s",
(buf1[0] == '-') ? buf1+1 : buf2+1);
} else {
+ robj *aux;
+
dbDelete(c->db,c->argv[3]);
+ signalModifiedKey(c->db,c->argv[3]);
addReply(c,shared.ok);
+ server.dirty++;
+
+ /* Translate MIGRATE as DEL for replication/AOF. */
+ aux = createStringObject("DEL",3);
+ rewriteClientCommandVector(c,2,aux,c->argv[3]);
+ decrRefCount(aux);
}
}
- fclose(fp);
- close(fd);
- return;
-
-file_wr_err:
- redisLog(REDIS_WARNING,"Can't write on tmp file for MIGRATE: %s",
- strerror(errno));
- addReplyErrorFormat(c,"MIGRATE failed, tmp file write error: %s.",
- strerror(errno));
- fclose(fp);
- close(fd);
- return;
-file_rd_err:
- redisLog(REDIS_WARNING,"Can't read from tmp file for MIGRATE: %s",
- strerror(errno));
- addReplyErrorFormat(c,"MIGRATE failed, tmp file read error: %s.",
- strerror(errno));
- fclose(fp);
+ sdsfree(cmd.io.buffer.ptr);
close(fd);
return;
strerror(errno));
addReplyErrorFormat(c,"MIGRATE failed, writing to target node: %s.",
strerror(errno));
- fclose(fp);
+ sdsfree(cmd.io.buffer.ptr);
close(fd);
return;
strerror(errno));
addReplyErrorFormat(c,"MIGRATE failed, reading from target node: %s.",
strerror(errno));
- fclose(fp);
+ sdsfree(cmd.io.buffer.ptr);
close(fd);
return;
}
* DUMP is actually not used by Redis Cluster but it is the obvious
* complement of RESTORE and can be useful for different applications. */
void dumpCommand(redisClient *c) {
- char buf[64];
- FILE *fp;
robj *o, *dumpobj;
- sds dump = NULL;
- off_t payload_len;
- unsigned int type;
+ rio payload;
/* Check if the key is here. */
if ((o = lookupKeyRead(c->db,c->argv[1])) == NULL) {
addReply(c,shared.nullbulk);
return;
}
-
- /* Create temp file */
- snprintf(buf,sizeof(buf),"redis-dump-%d.tmp",getpid());
- fp = fopen(buf,"w+");
- if (!fp) {
- redisLog(REDIS_WARNING,"Can't open tmp file for MIGRATE: %s",
- strerror(errno));
- addReplyErrorFormat(c,"DUMP failed, tmp file creation error: %s.",
- strerror(errno));
- return;
- }
- unlink(buf);
-
- /* Dump the serailized object and read it back in memory.
- * We prefix it with a one byte containing the type ID.
- * This is the serialization format understood by RESTORE. */
- if (rdbSaveObject(fp,o) == -1) goto file_wr_err;
- payload_len = ftello(fp);
- if (fseeko(fp,0,SEEK_SET) == -1) goto file_rd_err;
- dump = sdsnewlen(NULL,payload_len+1);
- if (payload_len && fread(dump+1,payload_len,1,fp) != 1) goto file_rd_err;
- fclose(fp);
- type = o->type;
- if (type == REDIS_LIST && o->encoding == REDIS_ENCODING_ZIPLIST)
- type = REDIS_LIST_ZIPLIST;
- else if (type == REDIS_HASH && o->encoding == REDIS_ENCODING_ZIPMAP)
- type = REDIS_HASH_ZIPMAP;
- else if (type == REDIS_SET && o->encoding == REDIS_ENCODING_INTSET)
- type = REDIS_SET_INTSET;
- else
- type = o->type;
- dump[0] = type;
+
+    /* Serialize the object in an RDB-like format. It consists of an object type
+ * byte followed by the serialized object. This is understood by RESTORE. */
+ rioInitWithBuffer(&payload,sdsempty());
+ redisAssertWithInfo(c,NULL,rdbSaveObjectType(&payload,o));
+ redisAssertWithInfo(c,NULL,rdbSaveObject(&payload,o));
/* Transfer to the client */
- dumpobj = createObject(REDIS_STRING,dump);
+ dumpobj = createObject(REDIS_STRING,payload.io.buffer.ptr);
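+    /* dumpobj takes ownership of the rio sds buffer, so it is released
+     * together with the object by decrRefCount(). */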
addReplyBulk(c,dumpobj);
decrRefCount(dumpobj);
return;
+}
-file_wr_err:
- redisLog(REDIS_WARNING,"Can't write on tmp file for DUMP: %s",
- strerror(errno));
- addReplyErrorFormat(c,"DUMP failed, tmp file write error: %s.",
- strerror(errno));
- sdsfree(dump);
- fclose(fp);
- return;
-
-file_rd_err:
- redisLog(REDIS_WARNING,"Can't read from tmp file for DUMP: %s",
- strerror(errno));
- addReplyErrorFormat(c,"DUMP failed, tmp file read error: %s.",
- strerror(errno));
- sdsfree(dump);
- fclose(fp);
- return;
+/* The ASKING command is required after a -ASK redirection.
+ * The client should issue ASKING before actually sending the command to
+ * the target instance. See the Redis Cluster specification for more
+ * information. */
+void askingCommand(redisClient *c) {
+ if (server.cluster_enabled == 0) {
+ addReplyError(c,"This instance has cluster support disabled");
+ return;
+ }
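+    /* The flag is checked by the slot redirection code: keys hashing to a
+     * slot we are importing are served only if the client sent ASKING. */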
+ c->flags |= REDIS_ASKING;
+ addReply(c,shared.ok);
}
/* -----------------------------------------------------------------------------
slot = keyHashSlot((char*)firstkey->ptr, sdslen(firstkey->ptr));
n = server.cluster.slots[slot];
- redisAssert(n != NULL);
+ redisAssertWithInfo(c,firstkey,n != NULL);
} else {
/* If it is not the first key, make sure it is exactly
* the same key as the first we saw. */
}
/* Handle the case in which we are receiving this hash slot from
* another instance, so we'll accept the query even if in the table
- * it is assigned to a different node. */
- if (server.cluster.importing_slots_from[slot] != NULL)
+ * it is assigned to a different node, but only if the client
+ * issued an ASKING command before. */
+ if (server.cluster.importing_slots_from[slot] != NULL &&
+ c->flags & REDIS_ASKING) {
return server.cluster.myself;
+ }
/* It's not a -ASK case. Base case: just return the right node. */
return n;
}