From ed9b544e10b84cd43348ddfab7068b610a5df1f7 Mon Sep 17 00:00:00 2001 From: antirez Date: Sun, 22 Mar 2009 10:30:00 +0100 Subject: [PATCH] first commit --- BETATESTING.txt | 12 + BUGS | 1 + COPYING | 10 + Makefile | 58 + README | 1 + TODO | 17 + adlist.c | 285 ++ adlist.h | 90 + ae.c | 368 ++ ae.h | 106 + anet.c | 268 ++ anet.h | 49 + benchmark.c | 460 +++ client-libraries/README | 28 + client-libraries/erlang/.hg_archival.txt | 2 + client-libraries/erlang/.hgignore | 2 + client-libraries/erlang/LICENSE | 22 + client-libraries/erlang/Makefile | 29 + client-libraries/erlang/include/erldis.hrl | 1 + client-libraries/erlang/src/Makefile | 9 + client-libraries/erlang/src/client.erl | 272 ++ client-libraries/erlang/src/erldis.erl | 82 + client-libraries/erlang/src/proto.erl | 68 + client-libraries/erlang/support/include.mk | 51 + client-libraries/erlang/test/Makefile | 12 + client-libraries/erlang/test/erldis_tests.erl | 88 + client-libraries/erlang/test/proto_tests.erl | 10 + client-libraries/php/redis.php | 330 ++ client-libraries/php/tests.php | 78 + client-libraries/python/redis.py | 930 +++++ client-libraries/ruby/LICENSE | 20 + client-libraries/ruby/README.markdown | 31 + client-libraries/ruby/README.rdoc | 12 + client-libraries/ruby/Rakefile | 58 + client-libraries/ruby/bench.rb | 15 + client-libraries/ruby/bin/distredis | 33 + client-libraries/ruby/examples/basic.rb | 16 + client-libraries/ruby/examples/incr-decr.rb | 18 + client-libraries/ruby/examples/list.rb | 26 + client-libraries/ruby/examples/sets.rb | 36 + client-libraries/ruby/fill.rb | 11 + client-libraries/ruby/lib/better_timeout.rb | 188 + client-libraries/ruby/lib/dist_redis.rb | 111 + client-libraries/ruby/lib/hash_ring.rb | 73 + client-libraries/ruby/lib/redis.rb | 836 +++++ client-libraries/ruby/spec/redis_spec.rb | 267 ++ client-libraries/ruby/spec/spec_helper.rb | 4 + client-libraries/ruby/tasks/redis.tasks.rb | 116 + dict.c | 579 ++++ dict.h | 136 + doc/Benchmarks.html | 121 + doc/BgsaveCommand.html | 39 + doc/CommandReference.html | 44 + doc/Credits.html | 36 + doc/DbsizeCommand.html | 38 + doc/DelCommand.html | 42 + doc/DesignPatterns.html | 37 + doc/ExistsCommand.html | 42 + doc/FAQ.html | 47 + doc/FlushallCommand.html | 39 + doc/FlushdbCommand.html | 39 + doc/GetCommand.html | 39 + doc/IncrCommand.html | 43 + doc/InfoCommand.html | 50 + doc/KeysCommand.html | 42 + doc/LastsaveCommand.html | 39 + doc/LindexCommand.html | 41 + doc/LlenCommand.html | 42 + doc/LpopCommand.html | 41 + doc/LrangeCommand.html | 42 + doc/LremCommand.html | 43 + doc/LsetCommand.html | 39 + doc/LtrimCommand.html | 47 + doc/MoveCommand.html | 42 + doc/ProtocolSpecification.html | 143 + doc/QuitCommand.html | 38 + doc/README.html | 109 + doc/RandomkeyCommand.html | 39 + doc/RenameCommand.html | 39 + doc/RenamenxCommand.html | 44 + doc/ReplyTypes.html | 44 + doc/RpushCommand.html | 40 + doc/SaddCommand.html | 43 + doc/SaveCommand.html | 39 + doc/ScardCommand.html | 42 + doc/SelectCommand.html | 39 + doc/SetCommand.html | 39 + doc/SetnxCommand.html | 42 + doc/ShutdownCommand.html | 39 + doc/SinterCommand.html | 40 + doc/SinterstoreCommand.html | 39 + doc/SismemberCommand.html | 43 + doc/SmembersCommand.html | 39 + doc/SortCommand.html | 60 + doc/SremCommand.html | 43 + doc/TemplateCommand.html | 38 + doc/TwitterAlikeExample.html | 252 ++ doc/TypeCommand.html | 44 + doc/VersionControl.html | 40 + doc/index.html | 36 + doc/redis.png | Bin 0 -> 4852 bytes doc/style.css | 25 + redis-cli.c | 340 ++ redis.c | 3037 +++++++++++++++++ redis.conf | 66 + 
sds.c | 329 ++ sds.h | 63 + test-redis.tcl | 807 +++++ zmalloc.c | 82 + zmalloc.h | 40 + 110 files changed, 13641 insertions(+) create mode 100644 BETATESTING.txt create mode 100644 BUGS create mode 100644 COPYING create mode 100644 Makefile create mode 100644 README create mode 100644 TODO create mode 100644 adlist.c create mode 100644 adlist.h create mode 100644 ae.c create mode 100644 ae.h create mode 100644 anet.c create mode 100644 anet.h create mode 100644 benchmark.c create mode 100644 client-libraries/README create mode 100644 client-libraries/erlang/.hg_archival.txt create mode 100644 client-libraries/erlang/.hgignore create mode 100644 client-libraries/erlang/LICENSE create mode 100644 client-libraries/erlang/Makefile create mode 100644 client-libraries/erlang/include/erldis.hrl create mode 100644 client-libraries/erlang/src/Makefile create mode 100644 client-libraries/erlang/src/client.erl create mode 100644 client-libraries/erlang/src/erldis.erl create mode 100644 client-libraries/erlang/src/proto.erl create mode 100644 client-libraries/erlang/support/include.mk create mode 100644 client-libraries/erlang/test/Makefile create mode 100644 client-libraries/erlang/test/erldis_tests.erl create mode 100644 client-libraries/erlang/test/proto_tests.erl create mode 100644 client-libraries/php/redis.php create mode 100644 client-libraries/php/tests.php create mode 100644 client-libraries/python/redis.py create mode 100644 client-libraries/ruby/LICENSE create mode 100644 client-libraries/ruby/README.markdown create mode 100644 client-libraries/ruby/README.rdoc create mode 100644 client-libraries/ruby/Rakefile create mode 100644 client-libraries/ruby/bench.rb create mode 100755 client-libraries/ruby/bin/distredis create mode 100644 client-libraries/ruby/examples/basic.rb create mode 100644 client-libraries/ruby/examples/incr-decr.rb create mode 100644 client-libraries/ruby/examples/list.rb create mode 100644 client-libraries/ruby/examples/sets.rb create mode 100644 client-libraries/ruby/fill.rb create mode 100644 client-libraries/ruby/lib/better_timeout.rb create mode 100644 client-libraries/ruby/lib/dist_redis.rb create mode 100644 client-libraries/ruby/lib/hash_ring.rb create mode 100644 client-libraries/ruby/lib/redis.rb create mode 100644 client-libraries/ruby/spec/redis_spec.rb create mode 100644 client-libraries/ruby/spec/spec_helper.rb create mode 100644 client-libraries/ruby/tasks/redis.tasks.rb create mode 100644 dict.c create mode 100644 dict.h create mode 100644 doc/Benchmarks.html create mode 100644 doc/BgsaveCommand.html create mode 100644 doc/CommandReference.html create mode 100644 doc/Credits.html create mode 100644 doc/DbsizeCommand.html create mode 100644 doc/DelCommand.html create mode 100644 doc/DesignPatterns.html create mode 100644 doc/ExistsCommand.html create mode 100644 doc/FAQ.html create mode 100644 doc/FlushallCommand.html create mode 100644 doc/FlushdbCommand.html create mode 100644 doc/GetCommand.html create mode 100644 doc/IncrCommand.html create mode 100644 doc/InfoCommand.html create mode 100644 doc/KeysCommand.html create mode 100644 doc/LastsaveCommand.html create mode 100644 doc/LindexCommand.html create mode 100644 doc/LlenCommand.html create mode 100644 doc/LpopCommand.html create mode 100644 doc/LrangeCommand.html create mode 100644 doc/LremCommand.html create mode 100644 doc/LsetCommand.html create mode 100644 doc/LtrimCommand.html create mode 100644 doc/MoveCommand.html create mode 100644 doc/ProtocolSpecification.html create mode 100644 
doc/QuitCommand.html create mode 100644 doc/README.html create mode 100644 doc/RandomkeyCommand.html create mode 100644 doc/RenameCommand.html create mode 100644 doc/RenamenxCommand.html create mode 100644 doc/ReplyTypes.html create mode 100644 doc/RpushCommand.html create mode 100644 doc/SaddCommand.html create mode 100644 doc/SaveCommand.html create mode 100644 doc/ScardCommand.html create mode 100644 doc/SelectCommand.html create mode 100644 doc/SetCommand.html create mode 100644 doc/SetnxCommand.html create mode 100644 doc/ShutdownCommand.html create mode 100644 doc/SinterCommand.html create mode 100644 doc/SinterstoreCommand.html create mode 100644 doc/SismemberCommand.html create mode 100644 doc/SmembersCommand.html create mode 100644 doc/SortCommand.html create mode 100644 doc/SremCommand.html create mode 100644 doc/TemplateCommand.html create mode 100644 doc/TwitterAlikeExample.html create mode 100644 doc/TypeCommand.html create mode 100644 doc/VersionControl.html create mode 100644 doc/index.html create mode 100644 doc/redis.png create mode 100644 doc/style.css create mode 100644 redis-cli.c create mode 100644 redis.c create mode 100644 redis.conf create mode 100644 sds.c create mode 100644 sds.h create mode 100644 test-redis.tcl create mode 100644 zmalloc.c create mode 100644 zmalloc.h diff --git a/BETATESTING.txt b/BETATESTING.txt new file mode 100644 index 00000000..6870420a --- /dev/null +++ b/BETATESTING.txt @@ -0,0 +1,12 @@ +Hello betatester! + +This Redis Server distribution is just a preview, it is by no mean an usable +product, but probably it can already give you some feeling about what the +final release is going to be. + +Be aware that if you want to use Redis in production the server may not be perfectly stable or may cotanin unfixed bugs. We did our best to ensure this distribution is of good quality and bug free but the development is currently very fast. + +Please send feedbacks to antirez at gmail dot com. + +Enjoy, +antirez diff --git a/BUGS b/BUGS new file mode 100644 index 00000000..d9d271e1 --- /dev/null +++ b/BUGS @@ -0,0 +1 @@ +Plese check http://code.google.com/p/redis/issues/list diff --git a/COPYING b/COPYING new file mode 100644 index 00000000..3e704e3e --- /dev/null +++ b/COPYING @@ -0,0 +1,10 @@ +Copyright (c) 2006-2009, Salvatore Sanfilippo +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. + * Neither the name of Redis nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/Makefile b/Makefile new file mode 100644 index 00000000..bf9760f0 --- /dev/null +++ b/Makefile @@ -0,0 +1,58 @@ +# Redis Makefile +# Copyright (C) 2009 Salvatore Sanfilippo +# This file is released under the BSD license, see the COPYING file + +DEBUG?= -g +CFLAGS?= -O2 -Wall -W -DSDS_ABORT_ON_OOM +CCOPT= $(CFLAGS) + +OBJ = adlist.o ae.o anet.o dict.o redis.o sds.o zmalloc.o +BENCHOBJ = ae.o anet.o benchmark.o sds.o adlist.o zmalloc.o +CLIOBJ = anet.o sds.o adlist.o redis-cli.o zmalloc.o + +PRGNAME = redis-server +BENCHPRGNAME = redis-benchmark +CLIPRGNAME = redis-cli + +all: redis-server redis-benchmark redis-cli + +# Deps (use make dep to generate this) +adlist.o: adlist.c adlist.h +ae.o: ae.c ae.h +anet.o: anet.c anet.h +benchmark.o: benchmark.c ae.h anet.h sds.h adlist.h +dict.o: dict.c dict.h +redis-cli.o: redis-cli.c anet.h sds.h adlist.h +redis.o: redis.c ae.h sds.h anet.h dict.h adlist.h +sds.o: sds.c sds.h +sha1.o: sha1.c sha1.h +zmalloc.o: zmalloc.c + +redis-server: $(OBJ) + $(CC) -o $(PRGNAME) $(CCOPT) $(DEBUG) $(OBJ) + @echo "" + @echo "Hint: To run the test-redis.tcl script is a good idea." + @echo "Launch the redis server with ./redis-server, then in another" + @echo "terminal window enter this directory and run 'make test'." + @echo "" + +redis-benchmark: $(BENCHOBJ) + $(CC) -o $(BENCHPRGNAME) $(CCOPT) $(DEBUG) $(BENCHOBJ) + +redis-cli: $(CLIOBJ) + $(CC) -o $(CLIPRGNAME) $(CCOPT) $(DEBUG) $(CLIOBJ) + +.c.o: + $(CC) -c $(CCOPT) $(DEBUG) $(COMPILE_TIME) $< + +clean: + rm -rf $(PRGNAME) $(BENCHPRGNAME) $(CLIPRGNAME) *.o + +dep: + $(CC) -MM *.c + +test: + tclsh test-redis.tcl + +bench: + ./redis-benchmark diff --git a/README b/README new file mode 100644 index 00000000..a810a7c0 --- /dev/null +++ b/README @@ -0,0 +1 @@ +Check the 'doc' directory. doc/README.html is a good starting point :) diff --git a/TODO b/TODO new file mode 100644 index 00000000..02595425 --- /dev/null +++ b/TODO @@ -0,0 +1,17 @@ +BETA 8 TODO +- keys expire +- sunion ssub +- write integers in a special way on disk (and on memory?) +- compact types for disk storing of short strings (no 4 bytes overhead!) +- network layer stresser in test in demo +- maxclients directive +- check 'server.dirty' everywere +- replication tests +- command line client. If the last argument of a bulk command is missing get it from stdin. Example: + $ echo "bar" | redis-client SET foo + $ redis-client SET foo bar + $ redis-client GET foo + bar + $ +- Make Redis aware of the memory it is using thanks to getrusage() and report this info with the INFO command. +- INFO command: clients, slave/master, requests/second in the last N seconds, memory usage, uptime, dirty, lastsave diff --git a/adlist.c b/adlist.c new file mode 100644 index 00000000..1f978c7b --- /dev/null +++ b/adlist.c @@ -0,0 +1,285 @@ +/* adlist.c - A generic doubly linked list implementation + * + * Copyright (c) 2006-2009, Salvatore Sanfilippo + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + + +#include +#include "adlist.h" +#include "zmalloc.h" + +/* Create a new list. The created list can be freed with + * AlFreeList(), but private value of every node need to be freed + * by the user before to call AlFreeList(). + * + * On error, NULL is returned. Otherwise the pointer to the new list. */ +list *listCreate(void) +{ + struct list *list; + + if ((list = zmalloc(sizeof(*list))) == NULL) + return NULL; + list->head = list->tail = NULL; + list->len = 0; + list->dup = NULL; + list->free = NULL; + list->match = NULL; + return list; +} + +/* Free the whole list. + * + * This function can't fail. */ +void listRelease(list *list) +{ + unsigned int len; + listNode *current, *next; + + current = list->head; + len = list->len; + while(len--) { + next = current->next; + if (list->free) list->free(current->value); + zfree(current); + current = next; + } + zfree(list); +} + +/* Add a new node to the list, to head, contaning the specified 'value' + * pointer as value. + * + * On error, NULL is returned and no operation is performed (i.e. the + * list remains unaltered). + * On success the 'list' pointer you pass to the function is returned. */ +list *listAddNodeHead(list *list, void *value) +{ + listNode *node; + + if ((node = zmalloc(sizeof(*node))) == NULL) + return NULL; + node->value = value; + if (list->len == 0) { + list->head = list->tail = node; + node->prev = node->next = NULL; + } else { + node->prev = NULL; + node->next = list->head; + list->head->prev = node; + list->head = node; + } + list->len++; + return list; +} + +/* Add a new node to the list, to tail, contaning the specified 'value' + * pointer as value. + * + * On error, NULL is returned and no operation is performed (i.e. the + * list remains unaltered). + * On success the 'list' pointer you pass to the function is returned. 
*/ +list *listAddNodeTail(list *list, void *value) +{ + listNode *node; + + if ((node = zmalloc(sizeof(*node))) == NULL) + return NULL; + node->value = value; + if (list->len == 0) { + list->head = list->tail = node; + node->prev = node->next = NULL; + } else { + node->prev = list->tail; + node->next = NULL; + list->tail->next = node; + list->tail = node; + } + list->len++; + return list; +} + +/* Remove the specified node from the specified list. + * It's up to the caller to free the private value of the node. + * + * This function can't fail. */ +void listDelNode(list *list, listNode *node) +{ + if (node->prev) + node->prev->next = node->next; + else + list->head = node->next; + if (node->next) + node->next->prev = node->prev; + else + list->tail = node->prev; + if (list->free) list->free(node->value); + zfree(node); + list->len--; +} + +/* Returns a list iterator 'iter'. After the initialization every + * call to listNextElement() will return the next element of the list. + * + * This function can't fail. */ +listIter *listGetIterator(list *list, int direction) +{ + listIter *iter; + + if ((iter = zmalloc(sizeof(*iter))) == NULL) return NULL; + if (direction == AL_START_HEAD) + iter->next = list->head; + else + iter->next = list->tail; + iter->direction = direction; + return iter; +} + +/* Release the iterator memory */ +void listReleaseIterator(listIter *iter) { + zfree(iter); +} + +/* Return the next element of an iterator. + * It's valid to remove the currently returned element using + * listDelNode(), but not to remove other elements. + * + * The function returns a pointer to the next element of the list, + * or NULL if there are no more elements, so the classical usage patter + * is: + * + * iter = listGetItarotr(list,); + * while ((node = listNextIterator(iter)) != NULL) { + * DoSomethingWith(listNodeValue(node)); + * } + * + * */ +listNode *listNextElement(listIter *iter) +{ + listNode *current = iter->next; + + if (current != NULL) { + if (iter->direction == AL_START_HEAD) + iter->next = current->next; + else + iter->next = current->prev; + } + return current; +} + +/* Duplicate the whole list. On out of memory NULL is returned. + * On success a copy of the original list is returned. + * + * The 'Dup' method set with listSetDupMethod() function is used + * to copy the node value. Otherwise the same pointer value of + * the original node is used as value of the copied node. + * + * The original list both on success or error is never modified. */ +list *listDup(list *orig) +{ + list *copy; + listIter *iter; + listNode *node; + + if ((copy = listCreate()) == NULL) + return NULL; + copy->dup = orig->dup; + copy->free = orig->free; + copy->match = orig->match; + iter = listGetIterator(orig, AL_START_HEAD); + while((node = listNextElement(iter)) != NULL) { + void *value; + + if (copy->dup) { + value = copy->dup(node->value); + if (value == NULL) { + listRelease(copy); + listReleaseIterator(iter); + return NULL; + } + } else + value = node->value; + if (listAddNodeTail(copy, value) == NULL) { + listRelease(copy); + listReleaseIterator(iter); + return NULL; + } + } + listReleaseIterator(iter); + return copy; +} + +/* Search the list for a node matching a given key. + * The match is performed using the 'match' method + * set with listSetMatchMethod(). If no 'match' method + * is set, the 'value' pointer of every node is directly + * compared with the 'key' pointer. + * + * On success the first matching node pointer is returned + * (search starts from head). 
If no matching node exists + * NULL is returned. */ +listNode *listSearchKey(list *list, void *key) +{ + listIter *iter; + listNode *node; + + iter = listGetIterator(list, AL_START_HEAD); + while((node = listNextElement(iter)) != NULL) { + if (list->match) { + if (list->match(node->value, key)) { + listReleaseIterator(iter); + return node; + } + } else { + if (key == node->value) { + listReleaseIterator(iter); + return node; + } + } + } + listReleaseIterator(iter); + return NULL; +} + +/* Return the element at the specified zero-based index + * where 0 is the head, 1 is the element next to head + * and so on. Negative integers are used in order to count + * from the tail, -1 is the last element, -2 the penultimante + * and so on. If the index is out of range NULL is returned. */ +listNode *listIndex(list *list, int index) { + listNode *n; + + if (index < 0) { + index = (-index)-1; + n = list->tail; + while(index-- && n) n = n->prev; + } else { + n = list->head; + while(index-- && n) n = n->next; + } + return n; +} diff --git a/adlist.h b/adlist.h new file mode 100644 index 00000000..43476c0a --- /dev/null +++ b/adlist.h @@ -0,0 +1,90 @@ +/* adlist.h - A generic doubly linked list implementation + * + * Copyright (c) 2006-2009, Salvatore Sanfilippo + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __ADLIST_H__ +#define __ADLIST_H__ + +/* Node, List, and Iterator are the only data structures used currently. 
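As a quick illustration of the list API implemented above, here is a minimal sketch (not part of the original patch) that builds a list of string literals, walks it with an iterator and fetches the tail with a negative listIndex(); it assumes adlist.c and zmalloc.c from this commit are compiled into the same program:

    #include <stdio.h>
    #include "adlist.h"

    int main(void) {
        list *l = listCreate();
        listIter *iter;
        listNode *node;

        if (l == NULL) return 1;
        /* Values are opaque void* pointers; string literals need no free hook. */
        listAddNodeTail(l, "foo");
        listAddNodeTail(l, "bar");
        listAddNodeHead(l, "first");

        printf("len  = %u\n", listLength(l));                          /* 3 */
        printf("tail = %s\n", (char*)listNodeValue(listIndex(l, -1))); /* bar */

        iter = listGetIterator(l, AL_START_HEAD);
        while ((node = listNextElement(iter)) != NULL)
            printf("%s\n", (char*)listNodeValue(node));
        listReleaseIterator(iter);
        listRelease(l);
        return 0;
    }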
*/ + +typedef struct listNode { + struct listNode *prev; + struct listNode *next; + void *value; +} listNode; + +typedef struct list { + listNode *head; + listNode *tail; + void *(*dup)(void *ptr); + void (*free)(void *ptr); + int (*match)(void *ptr, void *key); + unsigned int len; +} list; + +typedef struct listIter { + listNode *next; + listNode *prev; + int direction; +} listIter; + +/* Functions implemented as macros */ +#define listLength(l) ((l)->len) +#define listFirst(l) ((l)->head) +#define listLast(l) ((l)->tail) +#define listPrevNode(n) ((n)->prev) +#define listNextNode(n) ((n)->next) +#define listNodeValue(n) ((n)->value) + +#define listSetDupMethod(l,m) ((l)->dup = (m)) +#define listSetFreeMethod(l,m) ((l)->free = (m)) +#define listSetMatchMethod(l,m) ((l)->match = (m)) + +#define listGetDupMethod(l) ((l)->dup) +#define listGetFree(l) ((l)->free) +#define listGetMatchMethod(l) ((l)->match) + +/* Prototypes */ +list *listCreate(void); +void listRelease(list *list); +list *listAddNodeHead(list *list, void *value); +list *listAddNodeTail(list *list, void *value); +void listDelNode(list *list, listNode *node); +listIter *listGetIterator(list *list, int direction); +listNode *listNextElement(listIter *iter); +void listReleaseIterator(listIter *iter); +list *listDup(list *orig); +listNode *listSearchKey(list *list, void *key); +listNode *listIndex(list *list, int index); + +/* Directions for iterators */ +#define AL_START_HEAD 0 +#define AL_START_TAIL 1 + +#endif /* __ADLIST_H__ */ diff --git a/ae.c b/ae.c new file mode 100644 index 00000000..375f28a4 --- /dev/null +++ b/ae.c @@ -0,0 +1,368 @@ +/* A simple event-driven programming library. Originally I wrote this code + * for the Jim's event-loop (Jim is a Tcl interpreter) but later translated + * it in form of a library for easy reuse. + * + * Copyright (c) 2006-2009, Salvatore Sanfilippo + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
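When node values are heap allocated, the free and match hooks declared in the header above keep memory management and key lookup out of the caller's hands. A hedged sketch, again assuming adlist.c and zmalloc.c are linked in (the fruit strings are made up for the example):

    #include <stdlib.h>
    #include <string.h>
    #include <stdio.h>
    #include "adlist.h"

    static int matchString(void *ptr, void *key) {
        return strcmp((char*)ptr, (char*)key) == 0;
    }

    int main(void) {
        list *l = listCreate();
        listNode *n;

        if (l == NULL) return 1;
        listSetFreeMethod(l, free);        /* values are free()d on node removal */
        listSetMatchMethod(l, matchString);

        listAddNodeTail(l, strdup("apple"));
        listAddNodeTail(l, strdup("banana"));

        n = listSearchKey(l, "banana");
        if (n != NULL) {
            printf("found %s\n", (char*)listNodeValue(n));
            listDelNode(l, n);             /* free hook releases the value */
        }
        listRelease(l);                    /* remaining values freed here too */
        return 0;
    }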
+ */ + +#include +#include +#include +#include +#include + +#include "ae.h" +#include "zmalloc.h" + +aeEventLoop *aeCreateEventLoop(void) { + aeEventLoop *eventLoop; + + eventLoop = zmalloc(sizeof(*eventLoop)); + if (!eventLoop) return NULL; + eventLoop->fileEventHead = NULL; + eventLoop->timeEventHead = NULL; + eventLoop->timeEventNextId = 0; + eventLoop->stop = 0; + return eventLoop; +} + +void aeDeleteEventLoop(aeEventLoop *eventLoop) { + zfree(eventLoop); +} + +void aeStop(aeEventLoop *eventLoop) { + eventLoop->stop = 1; +} + +int aeCreateFileEvent(aeEventLoop *eventLoop, int fd, int mask, + aeFileProc *proc, void *clientData, + aeEventFinalizerProc *finalizerProc) +{ + aeFileEvent *fe; + + fe = zmalloc(sizeof(*fe)); + if (fe == NULL) return AE_ERR; + fe->fd = fd; + fe->mask = mask; + fe->fileProc = proc; + fe->finalizerProc = finalizerProc; + fe->clientData = clientData; + fe->next = eventLoop->fileEventHead; + eventLoop->fileEventHead = fe; + return AE_OK; +} + +void aeDeleteFileEvent(aeEventLoop *eventLoop, int fd, int mask) +{ + aeFileEvent *fe, *prev = NULL; + + fe = eventLoop->fileEventHead; + while(fe) { + if (fe->fd == fd && fe->mask == mask) { + if (prev == NULL) + eventLoop->fileEventHead = fe->next; + else + prev->next = fe->next; + if (fe->finalizerProc) + fe->finalizerProc(eventLoop, fe->clientData); + zfree(fe); + return; + } + prev = fe; + fe = fe->next; + } +} + +static void aeGetTime(long *seconds, long *milliseconds) +{ + struct timeval tv; + + gettimeofday(&tv, NULL); + *seconds = tv.tv_sec; + *milliseconds = tv.tv_usec/1000; +} + +static void aeAddMillisecondsToNow(long long milliseconds, long *sec, long *ms) { + long cur_sec, cur_ms, when_sec, when_ms; + + aeGetTime(&cur_sec, &cur_ms); + when_sec = cur_sec + milliseconds/1000; + when_ms = cur_ms + milliseconds%1000; + if (when_ms >= 1000) { + when_sec ++; + when_ms -= 1000; + } + *sec = when_sec; + *ms = when_ms; +} + +long long aeCreateTimeEvent(aeEventLoop *eventLoop, long long milliseconds, + aeTimeProc *proc, void *clientData, + aeEventFinalizerProc *finalizerProc) +{ + long long id = eventLoop->timeEventNextId++; + aeTimeEvent *te; + + te = zmalloc(sizeof(*te)); + if (te == NULL) return AE_ERR; + te->id = id; + aeAddMillisecondsToNow(milliseconds,&te->when_sec,&te->when_ms); + te->timeProc = proc; + te->finalizerProc = finalizerProc; + te->clientData = clientData; + te->next = eventLoop->timeEventHead; + eventLoop->timeEventHead = te; + return id; +} + +int aeDeleteTimeEvent(aeEventLoop *eventLoop, long long id) +{ + aeTimeEvent *te, *prev = NULL; + + te = eventLoop->timeEventHead; + while(te) { + if (te->id == id) { + if (prev == NULL) + eventLoop->timeEventHead = te->next; + else + prev->next = te->next; + if (te->finalizerProc) + te->finalizerProc(eventLoop, te->clientData); + zfree(te); + return AE_OK; + } + prev = te; + te = te->next; + } + return AE_ERR; /* NO event with the specified ID found */ +} + +/* Search the first timer to fire. + * This operation is useful to know how many time the select can be + * put in sleep without to delay any event. + * If there are no timers NULL is returned. + * + * Note that's O(N) since time events are unsorted. 
*/ +static aeTimeEvent *aeSearchNearestTimer(aeEventLoop *eventLoop) +{ + aeTimeEvent *te = eventLoop->timeEventHead; + aeTimeEvent *nearest = NULL; + + while(te) { + if (!nearest || te->when_sec < nearest->when_sec || + (te->when_sec == nearest->when_sec && + te->when_ms < nearest->when_ms)) + nearest = te; + te = te->next; + } + return nearest; +} + +/* Process every pending time event, then every pending file event + * (that may be registered by time event callbacks just processed). + * Without special flags the function sleeps until some file event + * fires, or when the next time event occurrs (if any). + * + * If flags is 0, the function does nothing and returns. + * if flags has AE_ALL_EVENTS set, all the kind of events are processed. + * if flags has AE_FILE_EVENTS set, file events are processed. + * if flags has AE_TIME_EVENTS set, time events are processed. + * if flags has AE_DONT_WAIT set the function returns ASAP until all + * the events that's possible to process without to wait are processed. + * + * The function returns the number of events processed. */ +int aeProcessEvents(aeEventLoop *eventLoop, int flags) +{ + int maxfd = 0, numfd = 0, processed = 0; + fd_set rfds, wfds, efds; + aeFileEvent *fe = eventLoop->fileEventHead; + aeTimeEvent *te; + long long maxId; + AE_NOTUSED(flags); + + /* Nothing to do? return ASAP */ + if (!(flags & AE_TIME_EVENTS) && !(flags & AE_FILE_EVENTS)) return 0; + + FD_ZERO(&rfds); + FD_ZERO(&wfds); + FD_ZERO(&efds); + + /* Check file events */ + if (flags & AE_FILE_EVENTS) { + while (fe != NULL) { + if (fe->mask & AE_READABLE) FD_SET(fe->fd, &rfds); + if (fe->mask & AE_WRITABLE) FD_SET(fe->fd, &wfds); + if (fe->mask & AE_EXCEPTION) FD_SET(fe->fd, &efds); + if (maxfd < fe->fd) maxfd = fe->fd; + numfd++; + fe = fe->next; + } + } + /* Note that we want call select() even if there are no + * file events to process as long as we want to process time + * events, in order to sleep until the next time event is ready + * to fire. */ + if (numfd || ((flags & AE_TIME_EVENTS) && !(flags & AE_DONT_WAIT))) { + int retval; + aeTimeEvent *shortest = NULL; + struct timeval tv, *tvp; + + if (flags & AE_TIME_EVENTS && !(flags & AE_DONT_WAIT)) + shortest = aeSearchNearestTimer(eventLoop); + if (shortest) { + long now_sec, now_ms; + + /* Calculate the time missing for the nearest + * timer to fire. 
*/ + aeGetTime(&now_sec, &now_ms); + tvp = &tv; + tvp->tv_sec = shortest->when_sec - now_sec; + if (shortest->when_ms < now_ms) { + tvp->tv_usec = ((shortest->when_ms+1000) - now_ms)*1000; + tvp->tv_sec --; + } else { + tvp->tv_usec = (shortest->when_ms - now_ms)*1000; + } + } else { + /* If we have to check for events but need to return + * ASAP because of AE_DONT_WAIT we need to se the timeout + * to zero */ + if (flags & AE_DONT_WAIT) { + tv.tv_sec = tv.tv_usec = 0; + tvp = &tv; + } else { + /* Otherwise we can block */ + tvp = NULL; /* wait forever */ + } + } + + retval = select(maxfd+1, &rfds, &wfds, &efds, tvp); + if (retval > 0) { + fe = eventLoop->fileEventHead; + while(fe != NULL) { + int fd = (int) fe->fd; + + if ((fe->mask & AE_READABLE && FD_ISSET(fd, &rfds)) || + (fe->mask & AE_WRITABLE && FD_ISSET(fd, &wfds)) || + (fe->mask & AE_EXCEPTION && FD_ISSET(fd, &efds))) + { + int mask = 0; + + if (fe->mask & AE_READABLE && FD_ISSET(fd, &rfds)) + mask |= AE_READABLE; + if (fe->mask & AE_WRITABLE && FD_ISSET(fd, &wfds)) + mask |= AE_WRITABLE; + if (fe->mask & AE_EXCEPTION && FD_ISSET(fd, &efds)) + mask |= AE_EXCEPTION; + fe->fileProc(eventLoop, fe->fd, fe->clientData, mask); + processed++; + /* After an event is processed our file event list + * may no longer be the same, so what we do + * is to clear the bit for this file descriptor and + * restart again from the head. */ + fe = eventLoop->fileEventHead; + FD_CLR(fd, &rfds); + FD_CLR(fd, &wfds); + FD_CLR(fd, &efds); + } else { + fe = fe->next; + } + } + } + } + /* Check time events */ + if (flags & AE_TIME_EVENTS) { + te = eventLoop->timeEventHead; + maxId = eventLoop->timeEventNextId-1; + while(te) { + long now_sec, now_ms; + long long id; + + if (te->id > maxId) { + te = te->next; + continue; + } + aeGetTime(&now_sec, &now_ms); + if (now_sec > te->when_sec || + (now_sec == te->when_sec && now_ms >= te->when_ms)) + { + int retval; + + id = te->id; + retval = te->timeProc(eventLoop, id, te->clientData); + /* After an event is processed our time event list may + * no longer be the same, so we restart from head. + * Still we make sure to don't process events registered + * by event handlers itself in order to don't loop forever. + * To do so we saved the max ID we want to handle. 
*/ + if (retval != AE_NOMORE) { + aeAddMillisecondsToNow(retval,&te->when_sec,&te->when_ms); + } else { + aeDeleteTimeEvent(eventLoop, id); + } + te = eventLoop->timeEventHead; + } else { + te = te->next; + } + } + } + return processed; /* return the number of processed file/time events */ +} + +/* Wait for millseconds until the given file descriptor becomes + * writable/readable/exception */ +int aeWait(int fd, int mask, long long milliseconds) { + struct timeval tv; + fd_set rfds, wfds, efds; + int retmask = 0, retval; + + tv.tv_sec = milliseconds/1000; + tv.tv_usec = (milliseconds%1000)*1000; + FD_ZERO(&rfds); + FD_ZERO(&wfds); + FD_ZERO(&efds); + + if (mask & AE_READABLE) FD_SET(fd,&rfds); + if (mask & AE_WRITABLE) FD_SET(fd,&wfds); + if (mask & AE_EXCEPTION) FD_SET(fd,&efds); + if ((retval = select(fd+1, &rfds, &wfds, &efds, &tv)) > 0) { + if (FD_ISSET(fd,&rfds)) retmask |= AE_READABLE; + if (FD_ISSET(fd,&wfds)) retmask |= AE_WRITABLE; + if (FD_ISSET(fd,&efds)) retmask |= AE_EXCEPTION; + return retmask; + } else { + return retval; + } +} + +void aeMain(aeEventLoop *eventLoop) +{ + eventLoop->stop = 0; + while (!eventLoop->stop) + aeProcessEvents(eventLoop, AE_ALL_EVENTS); +} diff --git a/ae.h b/ae.h new file mode 100644 index 00000000..69bbbee9 --- /dev/null +++ b/ae.h @@ -0,0 +1,106 @@ +/* A simple event-driven programming library. Originally I wrote this code + * for the Jim's event-loop (Jim is a Tcl interpreter) but later translated + * it in form of a library for easy reuse. + * + * Copyright (c) 2006-2009, Salvatore Sanfilippo + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
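The return value convention handled by aeProcessEvents() above (a timer callback returns the number of milliseconds after which it wants to fire again, or AE_NOMORE to be deleted) makes periodic timers a one-liner. A minimal sketch, not part of the patch, assuming ae.c and zmalloc.c are linked in:

    #include <stdio.h>
    #include "ae.h"

    /* Fires roughly every 100 ms; after five ticks it stops the loop
     * and removes itself by returning AE_NOMORE. */
    static int tick(aeEventLoop *el, long long id, void *clientData) {
        int *count = clientData;

        AE_NOTUSED(id);
        printf("tick %d\n", ++(*count));
        if (*count == 5) {
            aeStop(el);
            return AE_NOMORE;   /* the event is deleted by the loop */
        }
        return 100;             /* re-arm 100 milliseconds from now */
    }

    int main(void) {
        int count = 0;
        aeEventLoop *el = aeCreateEventLoop();

        aeCreateTimeEvent(el, 100, tick, &count, NULL);
        aeMain(el);             /* returns once aeStop() has been called */
        aeDeleteEventLoop(el);
        return 0;
    }

The same convention lets a single-threaded server schedule its periodic housekeeping without a dedicated timer thread.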
+ */ + +#ifndef __AE_H__ +#define __AE_H__ + +struct aeEventLoop; + +/* Types and data structures */ +typedef void aeFileProc(struct aeEventLoop *eventLoop, int fd, void *clientData, int mask); +typedef int aeTimeProc(struct aeEventLoop *eventLoop, long long id, void *clientData); +typedef void aeEventFinalizerProc(struct aeEventLoop *eventLoop, void *clientData); + +/* File event structure */ +typedef struct aeFileEvent { + int fd; + int mask; /* one of AE_(READABLE|WRITABLE|EXCEPTION) */ + aeFileProc *fileProc; + aeEventFinalizerProc *finalizerProc; + void *clientData; + struct aeFileEvent *next; +} aeFileEvent; + +/* Time event structure */ +typedef struct aeTimeEvent { + long long id; /* time event identifier. */ + long when_sec; /* seconds */ + long when_ms; /* milliseconds */ + aeTimeProc *timeProc; + aeEventFinalizerProc *finalizerProc; + void *clientData; + struct aeTimeEvent *next; +} aeTimeEvent; + +/* State of an event based program */ +typedef struct aeEventLoop { + long long timeEventNextId; + aeFileEvent *fileEventHead; + aeTimeEvent *timeEventHead; + int stop; +} aeEventLoop; + +/* Defines */ +#define AE_OK 0 +#define AE_ERR -1 + +#define AE_READABLE 1 +#define AE_WRITABLE 2 +#define AE_EXCEPTION 4 + +#define AE_FILE_EVENTS 1 +#define AE_TIME_EVENTS 2 +#define AE_ALL_EVENTS (AE_FILE_EVENTS|AE_TIME_EVENTS) +#define AE_DONT_WAIT 4 + +#define AE_NOMORE -1 + +/* Macros */ +#define AE_NOTUSED(V) ((void) V) + +/* Prototypes */ +aeEventLoop *aeCreateEventLoop(void); +void aeDeleteEventLoop(aeEventLoop *eventLoop); +void aeStop(aeEventLoop *eventLoop); +int aeCreateFileEvent(aeEventLoop *eventLoop, int fd, int mask, + aeFileProc *proc, void *clientData, + aeEventFinalizerProc *finalizerProc); +void aeDeleteFileEvent(aeEventLoop *eventLoop, int fd, int mask); +long long aeCreateTimeEvent(aeEventLoop *eventLoop, long long milliseconds, + aeTimeProc *proc, void *clientData, + aeEventFinalizerProc *finalizerProc); +int aeDeleteTimeEvent(aeEventLoop *eventLoop, long long id); +int aeProcessEvents(aeEventLoop *eventLoop, int flags); +int aeWait(int fd, int mask, long long milliseconds); +void aeMain(aeEventLoop *eventLoop); + +#endif diff --git a/anet.c b/anet.c new file mode 100644 index 00000000..bcb99057 --- /dev/null +++ b/anet.c @@ -0,0 +1,268 @@ +/* anet.c -- Basic TCP socket stuff made a bit less boring + * + * Copyright (c) 2006-2009, Salvatore Sanfilippo + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
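The file event side of the API declared above works the same way: register a callback for a descriptor and a mask, then let aeMain() drive it. A small sketch (not from the patch) that watches standard input and stops the loop on end of file:

    #include <stdio.h>
    #include <unistd.h>
    #include "ae.h"

    /* Called whenever stdin becomes readable. */
    static void onStdin(aeEventLoop *el, int fd, void *clientData, int mask) {
        char buf[1024];
        int nread;

        AE_NOTUSED(clientData);
        AE_NOTUSED(mask);
        nread = read(fd, buf, sizeof(buf));
        if (nread <= 0) {       /* EOF or error: leave the loop */
            aeStop(el);
            return;
        }
        printf("read %d bytes\n", nread);
    }

    int main(void) {
        aeEventLoop *el = aeCreateEventLoop();

        if (aeCreateFileEvent(el, STDIN_FILENO, AE_READABLE,
                              onStdin, NULL, NULL) == AE_ERR) return 1;
        aeMain(el);
        aeDeleteEventLoop(el);
        return 0;
    }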
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "anet.h" + +static void anetSetError(char *err, const char *fmt, ...) +{ + va_list ap; + + if (!err) return; + va_start(ap, fmt); + vsnprintf(err, ANET_ERR_LEN, fmt, ap); + va_end(ap); +} + +int anetNonBlock(char *err, int fd) +{ + int flags; + + /* Set the socket nonblocking. + * Note that fcntl(2) for F_GETFL and F_SETFL can't be + * interrupted by a signal. */ + if ((flags = fcntl(fd, F_GETFL)) == -1) { + anetSetError(err, "fcntl(F_GETFL): %s\n", strerror(errno)); + return ANET_ERR; + } + if (fcntl(fd, F_SETFL, flags | O_NONBLOCK) == -1) { + anetSetError(err, "fcntl(F_SETFL,O_NONBLOCK): %s\n", strerror(errno)); + return ANET_ERR; + } + return ANET_OK; +} + +int anetTcpNoDelay(char *err, int fd) +{ + int yes = 1; + if (setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, &yes, sizeof(yes)) == -1) + { + anetSetError(err, "setsockopt TCP_NODELAY: %s\n", strerror(errno)); + return ANET_ERR; + } + return ANET_OK; +} + +int anetSetSendBuffer(char *err, int fd, int buffsize) +{ + if (setsockopt(fd, SOL_SOCKET, SO_SNDBUF, &buffsize, sizeof(buffsize)) == -1) + { + anetSetError(err, "setsockopt SO_SNDBUF: %s\n", strerror(errno)); + return ANET_ERR; + } + return ANET_OK; +} + +int anetTcpKeepAlive(char *err, int fd) +{ + int yes = 1; + if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &yes, sizeof(yes)) == -1) { + anetSetError(err, "setsockopt SO_KEEPALIVE: %s\n", strerror(errno)); + return ANET_ERR; + } + return ANET_OK; +} + +int anetResolve(char *err, char *host, char *ipbuf) +{ + struct sockaddr_in sa; + + sa.sin_family = AF_INET; + if (inet_aton(host, &sa.sin_addr) == 0) { + struct hostent *he; + + he = gethostbyname(host); + if (he == NULL) { + anetSetError(err, "can't resolve: %s\n", host); + return ANET_ERR; + } + memcpy(&sa.sin_addr, he->h_addr, sizeof(struct in_addr)); + } + strcpy(ipbuf,inet_ntoa(sa.sin_addr)); + return ANET_OK; +} + +#define ANET_CONNECT_NONE 0 +#define ANET_CONNECT_NONBLOCK 1 +static int anetTcpGenericConnect(char *err, char *addr, int port, int flags) +{ + int s, on = 1; + struct sockaddr_in sa; + + if ((s = socket(AF_INET, SOCK_STREAM, 0)) == -1) { + anetSetError(err, "creating socket: %s\n", strerror(errno)); + return ANET_ERR; + } + /* Make sure connection-intensive things like the redis benckmark + * will be able to close/open sockets a zillion of times */ + setsockopt(s, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on)); + + sa.sin_family = AF_INET; + sa.sin_port = htons(port); + if (inet_aton(addr, &sa.sin_addr) == 0) { + struct hostent *he; + + he = gethostbyname(addr); + if (he == NULL) { + anetSetError(err, "can't resolve: %s\n", addr); + close(s); + return ANET_ERR; + } + memcpy(&sa.sin_addr, he->h_addr, sizeof(struct in_addr)); + } + if (flags & ANET_CONNECT_NONBLOCK) { + if (anetNonBlock(err,s) != ANET_OK) + return ANET_ERR; + } + if (connect(s, (struct sockaddr*)&sa, sizeof(sa)) == -1) { + if (errno == 
EINPROGRESS && + flags & ANET_CONNECT_NONBLOCK) + return s; + + anetSetError(err, "connect: %s\n", strerror(errno)); + close(s); + return ANET_ERR; + } + return s; +} + +int anetTcpConnect(char *err, char *addr, int port) +{ + return anetTcpGenericConnect(err,addr,port,ANET_CONNECT_NONE); +} + +int anetTcpNonBlockConnect(char *err, char *addr, int port) +{ + return anetTcpGenericConnect(err,addr,port,ANET_CONNECT_NONBLOCK); +} + +/* Like read(2) but make sure 'count' is read before to return + * (unless error or EOF condition is encountered) */ +int anetRead(int fd, void *buf, int count) +{ + int nread, totlen = 0; + while(totlen != count) { + nread = read(fd,buf,count-totlen); + if (nread == 0) return totlen; + if (nread == -1) return -1; + totlen += nread; + buf += nread; + } + return totlen; +} + +/* Like write(2) but make sure 'count' is read before to return + * (unless error is encountered) */ +int anetWrite(int fd, void *buf, int count) +{ + int nwritten, totlen = 0; + while(totlen != count) { + nwritten = write(fd,buf,count-totlen); + if (nwritten == 0) return totlen; + if (nwritten == -1) return -1; + totlen += nwritten; + buf += nwritten; + } + return totlen; +} + +int anetTcpServer(char *err, int port, char *bindaddr) +{ + int s, on = 1; + struct sockaddr_in sa; + + if ((s = socket(AF_INET, SOCK_STREAM, 0)) == -1) { + anetSetError(err, "socket: %s\n", strerror(errno)); + return ANET_ERR; + } + if (setsockopt(s, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on)) == -1) { + anetSetError(err, "setsockopt SO_REUSEADDR: %s\n", strerror(errno)); + close(s); + return ANET_ERR; + } + memset(&sa,0,sizeof(sa)); + sa.sin_family = AF_INET; + sa.sin_port = htons(port); + sa.sin_addr.s_addr = htonl(INADDR_ANY); + if (bindaddr) { + if (inet_aton(bindaddr, &sa.sin_addr) == 0) { + anetSetError(err, "Invalid bind address\n"); + close(s); + return ANET_ERR; + } + } + if (bind(s, (struct sockaddr*)&sa, sizeof(sa)) == -1) { + anetSetError(err, "bind: %s\n", strerror(errno)); + close(s); + return ANET_ERR; + } + if (listen(s, 32) == -1) { + anetSetError(err, "listen: %s\n", strerror(errno)); + close(s); + return ANET_ERR; + } + return s; +} + +int anetAccept(char *err, int serversock, char *ip, int *port) +{ + int fd; + struct sockaddr_in sa; + unsigned int saLen; + + while(1) { + saLen = sizeof(sa); + fd = accept(serversock, (struct sockaddr*)&sa, &saLen); + if (fd == -1) { + if (errno == EINTR) + continue; + else { + anetSetError(err, "accept: %s\n", strerror(errno)); + return ANET_ERR; + } + } + break; + } + if (ip) strcpy(ip,inet_ntoa(sa.sin_addr)); + if (port) *port = ntohs(sa.sin_port); + return fd; +} diff --git a/anet.h b/anet.h new file mode 100644 index 00000000..c43405d9 --- /dev/null +++ b/anet.h @@ -0,0 +1,49 @@ +/* anet.c -- Basic TCP socket stuff made a bit less boring + * + * Copyright (c) 2006-2009, Salvatore Sanfilippo + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
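To see the client-side helpers above in action, here is a hedged sketch of a blocking ping. It assumes a redis-server built from this patch is already listening on 127.0.0.1:6379 and that PING is answered with a single short status line, and it links against anet.c:

    #include <stdio.h>
    #include <unistd.h>
    #include "anet.h"

    int main(void) {
        char err[ANET_ERR_LEN], buf[64];
        int fd, nread;

        fd = anetTcpConnect(err, "127.0.0.1", 6379);
        if (fd == ANET_ERR) {
            fprintf(stderr, "connect failed: %s", err);
            return 1;
        }
        anetTcpNoDelay(NULL, fd);               /* err may be NULL, error ignored */
        anetWrite(fd, "PING\r\n", 6);
        nread = read(fd, buf, sizeof(buf)-1);   /* expect a short status reply */
        if (nread > 0) {
            buf[nread] = '\0';
            printf("reply: %s", buf);
        }
        close(fd);
        return 0;
    }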
+ * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef ANET_H +#define ANET_H + +#define ANET_OK 0 +#define ANET_ERR -1 +#define ANET_ERR_LEN 256 + +int anetTcpConnect(char *err, char *addr, int port); +int anetTcpNonBlockConnect(char *err, char *addr, int port); +int anetRead(int fd, void *buf, int count); +int anetResolve(char *err, char *host, char *ipbuf); +int anetTcpServer(char *err, int port, char *bindaddr); +int anetAccept(char *err, int serversock, char *ip, int *port); +int anetWrite(int fd, void *buf, int count); +int anetNonBlock(char *err, int fd); +int anetTcpNoDelay(char *err, int fd); +int anetTcpKeepAlive(char *err, int fd); + +#endif diff --git a/benchmark.c b/benchmark.c new file mode 100644 index 00000000..51c7e980 --- /dev/null +++ b/benchmark.c @@ -0,0 +1,460 @@ +/* Redis benchmark utility. + * + * Copyright (c) 2006-2009, Salvatore Sanfilippo + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
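Put together, anet and ae give the accept-then-register pattern the event-driven server is built around. The following sketch is not part of the patch: the port number and the echo behaviour are invented for illustration, and possible short writes on the non-blocking client socket are ignored for brevity:

    #include <stdio.h>
    #include <unistd.h>
    #include "ae.h"
    #include "anet.h"

    static char neterr[ANET_ERR_LEN];

    /* Echo whatever a connected client sends, closing on EOF or error. */
    static void clientHandler(aeEventLoop *el, int fd, void *clientData, int mask) {
        char buf[1024];
        int nread;

        AE_NOTUSED(clientData);
        AE_NOTUSED(mask);
        nread = read(fd, buf, sizeof(buf));
        if (nread <= 0) {
            aeDeleteFileEvent(el, fd, AE_READABLE);
            close(fd);
            return;
        }
        anetWrite(fd, buf, nread);
    }

    /* The listening socket is readable: accept and watch the new client. */
    static void acceptHandler(aeEventLoop *el, int fd, void *clientData, int mask) {
        char cip[128];
        int cport, cfd;

        AE_NOTUSED(clientData);
        AE_NOTUSED(mask);
        cfd = anetAccept(neterr, fd, cip, &cport);
        if (cfd == ANET_ERR) return;
        anetNonBlock(NULL, cfd);
        aeCreateFileEvent(el, cfd, AE_READABLE, clientHandler, NULL, NULL);
    }

    int main(void) {
        aeEventLoop *el = aeCreateEventLoop();
        int sfd = anetTcpServer(neterr, 7777, NULL);   /* any free port works */

        if (sfd == ANET_ERR) {
            fprintf(stderr, "%s", neterr);
            return 1;
        }
        aeCreateFileEvent(el, sfd, AE_READABLE, acceptHandler, NULL, NULL);
        aeMain(el);
        return 0;
    }

Everything runs on one thread: the listening socket and every client socket are just file events multiplexed by the same select() loop.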
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ae.h" +#include "anet.h" +#include "sds.h" +#include "adlist.h" +#include "zmalloc.h" + +#define REPLY_INT 0 +#define REPLY_RETCODE 1 +#define REPLY_BULK 2 + +#define CLIENT_CONNECTING 0 +#define CLIENT_SENDQUERY 1 +#define CLIENT_READREPLY 2 + +#define MAX_LATENCY 5000 + +#define REDIS_NOTUSED(V) ((void) V) + +static struct config { + int numclients; + int requests; + int liveclients; + int donerequests; + int keysize; + int datasize; + aeEventLoop *el; + char *hostip; + int hostport; + int keepalive; + long long start; + long long totlatency; + int *latency; + list *clients; + int quiet; + int loop; +} config; + +typedef struct _client { + int state; + int fd; + sds obuf; + sds ibuf; + int readlen; /* readlen == -1 means read a single line */ + unsigned int written; /* bytes of 'obuf' already written */ + int replytype; + long long start; /* start time in milliseconds */ +} *client; + +/* Prototypes */ +static void writeHandler(aeEventLoop *el, int fd, void *privdata, int mask); +static void createMissingClients(client c); + +/* Implementation */ +static long long mstime(void) { + struct timeval tv; + long long mst; + + gettimeofday(&tv, NULL); + mst = ((long)tv.tv_sec)*1000; + mst += tv.tv_usec/1000; + return mst; +} + +static void freeClient(client c) { + listNode *ln; + + aeDeleteFileEvent(config.el,c->fd,AE_WRITABLE); + aeDeleteFileEvent(config.el,c->fd,AE_READABLE); + sdsfree(c->ibuf); + sdsfree(c->obuf); + close(c->fd); + zfree(c); + config.liveclients--; + ln = listSearchKey(config.clients,c); + assert(ln != NULL); + listDelNode(config.clients,ln); +} + +static void freeAllClients(void) { + listNode *ln = config.clients->head, *next; + + while(ln) { + next = ln->next; + freeClient(ln->value); + ln = next; + } +} + +static void resetClient(client c) { + aeDeleteFileEvent(config.el,c->fd,AE_WRITABLE); + aeDeleteFileEvent(config.el,c->fd,AE_READABLE); + aeCreateFileEvent(config.el,c->fd, AE_WRITABLE,writeHandler,c,NULL); + sdsfree(c->ibuf); + c->ibuf = sdsempty(); + c->readlen = (c->replytype == REPLY_BULK) ? 
-1 : 0; + c->written = 0; + c->state = CLIENT_SENDQUERY; + c->start = mstime(); +} + +static void clientDone(client c) { + long long latency; + config.donerequests ++; + latency = mstime() - c->start; + if (latency > MAX_LATENCY) latency = MAX_LATENCY; + config.latency[latency]++; + + if (config.donerequests == config.requests) { + freeClient(c); + aeStop(config.el); + return; + } + if (config.keepalive) { + resetClient(c); + } else { + config.liveclients--; + createMissingClients(c); + config.liveclients++; + freeClient(c); + } +} + +static void readHandler(aeEventLoop *el, int fd, void *privdata, int mask) +{ + char buf[1024]; + int nread; + client c = privdata; + REDIS_NOTUSED(el); + REDIS_NOTUSED(fd); + REDIS_NOTUSED(mask); + + nread = read(c->fd, buf, 1024); + if (nread == -1) { + fprintf(stderr, "Reading from socket: %s\n", strerror(errno)); + freeClient(c); + return; + } + if (nread == 0) { + fprintf(stderr, "EOF from client\n"); + freeClient(c); + return; + } + c->ibuf = sdscatlen(c->ibuf,buf,nread); + + if (c->replytype == REPLY_INT || + c->replytype == REPLY_RETCODE || + (c->replytype == REPLY_BULK && c->readlen == -1)) { + char *p; + + if ((p = strchr(c->ibuf,'\n')) != NULL) { + if (c->replytype == REPLY_BULK) { + *p = '\0'; + *(p-1) = '\0'; + if (memcmp(c->ibuf,"nil",3) == 0) { + clientDone(c); + return; + } + c->readlen = atoi(c->ibuf)+2; + c->ibuf = sdsrange(c->ibuf,(p-c->ibuf)+1,-1); + } else { + c->ibuf = sdstrim(c->ibuf,"\r\n"); + clientDone(c); + return; + } + } + } + /* bulk read */ + if ((unsigned)c->readlen == sdslen(c->ibuf)) + clientDone(c); +} + +static void writeHandler(aeEventLoop *el, int fd, void *privdata, int mask) +{ + client c = privdata; + REDIS_NOTUSED(el); + REDIS_NOTUSED(fd); + REDIS_NOTUSED(mask); + + if (c->state == CLIENT_CONNECTING) { + c->state = CLIENT_SENDQUERY; + c->start = mstime(); + } + if (sdslen(c->obuf) > c->written) { + void *ptr = c->obuf+c->written; + int len = sdslen(c->obuf) - c->written; + int nwritten = write(c->fd, ptr, len); + if (nwritten == -1) { + fprintf(stderr, "Writing to socket: %s\n", strerror(errno)); + freeClient(c); + return; + } + c->written += nwritten; + if (sdslen(c->obuf) == c->written) { + aeDeleteFileEvent(config.el,c->fd,AE_WRITABLE); + aeCreateFileEvent(config.el,c->fd,AE_READABLE,readHandler,c,NULL); + c->state = CLIENT_READREPLY; + } + } +} + +static client createClient(void) { + client c = zmalloc(sizeof(struct _client)); + char err[ANET_ERR_LEN]; + + c->fd = anetTcpNonBlockConnect(err,config.hostip,config.hostport); + if (c->fd == ANET_ERR) { + zfree(c); + fprintf(stderr,"Connect: %s\n",err); + return NULL; + } + anetTcpNoDelay(NULL,c->fd); + c->obuf = sdsempty(); + c->ibuf = sdsempty(); + c->readlen = 0; + c->written = 0; + c->state = CLIENT_CONNECTING; + aeCreateFileEvent(config.el, c->fd, AE_WRITABLE, writeHandler, c, NULL); + config.liveclients++; + listAddNodeTail(config.clients,c); + return c; +} + +static void createMissingClients(client c) { + while(config.liveclients < config.numclients) { + client new = createClient(); + if (!new) continue; + sdsfree(new->obuf); + new->obuf = sdsdup(c->obuf); + new->replytype = c->replytype; + if (c->replytype == REPLY_BULK) + new->readlen = -1; + } +} + +static void showLatencyReport(char *title) { + int j, seen = 0; + float perc, reqpersec; + + reqpersec = (float)config.donerequests/((float)config.totlatency/1000); + if (!config.quiet) { + printf("====== %s ======\n", title); + printf(" %d requests completed in %.2f seconds\n", config.donerequests, + 
(float)config.totlatency/1000); + printf(" %d parallel clients\n", config.numclients); + printf(" %d bytes payload\n", config.datasize); + printf(" keep alive: %d\n", config.keepalive); + printf("\n"); + for (j = 0; j <= MAX_LATENCY; j++) { + if (config.latency[j]) { + seen += config.latency[j]; + perc = ((float)seen*100)/config.donerequests; + printf("%.2f%% <= %d milliseconds\n", perc, j); + } + } + printf("%.2f requests per second\n\n", reqpersec); + } else { + printf("%s: %.2f requests per second\n", title, reqpersec); + } +} + +static void prepareForBenchmark(void) +{ + memset(config.latency,0,sizeof(int)*(MAX_LATENCY+1)); + config.start = mstime(); + config.donerequests = 0; +} + +static void endBenchmark(char *title) { + config.totlatency = mstime()-config.start; + showLatencyReport(title); + freeAllClients(); +} + +void parseOptions(int argc, char **argv) { + int i; + + for (i = 1; i < argc; i++) { + int lastarg = i==argc-1; + + if (!strcmp(argv[i],"-c") && !lastarg) { + config.numclients = atoi(argv[i+1]); + i++; + } else if (!strcmp(argv[i],"-n") && !lastarg) { + config.requests = atoi(argv[i+1]); + i++; + } else if (!strcmp(argv[i],"-k") && !lastarg) { + config.keepalive = atoi(argv[i+1]); + i++; + } else if (!strcmp(argv[i],"-h") && !lastarg) { + char *ip = zmalloc(32); + if (anetResolve(NULL,argv[i+1],ip) == ANET_ERR) { + printf("Can't resolve %s\n", argv[i]); + exit(1); + } + config.hostip = ip; + i++; + } else if (!strcmp(argv[i],"-p") && !lastarg) { + config.hostport = atoi(argv[i+1]); + i++; + } else if (!strcmp(argv[i],"-d") && !lastarg) { + config.datasize = atoi(argv[i+1]); + i++; + if (config.datasize < 1) config.datasize=1; + if (config.datasize > 1024*1024) config.datasize = 1024*1024; + } else if (!strcmp(argv[i],"-q")) { + config.quiet = 1; + } else if (!strcmp(argv[i],"-l")) { + config.loop = 1; + } else { + printf("Wrong option '%s' or option argument missing\n\n",argv[i]); + printf("Usage: redis-benchmark [-h ] [-p ] [-c ] [-n [-k ]\n\n"); + printf(" -h Server hostname (default 127.0.0.1)\n"); + printf(" -p Server port (default 6379)\n"); + printf(" -c Number of parallel connections (default 50)\n"); + printf(" -n Total number of requests (default 10000)\n"); + printf(" -d Data size of SET/GET value in bytes (default 2)\n"); + printf(" -k 1=keep alive 0=reconnect (default 1)\n"); + printf(" -q Quiet. Just show query/sec values\n"); + printf(" -l Loop. 
Run the tests forever\n"); + exit(1); + } + } +} + +int main(int argc, char **argv) { + client c; + + signal(SIGHUP, SIG_IGN); + signal(SIGPIPE, SIG_IGN); + + config.numclients = 50; + config.requests = 10000; + config.liveclients = 0; + config.el = aeCreateEventLoop(); + config.keepalive = 1; + config.donerequests = 0; + config.datasize = 3; + config.quiet = 0; + config.loop = 0; + config.latency = NULL; + config.clients = listCreate(); + config.latency = zmalloc(sizeof(int)*(MAX_LATENCY+1)); + + config.hostip = "127.0.0.1"; + config.hostport = 6379; + + parseOptions(argc,argv); + + if (config.keepalive == 0) { + printf("WARNING: keepalive disabled, you probably need 'echo 1 > /proc/sys/net/ipv4/tcp_tw_reuse' in order to use a lot of clients/requests\n"); + } + + do { + prepareForBenchmark(); + c = createClient(); + if (!c) exit(1); + c->obuf = sdscat(c->obuf,"PING\r\n"); + c->replytype = REPLY_RETCODE; + createMissingClients(c); + aeMain(config.el); + endBenchmark("PING"); + + prepareForBenchmark(); + c = createClient(); + if (!c) exit(1); + c->obuf = sdscatprintf(c->obuf,"SET foo %d\r\n",config.datasize); + { + char *data = zmalloc(config.datasize+2); + memset(data,'x',config.datasize); + data[config.datasize] = '\r'; + data[config.datasize+1] = '\n'; + c->obuf = sdscatlen(c->obuf,data,config.datasize+2); + } + c->replytype = REPLY_RETCODE; + createMissingClients(c); + aeMain(config.el); + endBenchmark("SET"); + + prepareForBenchmark(); + c = createClient(); + if (!c) exit(1); + c->obuf = sdscat(c->obuf,"GET foo\r\n"); + c->replytype = REPLY_BULK; + c->readlen = -1; + createMissingClients(c); + aeMain(config.el); + endBenchmark("GET"); + + prepareForBenchmark(); + c = createClient(); + if (!c) exit(1); + c->obuf = sdscat(c->obuf,"INCR counter\r\n"); + c->replytype = REPLY_INT; + createMissingClients(c); + aeMain(config.el); + endBenchmark("INCR"); + + prepareForBenchmark(); + c = createClient(); + if (!c) exit(1); + c->obuf = sdscat(c->obuf,"LPUSH mylist 3\r\nbar\r\n"); + c->replytype = REPLY_INT; + createMissingClients(c); + aeMain(config.el); + endBenchmark("LPUSH"); + + prepareForBenchmark(); + c = createClient(); + if (!c) exit(1); + c->obuf = sdscat(c->obuf,"LPOP mylist\r\n"); + c->replytype = REPLY_BULK; + c->readlen = -1; + createMissingClients(c); + aeMain(config.el); + endBenchmark("LPOP"); + + printf("\n"); + } while(config.loop); + + return 0; +} diff --git a/client-libraries/README b/client-libraries/README new file mode 100644 index 00000000..09a971cd --- /dev/null +++ b/client-libraries/README @@ -0,0 +1,28 @@ +Redis client libraries +---------------------- + +In this directory you'll find client libraries for different languages. +This are the latest releases available at the time this Redis tar.gz for this +release was created, and are good for most uses, but if you need more fresh +code or recent bugfixes read more. 
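As a quick sanity check that the bundled libraries work against a running server, a minimal session with the Python client shipped in this directory (client-libraries/python/redis.py) looks roughly like the sketch below. This is illustrative only, not part of the patch: it assumes a server listening on the default 127.0.0.1:6379 and the key names are made up.

    # sketch: basic usage of the bundled Python client
    from redis import Redis        # client-libraries/python/redis.py on the path

    r = Redis(host='localhost', port=6379)
    r.set('foo', 'bar')            # returns 'OK'
    print r.get('foo')             # prints 'bar'
    print r.incr('counter')        # integer replies come back as Python ints
    r.delete('foo')
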
+
+How to get the latest versions of client libraries source code
+---------------------------------------------------------------
+
+Note that while the PHP and Python versions are the most up-to-date available
+libraries, the Ruby and Erlang libraries have their own sites, so you may want
+to grab these libraries from their main sites:
+
+Ruby lib source code:
+http://github.com/ezmobius/redis-rb/tree/master
+
+Erlang lib source code:
+http://bitbucket.org/adroll/erldis/
+
+For the languages with development code in the Redis SVN, check these URLs for unstable versions of the libs:
+
+Python lib source code:
+http://code.google.com/p/redis/source/browse/#svn/trunk/client-libraries/python
+
+PHP lib source code:
+http://code.google.com/p/redis/source/browse/#svn/trunk/client-libraries/php
diff --git a/client-libraries/erlang/.hg_archival.txt b/client-libraries/erlang/.hg_archival.txt
new file mode 100644
index 00000000..2a61f3ae
--- /dev/null
+++ b/client-libraries/erlang/.hg_archival.txt
@@ -0,0 +1,2 @@
+repo: 9e1f35ed7fdc7b3da7f5ff66a71d1975b85e2ae5
+node: 7f98e864d76b0b2a7427049b943fb1c0dad0df2a
diff --git a/client-libraries/erlang/.hgignore b/client-libraries/erlang/.hgignore
new file mode 100644
index 00000000..6822f5cd
--- /dev/null
+++ b/client-libraries/erlang/.hgignore
@@ -0,0 +1,2 @@
+syntax: glob
+*.beam
\ No newline at end of file
diff --git a/client-libraries/erlang/LICENSE b/client-libraries/erlang/LICENSE
new file mode 100644
index 00000000..af3d5c8f
--- /dev/null
+++ b/client-libraries/erlang/LICENSE
@@ -0,0 +1,22 @@
+Copyright (c) 2009
+adroll.com
+Valentino Volonghi
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/client-libraries/erlang/Makefile b/client-libraries/erlang/Makefile new file mode 100644 index 00000000..7c7a7436 --- /dev/null +++ b/client-libraries/erlang/Makefile @@ -0,0 +1,29 @@ +LIBDIR=`erl -eval 'io:format("~s~n", [code:lib_dir()])' -s init stop -noshell` + +all: + mkdir -p ebin/ + (cd src;$(MAKE)) + (cd test;$(MAKE)) + +clean: clean_tests + (cd src;$(MAKE) clean) + rm -rf erl_crash.dump *.beam + +clean_tests: + (cd test;$(MAKE) clean) + rm -rf erl_crash.dump *.beam + +test: clean + mkdir -p ebin/ + (cd src;$(MAKE)) + (cd test;$(MAKE)) + (cd test;$(MAKE) test) + +testrun: all + mkdir -p ebin/ + (cd test;$(MAKE) test) + +install: all + mkdir -p ${LIBDIR}/erldis-0.0.1/{ebin,include} + for i in ebin/*.beam; do install $$i $(LIBDIR)/erldis-0.0.1/$$i ; done + for i in include/*.hrl; do install $$i $(LIBDIR)/erldis-0.0.1/$$i ; done diff --git a/client-libraries/erlang/include/erldis.hrl b/client-libraries/erlang/include/erldis.hrl new file mode 100644 index 00000000..1a20b37f --- /dev/null +++ b/client-libraries/erlang/include/erldis.hrl @@ -0,0 +1 @@ +-record(redis, {socket,buffer=[],reply_caller,parsers,remaining=0,pstate=empty,results=[]}). \ No newline at end of file diff --git a/client-libraries/erlang/src/Makefile b/client-libraries/erlang/src/Makefile new file mode 100644 index 00000000..c56f980b --- /dev/null +++ b/client-libraries/erlang/src/Makefile @@ -0,0 +1,9 @@ +include ../support/include.mk + +all: $(EBIN_FILES) + +debug: + $(MAKE) DEBUG=-DDEBUG + +clean: + rm -rf $(EBIN_FILES) erl_crash.dump diff --git a/client-libraries/erlang/src/client.erl b/client-libraries/erlang/src/client.erl new file mode 100644 index 00000000..a752da80 --- /dev/null +++ b/client-libraries/erlang/src/client.erl @@ -0,0 +1,272 @@ +-module(client). +-behavior(gen_server). + +-export([start/1, start/2, connect/1, connect/2, asend/2, send/3, send/2, + disconnect/1, ssend/3, str/1, format/1, sformat/1, ssend/2, + get_all_results/1]). +-export([init/1, handle_call/3, handle_cast/2, + handle_info/2, terminate/2, code_change/3]). + +-include("erldis.hrl"). + +-define(EOL, "\r\n"). + + +%% Helpers +str(X) when is_list(X) -> + X; +str(X) when is_atom(X) -> + atom_to_list(X); +str(X) when is_binary(X) -> + binary_to_list(X); +str(X) when is_integer(X) -> + integer_to_list(X); +str(X) when is_float(X) -> + float_to_list(X). + +format([], Result) -> + string:join(lists:reverse(Result), ?EOL); +format([Line|Rest], Result) -> + JoinedLine = string:join([str(X) || X <- Line], " "), + format(Rest, [JoinedLine|Result]). + +format(Lines) -> + format(Lines, []). +sformat(Line) -> + format([Line], []). 
+ +get_parser(Cmd) + when Cmd =:= set orelse Cmd =:= setnx orelse Cmd =:= del + orelse Cmd =:= exists orelse Cmd =:= rename orelse Cmd =:= renamenx + orelse Cmd =:= rpush orelse Cmd =:= lpush orelse Cmd =:= ltrim + orelse Cmd =:= lset orelse Cmd =:= sadd orelse Cmd =:= srem + orelse Cmd =:= sismember orelse Cmd =:= select orelse Cmd =:= move + orelse Cmd =:= save orelse Cmd =:= bgsave orelse Cmd =:= flushdb + orelse Cmd =:= flushall -> + fun proto:parse/2; +get_parser(Cmd) when Cmd =:= lrem -> + fun proto:parse_special/2; +get_parser(Cmd) + when Cmd =:= incr orelse Cmd =:= incrby orelse Cmd =:= decr + orelse Cmd =:= decrby orelse Cmd =:= llen orelse Cmd =:= scard -> + fun proto:parse_int/2; +get_parser(Cmd) when Cmd =:= type -> + fun proto:parse_types/2; +get_parser(Cmd) when Cmd =:= randomkey -> + fun proto:parse_string/2; +get_parser(Cmd) + when Cmd =:= get orelse Cmd =:= lindex orelse Cmd =:= lpop + orelse Cmd =:= rpop -> + fun proto:single_stateful_parser/2; +get_parser(Cmd) + when Cmd =:= keys orelse Cmd =:= lrange orelse Cmd =:= sinter + orelse Cmd =:= smembers orelse Cmd =:= sort -> + fun proto:stateful_parser/2. +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + + +%% Exported API +start(Host) -> + connect(Host). +start(Host, Port) -> + connect(Host, Port). + +connect(Host) -> + connect(Host, 6379). +connect(Host, Port) -> + gen_server:start_link(?MODULE, [Host, Port], []). + +ssend(Client, Cmd) -> ssend(Client, Cmd, []). +ssend(Client, Cmd, Args) -> + gen_server:cast(Client, {send, sformat([Cmd|Args]), get_parser(Cmd)}). + +send(Client, Cmd) -> send(Client, Cmd, []). +send(Client, Cmd, Args) -> + gen_server:cast(Client, {send, + string:join([str(Cmd), format(Args)], " "), get_parser(Cmd)}). + +asend(Client, Cmd) -> + gen_server:cast(Client, {asend, Cmd}). +disconnect(Client) -> + gen_server:call(Client, disconnect). + +get_all_results(Client) -> + gen_server:call(Client, get_all_results). +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + + + +%% gen_server callbacks +init([Host, Port]) -> + process_flag(trap_exit, true), + ConnectOptions = [list, {active, once}, {packet, line}, {nodelay, true}], + case gen_tcp:connect(Host, Port, ConnectOptions) of + {error, Why} -> + {error, {socket_error, Why}}; + {ok, Socket} -> + {ok, #redis{socket=Socket, parsers=queue:new()}} + end. + +handle_call({send, Cmd, Parser}, From, State=#redis{parsers=Parsers}) -> + gen_tcp:send(State#redis.socket, [Cmd|?EOL]), + {noreply, State#redis{reply_caller=fun(V) -> gen_server:reply(From, lists:nth(1, V)) end, + parsers=queue:in(Parser, Parsers), remaining=1}}; + +handle_call(disconnect, _From, State) -> + {stop, normal, ok, State}; +handle_call(get_all_results, From, State) -> + case queue:is_empty(State#redis.parsers) of + true -> + % answers came earlier than we could start listening... + % Very unlikely but totally possible. + {reply, lists:reverse(State#redis.results), State#redis{results=[]}}; + false -> + % We are here earlier than results came, so just make + % ourselves wait until stuff is ready. + {noreply, State#redis{reply_caller=fun(V) -> gen_server:reply(From, V) end}} + end; +handle_call(_, _From, State) -> {noreply, State}. + + +handle_cast({asend, Cmd}, State) -> + gen_tcp:send(State#redis.socket, [Cmd|?EOL]), + {noreply, State}; +handle_cast({send, Cmd, Parser}, State=#redis{parsers=Parsers, remaining=Remaining}) -> + % how we should do here: if remaining is already != 0 then we'll + % let handle_info take care of keeping track how many remaining things + % there are. 
If instead it's 0 we are the first call so let's just + % do it. + gen_tcp:send(State#redis.socket, [Cmd|?EOL]), + NewParsers = queue:in(Parser, Parsers), + case Remaining of + 0 -> + {noreply, State#redis{remaining=1, parsers=NewParsers}}; + _ -> + {noreply, State#redis{parsers=NewParsers}} + end; +handle_cast(_Msg, State) -> {noreply, State}. + + +trim2({ok, S}) -> + string:substr(S, 1, length(S)-2); +trim2(S) -> + trim2({ok, S}). + +% This is useful to know if there are more messages still coming. +get_remaining(ParsersQueue) -> + case queue:is_empty(ParsersQueue) of + true -> 0; + false -> 1 + end. + +% This function helps with pipelining by creating a pubsub system with +% the caller. The caller could submit multiple requests and not listen +% until later when all or some of them have been answered, at that +% point 2 conditions can be true: +% 1) We still need to process more things in this response chain +% 2) We are finished. +% +% And these 2 are together with the following 2: +% 1) We called get_all_results before the end of the responses. +% 2) We called get_all_results after the end of the responses. +% +% If there's stuff missing in the chain we just push results, this also +% happens when there's nothing more to process BUT we haven't requested +% results yet. +% In case we have requested results: if requests are not yet ready we +% just push them, otherwise we finally answer all of them. +save_or_reply(Result, State=#redis{results=Results, reply_caller=ReplyCaller, parsers=Parsers}) -> + case get_remaining(Parsers) of + 1 -> + State#redis{results=[Result|Results], remaining=1, pstate=empty, buffer=[]}; + 0 -> + % We don't reverse results here because if all the requests + % come in and then we submit another one, if we reverse + % they will be scrambled in the results field of the record. + % instead if we wait just before we reply they will be + % in the right order. + FullResults = [Result|Results], + NewState = case ReplyCaller of + undefined -> + State#redis{results=FullResults}; + _ -> + ReplyCaller(lists:reverse(FullResults)), + State#redis{results=[]} + end, + NewState#redis{remaining=0, pstate=empty, + reply_caller=undefined, buffer=[], + parsers=Parsers} + end. + +handle_info({tcp, Socket, Data}, State) -> + {{value, Parser}, NewParsers} = queue:out(State#redis.parsers), + Trimmed = trim2(Data), + NewState = case {State#redis.remaining-1, Parser(State#redis.pstate, Trimmed)} of + % This line contained an error code. Next line will hold + % The error message that we will parse. + {0, error} -> + % reinsert the parser in the front, next step is still gonna be needed + State#redis{remaining=1, pstate=error, + parsers=queue:in_r(Parser, NewParsers)}; + + % The stateful parser just started and tells us the number + % of results that we will have to parse for those calls + % where more than one result is expected. The next + % line will start with the first item to read. + {0, {hold, Remaining}} -> + % Reset the remaining value to the number of results + % that we need to parse. + % and reinsert the parser in the front, next step is still gonna be needed + State#redis{remaining=Remaining, pstate=read, + parsers=queue:in_r(Parser, NewParsers)}; + + % We either had only one thing to read or we are at the + % end of the stuff that we need to read. either way + % just pack up the buffer and send. 
+ {0, {read, NBytes}} -> + inet:setopts(Socket, [{packet, 0}]), % go into raw mode to read bytes + CurrentValue = trim2(gen_tcp:recv(Socket, NBytes+2)), % also consume the \r\n + inet:setopts(Socket, [{packet, line}]), % go back to line mode + OldBuffer = State#redis.buffer, + case OldBuffer of + [] -> + save_or_reply(CurrentValue, State#redis{parsers=NewParsers}); + _ -> + save_or_reply(lists:reverse([CurrentValue|OldBuffer]), State#redis{parsers=NewParsers}) + end; + + + % The stateful parser tells us to read some bytes + {N, {read, NBytes}} -> + inet:setopts(Socket, [{packet, 0}]), % go into raw mode to read bytes + CurrentValue = trim2(gen_tcp:recv(Socket, NBytes+2)), % also consume the \r\n + inet:setopts(Socket, [{packet, line}]), % go back to line mode + OldBuffer = State#redis.buffer, + State#redis{remaining=N, buffer=[CurrentValue|OldBuffer], + pstate=read, parsers=queue:in_r(Parser, NewParsers)}; + + + % Simple return values contained in a single line + {0, Value} -> + save_or_reply(Value, State#redis{parsers=NewParsers}) + + end, + inet:setopts(Socket, [{active, once}]), + {noreply, NewState}; +handle_info(_Info, State) -> {noreply, State}. + + +terminate(_Reason, State) -> + case State#redis.socket of + undefined -> + pass; + Socket -> + gen_tcp:close(Socket) + end, + ok. + + +code_change(_OldVsn, State, _Extra) -> {ok, State}. +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% + + diff --git a/client-libraries/erlang/src/erldis.erl b/client-libraries/erlang/src/erldis.erl new file mode 100644 index 00000000..40d6c811 --- /dev/null +++ b/client-libraries/erlang/src/erldis.erl @@ -0,0 +1,82 @@ +-module(erldis). + +-compile(export_all). +-define(EOL, "\r\n"). + +%% helpers +flatten({error, Message}) -> + {error, Message}; +flatten(List) when is_list(List)-> + lists:flatten(List). + +%% exposed API +connect(Host) -> + client:connect(Host). + +quit(Client) -> + client:asend(Client, "QUIT"), + client:disconnect(Client). + +%% Commands operating on string values +internal_set_like(Client, Command, Key, Value) -> + client:send(Client, Command, [[Key, length(Value)], + [Value]]). + +get_all_results(Client) -> client:get_all_results(Client). + +set(Client, Key, Value) -> internal_set_like(Client, set, Key, Value). +setnx(Client, Key, Value) -> internal_set_like(Client, setnx, Key, Value). +incr(Client, Key) -> client:ssend(Client, incr, [Key]). +incrby(Client, Key, By) -> client:ssend(Client, incrby, [Key, By]). +decr(Client, Key) -> client:ssend(Client, decr, [Key]). +decrby(Client, Key, By) -> client:ssend(Client, decrby, [Key, By]). +get(Client, Key) -> client:ssend(Client, get, [Key]). + + +%% Commands operating on every value +exists(Client, Key) -> client:ssend(Client, exists, [Key]). +del(Client, Key) -> client:ssend(Client, del, [Key]). +type(Client, Key) -> client:ssend(Client, type, [Key]). +keys(Client, Pattern) -> client:ssend(Client, keys, [Pattern]). +randomkey(Client, Key) -> client:ssend(Client, randomkey, [Key]). +rename(Client, OldKey, NewKey) -> client:ssend(Client, rename, [OldKey, NewKey]). +renamenx(Client, OldKey, NewKey) -> client:ssend(Client, renamenx, [OldKey, NewKey]). + +%% Commands operating on both lists and sets +sort(Client, Key) -> client:ssend(Client, sort, [Key]). +sort(Client, Key, Extra) -> client:ssend(Client, sort, [Key, Extra]). + +%% Commands operating on lists +rpush(Client, Key, Value) -> internal_set_like(Client, rpush, Key, Value). +lpush(Client, Key, Value) -> internal_set_like(Client, lpush, Key, Value). 
+llen(Client, Key) -> client:ssend(Client, llen, [Key]). +lrange(Client, Key, Start, End) -> client:ssend(Client, lrange, [Key, Start, End]). +ltrim(Client, Key, Start, End) -> client:ssend(Client, ltrim, [Key, Start, End]). +lindex(Client, Key, Index) -> client:ssend(Client, lindex, [Key, Index]). +lpop(Client, Key) -> client:ssend(Client, lpop, [Key]). +rpop(Client, Key) -> client:ssend(Client, rpop, [Key]). +lrem(Client, Key, Number, Value) -> + client:send(Client, lrem, [[Key, Number, length(Value)], + [Value]]). +lset(Client, Key, Index, Value) -> + client:send(Client, lset, [[Key, Index, length(Value)], + [Value]]). + +%% Commands operating on sets +sadd(Client, Key, Value) -> internal_set_like(Client, sadd, Key, Value). +srem(Client, Key, Value) -> internal_set_like(Client, srem, Key, Value). +scard(Client, Key) -> client:ssend(Client, scard, [Key]). +sismember(Client, Key, Value) -> internal_set_like(Client, sismember, Key, Value). +sintersect(Client, Keys) -> client:ssend(Client, sinter, Keys). +smembers(Client, Key) -> client:ssend(Client, smembers, [Key]). + + +%% Multiple DB commands +flushdb(Client) -> client:ssend(Client, flushdb). +flushall(Client) -> client:ssend(Client, flushall). +select(Client, Index) -> client:ssend(Client, select, [Index]). +move(Client, Key, DBIndex) -> client:ssend(Client, move, [Key, DBIndex]). +save(Client) -> client:ssend(Client, save). +bgsave(Client) -> client:ssend(Client, bgsave). +lastsave(Client) -> client:ssend(Client, lastsave). +shutdown(Client) -> client:asend(Client, shutdown). diff --git a/client-libraries/erlang/src/proto.erl b/client-libraries/erlang/src/proto.erl new file mode 100644 index 00000000..ef0ace27 --- /dev/null +++ b/client-libraries/erlang/src/proto.erl @@ -0,0 +1,68 @@ +-module(proto). + +-export([parse/2, parse_int/2, parse_types/2, + parse_string/2, stateful_parser/2, + single_stateful_parser/2, parse_special/2]). + + +parse(empty, "+OK") -> + ok; +parse(empty, "+PONG") -> + pong; +parse(empty, "0") -> + false; +parse(empty, "1") -> + true; +parse(empty, "-1") -> + {error, no_such_key}; +parse(empty, "-2") -> + {error, wrong_type}; +parse(empty, "-3") -> + {error, same_db}; +parse(empty, "-4") -> + {error, argument_out_of_range}; +parse(empty, "-" ++ Message) -> + {error, Message}. + +parse_special(empty, "-1") -> + parse(empty, "-1"); +parse_special(empty, "-2") -> + parse(empty, "-2"); +parse_special(empty, N) -> + list_to_integer(N). + +parse_int(empty, "-ERR " ++ Message) -> + {error, Message}; +parse_int(empty, Value) -> + list_to_integer(Value). + +parse_string(empty, Message) -> + Message. + +parse_types(empty, "none") -> none; +parse_types(empty, "string") -> string; +parse_types(empty, "list") -> list; +parse_types(empty, "set") -> set. + + +% I'm used when redis returns multiple results +stateful_parser(empty, "nil") -> + nil; +stateful_parser(error, "-ERR " ++ Error) -> + {error, Error}; +stateful_parser(empty, "-" ++ _ErrorLength) -> + error; +stateful_parser(empty, NumberOfElements) -> + {hold, list_to_integer(NumberOfElements)}; +stateful_parser(read, ElementSize) -> + {read, list_to_integer(ElementSize)}. + +% I'm used when redis returns just one result +single_stateful_parser(empty, "nil") -> + nil; +single_stateful_parser(error, "-ERR " ++ Error) -> + {error, Error}; +single_stateful_parser(empty, "-" ++ _ErrorLength) -> + error; +single_stateful_parser(empty, ElementSize) -> + {read, list_to_integer(ElementSize)}. 
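The parsers above encode the shape of the early protocol's replies: one-line status/integer replies, a single length-prefixed bulk value ({read, NBytes}), or a count line followed by one length-prefixed value per element ({hold, Remaining} followed by repeated reads), which client.erl satisfies by flipping the socket into raw mode for exactly NBytes+2 bytes. The same grammar, read by hand over a plain socket, looks roughly like the hedged Python sketch below; it is not part of the patch, and the server address and key names are assumptions.

    import socket

    # sketch: speak the early inline protocol directly, the way these clients do
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.connect(('localhost', 6379))
    f = sock.makefile('r')

    sock.sendall('SET akey 5\r\nhello\r\n')   # bulk write: byte count, then payload
    print f.readline().strip()                # '+OK' (status reply)

    sock.sendall('GET akey\r\n')              # bulk read
    n = f.readline().strip()                  # byte count, or 'nil' if missing
    if n != 'nil':
        data = f.read(int(n))                 # the value itself
        f.read(2)                             # discard the trailing CRLF
        print data

    sock.sendall('RPUSH alist 3\r\nfoo\r\n')  # push one element
    f.readline()                              # status line
    sock.sendall('LRANGE alist 0 10\r\n')
    count = int(f.readline())                 # element count ({hold, N} above)
    for _ in range(count):
        size = int(f.readline())              # per-element byte count ({read, N})
        element = f.read(size)
        f.read(2)                             # discard the trailing CRLF
        print element
    sock.close()
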
diff --git a/client-libraries/erlang/support/include.mk b/client-libraries/erlang/support/include.mk new file mode 100644 index 00000000..f289aed0 --- /dev/null +++ b/client-libraries/erlang/support/include.mk @@ -0,0 +1,51 @@ +## -*- makefile -*- + +ERL := erl +ERLC := $(ERL)c + +INCLUDE_DIRS := ../include $(wildcard ../deps/*/include) +EBIN_DIRS := $(wildcard ../deps/*/ebin) +ERLC_FLAGS := -W $(INCLUDE_DIRS:../%=-I ../%) $(EBIN_DIRS:%=-pa %) + +ifndef no_debug_info + ERLC_FLAGS += +debug_info +endif + +ifdef debug + ERLC_FLAGS += -Ddebug +endif + +ifdef test + ERLC_FLAGS += -DTEST +endif + +EBIN_DIR := ../ebin +DOC_DIR := ../doc +EMULATOR := beam + +ERL_TEMPLATE := $(wildcard *.et) +ERL_SOURCES := $(wildcard *.erl) +ERL_HEADERS := $(wildcard *.hrl) $(wildcard ../include/*.hrl) +ERL_OBJECTS := $(ERL_SOURCES:%.erl=$(EBIN_DIR)/%.beam) +ERL_TEMPLATES := $(ERL_TEMPLATE:%.et=$(EBIN_DIR)/%.beam) +ERL_OBJECTS_LOCAL := $(ERL_SOURCES:%.erl=./%.$(EMULATOR)) +APP_FILES := $(wildcard *.app) +EBIN_FILES = $(ERL_OBJECTS) $(APP_FILES:%.app=../ebin/%.app) $(ERL_TEMPLATES) +MODULES = $(ERL_SOURCES:%.erl=%) + +../ebin/%.app: %.app + cp $< $@ + +$(EBIN_DIR)/%.$(EMULATOR): %.erl + $(ERLC) $(ERLC_FLAGS) -o $(EBIN_DIR) $< + +$(EBIN_DIR)/%.$(EMULATOR): %.et + $(ERL) -noshell -pa ../../elib/erltl/ebin/ -eval "erltl:compile(atom_to_list('$<'), [{outdir, \"../ebin\"}, report_errors, report_warnings, nowarn_unused_vars])." -s init stop + +./%.$(EMULATOR): %.erl + $(ERLC) $(ERLC_FLAGS) -o . $< + +$(DOC_DIR)/%.html: %.erl + $(ERL) -noshell -run edoc file $< -run init stop + mv *.html $(DOC_DIR) + diff --git a/client-libraries/erlang/test/Makefile b/client-libraries/erlang/test/Makefile new file mode 100644 index 00000000..b87cc9b0 --- /dev/null +++ b/client-libraries/erlang/test/Makefile @@ -0,0 +1,12 @@ +include ../support/include.mk + +all: $(EBIN_FILES) + +clean: + rm -rf $(EBIN_FILES) erl_crash.dump + +test: $(MODULES) + +./$(MODULES): + @echo "Running tests for $@" + erl -pa ../ebin -run $@ test -run init stop -noshell diff --git a/client-libraries/erlang/test/erldis_tests.erl b/client-libraries/erlang/test/erldis_tests.erl new file mode 100644 index 00000000..132e22dc --- /dev/null +++ b/client-libraries/erlang/test/erldis_tests.erl @@ -0,0 +1,88 @@ +-module(erldis_tests). + +-include_lib("eunit/include/eunit.hrl"). +-include("erldis.hrl"). + +quit_test() -> + {ok, Client} = erldis:connect("localhost"), + ok = erldis:quit(Client), + false = is_process_alive(Client). + +utils_test() -> + ?assertEqual(client:str(1), "1"), + ?assertEqual(client:str(atom), "atom"), + ?assertEqual(client:format([[1, 2, 3]]), "1 2 3"), + ?assertEqual(client:format([[1,2,3], [4,5,6]]), "1 2 3\r\n4 5 6"). 
+ +pipeline_test() -> + {ok, Client} = erldis:connect("localhost"), + erldis:flushall(Client), + erldis:get(Client, "pippo"), + erldis:set(Client, "hello", "kitty!"), + erldis:setnx(Client, "foo", "bar"), + erldis:setnx(Client, "foo", "bar"), + [ok, nil, ok, true, false] = erldis:get_all_results(Client), + + erldis:exists(Client, "hello"), + erldis:exists(Client, "foo"), + erldis:get(Client, "foo"), + erldis:del(Client, "hello"), + erldis:del(Client, "foo"), + erldis:exists(Client, "hello"), + erldis:exists(Client, "foo"), + [true, true, "bar", true, true, false, false] = erldis:get_all_results(Client), + + erldis:set(Client, "pippo", "pluto"), + erldis:sadd(Client, "pippo", "paperino"), + % foo doesn't exist, the result will be nil + erldis:lrange(Client, "foo", 1, 2), + erldis:lrange(Client, "pippo", 1, 2), + [ok, {error, wrong_type}, nil, + {error, "Operation against a key holding the wrong kind of value"} + ] = erldis:get_all_results(Client), + erldis:del(Client, "pippo"), + [true] = erldis:get_all_results(Client), + + erldis:rpush(Client, "a_list", "1"), + erldis:rpush(Client, "a_list", "2"), + erldis:rpush(Client, "a_list", "3"), + erldis:rpush(Client, "a_list", "1"), + erldis:lrem(Client, "a_list", 1, "1"), + erldis:lrange(Client, "a_list", 0, 2), + [ok, ok, ok, ok, 1, ["2", "3", "1"]] = erldis:get_all_results(Client), + + erldis:sort(Client, "a_list"), + erldis:sort(Client, "a_list", "DESC"), + erldis:lrange(Client, "a_list", 0, 2), + erldis:sort(Client, "a_list", "LIMIT 0 2 ASC"), + [["1", "2", "3"], ["3", "2", "1"], ["2", "3", "1"], + ["1", "2"]] = erldis:get_all_results(Client), + + ok = erldis:quit(Client). + + + +% inline_tests(Client) -> +% [?_assertMatch(ok, erldis:set(Client, "hello", "kitty!")), +% ?_assertMatch(false, erldis:setnx(Client, "hello", "kitty!")), +% ?_assertMatch(true, erldis:exists(Client, "hello")), +% ?_assertMatch(true, erldis:del(Client, "hello")), +% ?_assertMatch(false, erldis:exists(Client, "hello")), +% +% ?_assertMatch(true, erldis:setnx(Client, "hello", "kitty!")), +% ?_assertMatch(true, erldis:exists(Client, "hello")), +% ?_assertMatch("kitty!", erldis:get(Client, "hello")), +% ?_assertMatch(true, erldis:del(Client, "hello")), +% +% +% ?_assertMatch(1, erldis:incr(Client, "pippo")) +% ,?_assertMatch(2, erldis:incr(Client, "pippo")) +% ,?_assertMatch(1, erldis:decr(Client, "pippo")) +% ,?_assertMatch(0, erldis:decr(Client, "pippo")) +% ,?_assertMatch(-1, erldis:decr(Client, "pippo")) +% +% ,?_assertMatch(6, erldis:incrby(Client, "pippo", 7)) +% ,?_assertMatch(2, erldis:decrby(Client, "pippo", 4)) +% ,?_assertMatch(-2, erldis:decrby(Client, "pippo", 4)) +% ,?_assertMatch(true, erldis:del(Client, "pippo")) +% ]. diff --git a/client-libraries/erlang/test/proto_tests.erl b/client-libraries/erlang/test/proto_tests.erl new file mode 100644 index 00000000..dc2490ed --- /dev/null +++ b/client-libraries/erlang/test/proto_tests.erl @@ -0,0 +1,10 @@ +-module(proto_tests). + +-include_lib("eunit/include/eunit.hrl"). + +parse_test() -> + ok = proto:parse(empty, "+OK"), + pong = proto:parse(empty, "+PONG"), + false = proto:parse(empty, "0"), + true = proto:parse(empty, "1"), + {error, no_such_key} = proto:parse(empty, "-1"). 
diff --git a/client-libraries/php/redis.php b/client-libraries/php/redis.php new file mode 100644 index 00000000..7d03ba31 --- /dev/null +++ b/client-libraries/php/redis.php @@ -0,0 +1,330 @@ +host = $host; + $this->port = $port; + } + + function connect() { + if ($this->_sock) + return; + if ($sock = fsockopen($this->host, $this->port, $errno, $errstr)) { + $this->_sock = $sock; + return; + } + $msg = "Cannot open socket to {$this->host}:{$this->port}"; + if ($errno || $errmsg) + $msg .= "," . ($errno ? " error $errno" : "") . ($errmsg ? " $errmsg" : ""); + trigger_error("$msg.", E_USER_ERROR); + } + + function disconnect() { + if ($this->_sock) + @fclose($this->_sock); + $this->_sock = null; + } + + function &ping() { + $this->connect(); + $this->_write("PING\r\n"); + return $this->_simple_response(); + } + + function &do_echo($s) { + $this->connect(); + $this->_write("ECHO " . strlen($s) . "\r\n$s\r\n"); + return $this->_get_value(); + } + + function &set($name, $value, $preserve=false) { + $this->connect(); + $this->_write( + ($preserve ? 'SETNX' : 'SET') . + " $name " . strlen($value) . "\r\n$value\r\n" + ); + return $preserve ? $this->_numeric_response() : $this->_simple_response(); + } + + function &get($name) { + $this->connect(); + $this->_write("GET $name\r\n"); + return $this->_get_value(); + } + + function &incr($name, $amount=1) { + $this->connect(); + if ($amount == 1) + $this->_write("INCR $name\r\n"); + else + $this->_write("INCRBY $name $amount\r\n"); + return $this->_numeric_response(); + } + + function &decr($name, $amount=1) { + $this->connect(); + if ($amount == 1) + $this->_write("DECR $name\r\n"); + else + $this->_write("DECRBY $name $amount\r\n"); + return $this->_numeric_response(); + } + + function &exists($name) { + $this->connect(); + $this->_write("EXISTS $name\r\n"); + return $this->_numeric_response(); + } + + function &delete($name) { + $this->connect(); + $this->_write("DEL $name\r\n"); + return $this->_numeric_response(); + } + + function &keys($pattern) { + $this->connect(); + $this->_write("KEYS $pattern\r\n"); + return explode(' ', $this->_get_value()); + } + + function &randomkey() { + $this->connect(); + $this->_write("RANDOMKEY\r\n"); + $s =& trim($this->_read()); + $this->_check_for_error($s); + return $s; + } + + function &rename($src, $dst, $preserve=False) { + $this->connect(); + if ($preserve) { + $this->_write("RENAMENX $src $dst\r\n"); + return $this->_numeric_response(); + } + $this->_write("RENAME $src $dst\r\n"); + return trim($this->_simple_response()); + } + + function &push($name, $value, $tail=true) { + // default is to append the element to the list + $this->connect(); + $this->_write( + ($tail ? 'RPUSH' : 'LPUSH') . + " $name " . strlen($value) . "\r\n$value\r\n" + ); + return $this->_simple_response(); + } + + function <rim($name, $start, $end) { + $this->connect(); + $this->_write("LTRIM $name $start $end\r\n"); + return $this->_simple_response(); + } + + function &lindex($name, $index) { + $this->connect(); + $this->_write("LINDEX $name $index\r\n"); + return $this->_get_value(); + } + + function &pop($name, $tail=true) { + $this->connect(); + $this->_write( + ($tail ? 'RPOP' : 'LPOP') . 
+ " $name\r\n" + ); + return $this->_get_value(); + } + + function &llen($name) { + $this->connect(); + $this->_write("LLEN $name\r\n"); + return $this->_numeric_response(); + } + + function &lrange($name, $start, $end) { + $this->connect(); + $this->_write("LRANGE $name $start $end\r\n"); + return $this->_get_multi(); + } + + function &sort($name, $query=false) { + $this->connect(); + if ($query === false) { + $this->_write("SORT $name\r\n"); + } else { + $this->_write("SORT $name $query\r\n"); + } + return $this->_get_multi(); + } + + function &lset($name, $value, $index) { + $this->connect(); + $this->_write("LSET $name $index " . strlen($value) . "\r\n$value\r\n"); + return $this->_simple_response(); + } + + function &sadd($name, $value) { + $this->connect(); + $this->_write("SADD $name " . strlen($value) . "\r\n$value\r\n"); + return $this->_numeric_response(); + } + + function &srem($name, $value) { + $this->connect(); + $this->_write("SREM $name " . strlen($value) . "\r\n$value\r\n"); + return $this->_numeric_response(); + } + + function &sismember($name, $value) { + $this->connect(); + $this->_write("SISMEMBER $name " . strlen($value) . "\r\n$value\r\n"); + return $this->_numeric_response(); + } + + function &sinter($sets) { + $this->connect(); + $this->_write('SINTER ' . implode(' ', $sets) . "\r\n"); + return $this->_get_multi(); + } + + function &smembers($name) { + $this->connect(); + $this->_write("SMEMBERS $name\r\n"); + return $this->_get_multi(); + } + + function &scard($name) { + $this->connect(); + $this->_write("SCARD $name\r\n"); + return $this->_numeric_response(); + } + + function &select_db($name) { + $this->connect(); + $this->_write("SELECT $name\r\n"); + return $this->_simple_response(); + } + + function &move($name, $db) { + $this->connect(); + $this->_write("MOVE $name $db\r\n"); + return $this->_numeric_response(); + } + + function &save($background=false) { + $this->connect(); + $this->_write(($background ? "BGSAVE\r\n" : "SAVE\r\n")); + return $this->_simple_response(); + } + + function &lastsave() { + $this->connect(); + $this->_write("LASTSAVE\r\n"); + return $this->_numeric_response(); + } + + function &_write($s) { + while ($s) { + $i = fwrite($this->_sock, $s); + if ($i == 0) + break; + $s = substr($s, $i); + } + } + + function &_read($len=1024) { + if ($s = fgets($this->_sock)) + return $s; + $this->disconnect(); + trigger_error("Cannot read from socket.", E_USER_ERROR); + } + + function _check_for_error(&$s) { + if (!$s || $s[0] != '-') + return; + if (substr($s, 0, 4) == '-ERR') + trigger_error("Redis error: " . trim(substr($s, 4)), E_USER_ERROR); + trigger_error("Redis error: " . substr(trim($this->_read()), 5), E_USER_ERROR); + } + + function &_simple_response() { + $s =& trim($this->_read()); + if ($s[0] == '+') + return substr($s, 1); + if ($err =& $this->_check_for_error($s)) + return $err; + trigger_error("Cannot parse first line '$s' for a simple response", E_USER_ERROR); + } + + function &_numeric_response($allow_negative=True) { + $s =& trim($this->_read()); + $i = (int)$s; + if ($i . '' == $s) { + if (!$allow_negative && $i < 0) + $this->_check_for_error($s); + return $i; + } + if ($s == 'nil') + return null; + trigger_error("Cannot parse '$s' as numeric response."); + } + + function &_get_value() { + $s =& trim($this->_read()); + if ($s == 'nil') + return ''; + else if ($s[0] == '-') + $this->_check_for_error($s); + $i = (int)$s; + if ($i . 
'' != $s) + trigger_error("Cannot parse '$s' as data length."); + $buffer = ''; + while ($i > 0) { + $s = $this->_read(); + $l = strlen($s); + $i -= $l; + if ($l > $i) // ending crlf + $s = rtrim($s); + $buffer .= $s; + } + if ($i == 0) // let's restore the trailing crlf + $buffer .= $this->_read(); + return $buffer; + } + + function &_get_multi() { + $results = array(); + $num =& $this->_numeric_response(false); + if ($num === false) + return $results; + while ($num) { + $results[] =& $this->_get_value(); + $num -= 1; + } + return $results; + } + +} + + +?> diff --git a/client-libraries/php/tests.php b/client-libraries/php/tests.php new file mode 100644 index 00000000..80576652 --- /dev/null +++ b/client-libraries/php/tests.php @@ -0,0 +1,78 @@ +connect(); +echo $r->ping() . "\n"; +echo $r->do_echo('ECHO test') . "\n"; +echo "SET aaa " . $r->set('aaa', 'bbb') . "\n"; +echo "SETNX aaa " . $r->set('aaa', 'ccc', true) . "\n"; +echo "GET aaa " . $r->get('aaa') . "\n"; +echo "INCR aaa " . $r->incr('aaa') . "\n"; +echo "GET aaa " . $r->get('aaa') . "\n"; +echo "INCRBY aaa 3 " . $r->incr('aaa', 2) . "\n"; +echo "GET aaa " . $r->get('aaa') . "\n"; +echo "DECR aaa " . $r->decr('aaa') . "\n"; +echo "GET aaa " . $r->get('aaa') . "\n"; +echo "DECRBY aaa 2 " . $r->decr('aaa', 2) . "\n"; +echo "GET aaa " . $r->get('aaa') . "\n"; +echo "EXISTS aaa " . $r->exists('aaa') . "\n"; +echo "EXISTS fsfjslfjkls " . $r->exists('fsfjslfjkls') . "\n"; +echo "DELETE aaa " . $r->delete('aaa') . "\n"; +echo "EXISTS aaa " . $r->exists('aaa') . "\n"; +echo 'SET a1 a2 a3' . $r->set('a1', 'a') . $r->set('a2', 'b') . $r->set('a3', 'c') . "\n"; +echo 'KEYS a* ' . print_r($r->keys('a*'), true) . "\n"; +echo 'RANDOMKEY ' . $r->randomkey('a*') . "\n"; +echo 'RENAME a1 a0 ' . $r->rename('a1', 'a0') . "\n"; +echo 'RENAMENX a0 a2 ' . $r->rename('a0', 'a2', true) . "\n"; +echo 'RENAMENX a0 a1 ' . $r->rename('a0', 'a1', true) . "\n"; + +echo 'LPUSH a0 aaa ' . $r->push('a0', 'aaa') . "\n"; +echo 'LPUSH a0 bbb ' . $r->push('a0', 'bbb') . "\n"; +echo 'RPUSH a0 ccc ' . $r->push('a0', 'ccc', false) . "\n"; +echo 'LLEN a0 ' . $r->llen('a0') . "\n"; +echo 'LRANGE sdkjhfskdjfh 0 100 ' . print_r($r->lrange('sdkjhfskdjfh', 0, 100), true) . "\n"; +echo 'LRANGE a0 0 0 ' . print_r($r->lrange('sdkjhfskdjfh', 0, 0), true) . "\n"; +echo 'LRANGE a0 0 100 ' . print_r($r->lrange('a0', 0, 100), true) . "\n"; +echo 'LTRIM a0 0 1 ' . $r->ltrim('a0', 0, 1) . "\n"; +echo 'LRANGE a0 0 100 ' . print_r($r->lrange('a0', 0, 100), true) . "\n"; +echo 'LINDEX a0 0 ' . $r->lindex('a0', 0) . "\n"; +echo 'LPUSH a0 bbb ' . $r->push('a0', 'bbb') . "\n"; +echo 'LRANGE a0 0 100 ' . print_r($r->lrange('a0', 0, 100), true) . "\n"; +echo 'RPOP a0 ' . $r->pop('a0') . "\n"; +echo 'LPOP a0 ' . $r->pop('a0', false) . "\n"; +echo 'LSET a0 ccc 0 ' . $r->lset('a0', 'ccc', 0) . "\n"; +echo 'LRANGE a0 0 100 ' . print_r($r->lrange('a0', 0, 100), true) . "\n"; + +echo 'SADD s0 aaa ' . $r->sadd('s0', 'aaa') . "\n"; +echo 'SADD s0 aaa ' . $r->sadd('s0', 'aaa') . "\n"; +echo 'SADD s0 bbb ' . $r->sadd('s0', 'bbb') . "\n"; +echo 'SREM s0 bbb ' . $r->srem('s0', 'bbb') . "\n"; +echo 'SISMEMBER s0 aaa ' . $r->sismember('s0', 'aaa') . "\n"; +echo 'SISMEMBER s0 bbb ' . $r->sismember('s0', 'bbb') . "\n"; +echo 'SADD s0 bbb ' . $r->sadd('s0', 'bbb') . "\n"; +echo 'SADD s1 bbb ' . $r->sadd('s1', 'bbb') . "\n"; +echo 'SADD s1 aaa ' . $r->sadd('s1', 'aaa') . "\n"; +echo 'SINTER s0 s1 ' . print_r($r->sinter(array('s0', 's1')), true) . "\n"; +echo 'SREM s0 bbb ' . $r->srem('s0', 'bbb') . 
"\n"; +echo 'SINTER s0 s1 ' . print_r($r->sinter(array('s0', 's1')), true) . "\n"; +echo 'SMEMBERS s1 ' . print_r($r->smembers('s1'), true) . "\n"; + +echo 'SELECT 1 ' . $r->select_db(1) . "\n"; +echo 'SMEMBERS s1 ' . print_r($r->smembers('s1'), true) . "\n"; +echo 'SELECT 0 ' . $r->select_db(0) . "\n"; +echo 'SMEMBERS s1 ' . print_r($r->smembers('s1'), true) . "\n"; +echo 'MOVE s1 1 ' . $r->move('s1', 1) . "\n"; +echo 'SMEMBERS s1 ' . print_r($r->smembers('s1'), true) . "\n"; +echo 'SELECT 1 ' . $r->select_db(1) . "\n"; +echo 'SMEMBERS s1 ' . print_r($r->smembers('s1'), true) . "\n"; +echo 'SELECT 0 ' . $r->select_db(0) . "\n"; + +echo 'SAVE ' . $r->save() . "\n"; +echo 'BGSAVE ' . $r->save(true) . "\n"; +echo 'LASTSAVE ' . $r->lastsave() . "\n"; + +?> \ No newline at end of file diff --git a/client-libraries/python/redis.py b/client-libraries/python/redis.py new file mode 100644 index 00000000..e844f812 --- /dev/null +++ b/client-libraries/python/redis.py @@ -0,0 +1,930 @@ +#!/usr/bin/python + +""" redis.py - A client for the Redis daemon. + +""" + +__author__ = "Ludovico Magnocavallo " +__copyright__ = "Copyright 2009, Ludovico Magnocavallo" +__license__ = "MIT" +__version__ = "0.5" +__revision__ = "$LastChangedRevision: 175 $"[22:-2] +__date__ = "$LastChangedDate: 2009-03-17 16:15:55 +0100 (Mar, 17 Mar 2009) $"[18:-2] + + +# TODO: Redis._get_multi_response + + +import socket + + +BUFSIZE = 4096 + + +class RedisError(Exception): pass +class ConnectionError(RedisError): pass +class ResponseError(RedisError): pass +class InvalidResponse(RedisError): pass +class InvalidData(RedisError): pass + + +class Redis(object): + """The main Redis client. + """ + + def __init__(self, host=None, port=None, timeout=None): + self.host = host or 'localhost' + self.port = port or 6379 + if timeout: + socket.setdefaulttimeout(timeout) + self._sock = None + self._fp = None + + def _write(self, s): + """ + >>> r = Redis() + >>> r.connect() + >>> r._sock.close() + >>> try: + ... r._write('pippo') + ... except ConnectionError, e: + ... print e + Error 9 while writing to socket. Bad file descriptor. + >>> + >>> + """ + try: + self._sock.sendall(s) + except socket.error, e: + if e.args[0] == 32: + # broken pipe + self.disconnect() + raise ConnectionError("Error %s while writing to socket. %s." % tuple(e.args)) + + def _read(self): + try: + return self._fp.readline() + except socket.error, e: + if e.args and e.args[0] == errno.EAGAIN: + return + self.disconnect() + raise ConnectionError("Error %s while reading from socket. %s." % tuple(e.args)) + if not data: + self.disconnect() + raise ConnectionError("Socket connection closed when reading.") + return data + + def ping(self): + """ + >>> r = Redis() + >>> r.ping() + 'PONG' + >>> + """ + self.connect() + self._write('PING\r\n') + return self._get_simple_response() + + def set(self, name, value, preserve=False): + """ + >>> r = Redis() + >>> r.set('a', 'pippo') + 'OK' + >>> try: + ... r.set('a', u'pippo \u3235') + ... except InvalidData, e: + ... print e + Error encoding unicode value for key 'a': 'ascii' codec can't encode character u'\u3235' in position 15: ordinal not in range(128). + >>> r.set('b', 105.2) + 'OK' + >>> r.set('b', 'xxx', preserve=True) + 0 + >>> r.get('b') + '105.2' + >>> + """ + self.connect() + # the following will raise an error for unicode values that can't be encoded to ascii + # we could probably add an 'encoding' arg to init, but then what do we do with get()? + # convert back to unicode? and what about ints, or pickled values? 
+ try: + value = value if isinstance(value, basestring) else str(value) + self._write('%s %s %s\r\n%s\r\n' % ( + 'SETNX' if preserve else 'SET', name, len(value), value + )) + except UnicodeEncodeError, e: + raise InvalidData("Error encoding unicode value for key '%s': %s." % (name, e)) + return self._get_numeric_response() if preserve else self._get_simple_response() + + def get(self, name): + """ + >>> r = Redis() + >>> r.set('a', 'pippo'), r.set('b', 15), r.set('c', '\\r\\naaa\\nbbb\\r\\ncccc\\nddd\\r\\n'), r.set('d', '\\r\\n') + ('OK', 'OK', 'OK', 'OK') + >>> r.get('a') + 'pippo' + >>> r.get('b') + '15' + >>> r.get('d') + '\\r\\n' + >>> r.get('b') + '15' + >>> r.get('c') + '\\r\\naaa\\nbbb\\r\\ncccc\\nddd\\r\\n' + >>> r.get('c') + '\\r\\naaa\\nbbb\\r\\ncccc\\nddd\\r\\n' + >>> r.get('ajhsd') + >>> + """ + self.connect() + self._write('GET %s\r\n' % name) + return self._get_value() + + def incr(self, name, amount=1): + """ + >>> r = Redis() + >>> r.delete('a') + 1 + >>> r.incr('a') + 1 + >>> r.incr('a') + 2 + >>> r.incr('a', 2) + 4 + >>> + """ + self.connect() + if amount == 1: + self._write('INCR %s\r\n' % name) + else: + self._write('INCRBY %s %s\r\n' % (name, amount)) + return self._get_numeric_response() + + def decr(self, name, amount=1): + """ + >>> r = Redis() + >>> if r.get('a'): + ... r.delete('a') + ... else: + ... print 1 + 1 + >>> r.decr('a') + -1 + >>> r.decr('a') + -2 + >>> r.decr('a', 5) + -7 + >>> + """ + self.connect() + if amount == 1: + self._write('DECR %s\r\n' % name) + else: + self._write('DECRBY %s %s\r\n' % (name, amount)) + return self._get_numeric_response() + + def exists(self, name): + """ + >>> r = Redis() + >>> r.exists('dsjhfksjdhfkdsjfh') + 0 + >>> r.set('a', 'a') + 'OK' + >>> r.exists('a') + 1 + >>> + """ + self.connect() + self._write('EXISTS %s\r\n' % name) + return self._get_numeric_response() + + def delete(self, name): + """ + >>> r = Redis() + >>> r.delete('dsjhfksjdhfkdsjfh') + 0 + >>> r.set('a', 'a') + 'OK' + >>> r.delete('a') + 1 + >>> r.exists('a') + 0 + >>> r.delete('a') + 0 + >>> + """ + self.connect() + self._write('DEL %s\r\n' % name) + return self._get_numeric_response() + + def key_type(self, name): + """ + Not yet implemented. + """ + self.connect() + self._write('TYPE %s\r\n' % name) + return self._get_simple_response() + + def keys(self, pattern): + """ + >>> r = Redis() + >>> r.flush() + 'OK' + >>> r.set('a', 'a') + 'OK' + >>> r.keys('a*') + ['a'] + >>> r.set('a2', 'a') + 'OK' + >>> r.keys('a*') + ['a', 'a2'] + >>> r.delete('a2') + 1 + >>> r.keys('sjdfhskjh*') + [] + >>> + """ + self.connect() + self._write('KEYS %s\r\n' % pattern) + return self._get_value().split() + + def randomkey(self): + """ + >>> r = Redis() + >>> r.set('a', 'a') + 'OK' + >>> isinstance(r.randomkey(), str) + True + >>> + """ + #raise NotImplementedError("Implemented but buggy, do not use.") + self.connect() + self._write('RANDOMKEY\r\n') + data = self._read().strip() + self._check_for_error(data) + return data + + def rename(self, src, dst, preserve=False): + """ + >>> r = Redis() + >>> try: + ... r.rename('a', 'a') + ... except ResponseError, e: + ... print e + src and dest key are the same + >>> r.rename('a', 'b') + 'OK' + >>> try: + ... r.rename('a', 'b') + ... except ResponseError, e: + ... 
print e + no such key + >>> r.set('a', 1) + 'OK' + >>> r.rename('b', 'a', preserve=True) + 0 + >>> + """ + self.connect() + if preserve: + self._write('RENAMENX %s %s\r\n' % (src, dst)) + return self._get_numeric_response() + else: + self._write('RENAME %s %s\r\n' % (src, dst)) + return self._get_simple_response().strip() + + def push(self, name, value, tail=False): + """ + >>> r = Redis() + >>> r.delete('l') + 1 + >>> r.push('l', 'a') + 'OK' + >>> r.set('a', 'a') + 'OK' + >>> try: + ... r.push('a', 'a') + ... except ResponseError, e: + ... print e + Operation against a key holding the wrong kind of value + >>> + """ + self.connect() + # same considerations on unicode as in set() apply here + try: + value = value if isinstance(value, basestring) else str(value) + self._write('%s %s %s\r\n%s\r\n' % ( + 'LPUSH' if tail else 'RPUSH', name, len(value), value + )) + except UnicodeEncodeError, e: + raise InvalidData("Error encoding unicode value for element in list '%s': %s." % (name, e)) + return self._get_simple_response() + + def llen(self, name): + """ + >>> r = Redis() + >>> r.delete('l') + 1 + >>> r.push('l', 'a') + 'OK' + >>> r.llen('l') + 1 + >>> r.push('l', 'a') + 'OK' + >>> r.llen('l') + 2 + >>> + """ + self.connect() + self._write('LLEN %s\r\n' % name) + return self._get_numeric_response() + + def lrange(self, name, start, end): + """ + >>> r = Redis() + >>> r.delete('l') + 1 + >>> r.lrange('l', 0, 1) + [] + >>> r.push('l', 'aaa') + 'OK' + >>> r.lrange('l', 0, 1) + ['aaa'] + >>> r.push('l', 'bbb') + 'OK' + >>> r.lrange('l', 0, 0) + ['aaa'] + >>> r.lrange('l', 0, 1) + ['aaa', 'bbb'] + >>> r.lrange('l', -1, 0) + [] + >>> r.lrange('l', -1, -1) + ['bbb'] + >>> + """ + self.connect() + self._write('LRANGE %s %s %s\r\n' % (name, start, end)) + return self._get_multi_response() + + def ltrim(self, name, start, end): + """ + >>> r = Redis() + >>> r.delete('l') + 1 + >>> try: + ... r.ltrim('l', 0, 1) + ... except ResponseError, e: + ... print e + no such key + >>> r.push('l', 'aaa') + 'OK' + >>> r.push('l', 'bbb') + 'OK' + >>> r.push('l', 'ccc') + 'OK' + >>> r.ltrim('l', 0, 1) + 'OK' + >>> r.llen('l') + 2 + >>> r.ltrim('l', 99, 95) + 'OK' + >>> r.llen('l') + 0 + >>> + """ + self.connect() + self._write('LTRIM %s %s %s\r\n' % (name, start, end)) + return self._get_simple_response() + + def lindex(self, name, index): + """ + >>> r = Redis() + >>> res = r.delete('l') + >>> r.lindex('l', 0) + >>> r.push('l', 'aaa') + 'OK' + >>> r.lindex('l', 0) + 'aaa' + >>> r.lindex('l', 2) + >>> r.push('l', 'ccc') + 'OK' + >>> r.lindex('l', 1) + 'ccc' + >>> r.lindex('l', -1) + 'ccc' + >>> + """ + self.connect() + self._write('LINDEX %s %s\r\n' % (name, index)) + return self._get_value() + + def pop(self, name, tail=False): + """ + >>> r = Redis() + >>> r.delete('l') + 1 + >>> r.pop('l') + >>> r.push('l', 'aaa') + 'OK' + >>> r.push('l', 'bbb') + 'OK' + >>> r.pop('l') + 'aaa' + >>> r.pop('l') + 'bbb' + >>> r.pop('l') + >>> r.push('l', 'aaa') + 'OK' + >>> r.push('l', 'bbb') + 'OK' + >>> r.pop('l', tail=True) + 'bbb' + >>> r.pop('l') + 'aaa' + >>> r.pop('l') + >>> + """ + self.connect() + self._write('%s %s\r\n' % ('RPOP' if tail else 'LPOP', name)) + return self._get_value() + + def lset(self, name, index, value): + """ + >>> r = Redis() + >>> r.delete('l') + 1 + >>> try: + ... r.lset('l', 0, 'a') + ... except ResponseError, e: + ... print e + no such key + >>> r.push('l', 'aaa') + 'OK' + >>> try: + ... r.lset('l', 1, 'a') + ... except ResponseError, e: + ... 
print e + index out of range + >>> r.lset('l', 0, 'bbb') + 'OK' + >>> r.lrange('l', 0, 1) + ['bbb'] + >>> + """ + self.connect() + try: + value = value if isinstance(value, basestring) else str(value) + self._write('LSET %s %s %s\r\n%s\r\n' % ( + name, index, len(value), value + )) + except UnicodeEncodeError, e: + raise InvalidData("Error encoding unicode value for element %s in list '%s': %s." % (index, name, e)) + return self._get_simple_response() + + def lrem(self, name, value, num=0): + """ + >>> r = Redis() + >>> r.delete('l') + 1 + >>> r.push('l', 'aaa') + 'OK' + >>> r.push('l', 'bbb') + 'OK' + >>> r.push('l', 'aaa') + 'OK' + >>> r.lrem('l', 'aaa') + 2 + >>> r.lrange('l', 0, 10) + ['bbb'] + >>> r.push('l', 'aaa') + 'OK' + >>> r.push('l', 'aaa') + 'OK' + >>> r.lrem('l', 'aaa', 1) + 1 + >>> r.lrem('l', 'aaa', 1) + 1 + >>> r.lrem('l', 'aaa', 1) + 0 + >>> + """ + self.connect() + try: + value = value if isinstance(value, basestring) else str(value) + self._write('LREM %s %s %s\r\n%s\r\n' % ( + name, num, len(value), value + )) + except UnicodeEncodeError, e: + raise InvalidData("Error encoding unicode value for element %s in list '%s': %s." % (index, name, e)) + return self._get_numeric_response() + + def sort(self, name, by=None, get=None, start=None, num=None, desc=False, alpha=False): + """ + >>> r = Redis() + >>> r.delete('l') + 1 + >>> r.push('l', 'ccc') + 'OK' + >>> r.push('l', 'aaa') + 'OK' + >>> r.push('l', 'ddd') + 'OK' + >>> r.push('l', 'bbb') + 'OK' + >>> r.sort('l', alpha=True) + ['aaa', 'bbb', 'ccc', 'ddd'] + >>> r.delete('l') + 1 + >>> for i in range(1, 5): + ... res = r.push('l', 1.0 / i) + >>> r.sort('l') + ['0.25', '0.333333333333', '0.5', '1.0'] + >>> r.sort('l', desc=True) + ['1.0', '0.5', '0.333333333333', '0.25'] + >>> r.sort('l', desc=True, start=2, num=1) + ['0.333333333333'] + >>> r.set('weight_0.5', 10) + 'OK' + >>> r.sort('l', desc=True, by='weight_*') + ['0.5', '1.0', '0.333333333333', '0.25'] + >>> for i in r.sort('l', desc=True): + ... res = r.set('test_%s' % i, 100 - float(i)) + >>> r.sort('l', desc=True, get='test_*') + ['99.0', '99.5', '99.6666666667', '99.75'] + >>> r.sort('l', desc=True, by='weight_*', get='test_*') + ['99.5', '99.0', '99.6666666667', '99.75'] + >>> r.sort('l', desc=True, by='weight_*', get='missing_*') + [None, None, None, None] + >>> + """ + stmt = ['SORT', name] + if by: + stmt.append("BY %s" % by) + if start and num: + stmt.append("LIMIT %s %s" % (start, num)) + if get is None: + pass + elif isinstance(get, basestring): + stmt.append("GET %s" % get) + elif isinstance(get, list) or isinstance(get, tuple): + for g in get: + stmt.append("GET %s" % g) + else: + raise RedisError("Invalid parameter 'get' for Redis sort") + if desc: + stmt.append("DESC") + if alpha: + stmt.append("ALPHA") + self.connect() + self._write(' '.join(stmt + ["\r\n"])) + return self._get_multi_response() + + def sadd(self, name, value): + """ + >>> r = Redis() + >>> res = r.delete('s') + >>> r.sadd('s', 'a') + 1 + >>> r.sadd('s', 'b') + 1 + >>> + """ + self.connect() + # same considerations on unicode as in set() apply here + try: + value = value if isinstance(value, basestring) else str(value) + self._write('SADD %s %s\r\n%s\r\n' % ( + name, len(value), value + )) + except UnicodeEncodeError, e: + raise InvalidData("Error encoding unicode value for element in set '%s': %s." 
% (name, e)) + return self._get_numeric_response() + + def srem(self, name, value): + """ + >>> r = Redis() + >>> r.delete('s') + 1 + >>> r.srem('s', 'aaa') + 0 + >>> r.sadd('s', 'b') + 1 + >>> r.srem('s', 'b') + 1 + >>> r.sismember('s', 'b') + 0 + >>> + """ + self.connect() + # same considerations on unicode as in set() apply here + try: + value = value if isinstance(value, basestring) else str(value) + self._write('SREM %s %s\r\n%s\r\n' % ( + name, len(value), value + )) + except UnicodeEncodeError, e: + raise InvalidData("Error encoding unicode value for element in set '%s': %s." % (name, e)) + return self._get_numeric_response() + + def sismember(self, name, value): + """ + >>> r = Redis() + >>> r.delete('s') + 1 + >>> r.sismember('s', 'b') + 0 + >>> r.sadd('s', 'a') + 1 + >>> r.sismember('s', 'b') + 0 + >>> r.sismember('s', 'a') + 1 + >>> + """ + self.connect() + # same considerations on unicode as in set() apply here + try: + value = value if isinstance(value, basestring) else str(value) + self._write('SISMEMBER %s %s\r\n%s\r\n' % ( + name, len(value), value + )) + except UnicodeEncodeError, e: + raise InvalidData("Error encoding unicode value for element in set '%s': %s." % (name, e)) + return self._get_numeric_response() + + def sinter(self, *args): + """ + >>> r = Redis() + >>> res = r.delete('s1') + >>> res = r.delete('s2') + >>> res = r.delete('s3') + >>> r.sadd('s1', 'a') + 1 + >>> r.sadd('s2', 'a') + 1 + >>> r.sadd('s3', 'b') + 1 + >>> try: + ... r.sinter() + ... except ResponseError, e: + ... print e + wrong number of arguments + >>> try: + ... r.sinter('l') + ... except ResponseError, e: + ... print e + Operation against a key holding the wrong kind of value + >>> r.sinter('s1', 's2', 's3') + set([]) + >>> r.sinter('s1', 's2') + set(['a']) + >>> + """ + self.connect() + self._write('SINTER %s\r\n' % ' '.join(args)) + return set(self._get_multi_response()) + + def sinterstore(self, dest, *args): + """ + >>> r = Redis() + >>> res = r.delete('s1') + >>> res = r.delete('s2') + >>> res = r.delete('s3') + >>> r.sadd('s1', 'a') + 1 + >>> r.sadd('s2', 'a') + 1 + >>> r.sadd('s3', 'b') + 1 + >>> r.sinterstore('s_s', 's1', 's2', 's3') + 'OK' + >>> r.sinterstore('s_s', 's1', 's2') + 'OK' + >>> r.smembers('s_s') + set(['a']) + >>> + """ + self.connect() + self._write('SINTERSTORE %s %s\r\n' % (dest, ' '.join(args))) + return self._get_simple_response() + + def smembers(self, name): + """ + >>> r = Redis() + >>> r.delete('s') + 1 + >>> r.sadd('s', 'a') + 1 + >>> r.sadd('s', 'b') + 1 + >>> try: + ... r.smembers('l') + ... except ResponseError, e: + ... print e + Operation against a key holding the wrong kind of value + >>> r.smembers('s') + set(['a', 'b']) + >>> + """ + self.connect() + self._write('SMEMBERS %s\r\n' % name) + return set(self._get_multi_response()) + + def select(self, db): + """ + >>> r = Redis() + >>> r.delete('a') + 1 + >>> r.select(1) + 'OK' + >>> r.set('a', 1) + 'OK' + >>> r.select(0) + 'OK' + >>> r.get('a') + >>> + """ + self.connect() + self._write('SELECT %s\r\n' % db) + return self._get_simple_response() + + def move(self, name, db): + """ + >>> r = Redis() + >>> r.select(0) + 'OK' + >>> r.set('a', 'a') + 'OK' + >>> r.select(1) + 'OK' + >>> if r.get('a'): + ... r.delete('a') + ... else: + ... 
print 1 + 1 + >>> r.select(0) + 'OK' + >>> r.move('a', 1) + 1 + >>> r.get('a') + >>> r.select(1) + 'OK' + >>> r.get('a') + 'a' + >>> r.select(0) + 'OK' + >>> + """ + self.connect() + self._write('MOVE %s %s\r\n' % (name, db)) + return self._get_numeric_response() + + def save(self, background=False): + """ + >>> r = Redis() + >>> r.save() + 'OK' + >>> try: + ... resp = r.save(background=True) + ... except ResponseError, e: + ... assert str(e) == 'background save already in progress', str(e) + ... else: + ... assert resp == 'OK' + >>> + """ + self.connect() + if background: + self._write('BGSAVE\r\n') + else: + self._write('SAVE\r\n') + return self._get_simple_response() + + def lastsave(self): + """ + >>> import time + >>> r = Redis() + >>> t = int(time.time()) + >>> r.save() + 'OK' + >>> r.lastsave() >= t + True + >>> + """ + self.connect() + self._write('LASTSAVE\r\n') + return self._get_numeric_response() + + def flush(self, all_dbs=False): + """ + >>> r = Redis() + >>> r.flush() + 'OK' + >>> r.flush(all_dbs=True) + 'OK' + >>> + """ + self.connect() + self._write('%s\r\n' % ('FLUSHALL' if all_dbs else 'FLUSHDB')) + return self._get_simple_response() + + def _get_value(self, negative_as_nil=False): + data = self._read().strip() + if data == 'nil' or (negative_as_nil and data == '-1'): + return + elif data[0] == '-': + self._check_for_error(data) + try: + l = int(data) + except (TypeError, ValueError): + raise ResponseError("Cannot parse response '%s' as data length." % data) + buf = [] + while l > 0: + data = self._read() + l -= len(data) + if len(data) > l: + # we got the ending crlf + data = data.rstrip() + buf.append(data) + if l == 0: + # the data has a trailing crlf embedded, let's restore it + buf.append(self._read()) + return ''.join(buf) + + def _get_simple_response(self): + data = self._read().strip() + if data[0] == '+': + return data[1:] + self._check_for_error(data) + raise InvalidResponse("Cannot parse first line '%s' for a simple response." % data, data) + + def _get_numeric_response(self, allow_negative=True): + data = self._read().strip() + try: + value = int(data) + except (TypeError, ValueError), e: + pass + else: + if not allow_negative and value < 0: + self._check_for_error(data) + return value + self._check_for_error(data) + raise InvalidResponse("Cannot parse first line '%s' for a numeric response: %s." % (data, e), data) + + def _get_multi_response(self): + results = list() + try: + num = self._get_numeric_response(allow_negative=False) + except InvalidResponse, e: + if e.args[1] == 'nil': + return results + raise + while num: + results.append(self._get_value(negative_as_nil=True)) + num -= 1 + return results + + def _check_for_error(self, data): + if not data or data[0] != '-': + return + if data.startswith('-ERR'): + raise ResponseError(data[4:].strip()) + try: + error_len = int(data[1:]) + except (TypeError, ValueError): + raise ResponseError("Unknown error format '%s'." 
% data) + error_message = self._read().strip()[5:] + raise ResponseError(error_message) + + def disconnect(self): + if isinstance(self._sock, socket.socket): + try: + self._sock.close() + except socket.error: + pass + self._sock = None + self._fp = None + + def connect(self): + """ + >>> r = Redis() + >>> r.connect() + >>> isinstance(r._sock, socket.socket) + True + >>> + """ + if isinstance(self._sock, socket.socket): + return + try: + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + sock.connect((self.host, self.port)) + except socket.error, e: + raise ConnectionError("Error %s connecting to %s:%s. %s." % (e.args[0], self.host, self.port, e.args[1])) + else: + self._sock = sock + self._fp = self._sock.makefile('r') + + +if __name__ == '__main__': + import doctest + doctest.testmod() + \ No newline at end of file diff --git a/client-libraries/ruby/LICENSE b/client-libraries/ruby/LICENSE new file mode 100644 index 00000000..5e648fa9 --- /dev/null +++ b/client-libraries/ruby/LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2009 Ezra Zygmuntowicz + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/client-libraries/ruby/README.markdown b/client-libraries/ruby/README.markdown new file mode 100644 index 00000000..2518c421 --- /dev/null +++ b/client-libraries/ruby/README.markdown @@ -0,0 +1,31 @@ +# redis-rb + +A ruby client library for the redis key value storage system. + +## Information about redis + +Redis is a key value store with some interesting features: +1. It's fast. +2. Keys are strings but values can have types of "NONE", "STRING", "LIST", or "SET". List's can be atomically push'd, pop'd, lpush'd, lpop'd and indexed. This allows you to store things like lists of comments under one key while retaining the ability to append comments without reading and putting back the whole list. + +See [redis on code.google.com](http://code.google.com/p/redis/wiki/README) for more information. + +## Dependencies + +1. redis - + + rake redis:install + +2. dtach - + + rake dtach:install + +3. svn - git is the new black, but we need it for the google codes. + +## Setup + +Use the tasks mentioned above (in Dependencies) to get your machine setup. + +## Examples + +Check the examples/ directory. *Note* you need to have redis-server running first. 
\ No newline at end of file diff --git a/client-libraries/ruby/README.rdoc b/client-libraries/ruby/README.rdoc new file mode 100644 index 00000000..484fbc3c --- /dev/null +++ b/client-libraries/ruby/README.rdoc @@ -0,0 +1,12 @@ +== redis + +A ruby client library for the redis key value storage system: +http://code.google.com/p/redis/wiki/README + +redis is a key value store with some interesting features: + +1. fast +2. keys are strings but values can have types of "NONE","STRING","LIST","SET" + list's can be atomicaly push'd, pop'd and lpush'd, lpop'd and indexed so you + can store things like lists of comments under one key and still be able to + append comments without reading and putting back the whole list. diff --git a/client-libraries/ruby/Rakefile b/client-libraries/ruby/Rakefile new file mode 100644 index 00000000..569b654f --- /dev/null +++ b/client-libraries/ruby/Rakefile @@ -0,0 +1,58 @@ +require 'rubygems' +require 'rake/gempackagetask' +require 'rubygems/specification' +require 'date' +require 'spec/rake/spectask' +require 'tasks/redis.tasks' + + +GEM = 'redis' +GEM_VERSION = '0.0.2' +AUTHORS = ['Ezra Zygmuntowicz', 'Taylor Weibley'] +EMAIL = "ez@engineyard.com" +HOMEPAGE = "http://github.com/ezmobius/redis-rb" +SUMMARY = "Ruby client library for redis key value storage server" + +spec = Gem::Specification.new do |s| + s.name = GEM + s.version = GEM_VERSION + s.platform = Gem::Platform::RUBY + s.has_rdoc = true + s.extra_rdoc_files = ["LICENSE"] + s.summary = SUMMARY + s.description = s.summary + s.authors = AUTHORS + s.email = EMAIL + s.homepage = HOMEPAGE + + # Uncomment this to add a dependency + # s.add_dependency "foo" + + s.require_path = 'lib' + s.autorequire = GEM + s.files = %w(LICENSE README.markdown Rakefile) + Dir.glob("{lib,spec}/**/*") +end + +task :default => :spec + +desc "Run specs" +Spec::Rake::SpecTask.new do |t| + t.spec_files = FileList['spec/**/*_spec.rb'] + t.spec_opts = %w(-fs --color) +end + +Rake::GemPackageTask.new(spec) do |pkg| + pkg.gem_spec = spec +end + +desc "install the gem locally" +task :install => [:package] do + sh %{sudo gem install pkg/#{GEM}-#{GEM_VERSION}} +end + +desc "create a gemspec file" +task :make_spec do + File.open("#{GEM}.gemspec", "w") do |file| + file.puts spec.to_ruby + end +end \ No newline at end of file diff --git a/client-libraries/ruby/bench.rb b/client-libraries/ruby/bench.rb new file mode 100644 index 00000000..88b04e72 --- /dev/null +++ b/client-libraries/ruby/bench.rb @@ -0,0 +1,15 @@ +require 'benchmark' +$:.push File.join(File.dirname(__FILE__), 'lib') +require 'redis' + +times = 20000 + +@r = Redis.new +@r['foo'] = "The first line we sent to the server is some text" +Benchmark.bmbm do |x| + x.report("set") { 20000.times {|i| @r["foo#{i}"] = "The first line we sent to the server is some text"; @r["foo#{i}"]} } +end + +@r.keys('*').each do |k| + @r.delete k +end \ No newline at end of file diff --git a/client-libraries/ruby/bin/distredis b/client-libraries/ruby/bin/distredis new file mode 100755 index 00000000..d5702079 --- /dev/null +++ b/client-libraries/ruby/bin/distredis @@ -0,0 +1,33 @@ +require 'fileutils' + +class RedisCluster + + def initialize(opts={}) + opts = {:port => 6379, :host => 'localhost', :basedir => "#{Dir.pwd}/rdsrv" }.merge(opts) + FileUtils.mkdir_p opts[:basedir] + opts[:size].times do |i| + port = opts[:port] + i + FileUtils.mkdir_p "#{opts[:basedir]}/#{port}" + File.open("#{opts[:basedir]}/#{port}.conf", 'w'){|f| f.write(make_config(port, "#{opts[:basedir]}/#{port}", 
"#{opts[:basedir]}/#{port}.log"))} + system(%Q{#{File.join(File.expand_path(File.dirname(__FILE__)), "../redis/redis-server #{opts[:basedir]}/#{port}.conf &" )}}) + end + end + + def make_config(port=6379, data=port, logfile='stdout', loglevel='debug') + config = %Q{ +timeout 300 +save 900 1 +save 300 10 +save 60 10000 +dir #{data} +loglevel #{loglevel} +logfile #{logfile} +databases 16 +port #{port} + } + end + +end + + +RedisCluster.new :size => 4 \ No newline at end of file diff --git a/client-libraries/ruby/examples/basic.rb b/client-libraries/ruby/examples/basic.rb new file mode 100644 index 00000000..022796db --- /dev/null +++ b/client-libraries/ruby/examples/basic.rb @@ -0,0 +1,16 @@ +require 'rubygems' +require 'redis' + +r = Redis.new + +r.delete('foo') + +puts + +p'set foo to "bar"' +r['foo'] = 'bar' + +puts + +p 'value of foo' +p r['foo'] diff --git a/client-libraries/ruby/examples/incr-decr.rb b/client-libraries/ruby/examples/incr-decr.rb new file mode 100644 index 00000000..e951969a --- /dev/null +++ b/client-libraries/ruby/examples/incr-decr.rb @@ -0,0 +1,18 @@ +require 'rubygems' +require 'redis' + +r = Redis.new + +puts +p 'incr' +r.delete 'counter' + +p r.incr('counter') +p r.incr('counter') +p r.incr('counter') + +puts +p 'decr' +p r.decr('counter') +p r.decr('counter') +p r.decr('counter') diff --git a/client-libraries/ruby/examples/list.rb b/client-libraries/ruby/examples/list.rb new file mode 100644 index 00000000..8f186c15 --- /dev/null +++ b/client-libraries/ruby/examples/list.rb @@ -0,0 +1,26 @@ +require 'rubygems' +require 'redis' + +r = Redis.new + +r.delete 'logs' + +puts + +p "pushing log messages into a LIST" +r.push_tail 'logs', 'some log message' +r.push_tail 'logs', 'another log message' +r.push_tail 'logs', 'yet another log message' +r.push_tail 'logs', 'also another log message' + +puts +p 'contents of logs LIST' + +p r.list_range('logs', 0, -1) + +puts +p 'Trim logs LIST to last 2 elements(easy circular buffer)' + +r.list_trim('logs', -2, -1) + +p r.list_range('logs', 0, -1) diff --git a/client-libraries/ruby/examples/sets.rb b/client-libraries/ruby/examples/sets.rb new file mode 100644 index 00000000..1ba4e30c --- /dev/null +++ b/client-libraries/ruby/examples/sets.rb @@ -0,0 +1,36 @@ +require 'rubygems' +require 'redis' + +r = Redis.new + +r.delete 'foo-tags' +r.delete 'bar-tags' + +puts +p "create a set of tags on foo-tags" + +r.set_add 'foo-tags', 'one' +r.set_add 'foo-tags', 'two' +r.set_add 'foo-tags', 'three' + +puts +p "create a set of tags on bar-tags" + +r.set_add 'bar-tags', 'three' +r.set_add 'bar-tags', 'four' +r.set_add 'bar-tags', 'five' + +puts +p 'foo-tags' + +p r.set_members('foo-tags') + +puts +p 'bar-tags' + +p r.set_members('bar-tags') + +puts +p 'intersection of foo-tags and bar-tags' + +p r.set_intersect('foo-tags', 'bar-tags') diff --git a/client-libraries/ruby/fill.rb b/client-libraries/ruby/fill.rb new file mode 100644 index 00000000..ed95f3a9 --- /dev/null +++ b/client-libraries/ruby/fill.rb @@ -0,0 +1,11 @@ +require 'benchmark' +$:.push File.join(File.dirname(__FILE__), 'lib') +require 'redis' + +times = 20000 + +@r = Redis.new +(0..1000000).each{|x| + @r[x] = "Hello World" + puts x if (x > 0 and x % 10000) == 0 +} diff --git a/client-libraries/ruby/lib/better_timeout.rb b/client-libraries/ruby/lib/better_timeout.rb new file mode 100644 index 00000000..ac12c6eb --- /dev/null +++ b/client-libraries/ruby/lib/better_timeout.rb @@ -0,0 +1,188 @@ +#-- +# = timeout.rb +# +# execution timeout +# +# = Copyright +# +# Copyright - (C) 
2008 Evan Phoenix +# Copyright:: (C) 2000 Network Applied Communication Laboratory, Inc. +# Copyright:: (C) 2000 Information-technology Promotion Agency, Japan +# +#++ +# +# = Description +# +# A way of performing a potentially long-running operation in a thread, and +# terminating it's execution if it hasn't finished within fixed amount of +# time. +# +# Previous versions of timeout didn't use a module for namespace. This version +# provides both Timeout.timeout, and a backwards-compatible #timeout. +# +# = Synopsis +# +# require 'timeout' +# status = Timeout::timeout(5) { +# # Something that should be interrupted if it takes too much time... +# } +# + +require 'thread' + +module Timeout + + ## + # Raised by Timeout#timeout when the block times out. + + class Error b.left } + end + + slept_for = sleep(min.left) + + @mutex.synchronize do + @requests.delete_if do |r| + if r.elapsed(slept_for) + r.cancel + true + else + false + end + end + end + + end + end + + req = TimeoutRequest.new(time, Thread.current, exc) + + @mutex.synchronize do + @requests << req + end + + @controller.run + + return req + end + + ## + # Executes the method's block. If the block execution terminates before +sec+ + # seconds has passed, it returns true. If not, it terminates the execution + # and raises +exception+ (which defaults to Timeout::Error). + # + # Note that this is both a method of module Timeout, so you can 'include + # Timeout' into your classes so they have a #timeout method, as well as a + # module method, so you can call it directly as Timeout.timeout(). + + def timeout(sec, exception=Error) + return yield if sec == nil or sec.zero? + raise ThreadError, "timeout within critical session" if Thread.critical + + req = Timeout.add_timeout sec, exception + + begin + yield sec + ensure + req.abort + end + end + + module_function :timeout + +end + +## +# Identical to: +# +# Timeout::timeout(n, e, &block). +# +# Defined for backwards compatibility with earlier versions of timeout.rb, see +# Timeout#timeout. + +def timeout(n, e=Timeout::Error, &block) # :nodoc: + Timeout::timeout(n, e, &block) +end + +## +# Another name for Timeout::Error, defined for backwards compatibility with +# earlier versions of timeout.rb. 
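+#
+# A hedged sketch of the legacy form this alias keeps working (slow_call and
+# the 2-second limit are illustrative, not part of this library):
+#
+#   begin
+#     timeout(2) { slow_call }
+#   rescue TimeoutError
+#     # raised if the block ran longer than 2 seconds
+#   end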
+ +TimeoutError = Timeout::Error # :nodoc: + +if __FILE__ == $0 + p timeout(5) { + 45 + } + p timeout(5, TimeoutError) { + 45 + } + p timeout(nil) { + 54 + } + p timeout(0) { + 54 + } + p timeout(5) { + loop { + p 10 + sleep 1 + } + } +end + diff --git a/client-libraries/ruby/lib/dist_redis.rb b/client-libraries/ruby/lib/dist_redis.rb new file mode 100644 index 00000000..04505c20 --- /dev/null +++ b/client-libraries/ruby/lib/dist_redis.rb @@ -0,0 +1,111 @@ +require 'redis' +require 'hash_ring' +class DistRedis + attr_reader :ring + def initialize(*servers) + srvs = [] + servers.each do |s| + server, port = s.split(':') + srvs << Redis.new(:host => server, :port => port) + end + @ring = HashRing.new srvs + end + + def node_for_key(key) + if key =~ /\{(.*)?\}/ + key = $1 + end + @ring.get_node(key) + end + + def add_server(server) + server, port = server.split(':') + @ring.add_node Redis.new(:host => server, :port => port) + end + + def method_missing(sym, *args, &blk) + if redis = node_for_key(args.first) + redis.send sym, *args, &blk + else + super + end + end + + def keys(glob) + keyz = [] + @ring.nodes.each do |red| + keyz.concat red.keys(glob) + end + keyz + end + + def save + @ring.nodes.each do |red| + red.save + end + end + + def bgsave + @ring.nodes.each do |red| + red.bgsave + end + end + + def quit + @ring.nodes.each do |red| + red.quit + end + end + + def delete_cloud! + @ring.nodes.each do |red| + red.keys("*").each do |key| + red.delete key + end + end + end + +end + + +if __FILE__ == $0 + +r = DistRedis.new 'localhost:6379', 'localhost:6380', 'localhost:6381', 'localhost:6382' + r['urmom'] = 'urmom' + r['urdad'] = 'urdad' + r['urmom1'] = 'urmom1' + r['urdad1'] = 'urdad1' + r['urmom2'] = 'urmom2' + r['urdad2'] = 'urdad2' + r['urmom3'] = 'urmom3' + r['urdad3'] = 'urdad3' + p r['urmom'] + p r['urdad'] + p r['urmom1'] + p r['urdad1'] + p r['urmom2'] + p r['urdad2'] + p r['urmom3'] + p r['urdad3'] + + r.push_tail 'listor', 'foo1' + r.push_tail 'listor', 'foo2' + r.push_tail 'listor', 'foo3' + r.push_tail 'listor', 'foo4' + r.push_tail 'listor', 'foo5' + + p r.pop_tail 'listor' + p r.pop_tail 'listor' + p r.pop_tail 'listor' + p r.pop_tail 'listor' + p r.pop_tail 'listor' + + puts "key distribution:" + + r.ring.nodes.each do |red| + p [red.port, red.keys("*")] + end + r.delete_cloud! + p r.keys('*') + +end diff --git a/client-libraries/ruby/lib/hash_ring.rb b/client-libraries/ruby/lib/hash_ring.rb new file mode 100644 index 00000000..403f7cf5 --- /dev/null +++ b/client-libraries/ruby/lib/hash_ring.rb @@ -0,0 +1,73 @@ +require 'digest/md5' +class HashRing + attr_reader :ring, :sorted_keys, :replicas, :nodes + # nodes is a list of objects that have a proper to_s representation. + # replicas indicates how many virtual points should be used pr. node, + # replicas are required to improve the distribution. + def initialize(nodes=[], replicas=3) + @replicas = replicas + @ring = {} + @nodes = [] + @sorted_keys = [] + nodes.each do |node| + add_node(node) + end + end + + # Adds a `node` to the hash ring (including a number of replicas). + def add_node(node) + @nodes << node + @replicas.times do |i| + key = gen_key("#{node}:#{i}") + @ring[key] = node + @sorted_keys << key + end + @sorted_keys.sort! + end + + def remove_node(node) + @replicas.times do |i| + key = gen_key("#{node}:#{count}") + @ring.delete(key) + @sorted_keys.reject! 
{|k| k == key} + end + end + + # get the node in the hash ring for this key + def get_node(key) + get_node_pos(key)[0] + end + + def get_node_pos(key) + return [nil,nil] if @ring.size == 0 + key = gen_key(key) + nodes = @sorted_keys + nodes.size.times do |i| + node = nodes[i] + if key <= node + return [@ring[node], i] + end + end + [@ring[nodes[0]], 0] + end + + def iter_nodes(key) + return [nil,nil] if @ring.size == 0 + node, pos = get_node_pos(key) + @sorted_keys[pos..-1].each do |k| + yield @ring[k] + end + end + + def gen_key(key) + key = Digest::MD5.hexdigest(key) + ((key[3] << 24) | (key[2] << 16) | (key[1] << 8) | key[0]) + end + +end + +# ring = HashRing.new ['server1', 'server2', 'server3'] +# p ring +# # +# p ring.get_node "kjhjkjlkjlkkh" +# \ No newline at end of file diff --git a/client-libraries/ruby/lib/redis.rb b/client-libraries/ruby/lib/redis.rb new file mode 100644 index 00000000..f54910c1 --- /dev/null +++ b/client-libraries/ruby/lib/redis.rb @@ -0,0 +1,836 @@ +require 'socket' +require File.join(File.dirname(__FILE__),'better_timeout') +require 'set' + +class RedisError < StandardError +end + +class Redis + OK = "+OK".freeze + ERRCODE = "-".freeze + NIL = 'nil'.freeze + CTRLF = "\r\n".freeze + + def to_s + "#{host}:#{port}" + end + + def port + @opts[:port] + end + + def host + @opts[:host] + end + + def initialize(opts={}) + @opts = {:host => 'localhost', :port => '6379'}.merge(opts) + end + + # SET key value + # Time complexity: O(1) + # Set the string value as value of the key. The string can't be longer + # than 1073741824 bytes (1 GB). + # + # Return value: status code reply + def []=(key, val) + val = redis_marshal(val) + timeout_retry(3, 3){ + write "SET #{key} #{val.to_s.size}\r\n#{val}\r\n" + status_code_reply + } + end + + # SETNX key value + # + # Time complexity: O(1) + # SETNX works exactly like SET with the only difference that if the key + # already exists no operation is performed. SETNX actually means "SET if Not eXists". + # + # *Return value: integer reply, specifically: + # + # 1 if the key was set 0 if the key was not set + def set_unless_exists(key, val) + val = redis_marshal(val) + timeout_retry(3, 3){ + write "SETNX #{key} #{val.to_s.size}\r\n#{val}\r\n" + integer_reply == 1 + } + end + + # GET key + # Time complexity: O(1) + # Get the value of the specified key. If the key does not exist the special value + # 'nil' is returned. If the value stored at key is not a string an error is + # returned because GET can only handle string values. + # + # Return value: bulk reply + def [](key) + timeout_retry(3, 3){ + write "GET #{key}\r\n" + redis_unmarshal(bulk_reply) + } + end + + # INCR key + # INCRBY key value + # Time complexity: O(1) + # Increment the number stored at key by one. If the key does not exist or contains + # a value of a wrong type, set the key to the value of "1" (like if the previous + # value was zero). + # + # INCRBY works just like INCR but instead to increment by 1 the increment is value. + # + # Return value: integer reply + def incr(key, increment=nil) + timeout_retry(3, 3){ + if increment + write "INCRBY #{key} #{increment}\r\n" + else + write "INCR #{key}\r\n" + end + integer_reply + } + end + + + # DECR key + # + # DECRBY key value + # + # Time complexity: O(1) Like INCR/INCRBY but decrementing instead of incrementing. 
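+  #
+  # Usage sketch against this client (the 'counter' key is illustrative):
+  #
+  #   r = Redis.new
+  #   r.incr('counter')     # => 1 when the key did not exist
+  #   r.decr('counter')     # => 0
+  #   r.decr('counter', 5)  # sends DECRBY counter 5, => -5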
+ def decr(key, increment=nil) + timeout_retry(3, 3){ + if increment + write "DECRBY #{key} #{increment}\r\n" + else + write "DECR #{key}\r\n" + end + integer_reply + } + end + + # RANDOMKEY + # Time complexity: O(1) + # Returns a random key from the currently seleted DB. + # + # Return value: single line reply + def randkey + timeout_retry(3, 3){ + write "RANDOMKEY\r\n" + single_line_reply + } + end + + # RENAME oldkey newkey + # + # Atomically renames the key oldkey to newkey. If the source and destination + # name are the same an error is returned. If newkey already exists it is + # overwritten. + # + # Return value: status code reply + def rename!(oldkey, newkey) + timeout_retry(3, 3){ + write "RENAME #{oldkey} #{newkey}\r\n" + status_code_reply + } + end + + # RENAMENX oldkey newkey + # Just like RENAME but fails if the destination key newkey already exists. + # + # *Return value: integer reply, specifically: + # + # 1 if the key was renamed 0 if the target key already exist -1 if the + # source key does not exist -3 if source and destination keys are the same + def rename(oldkey, newkey) + timeout_retry(3, 3){ + write "RENAMENX #{oldkey} #{newkey}\r\n" + case integer_reply + when -1 + raise RedisError, "source key: #{oldkey} does not exist" + when 0 + raise RedisError, "target key: #{oldkey} already exists" + when -3 + raise RedisError, "source and destination keys are the same" + when 1 + true + end + } + end + + # EXISTS key + # Time complexity: O(1) + # Test if the specified key exists. The command returns "0" if the key + # exists, otherwise "1" is returned. Note that even keys set with an empty + # string as value will return "1". + # + # *Return value: integer reply, specifically: + # + # 1 if the key exists 0 if the key does not exist + def key?(key) + timeout_retry(3, 3){ + write "EXISTS #{key}\r\n" + integer_reply == 1 + } + end + + # DEL key + # Time complexity: O(1) + # Remove the specified key. If the key does not exist no operation is + # performed. The command always returns success. + # + # *Return value: integer reply, specifically: + # + # 1 if the key was removed 0 if the key does not exist + def delete(key) + timeout_retry(3, 3){ + write "DEL #{key}\r\n" + integer_reply == 1 + } + end + + # KEYS pattern + # Time complexity: O(n) (with n being the number of keys in the DB) + # Returns all the keys matching the glob-style pattern as space separated strings. + # For example if you have in the database the keys "foo" and "foobar" the command + # "KEYS foo*" will return "foo foobar". + # + # Note that while the time complexity for this operation is O(n) the constant times + # are pretty low. For example Redis running on an entry level laptop can scan a 1 + # million keys database in 40 milliseconds. Still it's better to consider this one + # of the slow commands that may ruin the DB performance if not used with care. + # + # Return value: bulk reply + def keys(glob) + timeout_retry(3, 3){ + write "KEYS #{glob}\r\n" + bulk_reply.split(' ') + } + end + + # TYPE key + # + # Time complexity: O(1) Return the type of the value stored at key in form of + # a string. The type can be one of "none", "string", "list", "set". "none" is + # returned if the key does not exist. + # + # Return value: single line reply + def type?(key) + timeout_retry(3, 3){ + write "TYPE #{key}\r\n" + single_line_reply + } + end + + # RPUSH key string + # + # Time complexity: O(1) + # Add the given string to the tail of the list contained at key. 
If the key + # does not exist an empty list is created just before the append operation. + # If the key exists but is not a List an error is returned. + # + # Return value: status code reply + def push_tail(key, string) + timeout_retry(3, 3){ + write "RPUSH #{key} #{string.to_s.size}\r\n#{string.to_s}\r\n" + status_code_reply + } + end + + # LPUSH key string + # Time complexity: O(1) + # Add the given string to the head of the list contained at key. If the + # key does not exist an empty list is created just before the append operation. + # If the key exists but is not a List an error is returned. + # + # Return value: status code reply + def push_head(key, string) + timeout_retry(3, 3){ + write "LPUSH #{key} #{string.to_s.size}\r\n#{string.to_s}\r\n" + status_code_reply + } + end + + # LPOP key + # + # Time complexity: O(1) + # Atomically return and remove the first element of the list. For example if + # the list contains the elements "a","b","c" LPOP will return "a" and the + # list will become "b","c". + # + # If the key does not exist or the list is already empty the special value + # 'nil' is returned. + # + # Return value: bulk reply + def pop_head(key) + timeout_retry(3, 3){ + write "LPOP #{key}\r\n" + bulk_reply + } + end + + # RPOP key + # This command works exactly like LPOP, but the last element instead + # of the first element of the list is returned/deleted. + def pop_tail(key) + timeout_retry(3, 3){ + write "RPOP #{key}\r\n" + bulk_reply + } + end + + # LSET key index value + # Time complexity: O(N) (with N being the length of the list) + # Set the list element at index (see LINDEX for information about the index argument) with the new value. Out of range indexes will generate an error. Note that setting the first or last elements of the list is O(1). + # + # Return value: status code reply + def list_set(key, index, val) + timeout_retry(3, 3){ + write "LSET #{key} #{index} #{val.to_s.size}\r\n#{val}\r\n" + status_code_reply + } + end + + + # LLEN key + # Time complexity: O(1) + # Return the length of the list stored at the specified key. If the key does not + # exist zero is returned (the same behaviour as for empty lists). If the value + # stored at key is not a list the special value -1 is returned. Note: client + # library should raise an exception when -1 is returned instead to pass the + # value back to the caller like a normal list length value. + # + # *Return value: integer reply, specifically: + # + # the length of the list as an integer + # >= + # 0 if the operation succeeded -2 if the specified key does not hold a list valu + def list_length(key) + timeout_retry(3, 3){ + write "LLEN #{key}\r\n" + case i = integer_reply + when -2 + raise RedisError, "key: #{key} does not hold a list value" + else + i + end + } + end + + # LRANGE key start end + # Time complexity: O(n) (with n being the length of the range) + # Return the specified elements of the list stored at the specified key. Start + # and end are zero-based indexes. 0 is the first element of the list (the list head), + # 1 the next element and so on. + # + # For example LRANGE foobar 0 2 will return the first three elements of the list. + # + # start and end can also be negative numbers indicating offsets from the end of the list. + # For example -1 is the last element of the list, -2 the penultimate element and so on. + # + # Indexes out of range will not produce an error: if start is over the end of the list, + # or start > end, an empty list is returned. 
If end is over the end of the list Redis + # will threat it just like the last element of the list. + # + # Return value: multi bulk reply + def list_range(key, start, ending) + timeout_retry(3, 3){ + write "LRANGE #{key} #{start} #{ending}\r\n" + multi_bulk_reply + } + end + + + # LTRIM key start end + # Time complexity: O(n) (with n being len of list - len of range) + # Trim an existing list so that it will contain only the specified range of + # elements specified. Start and end are zero-based indexes. 0 is the first + # element of the list (the list head), 1 the next element and so on. + # + # For example LTRIM foobar 0 2 will modify the list stored at foobar key so that + # only the first three elements of the list will remain. + # + # start and end can also be negative numbers indicating offsets from the end of + # the list. For example -1 is the last element of the list, -2 the penultimate + # element and so on. + # + # Indexes out of range will not produce an error: if start is over the end of + # the list, or start > end, an empty list is left as value. If end over the + # end of the list Redis will threat it just like the last element of the list. + # + # Hint: the obvious use of LTRIM is together with LPUSH/RPUSH. For example: + # + # LPUSH mylist LTRIM mylist 0 99 + # The above two commands will push elements in the list taking care that the + # list will not grow without limits. This is very useful when using Redis + # to store logs for example. It is important to note that when used in this + # way LTRIM is an O(1) operation because in the average case just one element + # is removed from the tail of the list. + # + # Return value: status code reply + def list_trim(key, start, ending) + timeout_retry(3, 3){ + write "LTRIM #{key} #{start} #{ending}\r\n" + status_code_reply + } + end + + # LINDEX key index + # Time complexity: O(n) (with n being the length of the list) + # Return the specified element of the list stored at the specified key. 0 is + # the first element, 1 the second and so on. Negative indexes are supported, + # for example -1 is the last element, -2 the penultimate and so on. + # + # If the value stored at key is not of list type an error is returned. If + # the index is out of range an empty string is returned. + # + # Note that even if the average time complexity is O(n) asking for the first + # or the last element of the list is O(1). + # + # Return value: bulk reply + def list_index(key, index) + timeout_retry(3, 3){ + write "LINDEX #{key} #{index}\r\n" + bulk_reply + } + end + + # SADD key member + # Time complexity O(1) + # Add the specified member to the set value stored at key. If member is + # already a member of the set no operation is performed. If key does not + # exist a new set with the specified member as sole member is crated. If + # the key exists but does not hold a set value an error is returned. + # + # *Return value: integer reply, specifically: + # + # 1 if the new element was added 0 if the new element was already a member + # of the set -2 if the key contains a non set value + def set_add(key, member) + timeout_retry(3, 3){ + write "SADD #{key} #{member.to_s.size}\r\n#{member}\r\n" + case integer_reply + when 1 + true + when 0 + false + when -2 + raise RedisError, "key: #{key} contains a non set value" + end + } + end + + # SREM key member + # + # Time complexity O(1) + # Remove the specified member from the set value stored at key. If member + # was not a member of the set no operation is performed. 
If key does not + # exist or does not hold a set value an error is returned. + # + # *Return value: integer reply, specifically: + # + # 1 if the new element was removed 0 if the new element was not a member + # of the set -2 if the key does not hold a set value + def set_delete(key, member) + timeout_retry(3, 3){ + write "SREM #{key} #{member.to_s.size}\r\n#{member}\r\n" + case integer_reply + when 1 + true + when 0 + false + when -2 + raise RedisError, "key: #{key} contains a non set value" + end + } + end + + # SCARD key + # Time complexity O(1) + # Return the set cardinality (number of elements). If the key does not + # exist 0 is returned, like for empty sets. If the key does not hold a + # set value -1 is returned. Client libraries should raise an error when -1 + # is returned instead to pass the value to the caller. + # + # *Return value: integer reply, specifically: + # + # the cardinality (number of elements) of the set as an integer + # >= + # 0 if the operation succeeded -2 if the specified key does not hold a set value + def set_count(key) + timeout_retry(3, 3){ + write "SCARD #{key}\r\n" + case i = integer_reply + when -2 + raise RedisError, "key: #{key} contains a non set value" + else + i + end + } + end + + # SISMEMBER key member + # + # Time complexity O(1) + # Return 1 if member is a member of the set stored at key, otherwise 0 is + # returned. On error a negative value is returned. Client libraries should + # raise an error when a negative value is returned instead to pass the value + # to the caller. + # + # *Return value: integer reply, specifically: + # + # 1 if the element is a member of the set 0 if the element is not a member of + # the set OR if the key does not exist -2 if the key does not hold a set value + def set_member?(key, member) + timeout_retry(3, 3){ + write "SISMEMBER #{key} #{member.to_s.size}\r\n#{member}\r\n" + case integer_reply + when 1 + true + when 0 + false + when -2 + raise RedisError, "key: #{key} contains a non set value" + end + } + end + + # SINTER key1 key2 ... keyN + # Time complexity O(N*M) worst case where N is the cardinality of the smallest + # set and M the number of sets + # Return the members of a set resulting from the intersection of all the sets + # hold at the specified keys. Like in LRANGE the result is sent to the client + # as a multi-bulk reply (see the protocol specification for more information). + # If just a single key is specified, then this command produces the same + # result as SELEMENTS. Actually SELEMENTS is just syntax sugar for SINTERSECT. + # + # If at least one of the specified keys does not exist or does not hold a set + # value an error is returned. + # + # Return value: multi bulk reply + def set_intersect(*keys) + timeout_retry(3, 3){ + write "SINTER #{keys.join(' ')}\r\n" + Set.new(multi_bulk_reply) + } + end + + # SINTERSTORE dstkey key1 key2 ... keyN + # + # Time complexity O(N*M) worst case where N is the cardinality of the smallest set and M the number of sets + # This commnad works exactly like SINTER but instead of being returned the resulting set is sotred as dstkey. + # + # Return value: status code reply + def set_inter_store(destkey, *keys) + timeout_retry(3, 3){ + write "SINTERSTORE #{destkey} #{keys.join(' ')}\r\n" + status_code_reply + } + end + + # SMEMBERS key + # + # Time complexity O(N) + # Return all the members (elements) of the set value stored at key. + # This is just syntax glue for SINTERSECT. 
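+  #
+  # Usage sketch (hypothetical key and members, not taken from the specs):
+  #
+  #   r = Redis.new
+  #   r.set_add 'tags', 'ruby'
+  #   r.set_add 'tags', 'redis'
+  #   r.set_members 'tags'   # => a Set containing "ruby" and "redis"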
+ def set_members(key) + timeout_retry(3, 3){ + write "SMEMBERS #{key}\r\n" + Set.new(multi_bulk_reply) + } + end + + + # SORT key [BY pattern] [GET|DEL|INCR|DECR pattern] [ASC|DESC] [LIMIT start count] + # Sort the elements contained in the List or Set value at key. By default sorting is + # numeric with elements being compared as double precision floating point numbers. + # This is the simplest form of SORT. + # SORT mylist + # + # Assuming mylist contains a list of numbers, the return value will be the list of + # numbers ordered from the smallest to the bigger number. In order to get the sorting + # in reverse order use DESC: + # SORT mylist DESC + # + # ASC is also supported but it's the default so you don't really need it. If you + # want to sort lexicographically use ALPHA. Note that Redis is utf-8 aware + # assuming you set the right value for the LC_COLLATE environment variable. + # + # Sort is able to limit the number of results using the LIMIT option: + # SORT mylist LIMIT 0 10 + # In the above example SORT will return only 10 elements, starting from the first one + # (star is zero-based). Almost all the sort options can be mixed together. For example: + # SORT mylist LIMIT 0 10 ALPHA DESC + # Will sort mylist lexicographically, in descending order, returning only the first + # 10 elements. + # Sometimes you want to sort elements using external keys as weights to compare + # instead to compare the actual List or Set elements. For example the list mylist + # may contain the elements 1, 2, 3, 4, that are just the unique IDs of objects + # stored at object_1, object_2, object_3 and object_4, while the keys weight_1, + # weight_2, weight_3 and weight_4 can contain weights we want to use to sort the + # list of objects identifiers. We can use the following command: + # SORT mylist BY weight_* + # the BY option takes a pattern (weight_* in our example) that is used in order to + # generate the key names of the weights used for sorting. Weight key names are obtained + # substituting the first occurrence of * with the actual value of the elements on the + # list (1,2,3,4 in our example). + # Still our previous example will return just the sorted IDs. Often it is needed to + # get the actual objects sorted (object_1, ..., object_4 in the example). We can do + # it with the following command: + # SORT mylist BY weight_* GET object_* + # Note that GET can be used multiple times in order to get more key for every + # element of the original List or Set sorted. + + # redis.sort 'index', :by => 'weight_*', + # :order => 'DESC ALPHA', + # :limit => [0,10], + # :get => 'obj_*' + def sort(key, opts={}) + cmd = "SORT #{key}" + cmd << " BY #{opts[:by]}" if opts[:by] + cmd << " GET #{opts[:get]}" if opts[:get] + cmd << " INCR #{opts[:incr]}" if opts[:incr] + cmd << " DEL #{opts[:del]}" if opts[:del] + cmd << " DECR #{opts[:decr]}" if opts[:decr] + cmd << " #{opts[:order]}" if opts[:order] + cmd << " LIMIT #{opts[:limit].join(' ')}" if opts[:limit] + cmd << "\r\n" + write cmd + multi_bulk_reply + end + + # ADMIN functions for redis + + # SELECT index + # + # Select the DB with having the specified zero-based numeric index. + # For default every new client connection is automatically selected to DB 0. + # Return value: status code reply + def select_db(index) + timeout_retry(3, 3){ + write "SELECT #{index}\r\n" + status_code_reply + } + end + + # MOVE key dbindex + # + # Move the specified key from the currently selected DB to the specified + # destination DB. 
Note that this command returns 1 only if the key was
+  # successfully moved, and 0 if the target key was already there or if
+  # the source key was not found at all, so it is possible to use MOVE
+  # as a locking primitive.
+  #
+  # *Return value: integer reply, specifically:
+  #
+  # 1 if the key was moved 0 if the key was not moved because already
+  # present on the target DB or was not found in the current DB. -3
+  # if the destination DB is the same as the source DB -4 if the database
+  # index is out of range
+  def move(key, index)
+    timeout_retry(3, 3){
+      write "MOVE #{key} #{index}\r\n"
+      case integer_reply
+      when 1
+        true
+      when 0
+        false
+      when -3
+        raise RedisError, "destination db same as source db"
+      when -4
+        raise RedisError, "db index is out of range"
+      end
+    }
+  end
+
+  # SAVE
+  #
+  # Save the DB on disk. The server hangs while the saving is not completed,
+  # no connection is served in the meanwhile. An OK code is returned when
+  # the DB was fully stored on disk.
+  # Return value: status code reply
+  def save
+    timeout_retry(3, 3){
+      write "SAVE\r\n"
+      status_code_reply
+    }
+  end
+
+  # BGSAVE
+  #
+  # Save the DB in background. The OK code is immediately returned. Redis
+  # forks, the parent continues to serve the clients, the child saves
+  # the DB on disk then exits. A client may be able to check if the operation
+  # succeeded using the LASTSAVE command.
+  # Return value: status code reply
+  def bgsave
+    timeout_retry(3, 3){
+      write "BGSAVE\r\n"
+      status_code_reply
+    }
+  end
+
+  # LASTSAVE
+  #
+  # Return the UNIX TIME of the last DB save executed with success. A client
+  # may check if a BGSAVE command succeeded reading the LASTSAVE value, then
+  # issuing a BGSAVE command and checking at regular intervals every N seconds
+  # if LASTSAVE changed.
+  #
+  # Return value: integer reply (UNIX timestamp)
+  def lastsave
+    timeout_retry(3, 3){
+      write "LASTSAVE\r\n"
+      integer_reply
+    }
+  end
+
+  def quit
+    timeout_retry(3, 3){
+      write "QUIT\r\n"
+      status_code_reply
+    }
+  end
+
+  private
+
+  def redis_unmarshal(obj)
+    if obj[0] == 4
+      Marshal.load(obj)
+    else
+      obj
+    end
+  end
+
+  def redis_marshal(obj)
+    case obj
+    when String, Integer
+      obj
+    else
+      Marshal.dump(obj)
+    end
+  end
+
+  def close
+    socket.close unless socket.closed?
+  end
+
+  def timeout_retry(time, retries, &block)
+    timeout(time, &block)
+  rescue TimeoutError
+    retries -= 1
+    retry unless retries < 0
+  end
+
+  def socket
+    connect if (!@socket or @socket.closed?)
+ @socket + end + + def connect + @socket = TCPSocket.new(@opts[:host], @opts[:port]) + @socket.sync = true + @socket + end + + def read(length, nodebug=true) + retries = 3 + res = socket.read(length) + puts "read: #{res}" if @opts[:debug] && nodebug + res + rescue + retries -= 1 + if retries > 0 + connect + retry + end + end + + def write(data) + puts "write: #{data}" if @opts[:debug] + retries = 3 + socket.write(data) + rescue + retries -= 1 + if retries > 0 + connect + retry + end + end + + def nibble_end + read(2) + end + + def read_proto + print "read proto: " if @opts[:debug] + buff = "" + while (char = read(1, false)) + print char if @opts[:debug] + buff << char + break if buff[-2..-1] == CTRLF + end + puts if @opts[:debug] + buff[0..-3] + end + + + def status_code_reply + res = read_proto + if res.index(ERRCODE) == 0 + raise RedisError, res + else + true + end + end + + def bulk_reply + res = read_proto + if res.index(ERRCODE) == 0 + err = read(res.to_i.abs) + nibble_end + raise RedisError, err + elsif res != NIL + val = read(res.to_i.abs) + nibble_end + val + else + nil + end + end + + + def multi_bulk_reply + res = read_proto + if res.index(ERRCODE) == 0 + err = read(res.to_i.abs) + nibble_end + raise RedisError, err + elsif res == NIL + nil + else + items = Integer(res) + list = [] + items.times do + len = Integer(read_proto) + if len == -1 + nil + else + list << read(len) + end + nibble_end + end + list + end + end + + def single_line_reply + read_proto + end + + def integer_reply + Integer(read_proto) + end + +end diff --git a/client-libraries/ruby/spec/redis_spec.rb b/client-libraries/ruby/spec/redis_spec.rb new file mode 100644 index 00000000..88c46f79 --- /dev/null +++ b/client-libraries/ruby/spec/redis_spec.rb @@ -0,0 +1,267 @@ +require File.dirname(__FILE__) + '/spec_helper' + +class Foo + attr_accessor :bar + def initialize(bar) + @bar = bar + end + + def ==(other) + @bar == other.bar + end +end + +describe "redis" do + before do + @r = Redis.new + @r.select_db(15) # use database 15 for testing so we dont accidentally step on you real data + @r['foo'] = 'bar' + end + + after do + @r.keys('*').each {|k| @r.delete k } + end + + it "should properly marshall objects" do + class MyFail; def fail; 'it will' end; end + + @r['fail'] = MyFail.new + @r['fail'].fail.should == 'it will' + + end + + it "should be able to GET a key" do + @r['foo'].should == 'bar' + end + + it "should be able to SET a key" do + @r['foo'] = 'nik' + @r['foo'].should == 'nik' + end + + it "should be able to SETNX(set_unless_exists)" do + @r['foo'] = 'nik' + @r['foo'].should == 'nik' + @r.set_unless_exists 'foo', 'bar' + @r['foo'].should == 'nik' + end + + it "should be able to INCR(increment) a key" do + @r.delete('counter') + @r.incr('counter').should == 1 + @r.incr('counter').should == 2 + @r.incr('counter').should == 3 + end + + it "should be able to DECR(decrement) a key" do + @r.delete('counter') + @r.incr('counter').should == 1 + @r.incr('counter').should == 2 + @r.incr('counter').should == 3 + @r.decr('counter').should == 2 + @r.decr('counter').should == 1 + @r.decr('counter').should == 0 + end + + it "should be able to RANDKEY(return a random key)" do + @r.randkey.should_not be_nil + end + + it "should be able to RENAME a key" do + @r.delete 'foo' + @r.delete 'bar' + @r['foo'] = 'hi' + @r.rename! 
'foo', 'bar' + @r['bar'].should == 'hi' + end + + it "should be able to RENAMENX(rename unless the new key already exists) a key" do + @r.delete 'foo' + @r.delete 'bar' + @r['foo'] = 'hi' + @r['bar'] = 'ohai' + lambda {@r.rename 'foo', 'bar'}.should raise_error(RedisError) + @r['bar'].should == 'ohai' + end + + it "should be able to EXISTS(check if key exists)" do + @r['foo'] = 'nik' + @r.key?('foo').should be_true + @r.delete 'foo' + @r.key?('foo').should be_false + end + + it "should be able to KEYS(glob for keys)" do + @r.keys("f*").each do |key| + @r.delete key + end + @r['f'] = 'nik' + @r['fo'] = 'nak' + @r['foo'] = 'qux' + @r.keys("f*").sort.should == ['f','fo', 'foo'].sort + end + + it "should be able to check the TYPE of a key" do + @r['foo'] = 'nik' + @r.type?('foo').should == "string" + @r.delete 'foo' + @r.type?('foo').should == "none" + end + + it "should be able to push to the head of a list" do + @r.push_head "list", 'hello' + @r.push_head "list", 42 + @r.type?('list').should == "list" + @r.list_length('list').should == 2 + @r.pop_head('list').should == '42' + @r.delete('list') + end + + it "should be able to push to the tail of a list" do + @r.push_tail "list", 'hello' + @r.type?('list').should == "list" + @r.list_length('list').should == 1 + @r.delete('list') + end + + it "should be able to pop the tail of a list" do + @r.push_tail "list", 'hello' + @r.push_tail "list", 'goodbye' + @r.type?('list').should == "list" + @r.list_length('list').should == 2 + @r.pop_tail('list').should == 'goodbye' + @r.delete('list') + end + + it "should be able to pop the head of a list" do + @r.push_tail "list", 'hello' + @r.push_tail "list", 'goodbye' + @r.type?('list').should == "list" + @r.list_length('list').should == 2 + @r.pop_head('list').should == 'hello' + @r.delete('list') + end + + it "should be able to get the length of a list" do + @r.push_tail "list", 'hello' + @r.push_tail "list", 'goodbye' + @r.type?('list').should == "list" + @r.list_length('list').should == 2 + @r.delete('list') + end + + it "should be able to get a range of values from a list" do + @r.push_tail "list", 'hello' + @r.push_tail "list", 'goodbye' + @r.push_tail "list", '1' + @r.push_tail "list", '2' + @r.push_tail "list", '3' + @r.type?('list').should == "list" + @r.list_length('list').should == 5 + @r.list_range('list', 2, -1).should == ['1', '2', '3'] + @r.delete('list') + end + + it "should be able to trim a list" do + @r.push_tail "list", 'hello' + @r.push_tail "list", 'goodbye' + @r.push_tail "list", '1' + @r.push_tail "list", '2' + @r.push_tail "list", '3' + @r.type?('list').should == "list" + @r.list_length('list').should == 5 + @r.list_trim 'list', 0, 1 + @r.list_length('list').should == 2 + @r.list_range('list', 0, -1).should == ['hello', 'goodbye'] + @r.delete('list') + end + + it "should be able to get a value by indexing into a list" do + @r.push_tail "list", 'hello' + @r.push_tail "list", 'goodbye' + @r.type?('list').should == "list" + @r.list_length('list').should == 2 + @r.list_index('list', 1).should == 'goodbye' + @r.delete('list') + end + + it "should be able to set a value by indexing into a list" do + @r.push_tail "list", 'hello' + @r.push_tail "list", 'hello' + @r.type?('list').should == "list" + @r.list_length('list').should == 2 + @r.list_set('list', 1, 'goodbye').should be_true + @r.list_index('list', 1).should == 'goodbye' + @r.delete('list') + end + + it "should be able add members to a set" do + @r.set_add "set", 'key1' + @r.set_add "set", 'key2' + @r.type?('set').should == "set" + 
@r.set_count('set').should == 2 + @r.set_members('set').sort.should == ['key1', 'key2'].sort + @r.delete('set') + end + + it "should be able delete members to a set" do + @r.set_add "set", 'key1' + @r.set_add "set", 'key2' + @r.type?('set').should == "set" + @r.set_count('set').should == 2 + @r.set_members('set').should == Set.new(['key1', 'key2']) + @r.set_delete('set', 'key1') + @r.set_count('set').should == 1 + @r.set_members('set').should == Set.new(['key2']) + @r.delete('set') + end + + it "should be able count the members of a set" do + @r.set_add "set", 'key1' + @r.set_add "set", 'key2' + @r.type?('set').should == "set" + @r.set_count('set').should == 2 + @r.delete('set') + end + + it "should be able test for set membership" do + @r.set_add "set", 'key1' + @r.set_add "set", 'key2' + @r.type?('set').should == "set" + @r.set_count('set').should == 2 + @r.set_member?('set', 'key1').should be_true + @r.set_member?('set', 'key2').should be_true + @r.set_member?('set', 'notthere').should be_false + @r.delete('set') + end + + it "should be able to do set intersection" do + @r.set_add "set", 'key1' + @r.set_add "set", 'key2' + @r.set_add "set2", 'key2' + @r.set_intersect('set', 'set2').should == Set.new(['key2']) + @r.delete('set') + end + + it "should be able to do set intersection and store the results in a key" do + @r.set_add "set", 'key1' + @r.set_add "set", 'key2' + @r.set_add "set2", 'key2' + @r.set_inter_store('newone', 'set', 'set2') + @r.set_members('newone').should == Set.new(['key2']) + @r.delete('set') + end + + it "should be able to do crazy SORT queries" do + @r['dog_1'] = 'louie' + @r.push_tail 'dogs', 1 + @r['dog_2'] = 'lucy' + @r.push_tail 'dogs', 2 + @r['dog_3'] = 'max' + @r.push_tail 'dogs', 3 + @r['dog_4'] = 'taj' + @r.push_tail 'dogs', 4 + @r.sort('dogs', :get => 'dog_*', :limit => [0,1]).should == ['louie'] + @r.sort('dogs', :get => 'dog_*', :limit => [0,1], :order => 'desc alpha').should == ['taj'] + end +end \ No newline at end of file diff --git a/client-libraries/ruby/spec/spec_helper.rb b/client-libraries/ruby/spec/spec_helper.rb new file mode 100644 index 00000000..55c7855c --- /dev/null +++ b/client-libraries/ruby/spec/spec_helper.rb @@ -0,0 +1,4 @@ +require 'rubygems' +$TESTING=true +$:.push File.join(File.dirname(__FILE__), '..', 'lib') +require 'redis' diff --git a/client-libraries/ruby/tasks/redis.tasks.rb b/client-libraries/ruby/tasks/redis.tasks.rb new file mode 100644 index 00000000..67b9499f --- /dev/null +++ b/client-libraries/ruby/tasks/redis.tasks.rb @@ -0,0 +1,116 @@ +# Inspired by rabbitmq.rake the Redbox project at http://github.com/rick/redbox/tree/master +require 'fileutils' + +class RedisRunner + + def self.redisdir + "/tmp/redis/" + end + + def self.redisconfdir + '/etc/redis.conf' + end + + def self.dtach_socket + '/tmp/redis.dtach' + end + + # Just check for existance of dtach socket + def self.running? + File.exists? 
dtach_socket + end + + def self.start + puts 'Detach with Ctrl+\ Re-attach with rake redis:attach' + sleep 3 + exec "dtach -A #{dtach_socket} redis-server #{redisconfdir}" + end + + def self.attach + exec "dtach -a #{dtach_socket}" + end + + def self.stop + sh 'killall redis-server' + end + +end + +namespace :redis do + + desc 'About redis' + task :about do + puts "\nSee http://code.google.com/p/redis/ for information about redis.\n\n" + end + + desc 'Start redis' + task :start do + RedisRunner.start + end + + desc 'Stop redis' + task :stop do + RedisRunner.stop + end + + desc 'Attach to redis dtach socket' + task :attach do + RedisRunner.attach + end + + desc 'Install the lastest redis from svn' + task :install => [:about, :download, :make] do + sh 'sudo cp /tmp/redis/redis-server /usr/bin/' + sh 'sudo cp /tmp/redis/redis-benchmark /usr/bin/' + puts 'Installed redis-server and redis-benchmark to /usr/bin/' + unless File.exists?('/etc/redis.conf') + sh 'sudo cp /tmp/redis/redis.conf /etc/' + puts "Installed redis.conf to /etc/ \n You should look at this file!" + end + end + + task :make do + sh "cd #{RedisRunner.redisdir} && make clean" + sh "cd #{RedisRunner.redisdir} && make" + end + + desc "Download package" + task :download do + system 'svn checkout http://redis.googlecode.com/svn/trunk /tmp/redis' unless File.exists?(RedisRunner.redisdir) + system 'svn up' if File.exists?("#{RedisRunner.redisdir}/.svn") + end + +end + +namespace :dtach do + + desc 'About dtach' + task :about do + puts "\nSee http://dtach.sourceforge.net/ for information about dtach.\n\n" + end + + desc 'Install dtach 0.8 from source' + task :install => [:about] do + + Dir.chdir('/tmp/') + unless File.exists?('/tmp/dtach-0.8.tar.gz') + require 'net/http' + + Net::HTTP.start('superb-west.dl.sourceforge.net') do |http| + resp = http.get('/sourceforge/dtach/dtach-0.8.tar.gz') + open('/tmp/dtach-0.8.tar.gz', 'wb') do |file| file.write(resp.body) end + end + end + + unless File.directory?('/tmp/dtach-0.8') + system('tar xzf dtach-0.8.tar.gz') + end + + Dir.chdir('/tmp/dtach-0.8/') + sh 'cd /tmp/dtach-0.8/ && ./configure && make' + sh 'sudo cp /tmp/dtach-0.8/dtach /usr/bin/' + + puts 'Dtach successfully installed to /usr/bin.' + end +end + \ No newline at end of file diff --git a/dict.c b/dict.c new file mode 100644 index 00000000..2d186c1d --- /dev/null +++ b/dict.c @@ -0,0 +1,579 @@ +/* Hash Tables Implementation. + * + * This file implements in memory hash tables with insert/del/replace/find/ + * get-random-element operations. Hash tables will auto resize if needed + * tables of power of two in size are used, collisions are handled by + * chaining. See the source code for more information... :) + * + * Copyright (c) 2006-2009, Salvatore Sanfilippo + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include +#include +#include +#include +#include + +#include "dict.h" +#include "zmalloc.h" + +/* ---------------------------- Utility funcitons --------------------------- */ + +static void _dictPanic(const char *fmt, ...) +{ + va_list ap; + + va_start(ap, fmt); + fprintf(stderr, "\nDICT LIBRARY PANIC: "); + vfprintf(stderr, fmt, ap); + fprintf(stderr, "\n\n"); + va_end(ap); +} + +/* ------------------------- Heap Management Wrappers------------------------ */ + +static void *_dictAlloc(int size) +{ + void *p = zmalloc(size); + if (p == NULL) + _dictPanic("Out of memory"); + return p; +} + +static void _dictFree(void *ptr) { + zfree(ptr); +} + +/* -------------------------- private prototypes ---------------------------- */ + +static int _dictExpandIfNeeded(dict *ht); +static unsigned int _dictNextPower(unsigned int size); +static int _dictKeyIndex(dict *ht, const void *key); +static int _dictInit(dict *ht, dictType *type, void *privDataPtr); + +/* -------------------------- hash functions -------------------------------- */ + +/* Thomas Wang's 32 bit Mix Function */ +unsigned int dictIntHashFunction(unsigned int key) +{ + key += ~(key << 15); + key ^= (key >> 10); + key += (key << 3); + key ^= (key >> 6); + key += ~(key << 11); + key ^= (key >> 16); + return key; +} + +/* Identity hash function for integer keys */ +unsigned int dictIdentityHashFunction(unsigned int key) +{ + return key; +} + +/* Generic hash function (a popular one from Bernstein). + * I tested a few and this was the best. */ +unsigned int dictGenHashFunction(const unsigned char *buf, int len) { + unsigned int hash = 5381; + + while (len--) + hash = ((hash << 5) + hash) + (*buf++); /* hash * 33 + c */ + return hash; +} + +/* ----------------------------- API implementation ------------------------- */ + +/* Reset an hashtable already initialized with ht_init(). + * NOTE: This function should only called by ht_destroy(). 
*/ +static void _dictReset(dict *ht) +{ + ht->table = NULL; + ht->size = 0; + ht->sizemask = 0; + ht->used = 0; +} + +/* Create a new hash table */ +dict *dictCreate(dictType *type, + void *privDataPtr) +{ + dict *ht = _dictAlloc(sizeof(*ht)); + + _dictInit(ht,type,privDataPtr); + return ht; +} + +/* Initialize the hash table */ +int _dictInit(dict *ht, dictType *type, + void *privDataPtr) +{ + _dictReset(ht); + ht->type = type; + ht->privdata = privDataPtr; + return DICT_OK; +} + +/* Resize the table to the minimal size that contains all the elements, + * but with the invariant of a USER/BUCKETS ration near to <= 1 */ +int dictResize(dict *ht) +{ + int minimal = ht->used; + + if (minimal < DICT_HT_INITIAL_SIZE) + minimal = DICT_HT_INITIAL_SIZE; + return dictExpand(ht, minimal); +} + +/* Expand or create the hashtable */ +int dictExpand(dict *ht, unsigned int size) +{ + dict n; /* the new hashtable */ + unsigned int realsize = _dictNextPower(size), i; + + /* the size is invalid if it is smaller than the number of + * elements already inside the hashtable */ + if (ht->used > size) + return DICT_ERR; + + _dictInit(&n, ht->type, ht->privdata); + n.size = realsize; + n.sizemask = realsize-1; + n.table = _dictAlloc(realsize*sizeof(dictEntry*)); + + /* Initialize all the pointers to NULL */ + memset(n.table, 0, realsize*sizeof(dictEntry*)); + + /* Copy all the elements from the old to the new table: + * note that if the old hash table is empty ht->size is zero, + * so dictExpand just creates an hash table. */ + n.used = ht->used; + for (i = 0; i < ht->size && ht->used > 0; i++) { + dictEntry *he, *nextHe; + + if (ht->table[i] == NULL) continue; + + /* For each hash entry on this slot... */ + he = ht->table[i]; + while(he) { + unsigned int h; + + nextHe = he->next; + /* Get the new element index */ + h = dictHashKey(ht, he->key) & n.sizemask; + he->next = n.table[h]; + n.table[h] = he; + ht->used--; + /* Pass to the next element */ + he = nextHe; + } + } + assert(ht->used == 0); + _dictFree(ht->table); + + /* Remap the new hashtable in the old */ + *ht = n; + return DICT_OK; +} + +/* Add an element to the target hash table */ +int dictAdd(dict *ht, void *key, void *val) +{ + int index; + dictEntry *entry; + + /* Get the index of the new element, or -1 if + * the element already exists. */ + if ((index = _dictKeyIndex(ht, key)) == -1) + return DICT_ERR; + + /* Allocates the memory and stores key */ + entry = _dictAlloc(sizeof(*entry)); + entry->next = ht->table[index]; + ht->table[index] = entry; + + /* Set the hash entry fields. */ + dictSetHashKey(ht, entry, key); + dictSetHashVal(ht, entry, val); + ht->used++; + return DICT_OK; +} + +/* Add an element, discarding the old if the key already exists */ +int dictReplace(dict *ht, void *key, void *val) +{ + dictEntry *entry; + + /* Try to add the element. If the key + * does not exists dictAdd will suceed. 
*/ + if (dictAdd(ht, key, val) == DICT_OK) + return DICT_OK; + /* It already exists, get the entry */ + entry = dictFind(ht, key); + /* Free the old value and set the new one */ + dictFreeEntryVal(ht, entry); + dictSetHashVal(ht, entry, val); + return DICT_OK; +} + +/* Search and remove an element */ +static int dictGenericDelete(dict *ht, const void *key, int nofree) +{ + unsigned int h; + dictEntry *he, *prevHe; + + if (ht->size == 0) + return DICT_ERR; + h = dictHashKey(ht, key) & ht->sizemask; + he = ht->table[h]; + + prevHe = NULL; + while(he) { + if (dictCompareHashKeys(ht, key, he->key)) { + /* Unlink the element from the list */ + if (prevHe) + prevHe->next = he->next; + else + ht->table[h] = he->next; + if (!nofree) { + dictFreeEntryKey(ht, he); + dictFreeEntryVal(ht, he); + } + _dictFree(he); + ht->used--; + return DICT_OK; + } + prevHe = he; + he = he->next; + } + return DICT_ERR; /* not found */ +} + +int dictDelete(dict *ht, const void *key) { + return dictGenericDelete(ht,key,0); +} + +int dictDeleteNoFree(dict *ht, const void *key) { + return dictGenericDelete(ht,key,1); +} + +/* Destroy an entire hash table */ +int _dictClear(dict *ht) +{ + unsigned int i; + + /* Free all the elements */ + for (i = 0; i < ht->size && ht->used > 0; i++) { + dictEntry *he, *nextHe; + + if ((he = ht->table[i]) == NULL) continue; + while(he) { + nextHe = he->next; + dictFreeEntryKey(ht, he); + dictFreeEntryVal(ht, he); + _dictFree(he); + ht->used--; + he = nextHe; + } + } + /* Free the table and the allocated cache structure */ + _dictFree(ht->table); + /* Re-initialize the table */ + _dictReset(ht); + return DICT_OK; /* never fails */ +} + +/* Clear & Release the hash table */ +void dictRelease(dict *ht) +{ + _dictClear(ht); + _dictFree(ht); +} + +dictEntry *dictFind(dict *ht, const void *key) +{ + dictEntry *he; + unsigned int h; + + if (ht->size == 0) return NULL; + h = dictHashKey(ht, key) & ht->sizemask; + he = ht->table[h]; + while(he) { + if (dictCompareHashKeys(ht, key, he->key)) + return he; + he = he->next; + } + return NULL; +} + +dictIterator *dictGetIterator(dict *ht) +{ + dictIterator *iter = _dictAlloc(sizeof(*iter)); + + iter->ht = ht; + iter->index = -1; + iter->entry = NULL; + iter->nextEntry = NULL; + return iter; +} + +dictEntry *dictNext(dictIterator *iter) +{ + while (1) { + if (iter->entry == NULL) { + iter->index++; + if (iter->index >= + (signed)iter->ht->size) break; + iter->entry = iter->ht->table[iter->index]; + } else { + iter->entry = iter->nextEntry; + } + if (iter->entry) { + /* We need to save the 'next' here, the iterator user + * may delete the entry we are returning. */ + iter->nextEntry = iter->entry->next; + return iter->entry; + } + } + return NULL; +} + +void dictReleaseIterator(dictIterator *iter) +{ + _dictFree(iter); +} + +/* Return a random entry from the hash table. Useful to + * implement randomized algorithms */ +dictEntry *dictGetRandomKey(dict *ht) +{ + dictEntry *he; + unsigned int h; + int listlen, listele; + + if (ht->size == 0) return NULL; + do { + h = random() & ht->sizemask; + he = ht->table[h]; + } while(he == NULL); + + /* Now we found a non empty bucket, but it is a linked + * list and we need to get a random element from the list. + * The only sane way to do so is to count the element and + * select a random index. 
*/ + listlen = 0; + while(he) { + he = he->next; + listlen++; + } + listele = random() % listlen; + he = ht->table[h]; + while(listele--) he = he->next; + return he; +} + +/* ------------------------- private functions ------------------------------ */ + +/* Expand the hash table if needed */ +static int _dictExpandIfNeeded(dict *ht) +{ + /* If the hash table is empty expand it to the intial size, + * if the table is "full" dobule its size. */ + if (ht->size == 0) + return dictExpand(ht, DICT_HT_INITIAL_SIZE); + if (ht->used == ht->size) + return dictExpand(ht, ht->size*2); + return DICT_OK; +} + +/* Our hash table capability is a power of two */ +static unsigned int _dictNextPower(unsigned int size) +{ + unsigned int i = DICT_HT_INITIAL_SIZE; + + if (size >= 2147483648U) + return 2147483648U; + while(1) { + if (i >= size) + return i; + i *= 2; + } +} + +/* Returns the index of a free slot that can be populated with + * an hash entry for the given 'key'. + * If the key already exists, -1 is returned. */ +static int _dictKeyIndex(dict *ht, const void *key) +{ + unsigned int h; + dictEntry *he; + + /* Expand the hashtable if needed */ + if (_dictExpandIfNeeded(ht) == DICT_ERR) + return -1; + /* Compute the key hash value */ + h = dictHashKey(ht, key) & ht->sizemask; + /* Search if this slot does not already contain the given key */ + he = ht->table[h]; + while(he) { + if (dictCompareHashKeys(ht, key, he->key)) + return -1; + he = he->next; + } + return h; +} + +void dictEmpty(dict *ht) { + _dictClear(ht); +} + +#define DICT_STATS_VECTLEN 50 +void dictPrintStats(dict *ht) { + unsigned int i, slots = 0, chainlen, maxchainlen = 0; + unsigned int totchainlen = 0; + unsigned int clvector[DICT_STATS_VECTLEN]; + + if (ht->used == 0) { + printf("No stats available for empty dictionaries\n"); + return; + } + + for (i = 0; i < DICT_STATS_VECTLEN; i++) clvector[i] = 0; + for (i = 0; i < ht->size; i++) { + dictEntry *he; + + if (ht->table[i] == NULL) { + clvector[0]++; + continue; + } + slots++; + /* For each hash entry on this slot... */ + chainlen = 0; + he = ht->table[i]; + while(he) { + chainlen++; + he = he->next; + } + clvector[(chainlen < DICT_STATS_VECTLEN) ? 
chainlen : (DICT_STATS_VECTLEN-1)]++; + if (chainlen > maxchainlen) maxchainlen = chainlen; + totchainlen += chainlen; + } + printf("Hash table stats:\n"); + printf(" table size: %d\n", ht->size); + printf(" number of elements: %d\n", ht->used); + printf(" different slots: %d\n", slots); + printf(" max chain length: %d\n", maxchainlen); + printf(" avg chain length (counted): %.02f\n", (float)totchainlen/slots); + printf(" avg chain length (computed): %.02f\n", (float)ht->used/slots); + printf(" Chain length distribution:\n"); + for (i = 0; i < DICT_STATS_VECTLEN-1; i++) { + if (clvector[i] == 0) continue; + printf(" %s%d: %d (%.02f%%)\n",(i == DICT_STATS_VECTLEN-1)?">= ":"", i, clvector[i], ((float)clvector[i]/ht->size)*100); + } +} + +/* ----------------------- StringCopy Hash Table Type ------------------------*/ + +static unsigned int _dictStringCopyHTHashFunction(const void *key) +{ + return dictGenHashFunction(key, strlen(key)); +} + +static void *_dictStringCopyHTKeyDup(void *privdata, const void *key) +{ + int len = strlen(key); + char *copy = _dictAlloc(len+1); + DICT_NOTUSED(privdata); + + memcpy(copy, key, len); + copy[len] = '\0'; + return copy; +} + +static void *_dictStringKeyValCopyHTValDup(void *privdata, const void *val) +{ + int len = strlen(val); + char *copy = _dictAlloc(len+1); + DICT_NOTUSED(privdata); + + memcpy(copy, val, len); + copy[len] = '\0'; + return copy; +} + +static int _dictStringCopyHTKeyCompare(void *privdata, const void *key1, + const void *key2) +{ + DICT_NOTUSED(privdata); + + return strcmp(key1, key2) == 0; +} + +static void _dictStringCopyHTKeyDestructor(void *privdata, void *key) +{ + DICT_NOTUSED(privdata); + + _dictFree((void*)key); /* ATTENTION: const cast */ +} + +static void _dictStringKeyValCopyHTValDestructor(void *privdata, void *val) +{ + DICT_NOTUSED(privdata); + + _dictFree((void*)val); /* ATTENTION: const cast */ +} + +dictType dictTypeHeapStringCopyKey = { + _dictStringCopyHTHashFunction, /* hash function */ + _dictStringCopyHTKeyDup, /* key dup */ + NULL, /* val dup */ + _dictStringCopyHTKeyCompare, /* key compare */ + _dictStringCopyHTKeyDestructor, /* key destructor */ + NULL /* val destructor */ +}; + +/* This is like StringCopy but does not auto-duplicate the key. + * It's used for intepreter's shared strings. */ +dictType dictTypeHeapStrings = { + _dictStringCopyHTHashFunction, /* hash function */ + NULL, /* key dup */ + NULL, /* val dup */ + _dictStringCopyHTKeyCompare, /* key compare */ + _dictStringCopyHTKeyDestructor, /* key destructor */ + NULL /* val destructor */ +}; + +/* This is like StringCopy but also automatically handle dynamic + * allocated C strings as values. */ +dictType dictTypeHeapStringCopyKeyValue = { + _dictStringCopyHTHashFunction, /* hash function */ + _dictStringCopyHTKeyDup, /* key dup */ + _dictStringKeyValCopyHTValDup, /* val dup */ + _dictStringCopyHTKeyCompare, /* key compare */ + _dictStringCopyHTKeyDestructor, /* key destructor */ + _dictStringKeyValCopyHTValDestructor, /* val destructor */ +}; diff --git a/dict.h b/dict.h new file mode 100644 index 00000000..ae634b49 --- /dev/null +++ b/dict.h @@ -0,0 +1,136 @@ +/* Hash Tables Implementation. + * + * This file implements in memory hash tables with insert/del/replace/find/ + * get-random-element operations. Hash tables will auto resize if needed + * tables of power of two in size are used, collisions are handled by + * chaining. See the source code for more information... 
:) + * + * Copyright (c) 2006-2009, Salvatore Sanfilippo + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __DICT_H +#define __DICT_H + +#define DICT_OK 0 +#define DICT_ERR 1 + +/* Unused arguments generate annoying warnings... */ +#define DICT_NOTUSED(V) ((void) V) + +typedef struct dictEntry { + void *key; + void *val; + struct dictEntry *next; +} dictEntry; + +typedef struct dictType { + unsigned int (*hashFunction)(const void *key); + void *(*keyDup)(void *privdata, const void *key); + void *(*valDup)(void *privdata, const void *obj); + int (*keyCompare)(void *privdata, const void *key1, const void *key2); + void (*keyDestructor)(void *privdata, void *key); + void (*valDestructor)(void *privdata, void *obj); +} dictType; + +typedef struct dict { + dictEntry **table; + dictType *type; + unsigned int size; + unsigned int sizemask; + unsigned int used; + void *privdata; +} dict; + +typedef struct dictIterator { + dict *ht; + int index; + dictEntry *entry, *nextEntry; +} dictIterator; + +/* This is the initial size of every hash table */ +#define DICT_HT_INITIAL_SIZE 16 + +/* ------------------------------- Macros ------------------------------------*/ +#define dictFreeEntryVal(ht, entry) \ + if ((ht)->type->valDestructor) \ + (ht)->type->valDestructor((ht)->privdata, (entry)->val) + +#define dictSetHashVal(ht, entry, _val_) do { \ + if ((ht)->type->valDup) \ + entry->val = (ht)->type->valDup((ht)->privdata, _val_); \ + else \ + entry->val = (_val_); \ +} while(0) + +#define dictFreeEntryKey(ht, entry) \ + if ((ht)->type->keyDestructor) \ + (ht)->type->keyDestructor((ht)->privdata, (entry)->key) + +#define dictSetHashKey(ht, entry, _key_) do { \ + if ((ht)->type->keyDup) \ + entry->key = (ht)->type->keyDup((ht)->privdata, _key_); \ + else \ + entry->key = (_key_); \ +} while(0) + +#define dictCompareHashKeys(ht, key1, key2) \ + (((ht)->type->keyCompare) ? 
\ + (ht)->type->keyCompare((ht)->privdata, key1, key2) : \ + (key1) == (key2)) + +#define dictHashKey(ht, key) (ht)->type->hashFunction(key) + +#define dictGetEntryKey(he) ((he)->key) +#define dictGetEntryVal(he) ((he)->val) +#define dictGetHashTableSize(ht) ((ht)->size) +#define dictGetHashTableUsed(ht) ((ht)->used) + +/* API */ +dict *dictCreate(dictType *type, void *privDataPtr); +int dictExpand(dict *ht, unsigned int size); +int dictAdd(dict *ht, void *key, void *val); +int dictReplace(dict *ht, void *key, void *val); +int dictDelete(dict *ht, const void *key); +int dictDeleteNoFree(dict *ht, const void *key); +void dictRelease(dict *ht); +dictEntry * dictFind(dict *ht, const void *key); +int dictResize(dict *ht); +dictIterator *dictGetIterator(dict *ht); +dictEntry *dictNext(dictIterator *iter); +void dictReleaseIterator(dictIterator *iter); +dictEntry *dictGetRandomKey(dict *ht); +void dictPrintStats(dict *ht); +unsigned int dictGenHashFunction(const unsigned char *buf, int len); +void dictEmpty(dict *ht); + +/* Hash table types */ +extern dictType dictTypeHeapStringCopyKey; +extern dictType dictTypeHeapStrings; +extern dictType dictTypeHeapStringCopyKeyValue; + +#endif /* __DICT_H */ diff --git a/doc/Benchmarks.html b/doc/Benchmarks.html new file mode 100644 index 00000000..e33e7b71 --- /dev/null +++ b/doc/Benchmarks.html @@ -0,0 +1,121 @@ + + + + + + + +
+ + + +
+
+ +Benchmarks: Contents
  How Fast is Redis?
  Latency percentiles +
+ +

Benchmarks

+ +
+ +
+ +
+

How Fast is Redis?

Redis includes the redis-benchmark utility that simulates SETs/GETs done by N clients at the same time sending M total queries (it is similar to the Apache's ab utility). Below you'll find the full output of the benchmark executed against a Linux box.

  • The test was done with 50 simultaneous clients performing 100000 requests.
  • The value SET and GET is a 256-byte string.
  • The Linux box is running Linux 2.6, it's a Xeon X3320 2.5GHz.
  • Test executed using the loopback interface (127.0.0.1).
+Results: about 110000 SETs per second, about 81000 GETs per second.

Latency percentiles

+./redis-benchmark -n 100000
+
+====== SET ======
+  100007 requests completed in 0.88 seconds
+  50 parallel clients
+  3 bytes payload
+  keep alive: 1
+
+58.50% <= 0 milliseconds
+99.17% <= 1 milliseconds
+99.58% <= 2 milliseconds
+99.85% <= 3 milliseconds
+99.90% <= 6 milliseconds
+100.00% <= 9 milliseconds
+114293.71 requests per second
+
+====== GET ======
+  100000 requests completed in 1.23 seconds
+  50 parallel clients
+  3 bytes payload
+  keep alive: 1
+
+43.12% <= 0 milliseconds
+96.82% <= 1 milliseconds
+98.62% <= 2 milliseconds
+100.00% <= 3 milliseconds
+81234.77 requests per second
+
+====== INCR ======
+  100018 requests completed in 1.46 seconds
+  50 parallel clients
+  3 bytes payload
+  keep alive: 1
+
+32.32% <= 0 milliseconds
+96.67% <= 1 milliseconds
+99.14% <= 2 milliseconds
+99.83% <= 3 milliseconds
+99.88% <= 4 milliseconds
+99.89% <= 5 milliseconds
+99.96% <= 9 milliseconds
+100.00% <= 18 milliseconds
+68458.59 requests per second
+
+====== LPUSH ======
+  100004 requests completed in 1.14 seconds
+  50 parallel clients
+  3 bytes payload
+  keep alive: 1
+
+62.27% <= 0 milliseconds
+99.74% <= 1 milliseconds
+99.85% <= 2 milliseconds
+99.86% <= 3 milliseconds
+99.89% <= 5 milliseconds
+99.93% <= 7 milliseconds
+99.96% <= 9 milliseconds
+100.00% <= 22 milliseconds
+100.00% <= 208 milliseconds
+88109.25 requests per second
+
+====== LPOP ======
+  100001 requests completed in 1.39 seconds
+  50 parallel clients
+  3 bytes payload
+  keep alive: 1
+
+54.83% <= 0 milliseconds
+97.34% <= 1 milliseconds
+99.95% <= 2 milliseconds
+99.96% <= 3 milliseconds
+99.96% <= 4 milliseconds
+100.00% <= 9 milliseconds
+100.00% <= 208 milliseconds
+71994.96 requests per second
+
Notes: changing the payload from 256 to 1024 or 4096 bytes does not change the numbers significantly (but reply packets are glued together up to 1024 bytes so GETs may be slower with big payloads). The same for the number of clients, from 50 to 256 clients I got the same numbers. With only 10 clients it starts to get a bit slower.

You can expect different results from different boxes. For example a low profile box like an Intel Core Duo T5500 clocked at 1.66GHz running Linux 2.6 will output the following:
+
+ ./redis-benchmark -q -n 100000
+SET: 53684.38 requests per second
+GET: 45497.73 requests per second
+INCR: 39370.47 requests per second
+LPUSH: 34803.41 requests per second
+LPOP: 37367.20 requests per second
+
+
+ +
+
+ + + diff --git a/doc/BgsaveCommand.html b/doc/BgsaveCommand.html new file mode 100644 index 00000000..5fab9485 --- /dev/null +++ b/doc/BgsaveCommand.html @@ -0,0 +1,39 @@ + + + + + + + +
+ + + +
+
+ +BgsaveCommand: Contents
  BGSAVE
    Return value
    See also +
+ +

BgsaveCommand

+ +
+ +
+ +
+

BGSAVE

+
Save the DB in background. The OK code is immediately returned.Redis forks, the parent continues to server the clients, the childsaves the DB on disk then exit. A client my be able to check if theoperation succeeded using the LASTSAVE command.
+

Return value

Status code reply

See also

+ +
+ +
+
+ + + diff --git a/doc/CommandReference.html b/doc/CommandReference.html new file mode 100644 index 00000000..18b00924 --- /dev/null +++ b/doc/CommandReference.html @@ -0,0 +1,44 @@ + + + + + + + +
+ + + +
+ + +

CommandReference

+ +
+ +
+ +
+

Redis Command Reference

Every command name links to a specific wiki page describing the behavior of the command.

Connection handling

  • QUIT close the connection
+

Commands operating on string values

  • SET key value set a key to a string value
  • GET key return the string value of the key
  • SETNX key value set a key to a string value if the key does not exist
  • INCR key increment the integer value of key
  • INCRBY key integer increment the integer value of key by integer
  • DECR key decrement the integer value of key
  • DECRBY key integer decrement the integer value of key by integer
  • EXISTS key test if a key exists
  • DEL key delete a key
  • TYPE key return the type of the value stored at key
+

Commands operating on the key space

  • KEYS pattern return all the keys matching a given pattern
  • RANDOMKEY return a random key from the key space
  • RENAME oldname newname rename the old key in the new one, destroying the newname key if it already exists
  • RENAMENX oldname newname rename the old key in the new one, if the newname key does not already exist
  • DBSIZE return the number of keys in the current db
+

Commands operating on lists

  • RPUSH key value Append an element to the tail of the List value at key
  • LPUSH key value Append an element to the head of the List value at key
  • LLEN key Return the length of the List value at key
  • LRANGE key start end Return a range of elements from the List at key
  • LTRIM key start end Trim the list at key to the specified range of elements
  • LINDEX key index Return the element at index position from the List at key
  • LSET key index value Set a new value as the element at index position of the List at key
  • LREM key count value Remove the first-N, last-N, or all the elements matching value from the List at key
  • LPOP key Return and remove (atomically) the first element of the List at key
  • RPOP key Return and remove (atomically) the last element of the List at key
+

Commands operating on sets

  • SADD key member Add the specified member to the Set value at key
  • SREM key member Remove the specified member from the Set value at key
  • SCARD key Return the number of elements (the cardinality) of the Set at key
  • SISMEMBER key member Test if the specified value is a member of the Set at key
  • SINTER key1 key2 ... keyN Return the intersection between the Sets stored at key1, key2, ..., keyN
  • SINTERSTORE dstkey key1 key2 ... keyN Compute the intersection between the Sets stored at key1, key2, ..., keyN, and store the resulting Set at dstkey
  • SMEMBERS key Return all the members of the Set value at key
+

Multiple databases handling commands

  • SELECT index Select the DB having the specified index
  • MOVE key dbindex Move the key from the currently selected DB to the DB having as index dbindex
  • FLUSHDB Remove all the keys of the currently selected DB
  • FLUSHALL Remove all the keys from all the databases
+

Sorting

  • SORT key BY pattern LIMIT start end GET pattern ASC|DESC ALPHA Sort a Set or a List accordingly to the specified parameters
+

Persistence control commands

  • SAVE Synchronously save the DB on disk
  • BGSAVE Asynchronously save the DB on disk
  • LASTSAVE Return the UNIX time stamp of the last successful saving of the dataset on disk
  • SHUTDOWN Synchronously save the DB on disk, then shutdown the server
+

Remote server control commands

  • INFO provide information and statistics about the server
+
+ +
+
+ + + diff --git a/doc/Credits.html b/doc/Credits.html new file mode 100644 index 00000000..b00cfa26 --- /dev/null +++ b/doc/Credits.html @@ -0,0 +1,36 @@ + + + + + + + +
+ + + +
+
+ +Credits: Contents
  Credits +
+ +

Credits

+ +
+ +
+ +
+

Credits

+
+ +
+
+ + + diff --git a/doc/DbsizeCommand.html b/doc/DbsizeCommand.html new file mode 100644 index 00000000..2c3e796e --- /dev/null +++ b/doc/DbsizeCommand.html @@ -0,0 +1,38 @@ + + + + + + + +
+ + + +
+
+ +DbsizeCommand: Contents
  DBSIZE
    Return value
    See also +
+ +

DbsizeCommand

+ +
+ +
+ +
+

DBSIZE

Return the number of keys in the currently selected database.
+

Return value

Integer reply

See also

+ +
+ +
+
+ + + diff --git a/doc/DelCommand.html b/doc/DelCommand.html new file mode 100644 index 00000000..4867af9b --- /dev/null +++ b/doc/DelCommand.html @@ -0,0 +1,42 @@ + + + + + + + +
+ + + +
+
+ +DelCommand: Contents
  DEL _key_
    Return value
    See also +
+ +

DelCommand

+ +
+ +
+ +
+

DEL _key_

+Time complexity: O(1)
Remove the specified key. If the key does not exist no operation is performed. The command always returns success.
+

Return value

Integer reply, specifically:

+1 if the key was removed
+0 if the key does not exist
+

See also

+ +
+ +
+
+ + + diff --git a/doc/DesignPatterns.html b/doc/DesignPatterns.html new file mode 100644 index 00000000..411739f1 --- /dev/null +++ b/doc/DesignPatterns.html @@ -0,0 +1,37 @@ + + + + + + + +
+ + + +
+
+ +DesignPatterns: Contents +
+ +

DesignPatterns

+ +
+ +
+ +
+
Use random keys instead of incremental keys in order to avoid a single key that gets incremented by many servers: such a key can't be distributed among servers.
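As a rough sketch of the idea in C (the "item:" key prefix and the exact key format below are just hypothetical examples), every client can derive its key names from a random value instead of a shared counter:

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

int main(void) {
    char key[64];

    /* Build a random key name such as "item:5f3a9c12e07b44d1" instead of
     * "item:1", "item:2", ... so that no single counter key has to be
     * incremented by everybody. */
    srandom((unsigned)time(NULL));
    snprintf(key, sizeof(key), "item:%08lx%08lx",
             (unsigned long)random(), (unsigned long)random());
    printf("using key: %s\n", key);
    return 0;
}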
+ +
+
+ + + diff --git a/doc/ExistsCommand.html b/doc/ExistsCommand.html new file mode 100644 index 00000000..32cc9978 --- /dev/null +++ b/doc/ExistsCommand.html @@ -0,0 +1,42 @@ + + + + + + + +
+ + + +
+
+ +ExistsCommand: Contents
  EXISTS _key_
    Return value
    See also +
+ +

ExistsCommand

+ +
+ +
+ +
+

EXISTS _key_

+Time complexity: O(1)
Test if the specified key exists. The command returns "1" if the key exists, otherwise "0" is returned. Note that even keys set with an empty string as value will return "1".
+

Return value

Integer reply, specifically:

+1 if the key exists.
+0 if the key does not exist.
+

See also

+
  • SETNX is a SET if not EXISTS atomic operation.
  • SISMEMBER test if an element is a member of a Set.
+
+ +
+
+ + + diff --git a/doc/FAQ.html b/doc/FAQ.html new file mode 100644 index 00000000..a7f8dcaf --- /dev/null +++ b/doc/FAQ.html @@ -0,0 +1,47 @@ + + + + + + + +
+ + + +
+ + +

FAQ

+ +
+ +
+ +
+

Why do I need Redis if there is already memcachedb, Tokyo Cabinet, ...?

Memcachedb is basically memcached done persistent. Redis is a different evolution +path in the key-value DB space: the idea is that the main advantages of key-value DBs +are retained without as severe a loss of comfort as plain key-value DBs impose. +So Redis offers more features:

  • Keys can store different data types, not just strings. Notably Lists and Sets. For example if you want to use Redis as a log storage system for different computers every computer can just RPUSH data to the computer_ID key. Don't want to save more than 1000 log lines per computer? Just issue a LTRIM computer_ID 0 999 command to trim the list after every push.
+
  • Another example is about Sets. Imagine building a social news site like Reddit. Every time a user upvotes a given news item you can just add to the news_ID_upmods key, holding a value of type SET, the id of the user that did the upmodding. Sets can also be used to index things. Every key can be a tag holding a SET with the IDs of all the objects associated to this tag. Using Redis set intersection you obtain the list of IDs having all these tags at the same time.
+
  • We wrote a simple Twitter Clone using just Redis as database. Download the source code from the download section and imagine to write it with a plain key-value DB without support for lists and sets... it's much harder.
+
  • Multiple DBs. Using the SELECT command the client can select different datasets. This is useful because Redis provides a MOVE atomic primitive that moves a key from a DB to another one; if the target DB already contains such a key it returns an error: this basically means a way to perform locking in distributed processing.
+
  • So what is Redis really about? The User interface with the programmer. Redis aims to export to the programmer the right tools to model a wide range of problems. Sets, Lists with O(1) push operation, lrange and ltrim, server-side fast intersection between sets, are primitives that allow to model complex problems with a key value database.
+

Isn't this key-value thing just hype?

I imagine key-value DBs, in the short term future, to be used like you use memory in a program, with lists, hashes, and so on. With Redis it's like this, but this special kind of memory containing your data structures is shared, atomic, persistent.

When we write code it is obvious, when we take data in memory, to use the most sensible data structure for the work, right? Incredibly when data is put inside a relational DB this is no longer true, and we create an absurd data model even if our need is to put data and get this data back in the same order we put it inside (an ORDER BY is required when the data should be already sorted. Strange, don't you think?).

Key-value DBs bring this back at home, to create sensible data models and use the right data structures for the problem we are trying to solve.

Can I backup a Redis DB while the server is working?

Yes you can. When Redis saves the DB it actually creates a temp file, then rename(2) that temp file name to the destination file name. So even while the server is working it is safe to save the database file just with the cp unix command. Note that you can use master-slave replication in order to have redundancy of data, but if all you need is backups, cp or scp will do the work pretty well.

What's the Redis memory footprint?

Worst case scenario: 1 million keys with the keys being the natural numbers from 0 to 999999 and the string "Hello World" as value use 100MB on my Intel MacBook (32bit). Note that the same data stored linearly in a unique string takes something like 16MB; this is the norm because with small keys and values there is a lot of overhead. Memcached will perform similarly.

With large keys/values the ratio is much better of course.

64 bit systems will use much more memory than 32 bit systems to store the same keys, especially if the keys and values are small; this is because pointers take 8 bytes in 64 bit systems. But of course the advantage is that you can have a lot of memory in 64 bit systems, so to run large Redis servers a 64 bit system is more or less required.

I like Redis high level operations and features, but I don't like that it takes everything in memory and I can't have a dataset larger than the memory. Plans to change this?

The whole key-value hype started for a reason: performances. Redis takes the whole dataset in memory and writes asynchronously on disk in order to be very fast, you have the best of both worlds: hyper-speed and persistence of data, but the price to pay is exactly this, that the dataset must fit on your computers RAM.

If the data is larger than memory, and this data is stored on disk, what happens is that the bottleneck of the disk I/O speed will start to ruin the performances. Maybe not in benchmarks, but once you have real load with distributed key accesses the data must come from disk, and the disk is damn slow. Not only that, but Redis supports higher level data structures than the plain values. To implement these things on disk is even slower.

Redis will always continue to hold the whole dataset in memory because these days scalability requires using RAM as the storage medium, and RAM is getting cheaper and cheaper. Today it is common for an entry level server to have 16 GB of RAM! And in the 64-bit era there are no longer limits to the amount of RAM you can have in theory.

Ok but I absolutely need to have a DB larger than memory, still I need the Redis features

One possible solution is to use both MySQL and Redis at the same time: basically keep the state on Redis, and all the things that get accessed very frequently: user auth tokens, Redis Lists with chronologically ordered IDs of the last N-comments, N-posts, and so on. Then use MySQL as a simple storage engine for larger data, that is just create a table with an auto-incrementing ID as primary key and a large BLOB field as data field. Access MySQL data only by primary key (the ID). The application will run the high traffic queries against Redis, but when it has to fetch the big data it will ask MySQL for the specific resource IDs.

What happens if Redis runs out of memory?

With modern operating systems malloc() returning NULL is not common, usually the server will start swapping and Redis performances will be disastrous so you'll know it's time to use more Redis servers or get more RAM.

However it is planned to add a configuration directive to tell Redis to stop accepting queries but instead to SAVE the latest data and quit if it is using more than a given amount of memory. Also the new INFO command (work in progress in this days) will report the amount of memory Redis is using so you can write scripts that monitor your Redis servers checking for critical conditions.

Update: redis SVN is able to know how much memory it is using and report it via the INFO command.

What does Redis actually mean?

Redis means two things: +
  • it's a joke on the word Redistribute (instead of using just a relational DB, redistribute your workload among Redis servers)
  • it means REmote DIctionary Server
+

Why did you start the Redis project?

In order to scale LLOOGG. But after I got the basic server working I liked the idea to share the work with other guys, and Redis was turned into an open source project. + +
+ +
+
+ + + diff --git a/doc/FlushallCommand.html b/doc/FlushallCommand.html new file mode 100644 index 00000000..85c84225 --- /dev/null +++ b/doc/FlushallCommand.html @@ -0,0 +1,39 @@ + + + + + + + +
+ + + +
+
+ +FlushallCommand: Contents
  FLUSHALL
    Return value
    See also +
+ +

FlushallCommand

+ +
+ +
+ +
+

FLUSHALL

+
Delete all the keys of all the existing databases, not just the currently selected one. This command never fails.
+

Return value

Status code reply

See also

+ +
+ +
+
+ + + diff --git a/doc/FlushdbCommand.html b/doc/FlushdbCommand.html new file mode 100644 index 00000000..6bf4ca04 --- /dev/null +++ b/doc/FlushdbCommand.html @@ -0,0 +1,39 @@ + + + + + + + +
+ + + +
+
+ +FlushdbCommand: Contents
  FLUSHDB
    Return value
    See also +
+ +

FlushdbCommand

+ +
+ +
+ +
+

FLUSHDB

+
Delete all the keys of the currently selected DB. This command never fails.
+

Return value

Status code reply

See also

+ +
+ +
+
+ + + diff --git a/doc/GetCommand.html b/doc/GetCommand.html new file mode 100644 index 00000000..50d6bf34 --- /dev/null +++ b/doc/GetCommand.html @@ -0,0 +1,39 @@ + + + + + + + +
+ + + +
+
+ +GetCommand: Contents
  GET _key_
    Return value
    See also +
+ +

GetCommand

+ +
+ +
+ +
+

GET _key_

+Time complexity: O(1)
Get the value of the specified key. If the key does not exist the special value 'nil' is returned. If the value stored at key is not a string an error is returned because GET can only handle string values.
+

Return value

Bulk reply

See also

+ +
+ +
+
+ + + diff --git a/doc/IncrCommand.html b/doc/IncrCommand.html new file mode 100644 index 00000000..676d06cb --- /dev/null +++ b/doc/IncrCommand.html @@ -0,0 +1,43 @@ + + + + + + + +
+ + + +
+ + +

IncrCommand

+ +
+ +
+ +
+

INCR _key_

+

INCRBY _key_ _integer_

+

DECR _key_ _integer_

+

DECRBY _key_ _integer_

+Time complexity: O(1)
Increment or decrement the number stored at key by one. If the key does not exist or contains a value of a wrong type, set the key to the value of "0" before performing the increment or decrement operation.
+
INCRBY and DECRBY work just like INCR and DECR but instead toincrement/decrement by 1 the increment/decrement is integer.
+

Return value

Integer reply, these commands will reply with the new value of the key after the increment or decrement.

See also

+ +
+ +
+
+ + + diff --git a/doc/InfoCommand.html b/doc/InfoCommand.html new file mode 100644 index 00000000..325deff4 --- /dev/null +++ b/doc/InfoCommand.html @@ -0,0 +1,50 @@ + + + + + + + +
+ + + +
+
+ +InfoCommand: Contents
  INFO
    Return value
    Notes
    See also +
+ +

InfoCommand

+ +
+ +
+ +
+

INFO

The info command returns different information and statistics about the server in a format that's simple to parse by computers and easy to read by humans.
+

Return value

Bulk reply, specifically in the following format:

+redis_version:0.07
+connected_clients:1
+connected_slaves:0
+used_memory:3187
+changes_since_last_save:0
+last_save_time:1237655729
+total_connections_received:1
+total_commands_processed:1
+uptime_in_seconds:25
+uptime_in_days:0
+
All the fields are in the form field:value
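As a rough sketch of how a client might split those field:value lines in C (assuming the whole INFO reply has already been read into a writable buffer):

#include <stdio.h>
#include <string.h>

/* Walk an INFO reply line by line and split every "field:value" pair.
 * The buffer is modified in place by strtok(), so it must be writable. */
void parse_info_reply(char *reply) {
    char *line = strtok(reply, "\r\n");

    while (line != NULL) {
        char *sep = strchr(line, ':');
        if (sep != NULL) {
            *sep = '\0';
            printf("field '%s' has value '%s'\n", line, sep + 1);
        }
        line = strtok(NULL, "\r\n");
    }
}

int main(void) {
    char reply[] = "redis_version:0.07\r\nconnected_clients:1\r\nused_memory:3187\r\n";
    parse_info_reply(reply);
    return 0;
}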

Notes

  • used_memory is returned in bytes, and is the total number of bytes allocated by the program using malloc.
  • uptime_in_days is redundant since the uptime in seconds already contains the full uptime information; this field is mainly present for humans.
  • changes_since_last_save does not refer to the number of key changes, but to the number of operations that produced some kind of change in the dataset.
+

See also

+ +
+ +
+
+ + + diff --git a/doc/KeysCommand.html b/doc/KeysCommand.html new file mode 100644 index 00000000..98718fc0 --- /dev/null +++ b/doc/KeysCommand.html @@ -0,0 +1,42 @@ + + + + + + + +
+ + + +
+
+ +KeysCommand: Contents
  KEYS _pattern_
    Return value
    See also +
+ +

KeysCommand

+ +
+ +
+ +
+

KEYS _pattern_

+Time complexity: O(n) (with n being the number of keys in the DB, and assuming keys and pattern of limited length)
Returns all the keys matching the glob-style pattern as space separated strings. For example if you have in the database the keys "foo" and "foobar" the command "KEYS foo*" will return "foo foobar".
+
Note that while the time complexity for this operation is O(n) the constant times are pretty low. For example Redis running on an entry level laptop can scan a 1 million keys database in 40 milliseconds. Still it's better to consider this one of the slow commands that may ruin the DB performance if not used with care.
+Glob style patterns examples: +
  • h?llo will match hello hallo hhllo
  • h*llo will match hllo heeeello +
  • h[ae]llo will match hello and hallo, but not hillo
    Use \ to escape special chars if you want to match them verbatim.

    Return value

    Bulk reply, specifically a string in the form of space separated list of keys. Note that most client libraries will return an Array of keys and not a single string with space separated keys (that is, split by " " is performed in the client library usually).

    See also

    +
    * RANDOMKEY to get the name of a randomly selected key in O(1).
+
+ +
+
+ + + diff --git a/doc/LastsaveCommand.html b/doc/LastsaveCommand.html new file mode 100644 index 00000000..c10b8024 --- /dev/null +++ b/doc/LastsaveCommand.html @@ -0,0 +1,39 @@ + + + + + + + +
+ + + +
+
+ +LastsaveCommand: Contents
  LASTSAVE
    Return value
    See also +
+ +

LastsaveCommand

+ +
+ +
+ +
+

LASTSAVE

+
Return the UNIX TIME of the last DB save executed with success. A client may check if a BGSAVE command succeeded by reading the LASTSAVE value, then issuing a BGSAVE command and checking at regular intervals every N seconds if LASTSAVE changed.
+
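A minimal sketch of that polling loop in C, assuming two hypothetical helpers (send_status_command() and send_integer_command(), not provided by Redis itself) that issue a command on an already open connection and handle its reply:

#include <unistd.h>

/* Hypothetical helpers: send a command on an open connection and either
 * discard the status reply or return the parsed integer reply. */
void send_status_command(const char *cmd);
long send_integer_command(const char *cmd);

void wait_for_bgsave_to_complete(void) {
    long before = send_integer_command("LASTSAVE");

    send_status_command("BGSAVE");               /* returns immediately */
    while (send_integer_command("LASTSAVE") == before)
        sleep(5);                                /* check every N seconds */
    /* LASTSAVE changed, so the background save completed successfully. */
}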

Return value

Integer reply, specifically an UNIX time stamp.

See also

+ +
+ +
+
+ + + diff --git a/doc/LindexCommand.html b/doc/LindexCommand.html new file mode 100644 index 00000000..8116338a --- /dev/null +++ b/doc/LindexCommand.html @@ -0,0 +1,41 @@ + + + + + + + +
+ + + +
+
+ +LindexCommand: Contents
  LINDEX _key_ _index_
    Return value
    See also +
+ +

LindexCommand

+ +
+ +
+ +
+

LINDEX _key_ _index_

+Time complexity: O(n) (with n being the length of the list)
Return the specified element of the list stored at the specified key. 0 is the first element, 1 the second and so on. Negative indexes are supported, for example -1 is the last element, -2 the penultimate and so on.
+
If the value stored at key is not of list type an error is returned. If the index is out of range an empty string is returned.
+
Note that even if the average time complexity is O(n) asking for the first or the last element of the list is O(1).
+

Return value

Bulk reply, specifically the requested element.

See also

+ +
+ +
+
+ + + diff --git a/doc/LlenCommand.html b/doc/LlenCommand.html new file mode 100644 index 00000000..e1284a31 --- /dev/null +++ b/doc/LlenCommand.html @@ -0,0 +1,42 @@ + + + + + + + +
+ + + +
+
+ +LlenCommand: Contents
  LLEN _key_
    Return value
    See also +
+ +

LlenCommand

+ +
+ +
+ +
+

LLEN _key_

+Time complexity: O(1)
Return the length of the list stored at the specified key. If the key does not exist zero is returned (the same behaviour as for empty lists). If the value stored at key is not a list an error is returned.
+

Return value

Integer reply, specifically:

+The length of the list as an integer `>=` 0 if the operation succeeded
+-2 if the specified key does not hold a list value
+
Note that library clients should raise an error if -2 is returned by the Redis server instead of passing the negative value back to the caller.

See also

+ +
+ +
+
+ + + diff --git a/doc/LpopCommand.html b/doc/LpopCommand.html new file mode 100644 index 00000000..8750a668 --- /dev/null +++ b/doc/LpopCommand.html @@ -0,0 +1,41 @@ + + + + + + + +
+ + + +
+
+ +LpopCommand: Contents
  LPOP _key_
  RPOP _key_
    Return value
    See also +
+ +

LpopCommand

+ +
+ +
+ +
+

LPOP _key_

+

RPOP _key_

+Time complexity: O(1)
Atomically return and remove the first (LPOP) or last (RPOP) element of the list. For example if the list contains the elements "a","b","c" LPOP will return "a" and the list will become "b","c".
+
If the key does not exist or the list is already empty the special value 'nil' is returned.
+

Return value

Bulk reply

See also

+ +
+ +
+
+ + + diff --git a/doc/LrangeCommand.html b/doc/LrangeCommand.html new file mode 100644 index 00000000..2df5cc6f --- /dev/null +++ b/doc/LrangeCommand.html @@ -0,0 +1,42 @@ + + + + + + + +
+ + + +
+
+ +LrangeCommand: Contents
  LRANGE _key_ _start_ _end_
    Return value
    See also +
+ +

LrangeCommand

+ +
+ +
+ +
+

LRANGE _key_ _start_ _end_

+Time complexity: O(n) (with n being the length of the range)
Return the specified elements of the list stored at the specified key. Start and end are zero-based indexes. 0 is the first element of the list (the list head), 1 the next element and so on.
+
For example LRANGE foobar 0 2 will return the first three elements of the list.
+
_start_ and end can also be negative numbers indicating offsets from the end of the list. For example -1 is the last element of the list, -2 the penultimate element and so on.
+
Indexes out of range will not produce an error: if start is over the end of the list, or start > end, an empty list is returned. If end is over the end of the list Redis will treat it just like the last element of the list.
+

Return value

Multi bulk reply, specifically a list of elements in the specified range.

See also

+ +
+ +
+
+ + + diff --git a/doc/LremCommand.html b/doc/LremCommand.html new file mode 100644 index 00000000..a2441ba2 --- /dev/null +++ b/doc/LremCommand.html @@ -0,0 +1,43 @@ + + + + + + + +
+ + + +
+
+ +LremCommand: Contents
  LREM _key_ _count_ _value_
    Return value
    See also +
+ +

LremCommand

+ +
+ +
+ +
+

LREM _key_ _count_ _value_

+Time complexity: O(N) (with N being the length of the list)
Remove the first count occurrences of the value element from the list. If count is zero all the elements are removed. If count is negative elements are removed from tail to head, instead of from head to tail as is the normal behaviour. So for example LREM with count -2 and _hello_ as value to remove against the list (a,b,c,hello,x,hello,hello) will leave the list (a,b,c,hello,x). The number of removed elements is returned as an integer, see below for more information about the returned value.
+

Return value

Integer Reply, specifically:

+The number of removed elements if the operation succeeded
+-1 if the specified key does not exist
+-2 if the specified key does not hold a list value
+

See also

+ +
+ +
+
+ + + diff --git a/doc/LsetCommand.html b/doc/LsetCommand.html new file mode 100644 index 00000000..064da293 --- /dev/null +++ b/doc/LsetCommand.html @@ -0,0 +1,39 @@ + + + + + + + +
+ + + +
+
+ +LsetCommand: Contents
  LSET _key_ _index_ _value_
    Return value
    See also +
+ +

LsetCommand

+ +
+ +
+ +
+

LSET _key_ _index_ _value_

+Time complexity: O(N) (with N being the length of the list)
Set the list element at index (see LINDEX for information about the _index_ argument) with the new value. Out of range indexes will generate an error. Note that setting the first or last elements of the list is O(1).
+

Return value

Status code reply

See also

+ +
+ +
+
+ + + diff --git a/doc/LtrimCommand.html b/doc/LtrimCommand.html new file mode 100644 index 00000000..a492fccb --- /dev/null +++ b/doc/LtrimCommand.html @@ -0,0 +1,47 @@ + + + + + + + +
+ + + +
+
+ +LtrimCommand: Contents
  LTRIM _key_ _start_ _end_
    Return value
    See also +
+ +

LtrimCommand

+ +
+ +
+ +
+

LTRIM _key_ _start_ _end_

+Time complexity: O(n) (with n being len of list - len of range)
Trim an existing list so that it will contain only the specified range of elements. Start and end are zero-based indexes. 0 is the first element of the list (the list head), 1 the next element and so on.
+
For example LTRIM foobar 0 2 will modify the list stored at foobar key so that only the first three elements of the list will remain.
+
_start_ and end can also be negative numbers indicating offsets from the end of the list. For example -1 is the last element of the list, -2 the penultimate element and so on.
+
Indexes out of range will not produce an error: if start is over the end of the list, or start > end, an empty list is left as value. If end is over the end of the list Redis will treat it just like the last element of the list.
+
Hint: the obvious use of LTRIM is together with LPUSH/RPUSH. For example:
+
+        LPUSH mylist <someelement>
+        LTRIM mylist 0 99
+
The above two commands will push elements in the list taking care that the list will not grow without limits. This is very useful when using Redis to store logs for example. It is important to note that when used in this way LTRIM is an O(1) operation because in the average case just one element is removed from the tail of the list.
+

Return value

Status code reply

See also

+ +
+ +
+
+ + + diff --git a/doc/MoveCommand.html b/doc/MoveCommand.html new file mode 100644 index 00000000..f3252f84 --- /dev/null +++ b/doc/MoveCommand.html @@ -0,0 +1,42 @@ + + + + + + + +
+ + + +
+
+ +MoveCommand: Contents
  MOVE _key_ _dbindex_
    Return value
    See also +
+ +

MoveCommand

+ +
+ +
+ +
+

MOVE _key_ _dbindex_

+
Move the specified key from the currently selected DB to the specified destination DB. Note that this command returns 1 only if the key was successfully moved, and 0 if the target key was already there or if the source key was not found at all, so it is possible to use MOVE as a locking primitive.
+

Return value

Integer reply, specifically:

1 if the key was moved +0 if the key was not moved because already present on the target DB or was not found in the current DB. +-3 if the destination DB is the same as the source DB +-4 if the database index is out of range

See also

+ +
+ +
+
+ + + diff --git a/doc/ProtocolSpecification.html b/doc/ProtocolSpecification.html new file mode 100644 index 00000000..95d454ab --- /dev/null +++ b/doc/ProtocolSpecification.html @@ -0,0 +1,143 @@ + + + + + + + +
+ + + +
+ + +

ProtocolSpecification

+ +
+ +
+ +
+

Protocol Specification

The Redis protocol is a compromise between being easy to parse by a computer +and being easy to parse by a human. Before reading this section you are +strongly encouraged to read the "REDIS TUTORIAL" section of this README in order +to get a first feeling of the protocol playing with it via TELNET.

Networking layer

A client connects to a Redis server creating a TCP connection to the port 6379. +Every redis command or data transmitted by the client and the server is +terminated by "\r\n" (CRLF).

Simple INLINE commands

The simplest commands are the inline commands. This is an example of a +server/client chat (the server chat starts with S:, the client chat with C:)

+C: PING
+S: +PONG
+
An inline command is a CRLF-terminated string sent to the server. The server +usually replies to inline commands with a single line that can be a number +or a return code.

When the server replies with a status code (that is a one line reply just indicating if the operation succeeded or not), if the first character of the +reply is a "+" then the command succeeded, if it is a "-" then the following +part of the string is an error.

The following is another example of an INLINE command returning an integer:

+C: EXISTS somekey
+S: 0
+
Since 'somekey' does not exist the server returned '0'.

Note that the EXISTS command takes one argument. Arguments are separated +simply by spaces.

Bulk commands

A bulk command is exactly like an inline command, but the last argument +of the command must be a stream of bytes in order to send data to the server. +The "SET" command is a bulk command, see the following example:

+C: SET mykey 6
+C: foobar
+S: +OK
+
The last argument of the command is '6'. This specifies the number of DATA +bytes that will follow (note that even these bytes are terminated by two +additional bytes of CRLF).

All the bulk commands are in this exact form: instead of the last argument +the number of bytes that will follow is specified, followed by the bytes, +and CRLF. In order to be more clear for the programmer this is the string +sent by the client in the above sample:

"SET mykey 6\r\nfoobar\r\n"
+
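As an illustration of that byte sequence, a minimal C sketch that opens a TCP connection, sends exactly those bytes and prints the status code reply (127.0.0.1 and the default port are assumed here):

#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <unistd.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <sys/socket.h>

int main(void) {
    const char *cmd = "SET mykey 6\r\nfoobar\r\n";
    char reply[64];
    struct sockaddr_in addr;
    int fd = socket(AF_INET, SOCK_STREAM, 0);
    ssize_t n;

    memset(&addr, 0, sizeof(addr));
    addr.sin_family = AF_INET;
    addr.sin_port = htons(6379);                 /* default Redis port */
    addr.sin_addr.s_addr = inet_addr("127.0.0.1");
    if (fd == -1 || connect(fd, (struct sockaddr*)&addr, sizeof(addr)) == -1) {
        perror("connect");
        return 1;
    }
    write(fd, cmd, strlen(cmd));                 /* arguments, byte count, then the data */
    n = read(fd, reply, sizeof(reply)-1);        /* expect the status reply "+OK\r\n" */
    if (n > 0) {
        reply[n] = '\0';
        printf("server replied: %s", reply);
    }
    close(fd);
    return 0;
}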

Bulk replies

The server may reply to an inline or bulk command with a bulk reply. See +the following example:

+C: GET mykey
+S: 6
+S: foobar
+
A bulk reply is very similar to the last argument of a bulk command. The +server sends as the first line the number of bytes of the actual reply +followed by CRLF, then the bytes are sent followed by additional two bytes +for the final CRLF. The exact sequence sent by the server is:

"6\r\nfoobar\r\n"
+If the requested value does not exist the bulk reply will use the special +value 'nil' instead of sending the line containing the number of bytes to read. +This is an example:

+C: GET nonexistingkey
+S: nil
+
The client library API should not return an empty string, but a nil object. +For example a Ruby library should return 'nil' while a C library should return +NULL.
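A rough sketch of how a C client might interpret a bulk reply that has already been read from the connection, returning NULL for the special 'nil' case:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Parse a bulk reply held in 'buf': the first line is either a byte count
 * or the string "nil". Returns a malloc()ed copy of the payload, or NULL
 * for nil (negative counts, i.e. errors, are also mapped to NULL here). */
char *parse_bulk_reply(const char *buf) {
    char *payload;
    long len;

    if (strncmp(buf, "nil\r\n", 5) == 0) return NULL;
    len = strtol(buf, NULL, 10);
    if (len < 0) return NULL;
    buf = strchr(buf, '\n') + 1;                 /* skip the byte count line */
    payload = malloc(len + 1);
    memcpy(payload, buf, len);
    payload[len] = '\0';
    return payload;
}

int main(void) {
    char *value = parse_bulk_reply("6\r\nfoobar\r\n");
    printf("value: %s\n", value ? value : "(nil)");
    free(value);
    value = parse_bulk_reply("nil\r\n");
    printf("value: %s\n", value ? value : "(nil)");
    return 0;
}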

Bulk reply error reporting

Bulk replies can signal errors, for example trying to use GET against a list +value is not permitted. Bulk replies use a negative bytes count in order to +signal an error. An error string of ABS(bytes_count) bytes will follow. See +the following example:

+S: GET alistkey
+S: -38
+S: -ERR Requested element is not a string
+
-38 means: sorry your operation resulted in an error, but a 38 bytes string +that explains this error will follow. Client APIs should abort on this kind +of errors, for example a PHP client should call the die() function.

The following commands reply with a bulk reply: GET, KEYS, LINDEX, LPOP, RPOP

Multi-Bulk replies

Commands similar to LRANGE need to return multiple values (every element +of the list is a value, and LRANGE needs to return more than a single element). This is accomplished using multiple bulk writes, +prefixed by an initial line indicating how many bulk writes will follow. +Example:

+C: LRANGE mylist 0 3
+S: 4
+S: 3
+S: foo
+S: 3
+S: bar
+S: 5
+S: Hello
+S: 5
+S: World
+
The first line the server sent is "4\r\n" in order to specify that four bulk +writes will follow. Then every bulk write is transmitted.

If the specified key does not exist instead of the number of elements in the +list, the special value 'nil' is sent. Example:

+C: LRANGE nokey 0 1
+S: nil
+
A client library API SHOULD return a nil object and not an empty list when this +happens. This makes it possible to distinguish between empty lists and non existing ones.

Nil elements in Multi-Bulk replies

Single elements of a multi bulk reply may have -1 length, in order to signal that these elements are missing and not empty strings. This can happen with the SORT command when used with the GET pattern option when the specified key is missing. Example of a multi bulk reply containing an empty element:

+S: 3
+S: 3
+S: foo
+S: -1
+S: 3
+S: bar
+
The second element is nil. The client library should return something like this:

+["foo",nil,"bar"]
+

Multi-Bulk replies errors

Like bulk reply errors Multi-bulk reply errors are reported using a negative +count. Example:

+C: LRANGE stringkey 0 1
+S: -38
+S: -ERR Requested element is not a string
+
The following commands reply with a multi-bulk reply: LRANGE, SINTER

Check the Bulk replies errors section for more information.

Status code reply

As already seen a status code reply is in the form of a single line string +terminated by "\r\n". For example:

++OK
+
and

+-ERR no such key
+
are two examples of status code replies. The first character of a status code reply is always "+" or "-".

The following commands reply with a status code reply: +PING, SET, SELECT, SAVE, BGSAVE, SHUTDOWN, RENAME, LPUSH, RPUSH, LSET, LTRIM

Integer reply

This type of reply is just a CRLF terminated string representing an integer. For example "0\r\n", or "1000\r\n" are integer replies.

With commands like INCR or LASTSAVE using the integer reply to actually return a value there is no special meaning for the returned integer. It is just an incremental number for INCR, a UNIX time for LASTSAVE and so on.

Some commands like EXISTS will return 1 for true and 0 for false.

Other commands like SADD, SREM and SETNX will return 1 if the operation was actually done, 0 otherwise, and a negative value if the operation is invalid (for example SADD against a non-set value), accordingly to this table: +
+-1 no such key
+-2 operation against a key holding a value of the wrong type
+-3 source and destination objects/dbs are the same
+-4 argument out of range
+
+
In all these cases it is mandatory that the client raises an error instead of passing the negative value to the caller. Please check the commands documentation for the exact behaviour.

The following commands will reply with an integer reply: SETNX, DEL, EXISTS, INCR, INCRBY, DECR, DECRBY, DBSIZE, LASTSAVE, RENAMENX, MOVE, LLEN, SADD, SREM, SISMEMBER, SCARD

The commands that will never return a negative integer (commands that can't fail) are: INCR, DECR, INCRBY, DECRBY, LASTSAVE, EXISTS, SETNX, DEL, DBSIZE.

Single line reply

These replies are just single line strings terminated by CRLF. Only two commands reply in this way currently, RANDOMKEY and TYPE.

Multiple commands and pipelining

A client can use the same connection in order to issue multiple commands. +Pipelining is supported so multiple commands can be sent with a single +write operation by the client; there is no need to read the server reply +in order to issue the next command. All the replies can be read at the end.

Usually the Redis server and client will have a very fast link so it is not +very important to support this feature in a client implementation, still +if an application needs to issue a very large number of commands in a short +time using pipelining can be much faster.

+
+ +
+
+ + + diff --git a/doc/QuitCommand.html b/doc/QuitCommand.html new file mode 100644 index 00000000..c1508dce --- /dev/null +++ b/doc/QuitCommand.html @@ -0,0 +1,38 @@ + + + + + + + +
+ + + +
+
+ +QuitCommand: Contents
  Quit
    Return value +
+ +

QuitCommand

+ +
+ +
+ +
+

Quit

Ask the server to silently close the connection.
+

Return value

None. The connection is closed as soon as the QUIT command is received. + +
+ +
+
+ + + diff --git a/doc/README.html b/doc/README.html new file mode 100644 index 00000000..402038d7 --- /dev/null +++ b/doc/README.html @@ -0,0 +1,109 @@ + + + + + + + +
+ + + +
+ + +

README

+ +
+ +
+ +
+

Introduction

Redis is a database. To be more specific redis is a very simple database +implementing a dictionary where keys are associated with values. For example +I can set the key "surname_1992" to the string "Smith".

Redis takes the whole dataset in memory, but the dataset is persistent since from time to time Redis writes a dump of the dataset on disk asynchronously. The dump is loaded every time the server is restarted. This means that if a system crash occurs the last few queries can get lost (that is acceptable in many applications), so we supported master-slave replication from the early days.

Beyond key-value databases

In most key-value databases keys and values are simple strings. In Redis keys are just strings too, but the associated values can be Strings, Lists and Sets, and there are commands to perform complex atomic operations against these data types, so you can think of Redis as a data structures server.

For example you can append elements to a list stored at the key "mylist" using the LPUSH or RPUSH operation in O(1). Later you'll be able to get a range of elements with LRANGE or trim the list with LTRIM. Sets are very flexible too, it is possible to add and remove elements from Sets (unsorted collections of strings), and then ask for server-side intersection of Sets.
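For instance, a quick session might look like this (command syntax only; the exact wire protocol is described in the Protocol Specification):

RPUSH mylist a
RPUSH mylist b
RPUSH mylist c
LRANGE mylist 0 -1   => a,b,c
LTRIM mylist 0 1
LRANGE mylist 0 -1   => a,b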

All these features, plus the support for sorting Lists and Sets, allow you to use Redis as the sole DB for your scalable application without the need for any relational database. We wrote a simple Twitter clone in PHP + Redis to show a real world example; the link points to an article explaining the design and internals in very simple words.

What are the differences between Redis and Memcached?

In the following ways:

  • Memcached is not persistent, it just holds everything in memory without saving since its main goal is to be used as a cache. Redis instead can be used as the main DB for the application. We wrote a simple Twitter clone using only Redis as database.
+
  • Like memcached Redis uses a key-value model, but while keys can just be strings, values in Redis can be lists and sets, and complex operations like intersections, set/get n-th element of lists, pop/push of elements, can be performed against sets and lists. It is possible to use lists as message queues.
+

What are the differences between Redis and Tokyo Cabinet / Tyrant?

Redis and Tokyo can be used for the same applications, but actually they are very different beasts:

  • Tokyo is purely key-value, everything beyond key-value storing of strings is delegated to an embedded Lua interpreter. AFAIK there is no way to guarantee atomicity of operations like pushing into a list, and every time you want to have data structures inside a Tokyo key you have to perform some kind of object serialization/de-serialization.
+
  • Tokyo stores data on disk, synchronously; this means you can have datasets bigger than memory, but, like every kind of process that relies on disk I/O for speed, under load the performance may start to degrade. With Redis you don't have this problem, but you have another one: the dataset of every single server must fit in your memory.
+
  • Redis is generally an higher level beast in the operations supported. Things like SORTing, Server-side set-intersections, can't be done with Tokyo. But Redis is not an on-disk DB engine like Tokyo: the latter can be used as a fast DB engine in your C project without the networking overhead just linking to the library. Still remember that in many scalable applications you need multiple servers talking with multiple servers, so the server-client model is almost always needed.
+

Does Redis support locking?

No, the idea is to provide atomic primitives in order to make the programmer able to use Redis with locking free algorithms. For example imagine you have 10 computers and 1 Redis server. You want to count words in a very large text. This large text is split among the 10 computers; every computer will process its part and use Redis's INCR command to atomically increment a counter for every occurrence of the word found.
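A minimal sketch of that word-counting idea (illustrative Python written for this document; it speaks the inline protocol described later, and the wordcount: key prefix is just an example):

import socket

def redis_command(sock, line):
    # send one inline command and read back its single-line reply
    sock.sendall(line.encode() + b"\r\n")
    reply = b""
    while not reply.endswith(b"\r\n"):
        reply += sock.recv(4096)
    return reply.strip()

def count_words(chunk_of_text):
    # run on each of the 10 computers against its own chunk of the text
    s = socket.create_connection(("localhost", 6379))
    for word in chunk_of_text.split():
        # INCR is atomic on the server, so concurrent workers never
        # lose an increment and no locking is needed
        redis_command(s, "INCR wordcount:" + word)
    s.close()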

INCR/DECR are not the only atomic primitives, there are others like PUSH/POP on lists, POP RANDOM KEY operations, UPDATE and so on. For example you can use Redis like a Tuple Space (http://en.wikipedia.org/wiki/Tuple_space) in order to implement distributed algorithms.

(News: locking with key-granularity is now planned)

Multiple databases support

Another synchronization primitive is the support for multiple DBs. By default DB 0 is selected for every new connection, but using the SELECT command it is possible to select a different database. The MOVE operation can move an item from one DB to another atomically. This can be used as a base for locking free algorithms together with the 'RANDOMKEY' or 'POPRANDOMKEY' commands.

Redis Data Types

Redis supports the following three data types as values:

  • Strings: just any sequence of bytes. Redis strings are binary safe, so they can hold not just text, but images, compressed data and everything else.
  • Lists: lists of strings, with support for operations like append a new string on head, on tail, list length, obtain a range of elements, truncate the list to a given length, sort the list, and so on.
  • Sets: an unsorted set of strings. It is possible to add or delete elements from a set, to perform set intersection, union, subtraction, and so on.
+Values can be Strings, Lists or Sets. Keys can be a subset of strings not containing newlines ("\n") and spaces (" ").

Note that sometimes strings may hold numeric values that must be parsed by Redis. An example is the INCR command that atomically increments the number stored at the specified key. In this case Redis is able to handle integers that can be stored inside a 'long long' type, that is, a 64-bit signed integer.

Implementation Details

Strings are implemented as dynamically allocated strings of characters. +Lists are implemented as doubly linked lists with cached length. +Sets are implemented using hash tables that use chaining to resolve collisions.

Redis Tutorial

(note, you can skip this section if you are only interested in "formal" doc.)

Later in this document you can find detailed information about Redis commands, the protocol specification, and so on. This kind of documentation is useful but... if you are new to Redis it is also BORING! The Redis protocol is designed so that it is both pretty efficient for computers to parse and simple enough to be used by humans just poking around with the 'telnet' command, so this section will show the reader how to play a bit with Redis to get an initial feeling about it, and how it works.

To start just compile Redis with 'make' and start it with './redis-server'. The server will start and log stuff on the standard output; if you want it to log more, edit redis.conf, set the loglevel to debug, and restart it.

You can specify a configuration file as the only parameter:

./redis-server /etc/redis.conf
+This is NOT required. The server will start even without a configuration file, using a default built-in configuration.

Now let's try to set a key to a given value:

+$ telnet localhost 6379
+Trying 127.0.0.1...
+Connected to localhost.
+Escape character is '^]'.
+SET foo 3  
+bar
++OK
+
The first line we sent to the server is "set foo 3". This means "set the key +foo with the following three bytes I'll send you". The following line is +the "bar" string, that is, the three bytes. So the effect is to set the +key "foo" to the value "bar". Very simple!

(note that you can send commands in lowercase and it will work anyway, +commands are not case sensitive)

Note that after the first and the second line we sent to the server there +is a newline at the end. The server expects commands terminated by "\r\n" +and sequence of bytes terminated by "\r\n". This is a minimal overhead from +the point of view of both the server and client but allows us to play with +Redis with the telnet command easily.

The last line of the chat between server and client is "+OK". This means +our key was added without problems. Actually SET can never fail but +the "+OK" sent lets us know that the server received everything and +the command was actually executed.

Let's try to get the key content now:

+GET foo
+3
+bar
+
Ok, that's very similar to 'set', just the other way around. We sent "get foo", and the server replied with a first line that is just the number of bytes of the value stored at the key, followed by the actual bytes. Again "\r\n" is appended both to the byte count and to the actual data.

What about requesting a non existing key?

+GET blabla
+nil
+
When the key does not exist instead of the length just the "nil" string is sent. +Another way to check if a given key exists or not is indeed the EXISTS command:

+EXISTS nokey
+0
+EXISTS foo
+1
+
As you can see the server replied '0' the first time since 'nokey' does not +exist, and '1' for 'foo', a key that actually exists.

Ok... now you know the basics, read the REDIS COMMAND REFERENCE section to +learn all the commands supported by Redis and the PROTOCOL SPECIFICATION +section for more details about the protocol used if you plan to implement one +for a language missing a decent client implementation.

License

Redis is released under the BSD license. See the COPYING file for more information.

Credits

Redis is written and maintained by Salvatore Sanfilippo, Aka 'antirez'.

Enjoy, +antirez + +
+ +
+
+ + + diff --git a/doc/RandomkeyCommand.html b/doc/RandomkeyCommand.html new file mode 100644 index 00000000..8714f4c6 --- /dev/null +++ b/doc/RandomkeyCommand.html @@ -0,0 +1,39 @@ + + + + + + + +
+ + + +
+
+ +RandomkeyCommand: Contents
  RANDOMKEY
    Return value
    See also +
+ +

RandomkeyCommand

+ +
+ +
+ +
+

RANDOMKEY

+Time complexity: O(1)
Return a randomly selected key from the currently selected DB.
+

Return value

Single line reply, specifically the randomly selected key, or an empty string if the database is empty.

See also

+ +
+ +
+
+ + + diff --git a/doc/RenameCommand.html b/doc/RenameCommand.html new file mode 100644 index 00000000..1ed0d9e1 --- /dev/null +++ b/doc/RenameCommand.html @@ -0,0 +1,39 @@ + + + + + + + +
+ + + +
+
+ +RenameCommand: Contents
  RENAME _oldkey_ _newkey_
    Return value
    See also +
+ +

RenameCommand

+ +
+ +
+ +
+

RENAME _oldkey_ _newkey_

+Time complexity: O(1)
Atomically renames the key oldkey to newkey. If the source and destination names are the same an error is returned. If newkey already exists it is overwritten.
+

Return value

Status code reply

See also

+
  • RENAMENX if you don't want to overwrite the destination key if it exists.
+
+ +
+
+ + + diff --git a/doc/RenamenxCommand.html b/doc/RenamenxCommand.html new file mode 100644 index 00000000..232cb65d --- /dev/null +++ b/doc/RenamenxCommand.html @@ -0,0 +1,44 @@ + + + + + + + +
+ + + +
+
+ +RenamenxCommand: Contents
  RENAMENX _oldkey_ _newkey_
    Return value
    See also +
+ +

RenamenxCommand

+ +
+ +
+ +
+

RENAMENX _oldkey_ _newkey_

+Time complexity: O(1)
Rename oldkey into newkey but fails if the destination key newkey already exists.
+

Return value

Integer reply, specifically:

+1 if the key was renamed
+0 if the target key already exists
+-1 if the source key does not exist
+-3 if source and destination keys are the same
+

See also

+
  • RENAME is like RENAMENX but overwrites an existing destination key.
+
+ +
+
+ + + diff --git a/doc/ReplyTypes.html b/doc/ReplyTypes.html new file mode 100644 index 00000000..18b4a640 --- /dev/null +++ b/doc/ReplyTypes.html @@ -0,0 +1,44 @@ + + + + + + + +
+ + + +
+ + +

ReplyTypes

+ +
+ +
+ +
+

Redis Reply Types

Redis commands can reply to the client with four different kinds of replies; you can find the protocol level specification of these replies in the Redis Protocol Specification. This page is instead a higher level description of the four types of replies from the point of view of the final user.

Status code reply

+Status code replies are in the form of a +OK from the server, or a -ERR followed by an error string. At the protocol level these replies are sent as a single line. Client libraries should return true on OK, and should raise an exception, in the form of an error that stops the execution of the program, on ERR replies from the server, because this kind of reply is used by operations that usually fail because of a programming error, an inconsistent DB, and so on.

Integer reply

+At the protocol level integer replies are single line replies in the form of a decimal signed number. Redis commands returning true or false will use an integer reply, with "1" and "0" as replies. A negative value in an integer reply is used to signal an error by all the commands, with the exception of INCR/INCRBY/DECR/DECRBY, where negative return values are allowed (these commands never fail).

All the integer replies using negative values to return errors will use the same values to signal the same errors:
+
+-1 key not found
+-2 key contains a value of the wrong type
+-3 source object and destination object are the same
+-4 out of range argument
+

Integer replies are usually passed by client libraries as integer values. On negative integer reply an exception should be raised (excluding the INCR family commands).

Bulk reply

+A bulk reply is a binary-safe reply that is used to return a single string value (the string is not limited to alphanumerical characters, it may contain binary data of any kind). Client libraries will usually return a string as the return value of Redis commands returning bulk replies. There is a special bulk reply that signals that the element does not exist. When this happens the client library should return 'nil', 'false', or some other special element that can be distinguished from an empty string.

Multi bulk reply

+While a bulk reply returns a single string value, multi bulk replies are used to return multiple values: lists, sets, and so on. Elements of a multi bulk reply can be missing (-1 length count at the protocol level). Client libraries should return 'nil' or 'false' in order to make these elements distinguishable from empty strings. Client libraries should return multi bulk replies that are about ordered elements, like list ranges, as lists, and multi bulk replies about sets as hashes. +
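As a concrete illustration, this is roughly how a client could read a multi bulk reply with the wire format used by this Redis version (a count line, then one bulk reply per element, as also done by the bundled redis-cli). Illustrative Python written for this document, not taken from any bundled client:

import socket

def read_line(sock):
    line = b""
    while not line.endswith(b"\r\n"):
        line += sock.recv(1)
    return line[:-2]

def read_multi_bulk(sock):
    count = read_line(sock)
    if count == b"nil":
        return None                    # e.g. the key does not exist
    elements = []
    for _ in range(int(count)):
        length = int(read_line(sock))
        if length == -1:
            elements.append(None)      # missing element -> nil/false
            continue
        data = b""
        while len(data) < length:
            data += sock.recv(length - len(data))
        read_line(sock)                # consume the trailing CRLF
        elements.append(data)
    return elements

s = socket.create_connection(("localhost", 6379))
s.sendall(b"LRANGE mylist 0 -1\r\n")
print(read_multi_bulk(s))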
+ +
+
+ + + diff --git a/doc/RpushCommand.html b/doc/RpushCommand.html new file mode 100644 index 00000000..e22c8b57 --- /dev/null +++ b/doc/RpushCommand.html @@ -0,0 +1,40 @@ + + + + + + + +
+ + + +
+
+ +RpushCommand: Contents
      RPUSH _key_ _string_
      LPUSH _key_ _string_
    Return value
    See also +
+ +

RpushCommand

+ +
+ +
+ +
+

RPUSH _key_ _string_

+

LPUSH _key_ _string_

+Time complexity: O(1)
Add the string value to the tail (RPUSH) or head (LPUSH) of the list stored at key. If the key does not exist an empty list is created just before the append operation. If the key exists but is not a List an error is returned.
+

Return value

Status code reply

See also

+ +
+ +
+
+ + + diff --git a/doc/SaddCommand.html b/doc/SaddCommand.html new file mode 100644 index 00000000..ed3258ca --- /dev/null +++ b/doc/SaddCommand.html @@ -0,0 +1,43 @@ + + + + + + + +
+ + + +
+
+ +SaddCommand: Contents
  SADD _key_ _member_
    Return value
    See also +
+ +

SaddCommand

+ +
+ +
+ +
+

SADD _key_ _member_

+Time complexity O(1)
Add the specified member to the set value stored at key. If member is already a member of the set no operation is performed. If key does not exist a new set with the specified member as sole member is created. If the key exists but does not hold a set value an error is returned.
+

Return value

Integer reply, specifically:

+1 if the new element was added
+0 if the new element was already a member of the set
+-2 if the key contains a non set value
+

See also

+ +
+ +
+
+ + + diff --git a/doc/SaveCommand.html b/doc/SaveCommand.html new file mode 100644 index 00000000..3c4c3714 --- /dev/null +++ b/doc/SaveCommand.html @@ -0,0 +1,39 @@ + + + + + + + +
+ + + +
+
+ +SaveCommand: Contents
      SAVE
    Return value
    See also +
+ +

SaveCommand

+ +
+ +
+ +
+

SAVE

+
Save the DB on disk. The server hangs while the saving is not completed, and no connection is served in the meanwhile. An OK code is returned when the DB was fully stored on disk.
+

Return value

Status code reply

See also

+ +
+ +
+
+ + + diff --git a/doc/ScardCommand.html b/doc/ScardCommand.html new file mode 100644 index 00000000..d85be9db --- /dev/null +++ b/doc/ScardCommand.html @@ -0,0 +1,42 @@ + + + + + + + +
+ + + +
+
+ +ScardCommand: Contents
  SCARD _key_
    Return value
    See also +
+ +

ScardCommand

+ +
+ +
+ +
+

SCARD _key_

+Time complexity O(1)
Return the set cardinality (number of elements). If the key does not exist 0 is returned, like for empty sets. If the key does not hold a set value a negative value (-2, see below) is returned. Client libraries should raise an error when a negative value is returned instead of passing the value to the caller.
+

Return value

Integer reply, specifically:

+the cardinality (number of elements) of the set as an integer `>=` 0 if the operation succeeded
+-2 if the specified key does not hold a set value
+

See also

+ +
+ +
+
+ + + diff --git a/doc/SelectCommand.html b/doc/SelectCommand.html new file mode 100644 index 00000000..11c4b050 --- /dev/null +++ b/doc/SelectCommand.html @@ -0,0 +1,39 @@ + + + + + + + +
+ + + +
+
+ +SelectCommand: Contents
  SELECT _index_
    Return value
    See also +
+ +

SelectCommand

+ +
+ +
+ +
+

SELECT _index_

+
Select the DB having the specified zero-based numeric index. By default DB 0 is selected for every new client connection.
+

Return value

Status code reply

See also

+ +
+ +
+
+ + + diff --git a/doc/SetCommand.html b/doc/SetCommand.html new file mode 100644 index 00000000..63c51f46 --- /dev/null +++ b/doc/SetCommand.html @@ -0,0 +1,39 @@ + + + + + + + +
+ + + +
+
+ +SetCommand: Contents
  SET _key_ _value_
    Return value
    See also +
+ +

SetCommand

+ +
+ +
+ +
+

SET _key_ _value_

+Time complexity: O(1)
Set the string value as the value of the key. The string can't be longer than 1073741824 bytes (1 GB).
+

Return value

Status code reply

See also

+
  • SETNX is like SET but doesn't perform the operation if the target key already exists.
+
+ +
+
+ + + diff --git a/doc/SetnxCommand.html b/doc/SetnxCommand.html new file mode 100644 index 00000000..15a6c0e2 --- /dev/null +++ b/doc/SetnxCommand.html @@ -0,0 +1,42 @@ + + + + + + + +
+ + + +
+
+ +SetnxCommand: Contents
  SETNX _key_ _value_
    Return value
    See also +
+ +

SetnxCommand

+ +
+ +
+ +
+

SETNX _key_ _value_

+Time complexity: O(1)
SETNX works exactly like SET with the only difference that if the key already exists no operation is performed. SETNX actually means "SET if Not eXists".
+

Return value

Integer reply, specifically:

+1 if the key was set
+0 if the key was not set
+

See also

+ +
+ +
+
+ + + diff --git a/doc/ShutdownCommand.html b/doc/ShutdownCommand.html new file mode 100644 index 00000000..ce77ae97 --- /dev/null +++ b/doc/ShutdownCommand.html @@ -0,0 +1,39 @@ + + + + + + + +
+ + + +
+
+ +ShutdownCommand: Contents
  SHUTDOWN
    Return value
    See also +
+ +

ShutdownCommand

+ +
+ +
+ +
+

SHUTDOWN

+
Stop all the clients, save the DB, then quit the server. This command makes sure that the DB is switched off without the loss of any data. This is not guaranteed if the client simply uses "SAVE" and then "QUIT", because other clients may alter the DB data between the two commands.
+

Return value

Status code reply on error. On success nothing is returned since the server quits and the connection is closed.

See also

+ +
+ +
+
+ + + diff --git a/doc/SinterCommand.html b/doc/SinterCommand.html new file mode 100644 index 00000000..22d6acf5 --- /dev/null +++ b/doc/SinterCommand.html @@ -0,0 +1,40 @@ + + + + + + + +
+ + + +
+
+ +SinterCommand: Contents
  SINTER _key1_ _key2_ ... _keyN_
    Return value
    See also +
+ +

SinterCommand

+ +
+ +
+ +
+

SINTER _key1_ _key2_ ... _keyN_

+Time complexity O(N*M) worst case, where N is the cardinality of the smallest set and M the number of sets

Return the members of a set resulting from the intersection of all the sets held at the specified keys. Like in LRANGE the result is sent to the client as a multi-bulk reply (see the protocol specification for more information). If just a single key is specified, then this command produces the same result as SMEMBERS. Actually SMEMBERS is just syntax sugar for SINTER.
+
+If at least one of the specified keys does not exist or does not hold a set value an error is returned.
+

Return value

Multi bulk reply, specifically the list of common elements.

See also

+
  • SREM
  • SISMEMBER
  • SCARD
  • SMEMBERS
  • SINTER
  • SINTERSTORE
+
+ +
+
+ + + diff --git a/doc/SinterstoreCommand.html b/doc/SinterstoreCommand.html new file mode 100644 index 00000000..02757ccf --- /dev/null +++ b/doc/SinterstoreCommand.html @@ -0,0 +1,39 @@ + + + + + + + +
+ + + +
+
+ +SinterstoreCommand: Contents
  SINTERSTORE _dstkey_ _key1_ _key2_ ... _keyN_
    Return value
    See also +
+ +

SinterstoreCommand

+ +
+ +
+ +
+

SINTERSTORE _dstkey_ _key1_ _key2_ ... _keyN_

+Time complexity O(N*M) worst case, where N is the cardinality of the smallest set and M the number of sets

This command works exactly like SINTER but instead of being returned the resulting set is stored at _dstkey_.
+

Return value

Status code reply

See also

+
  • SREM
  • SISMEMBER
  • SCARD
  • SMEMBERS
  • SINTER
  • SINTERSTORE
+
+ +
+
+ + + diff --git a/doc/SismemberCommand.html b/doc/SismemberCommand.html new file mode 100644 index 00000000..63516d02 --- /dev/null +++ b/doc/SismemberCommand.html @@ -0,0 +1,43 @@ + + + + + + + +
+ + + +
+
+ +SismemberCommand: Contents
  SISMEMBER _key_ _member_
    Return value
    See also +
+ +

SismemberCommand

+ +
+ +
+ +
+

SISMEMBER _key_ _member_

+Time complexity O(1)
Return 1 if member is a member of the set stored at key, otherwise 0 is returned. On error a negative value is returned. Client libraries should raise an error when a negative value is returned instead of passing the value to the caller.
+

Return value

Integer reply, specifically:

+1 if the element is a member of the set
+0 if the element is not a member of the set OR if the key does not exist
+-2 if the key does not hold a set value
+

See also

+ +
+ +
+
+ + + diff --git a/doc/SmembersCommand.html b/doc/SmembersCommand.html new file mode 100644 index 00000000..52bdcbc4 --- /dev/null +++ b/doc/SmembersCommand.html @@ -0,0 +1,39 @@ + + + + + + + +
+ + + +
+
+ +SmembersCommand: Contents
  SMEMBERS _key_
    Return value
    See also +
+ +

SmembersCommand

+ +
+ +
+ +
+

SMEMBERS _key_

+Time complexity O(N)
Return all the members (elements) of the set value stored at key. This is just syntax glue for SINTER.
+

Return value

Multi bulk reply

See also

+ +
+ +
+
+ + + diff --git a/doc/SortCommand.html b/doc/SortCommand.html new file mode 100644 index 00000000..3d88e401 --- /dev/null +++ b/doc/SortCommand.html @@ -0,0 +1,60 @@ + + + + + + + +
+ + + +
+ + +

SortCommand

+ +
+ +
+ +
+

SORT _key_ BY _pattern_ LIMIT _start_ _end_ GET _pattern_ ASC|DESC ALPHA

+
Sort the elements contained in the List or Set value at key. By default sorting is numeric, with elements being compared as double precision floating point numbers. This is the simplest form of SORT.
+
+SORT mylist
+
Assuming mylist contains a list of numbers, the return value will be the list of numbers ordered from the smallest to the biggest number. In order to get the sorting in reverse order use DESC:
+
+SORT mylist DESC
+
ASC is also supported but it's the default so you don't really need it. If you want to sort lexicographically use ALPHA. Note that Redis is UTF-8 aware, assuming you set the right value for the LC_COLLATE environment variable.
+
Sort is able to limit the number of results using the LIMIT option:
+
+SORT mylist LIMIT 0 10
+
In the above example SORT will return only 10 elements, starting from the first one (start is zero-based). Almost all the sort options can be mixed together. For example:
+
+SORT mylist LIMIT 0 10 ALPHA DESC
+
Will sort mylist lexicographically, in descending order, returning only the first 10 elements.
+
Sometimes you want to sort elements using external keys as weights to compare, instead of comparing the actual List or Set elements. For example the list mylist may contain the elements 1, 2, 3, 4, that are just the unique IDs of objects stored at object_1, object_2, object_3 and object_4, while the keys weight_1, weight_2, weight_3 and weight_4 can contain the weights we want to use to sort the list of object identifiers. We can use the following command:
+
+SORT mylist BY weight_*
+
the BY option takes a pattern (weight_* in our example) that is used in order to generate the key names of the weights used for sorting. Weight key names are obtained by substituting the first occurrence of * with the actual value of the elements of the list (1,2,3,4 in our example).
+
Still, our previous example will return just the sorted IDs. Often it is needed to get the actual objects sorted (object_1, ..., object_4 in the example). We can do it with the following command:
+
+SORT mylist BY weight_* GET object_*
+
Note that GET can be used multiple times in order to get more keys for every element of the original List or Set sorted.
+

Return value

Multi bulk reply, specifically a list of sorted elements.

See Also

+ +
+ +
+
+ + + diff --git a/doc/SremCommand.html b/doc/SremCommand.html new file mode 100644 index 00000000..6219e92c --- /dev/null +++ b/doc/SremCommand.html @@ -0,0 +1,43 @@ + + + + + + + +
+ + + +
+
+ +SremCommand: Contents
  SREM _key_ _member_
    Return value
    See also +
+ +

SremCommand

+ +
+ +
+ +
+

SREM _key_ _member_

+Time complexity O(1)
Remove the specified member from the set value stored at key. If _member_ was not a member of the set no operation is performed. If key does not exist or does not hold a set value an error is returned.
+

Return value

Integer reply, specifically:

+1 if the new element was removed
+0 if the new element was not a member of the set
+-2 if the key does not hold a set value
+

See also

+ +
+ +
+
+ + + diff --git a/doc/TemplateCommand.html b/doc/TemplateCommand.html new file mode 100644 index 00000000..7286836e --- /dev/null +++ b/doc/TemplateCommand.html @@ -0,0 +1,38 @@ + + + + + + + +
+ + + +
+
+ +TemplateCommand: Contents
    Return value
    See also +
+ +

TemplateCommand

+ +
+ +
+ + + +
+
+ + + diff --git a/doc/TwitterAlikeExample.html b/doc/TwitterAlikeExample.html new file mode 100644 index 00000000..adb9bbaa --- /dev/null +++ b/doc/TwitterAlikeExample.html @@ -0,0 +1,252 @@ + + + + + + + +
+ + + +
+ + +

TwitterAlikeExample

+ +
+ +
+ +
+

A case study: Design and implementation of a simple Twitter clone using only the Redis key-value store as database and PHP

In this article I'll explain the design and the implementation of a simple clone of Twitter written using PHP, with Redis as the only database. The programming community tends to look at key-value stores as special databases that can't be used as a drop-in replacement for a relational database in the development of web applications. This article will try to prove the contrary.

Our Twitter clone, called Retwis, is structurally simple, has very good performance, and can be distributed among N web servers and M Redis servers with very little effort. You can find the source code here.

We use PHP for the example since it can be read by everybody. The same (or... much better) results can be obtained using Ruby, Python, Erlang, and so on. +

Key-value stores basics

+The essence of a key-value store is the ability to store some data, called value, inside a key. This data can later be retrieved only if we know the exact key used to store it. There is no way to search something by value. So for example I can use the command SET to store the value bar at key foo:

+SET foo bar
+
Redis will store our data permanently, so we can later ask for "What is the value stored at key foo?" and Redis will reply with bar:

+GET foo => bar
+
Other common operations provided by key-value stores are DEL used to delete a given key, and the associated value, SET-if-not-exists (called SETNX on Redis) that sets a key only if it does not already exist, and INCR that is able to atomically increment a number stored at a given key:

+SET foo 10
+INCR foo => 11
+INCR foo => 12
+INCR foo => 13
+

Atomic operations

+So far it should be pretty simple, but there is something special about INCR. Think about this, why to provide such an operation if we can do it ourself with a bit of code? After all it is as simple as:

+x = GET foo
+x = x + 1
+SET foo x
+
The problem is that doing the increment this way will only work as long as there is a single client working with the value x at a time. See what happens if two computers are accessing this data at the same time:

+x = GET foo (yields 10)
+y = GET foo (yields 10)
+x = x + 1 (x is now 11)
+y = y + 1 (y is now 11)
+SET foo x (foo is now 11)
+SET foo y (foo is now 11)
+
Something is wrong with that! We incremented the value two times, but instead of going from 10 to 12 our key holds 11. This is because the increment done with GET / increment / SET is not an atomic operation. Instead, the INCR provided by Redis, Memcached, ..., is an atomic implementation: the server takes care of protecting the get-increment-set for all the time needed to complete, in order to prevent simultaneous accesses.

What makes Redis different from other key-value stores is that it provides more operations similar to INCR that can be used together to model complex problems. This is why you can use Redis to write whole web applications without using an SQL database and without going mad. +

Beyond key-value stores

+In this section we will see what Redis features we need to build our Twitter clone. The first thing to know is that Redis values can be more than strings. Redis supports Lists and Sets as values, and there are atomic operations to operate against this more advanced values so we are safe even with multiple accesses against the same key. Let's start from Lists:

+LPUSH mylist a (now mylist holds one element list 'a')
+LPUSH mylist b (now mylist holds 'b,a')
+LPUSH mylist c (now mylist holds 'c,b,a')
+
LPUSH means Left Push, that is, add an element to the left (or to the head) of the list stored at mylist. If the key mylist does not exist it is automatically created by Redis as an empty list before the PUSH operation. As you can imagine, there is also the RPUSH operation that adds the element on the right of the list (on the tail).

This is very useful for our Twitter clone. Updates of users can be stored into a list stored at username:updates for instance. There are operations to get data or information from Lists of course. For instance LRANGE returns a range of the list, or the whole list.

+LRANGE mylist 0 1 => c,b
+
LRANGE uses zero-based indexes, that is, the first element is 0, the second 1, and so on. The command arguments are LRANGE key first-index last-index. The last index argument can be negative, with a special meaning: -1 is the last element of the list, -2 the penultimate, and so on. So in order to get the whole list we can use:

+LRANGE mylist 0 -1 => c,b,a
+
Other important operations are LLEN, that returns the length of the list, and LTRIM, that is like LRANGE but instead of returning the specified range trims the list, so it is like "Get range from mylist, Set this range as the new value", but atomic. We will use only these List operations, but make sure to check the Redis documentation to discover all the List operations supported by Redis. +
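A handy idiom that combines the two (and that is used later in this article for the global timeline) is to push a new element and then trim, so the list never grows beyond a fixed size:

LPUSH mylist newelement
LTRIM mylist 0 999

After every push the list is cut back to its 1000 most recent elements.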

The set data type

+There is more than Lists, Redis also supports Sets, that are unsorted collection of elements. It is possible to add, remove, and test for existence of members, and perform intersection between different Sets. Of course it is possible to ask for the list or the number of elements of a Set. Some example will make it more clear. Keep in mind that SADD is the add to set operation, SREM is the remove from set operation, sismember is the test if it is a member operation, and SINTER is perform intersection operation. Other operations are SCARD that is used to get the cardinality (the number of elements) of a Set, and SMEMBERS that will return all the members of a Set.

+SADD myset a
+SADD myset b
+SADD myset foo
+SADD myset bar
+SCARD myset => 4
+SMEMBERS myset => bar,a,foo,b
+
Note that SMEMBERS does not return the elements in the same order we added them, since Sets are unsorted collections of elements. When you want to store the order it is better to use Lists instead. Some more operations against Sets:

+SADD mynewset b
+SADD mynewset foo
+SADD mynewset hello
+SINTER myset mynewset => foo,b
+
SINTER can return the intersection between Sets but it is not limited to two sets, you may ask for intersection of 4,5 or 10000 Sets. Finally let's check how SISMEMBER works:

+SISMEMBER myset foo => 1
+SISMEMBER myset notamember => 0
+
Ok I think we are ready to start coding! +

Prerequisites

+If you didn't download it already please grab the source code of Retwis. It's a simple tar.gz file with a few of .php files inside. The implementation is very simple. You will find the PHP library client inside (redis.php) that is used to talk with the Redis server from PHP. This library was written by Ludovico Magnocavallo and you are free to reuse this in your own projects, but for updated version of the library please download the Redis distribution.

Another thing you probably want is a working Redis server. Just get the source, compile with make, and run with ./redis-server and you are done. No configuration is required at all in order to play with it or to run Retwis in your computer. +

Data layout

+Working with a relational database, this is the stage where the database layout would be produced in the form of tables, indexes, and so on. We don't have tables, so what should be designed? We need to identify what keys are needed to represent our objects and what kind of values these keys need to hold.

Let's start from Users. We need to represent these users, of course, with the username, userid, password, followers and following users, and so on. The first question is: what should identify a user inside our system? The username can be a good idea since it is unique, but it is also too big, and we want to stay low on memory. So, as if our DB was a relational one, we can associate a unique ID to every user. Every other reference to this user will be done by id. That's very simple to do, because we have our atomic INCR operation! When we create a new user we can do something like this, assuming the user is called "antirez":

+INCR global:nextUserId => 1000
+SET uid:1000:username antirez
+SET uid:1000:password p1pp0
+
We use the global:nextUserId key in order to always get an unique ID for every new user. Then we use this unique ID to populate all the other keys holding our user data. This is a Design Pattern with key-values stores! Keep it in mind. +Besides the fields already defined, we need some more stuff in order to fully define an User. For example sometimes it can be useful to be able to get the user ID from the username, so we set this key too:

+SET username:antirez:uid 1000
+
This may appear strange at first, but remember that we are only able to access data by key! It's not possible to tell Redis to return the key that holds a specific value. This is also our strength, this new paradigm is forcing us to organize the data so that everything is accessible by primary key, speaking with relational DBs language. +

Following, followers and updates

+There is another central need in our system. Every user has followers users and following users. We have a perfect data structure for this work! That is... Sets. So let's add this two new fields to our schema:

+uid:1000:followers => Set of uids of all the followers users
+uid:1000:following => Set of uids of all the following users
+
Another important thing we need is a place where we can add the updates to display in the user home page. We'll need to access this data in chronological order later, from the most recent update to the older ones, so the perfect kind of Value for this job is a List. Basically every new update will be LPUSHed into the user updates key, and thanks to LRANGE we can implement pagination and so on. Note that we use the words updates and posts interchangeably, since updates are actually "little posts" in some way.

+uid:1000:posts => a List of post ids, every new post is LPUSHed here.
+
+

Authentication

+Ok we have more or less everything about the user, but authentication. We'll handle authentication in a simple but robust way: we don't want to use PHP sessions or other things like this, our system must be ready in order to be distributed among different servers, so we'll take the whole state in our Redis database. So all we need is a random string to set as the cookie of an authenticated user, and a key that will tell us what is the user ID of the client holding such a random string. We need two keys in order to make this thing working in a robust way:

+SET uid:1000:auth fea5e81ac8ca77622bed1c2132a021f9
+SET auth:fea5e81ac8ca77622bed1c2132a021f9 1000
+
In order to authenticate an user we'll do this simple work (login.php): +
  • Get the username and password via the login form
  • Check if the username:<username>:uid key actually exists
  • If it exists we have the user id, (i.e. 1000)
  • Check if uid:1000:password matches, if not, error message
  • Ok authenticated! Set "fea5e81ac8ca77622bed1c2132a021f9" (the value of uid:1000:auth) as "auth" cookie
+This is the actual code:

+include("retwis.php");
+
+# Form sanity checks
+if (!gt("username") || !gt("password"))
+    goback("You need to enter both username and password to login.");
+
+# The form is ok, check if the username is available
+$username = gt("username");
+$password = gt("password");
+$r = redisLink();
+$userid = $r->get("username:$username:id");
+if (!$userid)
+    goback("Wrong username or password");
+$realpassword = $r->get("uid:$userid:password");
+if ($realpassword != $password)
+    goback("Wrong username or password");
+
+# Username / password OK, set the cookie and redirect to index.php
+$authsecret = $r->get("uid:$userid:auth");
+setcookie("auth",$authsecret,time()+3600*24*365);
+header("Location: index.php");
+
This happens every time a user logs in, but we also need a function isLoggedIn in order to check if a given user is already authenticated or not. These are the logical steps performed by the isLoggedIn function: +
  • Get the "auth" cookie from the user. If there is no cookie, the user is not logged in, of course. Let's call the value of this cookie <authcookie>
  • Check if auth:<authcookie> exists, and what the value (the user id) is (1000 in the example).
  • In order to be sure check that uid:1000:auth matches.
  • Ok the user is authenticated, and we loaded a bit of information in the $User global variable.
+The code is simpler than the description, possibly:

+function isLoggedIn() {
+    global $User, $_COOKIE;
+
+    if (isset($User)) return true;
+
+    if (isset($_COOKIE['auth'])) {
+        $r = redisLink();
+        $authcookie = $_COOKIE['auth'];
+        if ($userid = $r->get("auth:$authcookie")) {
+            if ($r->get("uid:$userid:auth") != $authcookie) return false;
+            loadUserInfo($userid);
+            return true;
+        }
+    }
+    return false;
+}
+
+function loadUserInfo($userid) {
+    global $User;
+
+    $r = redisLink();
+    $User['id'] = $userid;
+    $User['username'] = $r->get("uid:$userid:username");
+    return true;
+}
+
loadUserInfo as a separate function is overkill for our application, but it's a good template for a complex application. The only thing still missing from the authentication is the logout. What do we do on logout? That's simple, we'll just change the random string in uid:1000:auth, remove the old auth:<oldauthstring> and add a new auth:<newauthstring>.

Important: the logout procedure explains why we don't just authenticate the user after the lookup of auth:<randomstring>, but double check it against uid:1000:auth. The true authentication string is the latter, the auth:<randomstring> is just an authentication key that may even be volatile, or if there are bugs in the program or a script gets interrupted we may even end with multiple auth:<something> keys pointing to the same user id. The logout code is the following (logout.php):

+include("retwis.php");
+
+if (!isLoggedIn()) {
+    header("Location: index.php");
+    exit;
+}
+
+$r = redisLink();
+$newauthsecret = getrand();
+$userid = $User['id'];
+$oldauthsecret = $r->get("uid:$userid:auth");
+
+$r->set("uid:$userid:auth",$newauthsecret);
+$r->set("auth:$newauthsecret",$userid);
+$r->delete("auth:$oldauthsecret");
+
+header("Location: index.php");
+
That is just what we described and should be simple to understand. +

Updates

+Updates, also known as posts, are even simpler. In order to create a new post on the database we do something like this:

+INCR global:nextPostId => 10343
+SET post:10343 "$owner_id|$time|I'm having fun with Retwis"
+
As you can see, the user id and time of the post are stored directly inside the string; we don't need to look up posts by time or user id in the example application, so it is better to compact everything inside the post string.

After we create a post we obtain the post id. We need to LPUSH this post id in every user that's following the author of the post, and of course in the list of posts of the author. This is the file update.php that shows how this is performed:

+include("retwis.php");
+
+if (!isLoggedIn() || !gt("status")) {
+    header("Location:index.php");
+    exit;
+}
+
+$r = redisLink();
+$postid = $r->incr("global:nextPostId");
+$status = str_replace("\n"," ",gt("status"));
+$post = $User['id']."|".time()."|".$status;
+$r->set("post:$postid",$post);
+$followers = $r->smembers("uid:".$User['id'].":followers");
+if ($followers === false) $followers = Array();
+$followers[] = $User['id']; /* Add the post to our own posts too */
+
+foreach($followers as $fid) {
+    $r->push("uid:$fid:posts",$postid,false);
+}
+# Push the post on the timeline, and trim the timeline to the
+# newest 1000 elements.
+$r->push("global:timeline",$postid,false);
+$r->ltrim("global:timeline",0,1000);
+
+header("Location: index.php");
+
The core of the function is the foreach. We get using SMEMBERS all the followers of the current user, then the loop will LPUSH the post against the uid:<userid>:posts of every follower.

Note that we also maintain a timeline with all the posts. In order to do so what is needed is just to LPUSH the post against global:timeline. Let's face it, do you start thinking it was a bit strange to have to sort things added in chronological order using ORDER BY with SQL? I think so indeed. +

Paginating updates

+Now it should be pretty clear how we can use LRANGE in order to get ranges of posts, and render these posts on the screen. The code is simple:

+function showPost($id) {
+    $r = redisLink();
+    $postdata = $r->get("post:$id");
+    if (!$postdata) return false;
+
+    $aux = explode("|",$postdata);
+    $id = $aux[0];
+    $time = $aux[1];
+    $username = $r->get("uid:$id:username");
+    $post = join(array_splice($aux,2,count($aux)-2),"|");
+    $elapsed = strElapsed($time);
+    $userlink = "<a class=\"username\" href=\"profile.php?u=".urlencode($username)."\">".utf8entities($username)."</a>";
+
+    echo('<div class="post">'.$userlink.' '.utf8entities($post)."<br>");
+    echo('<i>posted '.$elapsed.' ago via web</i></div>');
+    return true;
+}
+
+function showUserPosts($userid,$start,$count) {
+    $r = redisLink();
+    $key = ($userid == -1) ? "global:timeline" : "uid:$userid:posts";
+    $posts = $r->lrange($key,$start,$start+$count);
+    $c = 0;
+    foreach($posts as $p) {
+        if (showPost($p)) $c++;
+        if ($c == $count) break;
+    }
+    return count($posts) == $count+1;
+}
+
showPost will simply convert and print a Post in HTML, while showUserPosts gets a range of posts and passes them to showPost.

Following users

If user id 1000 (antirez) wants to follow user id 1001 (pippo), we can do this with just two SADD:

+SADD uid:1000:following 1001
+SADD uid:1001:followers 1000
+
Note the same pattern again and again, in theory with a relational database the list of following and followers is a single table with fields like following_id and follower_id. With queries you can extract the followers or following of every user. With a key-value DB that's a bit different as we need to set both the 1000 is following 1001 and 1001 is followed by 1000 relations. This is the price to pay, but on the other side accessing the data is simpler and ultra-fast. And having this things as separated sets allows us to do interesting stuff, for example using SINTER we can have the intersection of 'following' of two different users, so we may add a feature to our Twitter clone so that it is able to say you at warp speed, when you visit somebody' else profile, "you and foobar have 34 followers in common" and things like that.

You can find the code that sets or removes a following/follower relation at follow.php. It is trivial as you can see. +

Making it horizontally scalable

+Gentle reader, if you reached this point you are already a hero, thank you. Before talking about scaling horizontally it is worth checking the performance on a single server. Retwis is amazingly fast, without any kind of cache. On a very slow and loaded server, an apache benchmark with 100 parallel clients issuing 100000 requests measured the average pageview to take 5 milliseconds. This means you can serve millions of users every day with just a single Linux box, and this one was monkey asses slow! Go figure with more recent hardware.

So, first of all, probably you will not need more than one server for a lot of applications, even when you have a lot of users. But let's assume we are Twitter and need to handle a huge amount of traffic. What to do? +

Hashing the key

+The first thing to do is to hash the key and issue the request to different servers based on the key hash. There are a lot of well known algorithms to do so, for example check the Redis Ruby client library that implements consistent hashing, but the general idea is that you can turn your key into a number, and then take the remainder of the division of this number by the number of servers you have:

+server_id = crc32(key) % number_of_servers
+
This has a lot of problems, since if you add one server you need to move too many keys, and so on, but this is the general idea even if you use a better hashing scheme like consistent hashing.
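A tiny sketch of that naive scheme (illustrative Python, not part of Retwis; the server addresses are made up):

import zlib

servers = [("10.0.0.1", 6379), ("10.0.0.2", 6379), ("10.0.0.3", 6379)]

def server_for(key):
    # turn the key into a number and take the remainder of the division
    return servers[zlib.crc32(key.encode()) % len(servers)]

print(server_for("uid:1000:username"))
print(server_for("uid:1000:followers"))

Different keys of the same user may land on different servers, which is fine as long as no inter-key operation (like SINTER) has to touch keys living on two different servers.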

Ok, are key accesses distributed among the key space? Well, all the user data will be partitioned among different servers. There are no inter-keys operations used (like SINTER, otherwise you need to care that things you want to intersect will end in the same server. This is why Redis unlike memcached does not force a specific hashing scheme, it's application specific). Btw there are keys that are accessed more frequently.

Special keys

For example every time we post a new message, we need to increment the global:nextPostId key. How to fix this problem? A single server will get a lot of increments. The simplest way to handle this is to have a dedicated server just for increments. This is probably overkill, btw, unless you have really a lot of traffic. There is another trick: the ID does not really need to be an incremental number, it just needs to be unique. So you can use a random string long enough to be unlikely (almost impossible, if it's md5-sized) to collide, and you are done. We successfully eliminated our main problem and made the system really horizontally scalable!
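A minimal sketch of the random-ID trick (illustrative Python, not from Retwis):

import os

def new_post_id():
    # 128 random bits, md5-sized: collisions are practically impossible,
    # and no single Redis server has to serve all the INCR traffic
    return os.urandom(16).hex()

post_id = new_post_id()
# then, as in update.php: SET post:<post_id> "<owner_id>|<time>|<status>"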

There is another one: global:timeline. There is no fix for this; if you need to keep something in order you can split it among different servers and then merge when you need to get the data back, or keep it ordered and use a single key. Again, if you really have that many posts per second, you can use a single server just for this. Remember that with commodity hardware Redis is able to handle 100000 writes per second, and that's enough even for Twitter, I guess.

Please feel free to use the comments below for questions and feedbacks. + +
+ +
+
+ + + diff --git a/doc/TypeCommand.html b/doc/TypeCommand.html new file mode 100644 index 00000000..9311d324 --- /dev/null +++ b/doc/TypeCommand.html @@ -0,0 +1,44 @@ + + + + + + + +
+ + + +
+
+ +TypeCommand: Contents
  TYPE _key_
    Return value
    See also +
+ +

TypeCommand

+ +
+ +
+ +
+

TYPE _key_

+Time complexity: O(1)
Return the type of the value stored at key in the form of a string. The type can be one of "none", "string", "list", "set". "none" is returned if the key does not exist.
+

Return value

Single line reply, specifically:

+"none" if the key does not exist
+"string" if the key contains a String value
+"list" if the key contains a List value
+"set" if the key contains a Set value
+

See also

+ +
+ +
+
+ + + diff --git a/doc/VersionControl.html b/doc/VersionControl.html new file mode 100644 index 00000000..bb249a32 --- /dev/null +++ b/doc/VersionControl.html @@ -0,0 +1,40 @@ + + + + + + + +
+ + + +
+
+ +VersionControl: Contents
  VERSION
    Return value +
+ +

VersionControl

+ +
+ +
+ +
+

VERSION

Return the server version as a float string. Example:

+VERSION
+0.07
+
It is guaranteed that if you compare versions as floats newer versions will be greater than older versions.

Return value

Single line reply + +
+ +
+
+ + + diff --git a/doc/index.html b/doc/index.html new file mode 100644 index 00000000..0b040564 --- /dev/null +++ b/doc/index.html @@ -0,0 +1,36 @@ + + + + + + + +
+ + + +
+
+ +index: Contents
  Redis Documentation +
+ +

index

+ +
+ +
+ +
+

Redis Documentation

Hello! The following are pointers to different parts of the Redis Documentation.

+
+ +
+
+ + + diff --git a/doc/redis.png b/doc/redis.png new file mode 100644 index 0000000000000000000000000000000000000000..4578f00c32ec1b768cd3dc4e98edbe1721506c8e GIT binary patch literal 4852 zcmV000X0X+uL$Nkc;* zP;zf(X>4Tx0C)kFR(U+sTN^)P>|3^wqKUH8Sd%1-HOiVL$}$GSFw+=g-}haX>=bD% z*~wC=Y?o55vP6_fk_sWM=KhBF-uwP>@8|uz=bz_$p5OO*p7WgNcg}MFVCQot5O8z= z0B;`>(ZWCnVQY6D!So!k0uI0qFXL2-O|3dmVH3tz2 z2mlNL5%}D9u`&(2>|#S2_9qcY5XXUr(H_pO5GFuamS|;xf^ZRpIo)^hO&WID#dm0! zjCQBl4gtW(1063>B#up6$>ngwNnFXolj8Z2Ya0$N@l*8-Uudzjf;tJNpCUANF-qQrvL|W7h$I;U$&2 zE)K0H6M$crsnj23RO+vCXl>H~+{dB)h~ynEw0PKK0RGo}H_!iAz@dUbMTK35YtcVt zJkNBOMT#|*ZHfcQ8L+2|n~6u8H-@iu@3Me^AZj08D0Ba<1J8sv53(bq51kOT5OX|y z=}3Thj6|Yjrc{n}zD&Msp4=7rjH9W@7=>^}qLQcb1r-Zb-D4_h636+~S=6^qENYCO z?A2^KRiu@ujn}b5Y3hpUG3&k4e`r84j4^UPt!B()yl8UgOy*fc6goS(It^%t94wtjXBcAw5y+q*g(beOzw`J%BSz2jXcvhz{rMVG5+J69gpAxylR zj@!2TZI1x#G3-~*4zCcLjyH?$ryahy>HbgNdt250bM|FsW*39BFgu zZ5e5o-7+<^1hdw&hjVJLq~zlA&gLr?2o_Ka-xrM)cb7DjmR?Q07EMXo{<4*J4p04qG z%MTd35B2Eudh})X_x!dssQl3PQOnTA6Xjvj?=2(i&y+?n4>7)!z(GPoK@O z%%sCy!LrDv#E$2v7@qojLYOZR+@>`sj(L8jnv7 zXm+1!*Q(bp)yYCd>Js!W=o=ZR8X}B1jJ8hC89y=UJVQB~V;XBlHpg0Av^-~Je$LF= z+{W70!47ktU?1v`e4*fC4OH{Jl?peD z+KYorv`QIDhpuK_b0`xpTPyFno?3C?#!(6#WxSG76 z8grZ6npImETgF={w*uRY+eO+xcMNov+$P^Ky(`tl*tKx4?|#*T)NcPCmtONe-G24o zjt!^|YCbf2F{Jr>c*oPONbXJRecNjLH*&A`^sz)0=%5o10RVDj1KGoWuYYfVZ=g_McaUc=XK+J^Wyoe|R+vKANVr!7W5l&cjmT$FUeOHEg)s^- z1F?>=>v0M3!tqTB`Uz8s_#}8zZn8}B{S>Q|rPQ!A-n7be&GfMhugmbu1(`~jLs{Eh7|D^H5MBczbf%7WrJ=6rK|nd?5}++izw$S zue`2t{r3vDik~;KDYBHlN|#D%RdMyH>V=w=o60vAYYXa3>-p*@8*VhlH+eMsv;?R{t=U>3lzb5i0HKvxP zuf9ajD$a4efxTsYcl3S0hmDo?&#%8M(dxh3?`S;%&Imy6WdLm30XU}(K+0aIz1aZ3 zUkQL22LM*b0SspiV8vVjX5$3lAb{3C06pLa;s6D(AP@WwghP?$aL@F9`H5# zI0gxZF~&3|D`p86CYBA>Piz|;%$yQ?EVvT6hj|b4CGrD-#C_sI69-B}LJ=gG3t7ZG?+sH2X z{qCNKe!W4?N7GMgN5V%fCJs+-Oy7SMH>dyR=VH}+!w*v{_G^P1qF*py)4mt}Ox(7n z?H6DGyg&w+05YfnOE4rX3N}NBqHBiB!W-$e=qDLM8D$w4n5vnBSgcsp*<{!yIOI4_ z?Xl;I;_l#~@>%U|6A%}?DkQqUT3A)22Vrt(4C|GfJ7bHPt=S zKif0k@-}bD`vdaR%Qe#a>KD_msoyJq7H+%#m9I1~27aIlEW+er;jk&XQ*?E3Nq7yt zI{h;Sf5sz>ub8eglUPhyRoKMY#W-X*PeS=dbGPw)YH?M`fKPHC^{9XG5uF*Dn5#cqpmuyQKLuwWy7 z7>SwwBEjVmLNN`ADQTry)A>pzndNV*4%CS>jkZa2{_Ik{-`gYAmo)%;NE~`QY(FwG zsz2U2Df1$4hC1ss*RjC7XtETuT>G(aWoYf*deNry7Tec??<_xTen$O@-*%z$P_wCT zX!-8G`Do_>Ak2t(H!Kd1Ks)2GE<|S%9^r~bn4okKXgm&2#O{!2JYWWh01w=tGk}96 z0-%92gs{K`(mMmvj?NXDBcT5V1$1{9_#G}0(*7Tz{tWrgl7tC>-lzbe@Pt4j*4=}I z(1u>y7=)1zT3!~Rh(xOWv)!NC-FGc*#>fRgks}S=7sf=;-|2><{9 z32;bRa{vGf6951U69E94oEQKA2c=0wK~#7F?VP_)9Y+wyA5lbOnjl3;Nn=6f6lowX z6AJSOq=1Sd6*TE-;|}^1fwWX0|8uAGfPsY1@yL)$g?_p<8 z5uV$fAK&@z?A+daPno*EzaQ1B9|NEDGq2YFFi=S>PzRO7N@4*93b?41SV=6vKmixE z5-W)X7%1SPR$?Ww00RYFd|trEL|t58U!R_y^7rE60)gxN{CwPqcz6DIukGz^pRe(! 
zGm3`{qV#dUc=2L&brof*AntI8*;lV#EiNu9sv$zeeI`i67>K*OyA~xfBaCxF+{VU+ zCV;TDwG|IJ#O%Srfno^y2kI9O7eY{>&tRHF@qTi0q9{S^grop1%j4$}Y80M>O>XDs z=S2Y_sL*FHmqcPH%HU#NU0o?qG=n;QfW~c;BvOTfA0h6l!)&p0BH$=MCHnC2Q02!U zXmeMgB&vMkHEN{FRA-1?6M>qnL~Q9fToEUor;1gg(8pt11Ws*A=pzwWSFRzz6ee*9 zh>zdrP$b%ibrTCmM@MUGYqPVnQ&Uq*OG`UDJG^)_DeE{odwY8rE-WlyDA&r$iYf#J zwIXOxfkDSDkTy3rc?D|H|8G3M5vWb1C+b5me#+rdE*qaSCY_#xx;V$2vs45U%II*3 zAyNDtYR&P6i8dLc1i^zxRvot}aE8;vFW9WZpj_;TgLm!jBpGSpirAhvIlU{7DEL@$ zsChaH&I8xl;?`j~NTPVgk7ElY%eP(F9YZpfpvx)3uHweYOvY+A#^_Cr(DriuyWT zNYqrV5GfOlsvF00q_bdEhc+KP%Xp+B`jDu5h$uF$0!s>ANF)-Vxezo%5F{UDy5?+8 zWaqN3G#ThtW+TY}Ca$KlXgYqZEo$tlct>)~x$Q!t#i1=kML7^mx z1hA{e3j5_|ixC9}Jt;vGpe?_aAhJS%)w0PD#S-nzie}R`66a1tF#&^EvAb-T9}y zOn0L8Odu^vUYmhpUf=u`*XCooT;0yR_2W* zKi(&)SxGFgtby%Z!0U%DXbzW`mmiL`yIWshZ)eb~4U(QXEcEs7(6>W z>l1ixZq6aGZs$IH{%c!FtRxm-pn!`&PvoU4`TL-FWDeVLg^siRk9c1_9ld?mF?zFE zfW+zPX?ka~$>e4ru|JP*dp%Wx$p=Ub>OC9`S|3yV(?HO9{ve+Kb$BK1xv`S)T9DY` zWFV&j=VP=aricOz_B`c9oOhneC9!h=4EeQWAM>)dL%i3IgT%5dboNFS(+%31Qvu1H zG!7D9|8@NO&2c9B7# zLN5|CL;m#rkNlUZJe(R-7W{D6;v5Hw#B)l+B5-6kFO#zbQN*M|cY zi>^es)1sOOg?hvZ($a2lOqJLU)M(UUQxE$5jpKi$Bo6)IRh*QzvvoVSWh|gyNeqzE zcBt2O9`5ATHjT;Z_abq!(7Q$F`IBt8e9P3z`Tvf@A-;zD1b^ho&u4k5x242axcm_}Pp6C|-LK{?*)(>aHwgia+fK~T + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include +#include +#include +#include + +#include "anet.h" +#include "sds.h" +#include "adlist.h" +#include "zmalloc.h" + +#define REDIS_CMD_INLINE 1 +#define REDIS_CMD_BULK 2 +#define REDIS_CMD_INTREPLY 4 +#define REDIS_CMD_RETCODEREPLY 8 +#define REDIS_CMD_BULKREPLY 16 +#define REDIS_CMD_MULTIBULKREPLY 32 +#define REDIS_CMD_SINGLELINEREPLY 64 + +#define REDIS_NOTUSED(V) ((void) V) + +static struct config { + char *hostip; + int hostport; +} config; + +struct redisCommand { + char *name; + int arity; + int flags; +}; + +static struct redisCommand cmdTable[] = { + {"get",2,REDIS_CMD_INLINE|REDIS_CMD_BULKREPLY}, + {"set",3,REDIS_CMD_BULK|REDIS_CMD_RETCODEREPLY}, + {"setnx",3,REDIS_CMD_BULK|REDIS_CMD_INTREPLY}, + {"del",2,REDIS_CMD_INLINE|REDIS_CMD_INTREPLY}, + {"exists",2,REDIS_CMD_INLINE|REDIS_CMD_INTREPLY}, + {"incr",2,REDIS_CMD_INLINE|REDIS_CMD_INTREPLY}, + {"decr",2,REDIS_CMD_INLINE|REDIS_CMD_INTREPLY}, + {"rpush",3,REDIS_CMD_BULK|REDIS_CMD_RETCODEREPLY}, + {"lpush",3,REDIS_CMD_BULK|REDIS_CMD_RETCODEREPLY}, + {"rpop",2,REDIS_CMD_INLINE|REDIS_CMD_BULKREPLY}, + {"lpop",2,REDIS_CMD_INLINE|REDIS_CMD_BULKREPLY}, + {"llen",2,REDIS_CMD_INLINE|REDIS_CMD_INTREPLY}, + {"lindex",3,REDIS_CMD_INLINE|REDIS_CMD_BULKREPLY}, + {"lset",4,REDIS_CMD_BULK|REDIS_CMD_RETCODEREPLY}, + {"lrange",4,REDIS_CMD_INLINE|REDIS_CMD_MULTIBULKREPLY}, + {"ltrim",4,REDIS_CMD_INLINE|REDIS_CMD_RETCODEREPLY}, + {"lrem",4,REDIS_CMD_BULK|REDIS_CMD_INTREPLY}, + {"sadd",3,REDIS_CMD_BULK|REDIS_CMD_INTREPLY}, + {"srem",3,REDIS_CMD_BULK|REDIS_CMD_INTREPLY}, + {"sismember",3,REDIS_CMD_BULK|REDIS_CMD_INTREPLY}, + {"scard",2,REDIS_CMD_INLINE|REDIS_CMD_INTREPLY}, + {"sinter",-2,REDIS_CMD_INLINE|REDIS_CMD_MULTIBULKREPLY}, + {"sinterstore",-3,REDIS_CMD_INLINE|REDIS_CMD_RETCODEREPLY}, + {"smembers",2,REDIS_CMD_INLINE|REDIS_CMD_MULTIBULKREPLY}, + {"incrby",3,REDIS_CMD_INLINE|REDIS_CMD_INTREPLY}, + {"decrby",3,REDIS_CMD_INLINE|REDIS_CMD_INTREPLY}, + {"randomkey",1,REDIS_CMD_INLINE|REDIS_CMD_SINGLELINEREPLY}, + {"select",2,REDIS_CMD_INLINE|REDIS_CMD_RETCODEREPLY}, + {"move",3,REDIS_CMD_INLINE|REDIS_CMD_INTREPLY}, + {"rename",3,REDIS_CMD_INLINE|REDIS_CMD_RETCODEREPLY}, + {"renamenx",3,REDIS_CMD_INLINE|REDIS_CMD_INTREPLY}, + {"keys",2,REDIS_CMD_INLINE|REDIS_CMD_BULKREPLY}, + {"dbsize",1,REDIS_CMD_INLINE|REDIS_CMD_INTREPLY}, + {"ping",1,REDIS_CMD_INLINE|REDIS_CMD_RETCODEREPLY}, + {"echo",2,REDIS_CMD_BULK|REDIS_CMD_BULKREPLY}, + {"save",1,REDIS_CMD_INLINE|REDIS_CMD_RETCODEREPLY}, + {"bgsave",1,REDIS_CMD_INLINE|REDIS_CMD_RETCODEREPLY}, + {"shutdown",1,REDIS_CMD_INLINE|REDIS_CMD_RETCODEREPLY}, + {"lastsave",1,REDIS_CMD_INLINE|REDIS_CMD_INTREPLY}, + {"type",2,REDIS_CMD_INLINE|REDIS_CMD_SINGLELINEREPLY}, + {"flushdb",1,REDIS_CMD_INLINE|REDIS_CMD_RETCODEREPLY}, + {"flushall",1,REDIS_CMD_INLINE|REDIS_CMD_RETCODEREPLY}, + {"sort",-2,REDIS_CMD_INLINE|REDIS_CMD_MULTIBULKREPLY}, + {"version",1,REDIS_CMD_INLINE|REDIS_CMD_SINGLELINEREPLY}, + {NULL,0,0} +}; + +static struct redisCommand *lookupCommand(char *name) { + int j = 0; + while(cmdTable[j].name != NULL) { + if (!strcasecmp(name,cmdTable[j].name)) return &cmdTable[j]; + j++; + } + return NULL; +} + +static int cliConnect(void) { + char err[ANET_ERR_LEN]; + int fd; + + fd = anetTcpConnect(err,config.hostip,config.hostport); + if (fd == ANET_ERR) { + fprintf(stderr,"Connect: %s\n",err); + return -1; + } + anetTcpNoDelay(NULL,fd); + return fd; +} + +static sds cliReadLine(int fd) { + sds line = sdsempty(); + + while(1) { + char c; + + if (read(fd,&c,1) == -1) { + sdsfree(line); + return NULL; + } 
else if (c == '\n') { + break; + } else { + line = sdscatlen(line,&c,1); + } + } + return sdstrim(line,"\r\n"); +} + +static int cliReadInlineReply(int fd, int type) { + sds reply = cliReadLine(fd); + + if (reply == NULL) return 1; + printf("%s\n", reply); + if (type == REDIS_CMD_SINGLELINEREPLY) return 0; + if (type == REDIS_CMD_INTREPLY) return atoi(reply) < 0; + if (type == REDIS_CMD_RETCODEREPLY) return reply[0] == '-'; + return 0; +} + +static int cliReadBulkReply(int fd, int multibulk) { + sds replylen = cliReadLine(fd); + char *reply, crlf[2]; + int bulklen, error = 0; + + if (replylen == NULL) return 1; + if (strcmp(replylen,"nil") == 0) { + sdsfree(replylen); + printf("(nil)\n"); + return 0; + } + bulklen = atoi(replylen); + if (multibulk && bulklen == -1) { + sdsfree(replylen); + printf("(nil)"); + return 0; + } + if (bulklen < 0) { + bulklen = -bulklen; + error = 1; + } + reply = zmalloc(bulklen); + anetRead(fd,reply,bulklen); + anetRead(fd,crlf,2); + if (bulklen && fwrite(reply,bulklen,1,stdout) == 0) { + zfree(reply); + return 1; + } + if (!multibulk && isatty(fileno(stdout)) && reply[bulklen-1] != '\n') + printf("\n"); + zfree(reply); + return error; +} + +static int cliReadMultiBulkReply(int fd) { + sds replylen = cliReadLine(fd); + int elements, c = 1; + + if (replylen == NULL) return 1; + if (strcmp(replylen,"nil") == 0) { + sdsfree(replylen); + printf("(nil)\n"); + return 0; + } + elements = atoi(replylen); + while(elements--) { + printf("%d. ", c); + if (cliReadBulkReply(fd,1)) return 1; + printf("\n"); + c++; + } + return 0; +} + +static int cliSendCommand(int argc, char **argv) { + struct redisCommand *rc = lookupCommand(argv[0]); + int fd, j, retval = 0; + sds cmd = sdsempty(); + + if (!rc) { + fprintf(stderr,"Unknown command '%s'\n",argv[0]); + return 1; + } + + if ((rc->arity > 0 && argc != rc->arity) || + (rc->arity < 0 && argc < rc->arity)) { + fprintf(stderr,"Wrong number of arguments for '%s'\n",rc->name); + return 1; + } + if ((fd = cliConnect()) == -1) return 1; + + /* Build the command to send */ + for (j = 0; j < argc; j++) { + if (j != 0) cmd = sdscat(cmd," "); + if (j == argc-1 && rc->flags & REDIS_CMD_BULK) { + cmd = sdscatprintf(cmd,"%d",sdslen(argv[j])); + } else { + cmd = sdscatlen(cmd,argv[j],sdslen(argv[j])); + } + } + cmd = sdscat(cmd,"\r\n"); + if (rc->flags & REDIS_CMD_BULK) { + cmd = sdscatlen(cmd,argv[argc-1],sdslen(argv[argc-1])); + cmd = sdscat(cmd,"\r\n"); + } + anetWrite(fd,cmd,sdslen(cmd)); + if (rc->flags & REDIS_CMD_INTREPLY) { + retval = cliReadInlineReply(fd,REDIS_CMD_INTREPLY); + } else if (rc->flags & REDIS_CMD_RETCODEREPLY) { + retval = cliReadInlineReply(fd,REDIS_CMD_RETCODEREPLY); + } else if (rc->flags & REDIS_CMD_SINGLELINEREPLY) { + retval = cliReadInlineReply(fd,REDIS_CMD_SINGLELINEREPLY); + } else if (rc->flags & REDIS_CMD_BULKREPLY) { + retval = cliReadBulkReply(fd,0); + } else if (rc->flags & REDIS_CMD_MULTIBULKREPLY) { + retval = cliReadMultiBulkReply(fd); + } + if (retval) { + close(fd); + return retval; + } + close(fd); + return 0; +} + +static int parseOptions(int argc, char **argv) { + int i; + + for (i = 1; i < argc; i++) { + int lastarg = i==argc-1; + + if (!strcmp(argv[i],"-h") && !lastarg) { + char *ip = zmalloc(32); + if (anetResolve(NULL,argv[i+1],ip) == ANET_ERR) { + printf("Can't resolve %s\n", argv[i]); + exit(1); + } + config.hostip = ip; + i++; + } else if (!strcmp(argv[i],"-p") && !lastarg) { + config.hostport = atoi(argv[i+1]); + i++; + } else { + break; + } + } + return i; +} + +static sds 
readArgFromStdin(void) { + char buf[1024]; + sds arg = sdsempty(); + + while(1) { + int nread = read(fileno(stdin),buf,1024); + + if (nread == 0) break; + else if (nread == -1) { + perror("Reading from standard input"); + exit(1); + } + arg = sdscatlen(arg,buf,nread); + } + return arg; +} + +int main(int argc, char **argv) { + int firstarg, j; + char **argvcopy; + + config.hostip = "127.0.0.1"; + config.hostport = 6379; + + firstarg = parseOptions(argc,argv); + argc -= firstarg; + argv += firstarg; + + /* Turn the plain C strings into Sds strings */ + argvcopy = zmalloc(sizeof(char*)*argc+1); + for(j = 0; j < argc; j++) + argvcopy[j] = sdsnew(argv[j]); + + /* Read the last argument from stdandard input */ + if (!isatty(fileno(stdin))) { + sds lastarg = readArgFromStdin(); + argvcopy[argc] = lastarg; + argc++; + } + + if (argc < 1) { + fprintf(stderr, "usage: redis-cli [-h host] [-p port] cmd arg1 arg2 arg3 ... argN\n"); + fprintf(stderr, "usage: echo \"argN\" | redis-cli [-h host] [-p port] cmd arg1 arg2 ... arg(N-1)\n"); + fprintf(stderr, "\nIf a pipe from standard input is detected this data is used as last argument.\n\n"); + fprintf(stderr, "example: cat /etc/passwd | redis-cli set my_passwd\n"); + fprintf(stderr, "example: redis-cli get my_passwd\n"); + exit(1); + } + + return cliSendCommand(argc, argvcopy); +} diff --git a/redis.c b/redis.c new file mode 100644 index 00000000..7c2b9a40 --- /dev/null +++ b/redis.c @@ -0,0 +1,3037 @@ +/* + * Copyright (c) 2006-2009, Salvatore Sanfilippo + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#define REDIS_VERSION "0.07" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ae.h" /* Event driven programming library */ +#include "sds.h" /* Dynamic safe strings */ +#include "anet.h" /* Networking the easy way */ +#include "dict.h" /* Hash tables */ +#include "adlist.h" /* Linked lists */ +#include "zmalloc.h" /* total memory usage aware version of malloc/free */ + +/* Error codes */ +#define REDIS_OK 0 +#define REDIS_ERR -1 + +/* Static server configuration */ +#define REDIS_SERVERPORT 6379 /* TCP port */ +#define REDIS_MAXIDLETIME (60*5) /* default client timeout */ +#define REDIS_QUERYBUF_LEN 1024 +#define REDIS_LOADBUF_LEN 1024 +#define REDIS_MAX_ARGS 16 +#define REDIS_DEFAULT_DBNUM 16 +#define REDIS_CONFIGLINE_MAX 1024 +#define REDIS_OBJFREELIST_MAX 1000000 /* Max number of objects to cache */ +#define REDIS_MAX_SYNC_TIME 60 /* Slave can't take more to sync */ + +/* Hash table parameters */ +#define REDIS_HT_MINFILL 10 /* Minimal hash table fill 10% */ +#define REDIS_HT_MINSLOTS 16384 /* Never resize the HT under this */ + +/* Command flags */ +#define REDIS_CMD_BULK 1 +#define REDIS_CMD_INLINE 2 + +/* Object types */ +#define REDIS_STRING 0 +#define REDIS_LIST 1 +#define REDIS_SET 2 +#define REDIS_HASH 3 +#define REDIS_SELECTDB 254 +#define REDIS_EOF 255 + +/* Client flags */ +#define REDIS_CLOSE 1 /* This client connection should be closed ASAP */ +#define REDIS_SLAVE 2 /* This client is a slave server */ +#define REDIS_MASTER 4 /* This client is a master server */ + +/* Server replication state */ +#define REDIS_REPL_NONE 0 /* No active replication */ +#define REDIS_REPL_CONNECT 1 /* Must connect to master */ +#define REDIS_REPL_CONNECTED 2 /* Connected to master */ + +/* List related stuff */ +#define REDIS_HEAD 0 +#define REDIS_TAIL 1 + +/* Sort operations */ +#define REDIS_SORT_GET 0 +#define REDIS_SORT_DEL 1 +#define REDIS_SORT_INCR 2 +#define REDIS_SORT_DECR 3 +#define REDIS_SORT_ASC 4 +#define REDIS_SORT_DESC 5 +#define REDIS_SORTKEY_MAX 1024 + +/* Log levels */ +#define REDIS_DEBUG 0 +#define REDIS_NOTICE 1 +#define REDIS_WARNING 2 + +/* Anti-warning macro... */ +#define REDIS_NOTUSED(V) ((void) V) + +/*================================= Data types ============================== */ + +/* A redis object, that is a type able to hold a string / list / set */ +typedef struct redisObject { + int type; + void *ptr; + int refcount; +} robj; + +/* With multiplexing we need to take per-clinet state. + * Clients are taken in a liked list. */ +typedef struct redisClient { + int fd; + dict *dict; + int dictid; + sds querybuf; + robj *argv[REDIS_MAX_ARGS]; + int argc; + int bulklen; /* bulk read len. 
-1 if not in bulk read mode */ + list *reply; + int sentlen; + time_t lastinteraction; /* time of the last interaction, used for timeout */ + int flags; /* REDIS_CLOSE | REDIS_SLAVE */ + int slaveseldb; /* slave selected db, if this client is a slave */ +} redisClient; + +struct saveparam { + time_t seconds; + int changes; +}; + +/* Global server state structure */ +struct redisServer { + int port; + int fd; + dict **dict; + long long dirty; /* changes to DB from the last save */ + list *clients; + list *slaves; + char neterr[ANET_ERR_LEN]; + aeEventLoop *el; + int cronloops; /* number of times the cron function run */ + list *objfreelist; /* A list of freed objects to avoid malloc() */ + time_t lastsave; /* Unix time of last save succeeede */ + int usedmemory; /* Used memory in megabytes */ + /* Fields used only for stats */ + time_t stat_starttime; /* server start time */ + long long stat_numcommands; /* number of processed commands */ + long long stat_numconnections; /* number of connections received */ + /* Configuration */ + int verbosity; + int glueoutputbuf; + int maxidletime; + int dbnum; + int daemonize; + int bgsaveinprogress; + struct saveparam *saveparams; + int saveparamslen; + char *logfile; + char *bindaddr; + char *dbfilename; + /* Replication related */ + int isslave; + char *masterhost; + int masterport; + redisClient *master; + int replstate; + /* Sort parameters - qsort_r() is only available under BSD so we + * have to take this state global, in order to pass it to sortCompare() */ + int sort_desc; + int sort_alpha; + int sort_bypattern; +}; + +typedef void redisCommandProc(redisClient *c); +struct redisCommand { + char *name; + redisCommandProc *proc; + int arity; + int flags; +}; + +typedef struct _redisSortObject { + robj *obj; + union { + double score; + robj *cmpobj; + } u; +} redisSortObject; + +typedef struct _redisSortOperation { + int type; + robj *pattern; +} redisSortOperation; + +struct sharedObjectsStruct { + robj *crlf, *ok, *err, *zerobulk, *nil, *zero, *one, *pong, *space, + *minus1, *minus2, *minus3, *minus4, + *wrongtypeerr, *nokeyerr, *wrongtypeerrbulk, *nokeyerrbulk, + *syntaxerr, *syntaxerrbulk, + *select0, *select1, *select2, *select3, *select4, + *select5, *select6, *select7, *select8, *select9; +} shared; + +/*================================ Prototypes =============================== */ + +static void freeStringObject(robj *o); +static void freeListObject(robj *o); +static void freeSetObject(robj *o); +static void decrRefCount(void *o); +static robj *createObject(int type, void *ptr); +static void freeClient(redisClient *c); +static int loadDb(char *filename); +static void addReply(redisClient *c, robj *obj); +static void addReplySds(redisClient *c, sds s); +static void incrRefCount(robj *o); +static int saveDbBackground(char *filename); +static robj *createStringObject(char *ptr, size_t len); +static void replicationFeedSlaves(struct redisCommand *cmd, int dictid, robj **argv, int argc); +static int syncWithMaster(void); + +static void pingCommand(redisClient *c); +static void echoCommand(redisClient *c); +static void setCommand(redisClient *c); +static void setnxCommand(redisClient *c); +static void getCommand(redisClient *c); +static void delCommand(redisClient *c); +static void existsCommand(redisClient *c); +static void incrCommand(redisClient *c); +static void decrCommand(redisClient *c); +static void incrbyCommand(redisClient *c); +static void decrbyCommand(redisClient *c); +static void selectCommand(redisClient *c); +static void 
randomkeyCommand(redisClient *c); +static void keysCommand(redisClient *c); +static void dbsizeCommand(redisClient *c); +static void lastsaveCommand(redisClient *c); +static void saveCommand(redisClient *c); +static void bgsaveCommand(redisClient *c); +static void shutdownCommand(redisClient *c); +static void moveCommand(redisClient *c); +static void renameCommand(redisClient *c); +static void renamenxCommand(redisClient *c); +static void lpushCommand(redisClient *c); +static void rpushCommand(redisClient *c); +static void lpopCommand(redisClient *c); +static void rpopCommand(redisClient *c); +static void llenCommand(redisClient *c); +static void lindexCommand(redisClient *c); +static void lrangeCommand(redisClient *c); +static void ltrimCommand(redisClient *c); +static void typeCommand(redisClient *c); +static void lsetCommand(redisClient *c); +static void saddCommand(redisClient *c); +static void sremCommand(redisClient *c); +static void sismemberCommand(redisClient *c); +static void scardCommand(redisClient *c); +static void sinterCommand(redisClient *c); +static void sinterstoreCommand(redisClient *c); +static void syncCommand(redisClient *c); +static void flushdbCommand(redisClient *c); +static void flushallCommand(redisClient *c); +static void sortCommand(redisClient *c); +static void lremCommand(redisClient *c); +static void infoCommand(redisClient *c); + +/*================================= Globals ================================= */ + +/* Global vars */ +static struct redisServer server; /* server global state */ +static struct redisCommand cmdTable[] = { + {"get",getCommand,2,REDIS_CMD_INLINE}, + {"set",setCommand,3,REDIS_CMD_BULK}, + {"setnx",setnxCommand,3,REDIS_CMD_BULK}, + {"del",delCommand,2,REDIS_CMD_INLINE}, + {"exists",existsCommand,2,REDIS_CMD_INLINE}, + {"incr",incrCommand,2,REDIS_CMD_INLINE}, + {"decr",decrCommand,2,REDIS_CMD_INLINE}, + {"rpush",rpushCommand,3,REDIS_CMD_BULK}, + {"lpush",lpushCommand,3,REDIS_CMD_BULK}, + {"rpop",rpopCommand,2,REDIS_CMD_INLINE}, + {"lpop",lpopCommand,2,REDIS_CMD_INLINE}, + {"llen",llenCommand,2,REDIS_CMD_INLINE}, + {"lindex",lindexCommand,3,REDIS_CMD_INLINE}, + {"lset",lsetCommand,4,REDIS_CMD_BULK}, + {"lrange",lrangeCommand,4,REDIS_CMD_INLINE}, + {"ltrim",ltrimCommand,4,REDIS_CMD_INLINE}, + {"lrem",lremCommand,4,REDIS_CMD_BULK}, + {"sadd",saddCommand,3,REDIS_CMD_BULK}, + {"srem",sremCommand,3,REDIS_CMD_BULK}, + {"sismember",sismemberCommand,3,REDIS_CMD_BULK}, + {"scard",scardCommand,2,REDIS_CMD_INLINE}, + {"sinter",sinterCommand,-2,REDIS_CMD_INLINE}, + {"sinterstore",sinterstoreCommand,-3,REDIS_CMD_INLINE}, + {"smembers",sinterCommand,2,REDIS_CMD_INLINE}, + {"incrby",incrbyCommand,3,REDIS_CMD_INLINE}, + {"decrby",decrbyCommand,3,REDIS_CMD_INLINE}, + {"randomkey",randomkeyCommand,1,REDIS_CMD_INLINE}, + {"select",selectCommand,2,REDIS_CMD_INLINE}, + {"move",moveCommand,3,REDIS_CMD_INLINE}, + {"rename",renameCommand,3,REDIS_CMD_INLINE}, + {"renamenx",renamenxCommand,3,REDIS_CMD_INLINE}, + {"keys",keysCommand,2,REDIS_CMD_INLINE}, + {"dbsize",dbsizeCommand,1,REDIS_CMD_INLINE}, + {"ping",pingCommand,1,REDIS_CMD_INLINE}, + {"echo",echoCommand,2,REDIS_CMD_BULK}, + {"save",saveCommand,1,REDIS_CMD_INLINE}, + {"bgsave",bgsaveCommand,1,REDIS_CMD_INLINE}, + {"shutdown",shutdownCommand,1,REDIS_CMD_INLINE}, + {"lastsave",lastsaveCommand,1,REDIS_CMD_INLINE}, + {"type",typeCommand,2,REDIS_CMD_INLINE}, + {"sync",syncCommand,1,REDIS_CMD_INLINE}, + {"flushdb",flushdbCommand,1,REDIS_CMD_INLINE}, + {"flushall",flushallCommand,1,REDIS_CMD_INLINE}, + 
{"sort",sortCommand,-2,REDIS_CMD_INLINE}, + {"info",infoCommand,1,REDIS_CMD_INLINE}, + {NULL,NULL,0,0} +}; + +/*============================ Utility functions ============================ */ + +/* Glob-style pattern matching. */ +int stringmatchlen(const char *pattern, int patternLen, + const char *string, int stringLen, int nocase) +{ + while(patternLen) { + switch(pattern[0]) { + case '*': + while (pattern[1] == '*') { + pattern++; + patternLen--; + } + if (patternLen == 1) + return 1; /* match */ + while(stringLen) { + if (stringmatchlen(pattern+1, patternLen-1, + string, stringLen, nocase)) + return 1; /* match */ + string++; + stringLen--; + } + return 0; /* no match */ + break; + case '?': + if (stringLen == 0) + return 0; /* no match */ + string++; + stringLen--; + break; + case '[': + { + int not, match; + + pattern++; + patternLen--; + not = pattern[0] == '^'; + if (not) { + pattern++; + patternLen--; + } + match = 0; + while(1) { + if (pattern[0] == '\\') { + pattern++; + patternLen--; + if (pattern[0] == string[0]) + match = 1; + } else if (pattern[0] == ']') { + break; + } else if (patternLen == 0) { + pattern--; + patternLen++; + break; + } else if (pattern[1] == '-' && patternLen >= 3) { + int start = pattern[0]; + int end = pattern[2]; + int c = string[0]; + if (start > end) { + int t = start; + start = end; + end = t; + } + if (nocase) { + start = tolower(start); + end = tolower(end); + c = tolower(c); + } + pattern += 2; + patternLen -= 2; + if (c >= start && c <= end) + match = 1; + } else { + if (!nocase) { + if (pattern[0] == string[0]) + match = 1; + } else { + if (tolower((int)pattern[0]) == tolower((int)string[0])) + match = 1; + } + } + pattern++; + patternLen--; + } + if (not) + match = !match; + if (!match) + return 0; /* no match */ + string++; + stringLen--; + break; + } + case '\\': + if (patternLen >= 2) { + pattern++; + patternLen--; + } + /* fall through */ + default: + if (!nocase) { + if (pattern[0] != string[0]) + return 0; /* no match */ + } else { + if (tolower((int)pattern[0]) != tolower((int)string[0])) + return 0; /* no match */ + } + string++; + stringLen--; + break; + } + pattern++; + patternLen--; + if (stringLen == 0) { + while(*pattern == '*') { + pattern++; + patternLen--; + } + break; + } + } + if (patternLen == 0 && stringLen == 0) + return 1; + return 0; +} + +void redisLog(int level, const char *fmt, ...) +{ + va_list ap; + FILE *fp; + + fp = (server.logfile == NULL) ? stdout : fopen(server.logfile,"a"); + if (!fp) return; + + va_start(ap, fmt); + if (level >= server.verbosity) { + char *c = ".-*"; + fprintf(fp,"%c ",c[level]); + vfprintf(fp, fmt, ap); + fprintf(fp,"\n"); + fflush(fp); + } + va_end(ap); + + if (server.logfile) fclose(fp); +} + +/*====================== Hash table type implementation ==================== */ + +/* This is an hash table type that uses the SDS dynamic strings libary as + * keys and radis objects as values (objects can hold SDS strings, + * lists, sets). 
*/ + +static int sdsDictKeyCompare(void *privdata, const void *key1, + const void *key2) +{ + int l1,l2; + DICT_NOTUSED(privdata); + + l1 = sdslen((sds)key1); + l2 = sdslen((sds)key2); + if (l1 != l2) return 0; + return memcmp(key1, key2, l1) == 0; +} + +static void dictRedisObjectDestructor(void *privdata, void *val) +{ + DICT_NOTUSED(privdata); + + decrRefCount(val); +} + +static int dictSdsKeyCompare(void *privdata, const void *key1, + const void *key2) +{ + const robj *o1 = key1, *o2 = key2; + return sdsDictKeyCompare(privdata,o1->ptr,o2->ptr); +} + +static unsigned int dictSdsHash(const void *key) { + const robj *o = key; + return dictGenHashFunction(o->ptr, sdslen((sds)o->ptr)); +} + +static dictType setDictType = { + dictSdsHash, /* hash function */ + NULL, /* key dup */ + NULL, /* val dup */ + dictSdsKeyCompare, /* key compare */ + dictRedisObjectDestructor, /* key destructor */ + NULL /* val destructor */ +}; + +static dictType hashDictType = { + dictSdsHash, /* hash function */ + NULL, /* key dup */ + NULL, /* val dup */ + dictSdsKeyCompare, /* key compare */ + dictRedisObjectDestructor, /* key destructor */ + dictRedisObjectDestructor /* val destructor */ +}; + +/* ========================= Random utility functions ======================= */ + +/* Redis generally does not try to recover from out of memory conditions + * when allocating objects or strings, it is not clear if it will be possible + * to report this condition to the client since the networking layer itself + * is based on heap allocation for send buffers, so we simply abort. + * At least the code will be simpler to read... */ +static void oom(const char *msg) { + fprintf(stderr, "%s: Out of memory\n",msg); + fflush(stderr); + sleep(1); + abort(); +} + +/* ====================== Redis server networking stuff ===================== */ +void closeTimedoutClients(void) { + redisClient *c; + listIter *li; + listNode *ln; + time_t now = time(NULL); + + li = listGetIterator(server.clients,AL_START_HEAD); + if (!li) return; + while ((ln = listNextElement(li)) != NULL) { + c = listNodeValue(ln); + if (!(c->flags & REDIS_SLAVE) && /* no timeout for slaves */ + (now - c->lastinteraction > server.maxidletime)) { + redisLog(REDIS_DEBUG,"Closing idle client"); + freeClient(c); + } + } + listReleaseIterator(li); +} + +int serverCron(struct aeEventLoop *eventLoop, long long id, void *clientData) { + int j, size, used, loops = server.cronloops++; + REDIS_NOTUSED(eventLoop); + REDIS_NOTUSED(id); + REDIS_NOTUSED(clientData); + + /* Update the global state with the amount of used memory */ + server.usedmemory = zmalloc_used_memory(); + + /* If the percentage of used slots in the HT reaches REDIS_HT_MINFILL + * we resize the hash table to save memory */ + for (j = 0; j < server.dbnum; j++) { + size = dictGetHashTableSize(server.dict[j]); + used = dictGetHashTableUsed(server.dict[j]); + if (!(loops % 5) && used > 0) { + redisLog(REDIS_DEBUG,"DB %d: %d keys in %d slots HT.",j,used,size); + // dictPrintStats(server.dict); + } + if (size && used && size > REDIS_HT_MINSLOTS && + (used*100/size < REDIS_HT_MINFILL)) { + redisLog(REDIS_NOTICE,"The hash table %d is too sparse, resize it...",j); + dictResize(server.dict[j]); + redisLog(REDIS_NOTICE,"Hash table %d resized.",j); + } + } + + /* Show information about connected clients */ + if (!(loops % 5)) { + redisLog(REDIS_DEBUG,"%d clients connected (%d slaves), %d bytes in use", + listLength(server.clients)-listLength(server.slaves), + listLength(server.slaves), + server.usedmemory); + } + + /* 
Close connections of timedout clients */ + if (!(loops % 10)) + closeTimedoutClients(); + + /* Check if a background saving in progress terminated */ + if (server.bgsaveinprogress) { + int statloc; + if (wait4(-1,&statloc,WNOHANG,NULL)) { + int exitcode = WEXITSTATUS(statloc); + if (exitcode == 0) { + redisLog(REDIS_NOTICE, + "Background saving terminated with success"); + server.dirty = 0; + server.lastsave = time(NULL); + } else { + redisLog(REDIS_WARNING, + "Background saving error"); + } + server.bgsaveinprogress = 0; + } + } else { + /* If there is not a background saving in progress check if + * we have to save now */ + time_t now = time(NULL); + for (j = 0; j < server.saveparamslen; j++) { + struct saveparam *sp = server.saveparams+j; + + if (server.dirty >= sp->changes && + now-server.lastsave > sp->seconds) { + redisLog(REDIS_NOTICE,"%d changes in %d seconds. Saving...", + sp->changes, sp->seconds); + saveDbBackground(server.dbfilename); + break; + } + } + } + /* Check if we should connect to a MASTER */ + if (server.replstate == REDIS_REPL_CONNECT) { + redisLog(REDIS_NOTICE,"Connecting to MASTER..."); + if (syncWithMaster() == REDIS_OK) { + redisLog(REDIS_NOTICE,"MASTER <-> SLAVE sync succeeded"); + } + } + return 1000; +} + +static void createSharedObjects(void) { + shared.crlf = createObject(REDIS_STRING,sdsnew("\r\n")); + shared.ok = createObject(REDIS_STRING,sdsnew("+OK\r\n")); + shared.err = createObject(REDIS_STRING,sdsnew("-ERR\r\n")); + shared.zerobulk = createObject(REDIS_STRING,sdsnew("0\r\n\r\n")); + shared.nil = createObject(REDIS_STRING,sdsnew("nil\r\n")); + shared.zero = createObject(REDIS_STRING,sdsnew("0\r\n")); + shared.one = createObject(REDIS_STRING,sdsnew("1\r\n")); + /* no such key */ + shared.minus1 = createObject(REDIS_STRING,sdsnew("-1\r\n")); + /* operation against key holding a value of the wrong type */ + shared.minus2 = createObject(REDIS_STRING,sdsnew("-2\r\n")); + /* src and dest objects are the same */ + shared.minus3 = createObject(REDIS_STRING,sdsnew("-3\r\n")); + /* out of range argument */ + shared.minus4 = createObject(REDIS_STRING,sdsnew("-4\r\n")); + shared.pong = createObject(REDIS_STRING,sdsnew("+PONG\r\n")); + shared.wrongtypeerr = createObject(REDIS_STRING,sdsnew( + "-ERR Operation against a key holding the wrong kind of value\r\n")); + shared.wrongtypeerrbulk = createObject(REDIS_STRING,sdscatprintf(sdsempty(),"%d\r\n%s",-sdslen(shared.wrongtypeerr->ptr)+2,shared.wrongtypeerr->ptr)); + shared.nokeyerr = createObject(REDIS_STRING,sdsnew( + "-ERR no such key\r\n")); + shared.nokeyerrbulk = createObject(REDIS_STRING,sdscatprintf(sdsempty(),"%d\r\n%s",-sdslen(shared.nokeyerr->ptr)+2,shared.nokeyerr->ptr)); + shared.syntaxerr = createObject(REDIS_STRING,sdsnew( + "-ERR syntax error\r\n")); + shared.syntaxerrbulk = createObject(REDIS_STRING,sdscatprintf(sdsempty(),"%d\r\n%s",-sdslen(shared.syntaxerr->ptr)+2,shared.syntaxerr->ptr)); + shared.space = createObject(REDIS_STRING,sdsnew(" ")); + shared.select0 = createStringObject("select 0\r\n",10); + shared.select1 = createStringObject("select 1\r\n",10); + shared.select2 = createStringObject("select 2\r\n",10); + shared.select3 = createStringObject("select 3\r\n",10); + shared.select4 = createStringObject("select 4\r\n",10); + shared.select5 = createStringObject("select 5\r\n",10); + shared.select6 = createStringObject("select 6\r\n",10); + shared.select7 = createStringObject("select 7\r\n",10); + shared.select8 = createStringObject("select 8\r\n",10); + shared.select9 = 
createStringObject("select 9\r\n",10); +} + +static void appendServerSaveParams(time_t seconds, int changes) { + server.saveparams = zrealloc(server.saveparams,sizeof(struct saveparam)*(server.saveparamslen+1)); + if (server.saveparams == NULL) oom("appendServerSaveParams"); + server.saveparams[server.saveparamslen].seconds = seconds; + server.saveparams[server.saveparamslen].changes = changes; + server.saveparamslen++; +} + +static void ResetServerSaveParams() { + zfree(server.saveparams); + server.saveparams = NULL; + server.saveparamslen = 0; +} + +static void initServerConfig() { + server.dbnum = REDIS_DEFAULT_DBNUM; + server.port = REDIS_SERVERPORT; + server.verbosity = REDIS_DEBUG; + server.maxidletime = REDIS_MAXIDLETIME; + server.saveparams = NULL; + server.logfile = NULL; /* NULL = log on standard output */ + server.bindaddr = NULL; + server.glueoutputbuf = 1; + server.daemonize = 0; + server.dbfilename = "dump.rdb"; + ResetServerSaveParams(); + + appendServerSaveParams(60*60,1); /* save after 1 hour and 1 change */ + appendServerSaveParams(300,100); /* save after 5 minutes and 100 changes */ + appendServerSaveParams(60,10000); /* save after 1 minute and 10000 changes */ + /* Replication related */ + server.isslave = 0; + server.masterhost = NULL; + server.masterport = 6379; + server.master = NULL; + server.replstate = REDIS_REPL_NONE; +} + +static void initServer() { + int j; + + signal(SIGHUP, SIG_IGN); + signal(SIGPIPE, SIG_IGN); + + server.clients = listCreate(); + server.slaves = listCreate(); + server.objfreelist = listCreate(); + createSharedObjects(); + server.el = aeCreateEventLoop(); + server.dict = zmalloc(sizeof(dict*)*server.dbnum); + if (!server.dict || !server.clients || !server.slaves || !server.el || !server.objfreelist) + oom("server initialization"); /* Fatal OOM */ + server.fd = anetTcpServer(server.neterr, server.port, server.bindaddr); + if (server.fd == -1) { + redisLog(REDIS_WARNING, "Opening TCP port: %s", server.neterr); + exit(1); + } + for (j = 0; j < server.dbnum; j++) { + server.dict[j] = dictCreate(&hashDictType,NULL); + if (!server.dict[j]) + oom("dictCreate"); /* Fatal OOM */ + } + server.cronloops = 0; + server.bgsaveinprogress = 0; + server.lastsave = time(NULL); + server.dirty = 0; + server.usedmemory = 0; + server.stat_numcommands = 0; + server.stat_numconnections = 0; + server.stat_starttime = time(NULL); + aeCreateTimeEvent(server.el, 1000, serverCron, NULL, NULL); +} + +/* Empty the whole database */ +static void emptyDb() { + int j; + + for (j = 0; j < server.dbnum; j++) + dictEmpty(server.dict[j]); +} + +/* I agree, this is a very rudimental way to load a configuration... 
+ will improve later if the config gets more complex */ +static void loadServerConfig(char *filename) { + FILE *fp = fopen(filename,"r"); + char buf[REDIS_CONFIGLINE_MAX+1], *err = NULL; + int linenum = 0; + sds line = NULL; + + if (!fp) { + redisLog(REDIS_WARNING,"Fatal error, can't open config file"); + exit(1); + } + while(fgets(buf,REDIS_CONFIGLINE_MAX+1,fp) != NULL) { + sds *argv; + int argc, j; + + linenum++; + line = sdsnew(buf); + line = sdstrim(line," \t\r\n"); + + /* Skip comments and blank lines*/ + if (line[0] == '#' || line[0] == '\0') { + sdsfree(line); + continue; + } + + /* Split into arguments */ + argv = sdssplitlen(line,sdslen(line)," ",1,&argc); + sdstolower(argv[0]); + + /* Execute config directives */ + if (!strcmp(argv[0],"timeout") && argc == 2) { + server.maxidletime = atoi(argv[1]); + if (server.maxidletime < 1) { + err = "Invalid timeout value"; goto loaderr; + } + } else if (!strcmp(argv[0],"port") && argc == 2) { + server.port = atoi(argv[1]); + if (server.port < 1 || server.port > 65535) { + err = "Invalid port"; goto loaderr; + } + } else if (!strcmp(argv[0],"bind") && argc == 2) { + server.bindaddr = zstrdup(argv[1]); + } else if (!strcmp(argv[0],"save") && argc == 3) { + int seconds = atoi(argv[1]); + int changes = atoi(argv[2]); + if (seconds < 1 || changes < 0) { + err = "Invalid save parameters"; goto loaderr; + } + appendServerSaveParams(seconds,changes); + } else if (!strcmp(argv[0],"dir") && argc == 2) { + if (chdir(argv[1]) == -1) { + redisLog(REDIS_WARNING,"Can't chdir to '%s': %s", + argv[1], strerror(errno)); + exit(1); + } + } else if (!strcmp(argv[0],"loglevel") && argc == 2) { + if (!strcmp(argv[1],"debug")) server.verbosity = REDIS_DEBUG; + else if (!strcmp(argv[1],"notice")) server.verbosity = REDIS_NOTICE; + else if (!strcmp(argv[1],"warning")) server.verbosity = REDIS_WARNING; + else { + err = "Invalid log level. Must be one of debug, notice, warning"; + goto loaderr; + } + } else if (!strcmp(argv[0],"logfile") && argc == 2) { + FILE *fp; + + server.logfile = zstrdup(argv[1]); + if (!strcmp(server.logfile,"stdout")) { + zfree(server.logfile); + server.logfile = NULL; + } + if (server.logfile) { + /* Test if we are able to open the file. The server will not + * be able to abort just for this problem later... 
*/ + fp = fopen(server.logfile,"a"); + if (fp == NULL) { + err = sdscatprintf(sdsempty(), + "Can't open the log file: %s", strerror(errno)); + goto loaderr; + } + fclose(fp); + } + } else if (!strcmp(argv[0],"databases") && argc == 2) { + server.dbnum = atoi(argv[1]); + if (server.dbnum < 1) { + err = "Invalid number of databases"; goto loaderr; + } + } else if (!strcmp(argv[0],"slaveof") && argc == 3) { + server.masterhost = sdsnew(argv[1]); + server.masterport = atoi(argv[2]); + server.replstate = REDIS_REPL_CONNECT; + } else if (!strcmp(argv[0],"glueoutputbuf") && argc == 2) { + sdstolower(argv[1]); + if (!strcmp(argv[1],"yes")) server.glueoutputbuf = 1; + else if (!strcmp(argv[1],"no")) server.glueoutputbuf = 0; + else { + err = "argument must be 'yes' or 'no'"; goto loaderr; + } + } else if (!strcmp(argv[0],"daemonize") && argc == 2) { + sdstolower(argv[1]); + if (!strcmp(argv[1],"yes")) server.daemonize = 1; + else if (!strcmp(argv[1],"no")) server.daemonize = 0; + else { + err = "argument must be 'yes' or 'no'"; goto loaderr; + } + } else { + err = "Bad directive or wrong number of arguments"; goto loaderr; + } + for (j = 0; j < argc; j++) + sdsfree(argv[j]); + zfree(argv); + sdsfree(line); + } + fclose(fp); + return; + +loaderr: + fprintf(stderr, "\n*** FATAL CONFIG FILE ERROR ***\n"); + fprintf(stderr, "Reading the configuration file, at line %d\n", linenum); + fprintf(stderr, ">>> '%s'\n", line); + fprintf(stderr, "%s\n", err); + exit(1); +} + +static void freeClientArgv(redisClient *c) { + int j; + + for (j = 0; j < c->argc; j++) + decrRefCount(c->argv[j]); + c->argc = 0; +} + +static void freeClient(redisClient *c) { + listNode *ln; + + aeDeleteFileEvent(server.el,c->fd,AE_READABLE); + aeDeleteFileEvent(server.el,c->fd,AE_WRITABLE); + sdsfree(c->querybuf); + listRelease(c->reply); + freeClientArgv(c); + close(c->fd); + ln = listSearchKey(server.clients,c); + assert(ln != NULL); + listDelNode(server.clients,ln); + if (c->flags & REDIS_SLAVE) { + ln = listSearchKey(server.slaves,c); + assert(ln != NULL); + listDelNode(server.slaves,ln); + } + if (c->flags & REDIS_MASTER) { + server.master = NULL; + server.replstate = REDIS_REPL_CONNECT; + } + zfree(c); +} + +static void glueReplyBuffersIfNeeded(redisClient *c) { + int totlen = 0; + listNode *ln = c->reply->head, *next; + robj *o; + + while(ln) { + o = ln->value; + totlen += sdslen(o->ptr); + ln = ln->next; + /* This optimization makes more sense if we don't have to copy + * too much data */ + if (totlen > 1024) return; + } + if (totlen > 0) { + char buf[1024]; + int copylen = 0; + + ln = c->reply->head; + while(ln) { + next = ln->next; + o = ln->value; + memcpy(buf+copylen,o->ptr,sdslen(o->ptr)); + copylen += sdslen(o->ptr); + listDelNode(c->reply,ln); + ln = next; + } + /* Now the output buffer is empty, add the new single element */ + addReplySds(c,sdsnewlen(buf,totlen)); + } +} + +static void sendReplyToClient(aeEventLoop *el, int fd, void *privdata, int mask) { + redisClient *c = privdata; + int nwritten = 0, totwritten = 0, objlen; + robj *o; + REDIS_NOTUSED(el); + REDIS_NOTUSED(mask); + + if (server.glueoutputbuf && listLength(c->reply) > 1) + glueReplyBuffersIfNeeded(c); + while(listLength(c->reply)) { + o = listNodeValue(listFirst(c->reply)); + objlen = sdslen(o->ptr); + + if (objlen == 0) { + listDelNode(c->reply,listFirst(c->reply)); + continue; + } + + if (c->flags & REDIS_MASTER) { + nwritten = objlen - c->sentlen; + } else { + nwritten = write(fd, o->ptr+c->sentlen, objlen - c->sentlen); + if (nwritten <= 0) break; + 
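+            /* nwritten <= 0 stops the write loop here; EAGAIN is turned into
+             * a no-op just below, any other write error frees the client. */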
} + c->sentlen += nwritten; + totwritten += nwritten; + /* If we fully sent the object on head go to the next one */ + if (c->sentlen == objlen) { + listDelNode(c->reply,listFirst(c->reply)); + c->sentlen = 0; + } + } + if (nwritten == -1) { + if (errno == EAGAIN) { + nwritten = 0; + } else { + redisLog(REDIS_DEBUG, + "Error writing to client: %s", strerror(errno)); + freeClient(c); + return; + } + } + if (totwritten > 0) c->lastinteraction = time(NULL); + if (listLength(c->reply) == 0) { + c->sentlen = 0; + aeDeleteFileEvent(server.el,c->fd,AE_WRITABLE); + } +} + +static struct redisCommand *lookupCommand(char *name) { + int j = 0; + while(cmdTable[j].name != NULL) { + if (!strcmp(name,cmdTable[j].name)) return &cmdTable[j]; + j++; + } + return NULL; +} + +/* resetClient prepare the client to process the next command */ +static void resetClient(redisClient *c) { + freeClientArgv(c); + c->bulklen = -1; +} + +/* If this function gets called we already read a whole + * command, argments are in the client argv/argc fields. + * processCommand() execute the command or prepare the + * server for a bulk read from the client. + * + * If 1 is returned the client is still alive and valid and + * and other operations can be performed by the caller. Otherwise + * if 0 is returned the client was destroied (i.e. after QUIT). */ +static int processCommand(redisClient *c) { + struct redisCommand *cmd; + long long dirty; + + sdstolower(c->argv[0]->ptr); + /* The QUIT command is handled as a special case. Normal command + * procs are unable to close the client connection safely */ + if (!strcmp(c->argv[0]->ptr,"quit")) { + freeClient(c); + return 0; + } + cmd = lookupCommand(c->argv[0]->ptr); + if (!cmd) { + addReplySds(c,sdsnew("-ERR unknown command\r\n")); + resetClient(c); + return 1; + } else if ((cmd->arity > 0 && cmd->arity != c->argc) || + (c->argc < -cmd->arity)) { + addReplySds(c,sdsnew("-ERR wrong number of arguments\r\n")); + resetClient(c); + return 1; + } else if (cmd->flags & REDIS_CMD_BULK && c->bulklen == -1) { + int bulklen = atoi(c->argv[c->argc-1]->ptr); + + decrRefCount(c->argv[c->argc-1]); + if (bulklen < 0 || bulklen > 1024*1024*1024) { + c->argc--; + addReplySds(c,sdsnew("-ERR invalid bulk write count\r\n")); + resetClient(c); + return 1; + } + c->argc--; + c->bulklen = bulklen+2; /* add two bytes for CR+LF */ + /* It is possible that the bulk read is already in the + * buffer. 
Check this condition and handle it accordingly */ + if ((signed)sdslen(c->querybuf) >= c->bulklen) { + c->argv[c->argc] = createStringObject(c->querybuf,c->bulklen-2); + c->argc++; + c->querybuf = sdsrange(c->querybuf,c->bulklen,-1); + } else { + return 1; + } + } + /* Exec the command */ + dirty = server.dirty; + cmd->proc(c); + if (server.dirty-dirty != 0 && listLength(server.slaves)) + replicationFeedSlaves(cmd,c->dictid,c->argv,c->argc); + server.stat_numcommands++; + + /* Prepare the client for the next command */ + if (c->flags & REDIS_CLOSE) { + freeClient(c); + return 0; + } + resetClient(c); + return 1; +} + +static void replicationFeedSlaves(struct redisCommand *cmd, int dictid, robj **argv, int argc) { + listNode *ln = server.slaves->head; + robj *outv[REDIS_MAX_ARGS*4]; /* enough room for args, spaces, newlines */ + int outc = 0, j; + + for (j = 0; j < argc; j++) { + if (j != 0) outv[outc++] = shared.space; + if ((cmd->flags & REDIS_CMD_BULK) && j == argc-1) { + robj *lenobj; + + lenobj = createObject(REDIS_STRING, + sdscatprintf(sdsempty(),"%d\r\n",sdslen(argv[j]->ptr))); + lenobj->refcount = 0; + outv[outc++] = lenobj; + } + outv[outc++] = argv[j]; + } + outv[outc++] = shared.crlf; + + while(ln) { + redisClient *slave = ln->value; + if (slave->slaveseldb != dictid) { + robj *selectcmd; + + switch(dictid) { + case 0: selectcmd = shared.select0; break; + case 1: selectcmd = shared.select1; break; + case 2: selectcmd = shared.select2; break; + case 3: selectcmd = shared.select3; break; + case 4: selectcmd = shared.select4; break; + case 5: selectcmd = shared.select5; break; + case 6: selectcmd = shared.select6; break; + case 7: selectcmd = shared.select7; break; + case 8: selectcmd = shared.select8; break; + case 9: selectcmd = shared.select9; break; + default: + selectcmd = createObject(REDIS_STRING, + sdscatprintf(sdsempty(),"select %d\r\n",dictid)); + selectcmd->refcount = 0; + break; + } + addReply(slave,selectcmd); + slave->slaveseldb = dictid; + } + for (j = 0; j < outc; j++) addReply(slave,outv[j]); + ln = ln->next; + } +} + +static void readQueryFromClient(aeEventLoop *el, int fd, void *privdata, int mask) { + redisClient *c = (redisClient*) privdata; + char buf[REDIS_QUERYBUF_LEN]; + int nread; + REDIS_NOTUSED(el); + REDIS_NOTUSED(mask); + + nread = read(fd, buf, REDIS_QUERYBUF_LEN); + if (nread == -1) { + if (errno == EAGAIN) { + nread = 0; + } else { + redisLog(REDIS_DEBUG, "Reading from client: %s",strerror(errno)); + freeClient(c); + return; + } + } else if (nread == 0) { + redisLog(REDIS_DEBUG, "Client closed connection"); + freeClient(c); + return; + } + if (nread) { + c->querybuf = sdscatlen(c->querybuf, buf, nread); + c->lastinteraction = time(NULL); + } else { + return; + } + +again: + if (c->bulklen == -1) { + /* Read the first line of the query */ + char *p = strchr(c->querybuf,'\n'); + size_t querylen; + if (p) { + sds query, *argv; + int argc, j; + + query = c->querybuf; + c->querybuf = sdsempty(); + querylen = 1+(p-(query)); + if (sdslen(query) > querylen) { + /* leave data after the first line of the query in the buffer */ + c->querybuf = sdscatlen(c->querybuf,query+querylen,sdslen(query)-querylen); + } + *p = '\0'; /* remove "\n" */ + if (*(p-1) == '\r') *(p-1) = '\0'; /* and "\r" if any */ + sdsupdatelen(query); + + /* Now we can split the query in arguments */ + if (sdslen(query) == 0) { + /* Ignore empty query */ + sdsfree(query); + return; + } + argv = sdssplitlen(query,sdslen(query)," ",1,&argc); + sdsfree(query); + if (argv == NULL) 
oom("sdssplitlen"); + for (j = 0; j < argc && j < REDIS_MAX_ARGS; j++) { + if (sdslen(argv[j])) { + c->argv[c->argc] = createObject(REDIS_STRING,argv[j]); + c->argc++; + } else { + sdsfree(argv[j]); + } + } + zfree(argv); + /* Execute the command. If the client is still valid + * after processCommand() return and there is something + * on the query buffer try to process the next command. */ + if (processCommand(c) && sdslen(c->querybuf)) goto again; + return; + } else if (sdslen(c->querybuf) >= 1024) { + redisLog(REDIS_DEBUG, "Client protocol error"); + freeClient(c); + return; + } + } else { + /* Bulk read handling. Note that if we are at this point + the client already sent a command terminated with a newline, + we are reading the bulk data that is actually the last + argument of the command. */ + int qbl = sdslen(c->querybuf); + + if (c->bulklen <= qbl) { + /* Copy everything but the final CRLF as final argument */ + c->argv[c->argc] = createStringObject(c->querybuf,c->bulklen-2); + c->argc++; + c->querybuf = sdsrange(c->querybuf,c->bulklen,-1); + processCommand(c); + return; + } + } +} + +static int selectDb(redisClient *c, int id) { + if (id < 0 || id >= server.dbnum) + return REDIS_ERR; + c->dict = server.dict[id]; + c->dictid = id; + return REDIS_OK; +} + +static redisClient *createClient(int fd) { + redisClient *c = zmalloc(sizeof(*c)); + + anetNonBlock(NULL,fd); + anetTcpNoDelay(NULL,fd); + if (!c) return NULL; + selectDb(c,0); + c->fd = fd; + c->querybuf = sdsempty(); + c->argc = 0; + c->bulklen = -1; + c->sentlen = 0; + c->flags = 0; + c->lastinteraction = time(NULL); + if ((c->reply = listCreate()) == NULL) oom("listCreate"); + listSetFreeMethod(c->reply,decrRefCount); + if (aeCreateFileEvent(server.el, c->fd, AE_READABLE, + readQueryFromClient, c, NULL) == AE_ERR) { + freeClient(c); + return NULL; + } + if (!listAddNodeTail(server.clients,c)) oom("listAddNodeTail"); + return c; +} + +static void addReply(redisClient *c, robj *obj) { + if (listLength(c->reply) == 0 && + aeCreateFileEvent(server.el, c->fd, AE_WRITABLE, + sendReplyToClient, c, NULL) == AE_ERR) return; + if (!listAddNodeTail(c->reply,obj)) oom("listAddNodeTail"); + incrRefCount(obj); +} + +static void addReplySds(redisClient *c, sds s) { + robj *o = createObject(REDIS_STRING,s); + addReply(c,o); + decrRefCount(o); +} + +static void acceptHandler(aeEventLoop *el, int fd, void *privdata, int mask) { + int cport, cfd; + char cip[128]; + REDIS_NOTUSED(el); + REDIS_NOTUSED(mask); + REDIS_NOTUSED(privdata); + + cfd = anetAccept(server.neterr, fd, cip, &cport); + if (cfd == AE_ERR) { + redisLog(REDIS_DEBUG,"Accepting client connection: %s", server.neterr); + return; + } + redisLog(REDIS_DEBUG,"Accepted %s:%d", cip, cport); + if (createClient(cfd) == NULL) { + redisLog(REDIS_WARNING,"Error allocating resoures for the client"); + close(cfd); /* May be already closed, just ingore errors */ + return; + } + server.stat_numconnections++; +} + +/* ======================= Redis objects implementation ===================== */ + +static robj *createObject(int type, void *ptr) { + robj *o; + + if (listLength(server.objfreelist)) { + listNode *head = listFirst(server.objfreelist); + o = listNodeValue(head); + listDelNode(server.objfreelist,head); + } else { + o = zmalloc(sizeof(*o)); + } + if (!o) oom("createObject"); + o->type = type; + o->ptr = ptr; + o->refcount = 1; + return o; +} + +static robj *createStringObject(char *ptr, size_t len) { + return createObject(REDIS_STRING,sdsnewlen(ptr,len)); +} + +static robj 
*createListObject(void) { + list *l = listCreate(); + + if (!l) oom("listCreate"); + listSetFreeMethod(l,decrRefCount); + return createObject(REDIS_LIST,l); +} + +static robj *createSetObject(void) { + dict *d = dictCreate(&setDictType,NULL); + if (!d) oom("dictCreate"); + return createObject(REDIS_SET,d); +} + +#if 0 +static robj *createHashObject(void) { + dict *d = dictCreate(&hashDictType,NULL); + if (!d) oom("dictCreate"); + return createObject(REDIS_SET,d); +} +#endif + +static void freeStringObject(robj *o) { + sdsfree(o->ptr); +} + +static void freeListObject(robj *o) { + listRelease((list*) o->ptr); +} + +static void freeSetObject(robj *o) { + dictRelease((dict*) o->ptr); +} + +static void freeHashObject(robj *o) { + dictRelease((dict*) o->ptr); +} + +static void incrRefCount(robj *o) { + o->refcount++; +} + +static void decrRefCount(void *obj) { + robj *o = obj; + if (--(o->refcount) == 0) { + switch(o->type) { + case REDIS_STRING: freeStringObject(o); break; + case REDIS_LIST: freeListObject(o); break; + case REDIS_SET: freeSetObject(o); break; + case REDIS_HASH: freeHashObject(o); break; + default: assert(0 != 0); break; + } + if (listLength(server.objfreelist) > REDIS_OBJFREELIST_MAX || + !listAddNodeHead(server.objfreelist,o)) + zfree(o); + } +} + +/*============================ DB saving/loading ============================ */ + +/* Save the DB on disk. Return REDIS_ERR on error, REDIS_OK on success */ +static int saveDb(char *filename) { + dictIterator *di = NULL; + dictEntry *de; + uint32_t len; + uint8_t type; + FILE *fp; + char tmpfile[256]; + int j; + + snprintf(tmpfile,256,"temp-%d.%ld.rdb",(int)time(NULL),(long int)random()); + fp = fopen(tmpfile,"w"); + if (!fp) { + redisLog(REDIS_WARNING, "Failed saving the DB: %s", strerror(errno)); + return REDIS_ERR; + } + if (fwrite("REDIS0000",9,1,fp) == 0) goto werr; + for (j = 0; j < server.dbnum; j++) { + dict *d = server.dict[j]; + if (dictGetHashTableUsed(d) == 0) continue; + di = dictGetIterator(d); + if (!di) { + fclose(fp); + return REDIS_ERR; + } + + /* Write the SELECT DB opcode */ + type = REDIS_SELECTDB; + len = htonl(j); + if (fwrite(&type,1,1,fp) == 0) goto werr; + if (fwrite(&len,4,1,fp) == 0) goto werr; + + /* Iterate this DB writing every entry */ + while((de = dictNext(di)) != NULL) { + robj *key = dictGetEntryKey(de); + robj *o = dictGetEntryVal(de); + + type = o->type; + len = htonl(sdslen(key->ptr)); + if (fwrite(&type,1,1,fp) == 0) goto werr; + if (fwrite(&len,4,1,fp) == 0) goto werr; + if (fwrite(key->ptr,sdslen(key->ptr),1,fp) == 0) goto werr; + if (type == REDIS_STRING) { + /* Save a string value */ + sds sval = o->ptr; + len = htonl(sdslen(sval)); + if (fwrite(&len,4,1,fp) == 0) goto werr; + if (sdslen(sval) && + fwrite(sval,sdslen(sval),1,fp) == 0) goto werr; + } else if (type == REDIS_LIST) { + /* Save a list value */ + list *list = o->ptr; + listNode *ln = list->head; + + len = htonl(listLength(list)); + if (fwrite(&len,4,1,fp) == 0) goto werr; + while(ln) { + robj *eleobj = listNodeValue(ln); + len = htonl(sdslen(eleobj->ptr)); + if (fwrite(&len,4,1,fp) == 0) goto werr; + if (sdslen(eleobj->ptr) && fwrite(eleobj->ptr,sdslen(eleobj->ptr),1,fp) == 0) + goto werr; + ln = ln->next; + } + } else if (type == REDIS_SET) { + /* Save a set value */ + dict *set = o->ptr; + dictIterator *di = dictGetIterator(set); + dictEntry *de; + + if (!set) oom("dictGetIteraotr"); + len = htonl(dictGetHashTableUsed(set)); + if (fwrite(&len,4,1,fp) == 0) goto werr; + while((de = dictNext(di)) != NULL) { + robj *eleobj; + + 
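+                    /* Set members are stored as dict keys with NULL values,
+                     * so the element to serialize is the dict entry's key. */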
eleobj = dictGetEntryKey(de); + len = htonl(sdslen(eleobj->ptr)); + if (fwrite(&len,4,1,fp) == 0) goto werr; + if (sdslen(eleobj->ptr) && fwrite(eleobj->ptr,sdslen(eleobj->ptr),1,fp) == 0) + goto werr; + } + dictReleaseIterator(di); + } else { + assert(0 != 0); + } + } + dictReleaseIterator(di); + } + /* EOF opcode */ + type = REDIS_EOF; + if (fwrite(&type,1,1,fp) == 0) goto werr; + fflush(fp); + fsync(fileno(fp)); + fclose(fp); + + /* Use RENAME to make sure the DB file is changed atomically only + * if the generate DB file is ok. */ + if (rename(tmpfile,filename) == -1) { + redisLog(REDIS_WARNING,"Error moving temp DB file on the final destionation: %s", strerror(errno)); + unlink(tmpfile); + return REDIS_ERR; + } + redisLog(REDIS_NOTICE,"DB saved on disk"); + server.dirty = 0; + server.lastsave = time(NULL); + return REDIS_OK; + +werr: + fclose(fp); + unlink(tmpfile); + redisLog(REDIS_WARNING,"Write error saving DB on disk: %s", strerror(errno)); + if (di) dictReleaseIterator(di); + return REDIS_ERR; +} + +static int saveDbBackground(char *filename) { + pid_t childpid; + + if (server.bgsaveinprogress) return REDIS_ERR; + if ((childpid = fork()) == 0) { + /* Child */ + close(server.fd); + if (saveDb(filename) == REDIS_OK) { + exit(0); + } else { + exit(1); + } + } else { + /* Parent */ + redisLog(REDIS_NOTICE,"Background saving started by pid %d",childpid); + server.bgsaveinprogress = 1; + return REDIS_OK; + } + return REDIS_OK; /* unreached */ +} + +static int loadDb(char *filename) { + FILE *fp; + char buf[REDIS_LOADBUF_LEN]; /* Try to use this buffer instead of */ + char vbuf[REDIS_LOADBUF_LEN]; /* malloc() when the element is small */ + char *key = NULL, *val = NULL; + uint32_t klen,vlen,dbid; + uint8_t type; + int retval; + dict *d = server.dict[0]; + + fp = fopen(filename,"r"); + if (!fp) return REDIS_ERR; + if (fread(buf,9,1,fp) == 0) goto eoferr; + if (memcmp(buf,"REDIS0000",9) != 0) { + fclose(fp); + redisLog(REDIS_WARNING,"Wrong signature trying to load DB from file"); + return REDIS_ERR; + } + while(1) { + robj *o; + + /* Read type. */ + if (fread(&type,1,1,fp) == 0) goto eoferr; + if (type == REDIS_EOF) break; + /* Handle SELECT DB opcode as a special case */ + if (type == REDIS_SELECTDB) { + if (fread(&dbid,4,1,fp) == 0) goto eoferr; + dbid = ntohl(dbid); + if (dbid >= (unsigned)server.dbnum) { + redisLog(REDIS_WARNING,"FATAL: Data file was created with a Redis server compiled to handle more than %d databases. Exiting\n", server.dbnum); + exit(1); + } + d = server.dict[dbid]; + continue; + } + /* Read key */ + if (fread(&klen,4,1,fp) == 0) goto eoferr; + klen = ntohl(klen); + if (klen <= REDIS_LOADBUF_LEN) { + key = buf; + } else { + key = zmalloc(klen); + if (!key) oom("Loading DB from file"); + } + if (fread(key,klen,1,fp) == 0) goto eoferr; + + if (type == REDIS_STRING) { + /* Read string value */ + if (fread(&vlen,4,1,fp) == 0) goto eoferr; + vlen = ntohl(vlen); + if (vlen <= REDIS_LOADBUF_LEN) { + val = vbuf; + } else { + val = zmalloc(vlen); + if (!val) oom("Loading DB from file"); + } + if (vlen && fread(val,vlen,1,fp) == 0) goto eoferr; + o = createObject(REDIS_STRING,sdsnewlen(val,vlen)); + } else if (type == REDIS_LIST || type == REDIS_SET) { + /* Read list/set value */ + uint32_t listlen; + if (fread(&listlen,4,1,fp) == 0) goto eoferr; + listlen = ntohl(listlen); + o = (type == REDIS_LIST) ? 
createListObject() : createSetObject(); + /* Load every single element of the list/set */ + while(listlen--) { + robj *ele; + + if (fread(&vlen,4,1,fp) == 0) goto eoferr; + vlen = ntohl(vlen); + if (vlen <= REDIS_LOADBUF_LEN) { + val = vbuf; + } else { + val = zmalloc(vlen); + if (!val) oom("Loading DB from file"); + } + if (vlen && fread(val,vlen,1,fp) == 0) goto eoferr; + ele = createObject(REDIS_STRING,sdsnewlen(val,vlen)); + if (type == REDIS_LIST) { + if (!listAddNodeTail((list*)o->ptr,ele)) + oom("listAddNodeTail"); + } else { + if (dictAdd((dict*)o->ptr,ele,NULL) == DICT_ERR) + oom("dictAdd"); + } + /* free the temp buffer if needed */ + if (val != vbuf) zfree(val); + val = NULL; + } + } else { + assert(0 != 0); + } + /* Add the new object in the hash table */ + retval = dictAdd(d,createStringObject(key,klen),o); + if (retval == DICT_ERR) { + redisLog(REDIS_WARNING,"Loading DB, duplicated key found! Unrecoverable error, exiting now."); + exit(1); + } + /* Iteration cleanup */ + if (key != buf) zfree(key); + if (val != vbuf) zfree(val); + key = val = NULL; + } + fclose(fp); + return REDIS_OK; + +eoferr: /* unexpected end of file is handled here with a fatal exit */ + if (key != buf) zfree(key); + if (val != vbuf) zfree(val); + redisLog(REDIS_WARNING,"Short read loading DB. Unrecoverable error, exiting now."); + exit(1); + return REDIS_ERR; /* Just to avoid warning */ +} + +/*================================== Commands =============================== */ + +static void pingCommand(redisClient *c) { + addReply(c,shared.pong); +} + +static void echoCommand(redisClient *c) { + addReplySds(c,sdscatprintf(sdsempty(),"%d\r\n", + (int)sdslen(c->argv[1]->ptr))); + addReply(c,c->argv[1]); + addReply(c,shared.crlf); +} + +/*=================================== Strings =============================== */ + +static void setGenericCommand(redisClient *c, int nx) { + int retval; + + retval = dictAdd(c->dict,c->argv[1],c->argv[2]); + if (retval == DICT_ERR) { + if (!nx) { + dictReplace(c->dict,c->argv[1],c->argv[2]); + incrRefCount(c->argv[2]); + } else { + addReply(c,shared.zero); + return; + } + } else { + incrRefCount(c->argv[1]); + incrRefCount(c->argv[2]); + } + server.dirty++; + addReply(c, nx ? 
shared.one : shared.ok); +} + +static void setCommand(redisClient *c) { + return setGenericCommand(c,0); +} + +static void setnxCommand(redisClient *c) { + return setGenericCommand(c,1); +} + +static void getCommand(redisClient *c) { + dictEntry *de; + + de = dictFind(c->dict,c->argv[1]); + if (de == NULL) { + addReply(c,shared.nil); + } else { + robj *o = dictGetEntryVal(de); + + if (o->type != REDIS_STRING) { + addReply(c,shared.wrongtypeerrbulk); + } else { + addReplySds(c,sdscatprintf(sdsempty(),"%d\r\n",(int)sdslen(o->ptr))); + addReply(c,o); + addReply(c,shared.crlf); + } + } +} + +static void incrDecrCommand(redisClient *c, int incr) { + dictEntry *de; + long long value; + int retval; + robj *o; + + de = dictFind(c->dict,c->argv[1]); + if (de == NULL) { + value = 0; + } else { + robj *o = dictGetEntryVal(de); + + if (o->type != REDIS_STRING) { + value = 0; + } else { + char *eptr; + + value = strtoll(o->ptr, &eptr, 10); + } + } + + value += incr; + o = createObject(REDIS_STRING,sdscatprintf(sdsempty(),"%lld",value)); + retval = dictAdd(c->dict,c->argv[1],o); + if (retval == DICT_ERR) { + dictReplace(c->dict,c->argv[1],o); + } else { + incrRefCount(c->argv[1]); + } + server.dirty++; + addReply(c,o); + addReply(c,shared.crlf); +} + +static void incrCommand(redisClient *c) { + return incrDecrCommand(c,1); +} + +static void decrCommand(redisClient *c) { + return incrDecrCommand(c,-1); +} + +static void incrbyCommand(redisClient *c) { + int incr = atoi(c->argv[2]->ptr); + return incrDecrCommand(c,incr); +} + +static void decrbyCommand(redisClient *c) { + int incr = atoi(c->argv[2]->ptr); + return incrDecrCommand(c,-incr); +} + +/* ========================= Type agnostic commands ========================= */ + +static void delCommand(redisClient *c) { + if (dictDelete(c->dict,c->argv[1]) == DICT_OK) { + server.dirty++; + addReply(c,shared.one); + } else { + addReply(c,shared.zero); + } +} + +static void existsCommand(redisClient *c) { + dictEntry *de; + + de = dictFind(c->dict,c->argv[1]); + if (de == NULL) + addReply(c,shared.zero); + else + addReply(c,shared.one); +} + +static void selectCommand(redisClient *c) { + int id = atoi(c->argv[1]->ptr); + + if (selectDb(c,id) == REDIS_ERR) { + addReplySds(c,"-ERR invalid DB index\r\n"); + } else { + addReply(c,shared.ok); + } +} + +static void randomkeyCommand(redisClient *c) { + dictEntry *de; + + de = dictGetRandomKey(c->dict); + if (de == NULL) { + addReply(c,shared.crlf); + } else { + addReply(c,dictGetEntryKey(de)); + addReply(c,shared.crlf); + } +} + +static void keysCommand(redisClient *c) { + dictIterator *di; + dictEntry *de; + sds pattern = c->argv[1]->ptr; + int plen = sdslen(pattern); + int numkeys = 0, keyslen = 0; + robj *lenobj = createObject(REDIS_STRING,NULL); + + di = dictGetIterator(c->dict); + if (!di) oom("dictGetIterator"); + addReply(c,lenobj); + decrRefCount(lenobj); + while((de = dictNext(di)) != NULL) { + robj *keyobj = dictGetEntryKey(de); + sds key = keyobj->ptr; + if ((pattern[0] == '*' && pattern[1] == '\0') || + stringmatchlen(pattern,plen,key,sdslen(key),0)) { + if (numkeys != 0) + addReply(c,shared.space); + addReply(c,keyobj); + numkeys++; + keyslen += sdslen(key); + } + } + dictReleaseIterator(di); + lenobj->ptr = sdscatprintf(sdsempty(),"%lu\r\n",keyslen+(numkeys ? 
(numkeys-1) : 0)); + addReply(c,shared.crlf); +} + +static void dbsizeCommand(redisClient *c) { + addReplySds(c, + sdscatprintf(sdsempty(),"%lu\r\n",dictGetHashTableUsed(c->dict))); +} + +static void lastsaveCommand(redisClient *c) { + addReplySds(c, + sdscatprintf(sdsempty(),"%lu\r\n",server.lastsave)); +} + +static void typeCommand(redisClient *c) { + dictEntry *de; + char *type; + + de = dictFind(c->dict,c->argv[1]); + if (de == NULL) { + type = "none"; + } else { + robj *o = dictGetEntryVal(de); + + switch(o->type) { + case REDIS_STRING: type = "string"; break; + case REDIS_LIST: type = "list"; break; + case REDIS_SET: type = "set"; break; + default: type = "unknown"; break; + } + } + addReplySds(c,sdsnew(type)); + addReply(c,shared.crlf); +} + +static void saveCommand(redisClient *c) { + if (saveDb(server.dbfilename) == REDIS_OK) { + addReply(c,shared.ok); + } else { + addReply(c,shared.err); + } +} + +static void bgsaveCommand(redisClient *c) { + if (server.bgsaveinprogress) { + addReplySds(c,sdsnew("-ERR background save already in progress\r\n")); + return; + } + if (saveDbBackground(server.dbfilename) == REDIS_OK) { + addReply(c,shared.ok); + } else { + addReply(c,shared.err); + } +} + +static void shutdownCommand(redisClient *c) { + redisLog(REDIS_WARNING,"User requested shutdown, saving DB..."); + if (saveDb(server.dbfilename) == REDIS_OK) { + redisLog(REDIS_WARNING,"Server exit now, bye bye..."); + exit(1); + } else { + redisLog(REDIS_WARNING,"Error trying to save the DB, can't exit"); + addReplySds(c,sdsnew("-ERR can't quit, problems saving the DB\r\n")); + } +} + +static void renameGenericCommand(redisClient *c, int nx) { + dictEntry *de; + robj *o; + + /* To use the same key as src and dst is probably an error */ + if (sdscmp(c->argv[1]->ptr,c->argv[2]->ptr) == 0) { + if (nx) + addReply(c,shared.minus3); + else + addReplySds(c,sdsnew("-ERR src and dest key are the same\r\n")); + return; + } + + de = dictFind(c->dict,c->argv[1]); + if (de == NULL) { + if (nx) + addReply(c,shared.minus1); + else + addReply(c,shared.nokeyerr); + return; + } + o = dictGetEntryVal(de); + incrRefCount(o); + if (dictAdd(c->dict,c->argv[2],o) == DICT_ERR) { + if (nx) { + decrRefCount(o); + addReply(c,shared.zero); + return; + } + dictReplace(c->dict,c->argv[2],o); + } else { + incrRefCount(c->argv[2]); + } + dictDelete(c->dict,c->argv[1]); + server.dirty++; + addReply(c,nx ? shared.one : shared.ok); +} + +static void renameCommand(redisClient *c) { + renameGenericCommand(c,0); +} + +static void renamenxCommand(redisClient *c) { + renameGenericCommand(c,1); +} + +static void moveCommand(redisClient *c) { + dictEntry *de; + robj *o, *key; + dict *src, *dst; + int srcid; + + /* Obtain source and target DB pointers */ + src = c->dict; + srcid = c->dictid; + if (selectDb(c,atoi(c->argv[2]->ptr)) == REDIS_ERR) { + addReply(c,shared.minus4); + return; + } + dst = c->dict; + c->dict = src; + c->dictid = srcid; + + /* If the user is moving using as target the same + * DB as the source DB it is probably an error. */ + if (src == dst) { + addReply(c,shared.minus3); + return; + } + + /* Check if the element exists and get a reference */ + de = dictFind(c->dict,c->argv[1]); + if (!de) { + addReply(c,shared.zero); + return; + } + + /* Try to add the element to the target DB */ + key = dictGetEntryKey(de); + o = dictGetEntryVal(de); + if (dictAdd(dst,key,o) == DICT_ERR) { + addReply(c,shared.zero); + return; + } + incrRefCount(key); + incrRefCount(o); + + /* OK! 
key moved, free the entry in the source DB */ + dictDelete(src,c->argv[1]); + server.dirty++; + addReply(c,shared.one); +} + +/* =================================== Lists ================================ */ +static void pushGenericCommand(redisClient *c, int where) { + robj *lobj; + dictEntry *de; + list *list; + + de = dictFind(c->dict,c->argv[1]); + if (de == NULL) { + lobj = createListObject(); + list = lobj->ptr; + if (where == REDIS_HEAD) { + if (!listAddNodeHead(list,c->argv[2])) oom("listAddNodeHead"); + } else { + if (!listAddNodeTail(list,c->argv[2])) oom("listAddNodeTail"); + } + dictAdd(c->dict,c->argv[1],lobj); + incrRefCount(c->argv[1]); + incrRefCount(c->argv[2]); + } else { + lobj = dictGetEntryVal(de); + if (lobj->type != REDIS_LIST) { + addReply(c,shared.wrongtypeerr); + return; + } + list = lobj->ptr; + if (where == REDIS_HEAD) { + if (!listAddNodeHead(list,c->argv[2])) oom("listAddNodeHead"); + } else { + if (!listAddNodeTail(list,c->argv[2])) oom("listAddNodeTail"); + } + incrRefCount(c->argv[2]); + } + server.dirty++; + addReply(c,shared.ok); +} + +static void lpushCommand(redisClient *c) { + pushGenericCommand(c,REDIS_HEAD); +} + +static void rpushCommand(redisClient *c) { + pushGenericCommand(c,REDIS_TAIL); +} + +static void llenCommand(redisClient *c) { + dictEntry *de; + list *l; + + de = dictFind(c->dict,c->argv[1]); + if (de == NULL) { + addReply(c,shared.zero); + return; + } else { + robj *o = dictGetEntryVal(de); + if (o->type != REDIS_LIST) { + addReply(c,shared.minus2); + } else { + l = o->ptr; + addReplySds(c,sdscatprintf(sdsempty(),"%d\r\n",listLength(l))); + } + } +} + +static void lindexCommand(redisClient *c) { + dictEntry *de; + int index = atoi(c->argv[2]->ptr); + + de = dictFind(c->dict,c->argv[1]); + if (de == NULL) { + addReply(c,shared.nil); + } else { + robj *o = dictGetEntryVal(de); + + if (o->type != REDIS_LIST) { + addReply(c,shared.wrongtypeerrbulk); + } else { + list *list = o->ptr; + listNode *ln; + + ln = listIndex(list, index); + if (ln == NULL) { + addReply(c,shared.nil); + } else { + robj *ele = listNodeValue(ln); + addReplySds(c,sdscatprintf(sdsempty(),"%d\r\n",(int)sdslen(ele->ptr))); + addReply(c,ele); + addReply(c,shared.crlf); + } + } + } +} + +static void lsetCommand(redisClient *c) { + dictEntry *de; + int index = atoi(c->argv[2]->ptr); + + de = dictFind(c->dict,c->argv[1]); + if (de == NULL) { + addReply(c,shared.nokeyerr); + } else { + robj *o = dictGetEntryVal(de); + + if (o->type != REDIS_LIST) { + addReply(c,shared.wrongtypeerr); + } else { + list *list = o->ptr; + listNode *ln; + + ln = listIndex(list, index); + if (ln == NULL) { + addReplySds(c,sdsnew("-ERR index out of range\r\n")); + } else { + robj *ele = listNodeValue(ln); + + decrRefCount(ele); + listNodeValue(ln) = c->argv[3]; + incrRefCount(c->argv[3]); + addReply(c,shared.ok); + server.dirty++; + } + } + } +} + +static void popGenericCommand(redisClient *c, int where) { + dictEntry *de; + + de = dictFind(c->dict,c->argv[1]); + if (de == NULL) { + addReply(c,shared.nil); + } else { + robj *o = dictGetEntryVal(de); + + if (o->type != REDIS_LIST) { + addReply(c,shared.wrongtypeerrbulk); + } else { + list *list = o->ptr; + listNode *ln; + + if (where == REDIS_HEAD) + ln = listFirst(list); + else + ln = listLast(list); + + if (ln == NULL) { + addReply(c,shared.nil); + } else { + robj *ele = listNodeValue(ln); + addReplySds(c,sdscatprintf(sdsempty(),"%d\r\n",(int)sdslen(ele->ptr))); + addReply(c,ele); + addReply(c,shared.crlf); + listDelNode(list,ln); + server.dirty++; + 
} + } + } +} + +static void lpopCommand(redisClient *c) { + popGenericCommand(c,REDIS_HEAD); +} + +static void rpopCommand(redisClient *c) { + popGenericCommand(c,REDIS_TAIL); +} + +static void lrangeCommand(redisClient *c) { + dictEntry *de; + int start = atoi(c->argv[2]->ptr); + int end = atoi(c->argv[3]->ptr); + + de = dictFind(c->dict,c->argv[1]); + if (de == NULL) { + addReply(c,shared.nil); + } else { + robj *o = dictGetEntryVal(de); + + if (o->type != REDIS_LIST) { + addReply(c,shared.wrongtypeerrbulk); + } else { + list *list = o->ptr; + listNode *ln; + int llen = listLength(list); + int rangelen, j; + robj *ele; + + /* convert negative indexes */ + if (start < 0) start = llen+start; + if (end < 0) end = llen+end; + if (start < 0) start = 0; + if (end < 0) end = 0; + + /* indexes sanity checks */ + if (start > end || start >= llen) { + /* Out of range start or start > end result in empty list */ + addReply(c,shared.zero); + return; + } + if (end >= llen) end = llen-1; + rangelen = (end-start)+1; + + /* Return the result in form of a multi-bulk reply */ + ln = listIndex(list, start); + addReplySds(c,sdscatprintf(sdsempty(),"%d\r\n",rangelen)); + for (j = 0; j < rangelen; j++) { + ele = listNodeValue(ln); + addReplySds(c,sdscatprintf(sdsempty(),"%d\r\n",(int)sdslen(ele->ptr))); + addReply(c,ele); + addReply(c,shared.crlf); + ln = ln->next; + } + } + } +} + +static void ltrimCommand(redisClient *c) { + dictEntry *de; + int start = atoi(c->argv[2]->ptr); + int end = atoi(c->argv[3]->ptr); + + de = dictFind(c->dict,c->argv[1]); + if (de == NULL) { + addReply(c,shared.nokeyerr); + } else { + robj *o = dictGetEntryVal(de); + + if (o->type != REDIS_LIST) { + addReply(c,shared.wrongtypeerr); + } else { + list *list = o->ptr; + listNode *ln; + int llen = listLength(list); + int j, ltrim, rtrim; + + /* convert negative indexes */ + if (start < 0) start = llen+start; + if (end < 0) end = llen+end; + if (start < 0) start = 0; + if (end < 0) end = 0; + + /* indexes sanity checks */ + if (start > end || start >= llen) { + /* Out of range start or start > end result in empty list */ + ltrim = llen; + rtrim = 0; + } else { + if (end >= llen) end = llen-1; + ltrim = start; + rtrim = llen-end-1; + } + + /* Remove list elements to perform the trim */ + for (j = 0; j < ltrim; j++) { + ln = listFirst(list); + listDelNode(list,ln); + } + for (j = 0; j < rtrim; j++) { + ln = listLast(list); + listDelNode(list,ln); + } + addReply(c,shared.ok); + server.dirty++; + } + } +} + +static void lremCommand(redisClient *c) { + dictEntry *de; + + de = dictFind(c->dict,c->argv[1]); + if (de == NULL) { + addReply(c,shared.minus1); + } else { + robj *o = dictGetEntryVal(de); + + if (o->type != REDIS_LIST) { + addReply(c,shared.minus2); + } else { + list *list = o->ptr; + listNode *ln, *next; + int toremove = atoi(c->argv[2]->ptr); + int removed = 0; + int fromtail = 0; + + if (toremove < 0) { + toremove = -toremove; + fromtail = 1; + } + ln = fromtail ? list->tail : list->head; + while (ln) { + next = fromtail ? 
ln->prev : ln->next; + robj *ele = listNodeValue(ln); + if (sdscmp(ele->ptr,c->argv[3]->ptr) == 0) { + listDelNode(list,ln); + server.dirty++; + removed++; + if (toremove && removed == toremove) break; + } + ln = next; + } + addReplySds(c,sdscatprintf(sdsempty(),"%d\r\n",removed)); + } + } +} + +/* ==================================== Sets ================================ */ + +static void saddCommand(redisClient *c) { + dictEntry *de; + robj *set; + + de = dictFind(c->dict,c->argv[1]); + if (de == NULL) { + set = createSetObject(); + dictAdd(c->dict,c->argv[1],set); + incrRefCount(c->argv[1]); + } else { + set = dictGetEntryVal(de); + if (set->type != REDIS_SET) { + addReply(c,shared.minus2); + return; + } + } + if (dictAdd(set->ptr,c->argv[2],NULL) == DICT_OK) { + incrRefCount(c->argv[2]); + server.dirty++; + addReply(c,shared.one); + } else { + addReply(c,shared.zero); + } +} + +static void sremCommand(redisClient *c) { + dictEntry *de; + + de = dictFind(c->dict,c->argv[1]); + if (de == NULL) { + addReply(c,shared.zero); + } else { + robj *set; + + set = dictGetEntryVal(de); + if (set->type != REDIS_SET) { + addReply(c,shared.minus2); + return; + } + if (dictDelete(set->ptr,c->argv[2]) == DICT_OK) { + server.dirty++; + addReply(c,shared.one); + } else { + addReply(c,shared.zero); + } + } +} + +static void sismemberCommand(redisClient *c) { + dictEntry *de; + + de = dictFind(c->dict,c->argv[1]); + if (de == NULL) { + addReply(c,shared.zero); + } else { + robj *set; + + set = dictGetEntryVal(de); + if (set->type != REDIS_SET) { + addReply(c,shared.minus2); + return; + } + if (dictFind(set->ptr,c->argv[2])) + addReply(c,shared.one); + else + addReply(c,shared.zero); + } +} + +static void scardCommand(redisClient *c) { + dictEntry *de; + dict *s; + + de = dictFind(c->dict,c->argv[1]); + if (de == NULL) { + addReply(c,shared.zero); + return; + } else { + robj *o = dictGetEntryVal(de); + if (o->type != REDIS_SET) { + addReply(c,shared.minus2); + } else { + s = o->ptr; + addReplySds(c,sdscatprintf(sdsempty(),"%d\r\n", + dictGetHashTableUsed(s))); + } + } +} + +static int qsortCompareSetsByCardinality(const void *s1, const void *s2) { + dict **d1 = (void*) s1, **d2 = (void*) s2; + + return dictGetHashTableUsed(*d1)-dictGetHashTableUsed(*d2); +} + +static void sinterGenericCommand(redisClient *c, robj **setskeys, int setsnum, robj *dstkey) { + dict **dv = zmalloc(sizeof(dict*)*setsnum); + dictIterator *di; + dictEntry *de; + robj *lenobj = NULL, *dstset = NULL; + int j, cardinality = 0; + + if (!dv) oom("sinterCommand"); + for (j = 0; j < setsnum; j++) { + robj *setobj; + dictEntry *de; + + de = dictFind(c->dict,setskeys[j]); + if (!de) { + zfree(dv); + addReply(c,dstkey ? shared.nokeyerr : shared.nil); + return; + } + setobj = dictGetEntryVal(de); + if (setobj->type != REDIS_SET) { + zfree(dv); + addReply(c,dstkey ? shared.wrongtypeerr : shared.wrongtypeerrbulk); + return; + } + dv[j] = setobj->ptr; + } + /* Sort sets from the smallest to largest, this will improve our + * algorithm's performace */ + qsort(dv,setsnum,sizeof(dict*),qsortCompareSetsByCardinality); + + /* The first thing we should output is the total number of elements... 
+ * since this is a multi-bulk write, but at this stage we don't know + * the intersection set size, so we use a trick, append an empty object + * to the output list and save the pointer to later modify it with the + * right length */ + if (!dstkey) { + lenobj = createObject(REDIS_STRING,NULL); + addReply(c,lenobj); + decrRefCount(lenobj); + } else { + /* If we have a target key where to store the resulting set + * create this key with an empty set inside */ + dstset = createSetObject(); + dictDelete(c->dict,dstkey); + dictAdd(c->dict,dstkey,dstset); + incrRefCount(dstkey); + } + + /* Iterate all the elements of the first (smallest) set, and test + * the element against all the other sets, if at least one set does + * not include the element it is discarded */ + di = dictGetIterator(dv[0]); + if (!di) oom("dictGetIterator"); + + while((de = dictNext(di)) != NULL) { + robj *ele; + + for (j = 1; j < setsnum; j++) + if (dictFind(dv[j],dictGetEntryKey(de)) == NULL) break; + if (j != setsnum) + continue; /* at least one set does not contain the member */ + ele = dictGetEntryKey(de); + if (!dstkey) { + addReplySds(c,sdscatprintf(sdsempty(),"%d\r\n",sdslen(ele->ptr))); + addReply(c,ele); + addReply(c,shared.crlf); + cardinality++; + } else { + dictAdd(dstset->ptr,ele,NULL); + incrRefCount(ele); + } + } + dictReleaseIterator(di); + + if (!dstkey) + lenobj->ptr = sdscatprintf(sdsempty(),"%d\r\n",cardinality); + else + addReply(c,shared.ok); + zfree(dv); +} + +static void sinterCommand(redisClient *c) { + sinterGenericCommand(c,c->argv+1,c->argc-1,NULL); +} + +static void sinterstoreCommand(redisClient *c) { + sinterGenericCommand(c,c->argv+2,c->argc-2,c->argv[1]); +} + +static void flushdbCommand(redisClient *c) { + dictEmpty(c->dict); + addReply(c,shared.ok); + saveDb(server.dbfilename); +} + +static void flushallCommand(redisClient *c) { + emptyDb(); + addReply(c,shared.ok); + saveDb(server.dbfilename); +} + +redisSortOperation *createSortOperation(int type, robj *pattern) { + redisSortOperation *so = zmalloc(sizeof(*so)); + if (!so) oom("createSortOperation"); + so->type = type; + so->pattern = pattern; + return so; +} + +/* Return the value associated to the key with a name obtained + * substituting the first occurence of '*' in 'pattern' with 'subst' */ +robj *lookupKeyByPattern(dict *dict, robj *pattern, robj *subst) { + char *p; + sds spat, ssub; + robj keyobj; + int prefixlen, sublen, postfixlen; + dictEntry *de; + /* Expoit the internal sds representation to create a sds string allocated on the stack in order to make this function faster */ + struct { + long len; + long free; + char buf[REDIS_SORTKEY_MAX+1]; + } keyname; + + + spat = pattern->ptr; + ssub = subst->ptr; + if (sdslen(spat)+sdslen(ssub)-1 > REDIS_SORTKEY_MAX) return NULL; + p = strchr(spat,'*'); + if (!p) return NULL; + + prefixlen = p-spat; + sublen = sdslen(ssub); + postfixlen = sdslen(spat)-(prefixlen+1); + memcpy(keyname.buf,spat,prefixlen); + memcpy(keyname.buf+prefixlen,ssub,sublen); + memcpy(keyname.buf+prefixlen+sublen,p+1,postfixlen); + keyname.buf[prefixlen+sublen+postfixlen] = '\0'; + keyname.len = prefixlen+sublen+postfixlen; + + keyobj.refcount = 1; + keyobj.type = REDIS_STRING; + keyobj.ptr = ((char*)&keyname)+(sizeof(long)*2); + + de = dictFind(dict,&keyobj); + // printf("lookup '%s' => %p\n", keyname.buf,de); + if (!de) return NULL; + return dictGetEntryVal(de); +} + +/* sortCompare() is used by qsort in sortCommand(). 
Given that qsort_r with + * the additional parameter is not standard but a BSD-specific we have to + * pass sorting parameters via the global 'server' structure */ +static int sortCompare(const void *s1, const void *s2) { + const redisSortObject *so1 = s1, *so2 = s2; + int cmp; + + if (!server.sort_alpha) { + /* Numeric sorting. Here it's trivial as we precomputed scores */ + if (so1->u.score > so2->u.score) { + cmp = 1; + } else if (so1->u.score < so2->u.score) { + cmp = -1; + } else { + cmp = 0; + } + } else { + /* Alphanumeric sorting */ + if (server.sort_bypattern) { + if (!so1->u.cmpobj || !so2->u.cmpobj) { + /* At least one compare object is NULL */ + if (so1->u.cmpobj == so2->u.cmpobj) + cmp = 0; + else if (so1->u.cmpobj == NULL) + cmp = -1; + else + cmp = 1; + } else { + /* We have both the objects, use strcoll */ + cmp = strcoll(so1->u.cmpobj->ptr,so2->u.cmpobj->ptr); + } + } else { + /* Compare elements directly */ + cmp = strcoll(so1->obj->ptr,so2->obj->ptr); + } + } + return server.sort_desc ? -cmp : cmp; +} + +/* The SORT command is the most complex command in Redis. Warning: this code + * is optimized for speed and a bit less for readability */ +static void sortCommand(redisClient *c) { + dictEntry *de; + list *operations; + int outputlen = 0; + int desc = 0, alpha = 0; + int limit_start = 0, limit_count = -1, start, end; + int j, dontsort = 0, vectorlen; + int getop = 0; /* GET operation counter */ + robj *sortval, *sortby = NULL; + redisSortObject *vector; /* Resulting vector to sort */ + + /* Lookup the key to sort. It must be of the right types */ + de = dictFind(c->dict,c->argv[1]); + if (de == NULL) { + addReply(c,shared.nokeyerrbulk); + return; + } + sortval = dictGetEntryVal(de); + if (sortval->type != REDIS_SET && sortval->type != REDIS_LIST) { + addReply(c,shared.wrongtypeerrbulk); + return; + } + + /* Create a list of operations to perform for every sorted element. + * Operations can be GET/DEL/INCR/DECR */ + operations = listCreate(); + listSetFreeMethod(operations,free); + j = 2; + + /* Now we need to protect sortval incrementing its count, in the future + * SORT may have options able to overwrite/delete keys during the sorting + * and the sorted key itself may get destroied */ + incrRefCount(sortval); + + /* The SORT command has an SQL-alike syntax, parse it */ + while(j < c->argc) { + int leftargs = c->argc-j-1; + if (!strcasecmp(c->argv[j]->ptr,"asc")) { + desc = 0; + } else if (!strcasecmp(c->argv[j]->ptr,"desc")) { + desc = 1; + } else if (!strcasecmp(c->argv[j]->ptr,"alpha")) { + alpha = 1; + } else if (!strcasecmp(c->argv[j]->ptr,"limit") && leftargs >= 2) { + limit_start = atoi(c->argv[j+1]->ptr); + limit_count = atoi(c->argv[j+2]->ptr); + j+=2; + } else if (!strcasecmp(c->argv[j]->ptr,"by") && leftargs >= 1) { + sortby = c->argv[j+1]; + /* If the BY pattern does not contain '*', i.e. it is constant, + * we don't need to sort nor to lookup the weight keys. 
*/ + if (strchr(c->argv[j+1]->ptr,'*') == NULL) dontsort = 1; + j++; + } else if (!strcasecmp(c->argv[j]->ptr,"get") && leftargs >= 1) { + listAddNodeTail(operations,createSortOperation( + REDIS_SORT_GET,c->argv[j+1])); + getop++; + j++; + } else if (!strcasecmp(c->argv[j]->ptr,"del") && leftargs >= 1) { + listAddNodeTail(operations,createSortOperation( + REDIS_SORT_DEL,c->argv[j+1])); + j++; + } else if (!strcasecmp(c->argv[j]->ptr,"incr") && leftargs >= 1) { + listAddNodeTail(operations,createSortOperation( + REDIS_SORT_INCR,c->argv[j+1])); + j++; + } else if (!strcasecmp(c->argv[j]->ptr,"get") && leftargs >= 1) { + listAddNodeTail(operations,createSortOperation( + REDIS_SORT_DECR,c->argv[j+1])); + j++; + } else { + decrRefCount(sortval); + listRelease(operations); + addReply(c,shared.syntaxerrbulk); + return; + } + j++; + } + + /* Load the sorting vector with all the objects to sort */ + vectorlen = (sortval->type == REDIS_LIST) ? + listLength((list*)sortval->ptr) : + dictGetHashTableUsed((dict*)sortval->ptr); + vector = zmalloc(sizeof(redisSortObject)*vectorlen); + if (!vector) oom("allocating objects vector for SORT"); + j = 0; + if (sortval->type == REDIS_LIST) { + list *list = sortval->ptr; + listNode *ln = list->head; + while(ln) { + robj *ele = ln->value; + vector[j].obj = ele; + vector[j].u.score = 0; + vector[j].u.cmpobj = NULL; + ln = ln->next; + j++; + } + } else { + dict *set = sortval->ptr; + dictIterator *di; + dictEntry *setele; + + di = dictGetIterator(set); + if (!di) oom("dictGetIterator"); + while((setele = dictNext(di)) != NULL) { + vector[j].obj = dictGetEntryKey(setele); + vector[j].u.score = 0; + vector[j].u.cmpobj = NULL; + j++; + } + dictReleaseIterator(di); + } + assert(j == vectorlen); + + /* Now it's time to load the right scores in the sorting vector */ + if (dontsort == 0) { + for (j = 0; j < vectorlen; j++) { + if (sortby) { + robj *byval; + + byval = lookupKeyByPattern(c->dict,sortby,vector[j].obj); + if (!byval || byval->type != REDIS_STRING) continue; + if (alpha) { + vector[j].u.cmpobj = byval; + incrRefCount(byval); + } else { + vector[j].u.score = strtod(byval->ptr,NULL); + } + } else { + if (!alpha) vector[j].u.score = strtod(vector[j].obj->ptr,NULL); + } + } + } + + /* We are ready to sort the vector... perform a bit of sanity check + * on the LIMIT option too. We'll use a partial version of quicksort. */ + start = (limit_start < 0) ? 0 : limit_start; + end = (limit_count < 0) ? vectorlen-1 : start+limit_count-1; + if (start >= vectorlen) { + start = vectorlen-1; + end = vectorlen-2; + } + if (end >= vectorlen) end = vectorlen-1; + + if (dontsort == 0) { + server.sort_desc = desc; + server.sort_alpha = alpha; + server.sort_bypattern = sortby ? 1 : 0; + qsort(vector,vectorlen,sizeof(redisSortObject),sortCompare); + } + + /* Send command output to the output buffer, performing the specified + * GET/DEL/INCR/DECR operations if any. */ + outputlen = getop ? 
getop*(end-start+1) : end-start+1; + addReplySds(c,sdscatprintf(sdsempty(),"%d\r\n",outputlen)); + for (j = start; j <= end; j++) { + listNode *ln = operations->head; + if (!getop) { + addReplySds(c,sdscatprintf(sdsempty(),"%d\r\n", + sdslen(vector[j].obj->ptr))); + addReply(c,vector[j].obj); + addReply(c,shared.crlf); + } + while(ln) { + redisSortOperation *sop = ln->value; + robj *val = lookupKeyByPattern(c->dict,sop->pattern, + vector[j].obj); + + if (sop->type == REDIS_SORT_GET) { + if (!val || val->type != REDIS_STRING) { + addReply(c,shared.minus1); + } else { + addReplySds(c,sdscatprintf(sdsempty(),"%d\r\n", + sdslen(val->ptr))); + addReply(c,val); + addReply(c,shared.crlf); + } + } else if (sop->type == REDIS_SORT_DEL) { + /* TODO */ + } + ln = ln->next; + } + } + + /* Cleanup */ + decrRefCount(sortval); + listRelease(operations); + for (j = 0; j < vectorlen; j++) { + if (sortby && alpha && vector[j].u.cmpobj) + decrRefCount(vector[j].u.cmpobj); + } + zfree(vector); +} + +static void infoCommand(redisClient *c) { + sds info; + time_t uptime = time(NULL)-server.stat_starttime; + + info = sdscatprintf(sdsempty(), + "redis_version:%s\r\n" + "connected_clients:%d\r\n" + "connected_slaves:%d\r\n" + "used_memory:%d\r\n" + "changes_since_last_save:%lld\r\n" + "last_save_time:%d\r\n" + "total_connections_received:%lld\r\n" + "total_commands_processed:%lld\r\n" + "uptime_in_seconds:%d\r\n" + "uptime_in_days:%d\r\n" + ,REDIS_VERSION, + listLength(server.clients)-listLength(server.slaves), + listLength(server.slaves), + server.usedmemory, + server.dirty, + server.lastsave, + server.stat_numconnections, + server.stat_numcommands, + uptime, + uptime/(3600*24) + ); + addReplySds(c,sdscatprintf(sdsempty(),"%d\r\n",sdslen(info))); + addReplySds(c,info); +} + +/* =============================== Replication ============================= */ + +/* Send the whole output buffer syncronously to the slave. This a general operation in theory, but it is actually useful only for replication. 
*/ +static int flushClientOutput(redisClient *c) { + int retval; + time_t start = time(NULL); + + while(listLength(c->reply)) { + if (time(NULL)-start > 5) return REDIS_ERR; /* 5 seconds timeout */ + retval = aeWait(c->fd,AE_WRITABLE,1000); + if (retval == -1) { + return REDIS_ERR; + } else if (retval & AE_WRITABLE) { + sendReplyToClient(NULL, c->fd, c, AE_WRITABLE); + } + } + return REDIS_OK; +} + +static int syncWrite(int fd, void *ptr, ssize_t size, int timeout) { + ssize_t nwritten, ret = size; + time_t start = time(NULL); + + timeout++; + while(size) { + if (aeWait(fd,AE_WRITABLE,1000) & AE_WRITABLE) { + nwritten = write(fd,ptr,size); + if (nwritten == -1) return -1; + ptr += nwritten; + size -= nwritten; + } + if ((time(NULL)-start) > timeout) { + errno = ETIMEDOUT; + return -1; + } + } + return ret; +} + +static int syncRead(int fd, void *ptr, ssize_t size, int timeout) { + ssize_t nread, totread = 0; + time_t start = time(NULL); + + timeout++; + while(size) { + if (aeWait(fd,AE_READABLE,1000) & AE_READABLE) { + nread = read(fd,ptr,size); + if (nread == -1) return -1; + ptr += nread; + size -= nread; + totread += nread; + } + if ((time(NULL)-start) > timeout) { + errno = ETIMEDOUT; + return -1; + } + } + return totread; +} + +static int syncReadLine(int fd, char *ptr, ssize_t size, int timeout) { + ssize_t nread = 0; + + size--; + while(size) { + char c; + + if (syncRead(fd,&c,1,timeout) == -1) return -1; + if (c == '\n') { + *ptr = '\0'; + if (nread && *(ptr-1) == '\r') *(ptr-1) = '\0'; + return nread; + } else { + *ptr++ = c; + *ptr = '\0'; + nread++; + } + } + return nread; +} + +static void syncCommand(redisClient *c) { + struct stat sb; + int fd = -1, len; + time_t start = time(NULL); + char sizebuf[32]; + + redisLog(REDIS_NOTICE,"Slave ask for syncronization"); + if (flushClientOutput(c) == REDIS_ERR || saveDb(server.dbfilename) != REDIS_OK) + goto closeconn; + + fd = open(server.dbfilename, O_RDONLY); + if (fd == -1 || fstat(fd,&sb) == -1) goto closeconn; + len = sb.st_size; + + snprintf(sizebuf,32,"%d\r\n",len); + if (syncWrite(c->fd,sizebuf,strlen(sizebuf),5) == -1) goto closeconn; + while(len) { + char buf[1024]; + int nread; + + if (time(NULL)-start > REDIS_MAX_SYNC_TIME) goto closeconn; + nread = read(fd,buf,1024); + if (nread == -1) goto closeconn; + len -= nread; + if (syncWrite(c->fd,buf,nread,5) == -1) goto closeconn; + } + if (syncWrite(c->fd,"\r\n",2,5) == -1) goto closeconn; + close(fd); + c->flags |= REDIS_SLAVE; + c->slaveseldb = 0; + if (!listAddNodeTail(server.slaves,c)) oom("listAddNodeTail"); + redisLog(REDIS_NOTICE,"Syncronization with slave succeeded"); + return; + +closeconn: + if (fd != -1) close(fd); + c->flags |= REDIS_CLOSE; + redisLog(REDIS_WARNING,"Syncronization with slave failed"); + return; +} + +static int syncWithMaster(void) { + char buf[1024], tmpfile[256]; + int dumpsize; + int fd = anetTcpConnect(NULL,server.masterhost,server.masterport); + int dfd; + + if (fd == -1) { + redisLog(REDIS_WARNING,"Unable to connect to MASTER: %s", + strerror(errno)); + return REDIS_ERR; + } + /* Issue the SYNC command */ + if (syncWrite(fd,"SYNC \r\n",7,5) == -1) { + close(fd); + redisLog(REDIS_WARNING,"I/O error writing to MASTER: %s", + strerror(errno)); + return REDIS_ERR; + } + /* Read the bulk write count */ + if (syncReadLine(fd,buf,1024,5) == -1) { + close(fd); + redisLog(REDIS_WARNING,"I/O error reading bulk count from MASTER: %s", + strerror(errno)); + return REDIS_ERR; + } + dumpsize = atoi(buf); + redisLog(REDIS_NOTICE,"Receiving %d bytes data dump 
from MASTER",dumpsize); + /* Read the bulk write data on a temp file */ + snprintf(tmpfile,256,"temp-%d.%ld.rdb",(int)time(NULL),(long int)random()); + dfd = open(tmpfile,O_CREAT|O_WRONLY,0644); + if (dfd == -1) { + close(fd); + redisLog(REDIS_WARNING,"Opening the temp file needed for MASTER <-> SLAVE synchronization: %s",strerror(errno)); + return REDIS_ERR; + } + while(dumpsize) { + int nread, nwritten; + + nread = read(fd,buf,(dumpsize < 1024)?dumpsize:1024); + if (nread == -1) { + redisLog(REDIS_WARNING,"I/O error trying to sync with MASTER: %s", + strerror(errno)); + close(fd); + close(dfd); + return REDIS_ERR; + } + nwritten = write(dfd,buf,nread); + if (nwritten == -1) { + redisLog(REDIS_WARNING,"Write error writing to the DB dump file needed for MASTER <-> SLAVE synchrnonization: %s", strerror(errno)); + close(fd); + close(dfd); + return REDIS_ERR; + } + dumpsize -= nread; + } + close(dfd); + if (rename(tmpfile,server.dbfilename) == -1) { + redisLog(REDIS_WARNING,"Failed trying to rename the temp DB into dump.rdb in MASTER <-> SLAVE synchronization: %s", strerror(errno)); + unlink(tmpfile); + close(fd); + return REDIS_ERR; + } + emptyDb(); + if (loadDb(server.dbfilename) != REDIS_OK) { + redisLog(REDIS_WARNING,"Failed trying to load the MASTER synchronization DB from disk"); + close(fd); + return REDIS_ERR; + } + server.master = createClient(fd); + server.master->flags |= REDIS_MASTER; + server.replstate = REDIS_REPL_CONNECTED; + return REDIS_OK; +} + +/* =================================== Main! ================================ */ + +static void daemonize(void) { + int fd; + FILE *fp; + + if (fork() != 0) exit(0); /* parent exits */ + setsid(); /* create a new session */ + + /* Every output goes to /dev/null. If Redis is daemonized but + * the 'logfile' is set to 'stdout' in the configuration file + * it will not log at all. */ + if ((fd = open("/dev/null", O_RDWR, 0)) != -1) { + dup2(fd, STDIN_FILENO); + dup2(fd, STDOUT_FILENO); + dup2(fd, STDERR_FILENO); + if (fd > STDERR_FILENO) close(fd); + } + /* Try to write the pid file */ + fp = fopen("/var/run/redis.pid","w"); + if (fp) { + fprintf(fp,"%d\n",getpid()); + fclose(fp); + } +} + +int main(int argc, char **argv) { + initServerConfig(); + if (argc == 2) { + ResetServerSaveParams(); + loadServerConfig(argv[1]); + } else if (argc > 2) { + fprintf(stderr,"Usage: ./redis-server [/path/to/redis.conf]\n"); + exit(1); + } + initServer(); + if (server.daemonize) daemonize(); + redisLog(REDIS_NOTICE,"Server started, Redis version " REDIS_VERSION); + if (loadDb(server.dbfilename) == REDIS_OK) + redisLog(REDIS_NOTICE,"DB loaded from disk"); + if (aeCreateFileEvent(server.el, server.fd, AE_READABLE, + acceptHandler, NULL, NULL) == AE_ERR) oom("creating file event"); + redisLog(REDIS_NOTICE,"The server is now ready to accept connections"); + aeMain(server.el); + aeDeleteEventLoop(server.el); + return 0; +} diff --git a/redis.conf b/redis.conf new file mode 100644 index 00000000..19a9c640 --- /dev/null +++ b/redis.conf @@ -0,0 +1,66 @@ +# Redis configuration file example + +# By default Redis does not run as a daemon. Use 'yes' if you need it. +# Note that Redis will write a pid file in /var/run/redis.pid when daemonized. +daemonize no + +# Accept connections on the specified port, default is 6379 +port 6379 + +# If you want you can bind a single interface, if the bind option is not +# specified all the interfaces will listen for connections. 
+# +# bind 127.0.0.1 + +# Close the connection after a client is idle for N seconds +timeout 300 + +# Save the DB on disk: +# +# save <seconds> <changes> +# +# Will save the DB if both the given number of seconds and the given +# number of write operations against the DB occurred. +# +# In the example below the behaviour will be to save: +# after 900 sec (15 min) if at least 1 key changed +# after 300 sec (5 min) if at least 10 keys changed +# after 60 sec if at least 10000 keys changed +save 900 1 +save 300 10 +save 60 10000 + +# For default save/load DB in/from the working directory +# Note that you must specify a directory not a file name. +dir ./ + +# Set server verbosity to 'debug' +# it can be one of: +# debug (a lot of information, useful for development/testing) +# notice (moderately verbose, what you want in production probably) +# warning (only very important / critical messages are logged) +loglevel debug + +# Specify the log file name. Also 'stdout' can be used to force +# the daemon to log on the standard output. Note that if you use standard +# output for logging but daemonize, logs will be sent to /dev/null +logfile stdout + +# Set the number of databases. +databases 16 + +################################# REPLICATION ################################# + +# Master-Slave replication. Use slaveof to make a Redis instance a copy of +# another Redis server. Note that the configuration is local to the slave +# so for example it is possible to configure the slave to save the DB with a +# different interval, or to listen to another port, and so on. + +# slaveof <masterip> <masterport> + +############################### ADVANCED CONFIG ############################### + +# Glue small output buffers together in order to send small replies in a +# single TCP packet. Uses a bit more CPU but most of the times it is a win +# in terms of number of queries per second. Use 'yes' if unsure. +glueoutputbuf yes diff --git a/sds.c b/sds.c new file mode 100644 index 00000000..ca77a079 --- /dev/null +++ b/sds.c @@ -0,0 +1,329 @@ +/* SDSLib, A C dynamic strings library + * + * Copyright (c) 2006-2009, Salvatore Sanfilippo + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include "sds.h" +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <stdarg.h> +#include <ctype.h> +#include "zmalloc.h" + +static void sdsOomAbort(void) { + fprintf(stderr,"SDS: Out Of Memory (SDS_ABORT_ON_OOM defined)\n"); + abort(); +} + +sds sdsnewlen(const void *init, size_t initlen) { + struct sdshdr *sh; + + sh = zmalloc(sizeof(struct sdshdr)+initlen+1); +#ifdef SDS_ABORT_ON_OOM + if (sh == NULL) sdsOomAbort(); +#else + if (sh == NULL) return NULL; +#endif + sh->len = initlen; + sh->free = 0; + if (initlen) { + if (init) memcpy(sh->buf, init, initlen); + else memset(sh->buf,0,initlen); + } + sh->buf[initlen] = '\0'; + return (char*)sh->buf; +} + +sds sdsempty(void) { + return sdsnewlen("",0); +} + +sds sdsnew(const char *init) { + size_t initlen = (init == NULL) ? 0 : strlen(init); + return sdsnewlen(init, initlen); +} + +size_t sdslen(const sds s) { + struct sdshdr *sh = (void*) (s-(sizeof(struct sdshdr))); + return sh->len; +} + +sds sdsdup(const sds s) { + return sdsnewlen(s, sdslen(s)); +} + +void sdsfree(sds s) { + if (s == NULL) return; + zfree(s-sizeof(struct sdshdr)); +} + +size_t sdsavail(sds s) { + struct sdshdr *sh = (void*) (s-(sizeof(struct sdshdr))); + return sh->free; +} + +void sdsupdatelen(sds s) { + struct sdshdr *sh = (void*) (s-(sizeof(struct sdshdr))); + int reallen = strlen(s); + sh->free += (sh->len-reallen); + sh->len = reallen; +} + +static sds sdsMakeRoomFor(sds s, size_t addlen) { + struct sdshdr *sh, *newsh; + size_t free = sdsavail(s); + size_t len, newlen; + + if (free >= addlen) return s; + len = sdslen(s); + sh = (void*) (s-(sizeof(struct sdshdr))); + newlen = (len+addlen)*2; + newsh = zrealloc(sh, sizeof(struct sdshdr)+newlen+1); +#ifdef SDS_ABORT_ON_OOM + if (newsh == NULL) sdsOomAbort(); +#else + if (newsh == NULL) return NULL; +#endif + + newsh->free = newlen - len; + return newsh->buf; +} + +sds sdscatlen(sds s, void *t, size_t len) { + struct sdshdr *sh; + size_t curlen = sdslen(s); + + s = sdsMakeRoomFor(s,len); + if (s == NULL) return NULL; + sh = (void*) (s-(sizeof(struct sdshdr))); + memcpy(s+curlen, t, len); + sh->len = curlen+len; + sh->free = sh->free-len; + s[curlen+len] = '\0'; + return s; +} + +sds sdscat(sds s, char *t) { + return sdscatlen(s, t, strlen(t)); +} + +sds sdscpylen(sds s, char *t, size_t len) { + struct sdshdr *sh = (void*) (s-(sizeof(struct sdshdr))); + size_t totlen = sh->free+sh->len; + + if (totlen < len) { + s = sdsMakeRoomFor(s,len-totlen); + if (s == NULL) return NULL; + sh = (void*) (s-(sizeof(struct sdshdr))); + totlen = sh->free+sh->len; + } + memcpy(s, t, len); + s[len] = '\0'; + sh->len = len; + sh->free = totlen-len; + return s; +} + +sds sdscpy(sds s, char *t) { + return sdscpylen(s, t, strlen(t)); +} + +sds sdscatprintf(sds s, const char *fmt, ...) 
{ + va_list ap; + char *buf, *t; + size_t buflen = 32; + + while(1) { + buf = zmalloc(buflen); +#ifdef SDS_ABORT_ON_OOM + if (buf == NULL) sdsOomAbort(); +#else + if (buf == NULL) return NULL; +#endif + buf[buflen-2] = '\0'; + va_start(ap, fmt); + vsnprintf(buf, buflen, fmt, ap); + va_end(ap); + if (buf[buflen-2] != '\0') { + zfree(buf); + buflen *= 2; + continue; + } + break; + } + t = sdscat(s, buf); + zfree(buf); + return t; +} + +sds sdstrim(sds s, const char *cset) { + struct sdshdr *sh = (void*) (s-(sizeof(struct sdshdr))); + char *start, *end, *sp, *ep; + size_t len; + + sp = start = s; + ep = end = s+sdslen(s)-1; + while(sp <= end && strchr(cset, *sp)) sp++; + while(ep > start && strchr(cset, *ep)) ep--; + len = (sp > ep) ? 0 : ((ep-sp)+1); + if (sh->buf != sp) memmove(sh->buf, sp, len); + sh->buf[len] = '\0'; + sh->free = sh->free+(sh->len-len); + sh->len = len; + return s; +} + +sds sdsrange(sds s, long start, long end) { + struct sdshdr *sh = (void*) (s-(sizeof(struct sdshdr))); + size_t newlen, len = sdslen(s); + + if (len == 0) return s; + if (start < 0) { + start = len+start; + if (start < 0) start = 0; + } + if (end < 0) { + end = len+end; + if (end < 0) end = 0; + } + newlen = (start > end) ? 0 : (end-start)+1; + if (newlen != 0) { + if (start >= (signed)len) start = len-1; + if (end >= (signed)len) end = len-1; + newlen = (start > end) ? 0 : (end-start)+1; + } else { + start = 0; + } + if (start != 0) memmove(sh->buf, sh->buf+start, newlen); + sh->buf[newlen] = 0; + sh->free = sh->free+(sh->len-newlen); + sh->len = newlen; + return s; +} + +void sdstolower(sds s) { + int len = sdslen(s), j; + + for (j = 0; j < len; j++) s[j] = tolower(s[j]); +} + +void sdstoupper(sds s) { + int len = sdslen(s), j; + + for (j = 0; j < len; j++) s[j] = toupper(s[j]); +} + +int sdscmp(sds s1, sds s2) { + size_t l1, l2, minlen; + int cmp; + + l1 = sdslen(s1); + l2 = sdslen(s2); + minlen = (l1 < l2) ? l1 : l2; + cmp = memcmp(s1,s2,minlen); + if (cmp == 0) return l1-l2; + return cmp; +} + +/* Split 's' with separator in 'sep'. An array + * of sds strings is returned. *count will be set + * by reference to the number of tokens returned. + * + * On out of memory, zero length string, zero length + * separator, NULL is returned. + * + * Note that 'sep' is able to split a string using + * a multi-character separator. For example + * sdssplit("foo_-_bar","_-_"); will return two + * elements "foo" and "bar". + * + * This version of the function is binary-safe but + * requires length arguments. sdssplit() is just the + * same function but for zero-terminated strings. 
+ */ +sds *sdssplitlen(char *s, int len, char *sep, int seplen, int *count) { + int elements = 0, slots = 5, start = 0, j; + + sds *tokens = zmalloc(sizeof(sds)*slots); +#ifdef SDS_ABORT_ON_OOM + if (tokens == NULL) sdsOomAbort(); +#endif + if (seplen < 1 || len < 0 || tokens == NULL) return NULL; + for (j = 0; j < (len-(seplen-1)); j++) { + /* make sure there is room for the next element and the final one */ + if (slots < elements+2) { + slots *= 2; + sds *newtokens = zrealloc(tokens,sizeof(sds)*slots); + if (newtokens == NULL) { +#ifdef SDS_ABORT_ON_OOM + sdsOomAbort(); +#else + goto cleanup; +#endif + } + tokens = newtokens; + } + /* search the separator */ + if ((seplen == 1 && *(s+j) == sep[0]) || (memcmp(s+j,sep,seplen) == 0)) { + tokens[elements] = sdsnewlen(s+start,j-start); + if (tokens[elements] == NULL) { +#ifdef SDS_ABORT_ON_OOM + sdsOomAbort(); +#else + goto cleanup; +#endif + } + elements++; + start = j+seplen; + j = j+seplen-1; /* skip the separator */ + } + } + /* Add the final element. We are sure there is room in the tokens array. */ + tokens[elements] = sdsnewlen(s+start,len-start); + if (tokens[elements] == NULL) { +#ifdef SDS_ABORT_ON_OOM + sdsOomAbort(); +#else + goto cleanup; +#endif + } + elements++; + *count = elements; + return tokens; + +#ifndef SDS_ABORT_ON_OOM +cleanup: + { + int i; + for (i = 0; i < elements; i++) sdsfree(tokens[i]); + zfree(tokens); + return NULL; + } +#endif +} diff --git a/sds.h b/sds.h new file mode 100644 index 00000000..ba2a5ca0 --- /dev/null +++ b/sds.h @@ -0,0 +1,63 @@ +/* SDSLib, A C dynamic strings library + * + * Copyright (c) 2006-2009, Salvatore Sanfilippo + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef __SDS_H +#define __SDS_H + +#include + +typedef char *sds; + +struct sdshdr { + long len; + long free; + char buf[0]; +}; + +sds sdsnewlen(const void *init, size_t initlen); +sds sdsnew(const char *init); +sds sdsempty(); +size_t sdslen(const sds s); +sds sdsdup(const sds s); +void sdsfree(sds s); +size_t sdsavail(sds s); +sds sdscatlen(sds s, void *t, size_t len); +sds sdscat(sds s, char *t); +sds sdscpylen(sds s, char *t, size_t len); +sds sdscpy(sds s, char *t); +sds sdscatprintf(sds s, const char *fmt, ...); +sds sdstrim(sds s, const char *cset); +sds sdsrange(sds s, long start, long end); +void sdsupdatelen(sds s); +int sdscmp(sds s1, sds s2); +sds *sdssplitlen(char *s, int len, char *sep, int seplen, int *count); +void sdstolower(sds s); + +#endif diff --git a/test-redis.tcl b/test-redis.tcl new file mode 100644 index 00000000..4053d3f1 --- /dev/null +++ b/test-redis.tcl @@ -0,0 +1,807 @@ +# TODO # test pipelining + +set ::passed 0 +set ::failed 0 + +proc test {name code okpattern} { + puts -nonewline [format "%-70s " $name] + flush stdout + set retval [uplevel 1 $code] + if {$okpattern eq $retval || [string match $okpattern $retval]} { + puts "PASSED" + incr ::passed + } else { + puts "!! ERROR expected\n'$okpattern'\nbut got\n'$retval'" + incr ::failed + } +} + +proc main {server port} { + set fd [redis_connect $server $port] + + test {DEL all keys to start with a clean DB} { + foreach key [redis_keys $fd *] { + redis_del $fd $key + } + redis_dbsize $fd + } {0} + + test {SET and GET an item} { + redis_set $fd x foobar + redis_get $fd x + } {foobar} + + test {DEL against a single item} { + redis_del $fd x + redis_get $fd x + } {} + + test {KEYS with pattern} { + foreach key {key_x key_y key_z foo_a foo_b foo_c} { + redis_set $fd $key hello + } + lsort [redis_keys $fd foo*] + } {foo_a foo_b foo_c} + + test {KEYS to get all keys} { + lsort [redis_keys $fd *] + } {foo_a foo_b foo_c key_x key_y key_z} + + test {DBSIZE} { + redis_dbsize $fd + } {6} + + test {DEL all keys} { + foreach key [redis_keys $fd *] { + redis_del $fd $key + } + redis_dbsize $fd + } {0} + + test {Very big payload in GET/SET} { + set buf [string repeat "abcd" 1000000] + redis_set $fd foo $buf + redis_get $fd foo + } [string repeat "abcd" 1000000] + + test {SET 10000 numeric keys and access all them in reverse order} { + for {set x 0} {$x < 10000} {incr x} { + redis_set $fd $x $x + } + set sum 0 + for {set x 9999} {$x >= 0} {incr x -1} { + incr sum [redis_get $fd $x] + } + format $sum + } {49995000} + + test {DBSIZE should be 10001 now} { + redis_dbsize $fd + } {10001} + + test {INCR against non existing key} { + set res {} + append res [redis_incr $fd novar] + append res [redis_get $fd novar] + } {11} + + test {INCR against key created by incr itself} { + redis_incr $fd novar + } {2} + + test {INCR against key originally set with SET} { + redis_set $fd novar 100 + redis_incr $fd novar + } {101} + + test {SETNX target key missing} { + redis_setnx $fd novar2 foobared + redis_get $fd novar2 + } {foobared} + + test {SETNX target key exists} { + redis_setnx $fd novar2 blabla + redis_get $fd novar2 + } {foobared} + + test {EXISTS} { + set res {} + redis_set $fd newkey test + append res [redis_exists $fd newkey] + redis_del $fd newkey + append res [redis_exists $fd newkey] + } {10} + + test {Zero length value in key. 
SET/GET/EXISTS} { + redis_set $fd emptykey {} + set res [redis_get $fd emptykey] + append res [redis_exists $fd emptykey] + redis_del $fd emptykey + append res [redis_exists $fd emptykey] + } {10} + + test {Commands pipelining} { + puts -nonewline $fd "SET k1 4\r\nxyzk\r\nGET k1\r\nPING\r\n" + flush $fd + set res {} + append res [string match +OK* [redis_read_retcode $fd]] + append res [redis_bulk_read $fd] + append res [string match +PONG* [redis_read_retcode $fd]] + format $res + } {1xyzk1} + + test {Non existing command} { + puts -nonewline $fd "foo\r\n" + flush $fd + string match -ERR* [redis_read_retcode $fd] + } {1} + + test {Basic LPUSH, RPUSH, LLENGTH, LINDEX} { + redis_lpush $fd mylist a + redis_lpush $fd mylist b + redis_rpush $fd mylist c + set res [redis_llen $fd mylist] + append res [redis_lindex $fd mylist 0] + append res [redis_lindex $fd mylist 1] + append res [redis_lindex $fd mylist 2] + } {3bac} + + test {DEL a list} { + redis_del $fd mylist + redis_exists $fd mylist + } {0} + + test {Create a long list and check every single element with LINDEX} { + set ok 0 + for {set i 0} {$i < 1000} {incr i} { + redis_rpush $fd mylist $i + } + for {set i 0} {$i < 1000} {incr i} { + if {[redis_lindex $fd mylist $i] eq $i} {incr ok} + if {[redis_lindex $fd mylist [expr (-$i)-1]] eq [expr 999-$i]} { + incr ok + } + } + format $ok + } {2000} + + test {Test elements with LINDEX in random access} { + set ok 0 + for {set i 0} {$i < 1000} {incr i} { + set r [expr int(rand()*1000)] + if {[redis_lindex $fd mylist $r] eq $r} {incr ok} + if {[redis_lindex $fd mylist [expr (-$r)-1]] eq [expr 999-$r]} { + incr ok + } + } + format $ok + } {2000} + + test {LLEN against non-list value error} { + redis_del $fd mylist + redis_set $fd mylist foobar + redis_llen $fd mylist + } {-2} + + test {LINDEX against non-list value error} { + redis_lindex $fd mylist 0 + } {*ERROR*} + + test {LPUSH against non-list value error} { + redis_lpush $fd mylist 0 + } {-ERR*} + + test {RPUSH against non-list value error} { + redis_rpush $fd mylist 0 + } {-ERR*} + + test {RENAME basic usage} { + redis_set $fd mykey hello + redis_rename $fd mykey mykey1 + redis_rename $fd mykey1 mykey2 + redis_get $fd mykey2 + } {hello} + + test {RENAME source key should no longer exist} { + redis_exists $fd mykey + } {0} + + test {RENAME against already existing key} { + redis_set $fd mykey a + redis_set $fd mykey2 b + redis_rename $fd mykey2 mykey + set res [redis_get $fd mykey] + append res [redis_exists $fd mykey2] + } {b0} + + test {RENAMENX basic usage} { + redis_del $fd mykey + redis_del $fd mykey2 + redis_set $fd mykey foobar + redis_renamenx $fd mykey mykey2 + set res [redis_get $fd mykey2] + append res [redis_exists $fd mykey] + } {foobar0} + + test {RENAMENX against already existing key} { + redis_set $fd mykey foo + redis_set $fd mykey2 bar + redis_renamenx $fd mykey mykey2 + } {0} + + test {RENAMENX against already existing key (2)} { + set res [redis_get $fd mykey] + append res [redis_get $fd mykey2] + } {foobar} + + test {RENAME against non existing source key} { + redis_rename $fd nokey foobar + } {-ERR*} + + test {RENAME where source and dest key is the same} { + redis_rename $fd mykey mykey + } {-ERR*} + + test {DEL all keys again (DB 0)} { + foreach key [redis_keys $fd *] { + redis_del $fd $key + } + redis_dbsize $fd + } {0} + + test {DEL all keys again (DB 1)} { + redis_select $fd 1 + foreach key [redis_keys $fd *] { + redis_del $fd $key + } + set res [redis_dbsize $fd] + redis_select $fd 0 + format $res + } {0} + + test 
{MOVE basic usage} { + redis_set $fd mykey foobar + redis_move $fd mykey 1 + set res {} + lappend res [redis_exists $fd mykey] + lappend res [redis_dbsize $fd] + redis_select $fd 1 + lappend res [redis_get $fd mykey] + lappend res [redis_dbsize $fd] + redis_select $fd 0 + format $res + } [list 0 0 foobar 1] + + test {MOVE against key existing in the target DB} { + redis_set $fd mykey hello + redis_move $fd mykey 1 + } {0} + + test {SET/GET keys in different DBs} { + redis_set $fd a hello + redis_set $fd b world + redis_select $fd 1 + redis_set $fd a foo + redis_set $fd b bared + redis_select $fd 0 + set res {} + lappend res [redis_get $fd a] + lappend res [redis_get $fd b] + redis_select $fd 1 + lappend res [redis_get $fd a] + lappend res [redis_get $fd b] + redis_select $fd 0 + format $res + } {hello world foo bared} + + test {Basic LPOP/RPOP} { + redis_del $fd mylist + redis_rpush $fd mylist 1 + redis_rpush $fd mylist 2 + redis_lpush $fd mylist 0 + list [redis_lpop $fd mylist] [redis_rpop $fd mylist] [redis_lpop $fd mylist] [redis_llen $fd mylist] + } [list 0 2 1 0] + + test {LPOP/RPOP against empty list} { + redis_lpop $fd mylist + } {} + + test {LPOP against non list value} { + redis_set $fd notalist foo + redis_lpop $fd notalist + } {*ERROR*against*} + + test {Mass LPUSH/LPOP} { + set sum 0 + for {set i 0} {$i < 1000} {incr i} { + redis_lpush $fd mylist $i + incr sum $i + } + set sum2 0 + for {set i 0} {$i < 500} {incr i} { + incr sum2 [redis_lpop $fd mylist] + incr sum2 [redis_rpop $fd mylist] + } + expr $sum == $sum2 + } {1} + + test {LRANGE basics} { + for {set i 0} {$i < 10} {incr i} { + redis_rpush $fd mylist $i + } + list [redis_lrange $fd mylist 1 -2] \ + [redis_lrange $fd mylist -3 -1] \ + [redis_lrange $fd mylist 4 4] + } {{1 2 3 4 5 6 7 8} {7 8 9} 4} + + test {LRANGE inverted indexes} { + redis_lrange $fd mylist 6 2 + } {} + + test {LRANGE out of range indexes including the full list} { + redis_lrange $fd mylist -1000 1000 + } {0 1 2 3 4 5 6 7 8 9} + + test {LRANGE against non existing key} { + redis_lrange $fd nosuchkey 0 1 + } {} + + test {LTRIM basics} { + redis_del $fd mylist + for {set i 0} {$i < 100} {incr i} { + redis_lpush $fd mylist $i + redis_ltrim $fd mylist 0 4 + } + redis_lrange $fd mylist 0 -1 + } {99 98 97 96 95} + + test {LSET} { + redis_lset $fd mylist 1 foo + redis_lset $fd mylist -1 bar + redis_lrange $fd mylist 0 -1 + } {99 foo 97 96 bar} + + test {LSET out of range index} { + redis_lset $fd mylist 10 foo + } {-ERR*range*} + + test {LSET against non existing key} { + redis_lset $fd nosuchkey 10 foo + } {-ERR*key*} + + test {LSET against non list value} { + redis_set $fd nolist foobar + redis_lset $fd nolist 0 foo + } {-ERR*value*} + + test {SADD, SCARD, SISMEMBER, SMEMBERS basics} { + redis_sadd $fd myset foo + redis_sadd $fd myset bar + list [redis_scard $fd myset] [redis_sismember $fd myset foo] \ + [redis_sismember $fd myset bar] [redis_sismember $fd myset bla] \ + [lsort [redis_smembers $fd myset]] + } {2 1 1 0 {bar foo}} + + test {SADD adding the same element multiple times} { + redis_sadd $fd myset foo + redis_sadd $fd myset foo + redis_sadd $fd myset foo + redis_scard $fd myset + } {2} + + test {SADD against non set} { + redis_sadd $fd mylist foo + } {-2} + + test {SREM basics} { + redis_sadd $fd myset ciao + redis_srem $fd myset foo + lsort [redis_smembers $fd myset] + } {bar ciao} + + test {Mass SADD and SINTER with two sets} { + for {set i 0} {$i < 1000} {incr i} { + redis_sadd $fd set1 $i + redis_sadd $fd set2 [expr $i+995] + } + lsort 
[redis_sinter $fd set1 set2] + } {995 996 997 998 999} + + test {SINTERSTORE with two sets} { + redis_sinterstore $fd setres set1 set2 + lsort [redis_smembers $fd setres] + } {995 996 997 998 999} + + test {SINTER against three sets} { + redis_sadd $fd set3 999 + redis_sadd $fd set3 995 + redis_sadd $fd set3 1000 + redis_sadd $fd set3 2000 + lsort [redis_sinter $fd set1 set2 set3] + } {995 999} + + test {SINTERSTORE with three sets} { + redis_sinterstore $fd setres set1 set2 set3 + lsort [redis_smembers $fd setres] + } {995 999} + + test {SAVE - make sure there are all the types as values} { + redis_lpush $fd mysavelist hello + redis_lpush $fd mysavelist world + redis_set $fd myemptykey {} + redis_set $fd mynormalkey {blablablba} + redis_save $fd + } {+OK} + + test {Create a random list} { + set tosort {} + array set seenrand {} + for {set i 0} {$i < 10000} {incr i} { + while 1 { + # Make sure all the weights are different because + # Redis does not use a stable sort but Tcl does. + set r [expr int(rand()*1000000)] + if {![info exists seenrand($r)]} break + } + set seenrand($r) x + redis_lpush $fd tosort $i + redis_set $fd weight_$i $r + lappend tosort [list $i $r] + } + set sorted [lsort -index 1 -real $tosort] + set res {} + for {set i 0} {$i < 10000} {incr i} { + lappend res [lindex $sorted $i 0] + } + format {} + } {} + + test {SORT with BY against the newly created list} { + redis_sort $fd tosort {BY weight_*} + } $res + + test {SORT direct, numeric, against the newly created list} { + redis_sort $fd tosort + } [lsort -integer $res] + + test {SORT decreasing sort} { + redis_sort $fd tosort {DESC} + } [lsort -decreasing -integer $res] + + test {SORT speed, sorting 10000 elements list using BY, 100 times} { + set start [clock clicks -milliseconds] + for {set i 0} {$i < 100} {incr i} { + set sorted [redis_sort $fd tosort {BY weight_* LIMIT 0 10}] + } + set elapsed [expr [clock clicks -milliseconds]-$start] + puts -nonewline "\n Average time to sort: [expr double($elapsed)/100] milliseconds " + flush stdout + format {} + } {} + + test {SORT speed, sorting 10000 elements list directly, 100 times} { + set start [clock clicks -milliseconds] + for {set i 0} {$i < 100} {incr i} { + set sorted [redis_sort $fd tosort {LIMIT 0 10}] + } + set elapsed [expr [clock clicks -milliseconds]-$start] + puts -nonewline "\n Average time to sort: [expr double($elapsed)/100] milliseconds " + flush stdout + format {} + } {} + + test {SORT speed, pseudo-sorting 10000 elements list, BY , 100 times} { + set start [clock clicks -milliseconds] + for {set i 0} {$i < 100} {incr i} { + set sorted [redis_sort $fd tosort {BY nokey LIMIT 0 10}] + } + set elapsed [expr [clock clicks -milliseconds]-$start] + puts -nonewline "\n Average time to sort: [expr double($elapsed)/100] milliseconds " + flush stdout + format {} + } {} + + test {SORT regression for issue #19, sorting floats} { + redis_flushdb $fd + foreach x {1.1 5.10 3.10 7.44 2.1 5.75 6.12 0.25 1.15} { + redis_lpush $fd mylist $x + } + redis_sort $fd mylist + } [lsort -real {1.1 5.10 3.10 7.44 2.1 5.75 6.12 0.25 1.15}] + + test {LREM, remove all the occurrences} { + redis_flushall $fd + redis_rpush $fd mylist foo + redis_rpush $fd mylist bar + redis_rpush $fd mylist foobar + redis_rpush $fd mylist foobared + redis_rpush $fd mylist zap + redis_rpush $fd mylist bar + redis_rpush $fd mylist test + redis_rpush $fd mylist foo + set res [redis_lrem $fd mylist 0 bar] + list [redis_lrange $fd mylist 0 -1] $res + } {{foo foobar foobared zap test foo} 2} + + test {LREM, 
remove the first occurrence} { + set res [redis_lrem $fd mylist 1 foo] + list [redis_lrange $fd mylist 0 -1] $res + } {{foobar foobared zap test foo} 1} + + test {LREM, remove non existing element} { + set res [redis_lrem $fd mylist 1 nosuchelement] + list [redis_lrange $fd mylist 0 -1] $res + } {{foobar foobared zap test foo} 0} + + test {LREM, starting from tail with negative count} { + redis_flushall $fd + redis_rpush $fd mylist foo + redis_rpush $fd mylist bar + redis_rpush $fd mylist foobar + redis_rpush $fd mylist foobared + redis_rpush $fd mylist zap + redis_rpush $fd mylist bar + redis_rpush $fd mylist test + redis_rpush $fd mylist foo + redis_rpush $fd mylist foo + set res [redis_lrem $fd mylist -1 bar] + list [redis_lrange $fd mylist 0 -1] $res + } {{foo bar foobar foobared zap test foo foo} 1} + + test {LREM, starting from tail with negative count (2)} { + set res [redis_lrem $fd mylist -2 foo] + list [redis_lrange $fd mylist 0 -1] $res + } {{foo bar foobar foobared zap test} 2} + + # Leave the user with a clean DB before to exit + test {FLUSHALL} { + redis_flushall $fd + redis_dbsize $fd + } {0} + + puts "\n[expr $::passed+$::failed] tests, $::passed passed, $::failed failed" + if {$::failed > 0} { + puts "\n*** WARNING!!! $::failed FAILED TESTS ***\n" + } + close $fd +} + +proc redis_connect {server port} { + set fd [socket $server $port] + fconfigure $fd -translation binary + return $fd +} + +proc redis_write {fd buf} { + puts -nonewline $fd $buf +} + +proc redis_writenl {fd buf} { + # puts "C: $buf" + redis_write $fd $buf + redis_write $fd "\r\n" + flush $fd +} + +proc redis_readnl {fd len} { + set buf [read $fd $len] + read $fd 2 ; # discard CR LF + return $buf +} + +proc redis_bulk_read fd { + set count [redis_read_integer $fd] + if {$count eq {nil}} return {} + set len [expr {abs($count)}] + set buf [redis_readnl $fd $len] + if {$count < 0} {return "***ERROR*** $buf"} + return $buf +} + +proc redis_multi_bulk_read fd { + set count [redis_read_integer $fd] + if {$count eq {nil}} return {} + if {$count < 0} { + set len [expr {abs($count)}] + set buf [redis_readnl $fd $len] + return "***ERROR*** $buf" + } + set l {} + for {set i 0} {$i < $count} {incr i} { + lappend l [redis_bulk_read $fd] + } + return $l +} + +proc redis_read_retcode fd { + set retcode [string trim [gets $fd]] + # puts "S: $retcode" + return $retcode +} + +proc redis_read_integer fd { + string trim [gets $fd] +} + +### Actual API ### + +proc redis_set {fd key val} { + redis_writenl $fd "set $key [string length $val]\r\n$val" + redis_read_retcode $fd +} + +proc redis_setnx {fd key val} { + redis_writenl $fd "setnx $key [string length $val]\r\n$val" + redis_read_integer $fd +} + +proc redis_get {fd key} { + redis_writenl $fd "get $key" + redis_bulk_read $fd +} + +proc redis_select {fd id} { + redis_writenl $fd "select $id" + redis_read_retcode $fd +} + +proc redis_move {fd key id} { + redis_writenl $fd "move $key $id" + redis_read_integer $fd +} + +proc redis_del {fd key} { + redis_writenl $fd "del $key" + redis_read_integer $fd +} + +proc redis_keys {fd pattern} { + redis_writenl $fd "keys $pattern" + split [redis_bulk_read $fd] +} + +proc redis_dbsize {fd} { + redis_writenl $fd "dbsize" + redis_read_integer $fd +} + +proc redis_incr {fd key} { + redis_writenl $fd "incr $key" + redis_read_integer $fd +} + +proc redis_decr {fd key} { + redis_writenl $fd "decr $key" + redis_read_integer $fd +} + +proc redis_exists {fd key} { + redis_writenl $fd "exists $key" + redis_read_integer $fd +} + +proc redis_lpush {fd 
key val} { + redis_writenl $fd "lpush $key [string length $val]\r\n$val" + redis_read_retcode $fd +} + +proc redis_rpush {fd key val} { + redis_writenl $fd "rpush $key [string length $val]\r\n$val" + redis_read_retcode $fd +} + +proc redis_llen {fd key} { + redis_writenl $fd "llen $key" + redis_read_integer $fd +} + +proc redis_scard {fd key} { + redis_writenl $fd "scard $key" + redis_read_integer $fd +} + +proc redis_lindex {fd key index} { + redis_writenl $fd "lindex $key $index" + redis_bulk_read $fd +} + +proc redis_lrange {fd key first last} { + redis_writenl $fd "lrange $key $first $last" + redis_multi_bulk_read $fd +} + +proc redis_sort {fd key {params {}}} { + redis_writenl $fd "sort $key $params" + redis_multi_bulk_read $fd +} + +proc redis_ltrim {fd key first last} { + redis_writenl $fd "ltrim $key $first $last" + redis_read_retcode $fd +} + +proc redis_rename {fd key1 key2} { + redis_writenl $fd "rename $key1 $key2" + redis_read_retcode $fd +} + +proc redis_renamenx {fd key1 key2} { + redis_writenl $fd "renamenx $key1 $key2" + redis_read_integer $fd +} + +proc redis_lpop {fd key} { + redis_writenl $fd "lpop $key" + redis_bulk_read $fd +} + +proc redis_rpop {fd key} { + redis_writenl $fd "rpop $key" + redis_bulk_read $fd +} + +proc redis_lset {fd key index val} { + redis_writenl $fd "lset $key $index [string length $val]\r\n$val" + redis_read_retcode $fd +} + +proc redis_sadd {fd key val} { + redis_writenl $fd "sadd $key [string length $val]\r\n$val" + redis_read_integer $fd +} + +proc redis_srem {fd key val} { + redis_writenl $fd "srem $key [string length $val]\r\n$val" + redis_read_integer $fd +} + +proc redis_sismember {fd key val} { + redis_writenl $fd "sismember $key [string length $val]\r\n$val" + redis_read_integer $fd +} + +proc redis_sinter {fd args} { + redis_writenl $fd "sinter [join $args]\r\n" + redis_multi_bulk_read $fd +} + +proc redis_sinterstore {fd args} { + redis_writenl $fd "sinterstore [join $args]\r\n" + redis_read_retcode $fd +} + +proc redis_smembers {fd key} { + redis_writenl $fd "smembers $key\r\n" + redis_multi_bulk_read $fd +} + +proc redis_echo {fd str} { + redis_writenl $fd "echo [string length $str]\r\n$str" + redis_bulk_read $fd +} + +proc redis_save {fd} { + redis_writenl $fd "save\r\n" + redis_read_retcode $fd +} + +proc redis_flushall {fd} { + redis_writenl $fd "flushall\r\n" + redis_read_retcode $fd +} + +proc redis_flushdb {fd} { + redis_writenl $fd "flushdb\r\n" + redis_read_retcode $fd +} + +proc redis_lrem {fd key count val} { + redis_writenl $fd "lrem $key $count [string length $val]\r\n$val" + redis_read_integer $fd +} + +if {[llength $argv] == 0} { + main 127.0.0.1 6379 +} else { + main [lindex $argv 0] [lindex $argv 1] +} diff --git a/zmalloc.c b/zmalloc.c new file mode 100644 index 00000000..b5cb7d6a --- /dev/null +++ b/zmalloc.c @@ -0,0 +1,82 @@ +/* zmalloc - total amount of allocated memory aware version of malloc() + * + * Copyright (c) 2006-2009, Salvatore Sanfilippo + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#include <stdlib.h> +#include <string.h> + +static size_t used_memory = 0; + +void *zmalloc(size_t size) { + void *ptr = malloc(size+sizeof(size_t)); + + *((size_t*)ptr) = size; + used_memory += size+sizeof(size_t); + return ptr+sizeof(size_t); +} + +void *zrealloc(void *ptr, size_t size) { + void *realptr; + size_t oldsize; + void *newptr; + + if (ptr == NULL) return zmalloc(size); + realptr = ptr-sizeof(size_t); + oldsize = *((size_t*)realptr); + newptr = realloc(realptr,size+sizeof(size_t)); + if (!newptr) return NULL; + + *((size_t*)newptr) = size; + used_memory -= oldsize; + used_memory += size; + return newptr+sizeof(size_t); +} + +void zfree(void *ptr) { + void *realptr; + size_t oldsize; + + if (ptr == NULL) return; + realptr = ptr-sizeof(size_t); + oldsize = *((size_t*)realptr); + used_memory -= oldsize+sizeof(size_t); + free(realptr); +} + +char *zstrdup(const char *s) { + size_t l = strlen(s)+1; + char *p = zmalloc(l); + + memcpy(p,s,l); + return p; +} + +size_t zmalloc_used_memory(void) { + return used_memory; +} diff --git a/zmalloc.h b/zmalloc.h new file mode 100644 index 00000000..aaddb9ba --- /dev/null +++ b/zmalloc.h @@ -0,0 +1,40 @@ +/* zmalloc - total amount of allocated memory aware version of malloc() + * + * Copyright (c) 2006-2009, Salvatore Sanfilippo + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright notice, + * this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * * Neither the name of Redis nor the names of its contributors may be used + * to endorse or promote products derived from this software without + * specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" + * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE + * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef _ZMALLOC_H +#define _ZMALLOC_H + +void *zmalloc(size_t size); +void *zrealloc(void *ptr, size_t size); +void zfree(void *ptr); +char *zstrdup(const char *s); +size_t zmalloc_used_memory(void); + +#endif /* _ZMALLOC_H */ -- 2.45.2
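
As a quick illustration of the allocator introduced above (not part of the original patch), here is a minimal, hypothetical usage sketch of the zmalloc API. It assumes zmalloc.c and zmalloc.h are built together with a throwaway file such as zmalloc-demo.c (file name invented for this example), e.g. gcc -o zmalloc-demo zmalloc-demo.c zmalloc.c, and it simply prints the tracked byte count after a few operations to show how the size_t header bookkeeping behaves:

#include <stdio.h>
#include <string.h>
#include "zmalloc.h"

int main(void) {
    /* zmalloc() prefixes each block with a size_t header holding its size,
     * so the counter grows by size + sizeof(size_t) per allocation. */
    char *buf = zmalloc(100);
    memset(buf, 'x', 100);
    printf("after zmalloc(100):  %zu bytes tracked\n", zmalloc_used_memory());

    /* zrealloc() keeps the header and adjusts the counter by the size delta. */
    buf = zrealloc(buf, 200);
    printf("after zrealloc(200): %zu bytes tracked\n", zmalloc_used_memory());

    /* zstrdup() allocates strlen("hello")+1 bytes through zmalloc(). */
    char *copy = zstrdup("hello");
    printf("after zstrdup:       %zu bytes tracked\n", zmalloc_used_memory());

    /* zfree() reads the header back and subtracts block size + header size. */
    zfree(buf);
    zfree(copy);
    printf("after zfree:         %zu bytes tracked\n", zmalloc_used_memory());
    return 0;
}

Under these assumptions the final count returns to zero; the running total is exactly what zmalloc_used_memory() exposes to the rest of the server.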